repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
forwis/KVMGT-kernel
tools/perf/tests/attr.py
3174
9441
#! /usr/bin/python import os import sys import glob import optparse import tempfile import logging import shutil import ConfigParser class Fail(Exception): def __init__(self, test, msg): self.msg = msg self.test = test def getMsg(self): return '\'%s\' - %s' % (self.test.path, self.msg) class Unsup(Exception): def __init__(self, test): self.test = test def getMsg(self): return '\'%s\'' % self.test.path class Event(dict): terms = [ 'cpu', 'flags', 'type', 'size', 'config', 'sample_period', 'sample_type', 'read_format', 'disabled', 'inherit', 'pinned', 'exclusive', 'exclude_user', 'exclude_kernel', 'exclude_hv', 'exclude_idle', 'mmap', 'comm', 'freq', 'inherit_stat', 'enable_on_exec', 'task', 'watermark', 'precise_ip', 'mmap_data', 'sample_id_all', 'exclude_host', 'exclude_guest', 'exclude_callchain_kernel', 'exclude_callchain_user', 'wakeup_events', 'bp_type', 'config1', 'config2', 'branch_sample_type', 'sample_regs_user', 'sample_stack_user', ] def add(self, data): for key, val in data: log.debug(" %s = %s" % (key, val)) self[key] = val def __init__(self, name, data, base): log.debug(" Event %s" % name); self.name = name; self.group = '' self.add(base) self.add(data) def compare_data(self, a, b): # Allow multiple values in assignment separated by '|' a_list = a.split('|') b_list = b.split('|') for a_item in a_list: for b_item in b_list: if (a_item == b_item): return True elif (a_item == '*') or (b_item == '*'): return True return False def equal(self, other): for t in Event.terms: log.debug(" [%s] %s %s" % (t, self[t], other[t])); if not self.has_key(t) or not other.has_key(t): return False if not self.compare_data(self[t], other[t]): return False return True def diff(self, other): for t in Event.terms: if not self.has_key(t) or not other.has_key(t): continue if not self.compare_data(self[t], other[t]): log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) # Test file description needs to have following sections: # [config] # - just single instance in file 
# - needs to specify: # 'command' - perf command name # 'args' - special command arguments # 'ret' - expected command return value (0 by default) # # [eventX:base] # - one or multiple instances in file # - expected values assignments class Test(object): def __init__(self, path, options): parser = ConfigParser.SafeConfigParser() parser.read(path) log.warning("running '%s'" % path) self.path = path self.test_dir = options.test_dir self.perf = options.perf self.command = parser.get('config', 'command') self.args = parser.get('config', 'args') try: self.ret = parser.get('config', 'ret') except: self.ret = 0 self.expect = {} self.result = {} log.debug(" loading expected events"); self.load_events(path, self.expect) def is_event(self, name): if name.find("event") == -1: return False else: return True def load_events(self, path, events): parser_event = ConfigParser.SafeConfigParser() parser_event.read(path) # The event record section header contains 'event' word, # optionaly followed by ':' allowing to load 'parent # event' first as a base for section in filter(self.is_event, parser_event.sections()): parser_items = parser_event.items(section); base_items = {} # Read parent event if there's any if (':' in section): base = section[section.index(':') + 1:] parser_base = ConfigParser.SafeConfigParser() parser_base.read(self.test_dir + '/' + base) base_items = parser_base.items('event') e = Event(section, parser_items, base_items) events[section] = e def run_cmd(self, tempdir): cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir, self.perf, self.command, tempdir, self.args) ret = os.WEXITSTATUS(os.system(cmd)) log.info(" '%s' ret %d " % (cmd, ret)) if ret != int(self.ret): raise Unsup(self) def compare(self, expect, result): match = {} log.debug(" compare"); # For each expected event find all matching # events in result. Fail if there's not any. 
for exp_name, exp_event in expect.items(): exp_list = [] log.debug(" matching [%s]" % exp_name) for res_name, res_event in result.items(): log.debug(" to [%s]" % res_name) if (exp_event.equal(res_event)): exp_list.append(res_name) log.debug(" ->OK") else: log.debug(" ->FAIL"); log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list))) # we did not any matching event - fail if (not exp_list): exp_event.diff(res_event) raise Fail(self, 'match failure'); match[exp_name] = exp_list # For each defined group in the expected events # check we match the same group in the result. for exp_name, exp_event in expect.items(): group = exp_event.group if (group == ''): continue for res_name in match[exp_name]: res_group = result[res_name].group if res_group not in match[group]: raise Fail(self, 'group failure') log.debug(" group: [%s] matches group leader %s" % (exp_name, str(match[group]))) log.debug(" matched") def resolve_groups(self, events): for name, event in events.items(): group_fd = event['group_fd']; if group_fd == '-1': continue; for iname, ievent in events.items(): if (ievent['fd'] == group_fd): event.group = iname log.debug('[%s] has group leader [%s]' % (name, iname)) break; def run(self): tempdir = tempfile.mkdtemp(); try: # run the test script self.run_cmd(tempdir); # load events expectation for the test log.debug(" loading result events"); for f in glob.glob(tempdir + '/event*'): self.load_events(f, self.result); # resolve group_fd to event names self.resolve_groups(self.expect); self.resolve_groups(self.result); # do the expectation - results matching - both ways self.compare(self.expect, self.result) self.compare(self.result, self.expect) finally: # cleanup shutil.rmtree(tempdir) def run_tests(options): for f in glob.glob(options.test_dir + '/' + options.test): try: Test(f, options).run() except Unsup, obj: log.warning("unsupp %s" % obj.getMsg()) def setup_log(verbose): global log level = logging.CRITICAL if verbose == 1: level = logging.WARNING if 
verbose == 2: level = logging.INFO if verbose >= 3: level = logging.DEBUG log = logging.getLogger('test') log.setLevel(level) ch = logging.StreamHandler() ch.setLevel(level) formatter = logging.Formatter('%(message)s') ch.setFormatter(formatter) log.addHandler(ch) USAGE = '''%s [OPTIONS] -d dir # tests dir -p path # perf binary -t test # single test -v # verbose level ''' % sys.argv[0] def main(): parser = optparse.OptionParser(usage=USAGE) parser.add_option("-t", "--test", action="store", type="string", dest="test") parser.add_option("-d", "--test-dir", action="store", type="string", dest="test_dir") parser.add_option("-p", "--perf", action="store", type="string", dest="perf") parser.add_option("-v", "--verbose", action="count", dest="verbose") options, args = parser.parse_args() if args: parser.error('FAILED wrong arguments %s' % ' '.join(args)) return -1 setup_log(options.verbose) if not options.test_dir: print 'FAILED no -d option specified' sys.exit(-1) if not options.test: options.test = 'test*' try: run_tests(options) except Fail, obj: print "FAILED %s" % obj.getMsg(); sys.exit(-1) sys.exit(0) if __name__ == '__main__': main()
gpl-2.0
snnn/tensorflow
tensorflow/python/keras/engine/training_utils.py
4
42744
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training-related utilities. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import OrderedDict import copy import math import numpy as np import six from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend as K from tensorflow.python.keras import losses from tensorflow.python.keras import metrics as metrics_module from tensorflow.python.keras.engine import base_layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import weights_broadcast_ops from tensorflow.python.util import nest def _map_nested(data, func): """Maps each nested element using func.""" if isinstance(data, list): return [_map_nested(nested_data, func) for nested_data in data] elif isinstance(data, tuple): return tuple(_map_nested(nested_data, func) for nested_data in data) elif isinstance(data, dict): return { k: _map_nested(nested_data, func) for k, nested_data in data.items() } else: return func(data) 
def _nested_all(data, cond_func): """Checks if all elements in a nested structure satisfy cond_func.""" if isinstance(data, (tuple, list)): return all([_nested_all(nested_data, cond_func) for nested_data in data]) elif isinstance(data, dict): return all( [_nested_all(nested_data, cond_func) for nested_data in data.values()]) else: return cond_func(data) def _nested_any(data, cond_func): """Checks if any nested_elements in a nested structure satisfy cond_func.""" if isinstance(data, (tuple, list)): return any([_nested_any(nested_data, cond_func) for nested_data in data]) elif isinstance(data, dict): return any( [_nested_any(nested_data, cond_func) for nested_data in data.values()]) else: return cond_func(data) def _convert_lists_to_tuples(data): """Converts all lists to tuples, since Datasets expect tuples.""" if isinstance(data, (tuple, list)): return tuple(_convert_lists_to_tuples(nested_data) for nested_data in data) elif isinstance(data, dict): return { k: _convert_lists_to_tuples(nested_data) for k, nested_data in data.items() } else: return data def _get_batch_axis_size(data): """Returns batch axis shape for nested data.""" if isinstance(data, (tuple, list)): return _get_batch_axis_size(data[0]) elif isinstance(data, dict): return _get_batch_axis_size(list(data.values())) else: return int(data.shape[0]) def convert_to_iterator(x=None, y=None, sample_weights=None, batch_size=None, steps_per_epoch=None, epochs=1, shuffle=False, is_validation=False): """Converts NumPy arrays or EagerTensors to an EagerIterator. Combines all provided data into a single EagerIterator. Arguments: x: NumPy array or EagerTensor, or list of Numpy arrays or EagerTensors representing inputs to a model. y: Optional. NumPy array or EagerTensor, or list of Numpy arrays or EagerTensors representing targets of a model. sample_weights: Optional NumPy array or EagerTensor representing sample weights. 
batch_size: Used to batch data and calculate how many steps EagerIterator should take per epoch. steps_per_epoch: If provided, how many steps EagerIterator should take per epoch. epochs: Epochs to repeat iterator for. shuffle: Whether to shuffle data after each epoch. is_validation: Whether this call is for validation during a training (e.g., `fit()`) call. This info is used to construct error messages (if any). Raises: ValueError: if steps_per_epoch cannot be calculated from the data provided. Returns: (Iterator, steps_per_epoch). """ if isinstance(x, iterator_ops.EagerIterator): return x, steps_per_epoch if not _nested_any(sample_weights, lambda x: x is None): data = (x, y, sample_weights) elif not _nested_any(y, lambda x: x is None): data = (x, y) else: # always wrap in a tuple, so we know y, sample_weights weren't set # even when x has multiple elements data = (x,) data = _convert_lists_to_tuples(data) if steps_per_epoch is None and batch_size is not None: num_samples = _get_batch_axis_size(data) steps_per_epoch = int(math.ceil(num_samples / batch_size)) if steps_per_epoch is None: alternative_arg_name = ( 'validation_steps' if is_validation else 'steps_per_epoch') raise ValueError( 'Could not determine how to convert EagerTensors into EagerIterator. ' 'Please provide either `batch_size` or ' '`%s`.' % alternative_arg_name) # TODO(omalleyt) for NumPy arrays in graph mode # placeholder ops should be used # this is only ideal for eager mode dataset = dataset_ops.Dataset.from_tensor_slices(data) if batch_size is not None: dataset = dataset.batch(batch_size) if shuffle: dataset = dataset.shuffle(buffer_size=10000) dataset = dataset.repeat(epochs) iterator = dataset.make_one_shot_iterator() return iterator, steps_per_epoch def check_num_samples(ins, batch_size=None, steps=None, steps_name='steps'): """Determine the number of samples provided for training and evaluation. 
The number of samples is not defined when running with `steps`, in which case the number of samples is set to `None`. Arguments: ins: List of tensors to be fed to the Keras function. batch_size: Integer batch size or `None` if not defined. steps: Total number of steps (batches of samples) before declaring `_predict_loop` finished. Ignored with the default value of `None`. steps_name: The public API's parameter name for `steps`. Raises: ValueError: when `steps` is `None` and the attribute `ins.shape` does not exist. Also raises ValueError when `steps` is not `None` and `batch_size` is not `None` because they are mutually exclusive. Returns: When steps is `None`, returns the number of samples to be processed based on the size of the first dimension of the first input numpy array. When steps is not `None` and `batch_size` is `None`, returns `None`. Raises: ValueError: In case of invalid arguments. """ if steps is not None and batch_size is not None: raise ValueError( 'If ' + steps_name + ' is set, the `batch_size` must be None.') if check_steps_argument(ins, steps, steps_name): return None if hasattr(ins[0], 'shape'): return int(ins[0].shape[0]) return None # Edge case where ins == [static_learning_phase] def standardize_single_array(x): if x is None: return None if x.shape is not None and len(x.shape) == 1: if tensor_util.is_tensor(x): return array_ops.expand_dims(x, axis=1) else: return np.expand_dims(x, 1) return x def standardize_input_data(data, names, shapes=None, check_batch_axis=True, exception_prefix=''): """Normalizes inputs and targets provided by users. Users may pass data as a list of arrays, dictionary of arrays, or as a single array. We normalize this to an ordered list of arrays (same order as `names`), while checking that the provided arrays have shapes that match the network's expectations. Arguments: data: User-provided input data (polymorphic). names: List of expected array names. shapes: Optional list of expected array shapes. 
check_batch_axis: Boolean; whether to check that the batch axis of the arrays matches the expected value found in `shapes`. exception_prefix: String prefix used for exception formatting. Returns: List of standardized input arrays (one array per model input). Raises: ValueError: in case of improperly formatted user-provided data. """ if not names: if (data is not None and hasattr(data, '__len__') and len(data) and not isinstance(data, dict)): raise ValueError('Error when checking model ' + exception_prefix + ': ' 'expected no data, but got:', data) return [] if data is None: return [None for _ in range(len(names))] if isinstance(data, dict): try: data = [ data[x].values if data[x].__class__.__name__ == 'DataFrame' else data[x] for x in names ] except KeyError as e: raise ValueError('No data provided for "' + e.args[0] + '". Need data ' 'for each key in: ' + str(names)) elif isinstance(data, (list, tuple)): if isinstance(data[0], (list, tuple)): data = [np.asarray(d) for d in data] elif len(names) == 1 and isinstance(data[0], (float, int)): data = [np.asarray(data)] else: data = [ x.values if x.__class__.__name__ == 'DataFrame' else x for x in data ] else: data = data.values if data.__class__.__name__ == 'DataFrame' else data data = [data] data = [standardize_single_array(x) for x in data] if len(data) != len(names): if data and hasattr(data[0], 'shape'): raise ValueError('Error when checking model ' + exception_prefix + ': the list of Numpy arrays that you are passing to ' 'your model is not the size the model expected. ' 'Expected to see ' + str(len(names)) + ' array(s), ' 'but instead got the following list of ' + str(len(data)) + ' arrays: ' + str(data)[:200] + '...') elif len(names) > 1: raise ValueError( 'Error when checking model ' + exception_prefix + ': you are passing a list as input to your model, ' 'but the model expects a list of ' + str(len(names)) + ' Numpy arrays instead. 
The list you passed was: ' + str(data)[:200]) elif len(data) == 1 and not hasattr(data[0], 'shape'): raise TypeError('Error when checking model ' + exception_prefix + ': data should be a Numpy array, or list/dict of ' 'Numpy arrays. Found: ' + str(data)[:200] + '...') elif len(names) == 1: data = [np.asarray(data)] # Check shapes compatibility. if shapes: for i in range(len(names)): if shapes[i] is not None: if tensor_util.is_tensor(data[i]): tensorshape = data[i].get_shape() if not tensorshape: continue data_shape = tuple(tensorshape.as_list()) else: data_shape = data[i].shape shape = shapes[i] if len(data_shape) != len(shape): raise ValueError('Error when checking ' + exception_prefix + ': expected ' + names[i] + ' to have ' + str(len(shape)) + ' dimensions, but got array ' 'with shape ' + str(data_shape)) if not check_batch_axis: data_shape = data_shape[1:] shape = shape[1:] for dim, ref_dim in zip(data_shape, shape): if ref_dim != dim and ref_dim is not None and dim is not None: raise ValueError( 'Error when checking ' + exception_prefix + ': expected ' + names[i] + ' to have shape ' + str(shape) + ' but got array with shape ' + str(data_shape)) return data def standardize_sample_or_class_weights(x_weight, output_names, weight_type): """Maps `sample_weight` or `class_weight` to model outputs. Arguments: x_weight: User-provided `sample_weight` or `class_weight` argument. output_names: List of output names (strings) in the model. weight_type: A string used purely for exception printing. Returns: A list of `sample_weight` or `class_weight` where there are exactly one element per model output. Raises: ValueError: In case of invalid user-provided argument. 
""" if x_weight is None or (isinstance(x_weight, list) and len(x_weight) == 0): # pylint: disable=g-explicit-length-test return [None for _ in output_names] if len(output_names) == 1: if isinstance(x_weight, list) and len(x_weight) == 1: return x_weight if isinstance(x_weight, dict) and output_names[0] in x_weight: return [x_weight[output_names[0]]] else: return [x_weight] if isinstance(x_weight, list): if len(x_weight) != len(output_names): raise ValueError('Provided `' + weight_type + '` was a list of ' + str(len(x_weight)) + ' elements, but the model has ' + str(len(output_names)) + ' outputs. ' 'You should provide one `' + weight_type + '`' 'array per model output.') return x_weight if isinstance(x_weight, dict): x_weights = [] for name in output_names: x_weights.append(x_weight.get(name)) return x_weights else: raise TypeError( 'The model has multiple outputs, so `' + weight_type + '` ' 'should be either a list or a dict. ' 'Provided `' + weight_type + '` type not understood: ' + str(x_weight)) def standardize_class_weights(class_weight, output_names): return standardize_sample_or_class_weights(class_weight, output_names, 'class_weight') def standardize_sample_weights(sample_weight, output_names): return standardize_sample_or_class_weights(sample_weight, output_names, 'sample_weight') def check_array_lengths(inputs, targets, weights=None): """Does user input validation for numpy arrays. Arguments: inputs: list of Numpy arrays of inputs. targets: list of Numpy arrays of targets. weights: list of Numpy arrays of sample weights. Raises: ValueError: in case of incorrectly formatted data. 
""" def set_of_lengths(x): # Returns a set with the variation between # different shapes, with None => 0 if x is None: return {} else: return set([y.shape[0] for y in x if y is not None and not tensor_util.is_tensor(y)]) set_x = set_of_lengths(inputs) set_y = set_of_lengths(targets) set_w = set_of_lengths(weights) if len(set_x) > 1: raise ValueError('All input arrays (x) should have ' 'the same number of samples. Got array shapes: ' + str([x.shape for x in inputs])) if len(set_y) > 1: raise ValueError('All target arrays (y) should have ' 'the same number of samples. Got array shapes: ' + str([y.shape for y in targets])) if set_x and set_y and list(set_x)[0] != list(set_y)[0]: raise ValueError('Input arrays should have ' 'the same number of samples as target arrays. ' 'Found ' + str(list(set_x)[0]) + ' input samples ' 'and ' + str(list(set_y)[0]) + ' target samples.') if len(set_w) > 1: raise ValueError('All sample_weight arrays should have ' 'the same number of samples. Got array shapes: ' + str([w.shape for w in weights])) if set_y and set_w and list(set_y)[0] != list(set_w)[0]: raise ValueError('Sample_weight arrays should have ' 'the same number of samples as target arrays. Got ' + str(list(set_y)[0]) + ' input samples and ' + str(list(set_w)[0]) + ' target samples.') def check_loss_and_target_compatibility(targets, loss_fns, output_shapes): """Does validation on the compatibility of targets and loss functions. This helps prevent users from using loss functions incorrectly. This check is purely for UX purposes. Arguments: targets: list of Numpy arrays of targets. loss_fns: list of loss functions. output_shapes: list of shapes of model outputs. Raises: ValueError: if a loss function or target array is incompatible with an output. 
""" key_losses = { losses.mean_squared_error, losses.binary_crossentropy, losses.categorical_crossentropy } for y, loss, shape in zip(targets, loss_fns, output_shapes): if y is None or loss is None or tensor_util.is_tensor(y): continue if loss is losses.categorical_crossentropy: if y.shape[-1] == 1: raise ValueError('You are passing a target array of shape ' + str( y.shape) + ' while using as loss `categorical_crossentropy`. ' '`categorical_crossentropy` expects ' 'targets to be binary matrices (1s and 0s) ' 'of shape (samples, classes). ' 'If your targets are integer classes, ' 'you can convert them to the expected format via:\n' '```\n' 'from keras.utils import to_categorical\n' 'y_binary = to_categorical(y_int)\n' '```\n' '\n' 'Alternatively, you can use the loss function ' '`sparse_categorical_crossentropy` instead, ' 'which does expect integer targets.') if loss in key_losses: for target_dim, out_dim in zip(y.shape[1:], shape[1:]): if out_dim is not None and target_dim != out_dim: raise ValueError('A target array with shape ' + str(y.shape) + ' was passed for an output of shape ' + str(shape) + ' while using as loss `' + loss.__name__ + '`. ' 'This loss expects ' 'targets to have the same shape ' 'as the output.') def collect_per_output_metric_info(metrics, output_names, output_shapes, loss_fns, sample_weights=None): """Maps metric names and functions to model outputs. Arguments: metrics: a list or dict of metric functions. output_names: a list of the names (strings) of model outputs. output_shapes: a list of the shapes (strings) of model outputs. loss_fns: a list of the loss functions corresponding to the model outputs. sample_weights: a list of weights to be applied on the model outputs. Returns: A list (one entry per model output) of dicts. 
For instance, if the model has 2 outputs, and for the first output we want to compute "binary_accuracy" and "binary_crossentropy", and just "binary_accuracy" for the second output, the list would look like: `[[('acc', binary_accuracy()), ('ce', binary_crossentropy())], [('acc', binary_accuracy())]]` Raises: TypeError: if an incorrect type is passed for the `metrics` argument. """ if not metrics: return [{} for _ in output_names] if isinstance(metrics, list): # we then apply all metrics to all outputs. nested_metrics = [copy.copy(metrics) for _ in output_names] elif isinstance(metrics, dict): nested_metrics = [] for name in output_names: output_metrics = metrics.get(name, []) if not isinstance(output_metrics, list): output_metrics = [output_metrics] nested_metrics.append(output_metrics) else: raise TypeError('Type of `metrics` argument not understood. ' 'Expected a list or dictionary, found: ' + str(metrics)) per_output_metrics = [] for i, metrics in enumerate(nested_metrics): metrics_dict = OrderedDict() for metric in metrics: weighted = False if (sample_weights is None) else ( sample_weights[i] is not None) metric_name = get_metric_name(metric, weighted) metric_fn = get_metric_function( metric, output_shape=output_shapes[i], loss_fn=loss_fns[i]) metrics_dict[metric_name] = metric_fn per_output_metrics.append(metrics_dict) return per_output_metrics def batch_shuffle(index_array, batch_size): """Shuffles an array in a batch-wise fashion. Useful for shuffling HDF5 arrays (where one cannot access arbitrary indices). Arguments: index_array: array of indices to be shuffled. batch_size: integer. Returns: The `index_array` array, shuffled in a batch-wise fashion. 
""" batch_count = int(len(index_array) / batch_size) # to reshape we need to be cleanly divisible by batch size # we stash extra items and reappend them after shuffling last_batch = index_array[batch_count * batch_size:] index_array = index_array[:batch_count * batch_size] index_array = index_array.reshape((batch_count, batch_size)) np.random.shuffle(index_array) index_array = index_array.flatten() return np.append(index_array, last_batch) def weighted_masked_objective(fn): """Adds support for masking and sample-weighting to an objective function. It transforms an objective function `fn(y_true, y_pred)` into a sample-weighted, cost-masked objective function `fn(y_true, y_pred, weights, mask)`. Arguments: fn: The objective function to wrap, with signature `fn(y_true, y_pred)`. Returns: A function with signature `fn(y_true, y_pred, weights, mask)`. """ if fn is None: return None def weighted(y_true, y_pred, weights, mask=None): """Wrapper function. Arguments: y_true: `y_true` argument of `fn`. y_pred: `y_pred` argument of `fn`. weights: Weights tensor. mask: Mask tensor. Returns: Scalar tensor. """ # score_array has ndim >= 2 score_array = fn(y_true, y_pred) if mask is not None: mask = math_ops.cast(mask, y_pred.dtype) # Update weights with mask. if weights is None: weights = mask else: # Update shape of weights if possible before adding mask. # Update dimensions of weights to match with mask if possible. mask, _, weights = metrics_module.squeeze_or_expand_dimensions( mask, None, weights) try: # Broadcast weights if possible. weights = weights_broadcast_ops.broadcast_weights(weights, mask) weights *= mask except ValueError: score_array *= mask score_array /= K.mean(mask) # TODO(psv): Handle case when mask and weight shapes are not # compatible. # Apply sample weighting. if weights is not None: # Update dimensions of weights to match with values if possible. 
score_array, _, weights = metrics_module.squeeze_or_expand_dimensions( score_array, None, weights) try: # Broadcast weights if possible. weights = weights_broadcast_ops.broadcast_weights(weights, score_array) except ValueError: # Reduce values to same ndim as weight array. ndim = K.ndim(score_array) weight_ndim = K.ndim(weights) score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim))) score_array = math_ops.multiply(score_array, weights) score_array = math_ops.reduce_sum(score_array) weights = math_ops.reduce_sum(weights) score_array = metrics_module.safe_div(score_array, weights) return K.mean(score_array) return weighted def standardize_weights(y, sample_weight=None, class_weight=None, sample_weight_mode=None): """Performs sample weight validation and standardization. Everything gets normalized to a single sample-wise (or timestep-wise) weight array. Arguments: y: Numpy array of model targets to be weighted. sample_weight: User-provided `sample_weight` argument. class_weight: User-provided `class_weight` argument. sample_weight_mode: One of `None` or `"temporal"`. `"temporal"` indicated that we expect 2D weight data that will be applied to the last 2 dimensions of the targets (i.e. we are weighting timesteps, not samples). Returns: A numpy array of target weights, one entry per sample to weight. Raises: ValueError: In case of invalid user-provided arguments. """ # Iterator may return sample_weight as 1-tuple if isinstance(sample_weight, tuple): sample_weight = sample_weight[0] if sample_weight_mode is not None: if sample_weight_mode != 'temporal': raise ValueError('"sample_weight_mode ' 'should be None or "temporal". ' 'Found: ' + str(sample_weight_mode)) if len(y.shape) < 3: raise ValueError('Found a sample_weight array for ' 'an input with shape ' + str(y.shape) + '. ' 'Timestep-wise sample weighting (use of ' 'sample_weight_mode="temporal") is restricted to ' 'outputs that are at least 3D, i.e. 
that have ' 'a time dimension.') if sample_weight is not None and len(sample_weight.shape) != 2: raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + '. ' 'In order to use timestep-wise sample weighting, ' 'you should pass a 2D sample_weight array.') else: if sample_weight is not None and len(sample_weight.shape) != 1: raise ValueError('Found a sample_weight array with shape ' + str(sample_weight.shape) + '. ' 'In order to use timestep-wise sample weights, ' 'you should specify ' 'sample_weight_mode="temporal" ' 'in compile(). If you just mean to use ' 'sample-wise weights, make sure your ' 'sample_weight array is 1D.') if sample_weight is not None: if len(sample_weight.shape) > len(y.shape): raise ValueError( 'Found a sample_weight with shape' + str(sample_weight.shape) + '.' 'Expected sample_weight with rank ' 'less than or equal to ' + str(len(y.shape))) if (not tensor_util.is_tensor(sample_weight) and y.shape[:sample_weight.ndim] != sample_weight.shape): raise ValueError( 'Found a sample_weight array with shape ' + str(sample_weight.shape) + ' for an input with shape ' + str(y.shape) + '. ' 'sample_weight cannot be broadcast.') return sample_weight elif isinstance(class_weight, dict): if len(y.shape) > 2: raise ValueError('`class_weight` not supported for ' '3+ dimensional targets.') if y.shape[1] > 1: y_classes = np.argmax(y, axis=1) elif y.shape[1] == 1: y_classes = np.reshape(y, y.shape[0]) else: y_classes = y weights = np.asarray( [class_weight[cls] for cls in y_classes if cls in class_weight]) if len(weights) != len(y_classes): # subtract the sets to pick all missing classes existing_classes = set(y_classes) existing_class_weight = set(class_weight.keys()) raise ValueError('`class_weight` must contain all classes in the data.' ' The classes %s exist in the data but not in ' '`class_weight`.' 
% (existing_classes - existing_class_weight)) return weights else: return None def has_symbolic_tensors(ls): if context.executing_eagerly(): return False return has_tensors(ls) def has_tensors(ls): if isinstance(ls, (list, tuple)): return any(tensor_util.is_tensor(v) for v in ls) if isinstance(ls, dict): return any(tensor_util.is_tensor(v) for _, v in six.iteritems(ls)) return tensor_util.is_tensor(ls) def get_metric_name(metric, weighted=False): """Returns the name corresponding to the given metric input. Arguments: metric: Metric function name or reference. weighted: Boolean indicating if the given metric is weighted. Returns: The metric name. """ metric_name_prefix = 'weighted_' if weighted else '' if metric in ('accuracy', 'acc', 'crossentropy', 'ce'): if metric in ('accuracy', 'acc'): suffix = 'acc' elif metric in ('crossentropy', 'ce'): suffix = 'ce' else: metric_fn = metrics_module.get(metric) # Get metric name as string if hasattr(metric_fn, 'name'): suffix = metric_fn.name else: suffix = metric_fn.__name__ metric_name = metric_name_prefix + suffix return metric_name def get_metric_function(metric, output_shape=None, loss_fn=None): """Returns the metric function corresponding to the given metric input. Arguments: metric: Metric function name or reference. output_shape: The shape of the output that this metric will be calculated for. loss_fn: The loss function used. Returns: The metric function. 
""" if metric in ['accuracy', 'acc']: if output_shape[-1] == 1 or loss_fn == losses.binary_crossentropy: return metrics_module.binary_accuracy # case: binary accuracy elif loss_fn == losses.sparse_categorical_crossentropy: # case: categorical accuracy with sparse targets return metrics_module.sparse_categorical_accuracy return metrics_module.categorical_accuracy # case: categorical accuracy elif metric in ['crossentropy', 'ce']: if output_shape[-1] == 1 or loss_fn == losses.binary_crossentropy: return metrics_module.binary_crossentropy # case: binary cross-entropy elif loss_fn == losses.sparse_categorical_crossentropy: # case: categorical cross-entropy with sparse targets return metrics_module.sparse_categorical_crossentropy # case: categorical cross-entropy return metrics_module.categorical_crossentropy return metrics_module.get(metric) def validate_iterator_input(x, y, sample_weight, validation_split=None): """Validates user input arguments when a dataset iterator is passed. Arguments: x: Input data. A `tf.data` dataset iterator. y: Target data. It could be either Numpy array(s) or TensorFlow tensor(s). Expected to be `None` when `x` is a dataset iterator. sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`. Expected to be `None` when `x` is a dataset iterator validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. Expected to be `None` when `x` is a dataset iterator. Raises: ValueError: if argument `y` or `sample_weight` or `validation_split` are provided by user. """ if y is not None: raise ValueError('You passed a dataset or dataset iterator (%s) as ' 'input `x` to your model. In that case, you should ' 'not specify a target (`y`) argument, since the dataset ' 'or dataset iterator generates both input data and ' 'target data. 
' 'Received: %s' % (x, y)) if sample_weight is not None: raise ValueError('`sample_weight` argument is not supported when input ' '`x` is a dataset or a dataset iterator. Instead, you' 'can provide sample_weight as the third element of your' 'dataset, i.e. (inputs, targets, sample_weight). ' 'Received: x=%s, sample_weight=%s' % (x, sample_weight)) if validation_split is not None and validation_split != 0.0: raise ValueError( '`validation_split` argument is not supported when ' 'input `x` is a dataset or a dataset iterator. ' 'Received: x=%s, validation_split=%f' % (x, validation_split)) def check_generator_arguments(y=None, sample_weight=None): """Validates arguments passed when using a generator.""" if y is not None: raise ValueError('`y` argument is not supported when data is' 'a generator or Sequence instance. Instead pass targets' ' as the second element of the generator.') if sample_weight is not None: raise ValueError('`sample_weight` argument is not supported when data is' 'a generator or Sequence instance. Instead pass sample' ' weights as the third element of the generator.') def check_steps_argument(input_data, steps, steps_name): """Validates `steps` argument based on input data's type. The cases when `steps` value must be provided are when 1. input data passed is an iterator. 2. model was built on top of symbolic tensors, input data is not required and is `None`. 3. input data passed is a symbolic tensor. Arguments: input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or tf.data.Dataset iterator or `None`. steps: Integer or `None`. Total number of steps (batches of samples) to execute. steps_name: The public API's parameter name for `steps`. Returns: boolean, True if `steps` argument is required, else False. Raises: ValueError: if `steps` argument is required for given input data type but not provided. 
""" is_x_iterator = ( isinstance(input_data, iterator_ops.Iterator) or isinstance(input_data, iterator_ops.EagerIterator)) if (input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or (isinstance(input_data, list) and not input_data)): if steps is None: input_type_str = 'iterators' if is_x_iterator else 'data tensors' raise ValueError('When using {input_type} as input to a model, you should' ' specify the `{steps_name}` argument.'.format( input_type=input_type_str, steps_name=steps_name)) return True return False def cast_single_tensor(x): if tensor_util.is_tensor(x) and x.dtype.is_floating: return math_ops.cast(x, dtype=K.floatx()) return x def cast_if_floating_dtype(x): """Casts the given data tensors to the default floating point type. Casts only if the input is already a floating point type. Args: x: tensor or list/tuple of tensors. Returns: Converted input. Raises: RuntimeError: if data isn't tensors. """ if not has_tensors(x): raise RuntimeError( 'Please provide tensors for casting, got: {x}'.format(x=x)) return nest.map_structure(cast_single_tensor, x) def get_output_sample_weight_and_mode(skip_target_weighing_indices, sample_weight_mode, output_name, output_index): """Returns the sample weight and weight mode for a single output.""" if output_index in skip_target_weighing_indices: return None, None if sample_weight_mode == 'temporal': default_value = [[1.]] shape = [None, None] mode = 'temporal' else: default_value = [1.] shape = [None] mode = None if context.executing_eagerly(): weight = None else: weight = array_ops.placeholder_with_default( constant_op.constant(default_value, dtype=K.floatx()), shape=shape, name=output_name + '_sample_weights') return weight, mode def prepare_sample_weights(output_names, sample_weight_mode, skip_target_weighing_indices): """Prepares sample weights for the model. Args: output_names: List of model output names. sample_weight_mode: sample weight mode user input passed from compile API. 
skip_target_weighing_indices: Indices of output for which sample weights should be skipped. Returns: A pair of list of sample weights and sample weight modes (one for each output). Raises: ValueError: In case of invalid `sample_weight_mode` input. """ sample_weights = [] sample_weight_modes = [] if isinstance(sample_weight_mode, dict): unknown_output = set(sample_weight_mode.keys()) - set(output_names) if unknown_output: raise ValueError('Unknown entry in ' 'sample_weight_mode dictionary: "' + unknown_output + '". Only expected the following keys: ' + str(output_names)) for i, name in enumerate(output_names): if (i not in skip_target_weighing_indices and name not in sample_weight_mode): raise ValueError('Output missing from sample_weight_modes dictionary') weight, mode = get_output_sample_weight_and_mode( skip_target_weighing_indices, sample_weight_mode.get(name), name, i) sample_weights.append(weight) sample_weight_modes.append(mode) elif isinstance(sample_weight_mode, list): if len(sample_weight_mode) != len(output_names): raise ValueError('When passing a list as sample_weight_mode, ' 'it should have one entry per model output. ' 'The model has ' + str(len(output_names)) + ' outputs, but you passed ' + str(len(sample_weight_mode)) + 'sample_weight_modes') for i, name in enumerate(output_names): weight, mode = get_output_sample_weight_and_mode( skip_target_weighing_indices, sample_weight_mode[i], name, i) sample_weights.append(weight) sample_weight_modes.append(mode) else: for i, name in enumerate(output_names): weight, mode = get_output_sample_weight_and_mode( skip_target_weighing_indices, sample_weight_mode, name, i) sample_weights.append(weight) sample_weight_modes.append(mode) return sample_weights, sample_weight_modes # TODO(rohanj): This is a hack to get around not depending on feature_column and # create a cyclical dependency. 
Figure out a cleaner solution def is_feature_layer(layer): """Returns whether `layer` is a FeatureLayer or not.""" return getattr(layer, '_is_feature_layer', False) class ModelInputs(object): """Encapsulates model inputs. Allows for transforming model inputs while keeping the same structure. """ def __init__(self, inputs): self._inputs = inputs self._is_dict = isinstance(self._inputs, dict) self._is_single_input = not isinstance(self._inputs, (list, tuple, dict)) self._flattened_inputs = [] self._input_names = [] if isinstance(self._inputs, dict): for k in sorted(self._inputs.keys()): self._flattened_inputs.append(self._inputs[k]) self._input_names.append(k) else: self._flattened_inputs = nest.flatten(self._inputs) self._input_names = [ 'input_%d' % (i + 1) for i in range(len(self._flattened_inputs)) ] assert len(self._input_names) == len(self._flattened_inputs) def get_input_names(self): """Returns keys to name inputs by. In case inputs provided were a list, tuple or single entry, we make up a key 'input_%d'. For dictionary case, we return a sorted list of keys. """ return self._input_names def _get(self, return_single_as_list=False): """Returns provided inputs, potentially transformed. Inputs are returned in the same format they were provided i.e. lists are returned as lists, single entries as single entries (unless `return_single_as_list` is true), dictionaries as dictionaries. Args: return_single_as_list: Returns a list of size 1 for single entry case. 
""" if self._is_dict: return dict(zip(self._input_names, self._flattened_inputs)) if self._is_single_input and not return_single_as_list: return self._flattened_inputs[0] return self._flattened_inputs def get_input_values(self): """Returns input values passed in.""" if context.executing_eagerly(): for i in range(len(self._flattened_inputs)): v = self._flattened_inputs[i] if tensor_util.is_tensor(v): v = cast_single_tensor(v) else: v = ops.convert_to_tensor(v, dtype=K.floatx()) self._flattened_inputs[i] = v return self._get(return_single_as_list=False) def get_symbolic_inputs(self, return_single_as_list=False): """Returns inputs to be set as self.inputs for a model.""" for i in range(len(self._flattened_inputs)): k = self._input_names[i] v = self._flattened_inputs[i] if context.executing_eagerly(): v = base_layer.DeferredTensor( shape=(None for _ in v.shape), dtype=v.dtype) else: if isinstance(v, list): v = np.asarray(v) if v.ndim == 1: v = np.expand_dims(v, 1) if isinstance(v, (np.ndarray)): # We fix the placeholder shape except the batch size. # This is suboptimal, but it is the best we can do with the info # we have. The user should call `model._set_inputs(placeholders)` # to specify custom placeholders if the need arises. shape = (None,) + v.shape[1:] v = K.placeholder(shape=shape, name=k) self._flattened_inputs[i] = v return self._get(return_single_as_list) def as_dict(self): """An iterable over a dictionary version of inputs.""" for i in range(len(self._flattened_inputs)): yield self._input_names[i], self._flattened_inputs[i] def as_list(self): """Returning the inputs as a list.""" return self._flattened_inputs
apache-2.0
ericvandenbergfb/spark
python/pyspark/ml/param/shared.py
14
23785
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py. from pyspark.ml.param import * class HasMaxIter(Params): """ Mixin for param maxIter: max number of iterations (>= 0). """ maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0).", typeConverter=TypeConverters.toInt) def __init__(self): super(HasMaxIter, self).__init__() def setMaxIter(self, value): """ Sets the value of :py:attr:`maxIter`. """ return self._set(maxIter=value) def getMaxIter(self): """ Gets the value of maxIter or its default value. """ return self.getOrDefault(self.maxIter) class HasRegParam(Params): """ Mixin for param regParam: regularization parameter (>= 0). """ regParam = Param(Params._dummy(), "regParam", "regularization parameter (>= 0).", typeConverter=TypeConverters.toFloat) def __init__(self): super(HasRegParam, self).__init__() def setRegParam(self, value): """ Sets the value of :py:attr:`regParam`. """ return self._set(regParam=value) def getRegParam(self): """ Gets the value of regParam or its default value. """ return self.getOrDefault(self.regParam) class HasFeaturesCol(Params): """ Mixin for param featuresCol: features column name. 
""" featuresCol = Param(Params._dummy(), "featuresCol", "features column name.", typeConverter=TypeConverters.toString) def __init__(self): super(HasFeaturesCol, self).__init__() self._setDefault(featuresCol='features') def setFeaturesCol(self, value): """ Sets the value of :py:attr:`featuresCol`. """ return self._set(featuresCol=value) def getFeaturesCol(self): """ Gets the value of featuresCol or its default value. """ return self.getOrDefault(self.featuresCol) class HasLabelCol(Params): """ Mixin for param labelCol: label column name. """ labelCol = Param(Params._dummy(), "labelCol", "label column name.", typeConverter=TypeConverters.toString) def __init__(self): super(HasLabelCol, self).__init__() self._setDefault(labelCol='label') def setLabelCol(self, value): """ Sets the value of :py:attr:`labelCol`. """ return self._set(labelCol=value) def getLabelCol(self): """ Gets the value of labelCol or its default value. """ return self.getOrDefault(self.labelCol) class HasPredictionCol(Params): """ Mixin for param predictionCol: prediction column name. """ predictionCol = Param(Params._dummy(), "predictionCol", "prediction column name.", typeConverter=TypeConverters.toString) def __init__(self): super(HasPredictionCol, self).__init__() self._setDefault(predictionCol='prediction') def setPredictionCol(self, value): """ Sets the value of :py:attr:`predictionCol`. """ return self._set(predictionCol=value) def getPredictionCol(self): """ Gets the value of predictionCol or its default value. """ return self.getOrDefault(self.predictionCol) class HasProbabilityCol(Params): """ Mixin for param probabilityCol: Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities. """ probabilityCol = Param(Params._dummy(), "probabilityCol", "Column name for predicted class conditional probabilities. 
Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.", typeConverter=TypeConverters.toString) def __init__(self): super(HasProbabilityCol, self).__init__() self._setDefault(probabilityCol='probability') def setProbabilityCol(self, value): """ Sets the value of :py:attr:`probabilityCol`. """ return self._set(probabilityCol=value) def getProbabilityCol(self): """ Gets the value of probabilityCol or its default value. """ return self.getOrDefault(self.probabilityCol) class HasRawPredictionCol(Params): """ Mixin for param rawPredictionCol: raw prediction (a.k.a. confidence) column name. """ rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", typeConverter=TypeConverters.toString) def __init__(self): super(HasRawPredictionCol, self).__init__() self._setDefault(rawPredictionCol='rawPrediction') def setRawPredictionCol(self, value): """ Sets the value of :py:attr:`rawPredictionCol`. """ return self._set(rawPredictionCol=value) def getRawPredictionCol(self): """ Gets the value of rawPredictionCol or its default value. """ return self.getOrDefault(self.rawPredictionCol) class HasInputCol(Params): """ Mixin for param inputCol: input column name. """ inputCol = Param(Params._dummy(), "inputCol", "input column name.", typeConverter=TypeConverters.toString) def __init__(self): super(HasInputCol, self).__init__() def setInputCol(self, value): """ Sets the value of :py:attr:`inputCol`. """ return self._set(inputCol=value) def getInputCol(self): """ Gets the value of inputCol or its default value. """ return self.getOrDefault(self.inputCol) class HasInputCols(Params): """ Mixin for param inputCols: input column names. 
""" inputCols = Param(Params._dummy(), "inputCols", "input column names.", typeConverter=TypeConverters.toListString) def __init__(self): super(HasInputCols, self).__init__() def setInputCols(self, value): """ Sets the value of :py:attr:`inputCols`. """ return self._set(inputCols=value) def getInputCols(self): """ Gets the value of inputCols or its default value. """ return self.getOrDefault(self.inputCols) class HasOutputCol(Params): """ Mixin for param outputCol: output column name. """ outputCol = Param(Params._dummy(), "outputCol", "output column name.", typeConverter=TypeConverters.toString) def __init__(self): super(HasOutputCol, self).__init__() self._setDefault(outputCol=self.uid + '__output') def setOutputCol(self, value): """ Sets the value of :py:attr:`outputCol`. """ return self._set(outputCol=value) def getOutputCol(self): """ Gets the value of outputCol or its default value. """ return self.getOrDefault(self.outputCol) class HasNumFeatures(Params): """ Mixin for param numFeatures: number of features. """ numFeatures = Param(Params._dummy(), "numFeatures", "number of features.", typeConverter=TypeConverters.toInt) def __init__(self): super(HasNumFeatures, self).__init__() def setNumFeatures(self, value): """ Sets the value of :py:attr:`numFeatures`. """ return self._set(numFeatures=value) def getNumFeatures(self): """ Gets the value of numFeatures or its default value. """ return self.getOrDefault(self.numFeatures) class HasCheckpointInterval(Params): """ Mixin for param checkpointInterval: set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. """ checkpointInterval = Param(Params._dummy(), "checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 
10 means that the cache will get checkpointed every 10 iterations.", typeConverter=TypeConverters.toInt) def __init__(self): super(HasCheckpointInterval, self).__init__() def setCheckpointInterval(self, value): """ Sets the value of :py:attr:`checkpointInterval`. """ return self._set(checkpointInterval=value) def getCheckpointInterval(self): """ Gets the value of checkpointInterval or its default value. """ return self.getOrDefault(self.checkpointInterval) class HasSeed(Params): """ Mixin for param seed: random seed. """ seed = Param(Params._dummy(), "seed", "random seed.", typeConverter=TypeConverters.toInt) def __init__(self): super(HasSeed, self).__init__() self._setDefault(seed=hash(type(self).__name__)) def setSeed(self, value): """ Sets the value of :py:attr:`seed`. """ return self._set(seed=value) def getSeed(self): """ Gets the value of seed or its default value. """ return self.getOrDefault(self.seed) class HasTol(Params): """ Mixin for param tol: the convergence tolerance for iterative algorithms (>= 0). """ tol = Param(Params._dummy(), "tol", "the convergence tolerance for iterative algorithms (>= 0).", typeConverter=TypeConverters.toFloat) def __init__(self): super(HasTol, self).__init__() def setTol(self, value): """ Sets the value of :py:attr:`tol`. """ return self._set(tol=value) def getTol(self): """ Gets the value of tol or its default value. """ return self.getOrDefault(self.tol) class HasStepSize(Params): """ Mixin for param stepSize: Step size to be used for each iteration of optimization (>= 0). """ stepSize = Param(Params._dummy(), "stepSize", "Step size to be used for each iteration of optimization (>= 0).", typeConverter=TypeConverters.toFloat) def __init__(self): super(HasStepSize, self).__init__() def setStepSize(self, value): """ Sets the value of :py:attr:`stepSize`. """ return self._set(stepSize=value) def getStepSize(self): """ Gets the value of stepSize or its default value. 
""" return self.getOrDefault(self.stepSize) class HasHandleInvalid(Params): """ Mixin for param handleInvalid: how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later. """ handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.", typeConverter=TypeConverters.toString) def __init__(self): super(HasHandleInvalid, self).__init__() def setHandleInvalid(self, value): """ Sets the value of :py:attr:`handleInvalid`. """ return self._set(handleInvalid=value) def getHandleInvalid(self): """ Gets the value of handleInvalid or its default value. """ return self.getOrDefault(self.handleInvalid) class HasElasticNetParam(Params): """ Mixin for param elasticNetParam: the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty. """ elasticNetParam = Param(Params._dummy(), "elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", typeConverter=TypeConverters.toFloat) def __init__(self): super(HasElasticNetParam, self).__init__() self._setDefault(elasticNetParam=0.0) def setElasticNetParam(self, value): """ Sets the value of :py:attr:`elasticNetParam`. """ return self._set(elasticNetParam=value) def getElasticNetParam(self): """ Gets the value of elasticNetParam or its default value. """ return self.getOrDefault(self.elasticNetParam) class HasFitIntercept(Params): """ Mixin for param fitIntercept: whether to fit an intercept term. 
""" fitIntercept = Param(Params._dummy(), "fitIntercept", "whether to fit an intercept term.", typeConverter=TypeConverters.toBoolean) def __init__(self): super(HasFitIntercept, self).__init__() self._setDefault(fitIntercept=True) def setFitIntercept(self, value): """ Sets the value of :py:attr:`fitIntercept`. """ return self._set(fitIntercept=value) def getFitIntercept(self): """ Gets the value of fitIntercept or its default value. """ return self.getOrDefault(self.fitIntercept) class HasStandardization(Params): """ Mixin for param standardization: whether to standardize the training features before fitting the model. """ standardization = Param(Params._dummy(), "standardization", "whether to standardize the training features before fitting the model.", typeConverter=TypeConverters.toBoolean) def __init__(self): super(HasStandardization, self).__init__() self._setDefault(standardization=True) def setStandardization(self, value): """ Sets the value of :py:attr:`standardization`. """ return self._set(standardization=value) def getStandardization(self): """ Gets the value of standardization or its default value. """ return self.getOrDefault(self.standardization) class HasThresholds(Params): """ Mixin for param thresholds: Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0, excepting that at most one value may be 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class's threshold. """ thresholds = Param(Params._dummy(), "thresholds", "Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0, excepting that at most one value may be 0. 
The class with largest value p/t is predicted, where p is the original probability of that class and t is the class's threshold.", typeConverter=TypeConverters.toListFloat) def __init__(self): super(HasThresholds, self).__init__() def setThresholds(self, value): """ Sets the value of :py:attr:`thresholds`. """ return self._set(thresholds=value) def getThresholds(self): """ Gets the value of thresholds or its default value. """ return self.getOrDefault(self.thresholds) class HasThreshold(Params): """ Mixin for param threshold: threshold in binary classification prediction, in range [0, 1] """ threshold = Param(Params._dummy(), "threshold", "threshold in binary classification prediction, in range [0, 1]", typeConverter=TypeConverters.toFloat) def __init__(self): super(HasThreshold, self).__init__() self._setDefault(threshold=0.5) def setThreshold(self, value): """ Sets the value of :py:attr:`threshold`. """ return self._set(threshold=value) def getThreshold(self): """ Gets the value of threshold or its default value. """ return self.getOrDefault(self.threshold) class HasWeightCol(Params): """ Mixin for param weightCol: weight column name. If this is not set or empty, we treat all instance weights as 1.0. """ weightCol = Param(Params._dummy(), "weightCol", "weight column name. If this is not set or empty, we treat all instance weights as 1.0.", typeConverter=TypeConverters.toString) def __init__(self): super(HasWeightCol, self).__init__() def setWeightCol(self, value): """ Sets the value of :py:attr:`weightCol`. """ return self._set(weightCol=value) def getWeightCol(self): """ Gets the value of weightCol or its default value. """ return self.getOrDefault(self.weightCol) class HasSolver(Params): """ Mixin for param solver: the solver algorithm for optimization. If this is not set or empty, default value is 'auto'. """ solver = Param(Params._dummy(), "solver", "the solver algorithm for optimization. 
If this is not set or empty, default value is 'auto'.", typeConverter=TypeConverters.toString) def __init__(self): super(HasSolver, self).__init__() self._setDefault(solver='auto') def setSolver(self, value): """ Sets the value of :py:attr:`solver`. """ return self._set(solver=value) def getSolver(self): """ Gets the value of solver or its default value. """ return self.getOrDefault(self.solver) class HasVarianceCol(Params): """ Mixin for param varianceCol: column name for the biased sample variance of prediction. """ varianceCol = Param(Params._dummy(), "varianceCol", "column name for the biased sample variance of prediction.", typeConverter=TypeConverters.toString) def __init__(self): super(HasVarianceCol, self).__init__() def setVarianceCol(self, value): """ Sets the value of :py:attr:`varianceCol`. """ return self._set(varianceCol=value) def getVarianceCol(self): """ Gets the value of varianceCol or its default value. """ return self.getOrDefault(self.varianceCol) class HasAggregationDepth(Params): """ Mixin for param aggregationDepth: suggested depth for treeAggregate (>= 2). """ aggregationDepth = Param(Params._dummy(), "aggregationDepth", "suggested depth for treeAggregate (>= 2).", typeConverter=TypeConverters.toInt) def __init__(self): super(HasAggregationDepth, self).__init__() self._setDefault(aggregationDepth=2) def setAggregationDepth(self, value): """ Sets the value of :py:attr:`aggregationDepth`. """ return self._set(aggregationDepth=value) def getAggregationDepth(self): """ Gets the value of aggregationDepth or its default value. """ return self.getOrDefault(self.aggregationDepth) class HasParallelism(Params): """ Mixin for param parallelism: the number of threads to use when running parallel algorithms (>= 1). 
""" parallelism = Param(Params._dummy(), "parallelism", "the number of threads to use when running parallel algorithms (>= 1).", typeConverter=TypeConverters.toInt) def __init__(self): super(HasParallelism, self).__init__() self._setDefault(parallelism=1) def setParallelism(self, value): """ Sets the value of :py:attr:`parallelism`. """ return self._set(parallelism=value) def getParallelism(self): """ Gets the value of parallelism or its default value. """ return self.getOrDefault(self.parallelism) class DecisionTreeParams(Params): """ Mixin for Decision Tree parameters. """ maxDepth = Param(Params._dummy(), "maxDepth", "Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.", typeConverter=TypeConverters.toInt) maxBins = Param(Params._dummy(), "maxBins", "Max number of bins for discretizing continuous features. Must be >=2 and >= number of categories for any categorical feature.", typeConverter=TypeConverters.toInt) minInstancesPerNode = Param(Params._dummy(), "minInstancesPerNode", "Minimum number of instances each child must have after split. If a split causes the left or right child to have fewer than minInstancesPerNode, the split will be discarded as invalid. Should be >= 1.", typeConverter=TypeConverters.toInt) minInfoGain = Param(Params._dummy(), "minInfoGain", "Minimum information gain for a split to be considered at a tree node.", typeConverter=TypeConverters.toFloat) maxMemoryInMB = Param(Params._dummy(), "maxMemoryInMB", "Maximum memory in MB allocated to histogram aggregation. If too small, then 1 node will be split per iteration, and its aggregates may exceed this size.", typeConverter=TypeConverters.toInt) cacheNodeIds = Param(Params._dummy(), "cacheNodeIds", "If false, the algorithm will pass trees to executors to match instances with nodes. If true, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees. 
Users can set how often should the cache be checkpointed or disable it by setting checkpointInterval.", typeConverter=TypeConverters.toBoolean) def __init__(self): super(DecisionTreeParams, self).__init__() def setMaxDepth(self, value): """ Sets the value of :py:attr:`maxDepth`. """ return self._set(maxDepth=value) def getMaxDepth(self): """ Gets the value of maxDepth or its default value. """ return self.getOrDefault(self.maxDepth) def setMaxBins(self, value): """ Sets the value of :py:attr:`maxBins`. """ return self._set(maxBins=value) def getMaxBins(self): """ Gets the value of maxBins or its default value. """ return self.getOrDefault(self.maxBins) def setMinInstancesPerNode(self, value): """ Sets the value of :py:attr:`minInstancesPerNode`. """ return self._set(minInstancesPerNode=value) def getMinInstancesPerNode(self): """ Gets the value of minInstancesPerNode or its default value. """ return self.getOrDefault(self.minInstancesPerNode) def setMinInfoGain(self, value): """ Sets the value of :py:attr:`minInfoGain`. """ return self._set(minInfoGain=value) def getMinInfoGain(self): """ Gets the value of minInfoGain or its default value. """ return self.getOrDefault(self.minInfoGain) def setMaxMemoryInMB(self, value): """ Sets the value of :py:attr:`maxMemoryInMB`. """ return self._set(maxMemoryInMB=value) def getMaxMemoryInMB(self): """ Gets the value of maxMemoryInMB or its default value. """ return self.getOrDefault(self.maxMemoryInMB) def setCacheNodeIds(self, value): """ Sets the value of :py:attr:`cacheNodeIds`. """ return self._set(cacheNodeIds=value) def getCacheNodeIds(self): """ Gets the value of cacheNodeIds or its default value. """ return self.getOrDefault(self.cacheNodeIds)
apache-2.0
zubair-arbi/edx-platform
lms/djangoapps/mobile_api/social_facebook/friends/tests.py
128
14336
# pylint: disable=E1101 """ Tests for friends """ import json import httpretty from django.core.urlresolvers import reverse from xmodule.modulestore.tests.factories import CourseFactory from ..test_utils import SocialFacebookTestCase class TestFriends(SocialFacebookTestCase): """ Tests for /api/mobile/v0.5/friends/... """ def setUp(self): super(TestFriends, self).setUp() self.course = CourseFactory.create() @httpretty.activate def test_no_friends_enrolled(self): # User 1 set up self.user_create_and_signin(1) # Link user_1's edX account to FB self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], True) # Set the interceptor self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) course_id = unicode(self.course.id) url = reverse('friends-in-course', kwargs={"course_id": course_id}) response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}) # Assert that no friends are returned self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data and len(response.data['friends']) == 0) @httpretty.activate def test_no_friends_on_facebook(self): # User 1 set up self.user_create_and_signin(1) # Enroll user_1 in the course self.enroll_in_course(self.users[1], self.course) self.set_sharing_preferences(self.users[1], True) # Link user_1's edX account to FB self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) # Set the interceptor self.set_facebook_interceptor_for_friends({'data': []}) course_id = unicode(self.course.id) url = reverse('friends-in-course', kwargs={"course_id": course_id}) response = self.client.get( url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN} ) # Assert that no friends are 
returned self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data and len(response.data['friends']) == 0) @httpretty.activate def test_no_friends_linked_to_edx(self): # User 1 set up self.user_create_and_signin(1) # Enroll user_1 in the course self.enroll_in_course(self.users[1], self.course) self.set_sharing_preferences(self.users[1], True) # User 2 set up self.user_create_and_signin(2) # Enroll user_2 in the course self.enroll_in_course(self.users[2], self.course) self.set_sharing_preferences(self.users[2], True) # User 3 set up self.user_create_and_signin(3) # Enroll user_3 in the course self.enroll_in_course(self.users[3], self.course) self.set_sharing_preferences(self.users[3], True) # Set the interceptor self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) course_id = unicode(self.course.id) url = reverse('friends-in-course', kwargs={"course_id": course_id}) response = self.client.get( url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN} ) # Assert that no friends are returned self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data and len(response.data['friends']) == 0) @httpretty.activate def test_no_friends_share_settings_false(self): # User 1 set up self.user_create_and_signin(1) self.enroll_in_course(self.users[1], self.course) self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], False) self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) url = reverse('friends-in-course', kwargs={"course_id": 
unicode(self.course.id)}) response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}) # Assert that USERNAME_1 is returned self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data) self.assertTrue('friends' in response.data and len(response.data['friends']) == 0) @httpretty.activate def test_no_friends_no_oauth_token(self): # User 1 set up self.user_create_and_signin(1) self.enroll_in_course(self.users[1], self.course) self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], False) self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)}) response = self.client.get(url, {'format': 'json'}) # Assert that USERNAME_1 is returned self.assertEqual(response.status_code, 400) @httpretty.activate def test_one_friend_in_course(self): # User 1 set up self.user_create_and_signin(1) self.enroll_in_course(self.users[1], self.course) self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], True) self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)}) response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}) # Assert that USERNAME_1 is returned self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data) self.assertTrue('id' in 
response.data['friends'][0]) self.assertTrue(response.data['friends'][0]['id'] == self.USERS[1]['FB_ID']) self.assertTrue('name' in response.data['friends'][0]) self.assertTrue(response.data['friends'][0]['name'] == self.USERS[1]['USERNAME']) @httpretty.activate def test_three_friends_in_course(self): # User 1 set up self.user_create_and_signin(1) self.enroll_in_course(self.users[1], self.course) self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], True) # User 2 set up self.user_create_and_signin(2) self.enroll_in_course(self.users[2], self.course) self.link_edx_account_to_social(self.users[2], self.BACKEND, self.USERS[2]['FB_ID']) self.set_sharing_preferences(self.users[2], True) # User 3 set up self.user_create_and_signin(3) self.enroll_in_course(self.users[3], self.course) self.link_edx_account_to_social(self.users[3], self.BACKEND, self.USERS[3]['FB_ID']) self.set_sharing_preferences(self.users[3], True) self.set_facebook_interceptor_for_friends( { 'data': [ {'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}, {'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}, {'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}, ] } ) url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)}) response = self.client.get( url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN} ) self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data) # Assert that USERNAME_1 is returned self.assertTrue( 'id' in response.data['friends'][0] and response.data['friends'][0]['id'] == self.USERS[1]['FB_ID'] ) self.assertTrue( 'name' in response.data['friends'][0] and response.data['friends'][0]['name'] == self.USERS[1]['USERNAME'] ) # Assert that USERNAME_2 is returned self.assertTrue( 'id' in response.data['friends'][1] and response.data['friends'][1]['id'] == self.USERS[2]['FB_ID'] ) self.assertTrue( 'name' in 
response.data['friends'][1] and response.data['friends'][1]['name'] == self.USERS[2]['USERNAME'] ) # Assert that USERNAME_3 is returned self.assertTrue( 'id' in response.data['friends'][2] and response.data['friends'][2]['id'] == self.USERS[3]['FB_ID'] ) self.assertTrue( 'name' in response.data['friends'][2] and response.data['friends'][2]['name'] == self.USERS[3]['USERNAME'] ) @httpretty.activate def test_three_friends_in_paged_response(self): # User 1 set up self.user_create_and_signin(1) self.enroll_in_course(self.users[1], self.course) self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID']) self.set_sharing_preferences(self.users[1], True) # User 2 set up self.user_create_and_signin(2) self.enroll_in_course(self.users[2], self.course) self.link_edx_account_to_social(self.users[2], self.BACKEND, self.USERS[2]['FB_ID']) self.set_sharing_preferences(self.users[2], True) # User 3 set up self.user_create_and_signin(3) self.enroll_in_course(self.users[3], self.course) self.link_edx_account_to_social(self.users[3], self.BACKEND, self.USERS[3]['FB_ID']) self.set_sharing_preferences(self.users[3], True) self.set_facebook_interceptor_for_friends( { 'data': [{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}], "paging": {"next": "https://graph.facebook.com/v2.2/me/friends/next_1"}, "summary": {"total_count": 652} } ) # Set the interceptor for the first paged content httpretty.register_uri( httpretty.GET, "https://graph.facebook.com/v2.2/me/friends/next_1", body=json.dumps( { "data": [{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}], "paging": {"next": "https://graph.facebook.com/v2.2/me/friends/next_2"}, "summary": {"total_count": 652} } ), status=201 ) # Set the interceptor for the last paged content httpretty.register_uri( httpretty.GET, "https://graph.facebook.com/v2.2/me/friends/next_2", body=json.dumps( { "data": [{'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}], "paging": { "previous": 
"https://graph.facebook.com/v2.2/10154805434030300/friends?limit=25&offset=25" }, "summary": {"total_count": 652} } ), status=201 ) url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)}) response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}) self.assertEqual(response.status_code, 200) self.assertTrue('friends' in response.data) # Assert that USERNAME_1 is returned self.assertTrue('id' in response.data['friends'][0]) self.assertTrue(response.data['friends'][0]['id'] == self.USERS[1]['FB_ID']) self.assertTrue('name' in response.data['friends'][0]) self.assertTrue(response.data['friends'][0]['name'] == self.USERS[1]['USERNAME']) # Assert that USERNAME_2 is returned self.assertTrue('id' in response.data['friends'][1]) self.assertTrue(response.data['friends'][1]['id'] == self.USERS[2]['FB_ID']) self.assertTrue('name' in response.data['friends'][1]) self.assertTrue(response.data['friends'][1]['name'] == self.USERS[2]['USERNAME']) # Assert that USERNAME_3 is returned self.assertTrue('id' in response.data['friends'][2]) self.assertTrue(response.data['friends'][2]['id'] == self.USERS[3]['FB_ID']) self.assertTrue('name' in response.data['friends'][2]) self.assertTrue(response.data['friends'][2]['name'] == self.USERS[3]['USERNAME'])
agpl-3.0
pythonbyexample/PBE
other/squirrel.py
1
17661
#!/usr/bin/env python # Squirrel Eat Squirrel (a 2D Katamari Damacy clone) # By Al Sweigart al@inventwithpython.com # http://inventwithpython.com/pygame # Released under a "Simplified BSD" license import random, sys, time, math, pygame from pygame.locals import * FPS = 30 # frames per second to update the screen WINWIDTH = 640 # width of the program's window, in pixels WINHEIGHT = 480 # height in pixels HALF_WINWIDTH = int(WINWIDTH / 2) HALF_WINHEIGHT = int(WINHEIGHT / 2) GRASSCOLOR = (24, 255, 0) WHITE = (255, 255, 255) RED = (255, 0, 0) CAMERASLACK = 90 # how far from the center the squirrel moves before moving the camera MOVERATE = 9 # how fast the player moves BOUNCERATE = 6 # how fast the player bounces (large is slower) BOUNCEHEIGHT = 30 # how high the player bounces STARTSIZE = 25 # how big the player starts off WINSIZE = 300 # how big the player needs to be to win INVULNTIME = 2 # how long the player is invulnerable after being hit in seconds GAMEOVERTIME = 4 # how long the "game over" text stays on the screen in seconds MAXHEALTH = 3 # how much health the player starts with NUMGRASS = 80 # number of grass objects in the active area NUMSQUIRRELS = 30 # number of squirrels in the active area SQUIRRELMINSPEED = 3 # slowest squirrel speed SQUIRRELMAXSPEED = 7 # fastest squirrel speed DIRCHANGEFREQ = 2 # % chance of direction change per frame LEFT = 'left' RIGHT = 'right' """ This program has three data structures to represent the player, enemy squirrels, and grass background objects. The data structures are dictionaries with the following keys: Keys used by all three data structures: 'x' - the left edge coordinate of the object in the game world (not a pixel coordinate on the screen) 'y' - the top edge coordinate of the object in the game world (not a pixel coordinate on the screen) 'rect' - the pygame.Rect object representing where on the screen the object is located. 
Player data structure keys: 'surface' - the pygame.Surface object that stores the image of the squirrel which will be drawn to the screen. 'facing' - either set to LEFT or RIGHT, stores which direction the player is facing. 'size' - the width and height of the player in pixels. (The width & height are always the same.) 'bounce' - represents at what point in a bounce the player is in. 0 means standing (no bounce), up to BOUNCERATE (the completion of the bounce) 'health' - an integer showing how many more times the player can be hit by a larger squirrel before dying. Enemy Squirrel data structure keys: 'surface' - the pygame.Surface object that stores the image of the squirrel which will be drawn to the screen. 'movex' - how many pixels per frame the squirrel moves horizontally. A negative integer is moving to the left, a positive to the right. 'movey' - how many pixels per frame the squirrel moves vertically. A negative integer is moving up, a positive moving down. 'width' - the width of the squirrel's image, in pixels 'height' - the height of the squirrel's image, in pixels 'bounce' - represents at what point in a bounce the player is in. 0 means standing (no bounce), up to BOUNCERATE (the completion of the bounce) 'bouncerate' - how quickly the squirrel bounces. A lower number means a quicker bounce. 
'bounceheight' - how high (in pixels) the squirrel bounces Grass data structure keys: 'grassImage' - an integer that refers to the index of the pygame.Surface object in GRASSIMAGES used for this grass object """ def main(): global FPSCLOCK, DISPLAYSURF, BASICFONT, L_SQUIR_IMG, R_SQUIR_IMG, GRASSIMAGES pygame.init() FPSCLOCK = pygame.time.Clock() pygame.display.set_icon(pygame.image.load('gameicon.png')) DISPLAYSURF = pygame.display.set_mode((WINWIDTH, WINHEIGHT)) pygame.display.set_caption('Squirrel Eat Squirrel') BASICFONT = pygame.font.Font('freesansbold.ttf', 32) # load the image files L_SQUIR_IMG = pygame.image.load('squirrel.png') R_SQUIR_IMG = pygame.transform.flip(L_SQUIR_IMG, True, False) GRASSIMAGES = [] for i in range(1, 5): GRASSIMAGES.append(pygame.image.load('grass%s.png' % i)) while True: runGame() def runGame(): # set up variables for the start of a new game invulnerableMode = False # if the player is invulnerable invulnerableStartTime = 0 # time the player became invulnerable gameOverMode = False # if the player has lost gameOverStartTime = 0 # time the player lost winMode = False # if the player has won # create the surfaces to hold game text gameOverSurf = BASICFONT.render('Game Over', True, WHITE) gameOverRect = gameOverSurf.get_rect() gameOverRect.center = (HALF_WINWIDTH, HALF_WINHEIGHT) winSurf = BASICFONT.render('You have achieved OMEGA SQUIRREL!', True, WHITE) winRect = winSurf.get_rect() winRect.center = (HALF_WINWIDTH, HALF_WINHEIGHT) winSurf2 = BASICFONT.render('(Press "r" to restart.)', True, WHITE) winRect2 = winSurf2.get_rect() winRect2.center = (HALF_WINWIDTH, HALF_WINHEIGHT + 30) # camerax and cameray are the top left of where the camera view is camerax = 0 cameray = 0 grassObjs = [] # stores all the grass objects in the game squirrelObjs = [] # stores all the non-player squirrel objects # stores the player object: playerObj = {'surface': pygame.transform.scale(L_SQUIR_IMG, (STARTSIZE, STARTSIZE)), 'facing': LEFT, 'size': STARTSIZE, 
'x': HALF_WINWIDTH, 'y': HALF_WINHEIGHT, 'bounce':0, 'health': MAXHEALTH} moveLeft = False moveRight = False moveUp = False moveDown = False # start off with some random grass images on the screen for i in range(10): grassObjs.append(makeNewGrass(camerax, cameray)) grassObjs[i]['x'] = random.randint(0, WINWIDTH) grassObjs[i]['y'] = random.randint(0, WINHEIGHT) while True: # main game loop # Check if we should turn off invulnerability if invulnerableMode and time.time() - invulnerableStartTime > INVULNTIME: invulnerableMode = False # move all the squirrels for sObj in squirrelObjs: # move the squirrel, and adjust for their bounce sObj['x'] += sObj['movex'] sObj['y'] += sObj['movey'] sObj['bounce'] += 1 if sObj['bounce'] > sObj['bouncerate']: sObj['bounce'] = 0 # reset bounce amount # random chance they change direction if random.randint(0, 99) < DIRCHANGEFREQ: sObj['movex'] = getRandomVelocity() sObj['movey'] = getRandomVelocity() if sObj['movex'] > 0: # faces right sObj['surface'] = pygame.transform.scale(R_SQUIR_IMG, (sObj['width'], sObj['height'])) else: # faces left sObj['surface'] = pygame.transform.scale(L_SQUIR_IMG, (sObj['width'], sObj['height'])) # go through all the objects and see if any need to be deleted. for i in range(len(grassObjs) - 1, -1, -1): if isOutsideActiveArea(camerax, cameray, grassObjs[i]): del grassObjs[i] for i in range(len(squirrelObjs) - 1, -1, -1): if isOutsideActiveArea(camerax, cameray, squirrelObjs[i]): del squirrelObjs[i] # add more grass & squirrels if we don't have enough. 
while len(grassObjs) < NUMGRASS: grassObjs.append(makeNewGrass(camerax, cameray)) while len(squirrelObjs) < NUMSQUIRRELS: squirrelObjs.append(makeNewSquirrel(camerax, cameray)) # adjust camerax and cameray if beyond the "camera slack" playerCenterx = playerObj['x'] + int(playerObj['size'] / 2) playerCentery = playerObj['y'] + int(playerObj['size'] / 2) if (camerax + HALF_WINWIDTH) - playerCenterx > CAMERASLACK: camerax = playerCenterx + CAMERASLACK - HALF_WINWIDTH elif playerCenterx - (camerax + HALF_WINWIDTH) > CAMERASLACK: camerax = playerCenterx - CAMERASLACK - HALF_WINWIDTH if (cameray + HALF_WINHEIGHT) - playerCentery > CAMERASLACK: cameray = playerCentery + CAMERASLACK - HALF_WINHEIGHT elif playerCentery - (cameray + HALF_WINHEIGHT) > CAMERASLACK: cameray = playerCentery - CAMERASLACK - HALF_WINHEIGHT # draw the green background DISPLAYSURF.fill(GRASSCOLOR) # draw all the grass objects on the screen for gObj in grassObjs: gRect = pygame.Rect( (gObj['x'] - camerax, gObj['y'] - cameray, gObj['width'], gObj['height']) ) DISPLAYSURF.blit(GRASSIMAGES[gObj['grassImage']], gRect) # draw the other squirrels for sObj in squirrelObjs: sObj['rect'] = pygame.Rect( (sObj['x'] - camerax, sObj['y'] - cameray - getBounceAmount(sObj['bounce'], sObj['bouncerate'], sObj['bounceheight']), sObj['width'], sObj['height']) ) DISPLAYSURF.blit(sObj['surface'], sObj['rect']) # draw the player squirrel flashIsOn = round(time.time(), 1) * 10 % 2 == 1 if not gameOverMode and not (invulnerableMode and flashIsOn): playerObj['rect'] = pygame.Rect( (playerObj['x'] - camerax, playerObj['y'] - cameray - getBounceAmount(playerObj['bounce'], BOUNCERATE, BOUNCEHEIGHT), playerObj['size'], playerObj['size']) ) DISPLAYSURF.blit(playerObj['surface'], playerObj['rect']) # draw the health meter drawHealthMeter(playerObj['health']) for event in pygame.event.get(): # event handling loop if event.type == QUIT: terminate() elif event.type == KEYDOWN: if event.key in (K_UP, K_w): moveDown = False moveUp = 
True elif event.key in (K_DOWN, K_s): moveUp = False moveDown = True elif event.key in (K_LEFT, K_a): moveRight = False moveLeft = True if playerObj['facing'] != LEFT: # change player image playerObj['surface'] = pygame.transform.scale(L_SQUIR_IMG, (playerObj['size'], playerObj['size'])) playerObj['facing'] = LEFT elif event.key in (K_RIGHT, K_d): moveLeft = False moveRight = True if playerObj['facing'] != RIGHT: # change player image playerObj['surface'] = pygame.transform.scale(R_SQUIR_IMG, (playerObj['size'], playerObj['size'])) playerObj['facing'] = RIGHT elif winMode and event.key == K_r: return elif event.type == KEYUP: # stop moving the player's squirrel if event.key in (K_LEFT, K_a): moveLeft = False elif event.key in (K_RIGHT, K_d): moveRight = False elif event.key in (K_UP, K_w): moveUp = False elif event.key in (K_DOWN, K_s): moveDown = False elif event.key == K_ESCAPE: terminate() if not gameOverMode: # actually move the player if moveLeft: playerObj['x'] -= MOVERATE if moveRight: playerObj['x'] += MOVERATE if moveUp: playerObj['y'] -= MOVERATE if moveDown: playerObj['y'] += MOVERATE if (moveLeft or moveRight or moveUp or moveDown) or playerObj['bounce'] != 0: playerObj['bounce'] += 1 if playerObj['bounce'] > BOUNCERATE: playerObj['bounce'] = 0 # reset bounce amount # check if the player has collided with any squirrels for i in range(len(squirrelObjs)-1, -1, -1): sqObj = squirrelObjs[i] if 'rect' in sqObj and playerObj['rect'].colliderect(sqObj['rect']): # a player/squirrel collision has occurred if sqObj['width'] * sqObj['height'] <= playerObj['size']**2: # player is larger and eats the squirrel playerObj['size'] += int( (sqObj['width'] * sqObj['height'])**0.2 ) + 1 del squirrelObjs[i] if playerObj['facing'] == LEFT: playerObj['surface'] = pygame.transform.scale(L_SQUIR_IMG, (playerObj['size'], playerObj['size'])) if playerObj['facing'] == RIGHT: playerObj['surface'] = pygame.transform.scale(R_SQUIR_IMG, (playerObj['size'], playerObj['size'])) if 
playerObj['size'] > WINSIZE: winMode = True # turn on "win mode" elif not invulnerableMode: # player is smaller and takes damage invulnerableMode = True invulnerableStartTime = time.time() playerObj['health'] -= 1 if playerObj['health'] == 0: gameOverMode = True # turn on "game over mode" gameOverStartTime = time.time() else: # game is over, show "game over" text DISPLAYSURF.blit(gameOverSurf, gameOverRect) if time.time() - gameOverStartTime > GAMEOVERTIME: return # end the current game # check if the player has won. if winMode: DISPLAYSURF.blit(winSurf, winRect) DISPLAYSURF.blit(winSurf2, winRect2) pygame.display.update() FPSCLOCK.tick(FPS) def drawHealthMeter(currentHealth): for i in range(currentHealth): # draw red health bars pygame.draw.rect(DISPLAYSURF, RED, (15, 5 + (10 * MAXHEALTH) - i * 10, 20, 10)) for i in range(MAXHEALTH): # draw the white outlines pygame.draw.rect(DISPLAYSURF, WHITE, (15, 5 + (10 * MAXHEALTH) - i * 10, 20, 10), 1) def terminate(): pygame.quit() sys.exit() def getBounceAmount(currentBounce, bounceRate, bounceHeight): # Returns the number of pixels to offset based on the bounce. # Larger bounceRate means a slower bounce. # Larger bounceHeight means a higher bounce. # currentBounce will always be less than bounceRate return int(math.sin( (math.pi / float(bounceRate)) * currentBounce ) * bounceHeight) def getRandomVelocity(): speed = random.randint(SQUIRRELMINSPEED, SQUIRRELMAXSPEED) if random.randint(0, 1) == 0: return speed else: return -speed def getRandomOffCameraPos(camerax, cameray, objWidth, objHeight): # create a Rect of the camera view cameraRect = pygame.Rect(camerax, cameray, WINWIDTH, WINHEIGHT) while True: x = random.randint(camerax - WINWIDTH, camerax + (2 * WINWIDTH)) y = random.randint(cameray - WINHEIGHT, cameray + (2 * WINHEIGHT)) # create a Rect object with the random coordinates and use colliderect() # to make sure the right edge isn't in the camera view. 
objRect = pygame.Rect(x, y, objWidth, objHeight) if not objRect.colliderect(cameraRect): return x, y def makeNewSquirrel(camerax, cameray): sq = {} generalSize = random.randint(5, 25) multiplier = random.randint(1, 3) sq['width'] = (generalSize + random.randint(0, 10)) * multiplier sq['height'] = (generalSize + random.randint(0, 10)) * multiplier sq['x'], sq['y'] = getRandomOffCameraPos(camerax, cameray, sq['width'], sq['height']) sq['movex'] = getRandomVelocity() sq['movey'] = getRandomVelocity() if sq['movex'] < 0: # squirrel is facing left sq['surface'] = pygame.transform.scale(L_SQUIR_IMG, (sq['width'], sq['height'])) else: # squirrel is facing right sq['surface'] = pygame.transform.scale(R_SQUIR_IMG, (sq['width'], sq['height'])) sq['bounce'] = 0 sq['bouncerate'] = random.randint(10, 18) sq['bounceheight'] = random.randint(10, 50) return sq def makeNewGrass(camerax, cameray): gr = {} gr['grassImage'] = random.randint(0, len(GRASSIMAGES) - 1) gr['width'] = GRASSIMAGES[0].get_width() gr['height'] = GRASSIMAGES[0].get_height() gr['x'], gr['y'] = getRandomOffCameraPos(camerax, cameray, gr['width'], gr['height']) gr['rect'] = pygame.Rect( (gr['x'], gr['y'], gr['width'], gr['height']) ) return gr def isOutsideActiveArea(camerax, cameray, obj): # Return False if camerax and cameray are more than # a half-window length beyond the edge of the window. boundsLeftEdge = camerax - WINWIDTH boundsTopEdge = cameray - WINHEIGHT boundsRect = pygame.Rect(boundsLeftEdge, boundsTopEdge, WINWIDTH * 3, WINHEIGHT * 3) objRect = pygame.Rect(obj['x'], obj['y'], obj['width'], obj['height']) return not boundsRect.colliderect(objRect) if __name__ == '__main__': main()
bsd-3-clause
townbull/keystone-dtrust
keystone/openstack/common/notifier/log_notifier.py
8
1264
# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from keystone.openstack.common import jsonutils from keystone.openstack.common import log as logging CONF = cfg.CONF def notify(_context, message): """Notifies the recipient of the desired event given the model. Log notifications using OpenStack's default logging system. """ priority = message.get('priority', CONF.default_notification_level) priority = priority.lower() logger = logging.getLogger( 'keystone.openstack.common.notification.%s' % message['event_type']) getattr(logger, priority)(jsonutils.dumps(message))
apache-2.0
ahmad88me/jamboard
jamboard/settings.py
1
2984
""" Django settings for jamboard project. Generated by 'django-admin startproject' using Django 1.8. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'qts-or7qor9#c^%h5xhml^vr9q_0#txzigbycn5n-)61-m6#eq' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'jamboard', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'jamboard.urls' print "\n\n\ndir: "+os.path.join(BASE_DIR, 'jamboard') TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 
'jamboard.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), # 'ENGINE': 'django.db.backends.mysql', # 'NAME': 'jamboard', # 'USER': os.environ['jam_db_user'], # 'PASSWORD': os.environ['jam_db_password'], #'HOST': '54.88.191.135', #'PORT': '3306', } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/'
gpl-2.0
cnsoft/kbengine-cocos2dx
kbe/res/scripts/common/Lib/email/mime/audio.py
159
2674
# Copyright (C) 2001-2007 Python Software Foundation # Author: Anthony Baxter # Contact: email-sig@python.org """Class representing audio/* type MIME documents.""" __all__ = ['MIMEAudio'] import sndhdr from io import BytesIO from email import encoders from email.mime.nonmultipart import MIMENonMultipart _sndhdr_MIMEmap = {'au' : 'basic', 'wav' :'x-wav', 'aiff':'x-aiff', 'aifc':'x-aiff', } # There are others in sndhdr that don't have MIME types. :( # Additional ones to be added to sndhdr? midi, mp3, realaudio, wma?? def _whatsnd(data): """Try to identify a sound file type. sndhdr.what() has a pretty cruddy interface, unfortunately. This is why we re-do it here. It would be easier to reverse engineer the Unix 'file' command and use the standard 'magic' file, as shipped with a modern Unix. """ hdr = data[:512] fakefile = BytesIO(hdr) for testfn in sndhdr.tests: res = testfn(hdr, fakefile) if res is not None: return _sndhdr_MIMEmap.get(res[0]) return None class MIMEAudio(MIMENonMultipart): """Class for generating audio/* MIME documents.""" def __init__(self, _audiodata, _subtype=None, _encoder=encoders.encode_base64, **_params): """Create an audio/* type MIME document. _audiodata is a string containing the raw audio data. If this data can be decoded by the standard Python `sndhdr' module, then the subtype will be automatically included in the Content-Type header. Otherwise, you can specify the specific audio subtype via the _subtype parameter. If _subtype is not given, and no subtype can be guessed, a TypeError is raised. _encoder is a function which will perform the actual encoding for transport of the image data. It takes one argument, which is this Image instance. It should use get_payload() and set_payload() to change the payload to the encoded form. It should also add any Content-Transfer-Encoding or other headers to the message as necessary. The default encoding is Base64. 
Any additional keyword arguments are passed to the base class constructor, which turns them into parameters on the Content-Type header. """ if _subtype is None: _subtype = _whatsnd(_audiodata) if _subtype is None: raise TypeError('Could not find audio MIME subtype') MIMENonMultipart.__init__(self, 'audio', _subtype, **_params) self.set_payload(_audiodata) _encoder(self)
lgpl-3.0
hsiaoyi0504/scikit-learn
sklearn/decomposition/tests/test_sparse_pca.py
142
5990
# Author: Vlad Niculae # License: BSD 3 clause import sys import numpy as np from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import if_not_mac_os from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA from sklearn.utils import check_random_state def generate_toy_data(n_components, n_samples, image_size, random_state=None): n_features = image_size[0] * image_size[1] rng = check_random_state(random_state) U = rng.randn(n_samples, n_components) V = rng.randn(n_components, n_features) centers = [(3, 3), (6, 7), (8, 1)] sz = [1, 2, 1] for k in range(n_components): img = np.zeros(image_size) xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k] ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k] img[xmin:xmax][:, ymin:ymax] = 1.0 V[k, :] = img.ravel() # Y is defined by : Y = UV + noise Y = np.dot(U, V) Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise return Y, U, V # SparsePCA can be a bit slow. 
To avoid having test times go up, we # test different aspects of the code in the same test def test_correct_shapes(): rng = np.random.RandomState(0) X = rng.randn(12, 10) spca = SparsePCA(n_components=8, random_state=rng) U = spca.fit_transform(X) assert_equal(spca.components_.shape, (8, 10)) assert_equal(U.shape, (12, 8)) # test overcomplete decomposition spca = SparsePCA(n_components=13, random_state=rng) U = spca.fit_transform(X) assert_equal(spca.components_.shape, (13, 10)) assert_equal(U.shape, (12, 13)) def test_fit_transform(): alpha = 1 rng = np.random.RandomState(0) Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha, random_state=0) spca_lars.fit(Y) # Test that CD gives similar results spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0, alpha=alpha) spca_lasso.fit(Y) assert_array_almost_equal(spca_lasso.components_, spca_lars.components_) @if_not_mac_os() def test_fit_transform_parallel(): alpha = 1 rng = np.random.RandomState(0) Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha, random_state=0) spca_lars.fit(Y) U1 = spca_lars.transform(Y) # Test multiple CPUs spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha, random_state=0).fit(Y) U2 = spca.transform(Y) assert_true(not np.all(spca_lars.components_ == 0)) assert_array_almost_equal(U1, U2) def test_transform_nan(): # Test that SparsePCA won't return NaN when there is 0 feature in all # samples. 
rng = np.random.RandomState(0) Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array Y[:, 0] = 0 estimator = SparsePCA(n_components=8) assert_false(np.any(np.isnan(estimator.fit_transform(Y)))) def test_fit_transform_tall(): rng = np.random.RandomState(0) Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array spca_lars = SparsePCA(n_components=3, method='lars', random_state=rng) U1 = spca_lars.fit_transform(Y) spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng) U2 = spca_lasso.fit(Y).transform(Y) assert_array_almost_equal(U1, U2) def test_initialization(): rng = np.random.RandomState(0) U_init = rng.randn(5, 3) V_init = rng.randn(3, 4) model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0, random_state=rng) model.fit(rng.randn(5, 4)) assert_array_equal(model.components_, V_init) def test_mini_batch_correct_shapes(): rng = np.random.RandomState(0) X = rng.randn(12, 10) pca = MiniBatchSparsePCA(n_components=8, random_state=rng) U = pca.fit_transform(X) assert_equal(pca.components_.shape, (8, 10)) assert_equal(U.shape, (12, 8)) # test overcomplete decomposition pca = MiniBatchSparsePCA(n_components=13, random_state=rng) U = pca.fit_transform(X) assert_equal(pca.components_.shape, (13, 10)) assert_equal(U.shape, (12, 13)) def test_mini_batch_fit_transform(): raise SkipTest("skipping mini_batch_fit_transform.") alpha = 1 rng = np.random.RandomState(0) Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0, alpha=alpha).fit(Y) U1 = spca_lars.transform(Y) # Test multiple CPUs if sys.platform == 'win32': # fake parallelism for win32 import sklearn.externals.joblib.parallel as joblib_par _mp = joblib_par.multiprocessing joblib_par.multiprocessing = None try: U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0).fit(Y).transform(Y) finally: joblib_par.multiprocessing = _mp else: # we can 
efficiently use parallelism U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha, random_state=0).fit(Y).transform(Y) assert_true(not np.all(spca_lars.components_ == 0)) assert_array_almost_equal(U1, U2) # Test that CD gives similar results spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha, random_state=0).fit(Y) assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
bsd-3-clause
bernardopires/django-tenant-schemas
tenant_schemas/tests/test_utils.py
4
1524
from __future__ import absolute_import

import sys
import types

from django.apps import AppConfig
from django.test import TestCase

from tenant_schemas import utils


class AppLabelsTestCase(TestCase):
    """Tests for ``tenant_schemas.utils.app_labels``."""

    def setUp(self):
        # Names of the fake modules injected into sys.modules; tracked so
        # tearDown can remove them and keep tests isolated from each other.
        self._modules = set()

    def tearDown(self):
        # Drop every module registered by set_up_module (ignore ones that
        # were never actually inserted).
        for name in self._modules:
            sys.modules.pop(name, None)

    def set_up_module(self, whole_name):
        """Register a stub module for every dotted prefix of *whole_name*.

        For 'a.b' this creates stub modules 'a' and 'a.b' in sys.modules and
        returns the innermost one ('a.b').
        """
        parts = whole_name.split('.')
        name = ''
        for part in parts:
            name += ('.%s' % part) if name else part
            module = types.ModuleType(name)
            # __path__ makes the stub look like a package, so dotted
            # submodule names resolve against it.
            module.__path__ = ['/tmp']
            self._modules.add(name)
            sys.modules[name] = module
        return sys.modules[whole_name]

    def test_app_labels(self):
        """
        Verifies that app_labels handle Django 1.7+ AppConfigs properly.
        https://docs.djangoproject.com/en/1.7/ref/applications/
        """
        self.set_up_module('example1')
        apps = self.set_up_module('example2.apps')

        # set up AppConfig on the `test_app.apps` module
        class Example2AppConfig(AppConfig):
            name = 'example2'
            label = 'example2_app'  # with different name
            path = '/tmp'  # for whatever reason path is required

        apps.Example2AppConfig = Example2AppConfig

        self.assertEqual(
            utils.app_labels([
                'example1',
                'example2.apps.Example2AppConfig'
            ]),
            ['example1', 'example2_app'],
        )
mit
tensorflow/lingvo
lingvo/core/ops/mass_op_test.py
1
38651
# Lint as: python3 # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for mass_op.""" import collections from lingvo import compat as tf from lingvo.core import ops from lingvo.core import test_utils import numpy as np FLAGS = tf.flags.FLAGS BOS = 1 EOS = 2 MassOutput = collections.namedtuple( 'MassOutput', ['src_ids', 'tgt_ids', 'tgt_labels', 'tgt_weights']) # Set default to empty. MassOutput.__new__.__defaults__ = (None,) * len(MassOutput._fields) def FindResultFromList(result, expected_results): """Find the given result from a list of expected results. Args: result: A MassOutput tuple, from running ops.mass(). expected_results: A list of MassOutput. The test asserts `result` is equal to at least one result from `expected_results`. Returns: The index of first match found, or None for not found. We use this when the specific output from ops.mass() is not stable across different platforms. Specifically, the implementation currently uses std::shuffle(), which have different implementations between libc++ and stdlibc++. 
""" for idx, expected in enumerate(expected_results): match = True for attr in MassOutput._fields: if not np.array_equal(getattr(result, attr), getattr(expected, attr)): match = False break if match: return idx tf.logging.error('Found unexpected output from op.mass that fails to match' ' any expected result.') for attr in MassOutput._fields: tf.logging.info('%s = %s', attr, np.array_repr(getattr(result, attr))) return None class MassOpTest(test_utils.TestCase): def testFixedStart(self): ids = np.array( [[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32) weights = np.array( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32) actual_seq_len = np.array([14, 10, 14, 10], dtype=np.int32) g = tf.Graph() with g.as_default(): (src_ids, tgt_ids, tgt_labels, tgt_weights) = ops.mass( ids, weights, actual_seq_len, mask_id=3, mask_ratio=0.5, mask_minlen=1, span_len=8, random_start_prob=0, keep_prob=0, rand_prob=0, mask_prob=1, mask_target=True, vocab_size=9) with self.session(graph=g) as sess: (src_ids, tgt_ids, tgt_labels, tgt_weights) = sess.run([src_ids, tgt_ids, tgt_labels, tgt_weights]) self.assertAllEqual( src_ids, np.array([[ 3, 3, 3, 3, 3, 3, 3, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 3, 3, 3, 3, 3, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 9, 10, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32)) self.assertAllEqual( tgt_ids, np.array([ [BOS, 4, 5, 6, 7, 8, 9, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0], [BOS, 4, 5, 6, 
7, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 3, 3, 3, 3, 3, 3, 3, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0 ], [3, 3, 3, 3, 3, 8, 9, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], dtype=np.int32)) self.assertAllEqual( tgt_labels, np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32)) self.assertAllEqual( tgt_weights, np.array( [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) def testRandomStart(self): ids = np.array( [[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32) weights = np.array( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32) actual_seq_len = np.array([14, 10, 14, 10], dtype=np.int32) g = tf.Graph() with g.as_default(): (src_ids, tgt_ids, tgt_labels, tgt_weights) = ops.mass( ids, weights, actual_seq_len, mask_id=3, mask_ratio=0.5, mask_minlen=1, span_len=100000, random_start_prob=1, keep_prob=0, rand_prob=0, mask_prob=1, mask_target=True, vocab_size=9) with self.session(graph=g) as sess: (src_ids, tgt_ids, tgt_labels, tgt_weights) = sess.run([ src_ids, tgt_ids, tgt_labels, tgt_weights, ]) result = MassOutput(src_ids, tgt_ids, 
tgt_labels, tgt_weights) expected_output1 = MassOutput( np.array([[ 4, 5, 6, 7, 8, 9, 10, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0 ], [ 4, 3, 3, 3, 3, 3, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 3, 3, 3, 3, 3, 3, 3, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [4, 5, 3, 3, 3, 3, 3, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], dtype=np.int32), np.array([[ 3, 3, 3, 3, 3, 3, 3, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0 ], [ 3, 4, 5, 6, 7, 8, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [3, 3, 3, 3, 7, 8, 9, 10, 11, 12, 13, 3, 3, 3, 0, 0, 0, 0, 0, 0 ], [3, 3, 5, 6, 7, 8, 9, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array( [[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) expected_output2 = MassOutput( np.array([[ 3, 3, 3, 3, 3, 3, 3, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [ 3, 3, 3, 3, 3, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 3, 3, 3, 3, 3, 3, 3, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [4, 5, 3, 3, 3, 3, 3, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[ 1, 4, 5, 6, 7, 8, 9, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0 ], [ 1, 4, 5, 6, 7, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [3, 3, 3, 3, 7, 8, 9, 10, 11, 12, 13, 3, 3, 3, 0, 0, 0, 0, 0, 0 ], [3, 3, 5, 6, 7, 8, 9, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 4, 
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array( [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) match_idx = FindResultFromList(result, [expected_output1, expected_output2]) self.assertIsNotNone(match_idx, '{} is not a valid result'.format(result)) def testSegmented(self): ids = np.array( [[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32) weights = np.array( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32) actual_seq_len = np.array([14, 10, 14, 10], dtype=np.int32) g = tf.Graph() with g.as_default(): (src_ids, tgt_ids, tgt_labels, tgt_weights) = ops.mass( ids, weights, actual_seq_len, mask_id=3, mask_ratio=0.5, mask_minlen=1, span_len=3, keep_prob=0, rand_prob=0, mask_prob=1, mask_target=True, vocab_size=9) with self.session(graph=g) as sess: (src_ids, tgt_ids, tgt_labels, tgt_weights) = sess.run([ src_ids, tgt_ids, tgt_labels, tgt_weights, ]) result = MassOutput(src_ids, tgt_ids, tgt_labels, tgt_weights) expected_output1 = MassOutput( np.array([[ 4, 3, 3, 3, 3, 3, 3, 11, 12, 13, 14, 15, 16, 3, 0, 0, 0, 0, 0, 0 ], [ 4, 3, 3, 7, 8, 9, 10, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 3, 3, 3, 12, 3, 14, 3, 3, 3, 0, 0, 0, 0, 0, 0], [ 3, 3, 6, 3, 3, 3, 10, 11, 12, EOS, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array([[ 3, 4, 5, 6, 7, 8, 9, 3, 3, 3, 3, 3, 3, 16, 0, 0, 0, 0, 0, 0 ], [ 3, 4, 5, 3, 3, 3, 3, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 3, 3, 3, 3, 3, 8, 9, 10, 3, 12, 3, 14, 15, 16, 0, 0, 0, 0, 0, 0 ], [BOS, 4, 3, 6, 7, 8, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array( [[0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) expected_output2 = MassOutput( np.array([[ 4, 5, 6, 3, 3, 3, 10, 3, 3, 3, 14, 15, 16, 3, 0, 0, 0, 0, 0, 0 ], [ 3, 3, 6, 7, 8, 9, 10, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [4, 3, 3, 3, 8, 9, 10, 11, 12, 3, 3, 3, 16, 3, 0, 0, 0, 0, 0, 0 ], [4, 5, 3, 3, 8, 9, 3, 3, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[ 3, 3, 3, 6, 7, 8, 3, 10, 11, 12, 3, 3, 3, 16, 0, 0, 0, 0, 0, 0 ], [ 1, 4, 3, 3, 3, 3, 3, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 3, 4, 5, 6, 3, 3, 3, 3, 3, 12, 13, 14, 3, 16, 0, 0, 0, 0, 0, 0 ], [3, 3, 5, 6, 3, 3, 9, 10, 11, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array([[ 0., 0., 0., 1., 1., 1., 0., 1., 1., 1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0. 
], [ 1., 1., 0., 0., 0., 0., 0., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. ], [ 0., 1., 1., 1., 0., 0., 0., 0., 0., 1., 1., 1., 0., 1., 0., 0., 0., 0., 0., 0. ], [ 0., 0., 1., 1., 0., 0., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0. ]], dtype=np.float32)) match_idx = FindResultFromList(result, [expected_output1, expected_output2]) self.assertIsNotNone(match_idx, '{} is not a valid result'.format(result)) def testNoMaskTarget(self): ids = np.array( [[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32) weights = np.array( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32) actual_seq_len = np.array([14, 10, 14, 10], dtype=np.int32) g = tf.Graph() with g.as_default(): (src_ids, tgt_ids, tgt_labels, tgt_weights) = ops.mass( ids, weights, actual_seq_len, mask_id=3, mask_ratio=0.5, mask_minlen=1, span_len=100000, random_start_prob=0, keep_prob=0, rand_prob=0, mask_prob=1, mask_target=False, vocab_size=9) with self.session(graph=g) as sess: (src_ids, tgt_ids, tgt_labels, tgt_weights) = sess.run([ src_ids, tgt_ids, tgt_labels, tgt_weights, ]) self.assertAllEqual( src_ids, np.array([[ 3, 3, 3, 3, 3, 3, 3, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 3, 3, 3, 3, 3, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 9, 10, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32)) self.assertAllEqual( tgt_ids, np.array([[ 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0 ], [ BOS, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0 ], [ 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32)) self.assertAllEqual( tgt_labels, np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32)) self.assertAllEqual( tgt_weights, np.array( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) def testKeepOrRandMaskedTokens(self): ids = np.array( [[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32) weights = np.array( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32) actual_seq_len = np.array([14, 10, 14, 10], dtype=np.int32) g = tf.Graph() with g.as_default(): (src_ids, tgt_ids, tgt_labels, tgt_weights) = ops.mass( ids, weights, actual_seq_len, mask_id=3, mask_ratio=0.5, mask_minlen=1, span_len=100000, random_start_prob=0, keep_prob=0.5, rand_prob=0.5, mask_prob=0, mask_target=True, vocab_size=9) with self.session(graph=g) as sess: (src_ids, tgt_ids, tgt_labels, tgt_weights) = sess.run([ src_ids, tgt_ids, tgt_labels, tgt_weights, ]) result = MassOutput(src_ids, tgt_ids, 
tgt_labels, tgt_weights) expected_output1 = MassOutput( np.array([[ 4, 7, 6, 9, 5, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 8, 9, 8, 12, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 7, 12, 5, 7, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 9, 6, 11, 5, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([ [BOS, 4, 5, 6, 7, 8, 9, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 8, 9, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 3, 3, 3, 3, 3, 3, 3, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0 ], [3, 3, 3, 3, 3, 8, 9, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], dtype=np.int32), np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array( [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) expected_output2 = MassOutput( np.array([[ 4, 8, 6, 7, 6, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 4, 9, 4, 12, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 5, 7, 6, 4, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 6, 11, 12, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array([ [1, 4, 5, 6, 7, 8, 9, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0], [3, 3, 3, 3, 3, 8, 9, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 3, 3, 3, 3, 3, 3, 3, 10, 11, 12, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0 ], [3, 3, 3, 3, 3, 8, 9, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], dtype=np.int32), np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array( [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) match_idx = FindResultFromList(result, [expected_output1, expected_output2]) self.assertIsNotNone(match_idx, '{} is not a valid result'.format(result)) def testKeepMaskedTokens(self): ids = np.array( [[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32) weights = np.array( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32) actual_seq_len = np.array([14, 10, 14, 10], dtype=np.int32) g = tf.Graph() with g.as_default(): (src_ids, tgt_ids, tgt_labels, tgt_weights) = ops.mass( ids, weights, actual_seq_len, mask_id=3, mask_ratio=0.5, mask_minlen=1, span_len=3, keep_prob=1, rand_prob=0, mask_prob=0, mask_target=True, vocab_size=9) with self.session(graph=g) as sess: (src_ids, tgt_ids, tgt_labels, tgt_weights) = sess.run([ src_ids, tgt_ids, tgt_labels, tgt_weights, ]) result = MassOutput(src_ids, tgt_ids, tgt_labels, tgt_weights) expected_output1 = MassOutput( np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 
7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array([[ 3, 4, 5, 6, 7, 8, 9, 3, 3, 3, 3, 3, 3, 16, 0, 0, 0, 0, 0, 0 ], [ 3, 4, 5, 3, 3, 3, 3, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 3, 3, 3, 3, 3, 8, 9, 10, 3, 12, 3, 14, 15, 16, 0, 0, 0, 0, 0, 0 ], [BOS, 4, 3, 6, 7, 8, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array( [[0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) expected_output2 = MassOutput( np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array([[ 3, 3, 3, 6, 7, 8, 3, 10, 11, 12, 3, 3, 3, 16, 0, 0, 0, 0, 0, 0 ], [ 1, 4, 3, 3, 3, 3, 3, 10, 11, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 3, 4, 5, 6, 3, 3, 3, 3, 3, 12, 13, 14, 3, 16, 0, 0, 0, 0, 0, 0 ], [3, 3, 5, 6, 3, 3, 9, 10, 11, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array( [[0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) match_idx = FindResultFromList(result, [expected_output1, expected_output2]) self.assertIsNotNone(match_idx, '{} is not a valid result'.format(result)) def testSpanLen1(self): ids = np.array( [[4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0], [4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32) weights = np.array( [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32) actual_seq_len = np.array([14, 10, 14, 10], dtype=np.int32) g = tf.Graph() with g.as_default(): (src_ids, tgt_ids, tgt_labels, tgt_weights) = ops.mass( ids, weights, actual_seq_len, mask_id=3, mask_ratio=0.5, mask_minlen=1, span_len=1, keep_prob=0, rand_prob=0, mask_prob=1, mask_target=True, vocab_size=9) with self.session(graph=g) as sess: (src_ids, tgt_ids, tgt_labels, tgt_weights) = sess.run([ src_ids, tgt_ids, tgt_labels, tgt_weights, ]) result = MassOutput(src_ids, tgt_ids, tgt_labels, tgt_weights) expected_output1 = MassOutput( np.array([ [4, 3, 3, 7, 8, 9, 3, 3, 3, 13, 14, 3, 16, 3, 0, 0, 0, 0, 0, 0], [3, 3, 6, 3, 3, 9, 10, 11, 12, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4, 5, 3, 3, 8, 9, 10, 3, 3, 13, 3, 15, 3, 3, 0, 0, 0, 0, 0, 0], [3, 5, 6, 3, 8, 3, 10, 3, 12, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], dtype=np.int32), np.array([[ 3, 4, 5, 3, 3, 3, 9, 10, 11, 3, 3, 14, 3, 16, 0, 0, 0, 0, 0, 0 ], [ 1, 4, 3, 6, 7, 3, 3, 3, 3, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 3, 3, 5, 6, 3, 3, 3, 10, 11, 3, 13, 3, 15, 16, 0, 0, 0, 0, 0, 0 ], [BOS, 3, 3, 6, 
3, 8, 3, 10, 3, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], dtype=np.int32), np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, EOS, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, EOS, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array( [[0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) expected_output2 = MassOutput( np.array([[ 4, 5, 6, 3, 3, 3, 10, 3, 12, 13, 3, 3, 16, 3, 0, 0, 0, 0, 0, 0 ], [ 3, 5, 3, 7, 3, 9, 3, 3, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 3, 5, 3, 3, 8, 9, 3, 3, 12, 3, 3, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [3, 3, 3, 7, 8, 9, 3, 11, 12, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.int32), np.array([[ 3, 3, 3, 6, 7, 8, 3, 10, 3, 3, 13, 14, 3, 16, 0, 0, 0, 0, 0, 0 ], [1, 3, 5, 3, 7, 3, 9, 10, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ], [ 1, 3, 5, 6, 3, 3, 9, 10, 3, 12, 13, 3, 3, 3, 0, 0, 0, 0, 0, 0 ], [1, 4, 5, 3, 3, 3, 9, 3, 3, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], dtype=np.int32), np.array([[ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 2, 0, 0, 0, 0, 0, 0 ], [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]], dtype=np.int32), np.array( [[0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0], [1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.float32)) match_idx = FindResultFromList(result, [expected_output1, expected_output2]) self.assertIsNotNone(match_idx, '{} is not a valid 
result'.format(result)) def testZeroLengthSeq(self): ids = np.array([[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], dtype=np.int32) weights = np.array([[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], dtype=np.float32) actual_seq_len = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.int32) g = tf.Graph() with g.as_default(): (src_ids, tgt_ids, tgt_labels, tgt_weights) = ops.mass( ids, weights, actual_seq_len, mask_id=3, mask_ratio=0.5, mask_minlen=0, span_len=8, random_start_prob=0, keep_prob=0, rand_prob=0, mask_prob=1, mask_target=True, vocab_size=9) with self.session(graph=g) as sess: (src_ids, tgt_ids, tgt_labels, tgt_weights) = sess.run([src_ids, tgt_ids, tgt_labels, tgt_weights]) self.assertAllEqual( src_ids, np.array([[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], dtype=np.int32)) self.assertAllEqual( tgt_ids, np.array([[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], dtype=np.int32)) self.assertAllEqual( tgt_labels, np.array([[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], dtype=np.int32)) self.assertAllEqual( tgt_weights, np.array([[0], [0], [0], [0], [0], [0], [0], [0], [0], [0]], dtype=np.float32)) if __name__ == '__main__': tf.test.main()
apache-2.0
shsingh/ansible
lib/ansible/module_utils/network/eos/config/lacp_interfaces/lacp_interfaces.py
12
6884
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The eos_lacp_interfaces class
It is in this file where the current configuration (as dict) is compared to
the provided configuration (as dict) and the command set necessary to bring
the current configuration to it's desired end-state is created
"""

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils.network.common.cfg.base import ConfigBase
from ansible.module_utils.network.common.utils import to_list, dict_diff, param_list_to_dict
from ansible.module_utils.network.eos.facts.facts import Facts
from ansible.module_utils.network.eos.utils.utils import normalize_interface


class Lacp_interfaces(ConfigBase):
    """
    The eos_lacp_interfaces class
    """

    gather_subset = [
        '!all',
        '!min',
    ]

    gather_network_resources = [
        'lacp_interfaces',
    ]

    def get_lacp_interfaces_facts(self):
        """ Get the 'facts' (the current configuration)

        :rtype: A dictionary
        :returns: The current configuration as a dictionary
        """
        facts, _warnings = Facts(self._module).get_facts(
            self.gather_subset, self.gather_network_resources)
        lacp_interfaces_facts = facts['ansible_network_resources'].get(
            'lacp_interfaces')
        if not lacp_interfaces_facts:
            return []
        return lacp_interfaces_facts

    def execute_module(self):
        """ Execute the module

        :rtype: A dictionary
        :returns: The result from module execution
        """
        result = {'changed': False}
        warnings = list()
        commands = list()

        existing_lacp_interfaces_facts = self.get_lacp_interfaces_facts()
        commands.extend(self.set_config(existing_lacp_interfaces_facts))
        if commands:
            # check_mode reports the commands without pushing them.
            if not self._module.check_mode:
                self._connection.edit_config(commands)
            result['changed'] = True
        result['commands'] = commands

        changed_lacp_interfaces_facts = self.get_lacp_interfaces_facts()

        result['before'] = existing_lacp_interfaces_facts
        if result['changed']:
            result['after'] = changed_lacp_interfaces_facts

        result['warnings'] = warnings
        return result

    def set_config(self, existing_lacp_interfaces_facts):
        """ Collect the configuration from the args passed to the module,
            collect the current configuration (as a dict from facts)

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        want = self._module.params['config']
        have = existing_lacp_interfaces_facts
        resp = self.set_state(want, have)
        return to_list(resp)

    def set_state(self, want, have):
        """ Select the appropriate function based on the state provided

        :param want: the desired configuration as a dictionary
        :param have: the current configuration as a dictionary
        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        state = self._module.params['state']
        # Key both sides by interface name for dict-based comparison.
        want = param_list_to_dict(want)
        have = param_list_to_dict(have)
        if state == 'overridden':
            commands = self._state_overridden(want, have)
        elif state == 'deleted':
            commands = self._state_deleted(want, have)
        elif state == 'merged':
            commands = self._state_merged(want, have)
        elif state == 'replaced':
            commands = self._state_replaced(want, have)
        return commands

    @staticmethod
    def _state_replaced(want, have):
        """ The command generator when state is replaced

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        for key, desired in want.items():
            # Canonicalize user-supplied names ("Eth1" -> "Ethernet1") so
            # they match the keys reported by the facts.
            interface_name = normalize_interface(key)
            if interface_name in have:
                extant = have[interface_name]
            else:
                extant = dict()

            add_config = dict_diff(extant, desired)
            del_config = dict_diff(desired, extant)

            commands.extend(generate_commands(key, add_config, del_config))

        return commands

    @staticmethod
    def _state_overridden(want, have):
        """ The command generator when state is overridden

        :rtype: A list
        :returns: the commands necessary to migrate the current configuration
                  to the desired configuration
        """
        commands = []
        # FIX: normalize the user-supplied names the same way
        # _state_replaced/_state_merged do; previously an abbreviated name
        # ("Eth1") never matched the canonical facts key, so the interface's
        # existing config was removed instead of being overridden in place.
        want = dict((normalize_interface(key), desired)
                    for key, desired in want.items())
        for key, extant in have.items():
            if key in want:
                desired = want[key]
            else:
                desired = dict()

            add_config = dict_diff(extant, desired)
            del_config = dict_diff(desired, extant)

            commands.extend(generate_commands(key, add_config, del_config))

        return commands

    @staticmethod
    def _state_merged(want, have):
        """ The command generator when state is merged

        :rtype: A list
        :returns: the commands necessary to merge the provided into
                  the current configuration
        """
        commands = []
        for key, desired in want.items():
            # Canonicalize so abbreviated names match the facts keys.
            interface_name = normalize_interface(key)
            if interface_name in have:
                extant = have[interface_name]
            else:
                extant = dict()

            add_config = dict_diff(extant, desired)

            commands.extend(generate_commands(key, add_config, {}))

        return commands

    @staticmethod
    def _state_deleted(want, have):
        """ The command generator when state is deleted

        :rtype: A list
        :returns: the commands necessary to remove the current configuration
                  of the provided objects
        """
        commands = []
        for key in want:
            desired = dict()
            # FIX: normalize the user-supplied name before the lookup
            # (consistent with _state_replaced/_state_merged); previously an
            # abbreviated name never matched a facts key and the delete was
            # silently skipped.
            interface_name = normalize_interface(key)
            if interface_name in have:
                extant = have[interface_name]
            else:
                continue

            del_config = dict_diff(desired, extant)

            commands.extend(generate_commands(key, {}, del_config))

        return commands


def generate_commands(interface, to_set, to_remove):
    """Render the CLI commands for a single interface.

    :param interface: name used in the leading "interface" line
    :param to_set: dict of lacp attributes to configure
    :param to_remove: dict of lacp attributes to negate
    :rtype: A list
    :returns: negations first, then assignments, prefixed with the
              "interface" line only when there is something to do.
              Attribute keys have underscores replaced with dashes to match
              the device CLI syntax.
    """
    commands = []
    for key in to_remove.keys():
        commands.append("no lacp {0}".format(key.replace("_", "-")))

    for key, value in to_set.items():
        if value is None:
            continue

        commands.append("lacp {0} {1}".format(key.replace("_", "-"), value))

    if commands:
        commands.insert(0, "interface {0}".format(interface))

    return commands
gpl-3.0
amorwilliams/gst
server/apps/commands.py
1
2340
# -*- coding:utf-8 -*-
from flask_script import Command, Option, prompt_bool

import os


class CreateDB(Command):
    """
    Creates database using SQLAlchemy
    """

    def run(self):
        # Import lazily so the manage script still loads when the project
        # has no database module.
        try:
            from database import create_all

            create_all()
        except ImportError:
            # Py3-compatible handler (was `except ImportError, e:`, a
            # Python-2-only syntax; the bound exception was never used).
            print("Please, make sure database.create_all exists in order to create a db.")


class DropDB(Command):
    """
    Drops database using SQLAlchemy
    """

    def run(self):
        try:
            from database import drop_all

            drop_all()
        except ImportError:
            print("Please, make sure database.drop_all exists in order to drop a db.")


class Test(Command):
    """
    Run tests
    """

    verbosity = 2
    failfast = False

    def get_options(self):
        return [
            # dest must match the parameter name of run() below so
            # flask-script can pass the parsed value through (was
            # dest='verbose', which run() does not accept).
            Option('--verbosity', '-v', dest='verbosity',
                   type=int, default=self.verbosity),
            # --failfast must *enable* fail-fast: with the previous
            # action='store_false' and default=False the flag was a no-op.
            Option('--failfast', dest='failfast',
                   default=self.failfast, action='store_true'),
        ]

    def run(self, verbosity, failfast):
        """Discover and run all tests under apps/*/tests and ./tests."""
        import sys
        import glob
        import unittest

        exists = os.path.exists
        isdir = os.path.isdir
        join = os.path.join

        # Make the project root importable by the discovered tests.
        project_path = os.getcwd()
        sys.path.insert(0, project_path)

        # our special folder for blueprints
        if exists('apps'):
            sys.path.insert(0, join('apps'))

        loader = unittest.TestLoader()
        all_tests = []

        if exists('apps'):
            for path in glob.glob('apps/*'):
                if isdir(path):
                    tests_dir = join(path, 'tests')

                    # Accept either a single tests.py module or a tests/
                    # package per blueprint.
                    if exists(join(path, 'tests.py')):
                        all_tests.append(loader.discover(path, 'tests.py'))

                    elif exists(tests_dir):
                        all_tests.append(
                            loader.discover(tests_dir, pattern='test*.py'))

        # Project-level tests: a tests/ directory wins over a tests.py file.
        if exists('tests') and isdir('tests'):
            all_tests.append(loader.discover('tests', pattern='test*.py'))
        elif exists('tests.py'):
            all_tests.append(loader.discover('.', pattern='tests.py'))

        test_suite = unittest.TestSuite(all_tests)
        unittest.TextTestRunner(
            verbosity=verbosity, failfast=failfast).run(test_suite)
mit
blarghmatey/pip
tests/functional/test_uninstall_user.py
36
3417
"""
tests specific to uninstalling --user installs
"""
import pytest
from os.path import isdir, isfile
from tests.lib import pyversion, assert_all_changes
from tests.functional.test_install_user import _patch_dist_in_site_packages


class Tests_UninstallUserSite:
    # Integration tests for `pip uninstall` of packages installed with
    # --user; each test drives a scripted virtualenv fixture (`script`).

    @pytest.mark.network
    def test_uninstall_from_usersite(self, script, virtualenv):
        """
        Test uninstall from usersite
        """
        virtualenv.system_site_packages = True
        result1 = script.pip('install', '--user', 'INITools==0.3')
        result2 = script.pip('uninstall', '-y', 'INITools')
        # After uninstall, the environment should be back to its pre-install
        # state except for the build dir and download cache.
        assert_all_changes(result1, result2, [script.venv / 'build', 'cache'])

    def test_uninstall_from_usersite_with_dist_in_global_site(
            self, script, virtualenv):
        """
        Test uninstall from usersite (with same dist in global site)
        """
        # the test framework only supports testing using virtualenvs.
        # the sys.path ordering for virtualenvs with --system-site-packages is
        # this: virtualenv-site, user-site, global-site.
        # this test will use 2 modifications to simulate the
        # user-site/global-site relationship
        # 1) a monkey patch which will make it appear piptestpackage is not in
        #    the virtualenv site if we don't patch this, pip will return an
        #    installation error:  "Will not install to the usersite because it
        #    will lack sys.path precedence..."
        # 2) adding usersite to PYTHONPATH, so usersite has sys.path precedence
        #    over the virtualenv site
        virtualenv.system_site_packages = True
        script.environ["PYTHONPATH"] = script.base_path / script.user_site
        _patch_dist_in_site_packages(script)

        # 0.1 goes to the (simulated) global site; 0.1.1 to the user site.
        script.pip_install_local('pip-test-package==0.1')

        result2 = script.pip_install_local('--user', 'pip-test-package==0.1.1')
        result3 = script.pip('uninstall', '-vy', 'pip-test-package')

        # uninstall console is mentioning user scripts, but not global scripts
        assert script.user_bin_path in result3.stdout
        assert script.bin_path not in result3.stdout

        # uninstall worked
        assert_all_changes(result2, result3, [script.venv / 'build', 'cache'])

        # global site still has 0.1 (can't look in result1; have to check
        # the egg-info directory on disk directly)
        egg_info_folder = (
            script.base_path / script.site_packages /
            'pip_test_package-0.1-py%s.egg-info' % pyversion
        )
        assert isdir(egg_info_folder)

    def test_uninstall_editable_from_usersite(self, script, virtualenv, data):
        """
        Test uninstall editable local user install
        """
        virtualenv.system_site_packages = True
        script.user_site_path.makedirs()

        # install
        to_install = data.packages.join("FSPkg")
        result1 = script.pip(
            'install', '--user', '-e', to_install, expect_error=False,
        )
        # An editable install is recorded via an .egg-link file in the
        # user site, not an egg-info directory.
        egg_link = script.user_site / 'FSPkg.egg-link'
        assert egg_link in result1.files_created, str(result1.stdout)

        # uninstall
        result2 = script.pip('uninstall', '-y', 'FSPkg')
        assert not isfile(script.base_path / egg_link)
        assert_all_changes(
            result1,
            result2,
            [
                script.venv / 'build',
                'cache',
                # easy-install.pth is rewritten (not removed) on uninstall,
                # so exclude it from the before/after comparison.
                script.user_site / 'easy-install.pth',
            ]
        )
mit
sandeepkoduri/GAE-html-to-pdf
libs/PyPDF2/generic.py
5
45254
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2006, Mathieu Fenniak
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

"""
Implementation of generic PDF objects (dictionary, number, string, and so on)
"""
__author__ = "Mathieu Fenniak"
__author_email__ = "biziqe@mathieu.fenniak.net"

import re
from .utils import readNonWhitespace, RC4_encrypt, skipOverComment
from .utils import b_, u_, chr_, ord_
from .utils import PdfStreamError
import warnings
from . import filters
from . import utils
import decimal
import codecs
import sys
#import debugging

# First-character dispatch table for readObject(); the index of the peeked
# byte in this string selects the object type parsed below.
ObjectPrefix = b_('/<[tf(n%')
NumberSigns = b_('+-')
IndirectPattern = re.compile(b_(r"(\d+)\s+(\d+)\s+R[^a-zA-Z]"))


def readObject(stream, pdf):
    """Parse the next PDF object from *stream*, dispatching on its first byte."""
    tok = stream.read(1)
    stream.seek(-1, 1) # reset to start
    idx = ObjectPrefix.find(tok)
    if idx == 0:
        # name object
        return NameObject.readFromStream(stream, pdf)
    elif idx == 1:
        # hexadecimal string OR dictionary
        peek = stream.read(2)
        stream.seek(-2, 1) # reset to start
        if peek == b_('<<'):
            return DictionaryObject.readFromStream(stream, pdf)
        else:
            return readHexStringFromStream(stream)
    elif idx == 2:
        # array object
        return ArrayObject.readFromStream(stream, pdf)
    elif idx == 3 or idx == 4:
        # boolean object ('t' or 'f')
        return BooleanObject.readFromStream(stream)
    elif idx == 5:
        # string object
        return readStringFromStream(stream)
    elif idx == 6:
        # null object
        return NullObject.readFromStream(stream)
    elif idx == 7:
        # comment: skip to end of line, then recurse for the real object
        while tok not in (b_('\r'), b_('\n')):
            tok = stream.read(1)
        tok = readNonWhitespace(stream)
        stream.seek(-1, 1)
        return readObject(stream, pdf)
    else:
        # number object OR indirect reference
        if tok in NumberSigns:
            # number
            return NumberObject.readFromStream(stream)
        peek = stream.read(20)
        stream.seek(-len(peek), 1) # reset to start
        if IndirectPattern.match(peek) != None:
            return IndirectObject.readFromStream(stream, pdf)
        else:
            return NumberObject.readFromStream(stream)


class PdfObject(object):
    def getObject(self):
        """Resolves indirect references."""
        return self


class NullObject(PdfObject):
    # The PDF 'null' object.
    def writeToStream(self, stream, encryption_key):
        stream.write(b_("null"))

    def readFromStream(stream):
        nulltxt = stream.read(4)
        if nulltxt != b_("null"):
            raise utils.PdfReadError("Could not read Null object")
        return NullObject()
    readFromStream = staticmethod(readFromStream)


class BooleanObject(PdfObject):
    # The PDF 'true'/'false' object.
    def __init__(self, value):
        self.value = value

    def writeToStream(self, stream, encryption_key):
        if self.value:
            stream.write(b_("true"))
        else:
            stream.write(b_("false"))

    def readFromStream(stream):
        word = stream.read(4)
        if word == b_("true"):
            return BooleanObject(True)
        elif word == b_("fals"):
            # "false" is five bytes; consume the trailing 'e'.
            stream.read(1)
            return BooleanObject(False)
        else:
            raise utils.PdfReadError('Could not read Boolean object')
    readFromStream = staticmethod(readFromStream)


class ArrayObject(list, PdfObject):
    def writeToStream(self, stream, encryption_key):
        stream.write(b_("["))
        for data in self:
            stream.write(b_(" "))
            data.writeToStream(stream, encryption_key)
        stream.write(b_(" ]"))

    def readFromStream(stream, pdf):
        arr = ArrayObject()
        tmp = stream.read(1)
        if tmp != b_("["):
            raise utils.PdfReadError("Could not read array")
        while True:
            # skip leading whitespace
            tok = stream.read(1)
            while tok.isspace():
                tok = stream.read(1)
            stream.seek(-1, 1)
            # check for array ending
            peekahead = stream.read(1)
            if peekahead == b_("]"):
                break
            stream.seek(-1, 1)
            # read and append obj
            arr.append(readObject(stream, pdf))
        return arr
    readFromStream = staticmethod(readFromStream)


class IndirectObject(PdfObject):
    # A reference ("N G R") to an object stored elsewhere in the PDF.
    def __init__(self, idnum, generation, pdf):
        self.idnum = idnum
        self.generation = generation
        self.pdf = pdf

    def getObject(self):
        # Resolve through the owning PdfFileReader/Writer; chain getObject
        # in case the target is itself indirect.
        return self.pdf.getObject(self).getObject()

    def __repr__(self):
        return "IndirectObject(%r, %r)" % (self.idnum, self.generation)

    def __eq__(self, other):
        return (
            other != None and
            isinstance(other, IndirectObject) and
            self.idnum == other.idnum and
            self.generation == other.generation and
            self.pdf is other.pdf
            )

    def __ne__(self, other):
        return not self.__eq__(other)

    def writeToStream(self, stream, encryption_key):
        stream.write(b_("%s %s R" % (self.idnum, self.generation)))

    def readFromStream(stream, pdf):
        idnum = b_("")
        while True:
            tok = stream.read(1)
            if not tok:
                # stream has truncated prematurely
                raise PdfStreamError("Stream has ended unexpectedly")
            if tok.isspace():
                break
            idnum += tok
        generation = b_("")
        while True:
            tok = stream.read(1)
            if not tok:
                # stream has truncated prematurely
                raise PdfStreamError("Stream has ended unexpectedly")
            if tok.isspace():
                if not generation:
                    # tolerate extra whitespace between id and generation
                    continue
                break
            generation += tok
        r = readNonWhitespace(stream)
        if r != b_("R"):
            raise utils.PdfReadError("Error reading indirect object reference at byte %s" % utils.hexStr(stream.tell()))
        return IndirectObject(int(idnum), int(generation), pdf)
    readFromStream = staticmethod(readFromStream)


class FloatObject(decimal.Decimal, PdfObject):
    def __new__(cls, value="0", context=None):
        # NOTE(review): bare except — falls back to str() conversion for any
        # failure of the primary Decimal construction.
        try:
            return decimal.Decimal.__new__(cls, utils.str_(value), context)
        except:
            return decimal.Decimal.__new__(cls, str(value))

    def __repr__(self):
        if self == self.to_integral():
            # Integral values render without a fractional part.
            return str(self.quantize(decimal.Decimal(1)))
        else:
            # Standard formatting adds useless extraneous zeros.
            o = "%.5f" % self
            # Remove the zeros.
            while o and o[-1] == '0':
                o = o[:-1]
            return o

    def as_numeric(self):
        return float(b_(repr(self)))

    def writeToStream(self, stream, encryption_key):
        stream.write(b_(repr(self)))


class NumberObject(int, PdfObject):
    NumberPattern = re.compile(b_('[^+-.0-9]'))
    ByteDot = b_(".")

    def __new__(cls, value):
        val = int(value)
        try:
            return int.__new__(cls, val)
        except OverflowError:
            # Clamp unrepresentable values to 0 rather than failing the parse.
            return int.__new__(cls, 0)

    def as_numeric(self):
        return int(b_(repr(self)))

    def writeToStream(self, stream, encryption_key):
        stream.write(b_(repr(self)))

    def readFromStream(stream):
        num = utils.readUntilRegex(stream, NumberObject.NumberPattern)
        # A decimal point anywhere in the token makes this a FloatObject.
        if num.find(NumberObject.ByteDot) != -1:
            return FloatObject(num)
        else:
            return NumberObject(num)
    readFromStream = staticmethod(readFromStream)


##
# Given a string (either a "str" or "unicode"), create a ByteStringObject or a
# TextStringObject to represent the string.
def createStringObject(string):
    # Wrap a text or byte string in the appropriate PDF string object,
    # auto-detecting UTF-16 (BOM) and PDFDocEncoding for byte input.
    if isinstance(string, utils.string_type):
        return TextStringObject(string)
    elif isinstance(string, utils.bytes_type):
        try:
            if string.startswith(codecs.BOM_UTF16_BE):
                retval = TextStringObject(string.decode("utf-16"))
                retval.autodetect_utf16 = True
                return retval
            else:
                # This is probably a big performance hit here, but we need to
                # convert string objects into the text/unicode-aware version if
                # possible... and the only way to check if that's possible is
                # to try.  Some strings are strings, some are just byte arrays.
                retval = TextStringObject(decode_pdfdocencoding(string))
                retval.autodetect_pdfdocencoding = True
                return retval
        except UnicodeDecodeError:
            # Not decodable as text: keep the raw bytes.
            return ByteStringObject(string)
    else:
        raise TypeError("createStringObject should have str or unicode arg")


def readHexStringFromStream(stream):
    # Parse a hex string "<...>"; the opening '<' is consumed here.
    stream.read(1)
    txt = ""
    x = b_("")
    while True:
        tok = readNonWhitespace(stream)
        if not tok:
            # stream has truncated prematurely
            raise PdfStreamError("Stream has ended unexpectedly")
        if tok == b_(">"):
            break
        x += tok
        # Each pair of hex digits decodes to one character.
        # NOTE(review): txt is a native str while x is bytes; presumably
        # b_() at the end re-normalizes — confirm on Python 3 inputs.
        if len(x) == 2:
            txt += chr(int(x, base=16))
            x = b_("")
    # An odd trailing digit is padded with '0' per the PDF spec.
    if len(x) == 1:
        x += b_("0")
    if len(x) == 2:
        txt += chr(int(x, base=16))
    return createStringObject(b_(txt))


def readStringFromStream(stream):
    # Parse a literal string "(...)", honoring nested parens and
    # backslash escape sequences; the opening '(' is consumed first.
    tok = stream.read(1)
    parens = 1
    txt = b_("")
    while True:
        tok = stream.read(1)
        if not tok:
            # stream has truncated prematurely
            raise PdfStreamError("Stream has ended unexpectedly")
        if tok == b_("("):
            parens += 1
        elif tok == b_(")"):
            parens -= 1
            if parens == 0:
                break
        elif tok == b_("\\"):
            tok = stream.read(1)
            if tok == b_("n"):
                tok = b_("\n")
            elif tok == b_("r"):
                tok = b_("\r")
            elif tok == b_("t"):
                tok = b_("\t")
            elif tok == b_("b"):
                tok = b_("\b")
            elif tok == b_("f"):
                tok = b_("\f")
            elif tok == b_("c"):
                tok = b_("\c")
            elif tok == b_("("):
                tok = b_("(")
            elif tok == b_(")"):
                tok = b_(")")
            elif tok == b_("/"):
                tok = b_("/")
            elif tok == b_("\\"):
                tok = b_("\\")
            elif tok in (b_(" "), b_("/"), b_("%"), b_("<"), b_(">"), b_("["),
                         b_("]"), b_("#"), b_("_"), b_("&"), b_('$')):
                # odd/unnessecary escape sequences we have encountered
                tok = b_(tok)
            elif tok.isdigit():
                # "The number ddd may consist of one, two, or three
                # octal digits; high-order overflow shall be ignored.
                # Three octal digits shall be used, with leading zeros
                # as needed, if the next character of the string is also
                # a digit." (PDF reference 7.3.4.2, p 16)
                for i in range(2):
                    ntok = stream.read(1)
                    if ntok.isdigit():
                        tok += ntok
                    else:
                        break
                tok = b_(chr(int(tok, base=8)))
            elif tok in b_("\n\r"):
                # This case is hit when a backslash followed by a line
                # break occurs.  If it's a multi-char EOL, consume the
                # second character:
                tok = stream.read(1)
                if not tok in b_("\n\r"):
                    stream.seek(-1, 1)
                # Then don't add anything to the actual string, since this
                # line break was escaped:
                tok = b_('')
            else:
                raise utils.PdfReadError(r"Unexpected escaped string: %s" % tok)
        txt += tok
    return createStringObject(txt)


##
# Represents a string object where the text encoding could not be determined.
# This occurs quite often, as the PDF spec doesn't provide an alternate way to
# represent strings -- for example, the encryption data stored in files (like
# /O) is clearly not text, but is still stored in a "String" object.
class ByteStringObject(utils.bytes_type, PdfObject):

    ##
    # For compatibility with TextStringObject.original_bytes.  This method
    # returns self.
    original_bytes = property(lambda self: self)

    def writeToStream(self, stream, encryption_key):
        bytearr = self
        if encryption_key:
            bytearr = RC4_encrypt(encryption_key, bytearr)
        # Byte strings are always written in hex form to avoid escaping.
        stream.write(b_("<"))
        stream.write(utils.hexencode(bytearr))
        stream.write(b_(">"))


##
# Represents a string object that has been decoded into a real unicode string.
# If read from a PDF document, this string appeared to match the
# PDFDocEncoding, or contained a UTF-16BE BOM mark to cause UTF-16 decoding to
# occur.
class TextStringObject(utils.string_type, PdfObject): autodetect_pdfdocencoding = False autodetect_utf16 = False ## # It is occasionally possible that a text string object gets created where # a byte string object was expected due to the autodetection mechanism -- # if that occurs, this "original_bytes" property can be used to # back-calculate what the original encoded bytes were. original_bytes = property(lambda self: self.get_original_bytes()) def get_original_bytes(self): # We're a text string object, but the library is trying to get our raw # bytes. This can happen if we auto-detected this string as text, but # we were wrong. It's pretty common. Return the original bytes that # would have been used to create this object, based upon the autodetect # method. if self.autodetect_utf16: return codecs.BOM_UTF16_BE + self.encode("utf-16be") elif self.autodetect_pdfdocencoding: return encode_pdfdocencoding(self) else: raise Exception("no information about original bytes") def writeToStream(self, stream, encryption_key): # Try to write the string out as a PDFDocEncoding encoded string. It's # nicer to look at in the PDF file. Sadly, we take a performance hit # here for trying... 
try: bytearr = encode_pdfdocencoding(self) except UnicodeEncodeError: bytearr = codecs.BOM_UTF16_BE + self.encode("utf-16be") if encryption_key: bytearr = RC4_encrypt(encryption_key, bytearr) obj = ByteStringObject(bytearr) obj.writeToStream(stream, None) else: stream.write(b_("(")) for c in bytearr: if not chr_(c).isalnum() and c != b_(' '): stream.write(b_("\\%03o" % ord_(c))) else: stream.write(b_(chr_(c))) stream.write(b_(")")) class NameObject(str, PdfObject): delimiterPattern = re.compile(b_(r"\s+|[\(\)<>\[\]{}/%]")) surfix = b_("/") def writeToStream(self, stream, encryption_key): stream.write(b_(self)) def readFromStream(stream, pdf): debug = False if debug: print((stream.tell())) name = stream.read(1) if name != NameObject.surfix: raise utils.PdfReadError("name read error") name += utils.readUntilRegex(stream, NameObject.delimiterPattern, ignore_eof=True) if debug: print(name) try: return NameObject(name.decode('utf-8')) except (UnicodeEncodeError, UnicodeDecodeError) as e: # Name objects should represent irregular characters # with a '#' followed by the symbol's hex number if not pdf.strict: warnings.warn("Illegal character in Name Object", utils.PdfReadWarning) return NameObject(name) else: raise utils.PdfReadError("Illegal character in Name Object") readFromStream = staticmethod(readFromStream) class DictionaryObject(dict, PdfObject): def raw_get(self, key): return dict.__getitem__(self, key) def __setitem__(self, key, value): if not isinstance(key, PdfObject): raise ValueError("key must be PdfObject") if not isinstance(value, PdfObject): raise ValueError("value must be PdfObject") return dict.__setitem__(self, key, value) def setdefault(self, key, value=None): if not isinstance(key, PdfObject): raise ValueError("key must be PdfObject") if not isinstance(value, PdfObject): raise ValueError("value must be PdfObject") return dict.setdefault(self, key, value) def __getitem__(self, key): return dict.__getitem__(self, key).getObject() ## # Retrieves XMP 
(Extensible Metadata Platform) data relevant to the # this object, if available. # <p> # Stability: Added in v1.12, will exist for all future v1.x releases. # @return Returns a {@link #xmp.XmpInformation XmlInformation} instance # that can be used to access XMP metadata from the document. Can also # return None if no metadata was found on the document root. def getXmpMetadata(self): metadata = self.get("/Metadata", None) if metadata == None: return None metadata = metadata.getObject() from . import xmp if not isinstance(metadata, xmp.XmpInformation): metadata = xmp.XmpInformation(metadata) self[NameObject("/Metadata")] = metadata return metadata ## # Read-only property that accesses the {@link # #DictionaryObject.getXmpData getXmpData} function. # <p> # Stability: Added in v1.12, will exist for all future v1.x releases. xmpMetadata = property(lambda self: self.getXmpMetadata(), None, None) def writeToStream(self, stream, encryption_key): stream.write(b_("<<\n")) for key, value in list(self.items()): key.writeToStream(stream, encryption_key) stream.write(b_(" ")) value.writeToStream(stream, encryption_key) stream.write(b_("\n")) stream.write(b_(">>")) def readFromStream(stream, pdf): debug = False tmp = stream.read(2) if tmp != b_("<<"): raise utils.PdfReadError("Dictionary read error at byte %s: stream must begin with '<<'" % utils.hexStr(stream.tell())) data = {} while True: tok = readNonWhitespace(stream) if tok == b_('\x00'): continue elif tok == b_('%'): stream.seek(-1, 1) skipOverComment(stream) continue if not tok: # stream has truncated prematurely raise PdfStreamError("Stream has ended unexpectedly") if debug: print(("Tok:", tok)) if tok == b_(">"): stream.read(1) break stream.seek(-1, 1) key = readObject(stream, pdf) tok = readNonWhitespace(stream) stream.seek(-1, 1) value = readObject(stream, pdf) if not data.get(key): data[key] = value elif pdf.strict: # multiple definitions of key not permitted raise utils.PdfReadError("Multiple definitions in 
dictionary at byte %s for key %s" \ % (utils.hexStr(stream.tell()), key)) else: warnings.warn("Multiple definitions in dictionary at byte %s for key %s" \ % (utils.hexStr(stream.tell()), key), utils.PdfReadWarning) pos = stream.tell() s = readNonWhitespace(stream) if s == b_('s') and stream.read(5) == b_('tream'): eol = stream.read(1) # odd PDF file output has spaces after 'stream' keyword but before EOL. # patch provided by Danial Sandler while eol == b_(' '): eol = stream.read(1) assert eol in (b_("\n"), b_("\r")) if eol == b_("\r"): # read \n after if stream.read(1) != b_('\n'): stream.seek(-1, 1) # this is a stream object, not a dictionary assert "/Length" in data length = data["/Length"] if debug: print(data) if isinstance(length, IndirectObject): t = stream.tell() length = pdf.getObject(length) stream.seek(t, 0) data["__streamdata__"] = stream.read(length) if debug: print("here") #if debug: print(binascii.hexlify(data["__streamdata__"])) e = readNonWhitespace(stream) ndstream = stream.read(8) if (e + ndstream) != b_("endstream"): # (sigh) - the odd PDF file has a length that is too long, so # we need to read backwards to find the "endstream" ending. # ReportLab (unknown version) generates files with this bug, # and Python users into PDF files tend to be our audience. # we need to do this to correct the streamdata and chop off # an extra character. pos = stream.tell() stream.seek(-10, 1) end = stream.read(9) if end == b_("endstream"): # we found it by looking back one character further. data["__streamdata__"] = data["__streamdata__"][:-1] else: if debug: print(("E", e, ndstream, debugging.toHex(end))) stream.seek(pos, 0) raise utils.PdfReadError("Unable to find 'endstream' marker after stream at byte %s." 
% utils.hexStr(stream.tell())) else: stream.seek(pos, 0) if "__streamdata__" in data: return StreamObject.initializeFromDictionary(data) else: retval = DictionaryObject() retval.update(data) return retval readFromStream = staticmethod(readFromStream) class TreeObject(DictionaryObject): def __init__(self): DictionaryObject.__init__(self) def hasChildren(self): return '/First' in self def __iter__(self): return self.children() def children(self): if not self.hasChildren(): raise StopIteration child = self['/First'] while True: yield child if child == self['/Last']: raise StopIteration child = child['/Next'] def addChild(self, child, pdf): childObj = child.getObject() child = pdf.getReference(childObj) assert isinstance(child, IndirectObject) if '/First' not in self: self[NameObject('/First')] = child self[NameObject('/Count')] = NumberObject(0) prev = None else: prev = self['/Last'] self[NameObject('/Last')] = child self[NameObject('/Count')] = NumberObject(self[NameObject('/Count')] + 1) if prev: prevRef = pdf.getReference(prev) assert isinstance(prevRef, IndirectObject) childObj[NameObject('/Prev')] = prevRef prev[NameObject('/Next')] = child parentRef = pdf.getReference(self) assert isinstance(parentRef, IndirectObject) childObj[NameObject('/Parent')] = parentRef def removeChild(self, child): childObj = child.getObject() if NameObject('/Parent') not in childObj: raise ValueError("Removed child does not appear to be a tree item") elif childObj[NameObject('/Parent')] != self: raise ValueError("Removed child is not a member of this tree") found = False prevRef = None prev = None curRef = self[NameObject('/First')] cur = curRef.getObject() lastRef = self[NameObject('/Last')] last = lastRef.getObject() while cur != None: if cur == childObj: if prev == None: if NameObject('/Next') in cur: # Removing first tree node nextRef = cur[NameObject('/Next')] next = nextRef.getObject() del next[NameObject('/Prev')] self[NameObject('/First')] = nextRef self[NameObject('/Count')] = 
self[NameObject('/Count')] - 1 else: # Removing only tree node assert self[NameObject('/Count')] == 1 del self[NameObject('/Count')] del self[NameObject('/First')] if NameObject('/Last') in self: del self[NameObject('/Last')] else: if NameObject('/Next') in cur: # Removing middle tree node nextRef = cur[NameObject('/Next')] next = nextRef.getObject() next[NameObject('/Prev')] = prevRef prev[NameObject('/Next')] = nextRef self[NameObject('/Count')] = self[NameObject('/Count')] - 1 else: # Removing last tree node assert cur == last del prev[NameObject('/Next')] self[NameObject('/Last')] = prevRef self[NameObject('/Count')] = self[NameObject('/Count')] - 1 found = True break prevRef = curRef prev = cur if NameObject('/Next') in cur: curRef = cur[NameObject('/Next')] cur = curRef.getObject() else: curRef = None cur = None if not found: raise ValueError("Removal couldn't find item in tree") del childObj[NameObject('/Parent')] if NameObject('/Next') in childObj: del childObj[NameObject('/Next')] if NameObject('/Prev') in childObj: del childObj[NameObject('/Prev')] def emptyTree(self): for child in self: childObj = child.getObject() del childObj[NameObject('/Parent')] if NameObject('/Next') in childObj: del childObj[NameObject('/Next')] if NameObject('/Prev') in childObj: del childObj[NameObject('/Prev')] if NameObject('/Count') in self: del self[NameObject('/Count')] if NameObject('/First') in self: del self[NameObject('/First')] if NameObject('/Last') in self: del self[NameObject('/Last')] class StreamObject(DictionaryObject): def __init__(self): self._data = None self.decodedSelf = None def writeToStream(self, stream, encryption_key): self[NameObject("/Length")] = NumberObject(len(self._data)) DictionaryObject.writeToStream(self, stream, encryption_key) del self["/Length"] stream.write(b_("\nstream\n")) data = self._data if encryption_key: data = RC4_encrypt(encryption_key, data) stream.write(data) stream.write(b_("\nendstream")) def initializeFromDictionary(data): if 
"/Filter" in data: retval = EncodedStreamObject() else: retval = DecodedStreamObject() retval._data = data["__streamdata__"] del data["__streamdata__"] del data["/Length"] retval.update(data) return retval initializeFromDictionary = staticmethod(initializeFromDictionary) def flateEncode(self): if "/Filter" in self: f = self["/Filter"] if isinstance(f, ArrayObject): f.insert(0, NameObject("/FlateDecode")) else: newf = ArrayObject() newf.append(NameObject("/FlateDecode")) newf.append(f) f = newf else: f = NameObject("/FlateDecode") retval = EncodedStreamObject() retval[NameObject("/Filter")] = f retval._data = filters.FlateDecode.encode(self._data) return retval class DecodedStreamObject(StreamObject): def getData(self): return self._data def setData(self, data): self._data = data class EncodedStreamObject(StreamObject): def __init__(self): self.decodedSelf = None def getData(self): if self.decodedSelf: # cached version of decoded object return self.decodedSelf.getData() else: # create decoded object decoded = DecodedStreamObject() decoded._data = filters.decodeStreamData(self) for key, value in list(self.items()): if not key in ("/Length", "/Filter", "/DecodeParms"): decoded[key] = value self.decodedSelf = decoded return decoded._data def setData(self, data): raise utils.PdfReadError("Creating EncodedStreamObject is not currently supported") class RectangleObject(ArrayObject): """ This class is used to represent *page boxes* in PyPDF2. 
These boxes include: * :attr:`artBox <PyPDF2.pdf.PageObject.artBox>` * :attr:`bleedBox <PyPDF2.pdf.PageObject.bleedBox>` * :attr:`cropBox <PyPDF2.pdf.PageObject.cropBox>` * :attr:`mediaBox <PyPDF2.pdf.PageObject.mediaBox>` * :attr:`trimBox <PyPDF2.pdf.PageObject.trimBox>` """ def __init__(self, arr): # must have four points assert len(arr) == 4 # automatically convert arr[x] into NumberObject(arr[x]) if necessary ArrayObject.__init__(self, [self.ensureIsNumber(x) for x in arr]) def ensureIsNumber(self, value): if not isinstance(value, (NumberObject, FloatObject)): value = FloatObject(value) return value def __repr__(self): return "RectangleObject(%s)" % repr(list(self)) def getLowerLeft_x(self): return self[0] def getLowerLeft_y(self): return self[1] def getUpperRight_x(self): return self[2] def getUpperRight_y(self): return self[3] def getUpperLeft_x(self): return self.getLowerLeft_x() def getUpperLeft_y(self): return self.getUpperRight_y() def getLowerRight_x(self): return self.getUpperRight_x() def getLowerRight_y(self): return self.getLowerLeft_y() def getLowerLeft(self): return self.getLowerLeft_x(), self.getLowerLeft_y() def getLowerRight(self): return self.getLowerRight_x(), self.getLowerRight_y() def getUpperLeft(self): return self.getUpperLeft_x(), self.getUpperLeft_y() def getUpperRight(self): return self.getUpperRight_x(), self.getUpperRight_y() def setLowerLeft(self, value): self[0], self[1] = [self.ensureIsNumber(x) for x in value] def setLowerRight(self, value): self[2], self[1] = [self.ensureIsNumber(x) for x in value] def setUpperLeft(self, value): self[0], self[3] = [self.ensureIsNumber(x) for x in value] def setUpperRight(self, value): self[2], self[3] = [self.ensureIsNumber(x) for x in value] def getWidth(self): return self.getUpperRight_x() - self.getLowerLeft_x() def getHeight(self): return self.getUpperRight_y() - self.getLowerLeft_y() lowerLeft = property(getLowerLeft, setLowerLeft, None, None) """ Property to read and modify the lower left 
coordinate of this box in (x,y) form. """ lowerRight = property(getLowerRight, setLowerRight, None, None) """ Property to read and modify the lower right coordinate of this box in (x,y) form. """ upperLeft = property(getUpperLeft, setUpperLeft, None, None) """ Property to read and modify the upper left coordinate of this box in (x,y) form. """ upperRight = property(getUpperRight, setUpperRight, None, None) """ Property to read and modify the upper right coordinate of this box in (x,y) form. """ class Field(TreeObject): """ A class representing a field dictionary. This class is accessed through :meth:`getFields()<PyPDF2.PdfFileReader.getFields>` """ def __init__(self, data): DictionaryObject.__init__(self) attributes = ("/FT", "/Parent", "/Kids", "/T", "/TU", "/TM", "/Ff", "/V", "/DV", "/AA") for attr in attributes: try: self[NameObject(attr)] = data[attr] except KeyError: pass fieldType = property(lambda self: self.get("/FT")) """ Read-only property accessing the type of this field. """ parent = property(lambda self: self.get("/Parent")) """ Read-only property accessing the parent of this field. """ kids = property(lambda self: self.get("/Kids")) """ Read-only property accessing the kids of this field. """ name = property(lambda self: self.get("/T")) """ Read-only property accessing the name of this field. """ altName = property(lambda self: self.get("/TU")) """ Read-only property accessing the alternate name of this field. """ mappingName = property(lambda self: self.get("/TM")) """ Read-only property accessing the mapping name of this field. This name is used by PyPDF2 as a key in the dictionary returned by :meth:`getFields()<PyPDF2.PdfFileReader.getFields>` """ flags = property(lambda self: self.get("/Ff")) """ Read-only property accessing the field flags, specifying various characteristics of the field (see Table 8.70 of the PDF 1.7 reference). """ value = property(lambda self: self.get("/V")) """ Read-only property accessing the value of this field. 
Format varies based on field type. """ defaultValue = property(lambda self: self.get("/DV")) """ Read-only property accessing the default value of this field. """ additionalActions = property(lambda self: self.get("/AA")) """ Read-only property accessing the additional actions dictionary. This dictionary defines the field's behavior in response to trigger events. See Section 8.5.2 of the PDF 1.7 reference. """ class Destination(TreeObject): """ A class representing a destination within a PDF file. See section 8.2.1 of the PDF 1.6 reference. :param str title: Title of this destination. :param int page: Page number of this destination. :param str typ: How the destination is displayed. :param args: Additional arguments may be necessary depending on the type. :raises PdfReadError: If destination type is invalid. Valid ``typ`` arguments (see PDF spec for details): /Fit No additional arguments /XYZ [left] [top] [zoomFactor] /FitH [top] /FitV [left] /FitR [left] [bottom] [right] [top] /FitB No additional arguments /FitBH [top] /FitBV [left] """ def __init__(self, title, page, typ, *args): DictionaryObject.__init__(self) self[NameObject("/Title")] = title self[NameObject("/Page")] = page self[NameObject("/Type")] = typ # from table 8.2 of the PDF 1.7 reference. 
if typ == "/XYZ": (self[NameObject("/Left")], self[NameObject("/Top")], self[NameObject("/Zoom")]) = args elif typ == "/FitR": (self[NameObject("/Left")], self[NameObject("/Bottom")], self[NameObject("/Right")], self[NameObject("/Top")]) = args elif typ in ["/FitH", "/FitBH"]: self[NameObject("/Top")], = args elif typ in ["/FitV", "/FitBV"]: self[NameObject("/Left")], = args elif typ in ["/Fit", "/FitB"]: pass else: raise utils.PdfReadError("Unknown Destination Type: %r" % typ) def getDestArray(self): return ArrayObject([self.raw_get('/Page'), self['/Type']] + [self[x] for x in ['/Left', '/Bottom', '/Right', '/Top', '/Zoom'] if x in self]) def writeToStream(self, stream, encryption_key): stream.write(b_("<<\n")) key = NameObject('/D') key.writeToStream(stream, encryption_key) stream.write(b_(" ")) value = self.getDestArray() value.writeToStream(stream, encryption_key) key = NameObject("/S") key.writeToStream(stream, encryption_key) stream.write(b_(" ")) value = NameObject("/GoTo") value.writeToStream(stream, encryption_key) stream.write(b_("\n")) stream.write(b_(">>")) title = property(lambda self: self.get("/Title")) """ Read-only property accessing the destination title. :rtype: str """ page = property(lambda self: self.get("/Page")) """ Read-only property accessing the destination page number. :rtype: int """ typ = property(lambda self: self.get("/Type")) """ Read-only property accessing the destination type. :rtype: str """ zoom = property(lambda self: self.get("/Zoom", None)) """ Read-only property accessing the zoom factor. :rtype: int, or ``None`` if not available. """ left = property(lambda self: self.get("/Left", None)) """ Read-only property accessing the left horizontal coordinate. :rtype: int, or ``None`` if not available. """ right = property(lambda self: self.get("/Right", None)) """ Read-only property accessing the right horizontal coordinate. :rtype: int, or ``None`` if not available. 
""" top = property(lambda self: self.get("/Top", None)) """ Read-only property accessing the top vertical coordinate. :rtype: int, or ``None`` if not available. """ bottom = property(lambda self: self.get("/Bottom", None)) """ Read-only property accessing the bottom vertical coordinate. :rtype: int, or ``None`` if not available. """ class Bookmark(Destination): def writeToStream(self, stream, encryption_key): stream.write(b_("<<\n")) for key in [NameObject(x) for x in ['/Title', '/Parent', '/First', '/Last', '/Next', '/Prev'] if x in self]: key.writeToStream(stream, encryption_key) stream.write(b_(" ")) value = self.raw_get(key) value.writeToStream(stream, encryption_key) stream.write(b_("\n")) key = NameObject('/Dest') key.writeToStream(stream, encryption_key) stream.write(b_(" ")) value = self.getDestArray() value.writeToStream(stream, encryption_key) stream.write(b_("\n")) stream.write(b_(">>")) def encode_pdfdocencoding(unicode_string): retval = b_('') for c in unicode_string: try: retval += b_(chr(_pdfDocEncoding_rev[c])) except KeyError: raise UnicodeEncodeError("pdfdocencoding", c, -1, -1, "does not exist in translation table") return retval def decode_pdfdocencoding(byte_array): retval = u_('') for b in byte_array: c = _pdfDocEncoding[ord_(b)] if c == u_('\u0000'): raise UnicodeDecodeError("pdfdocencoding", utils.barray(b), -1, -1, "does not exist in translation table") retval += c return retval _pdfDocEncoding = ( u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u0000'), u_('\u02d8'), u_('\u02c7'), u_('\u02c6'), u_('\u02d9'), u_('\u02dd'), u_('\u02db'), u_('\u02da'), u_('\u02dc'), u_('\u0020'), u_('\u0021'), u_('\u0022'), u_('\u0023'), u_('\u0024'), u_('\u0025'), u_('\u0026'), 
u_('\u0027'), u_('\u0028'), u_('\u0029'), u_('\u002a'), u_('\u002b'), u_('\u002c'), u_('\u002d'), u_('\u002e'), u_('\u002f'), u_('\u0030'), u_('\u0031'), u_('\u0032'), u_('\u0033'), u_('\u0034'), u_('\u0035'), u_('\u0036'), u_('\u0037'), u_('\u0038'), u_('\u0039'), u_('\u003a'), u_('\u003b'), u_('\u003c'), u_('\u003d'), u_('\u003e'), u_('\u003f'), u_('\u0040'), u_('\u0041'), u_('\u0042'), u_('\u0043'), u_('\u0044'), u_('\u0045'), u_('\u0046'), u_('\u0047'), u_('\u0048'), u_('\u0049'), u_('\u004a'), u_('\u004b'), u_('\u004c'), u_('\u004d'), u_('\u004e'), u_('\u004f'), u_('\u0050'), u_('\u0051'), u_('\u0052'), u_('\u0053'), u_('\u0054'), u_('\u0055'), u_('\u0056'), u_('\u0057'), u_('\u0058'), u_('\u0059'), u_('\u005a'), u_('\u005b'), u_('\u005c'), u_('\u005d'), u_('\u005e'), u_('\u005f'), u_('\u0060'), u_('\u0061'), u_('\u0062'), u_('\u0063'), u_('\u0064'), u_('\u0065'), u_('\u0066'), u_('\u0067'), u_('\u0068'), u_('\u0069'), u_('\u006a'), u_('\u006b'), u_('\u006c'), u_('\u006d'), u_('\u006e'), u_('\u006f'), u_('\u0070'), u_('\u0071'), u_('\u0072'), u_('\u0073'), u_('\u0074'), u_('\u0075'), u_('\u0076'), u_('\u0077'), u_('\u0078'), u_('\u0079'), u_('\u007a'), u_('\u007b'), u_('\u007c'), u_('\u007d'), u_('\u007e'), u_('\u0000'), u_('\u2022'), u_('\u2020'), u_('\u2021'), u_('\u2026'), u_('\u2014'), u_('\u2013'), u_('\u0192'), u_('\u2044'), u_('\u2039'), u_('\u203a'), u_('\u2212'), u_('\u2030'), u_('\u201e'), u_('\u201c'), u_('\u201d'), u_('\u2018'), u_('\u2019'), u_('\u201a'), u_('\u2122'), u_('\ufb01'), u_('\ufb02'), u_('\u0141'), u_('\u0152'), u_('\u0160'), u_('\u0178'), u_('\u017d'), u_('\u0131'), u_('\u0142'), u_('\u0153'), u_('\u0161'), u_('\u017e'), u_('\u0000'), u_('\u20ac'), u_('\u00a1'), u_('\u00a2'), u_('\u00a3'), u_('\u00a4'), u_('\u00a5'), u_('\u00a6'), u_('\u00a7'), u_('\u00a8'), u_('\u00a9'), u_('\u00aa'), u_('\u00ab'), u_('\u00ac'), u_('\u0000'), u_('\u00ae'), u_('\u00af'), u_('\u00b0'), u_('\u00b1'), u_('\u00b2'), u_('\u00b3'), u_('\u00b4'), 
u_('\u00b5'), u_('\u00b6'), u_('\u00b7'), u_('\u00b8'), u_('\u00b9'), u_('\u00ba'), u_('\u00bb'), u_('\u00bc'), u_('\u00bd'), u_('\u00be'), u_('\u00bf'), u_('\u00c0'), u_('\u00c1'), u_('\u00c2'), u_('\u00c3'), u_('\u00c4'), u_('\u00c5'), u_('\u00c6'), u_('\u00c7'), u_('\u00c8'), u_('\u00c9'), u_('\u00ca'), u_('\u00cb'), u_('\u00cc'), u_('\u00cd'), u_('\u00ce'), u_('\u00cf'), u_('\u00d0'), u_('\u00d1'), u_('\u00d2'), u_('\u00d3'), u_('\u00d4'), u_('\u00d5'), u_('\u00d6'), u_('\u00d7'), u_('\u00d8'), u_('\u00d9'), u_('\u00da'), u_('\u00db'), u_('\u00dc'), u_('\u00dd'), u_('\u00de'), u_('\u00df'), u_('\u00e0'), u_('\u00e1'), u_('\u00e2'), u_('\u00e3'), u_('\u00e4'), u_('\u00e5'), u_('\u00e6'), u_('\u00e7'), u_('\u00e8'), u_('\u00e9'), u_('\u00ea'), u_('\u00eb'), u_('\u00ec'), u_('\u00ed'), u_('\u00ee'), u_('\u00ef'), u_('\u00f0'), u_('\u00f1'), u_('\u00f2'), u_('\u00f3'), u_('\u00f4'), u_('\u00f5'), u_('\u00f6'), u_('\u00f7'), u_('\u00f8'), u_('\u00f9'), u_('\u00fa'), u_('\u00fb'), u_('\u00fc'), u_('\u00fd'), u_('\u00fe'), u_('\u00ff') ) assert len(_pdfDocEncoding) == 256 _pdfDocEncoding_rev = {} for i in range(256): char = _pdfDocEncoding[i] if char == u_("\u0000"): continue assert char not in _pdfDocEncoding_rev _pdfDocEncoding_rev[char] = i
mit
MadCat34/Sick-Beard
lib/requests/packages/chardet2/eucjpprober.py
52
3600
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from . import constants
import sys
from .constants import eStart, eError, eItsMe
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel


class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP.

    Combines three detectors:
      * a coding state machine (``EUCJPSMModel``) that validates byte
        sequences against the EUC-JP encoding rules,
      * a character-distribution analyser, and
      * a context analyser,
    and reports the higher of the two analysers' confidences.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        """Reset inherited state plus this prober's context analyser."""
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "EUC-JP"

    def feed(self, aBuf):
        """Feed a chunk of bytes; return the detection state afterwards.

        Drives every byte through the coding state machine.  An invalid
        sequence marks the prober eNotMe; an unambiguous match marks it
        eFoundIt; each completed character (state back to eStart) is fed
        to the context and distribution analysers.
        """
        aLen = len(aBuf)
        for i in range(0, aLen):
            # PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == eError:
                # Byte sequence is illegal in EUC-JP: give up on this charset.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == eItsMe:
                # State machine saw a sequence only valid in EUC-JP.
                self._mState = constants.eFoundIt
                break
            elif codingState == eStart:
                # A full character just completed; hand the char (last byte
                # plus its predecessor) to both analysers.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character straddles the previous feed() call:
                    # _mLastChar still holds the final byte of that chunk.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i-1:i+1], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i-1:i+1], charLen)

        # Remember the final byte so a character split across chunks can be
        # reassembled on the next feed().
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: confident enough already, stop detecting.
            if self._mContextAnalyzer.got_enough_data() and \
                   (self.get_confidence() > constants.SHORTCUT_THRESHOLD):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        """Return the better of the context and distribution confidences."""
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
gpl-3.0
moijes12/oh-mainline
vendor/packages/python-openid/openid/store/memstore.py
165
3597
"""A simple store using only in-process memory."""

import copy
import time

from openid.store import nonce


class ServerAssocs(object):
    """The associations known for a single server URL, keyed by handle."""

    def __init__(self):
        self.assocs = {}

    def set(self, assoc):
        """Store *assoc*, replacing any existing one with the same handle."""
        self.assocs[assoc.handle] = assoc

    def get(self, handle):
        """Return the association for *handle*, or None if unknown."""
        return self.assocs.get(handle)

    def remove(self, handle):
        """Remove the association for *handle*.

        @return: True if an association was removed, False otherwise.
        """
        try:
            del self.assocs[handle]
        except KeyError:
            return False
        else:
            return True

    def best(self):
        """Return the association with the newest issued date,
        or None if there are no associations.

        (The original docstring claimed "oldest", but the comparison keeps
        the association with the largest ``issued`` timestamp.)
        """
        best = None
        for assoc in self.assocs.values():
            if best is None or best.issued < assoc.issued:
                best = assoc
        return best

    def cleanup(self):
        """Remove expired associations.

        @return: tuple of (removed associations, remaining associations)
        """
        # Collect handles first: deleting while iterating a dict is an error.
        remove = [handle for handle, assoc in self.assocs.items()
                  if assoc.getExpiresIn() == 0]
        for handle in remove:
            del self.assocs[handle]
        return len(remove), len(self.assocs)


class MemoryStore(object):
    """In-process memory store.

    Use for single long-running processes.  No persistence supplied.
    """

    def __init__(self):
        # server URL -> ServerAssocs
        self.server_assocs = {}
        # (server_url, timestamp, salt) -> None; used as a set of seen nonces
        self.nonces = {}

    def _getServerAssocs(self, server_url):
        """Return the ServerAssocs for *server_url*, creating it lazily."""
        try:
            return self.server_assocs[server_url]
        except KeyError:
            assocs = self.server_assocs[server_url] = ServerAssocs()
            return assocs

    def storeAssociation(self, server_url, assoc):
        assocs = self._getServerAssocs(server_url)
        # Deep-copy so later mutation of the caller's object cannot
        # corrupt the stored association.
        assocs.set(copy.deepcopy(assoc))

    def getAssociation(self, server_url, handle=None):
        """Return the association for (server_url, handle); with no handle,
        return the newest association for that server, or None."""
        assocs = self._getServerAssocs(server_url)
        if handle is None:
            return assocs.best()
        else:
            return assocs.get(handle)

    def removeAssociation(self, server_url, handle):
        assocs = self._getServerAssocs(server_url)
        return assocs.remove(handle)

    def useNonce(self, server_url, timestamp, salt):
        """Return True exactly once per (server_url, timestamp, salt) triple,
        and only while *timestamp* is within the allowed clock skew."""
        if abs(timestamp - time.time()) > nonce.SKEW:
            return False

        anonce = (str(server_url), int(timestamp), str(salt))
        if anonce in self.nonces:
            return False
        else:
            self.nonces[anonce] = None
            return True

    def cleanupNonces(self):
        """Drop nonces whose timestamp fell outside the skew window.

        @return: number of nonces removed.
        """
        now = time.time()
        # Collect first; removing items while iterating would be bad.
        expired = [anonce for anonce in self.nonces
                   if abs(anonce[1] - now) > nonce.SKEW]
        for anonce in expired:
            del self.nonces[anonce]
        return len(expired)

    def cleanupAssociations(self):
        """Expire associations in every ServerAssocs; drop empty servers.

        @return: total number of associations removed.
        """
        remove_urls = []
        removed_assocs = 0
        for server_url, assocs in self.server_assocs.items():
            removed, remaining = assocs.cleanup()
            removed_assocs += removed
            if not remaining:
                remove_urls.append(server_url)

        # Remove entries from server_assocs that had none remaining.
        for server_url in remove_urls:
            del self.server_assocs[server_url]
        return removed_assocs

    def __eq__(self, other):
        return ((self.server_assocs == other.server_assocs) and
                (self.nonces == other.nonces))

    def __ne__(self, other):
        return not (self == other)
agpl-3.0
mcueto/djangorestframework-apicontrol
rest_framework_apicontrol/urls.py
1
1260
"""companies URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from rest_framework import routers from .views import ( AppViewSet, OrganizationalUnitTypeViewSet, OrganizationalUnitViewSet ) router = routers.DefaultRouter() router.register(r'apps', AppViewSet) router.register(r'organizationalunittypes', OrganizationalUnitTypeViewSet) router.register(r'organizationalunits', OrganizationalUnitViewSet) urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^api/', include(router.urls)), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')) ]
gpl-3.0
alu0100207385/dsi_3Django
build/lib.linux-i686-2.7/django/utils/unittest/suite.py
219
9301
"""TestSuite"""

# Backported/bundled unittest2-style suite: adds class- and module-level
# shared fixtures (setUpClass/setUpModule etc.) on top of the plain suite.

import sys
import unittest
from django.utils.unittest import case, util

__unittest = True


class BaseTestSuite(unittest.TestSuite):
    """A simple test suite that doesn't provide class or module shared fixtures.
    """
    def __init__(self, tests=()):
        self._tests = []
        self.addTests(tests)

    def __repr__(self):
        return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return list(self) == list(other)

    def __ne__(self, other):
        return not self == other

    # Can't guarantee hash invariant, so flag as unhashable
    __hash__ = None

    def __iter__(self):
        return iter(self._tests)

    def countTestCases(self):
        # Sum recursively: nested suites report their own totals.
        cases = 0
        for test in self:
            cases += test.countTestCases()
        return cases

    def addTest(self, test):
        # sanity checks
        if not hasattr(test, '__call__'):
            raise TypeError("%r is not callable" % (repr(test),))
        if isinstance(test, type) and issubclass(test,
                                                 (case.TestCase, TestSuite)):
            raise TypeError("TestCases and TestSuites must be instantiated "
                            "before passing them to addTest()")
        self._tests.append(test)

    def addTests(self, tests):
        # NOTE: basestring is Python 2-only; this module targets py2.
        if isinstance(tests, basestring):
            raise TypeError("tests must be an iterable of tests, not a string")
        for test in tests:
            self.addTest(test)

    def run(self, result):
        for test in self:
            if result.shouldStop:
                break
            test(result)
        return result

    def __call__(self, *args, **kwds):
        return self.run(*args, **kwds)

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        for test in self:
            test.debug()


class TestSuite(BaseTestSuite):
    """A test suite is a composite test consisting of a number of TestCases.

    For use, create an instance of TestSuite, then add test case instances.
    When all tests have been added, the suite can be passed to a test
    runner, such as TextTestRunner. It will run the individual test cases
    in the order in which they were added, aggregating the results. When
    subclassing, do not forget to call the base class constructor.
    """

    def run(self, result):
        self._wrapped_run(result)
        # Passing None forces teardown of whatever class/module ran last.
        self._tearDownPreviousClass(None, result)
        self._handleModuleTearDown(result)
        return result

    def debug(self):
        """Run the tests without collecting errors in a TestResult"""
        debug = _DebugResult()
        self._wrapped_run(debug, True)
        self._tearDownPreviousClass(None, debug)
        self._handleModuleTearDown(debug)

    ################################
    # private methods
    def _wrapped_run(self, result, debug=False):
        """Run tests, inserting class/module fixtures at the boundaries.

        Fixture bookkeeping is threaded through attributes on *result*
        (_previousTestClass, _moduleSetUpFailed), so the order of the
        calls below is significant.
        """
        for test in self:
            if result.shouldStop:
                break

            if _isnotsuite(test):
                # A real test case: switch fixtures if its class/module
                # differs from the previous test's.
                self._tearDownPreviousClass(test, result)
                self._handleModuleFixture(test, result)
                self._handleClassSetUp(test, result)
                result._previousTestClass = test.__class__

                # Skip tests whose class or module setup failed.
                if (getattr(test.__class__, '_classSetupFailed', False) or
                    getattr(result, '_moduleSetUpFailed', False)):
                    continue

            if hasattr(test, '_wrapped_run'):
                # Nested fixture-aware suite: recurse with shared result.
                test._wrapped_run(result, debug)
            elif not debug:
                test(result)
            else:
                test.debug()

    def _handleClassSetUp(self, test, result):
        """Call setUpClass() once when entering a new test class."""
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):
            return

        try:
            currentClass._classSetupFailed = False
        except TypeError:
            # test may actually be a function
            # so its class will be a builtin-type
            pass

        setUpClass = getattr(currentClass, 'setUpClass', None)
        if setUpClass is not None:
            try:
                setUpClass()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                currentClass._classSetupFailed = True
                className = util.strclass(currentClass)
                errorName = 'setUpClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)

    def _get_previous_module(self, result):
        """Return the module name of the previously-run test class, or None."""
        previousModule = None
        previousClass = getattr(result, '_previousTestClass', None)
        if previousClass is not None:
            previousModule = previousClass.__module__
        return previousModule

    def _handleModuleFixture(self, test, result):
        """Tear down the old module and call setUpModule() for a new one."""
        previousModule = self._get_previous_module(result)
        currentModule = test.__class__.__module__
        if currentModule == previousModule:
            return

        self._handleModuleTearDown(result)

        result._moduleSetUpFailed = False
        try:
            module = sys.modules[currentModule]
        except KeyError:
            return
        setUpModule = getattr(module, 'setUpModule', None)
        if setUpModule is not None:
            try:
                setUpModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                result._moduleSetUpFailed = True
                errorName = 'setUpModule (%s)' % currentModule
                self._addClassOrModuleLevelException(result, e, errorName)

    def _addClassOrModuleLevelException(self, result, exception, errorName):
        """Record a fixture failure against a placeholder "test"."""
        error = _ErrorHolder(errorName)
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None and isinstance(exception, case.SkipTest):
            # SkipTest raised in a fixture counts as a skip, not an error.
            addSkip(error, str(exception))
        else:
            result.addError(error, sys.exc_info())

    def _handleModuleTearDown(self, result):
        """Call tearDownModule() for the previously-run module, if any."""
        previousModule = self._get_previous_module(result)
        if previousModule is None:
            return
        if result._moduleSetUpFailed:
            return

        try:
            module = sys.modules[previousModule]
        except KeyError:
            return

        tearDownModule = getattr(module, 'tearDownModule', None)
        if tearDownModule is not None:
            try:
                tearDownModule()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                errorName = 'tearDownModule (%s)' % previousModule
                self._addClassOrModuleLevelException(result, e, errorName)

    def _tearDownPreviousClass(self, test, result):
        """Call tearDownClass() when the class of *test* changes."""
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        # No teardown if the matching setup never ran or was skipped.
        if getattr(previousClass, '_classSetupFailed', False):
            return
        if getattr(result, '_moduleSetUpFailed', False):
            return
        if getattr(previousClass, "__unittest_skip__", False):
            return

        tearDownClass = getattr(previousClass, 'tearDownClass', None)
        if tearDownClass is not None:
            try:
                tearDownClass()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                className = util.strclass(previousClass)
                errorName = 'tearDownClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)


class _ErrorHolder(object):
    """
    Placeholder for a TestCase inside a result. As far as a TestResult
    is concerned, this looks exactly like a unit test. Used to insert
    arbitrary errors into a test suite run.
    """
    # Inspired by the ErrorHolder from Twisted:
    # http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py

    # attribute used by TestResult._exc_info_to_string
    failureException = None

    def __init__(self, description):
        self.description = description

    def id(self):
        return self.description

    def shortDescription(self):
        return None

    def __repr__(self):
        return "<ErrorHolder description=%r>" % (self.description,)

    def __str__(self):
        return self.id()

    def run(self, result):
        # could call result.addError(...) - but this test-like object
        # shouldn't be run anyway
        pass

    def __call__(self, result):
        return self.run(result)

    def countTestCases(self):
        return 0


def _isnotsuite(test):
    "A crude way to tell apart testcases and suites with duck-typing"
    try:
        iter(test)
    except TypeError:
        return True
    return False


class _DebugResult(object):
    "Used by the TestSuite to hold previous class when running in debug."
    _previousTestClass = None
    _moduleSetUpFailed = False
    shouldStop = False
bsd-3-clause
schleichdi2/openpli-e2
lib/python/Screens/About.py
1
10450
from Screen import Screen from Components.config import config from Components.ActionMap import ActionMap from Components.Sources.StaticText import StaticText from Components.Harddisk import harddiskmanager from Components.NimManager import nimmanager from Components.About import about from Components.ScrollLabel import ScrollLabel from Components.Button import Button from Components.Label import Label from Components.ProgressBar import ProgressBar from Tools.StbHardware import getFPVersion from enigma import eTimer, eLabel, eConsoleAppContainer from Components.HTMLComponent import HTMLComponent from Components.GUIComponent import GUIComponent import skin class About(Screen): def __init__(self, session): Screen.__init__(self, session) self.setTitle(_("About")) hddsplit = skin.parameters.get("AboutHddSplit", 0) AboutText = _("Hardware: ") + about.getHardwareTypeString() + "\n" AboutText += _("CPU: ") + about.getCPUInfoString() + "\n" AboutText += _("Image: ") + about.getImageTypeString() + "\n" AboutText += _("Installed: ") + about.getFlashDateString() + "\n" AboutText += _("Kernel version: ") + about.getKernelVersionString() + "\n" EnigmaVersion = "Enigma: " + about.getEnigmaVersionString() self["EnigmaVersion"] = StaticText(EnigmaVersion) AboutText += EnigmaVersion + "\n" AboutText += _("Enigma (re)starts: %d\n") % config.misc.startCounter.value GStreamerVersion = "GStreamer: " + about.getGStreamerVersionString().replace("GStreamer","") self["GStreamerVersion"] = StaticText(GStreamerVersion) AboutText += GStreamerVersion + "\n" ImageVersion = _("Last upgrade: ") + about.getImageVersionString() self["ImageVersion"] = StaticText(ImageVersion) AboutText += ImageVersion + "\n" AboutText += _("DVB drivers: ") + about.getDriverInstalledDate() + "\n" AboutText += _("Python version: ") + about.getPythonVersionString() + "\n" fp_version = getFPVersion() if fp_version is None: fp_version = "" else: fp_version = _("Frontprocessor version: %d") % fp_version AboutText += 
fp_version + "\n" self["FPVersion"] = StaticText(fp_version) self["TunerHeader"] = StaticText(_("Detected NIMs:")) AboutText += "\n" + _("Detected NIMs:") + "\n" nims = nimmanager.nimList(showFBCTuners=False) for count in range(len(nims)): if count < 4: self["Tuner" + str(count)] = StaticText(nims[count]) else: self["Tuner" + str(count)] = StaticText("") AboutText += nims[count] + "\n" self["HDDHeader"] = StaticText(_("Detected HDD:")) AboutText += "\n" + _("Detected HDD:") + "\n" hddlist = harddiskmanager.HDDList() hddinfo = "" if hddlist: formatstring = hddsplit and "%s:%s, %.1f %sB %s" or "%s\n(%s, %.1f %sB %s)" for count in range(len(hddlist)): if hddinfo: hddinfo += "\n" hdd = hddlist[count][1] if int(hdd.free()) > 1024: hddinfo += formatstring % (hdd.model(), hdd.capacity(), hdd.free()/1024.0, "G", _("free")) else: hddinfo += formatstring % (hdd.model(), hdd.capacity(), hdd.free(), "M", _("free")) else: hddinfo = _("none") self["hddA"] = StaticText(hddinfo) AboutText += hddinfo + "\n\n" + _("Network Info:") for x in about.GetIPsFromNetworkInterfaces(): AboutText += "\n" + x[0] + ": " + x[1] self["AboutScrollLabel"] = ScrollLabel(AboutText) self["key_green"] = Button(_("Translations")) self["key_red"] = Button(_("Latest Commits")) self["key_blue"] = Button(_("Memory Info")) self["actions"] = ActionMap(["ColorActions", "SetupActions", "DirectionActions"], { "cancel": self.close, "ok": self.close, "red": self.showCommits, "green": self.showTranslationInfo, "blue": self.showMemoryInfo, "up": self["AboutScrollLabel"].pageUp, "down": self["AboutScrollLabel"].pageDown }) def showTranslationInfo(self): self.session.open(TranslationInfo) def showCommits(self): self.session.open(CommitInfo) def showMemoryInfo(self): self.session.open(MemoryInfo) class TranslationInfo(Screen): def __init__(self, session): Screen.__init__(self, session) self.setTitle(_("Translation")) # don't remove the string out of the _(), or it can't be "translated" anymore. 
# TRANSLATORS: Add here whatever should be shown in the "translator" about screen, up to 6 lines (use \n for newline) info = _("TRANSLATOR_INFO") if info == "TRANSLATOR_INFO": info = "(N/A)" infolines = _("").split("\n") infomap = {} for x in infolines: l = x.split(': ') if len(l) != 2: continue (type, value) = l infomap[type] = value print infomap self["key_red"] = Button(_("Cancel")) self["TranslationInfo"] = StaticText(info) translator_name = infomap.get("Language-Team", "none") if translator_name == "none": translator_name = infomap.get("Last-Translator", "") self["TranslatorName"] = StaticText(translator_name) self["actions"] = ActionMap(["SetupActions"], { "cancel": self.close, "ok": self.close, }) class CommitInfo(Screen): def __init__(self, session): Screen.__init__(self, session) self.setTitle(_("Latest Commits")) self.skinName = ["CommitInfo", "About"] self["AboutScrollLabel"] = ScrollLabel(_("Please wait")) self["actions"] = ActionMap(["SetupActions", "DirectionActions"], { "cancel": self.close, "ok": self.close, "up": self["AboutScrollLabel"].pageUp, "down": self["AboutScrollLabel"].pageDown, "left": self.left, "right": self.right }) self["key_red"] = Button(_("Cancel")) self.project = 0 self.projects = [ ("enigma2", "Enigma2"), ("openpli-oe-core", "Openpli Oe Core"), ("enigma2-plugins", "Enigma2 Plugins"), ("aio-grab", "Aio Grab"), ("gst-plugin-dvbmediasink", "Gst Plugin Dvbmediasink"), ("HenksatSettings", "Henksat Settings"), ("enigma2-plugin-extensions-xmltvimport", "Plugin Xmltvimport"), ("enigma2-plugin-skins-magic", "Skin Magic SD"), ("tuxtxt", "Tuxtxt") ] self.cachedProjects = {} self.Timer = eTimer() self.Timer.callback.append(self.readGithubCommitLogs) self.Timer.start(50, True) def readGithubCommitLogs(self): url = 'https://api.github.com/repos/openpli/%s/commits' % self.projects[self.project][0] commitlog = "" from datetime import datetime from json import loads from urllib2 import urlopen try: commitlog += 80 * '-' + '\n' commitlog += 
url.split('/')[-2] + '\n' commitlog += 80 * '-' + '\n' try: # OpenPli 5.0 uses python 2.7.11 and here we need to bypass the certificate check from ssl import _create_unverified_context log = loads(urlopen(url, timeout=5, context=_create_unverified_context()).read()) except: log = loads(urlopen(url, timeout=5).read()) for c in log: creator = c['commit']['author']['name'] title = c['commit']['message'] date = datetime.strptime(c['commit']['committer']['date'], '%Y-%m-%dT%H:%M:%SZ').strftime('%x %X') commitlog += date + ' ' + creator + '\n' + title + 2 * '\n' commitlog = commitlog.encode('utf-8') self.cachedProjects[self.projects[self.project][1]] = commitlog except: commitlog += _("Currently the commit log cannot be retrieved - please try later again") self["AboutScrollLabel"].setText(commitlog) def updateCommitLogs(self): if self.cachedProjects.has_key(self.projects[self.project][1]): self["AboutScrollLabel"].setText(self.cachedProjects[self.projects[self.project][1]]) else: self["AboutScrollLabel"].setText(_("Please wait")) self.Timer.start(50, True) def left(self): self.project = self.project == 0 and len(self.projects) - 1 or self.project - 1 self.updateCommitLogs() def right(self): self.project = self.project != len(self.projects) - 1 and self.project + 1 or 0 self.updateCommitLogs() class MemoryInfo(Screen): def __init__(self, session): Screen.__init__(self, session) self["actions"] = ActionMap(["SetupActions", "ColorActions"], { "cancel": self.close, "ok": self.getMemoryInfo, "green": self.getMemoryInfo, "blue": self.clearMemory, }) self["key_red"] = Label(_("Cancel")) self["key_green"] = Label(_("Refresh")) self["key_blue"] = Label(_("Clear")) self['lmemtext'] = Label() self['lmemvalue'] = Label() self['rmemtext'] = Label() self['rmemvalue'] = Label() self['pfree'] = Label() self['pused'] = Label() self["slide"] = ProgressBar() self["slide"].setValue(100) self["params"] = MemoryInfoSkinParams() self['info'] = Label(_("This info is for developers only.\nFor a 
normal users it is not relevant.\nDon't panic please when you see values being displayed that you think look suspicious!")) self.setTitle(_("Memory Info")) self.onLayoutFinish.append(self.getMemoryInfo) def getMemoryInfo(self): try: ltext = rtext = "" lvalue = rvalue = "" mem = 1 free = 0 rows_in_column = self["params"].rows_in_column for i, line in enumerate(open('/proc/meminfo','r')): s = line.strip().split(None, 2) if len(s) == 3: name, size, units = s elif len(s) == 2: name, size = s units = "" else: continue if name.startswith("MemTotal"): mem = int(size) if name.startswith("MemFree") or name.startswith("Buffers") or name.startswith("Cached"): free += int(size) if i < rows_in_column: ltext += "".join((name,"\n")) lvalue += "".join((size," ",units,"\n")) else: rtext += "".join((name,"\n")) rvalue += "".join((size," ",units,"\n")) self['lmemtext'].setText(ltext) self['lmemvalue'].setText(lvalue) self['rmemtext'].setText(rtext) self['rmemvalue'].setText(rvalue) self["slide"].setValue(int(100.0*(mem-free)/mem+0.25)) self['pfree'].setText("%.1f %s" % (100.*free/mem,'%')) self['pused'].setText("%.1f %s" % (100.*(mem-free)/mem,'%')) except Exception, e: print "[About] getMemoryInfo FAIL:", e def clearMemory(self): eConsoleAppContainer().execute("sync") open("/proc/sys/vm/drop_caches", "w").write("3") self.getMemoryInfo() class MemoryInfoSkinParams(HTMLComponent, GUIComponent): def __init__(self): GUIComponent.__init__(self) self.rows_in_column = 25 def applySkin(self, desktop, screen): if self.skinAttributes is not None: attribs = [ ] for (attrib, value) in self.skinAttributes: if attrib == "rowsincolumn": self.rows_in_column = int(value) self.skinAttributes = attribs return GUIComponent.applySkin(self, desktop, screen) GUI_WIDGET = eLabel
gpl-2.0
dwrpayne/zulip
zerver/tornadoviews.py
3
3799
from __future__ import absolute_import from django.views.decorators.csrf import csrf_exempt from zerver.models import get_client from zerver.decorator import asynchronous, \ authenticated_json_post_view, internal_notify_view, RespondAsynchronously, \ has_request_variables, REQ from zerver.lib.response import json_success, json_error from zerver.lib.validator import check_bool, check_list, check_string from zerver.lib.event_queue import allocate_client_descriptor, get_client_descriptor, \ process_notification, fetch_events from zerver.lib.handlers import allocate_handler_id from zerver.lib.narrow import check_supported_events_narrow_filter import time import ujson import logging from zerver.lib.rest import rest_dispatch as _rest_dispatch rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs))) @internal_notify_view def notify(request): process_notification(ujson.loads(request.POST['data'])) return json_success() @has_request_variables def cleanup_event_queue(request, user_profile, queue_id=REQ()): client = get_client_descriptor(queue_id) if client is None: return json_error("Bad event queue id: %s" % (queue_id,)) if user_profile.id != client.user_profile_id: return json_error("You are not authorized to access this queue") request._log_data['extra'] = "[%s]" % (queue_id,) client.cleanup() return json_success() @authenticated_json_post_view def json_get_events(request, user_profile): return get_events_backend(request, user_profile, apply_markdown=True) @asynchronous @has_request_variables def get_events_backend(request, user_profile, handler, user_client = REQ(converter=get_client, default=None), last_event_id = REQ(converter=int, default=None), queue_id = REQ(default=None), apply_markdown = REQ(default=False, validator=check_bool), all_public_streams = REQ(default=False, validator=check_bool), event_types = REQ(default=None, validator=check_list(check_string)), dont_block = REQ(default=False, 
validator=check_bool), narrow = REQ(default=[], validator=check_list(None)), lifespan_secs = REQ(default=0, converter=int)): if user_client is None: user_client = request.client events_query = dict( user_profile_id = user_profile.id, user_profile_email = user_profile.email, queue_id = queue_id, last_event_id = last_event_id, event_types = event_types, client_type_name = user_client.name, all_public_streams = all_public_streams, lifespan_secs = lifespan_secs, narrow = narrow, dont_block = dont_block, handler_id = handler.handler_id) if queue_id is None: events_query['new_queue_data'] = dict( user_profile_id = user_profile.id, realm_id = user_profile.realm.id, user_profile_email = user_profile.email, event_types = event_types, client_type_name = user_client.name, apply_markdown = apply_markdown, all_public_streams = all_public_streams, queue_timeout = lifespan_secs, last_connection_time = time.time(), narrow = narrow) result = fetch_events(events_query) if "extra_log_data" in result: request._log_data['extra'] = result["extra_log_data"] if result["type"] == "async": handler._request = request return RespondAsynchronously if result["type"] == "error": return json_error(result["message"]) return json_success(result["response"])
apache-2.0
jmoz/posterous-python-wp_fork
posterous/models.py
1
6187
# Copyright: # Copyright (c) 2010, Benjamin Reitzammer <http://github.com/nureineide>, # All rights reserved. # # License: # This program is free software. You can distribute/modify this program under # the terms of the Apache License Version 2.0 available at # http://www.apache.org/licenses/LICENSE-2.0.txt from posterous.utils import parse_datetime from types import ListType class Model(object): """ Base class """ def __init__(self, api=None): self._api = api @classmethod def parse(self, api, json): if isinstance(json, list): return self.parse_list(api, json) else: return self.parse_obj(api, json) @classmethod def parse_list(self, api, json_list): results = list() for obj in json_list: results.append(self.parse_obj(api, obj)) return results class ApiToken(Model): @classmethod def parse_obj(self, api, json): token = self(api) token = json['api_token'] return token class Post(Model): @classmethod def parse_obj(self, api, json): post = self(api) for k, v in json.iteritems(): if k == 'media': setattr(post, k, Media.parse(api, v)) elif k == 'comments': pass else: setattr(post, k, v) return post def update(self, *args, **kwargs): return self._api.update_post(self.id, *args, **kwargs) def new_comment(self, *args, **kwargs): return self._api.new_comment(self.id, *args, **kwargs) class Postv2(Model): @classmethod def parse_obj(self, api, json): post = self(api) for k, v in json.iteritems(): if k == 'tags': setattr(post, k, Tag.parse(api, v)) elif k == 'media': setattr(post, k, Mediav2.parse(api, v)) elif k == 'comments': setattr(post, k, Commentv2.parse(api, v)) else: setattr(post, k, v) return post class Site(Model): @classmethod def parse_obj(self, api, json): site = self(api) for k, v in json.iteritems(): setattr(site, k, v) return site def read_posts(self, **kwargs): return self._api.read_posts(self.id, **kwargs) def new_post(self, *args, **kwargs): return self._api.new_post(self.id, *args, **kwargs) def tags(self): return self._api.get_tags(self.id) class 
Commentv2(Model): @classmethod def parse_obj(self, api, json, obj=None): comment = obj or self(api) for k, v in json.iteritems(): setattr(comment, k, v) return comment class Tag(Model): @classmethod def parse_obj(self, api, json): tag = self(api) for k, v in json.iteritems(): setattr(tag, k, v) return tag def __str__(self): try: return self.tag_string except AttributeError: return '' class Media(Model): @classmethod def parse_obj(self, api, json, obj=None): # attributes from the medium tag are set on original Media object. media = obj or self(api) for k, v in json.iteritems(): if k == 'medium': Media.parse_obj(api, v, media) elif k == 'thumb': setattr(media, k, Media.parse_obj(api, v)) else: setattr(media, k, v) return media class Mediav2(Model): @classmethod def parse_obj(self, api, json, obj=None): # attributes from the medium tag are set on original Media object. mediav2 = self(api) for k, v in json.iteritems(): if k == "audio_files" and v is not None: setattr(mediav2, k, Audiofilesv2.parse_obj(api, v)) elif k == "images" and v is not None: setattr(mediav2, k, Imagesv2.parse_obj(api, v)) elif k == "videos" and v is not None: setattr(mediav2, k, Videosv2.parse_obj(api, v)) else: setattr(mediav2, k, v) return mediav2 def download(self): # TODO: download file pass class Audiofilesv2(Model): @classmethod def parse_obj(self, api, json, obj=None): audio_files = self(api) setattr(audio_files, 'audio_file', []) for item in json: audio_files.audio_file.append(MediaItem.parse_obj(api, item)) return audio_files """class Audiofilev2(Model): @classmethod def parse_obj(self, api, json, obj=None): audiofile = self(api) for k, v in json.iteritems(): setattr(audiofile, k, v) return audiofile """ class Videosv2(Model): @classmethod def parse_obj(self, api, json): videos = self(api) setattr(videos, 'video', []) for item in json: videos.video.append(MediaItem.parse_obj(api, item)) return videos class Imagesv2(Model): @classmethod def parse_obj(self, api, json): images = self(api) 
setattr(images, 'image', []) for item in json: images.image.append(MediaItem.parse_obj(api, item)) return images class MediaItem(Model): @classmethod def parse_obj(self, api, json): media_item = self(api) for k, v in json.iteritems(): setattr(media_item, k, v) return media_item class JSONModel(Model): @classmethod def parse_obj(self, api, json): return json class ModelFactory(object): """ Used by parsers for creating instances of models. """ post = Post postv2 = Postv2 site = Site commentv2 = Commentv2 tag = Tag media = Media json = JSONModel mediav2 = Mediav2 apitoken = ApiToken """Used to cast response tags to the correct type""" attribute_map = { ('id', 'views', 'count', 'filesize', 'height', 'width', 'commentscount', 'num_posts'): int, ('private', 'commentsenabled', 'primary'): lambda v: v.lower() == 'true', ('date'): lambda v: parse_datetime(v) }
mit
thoma5B/Django-Wiki
wiki/management/commands/wikiviz.py
13
16740
#!/usr/bin/env python from __future__ import unicode_literals from __future__ import print_function from __future__ import absolute_import """Django model to DOT (Graphviz) converter by Antonio Cavedoni <antonio@cavedoni.org> edited as management script by Benjamin Bach <benjamin@overtag.dk> Depends on package 'graphviz', ie. 'apt-get install graphviz' Example usage: $ ./manage.py wikiviz wiki --inheritance | dot -Tpdf -o <filename>.pdf Place this script in the management.commands package of your application or project. options: -h, --help show this help message and exit. -a, --all_applications show models from all applications. -d, --disable_fields don't show the class member fields. -g, --group_models draw an enclosing box around models from the same app. -i, --include_models=User,Person,Car only include selected models in graph. -n, --verbose_names use verbose_name for field and models. -L, --language specify language used for verrbose_name localization -x, --exclude_columns exclude specific column(s) from the graph. -X, --exclude_models exclude specific model(s) from the graph. -e, --inheritance show inheritance arrows. """ __version__ = "0.99" __svnid__ = "$Id$" __license__ = "Python" __author__ = "Antonio Cavedoni <http://cavedoni.com/>" __contributors__ = [ "Stefano J. 
Attardi <http://attardi.org/>", "limodou <http://www.donews.net/limodou/>", "Carlo C8E Miron", "Andre Campos <cahenan@gmail.com>", "Justin Findlay <jfindlay@gmail.com>", "Alexander Houben <alexander@houben.ch>", "Bas van Oostveen <v.oostveen@gmail.com>", "Joern Hees <gitdev@joernhees.de>", "Benjamin Bach <benjamin@overtag.dk>", ] import sys import os from django.core.management.base import BaseCommand from optparse import make_option from django.utils.translation import activate as activate_language from django.utils.safestring import mark_safe from django.template import Context, loader from django.db import models from django.db.models import get_models from django.db.models.fields.related import \ ForeignKey, OneToOneField, ManyToManyField, RelatedField try: from django.db.models.fields.generic import GenericRelation except ImportError: from django.contrib.contenttypes.generic import GenericRelation def parse_file_or_list(arg): if not arg: return [] if not ',' in arg and os.path.isfile(arg): return [e.strip() for e in open(arg).readlines()] return arg.split(',') def generate_dot(app_labels, **kwargs): disable_fields = kwargs.get('disable_fields', False) include_models = parse_file_or_list(kwargs.get('include_models', "")) all_applications = kwargs.get('all_applications', False) use_subgraph = kwargs.get('group_models', False) verbose_names = kwargs.get('verbose_names', False) inheritance = kwargs.get('inheritance', False) language = kwargs.get('language', None) if language is not None: activate_language(language) exclude_columns = parse_file_or_list(kwargs.get('exclude_columns', "")) exclude_models = parse_file_or_list(kwargs.get('exclude_models', "")) def skip_field(field): if exclude_columns: if verbose_names and field.verbose_name: if field.verbose_name in exclude_columns: return True if field.name in exclude_columns: return True return False t = loader.get_template_from_string(""" digraph name { fontname = "Helvetica" fontsize = 8 node [ fontname = 
"Helvetica" fontsize = 8 shape = "plaintext" ] edge [ fontname = "Helvetica" fontsize = 8 ] """) c = Context({}) dot = t.render(c) apps = [] if all_applications: apps = models.get_apps() for app_label in app_labels: app = models.get_app(app_label) if app not in apps: apps.append(app) graphs = [] for app in apps: graph = Context({ 'name': '"%s"' % app.__name__, 'app_name': "%s" % '.'.join(app.__name__.split('.')[:-1]), 'cluster_app_name': "cluster_%s" % app.__name__.replace(".", "_"), 'disable_fields': disable_fields, 'use_subgraph': use_subgraph, 'models': [] }) appmodels = get_models(app) abstract_models = [] for appmodel in appmodels: abstract_models = abstract_models + [abstract_model for abstract_model in appmodel.__bases__ if hasattr( abstract_model, '_meta') and abstract_model._meta.abstract] abstract_models = list(set(abstract_models)) # remove duplicates appmodels = abstract_models + appmodels for appmodel in appmodels: appmodel_abstracts = [abstract_model.__name__ for abstract_model in appmodel.__bases__ if hasattr(abstract_model, '_meta') and abstract_model._meta.abstract] # collect all attribs of abstract superclasses def getBasesAbstractFields(c): _abstract_fields = [] for e in c.__bases__: if hasattr(e, '_meta') and e._meta.abstract: _abstract_fields.extend(e._meta.fields) _abstract_fields.extend(getBasesAbstractFields(e)) return _abstract_fields abstract_fields = getBasesAbstractFields(appmodel) model = { 'app_name': appmodel.__module__.replace(".", "_"), 'name': appmodel.__name__, 'abstracts': appmodel_abstracts, 'fields': [], 'relations': [] } # consider given model name ? 
def consider(model_name): if exclude_models and model_name in exclude_models: return False return not include_models or model_name in include_models if not consider(appmodel._meta.object_name): continue if verbose_names and appmodel._meta.verbose_name: model['label'] = appmodel._meta.verbose_name else: model['label'] = model['name'] # model attributes def add_attributes(field): if verbose_names and field.verbose_name: label = field.verbose_name else: label = field.name t = type(field).__name__ if isinstance(field, (OneToOneField, ForeignKey)): t += " ({0})".format(field.rel.field_name) # TODO: ManyToManyField, GenericRelation model['fields'].append({ 'name': field.name, 'label': label, 'type': t, 'blank': field.blank, 'abstract': field in abstract_fields, }) # Find all the real attributes. Relations are depicted as graph # edges instead of attributes attributes = [ field for field in appmodel._meta.local_fields if not isinstance( field, RelatedField)] # find primary key and print it first, ignoring implicit id if # other pk exists pk = appmodel._meta.pk if not appmodel._meta.abstract and pk in attributes: add_attributes(pk) for field in attributes: if skip_field(field): continue if not field.primary_key: add_attributes(field) # FIXME: actually many_to_many fields aren't saved in this model's db table, so why should we add an attribute-line for them in the resulting graph? 
# if appmodel._meta.many_to_many: # for field in appmodel._meta.many_to_many: # if skip_field(field): # continue # add_attributes(field) # relations def add_relation(field, extras=""): if verbose_names and field.verbose_name: label = field.verbose_name else: label = field.name # show related field name if hasattr(field, 'related_query_name'): label += ' (%s)' % field.related_query_name() # handle self-relationships if field.rel.to == 'self': target_model = field.model else: target_model = field.rel.to _rel = { 'target_app': target_model.__module__.replace('.', '_'), 'target': target_model.__name__, 'type': type(field).__name__, 'name': field.name, 'label': label, 'arrows': extras, 'needs_node': True } if _rel not in model['relations'] and consider(_rel['target']): model['relations'].append(_rel) for field in appmodel._meta.local_fields: # excluding field redundant with inheritance relation if field.attname.endswith('_ptr_id'): continue # excluding fields inherited from abstract classes. they too # show as local_fields if field in abstract_fields: continue if skip_field(field): continue if isinstance(field, OneToOneField): add_relation(field, '[arrowhead=none, arrowtail=none]') elif isinstance(field, ForeignKey): add_relation(field, '[arrowhead=none, arrowtail=dot]') for field in appmodel._meta.local_many_to_many: if skip_field(field): continue if isinstance(field, ManyToManyField): if (getattr(field, 'creates_table', False) or # django 1.1. 
(hasattr(field.rel.through, '_meta') and field.rel.through._meta.auto_created)): # django 1.2 add_relation( field, '[arrowhead=dot arrowtail=dot, dir=both]') elif isinstance(field, GenericRelation): add_relation( field, mark_safe('[style="dotted", arrowhead=normal, arrowtail=normal, dir=both]')) if inheritance: # add inheritance arrows for parent in appmodel.__bases__: if hasattr(parent, "_meta"): # parent is a model l = "multi-table" if parent._meta.abstract: l = "abstract" if appmodel._meta.proxy: l = "proxy" l += r"\ninheritance" _rel = { 'target_app': parent.__module__.replace(".", "_"), 'target': parent.__name__, 'type': "inheritance", 'name': "inheritance", 'label': l, 'arrows': '[arrowhead=empty, arrowtail=none]', 'needs_node': True } # TODO: seems as if abstract models aren't part of # models.getModels, which is why they are printed by # this without any attributes. if _rel not in model[ 'relations'] and consider(_rel['target']): model['relations'].append(_rel) graph['models'].append(model) graphs.append(graph) nodes = [] for graph in graphs: nodes.extend([e['name'] for e in graph['models']]) for graph in graphs: # don't draw duplication nodes because of relations for model in graph['models']: for relation in model['relations']: if relation['target'] in nodes: relation['needs_node'] = False # render templates t = loader.get_template_from_string("""{% if use_subgraph %} subgraph {{ cluster_app_name }} { label=< <TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0"> <TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" ><FONT FACE="Helvetica Bold" COLOR="Black" POINT-SIZE="12" >{{ app_name }}</FONT></TD></TR> </TABLE> > color=olivedrab4 style="rounded" {% endif %} {% for model in models %} {{ model.app_name }}_{{ model.name }} [label=< <TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0"> <TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4" ><FONT FACE="Helvetica Bold" COLOR="white" >{{ model.label }}{% if model.abstracts 
%}<BR/>&lt;<FONT FACE="Helvetica Italic">{{ model.abstracts|join:"," }}</FONT>&gt;{% endif %}</FONT></TD></TR> {% if not disable_fields %} {% for field in model.fields %} <TR><TD ALIGN="LEFT" BORDER="0" ><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica {% if field.abstract %}Italic{% else %}Bold{% endif %}">{{ field.label }}</FONT ></TD> <TD ALIGN="LEFT" ><FONT {% if field.blank %}COLOR="#7B7B7B" {% endif %}FACE="Helvetica {% if field.abstract %}Italic{% else %}Bold{% endif %}">{{ field.type }}</FONT ></TD></TR> {% endfor %} {% endif %} </TABLE> >] {% endfor %} {% if use_subgraph %} } {% endif %}""") dot += '\n' + t.render(graph) for graph in graphs: t = loader.get_template_from_string("""{% for model in models %} {% for relation in model.relations %} {% if relation.needs_node %} {{ relation.target_app }}_{{ relation.target }} [label=< <TABLE BGCOLOR="palegoldenrod" BORDER="0" CELLBORDER="0" CELLSPACING="0"> <TR><TD COLSPAN="2" CELLPADDING="4" ALIGN="CENTER" BGCOLOR="olivedrab4" ><FONT FACE="Helvetica Bold" COLOR="white" >{{ relation.target }}</FONT></TD></TR> </TABLE> >] {% endif %} {{ model.app_name }}_{{ model.name }} -> {{ relation.target_app }}_{{ relation.target }} [label="{{ relation.label }}"] {{ relation.arrows }}; {% endfor %} {% endfor %}""") dot += '\n' + t.render(graph) t = loader.get_template_from_string("}") c = Context({}) dot += '\n' + t.render(c) return dot class Command(BaseCommand): args = ('--dummy') help = 'Create a graph of your app!' 
# @ReservedAssignment option_list = BaseCommand.option_list + ( make_option( '--all_applications', '-a', dest='all_applications', default=False, action='store_true', help='Include all applications'), make_option( '--disable_fields', '-d', action='store', dest='disable_fields', default="", help='Specify fields to exclude'), make_option( '--group_models', '-g', action='store', dest='group_models', help=''), make_option( '--include_models', '-i', action='store', dest='include_models', help=''), make_option( '--verbose_names', '-n', action='store', dest='verbose_names', help=''), make_option( '--language', '-l', action='store', dest='language', help=''), make_option( '--exclude_models', '-X', action='store', dest='exclude_models', help=''), make_option( '--inheritance', '-e', action='store_true', dest='inheritance', help=''),) def handle(self, *args, **options): if not args and not options.get('all_applications', False): print(__doc__) sys.exit() print(generate_dot(args, **options))
gpl-3.0
wfleurant/cjdns
node_build/dependencies/libuv/build/gyp/test/mac/gyptest-ldflags.py
100
2078
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies that filenames passed to various linker flags are converted into build-directory relative paths correctly. """ import TestGyp import sys if sys.platform == 'darwin': test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode']) # The xcode-ninja generator handles gypfiles which are not at the # project root incorrectly. # cf. https://code.google.com/p/gyp/issues/detail?id=460 if test.format == 'xcode-ninja': test.skip_test() CHDIR = 'ldflags' test.run_gyp('subdirectory/test.gyp', chdir=CHDIR) test.build('subdirectory/test.gyp', test.ALL, chdir=CHDIR) test.pass_test() # These flags from `man ld` couldl show up in OTHER_LDFLAGS and need path # translation. # # Done: # -exported_symbols_list filename # -unexported_symbols_list file # -reexported_symbols_list file # -sectcreate segname sectname file # # Will be done on demand: # -weak_library path_to_library # -reexport_library path_to_library # -lazy_library path_to_library # -upward_library path_to_library # -syslibroot rootdir # -framework name[,suffix] # -weak_framework name[,suffix] # -reexport_framework name[,suffix] # -lazy_framework name[,suffix] # -upward_framework name[,suffix] # -force_load path_to_archive # -filelist file[,dirname] # -dtrace file # -order_file file # should use ORDER_FILE # -exported_symbols_order file # -bundle_loader executable # should use BUNDLE_LOADER # -alias_list filename # -seg_addr_table filename # -dylib_file install_name:file_name # -interposable_list filename # -object_path_lto filename # # # obsolete: # -sectorder segname sectname orderfile # -seg_addr_table_filename path # # # ??: # -map map_file_path # -sub_library library_name # -sub_umbrella framework_name
gpl-3.0
cjh1/VTK
Examples/Modelling/Python/faultLines.py
42
2235
#!/usr/bin/env python

# Create a constrained Delaunay triangulation following fault lines. The
# fault lines serve as constraint edges in the Delaunay triangulation.

import vtk
from vtk.util.misc import vtkGetDataRoot
from vtk.util.colors import *

VTK_DATA_ROOT = vtkGetDataRoot()

# Generate some points by reading a VTK data file. The data file also
# has edges that represent constraint lines. This is originally from a
# geologic horizon.
reader = vtk.vtkPolyDataReader()
reader.SetFileName(VTK_DATA_ROOT + "/Data/faults.vtk")

# Perform a 2D triangulation with constraint edges. The same dataset
# supplies both the points (input) and the constraint edges (source).
delny = vtk.vtkDelaunay2D()
delny.SetInputConnection(reader.GetOutputPort())
delny.SetSourceConnection(reader.GetOutputPort())
delny.SetTolerance(0.00001)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(delny.GetOutputPort())
mapMesh = vtk.vtkPolyDataMapper()
mapMesh.SetInputConnection(normals.GetOutputPort())
meshActor = vtk.vtkActor()
meshActor.SetMapper(mapMesh)
meshActor.GetProperty().SetColor(beige)

# Now pretty up the mesh with tubed edges and balls at the vertices.
tuber = vtk.vtkTubeFilter()
tuber.SetInputConnection(reader.GetOutputPort())
tuber.SetRadius(25)
mapLines = vtk.vtkPolyDataMapper()
mapLines.SetInputConnection(tuber.GetOutputPort())
linesActor = vtk.vtkActor()
linesActor.SetMapper(mapLines)
linesActor.GetProperty().SetColor(1, 0, 0)
# NOTE(review): this second SetColor call overrides the red set just above;
# the fault lines are actually drawn in tomato.
linesActor.GetProperty().SetColor(tomato)

# Create graphics objects
# Create the rendering window, renderer, and interactive renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

# Add the actors to the renderer, set the background and size
ren.AddActor(linesActor)
ren.AddActor(meshActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(350, 250)

# Hand-tuned camera for this particular dataset (UTM-scale coordinates).
cam1 = vtk.vtkCamera()
cam1.SetClippingRange(2580, 129041)
cam1.SetFocalPoint(461550, 6.58e+006, 2132)
cam1.SetPosition(463960, 6.559e+06, 16982)
cam1.SetViewUp(-0.321899, 0.522244, 0.78971)

light = vtk.vtkLight()
light.SetPosition(0, 0, 1)
light.SetFocalPoint(0, 0, 0)

ren.SetActiveCamera(cam1)
ren.AddLight(light)
ren.GetActiveCamera().Zoom(1.5)
# Keep the fixed light in place while the user moves the camera.
iren.LightFollowCameraOff()

iren.Initialize()
renWin.Render()
iren.Start()
bsd-3-clause
anqurvanillapy/yungboy
yungboy/yungboy.py
1
2547
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convert a small image into a bootable Game Boy Advance ROM.

Reads an image with PIL, packs every pixel into the GBA's 15-bit BGR
format, emits a mode-3 C program that blits the bitmap to VRAM, writes a
devkitARM makefile, and runs `make` to produce the .gba binary.
"""

import argparse, os, subprocess
from PIL import Image


def parse_argument():
    """Parse the single positional argument: the input image filename."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'filename',
        metavar='filename',
        action='store',
    )
    return parser.parse_args()


def convert_to_gba(filename):
    """Generate <name>.c and a makefile from *filename*, then run `make`.

    Refuses images larger than the GBA mode-3 framebuffer (240x160).
    Prints the output (or error output) of the `make` invocation.
    """
    img = Image.open(filename)
    width, height = img.size
    if width > 240 or height > 160:
        # Fixed: the message used to say 240x120, but the check is 240x160.
        print('error: resolution of image is higher than 240x160')
        return
    pix = img.load()
    data = []
    c_head = '''int main(){const unsigned int targetbmp[%d]={\n''' % (width * height)
    # Fixed off-by-one: the inner loop used `x<=%d`, which indexed one
    # element past the end of each row of targetbmp (and past the end of
    # the whole array on the last row). `x<%d` visits exactly `width`
    # columns, matching the width*height entries emitted below.
    c_eof = '''};*(unsigned int*)0x04000000=0x0403;int x,y;for(y=0;y<%d;y++){for(x=0;x<%d;x++){
((unsigned short*)0x06000000)[240*y+x]=targetbmp[%d*y+x];}}while(1);return 0;}
''' % (height, width, width)
    # Row-major scan: targetbmp[width*y + x] must match this order.
    for y in range(height):
        for x in range(width):
            r, g, b = pix[x, y]
            hexstr = convert_to_16color(r, g, b)
            data.append(hexstr)
    ofilename = os.path.splitext(filename)[0] + '.c'
    with open(ofilename, 'w') as filehandle:
        filehandle.write(c_head)
        for item in data:
            filehandle.write(item)
        filehandle.write(c_eof)
    with open('makefile', 'w') as filehandle:
        makefile = '''
PATH := $(DEVKITARM)/bin:$(PATH)

PROJ := {0}
TARGET := $(PROJ)
OBJS := $(PROJ).o

PREFIX := arm-none-eabi-
CC := $(PREFIX)gcc
LD := $(PREFIX)gcc
OBJCOPY := $(PREFIX)objcopy

ARCH := -mthumb-interwork -mthumb
SPECS := -specs=gba.specs

CFLAGS := $(ARCH) -O2 -Wall -fno-strict-aliasing
LDFLAGS := $(ARCH) $(SPECS)

.PHONY : build clean

build: $(TARGET).gba

$(TARGET).gba : $(TARGET).elf
\t$(OBJCOPY) -v -O binary $< $@
\t-@gbafix $@

$(TARGET).elf : $(OBJS)
\t$(LD) $^ $(LDFLAGS) -o $@

$(OBJS) : %.o : %.c
\t$(CC) -c $< $(CFLAGS) -o $@

clean :
\t@rm -fv *.gba
\t@rm -fv *.elf
\t@rm -fv *.o
'''.format(os.path.splitext(filename)[0])
        filehandle.write(makefile)
    subp = subprocess.Popen(
        ['make'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    out, err = subp.communicate()
    if err:
        print(err)
    else:
        print(out)


def convert_to_16color(r, g, b):
    """Pack 8-bit R, G, B into GBA 15-bit xBBBBBGGGGGRRRRR, as a C literal.

    Each channel is downsampled from 0..255 to 0..31; the result is
    returned as a comma-terminated hex token for the targetbmp array.
    """
    r, g, b = int(r / 256.0 * 32), int(g / 256.0 * 32), int(b / 256.0 * 32)
    color = r | (g << 5) | (b << 10)
    return "0x{:04x},".format(color)


if __name__ == '__main__':
    args = parse_argument()
    gbaimg = convert_to_gba(args.filename)
mit
MrTheodor/bakery
tools/tab_lammps2gromacs.py
2
3971
#!/usr/bin/env python """ Copyright (C) 2016 Jakub Krajniak <jkrajniak@gmail.com> This file is distributed under free software licence: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import argparse import math def _args(): parser = argparse.ArgumentParser( description='Converts LAMMPS tabulated potentials to GROMACS format') parser.add_argument('input_file') parser.add_argument('output_file') parser.add_argument('--table_type', choices=('pair', 'bond', 'angle', 'dihedral'), default='pair') parser.add_argument('--length_scale', default=1.0, type=float) return parser.parse_args() def _pair_convert(input_f, output_f, args): for line in input_f: l = line.strip() if not l.startswith('#') and not l.startswith('N') and l: sl = l.split() if len(sl) == 4: output_f.write('{} 0.0 0.0 0.0 0.0 {} {}\n'.format( float(sl[1])*args.length_scale, float(sl[2])*4.184, float(sl[3])*41.84)) elif len(sl) == 3: output_f.write('{} 0.0 0.0 0.0 0.0 {} {}\n'.format( float(sl[0])*args.length_scale, float(sl[1])*4.184, float(sl[2])*41.84)) def _bond_convert(input_f, output_f, args): for line in input_f: l = line.strip() if not l.startswith('#') and not l.startswith('N') and l: sl = l.split() if len(sl) < 4: continue output_f.write('{} {} {}\n'.format( float(sl[1])*args.length_scale, float(sl[2])*4.184, float(sl[3])*41.84)) def _angle_convert(input_f, output_f, _): for line in input_f: l = line.strip() if not l.startswith('#') and not l.startswith('N') and l: sl = 
l.split() if len(sl) < 3: continue output_f.write('{} {} {}\n'.format( float(sl[1]), float(sl[2])*4.184, float(sl[3])*4.184*180.0/math.pi)) def _dihedral_convert(input_f, output_f, _): degrees = True nof = False data = [] for line in input_f: l = line.strip() if not l.startswith('#') and not l.startswith('N') and l: sl = l.split() if len(sl) < 2: continue phi = float(sl[1]) if degrees: phi = math.radians(phi) - math.pi if nof: data.append([phi, float(sl[2])*4.184, 0.0]) else: output_f.write('{} {} {}\n'.format( phi, float(sl[2])*4.184, float(sl[3])*4.184)) elif l.startswith('N'): degrees = 'RADIANS' not in l nof = 'NOF' in l # Calculate force and then write a file. if data: for idx in range(0, len(data)-1): data[idx][2] = (data[idx+1][1] - data[idx][1])/(data[idx+1][0] - data[idx][0]) output_f.write('{} {} {}\n'.format(*data[idx])) def main(): args = _args() input_f = open(args.input_file, 'r') output_f = open(args.output_file, 'w') table_type2func = { 'pair': _pair_convert, 'bond': _bond_convert, 'angle': _angle_convert, 'dihedral': _dihedral_convert } table_type2func[args.table_type](input_f, output_f, args) output_f.close() input_f.close() if __name__ == '__main__': main()
gpl-3.0
leki75/ansible
lib/ansible/modules/monitoring/zabbix_screen.py
35
17493
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: zabbix_screen
short_description: Zabbix screen creates/updates/deletes
description:
    - This module allows you to create, modify and delete Zabbix screens and associated graph data.
version_added: "2.0"
author:
    - "(@cove)"
    - "Tony Minfei Ding"
    - "Harrison Gu (@harrisongu)"
requirements:
    - "python >= 2.6"
    - zabbix-api
options:
    server_url:
        description:
            - Url of Zabbix server, with protocol (http or https).
        required: true
        aliases: [ "url" ]
    login_user:
        description:
            - Zabbix user name.
        required: true
    login_password:
        description:
            - Zabbix user password.
        required: true
    http_login_user:
        description:
            - Basic Auth login
        required: false
        default: None
        version_added: "2.1"
    http_login_password:
        description:
            - Basic Auth password
        required: false
        default: None
        version_added: "2.1"
    timeout:
        description:
            - The timeout of API request (seconds).
        default: 10
    screens:
        description:
            - List of screens to be created/updated/deleted(see example).
            - If the screen(s) already been added, the screen(s) name won't be updated.
            - When creating or updating screen(s), C(screen_name), C(host_group) are required.
            - When deleting screen(s), the C(screen_name) is required.
            - >
              The available states are: C(present) (default) and C(absent).
              If the screen(s) already exists, and the state is not C(absent), the screen(s) will just be updated as needed.
        required: true
notes:
    - Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.
'''

EXAMPLES = '''
# Create/update a screen.
- name: Create a new screen or update an existing screen's items
  local_action:
    module: zabbix_screen
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    screens:
      - screen_name: ExampleScreen1
        host_group: Example group1
        state: present
        graph_names:
          - Example graph1
          - Example graph2
        graph_width: 200
        graph_height: 100

# Create/update multi-screen
- name: Create two of new screens or update the existing screens' items
  local_action:
    module: zabbix_screen
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    screens:
      - screen_name: ExampleScreen1
        host_group: Example group1
        state: present
        graph_names:
          - Example graph1
          - Example graph2
        graph_width: 200
        graph_height: 100
      - screen_name: ExampleScreen2
        host_group: Example group2
        state: present
        graph_names:
          - Example graph1
          - Example graph2
        graph_width: 200
        graph_height: 100

# Limit the Zabbix screen creations to one host since Zabbix can return an error when doing concurrent updates
- name: Create a new screen or update an existing screen's items
  local_action:
    module: zabbix_screen
    server_url: http://monitor.example.com
    login_user: username
    login_password: password
    state: present
    screens:
      - screen_name: ExampleScreen
        host_group: Example group
        state: present
        graph_names:
          - Example graph1
          - Example graph2
        graph_width: 200
        graph_height: 100
  when: inventory_hostname==groups['group_name'][0]
'''

try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
    from zabbix_api import ZabbixAPIException
    from zabbix_api import Already_Exists

    # Extend the ZabbixAPI
    # Since the zabbix-api python module too old (version 1.0, and there's no
    # higher version so far), it doesn't support the 'screenitem' api call,
    # we have to inherit the ZabbixAPI class to add 'screenitem' support.
    class ZabbixAPIExtends(ZabbixAPI):
        screenitem = None

        def __init__(self, server, timeout, user, passwd, **kwargs):
            ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
            self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs))

    HAS_ZABBIX_API = True
except ImportError:
    # Import failure is reported later by main() so that Ansible can emit a
    # proper fail_json instead of a traceback.
    HAS_ZABBIX_API = False


class Screen(object):
    """Thin wrapper around the Zabbix API for screen/screenitem operations.

    All methods report errors through the Ansible module's fail_json and
    never raise to the caller.
    """

    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx

    # get group id by group name
    def get_host_group_id(self, group_name):
        """Return the groupid of the host group named *group_name*."""
        if group_name == "":
            self._module.fail_json(msg="group_name is required")
        hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}})
        if len(hostGroup_list) < 1:
            self._module.fail_json(msg="Host group not found: %s" % group_name)
        else:
            hostGroup_id = hostGroup_list[0]['groupid']
            return hostGroup_id

    # get monitored host_id by host_group_id
    def get_host_ids_by_group_id(self, group_id):
        """Return the hostids of all monitored hosts in the group."""
        host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1})
        if len(host_list) < 1:
            self._module.fail_json(msg="No host in the group.")
        else:
            host_ids = []
            for i in host_list:
                host_id = i['hostid']
                host_ids.append(host_id)
            return host_ids

    # get screen
    def get_screen_id(self, screen_name):
        """Return the screenid of *screen_name*, or None if it does not exist."""
        if screen_name == "":
            self._module.fail_json(msg="screen_name is required")
        try:
            screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}})
            if len(screen_id_list) >= 1:
                screen_id = screen_id_list[0]['screenid']
                return screen_id
            return None
        except Exception as e:
            self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e))

    # create screen
    def create_screen(self, screen_name, h_size, v_size):
        """Create a screen and return its screenid (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size})
            return screen['screenids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e))

    # update screen
    def update_screen(self, screen_id, screen_name, h_size, v_size):
        """Resize an existing screen (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size})
        except Exception as e:
            self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e))

    # delete screen
    def delete_screen(self, screen_id, screen_name):
        """Delete a screen (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.screen.delete([screen_id])
        except Exception as e:
            self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e))

    # get graph ids
    def get_graph_ids(self, hosts, graph_name_list):
        """Collect graph ids for all hosts; also return the largest
        per-host graph count (used as the screen's vertical size)."""
        graph_id_lists = []
        vsize = 1
        for host in hosts:
            graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
            size = len(graph_id_list)
            if size > 0:
                graph_id_lists.extend(graph_id_list)
                if vsize < size:
                    vsize = size
        return graph_id_lists, vsize

    # getGraphs
    def get_graphs_by_host_id(self, graph_name_list, host_id):
        """Return the graphids on *host_id* matching any of the names
        (substring search, so one name may match several graphs)."""
        graph_ids = []
        for graph_name in graph_name_list:
            graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id})
            graph_id_list = []
            if len(graphs_list) > 0:
                for graph in graphs_list:
                    graph_id = graph['graphid']
                    graph_id_list.append(graph_id)
            if len(graph_id_list) > 0:
                graph_ids.extend(graph_id_list)
        return graph_ids

    # get screen items
    def get_screen_items(self, screen_id):
        """Return all screen items of a screen."""
        screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id})
        return screen_item_list

    # delete screen items
    def delete_screen_items(self, screen_id, screen_item_id_list):
        """Delete the given screen items; True if deleted (or nothing to do),
        False if the screen had no items.

        NOTE(review): on ZabbixAPIException this silently returns None,
        which callers treat as falsy — the subsequent update is skipped.
        """
        try:
            if len(screen_item_id_list) == 0:
                return True
            screen_item_list = self.get_screen_items(screen_id)
            if len(screen_item_list) > 0:
                if self._module.check_mode:
                    self._module.exit_json(changed=True)
                self._zapi.screenitem.delete(screen_item_id_list)
                return True
            return False
        except ZabbixAPIException:
            pass

    # get screen's hsize and vsize
    def get_hsize_vsize(self, hosts, v_size):
        """Compute the screen grid size from the host count and the
        maximum per-host graph count."""
        h_size = len(hosts)
        if h_size == 1:
            if v_size == 1:
                h_size = 1
            elif v_size in range(2, 9):
                h_size = 2
            else:
                h_size = 3
            # NOTE(review): under Python 3 this is float division and would
            # produce a non-integer vsize; `//` is presumably intended.
            v_size = (v_size - 1) / h_size + 1
        return h_size, v_size

    # create screen_items
    def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size):
        """Place one screen item per graph on the screen grid.

        Defaults: width 500 (<4 hosts) or 200, height 100. For a single
        host the graphs wrap across h_size columns; for several hosts each
        host gets its own column.
        """
        if len(hosts) < 4:
            if width is None or width < 0:
                width = 500
        else:
            if width is None or width < 0:
                width = 200
        if height is None or height < 0:
            height = 100
        try:
            # when there're only one host, only one row is not good.
            if len(hosts) == 1:
                graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
                for i, graph_id in enumerate(graph_id_list):
                    if graph_id is not None:
                        self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
                                                      'width': width, 'height': height,
                                                      'x': i % h_size, 'y': i / h_size, 'colspan': 1, 'rowspan': 1,
                                                      'elements': 0, 'valign': 0, 'halign': 0,
                                                      'style': 0, 'dynamic': 0, 'sort_triggers': 0})
            else:
                for i, host in enumerate(hosts):
                    graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
                    for j, graph_id in enumerate(graph_id_list):
                        if graph_id is not None:
                            self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
                                                          'width': width, 'height': height,
                                                          'x': i, 'y': j, 'colspan': 1, 'rowspan': 1,
                                                          'elements': 0, 'valign': 0, 'halign': 0,
                                                          'style': 0, 'dynamic': 0, 'sort_triggers': 0})
        except Already_Exists:
            # Racing creations of the same item are tolerated.
            pass


def main():
    """Ansible entry point: parse params, connect, reconcile each screen."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            timeout=dict(type='int', default=10),
            screens=dict(type='list', required=True)
        ),
        supports_check_mode=True
    )

    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    timeout = module.params['timeout']
    screens = module.params['screens']

    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)

    screen = Screen(module, zbx)
    created_screens = []
    changed_screens = []
    deleted_screens = []
    for zabbix_screen in screens:
        screen_name = zabbix_screen['screen_name']
        screen_id = screen.get_screen_id(screen_name)
        state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present"

        if state == "absent":
            if screen_id:
                # Items must be removed before the screen itself.
                screen_item_list = screen.get_screen_items(screen_id)
                screen_item_id_list = []
                for screen_item in screen_item_list:
                    screen_item_id = screen_item['screenitemid']
                    screen_item_id_list.append(screen_item_id)
                screen.delete_screen_items(screen_id, screen_item_id_list)
                screen.delete_screen(screen_id, screen_name)
                deleted_screens.append(screen_name)
        else:
            host_group = zabbix_screen['host_group']
            graph_names = zabbix_screen['graph_names']
            graph_width = None
            if 'graph_width' in zabbix_screen:
                graph_width = zabbix_screen['graph_width']
            graph_height = None
            if 'graph_height' in zabbix_screen:
                graph_height = zabbix_screen['graph_height']
            host_group_id = screen.get_host_group_id(host_group)
            hosts = screen.get_host_ids_by_group_id(host_group_id)
            screen_item_id_list = []
            resource_id_list = []

            graph_ids, v_size = screen.get_graph_ids(hosts, graph_names)
            h_size, v_size = screen.get_hsize_vsize(hosts, v_size)

            if not screen_id:
                # create screen
                screen_id = screen.create_screen(screen_name, h_size, v_size)
                screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
                created_screens.append(screen_name)
            else:
                screen_item_list = screen.get_screen_items(screen_id)
                for screen_item in screen_item_list:
                    screen_item_id = screen_item['screenitemid']
                    resource_id = screen_item['resourceid']
                    screen_item_id_list.append(screen_item_id)
                    resource_id_list.append(resource_id)
                # when the screen items changed, then update
                if graph_ids != resource_id_list:
                    deleted = screen.delete_screen_items(screen_id, screen_item_id_list)
                    if deleted:
                        screen.update_screen(screen_id, screen_name, h_size, v_size)
                        screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
                        changed_screens.append(screen_name)

    if created_screens and changed_screens:
        module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens),
                                                                                                                 ",".join(changed_screens)))
    elif created_screens:
        module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens))
    elif changed_screens:
        module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens))
    elif deleted_screens:
        module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens))
    else:
        module.exit_json(changed=False)

from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
gpl-3.0
rversteegen/commandergenius
project/jni/python/src/Lib/cmd.py
163
14962
"""A generic class to build line-oriented command interpreters.

Interpreters constructed with this class obey the following conventions:

1. End of file on input is processed as the command 'EOF'.
2. A command is parsed out of each line by collecting the prefix composed
   of characters in the identchars member.
3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
   is passed a single argument consisting of the remainder of the line.
4. Typing an empty line repeats the last command.  (Actually, it calls the
   method `emptyline', which may be overridden in a subclass.)
5. There is a predefined `help' method.  Given an argument `topic', it
   calls the command `help_topic'.  With no arguments, it lists all topics
   with defined help_ functions, broken into up to three topics; documented
   commands, miscellaneous help topics, and undocumented commands.
6. The command '?' is a synonym for `help'.  The command '!' is a synonym
   for `shell', if a do_shell method exists.
7. If completion is enabled, completing commands will be done automatically,
   and completing of commands args is done by calling complete_foo() with
   arguments text, line, begidx, endidx.  text is string we are matching
   against, all returned matches must begin with it.  line is the current
   input line (lstripped), begidx and endidx are the beginning and end
   indexes of the text being matched, which could be used to provide
   different completion depending upon which position the argument is in.

The `default' method may be overridden to intercept commands for which there
is no do_ method.

The `completedefault' method may be overridden to intercept completions for
commands that have no complete_ method.

The data member `self.ruler' sets the character used to draw separator lines
in the help messages.  If empty, no ruler line is drawn.  It defaults to "=".

If the value of `self.intro' is nonempty when the cmdloop method is called,
it is printed out on interpreter startup.  This value may be overridden
via an optional argument to the cmdloop() method.

The data members `self.doc_header', `self.misc_header', and
`self.undoc_header' set the headers used for the help function's
listings of documented functions, miscellaneous topics, and undocumented
functions respectively.

These interpreters use raw_input; thus, if the readline module is loaded,
they automatically support Emacs-like command history and editing features.
"""

import string

__all__ = ["Cmd"]

PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'

class Cmd:
    """A simple framework for writing line-oriented command interpreters.

    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.

    A Cmd instance or subclass instance is a line-oriented interpreter
    framework.  There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.

    """
    prompt = PROMPT
    identchars = IDENTCHARS
    ruler = '='
    lastcmd = ''
    intro = None
    doc_leader = ""
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    use_rawinput = 1

    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Instantiate a line-oriented interpreter framework.

        The optional argument 'completekey' is the readline name of a
        completion key; it defaults to the Tab key. If completekey is
        not None and the readline module is available, command completion
        is done automatically. The optional arguments stdin and stdout
        specify alternate input and output file objects; if not specified,
        sys.stdin and sys.stdout are used.

        """
        import sys
        if stdin is not None:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin
        if stdout is not None:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout
        self.cmdqueue = []
        self.completekey = completekey

    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.

        """

        self.preloop()
        # Install our completer for the duration of the loop; the previous
        # completer is saved and restored in the finally block below.
        if self.use_rawinput and self.completekey:
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                pass
        try:
            if intro is not None:
                self.intro = intro
            if self.intro:
                self.stdout.write(str(self.intro)+"\n")
            stop = None
            while not stop:
                # Queued commands (self.cmdqueue) take priority over input.
                if self.cmdqueue:
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = raw_input(self.prompt)
                        except EOFError:
                            line = 'EOF'
                    else:
                        self.stdout.write(self.prompt)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            # Empty read means end of file on stdin.
                            line = 'EOF'
                        else:
                            line = line[:-1] # chop \n
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            if self.use_rawinput and self.completekey:
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass


    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.

        """
        return line

    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop

    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        pass

    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.

        """
        pass

    def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments.  Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if the line couldn't be parsed.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            # '?' is a synonym for the help command.
            line = 'help ' + line[1:]
        elif line[0] == '!':
            # '!' is a synonym for do_shell, if the subclass defines one.
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line

    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.

        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.

        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        self.lastcmd = line
        if cmd == '':
            return self.default(line)
        else:
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)

    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.

        If this method is not overridden, it repeats the last nonempty
        command entered.

        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)

    def default(self, line):
        """Called on an input line when the command prefix is not recognized.

        If this method is not overridden, it prints an error message and
        returns.

        """
        self.stdout.write('*** Unknown syntax: %s\n'%line)

    def completedefault(self, *ignored):
        """Method called to complete an input line when no command-specific
        complete_*() method is available.

        By default, it returns an empty list.

        """
        return []

    def completenames(self, text, *ignored):
        dotext = 'do_'+text
        return [a[3:] for a in self.get_names() if a.startswith(dotext)]

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        if state == 0:
            import readline
            origline = readline.get_line_buffer()
            line = origline.lstrip()
            stripped = len(origline) - len(line)
            begidx = readline.get_begidx() - stripped
            endidx = readline.get_endidx() - stripped
            if begidx>0:
                # Completing an argument: dispatch to complete_<cmd>.
                cmd, args, foo = self.parseline(line)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd)
                    except AttributeError:
                        compfunc = self.completedefault
            else:
                # Completing the command name itself.
                compfunc = self.completenames
            self.completion_matches = compfunc(text, line, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None

    def get_names(self):
        # Inheritance says we have to look in class and
        # base classes; order is not important.
        names = []
        classes = [self.__class__]
        while classes:
            aclass = classes.pop(0)
            if aclass.__bases__:
                classes = classes + list(aclass.__bases__)
            names = names + dir(aclass)
        return names

    def complete_help(self, *args):
        return self.completenames(*args)

    def do_help(self, arg):
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except AttributeError:
                # No help_<arg>: fall back to the do_<arg> docstring.
                try:
                    doc=getattr(self, 'do_' + arg).__doc__
                    if doc:
                        self.stdout.write("%s\n"%str(doc))
                        return
                except AttributeError:
                    pass
                self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
                return
            func()
        else:
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]]=1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd=name[3:]
                    if cmd in help:
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append(cmd)
                    else:
                        cmds_undoc.append(cmd)
            self.stdout.write("%s\n"%str(self.doc_leader))
            self.print_topics(self.doc_header,   cmds_doc,   15,80)
            self.print_topics(self.misc_header,  help.keys(),15,80)
            self.print_topics(self.undoc_header, cmds_undoc, 15,80)

    def print_topics(self, header, cmds, cmdlen, maxcol):
        if cmds:
            self.stdout.write("%s\n"%str(header))
            if self.ruler:
                self.stdout.write("%s\n"%str(self.ruler * len(header)))
            self.columnize(cmds, maxcol-1)
            self.stdout.write("\n")

    def columnize(self, list, displaywidth=80):
        """Display a list of strings as a compact set of columns.

        Each column is only as wide as necessary.
        Columns are separated by two spaces (one was not legible enough).
        """
        if not list:
            self.stdout.write("<empty>\n")
            return
        nonstrings = [i for i in range(len(list))
                        if not isinstance(list[i], str)]
        if nonstrings:
            # NOTE(review): Python 2 raise syntax — this module predates py3.
            raise TypeError, ("list[i] not a string for i in %s" %
                              ", ".join(map(str, nonstrings)))
        size = len(list)
        if size == 1:
            self.stdout.write('%s\n'%str(list[0]))
            return
        # Try every row count from 1 upwards
        for nrows in range(1, len(list)):
            ncols = (size+nrows-1) // nrows
            colwidths = []
            totwidth = -2
            for col in range(ncols):
                colwidth = 0
                for row in range(nrows):
                    i = row + nrows*col
                    if i >= size:
                        break
                    x = list[i]
                    colwidth = max(colwidth, len(x))
                colwidths.append(colwidth)
                totwidth += colwidth + 2
                if totwidth > displaywidth:
                    break
            if totwidth <= displaywidth:
                break
        else:
            nrows = len(list)
            ncols = 1
            colwidths = [0]
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = row + nrows*col
                if i >= size:
                    x = ""
                else:
                    x = list[i]
                texts.append(x)
            while texts and not texts[-1]:
                del texts[-1]
            for col in range(len(texts)):
                texts[col] = texts[col].ljust(colwidths[col])
            self.stdout.write("%s\n"%str("  ".join(texts)))
lgpl-2.1
mthnzbk/pisi-player
pisiplayer/settingsdialog.py
1
6744
#
#
# Copyright 2016 Metehan Özbek <mthnzbk@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#

from PyQt5.QtWidgets import (QToolBox, QPushButton, QColorDialog, QFontComboBox,
                             QHBoxLayout, QWidget, QLabel, QComboBox, QVBoxLayout)
from PyQt5.QtGui import QIcon, QColor
from PyQt5.QtCore import QPoint, Qt, pyqtSignal
from .settings import settings


class SubtitleWidget(QWidget):
    """Settings page for the subtitle font and colour.

    Every change is persisted immediately through settings() and the
    parent dialog's settingsChanged signal is emitted so the player can
    re-render subtitles on the fly.
    """
    def __init__(self, parent=None):
        super().__init__()
        self.parent = parent

        hlayout = QHBoxLayout()
        self.setLayout(hlayout)

        font_combobox = QFontComboBox()
        font_combobox.setEditable(False)
        font_combobox.setFixedHeight(30)
        font_combobox.setStyleSheet("QFontComboBox {background-color: white; color: black; border-radius: 3px;\
        border-color: lightgray; border-style: solid; border-width:2px;} \
        QFontComboBox::down-arrow {image: url(/usr/share/icons/breeze/actions/24/arrow-down)} \
        QFontComboBox::drop-down {border:none;}")
        font_combobox.setCurrentText(settings().value("Subtitle/font"))
        hlayout.addWidget(font_combobox)

        self.color_button = QPushButton()
        self.color_button.setFixedSize(30, 30)
        # the stored value is a QColor; fall back to white on first run
        self.color_button.setStyleSheet("QPushButton {border: 1px solid black; border-radius: 3px; \
        background-color: %s; }"%(settings().value("Subtitle/color") or QColor("#ffffff")).name())
        hlayout.addWidget(self.color_button)

        self.color_button.clicked.connect(self.colorSelected)
        font_combobox.currentIndexChanged[str].connect(self.fontChanged)

    def fontChanged(self, font):
        # Persist the family name and notify the player immediately.
        settings().setValue("Subtitle/font", font)
        settings().sync()
        self.parent.settingsChanged.emit()

    def colorSelected(self):
        color = QColorDialog.getColor(Qt.white, self)
        # BUG FIX: the original tested `color.name() == "#000000"` to
        # detect a cancelled dialog, which also silently discarded a
        # legitimate pure-black selection.  QColor reports cancellation
        # explicitly via isValid().
        if color.isValid():
            settings().setValue("Subtitle/color", color)
            settings().sync()
            self.color_button.setStyleSheet("QPushButton {border: 1px solid black; border-radius: 3px; \
        background-color: %s; }"%color.name())
            self.parent.settingsChanged.emit()


class YoutubeWidget(QWidget):
    """Settings page for YouTube playback quality and container format."""
    def __init__(self, parent=None):
        super().__init__()
        self.parent = parent

        vlayout = QVBoxLayout()
        self.setLayout(vlayout)

        # --- quality row ---
        hlayout = QHBoxLayout()
        label = QLabel()
        label.setText(self.tr("Çözünürlük:"))
        hlayout.addWidget(label)

        comboBox = QComboBox()
        comboBox.setFixedHeight(30)
        comboBox.addItems(["small", "medium", "hd720"])
        comboBox.setCurrentText(settings().value("Youtube/quality") or "medium")
        comboBox.setStyleSheet("QComboBox {background-color: white; color: black; border-radius: 3px;\
        border-color: lightgray; border-style: solid; border-width:2px; padding-left:5px;} \
        QComboBox::down-arrow {image: url(/usr/share/icons/breeze/actions/24/arrow-down)} \
        QComboBox::drop-down {border:none;}")
        hlayout.addWidget(comboBox)

        # --- format row ---
        hlayout2 = QHBoxLayout()
        label2 = QLabel()
        label2.setText(self.tr("Format"))
        hlayout2.addWidget(label2)

        comboBox2 = QComboBox()
        comboBox2.setFixedHeight(30)
        comboBox2.addItems(["webm", "mp4"])
        comboBox2.setCurrentText(settings().value("Youtube/format") or "mp4")
        comboBox2.setStyleSheet("QComboBox {background-color: white; color: black; border-radius: 3px;\
        border-color: lightgray; border-style: solid; border-width:2px; padding-left:5px;} \
        QComboBox::down-arrow {image: url(/usr/share/icons/breeze/actions/24/arrow-down)} \
        QComboBox::drop-down {border:none;}")
        hlayout2.addWidget(comboBox2)

        comboBox.currentTextChanged.connect(self.qualityChanged)
        comboBox2.currentTextChanged.connect(self.formatChanged)

        vlayout.addLayout(hlayout)
        vlayout.addLayout(hlayout2)

    def qualityChanged(self, quality):
        settings().setValue("Youtube/quality", quality)
        settings().sync()
        self.parent.settingsChanged.emit()

    def formatChanged(self, format):
        settings().setValue("Youtube/format", format)
        settings().sync()
        self.parent.settingsChanged.emit()


class FalancaWidget(QWidget):
    # Placeholder settings page -- not implemented yet.
    pass


class SettingsDialog(QToolBox):
    """Floating tool-window holding one QToolBox page per settings group.

    Emits settingsChanged whenever a page persists a new value, and
    remembers its screen position across sessions.
    """

    settingsChanged = pyqtSignal()

    def __init__(self, parent=None):
        super().__init__()
        self.parent = parent
        self.setWindowFlags(Qt.Tool)
        self.setVisible(False)
        self.resize(300, 250)
        # restore last window position, defaulting to (250, 250)
        self.move(settings().value("Settings/position") or QPoint(250,250))
        self.setWindowIcon(QIcon.fromTheme("pisiplayer"))
        self.setWindowTitle(self.tr("Ayarlar"))
        self.setStyleSheet("""QToolBox::tab {
        background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1,
        stop: 0 #E1E1E1, stop: 0.4 #DDDDDD,
        stop: 0.5 #D8D8D8, stop: 1.0 #D3D3D3);
        border-radius: 5px;
        color: darkgray;
        }

        QToolBox::tab:selected { /* italicize selected tabs */
        font: italic;
        color: white;
        }""")

        # addItem() returns the page index; the original bound it to
        # unused locals (page1/page2) which have been dropped.
        self.addItem(SubtitleWidget(self), self.tr("Altyazılar"))
        self.addItem(YoutubeWidget(self), self.tr("Youtube"))

    def closeEvent(self, event):
        # persist the window position so it reopens where it was left
        settings().setValue("Settings/position", self.pos())
        settings().sync()
        event.accept()
gpl-3.0
kron4eg/django-btt
btt/tracker/views.py
1
1841
# -*- coding: utf-8 -*-
from cgi import parse_qsl

from django.conf import settings
from django.http import HttpResponse
from django.utils.datastructures import MultiValueDictKeyError

from lib import render_to
from lib.bt.bencoding import encode, decode
from torrent.models import Torrent
from tracker.models import Peer


@render_to('btt/base.html')
def announce(request):
    """BitTorrent tracker announce endpoint.

    Parses the raw query string (info_hash is binary data that Django's
    GET decoding would mangle), looks up the torrent, and registers or
    removes the announcing peer depending on the reported event.
    Responses are bencoded dictionaries per the tracker protocol.
    """
    qs = dict(parse_qsl(request.META['QUERY_STRING']))
    if qs.get('info_hash') is None:
        return {}
    info_hash = qs['info_hash'].encode('hex')

    response_dict = {'interval': settings.ANNOUNCE_INTERVAL}

    try:
        torrent = Torrent.objects.get(info_hash=info_hash)
    except Torrent.DoesNotExist:
        # unknown torrent: tell the client to back off for an hour
        response_dict['failure reason'] = 'torrent not registered'
        response_dict['interval'] = 3600
        return HttpResponse(encode(response_dict))

    try:
        ip = request.META['REMOTE_ADDR']
        port = request.GET['port']
        peer_id = request.GET['peer_id']
        uploaded = request.GET['uploaded']
        downloaded = request.GET['downloaded']
        left = request.GET['left']
        compact = request.GET.get('compact', 0)
        # BUG FIX: 'event' is optional in the BT protocol (absent on a
        # regular keep-alive announce).  The original left it as None,
        # which made the membership tests below raise TypeError.
        event = request.GET.get('event', '')
    except MultiValueDictKeyError:
        response_dict['failure reason'] = 'invalid request'
        response_dict['interval'] = 600
        return HttpResponse(encode(response_dict))

    if 'started' in event:
        # get_or_create persists a newly created row itself; the extra
        # save() the original performed on `created` was redundant.
        peer, created = Peer.objects.get_or_create(
            peer_id=peer_id, port=port, ip=ip, torrent=torrent)
    elif 'stopped' in event:
        try:
            peer = Peer.objects.get(peer_id=peer_id, ip=ip, torrent=torrent)
            peer.delete()
        except Peer.DoesNotExist:
            pass
    elif 'completed' in event:
        # completion is currently not tracked
        pass

    return HttpResponse(encode(response_dict))
gpl-3.0
StephenCarlson/BYU-Mars-Rover
joystick.py
1
8429
import pygame from pygame.locals import * import socket # UDP_IP = "192.168.1.143" UDP_IP = "127.0.0.1" # UDP_IP = "192.168.7.2" # UDP_IP = '127.0.0.255' # Broadcast Loopback # UDP_IP = '255.255.255.255' # Broadcast Sun Adapter # UDP_IP = '192.168.0.255' # Broadcast Local Network? # UDP_IP = '71.195.237.116' # Broadcast # UDP_IP = '192.168.10.131' # Broadcast # UDP_PORT = 80 UDP_PORT = 27015 MESSAGE = "Rover Test Frame" FPS = 20 sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # Broadcast # sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # Broadcast # sock.bind((UDP_IP, UDP_PORT)) # Broadcast print "UDP target IP:", UDP_IP print "UDP target port:", UDP_PORT sock.sendto(MESSAGE, (UDP_IP, UDP_PORT)) # Define some colors BLACK = ( 0, 0, 0) WHITE = ( 255, 255, 255) allowMouseIntegral = False # gimbalPan = 0 # gimbalTilt = 0 gimbal = [0,0] # driveFwd = 0; # driveTurn = 0; drive = [0,0] # This is a simple class that will help us printScr to the screen # It has nothing to do with the joysticks, just outputing the # information. class TextprintScr: def __init__(self): self.reset() self.font = pygame.font.Font(None, 20) def printScr(self, screen, textString): textBitmap = self.font.render(textString, True, BLACK) screen.blit(textBitmap, [self.x, self.y]) self.y += self.line_height def reset(self): self.x = 10 self.y = 10 self.line_height = 15 def indent(self): self.x += 10 def unindent(self): self.x -= 10 pygame.init() # Set the width and height of the screen [width,height] size = [500, 300] screen = pygame.display.set_mode(size) pygame.display.set_caption("BYU Rover Control Panel") #Loop until the user clicks the close button. 
done = False # Used to manage how fast the screen updates clock = pygame.time.Clock() # Initialize the joysticks pygame.joystick.init() # Get ready to printScr textprintScr = TextprintScr() # -------- Main Program Loop ----------- while done==False: # EVENT PROCESSING STEP for event in pygame.event.get(): # User did something if event.type == pygame.QUIT: # If user clicked close done=True # Flag that we are done so we exit this loop # Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION if event.type == pygame.JOYBUTTONDOWN: print("Joystick button pressed.") if event.type == pygame.JOYBUTTONUP: print("Joystick button released.") if event.type == MOUSEBUTTONDOWN: #print event.button if event.button==1: print "LMB" allowMouseIntegral = not allowMouseIntegral pygame.event.set_grab(allowMouseIntegral) pygame.mouse.set_visible(not allowMouseIntegral) if(not allowMouseIntegral): pygame.mouse.set_pos((348+int(gimbal[0]/20), 148+int(gimbal[1]/20))) if event.button==3 and allowMouseIntegral: print "RMB" # gimbalPan= 0 # gimbalTilt= 0 gimbal = [0,0] if event.button==2: print "MMB" if event.type == MOUSEBUTTONUP: #print event.button if event.button==1: print "lmb" if event.type == KEYDOWN: #print event.key if event.key==K_w: print 'W' #driveFwd = 1000 #driveFwd -= int(abs(driveFwd*.1))+1 # print driveFwd if event.key==K_a: print 'A' #driveTurn = -1000 #driveFwd -= int(abs(driveFwd*.1))+1 if event.key==K_s: print 'S' #driveFwd = -1000 #driveFwd -= int(abs(driveFwd*.1))+1 if event.key==K_d: print 'D' #driveTurn = 1000 #driveFwd -= int(abs(driveFwd*.1))+1 if event.type == KEYUP: #print event.key if event.key==K_w: print 'w' #driveFwd = 0 if event.key==K_a: print 'a' #driveTurn = 0 if event.key==K_s: print 's' #driveFwd = 0 if event.key==K_d: print 'd' #driveTurn = 0 # DRAWING STEP # First, clear the screen to white. Don't put other drawing commands # above this, or they will be erased with this command. 
screen.fill(WHITE) textprintScr.reset() # --- Capture Mouse and set Gimbal #print pygame.mouse.get_pos() #mouseX,mouseY = pygame.mouse.get_pos() mouseX,mouseY = pygame.mouse.get_rel() if allowMouseIntegral: # gimbalPan += mouseX # gimbalTilt += mouseY gimbal[0] += mouseX gimbal[1] += mouseY #print mouseX,mouseY #textprintScr.printScr(screen, "Gimbal: {} {}".format(gimbalPan,gimbalTilt)) textprintScr.printScr(screen, "Gimbal: {} {}".format(gimbal[0],gimbal[1])) # --- Aquire Key States and set Drive accordingly keystate = pygame.key.get_pressed() drive = [(keystate[K_d]-keystate[K_a])*1000,(keystate[K_w]-keystate[K_s])*1000] # Get count of joysticks joystick_count = pygame.joystick.get_count() textprintScr.printScr(screen, "Number of joysticks: {}".format(joystick_count) ) textprintScr.indent() # For each joystick: for i in range(joystick_count): joystick = pygame.joystick.Joystick(i) joystick.init() textprintScr.printScr(screen, "Joystick {}".format(i) ) textprintScr.indent() # Get the name from the OS for the controller/joystick name = joystick.get_name() textprintScr.printScr(screen, "Joystick name: {}".format(name) ) # Usually axis run in pairs, up/down for one, and left/right for # the other. axes = joystick.get_numaxes() textprintScr.printScr(screen, "Number of axes: {}".format(axes) ) textprintScr.indent() for i in range( axes ): axis = joystick.get_axis( i ) textprintScr.printScr(screen, "Axis {} value: {:>6.3f}".format(i, axis) ) textprintScr.unindent() buttons = joystick.get_numbuttons() textprintScr.printScr(screen, "Number of buttons: {}".format(buttons) ) textprintScr.indent() for i in range( buttons ): button = joystick.get_button( i ) textprintScr.printScr(screen, "Button {:>2} value: {}".format(i,button) ) textprintScr.unindent() # Hat switch. All or nothing for direction, not like joysticks. # Value comes back in an array. 
hats = joystick.get_numhats() textprintScr.printScr(screen, "Number of hats: {}".format(hats) ) textprintScr.indent() for i in range( hats ): hat = joystick.get_hat( i ) textprintScr.printScr(screen, "Hat {} value: {}".format(i, str(hat)) ) textprintScr.unindent() textprintScr.unindent() payload = repr(joystick.get_axis(0)) sock.sendto(payload, (UDP_IP, UDP_PORT)) # --- Over-ride the Mouse/Keyboard setpoints if joystick is present if(pygame.joystick.get_count()==1): joystick = pygame.joystick.Joystick(i) joystick.init() if(joystick.get_numaxes() >=2 ): drive[0] = int(joystick.get_axis(0)*1000) drive[1] = int(joystick.get_axis(1)*1000) if(joystick.get_numaxes() >=4 ): gimbal[0] = int(joystick.get_axis(2)*1000) gimbal[1] = int(joystick.get_axis(3)*1000) # --- Constrain all values to +/- 1000 #if gimbalPan in range(-1000,1000) #if gimbalTilt in range(-1000,1000) # if gimbalPan > 1000: gimbalPan= 1000 # if gimbalPan < -1000: gimbalPan= -1000 # if gimbalTilt > 1000: gimbalTilt= 1000 # if gimbalTilt < -1000: gimbalTilt= -1000 for i in range(0,len(gimbal)): if gimbal[i] > 1000: gimbal[i]= 1000 if gimbal[i] < -1000: gimbal[i]= -1000 # if driveFwd > 1000: driveFwd= 1000 # if driveFwd < -1000: driveFwd= -1000 # if driveTurn > 1000: driveTurn= 1000 # if driveTurn < -1000: driveTurn= -1000 for i in range(0,len(drive)): if drive[i] > 1000: drive[i] = 1000 if drive[i] < -1000: drive[i] = -1000 # --- Draw values pygame.draw.rect(screen, (255, 128, 0), (98, 98, 104, 104),2) pygame.draw.rect(screen, (0, 128, 255), (148+int(drive[0]/20), 148+int(-drive[1]/20), 4, 4)) pygame.draw.rect(screen, (0, 0, 0), (298, 98, 104, 104),2) pygame.draw.rect(screen, (0, 0, 0), (348+int(gimbal[0]/20), 148+int(gimbal[1]/20), 4, 4)) # --- Assert UDP packet values # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT # Go ahead and update the screen with what we've drawn. pygame.display.flip() # Limit to 20 frames per second clock.tick(FPS) # Close the window and quit. 
# If you forget this line, the program will 'hang' # on exit if running from IDLE. pygame.quit ()
gpl-2.0
afonsinoguimaraes/repository.magellan
plugin.video.live.magellan/websocket/_handshake.py
57
4830
""" websocket - WebSocket client library for Python Copyright (C) 2010 Hiroki Ohtani(liris) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA """ import six if six.PY3: from base64 import encodebytes as base64encode else: from base64 import encodestring as base64encode import uuid import hashlib import hmac import os import sys from ._logging import * from ._url import * from ._socket import* from ._http import * from ._exceptions import * __all__ = ["handshake_response", "handshake"] if hasattr(hmac, "compare_digest"): compare_digest = hmac.compare_digest else: def compare_digest(s1, s2): return s1 == s2 # websocket supported version. 
VERSION = 13 class handshake_response(object): def __init__(self, status, headers, subprotocol): self.status = status self.headers = headers self.subprotocol = subprotocol def handshake(sock, hostname, port, resource, **options): headers, key = _get_handshake_headers(resource, hostname, port, options) header_str = "\r\n".join(headers) send(sock, header_str) dump("request header", header_str) status, resp = _get_resp_headers(sock) success, subproto = _validate(resp, key, options.get("subprotocols")) if not success: raise WebSocketException("Invalid WebSocket Header") return handshake_response(status, resp, subproto) def _get_handshake_headers(resource, host, port, options): headers = [] headers.append("GET %s HTTP/1.1" % resource) headers.append("Upgrade: websocket") headers.append("Connection: Upgrade") if port == 80: hostport = host else: hostport = "%s:%d" % (host, port) if "host" in options and options["host"]: headers.append("Host: %s" % options["host"]) else: headers.append("Host: %s" % hostport) if "origin" in options and options["origin"]: headers.append("Origin: %s" % options["origin"]) else: headers.append("Origin: http://%s" % hostport) key = _create_sec_websocket_key() headers.append("Sec-WebSocket-Key: %s" % key) headers.append("Sec-WebSocket-Version: %s" % VERSION) subprotocols = options.get("subprotocols") if subprotocols: headers.append("Sec-WebSocket-Protocol: %s" % ",".join(subprotocols)) if "header" in options: header = options["header"] if isinstance(header, dict): header = map(": ".join, header.items()) headers.extend(header) cookie = options.get("cookie", None) if cookie: headers.append("Cookie: %s" % cookie) headers.append("") headers.append("") return headers, key def _get_resp_headers(sock, success_status=101): status, resp_headers = read_headers(sock) if status != success_status: raise WebSocketBadStatusException("Handshake status %d", status) return status, resp_headers _HEADERS_TO_CHECK = { "upgrade": "websocket", "connection": "upgrade", 
} def _validate(headers, key, subprotocols): subproto = None for k, v in _HEADERS_TO_CHECK.items(): r = headers.get(k, None) if not r: return False, None r = r.lower() if v != r: return False, None if subprotocols: subproto = headers.get("sec-websocket-protocol", None).lower() if not subproto or subproto not in [s.lower() for s in subprotocols]: error("Invalid subprotocol: " + str(subprotocols)) return False, None result = headers.get("sec-websocket-accept", None) if not result: return False, None result = result.lower() if isinstance(result, six.text_type): result = result.encode('utf-8') value = (key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11").encode('utf-8') hashed = base64encode(hashlib.sha1(value).digest()).strip().lower() success = compare_digest(hashed, result) if success: return True, subproto else: return False, None def _create_sec_websocket_key(): randomness = os.urandom(16) return base64encode(randomness).decode('utf-8').strip()
gpl-2.0
msrb/samba
python/examples/dnsserver.py
46
2757
#!/usr/bin/env python # script to test the dnsserver RPC protocol import sys from optparse import OptionParser sys.path.insert(0, "bin/python") import samba import samba.getopt as options from samba.dcerpc import dnsserver, security, dnsp ########### main code ########### if __name__ == "__main__": parser = OptionParser("dnsserver [options] server") sambaopts = options.SambaOptions(parser) credopts = options.CredentialsOptionsDouble(parser) parser.add_option_group(credopts) (opts, args) = parser.parse_args() if len(args) < 3: print("Usage: dnsserver.py [options] DNSSERVER DNSZONE NEWNAME") sys.exit(1) server = args[0] dnszone = args[1] newname = args[2] lp = sambaopts.get_loadparm() creds = credopts.get_credentials(lp) if not creds.authentication_requested(): parser.error("You must supply credentials") binding_str = "ncacn_ip_tcp:%s[print,sign]" % server dns_conn = dnsserver.dnsserver(binding_str, lp, creds) print("querying a NS record") res = dns_conn.DnssrvEnumRecords2(0x00070000, 0, server, dnszone, newname, None, dnsp.DNS_TYPE_NS, 0x0f, None, None) print("adding a NS glue record") name = dnsserver.DNS_RPC_NAME() name.str = newname addrec = dnsserver.DNS_RPC_RECORD() addrec.wType = dnsp.DNS_TYPE_NS addrec.dwFlags = 0 addrec.dwSerial = 0 addrec.dwTtlSeconds = 3600 addrec.dwTimeStamp = 0 addrec.dwReserved = 0 addrec.data = name addrecbuf = dnsserver.DNS_RPC_RECORD_BUF() addrecbuf.rec = addrec res = dns_conn.DnssrvUpdateRecord2(0x00070000, 0, server, dnszone, newname, addrecbuf, None) print("querying the NS record") res = dns_conn.DnssrvEnumRecords2(0x00070000, 0, server, dnszone, newname, None, dnsp.DNS_TYPE_NS, 0x0f, None, None)
gpl-3.0
nurmd2/nurmd
addons/mass_mailing/wizard/test_mailing.py
13
1987
# -*- coding: utf-8 -*-

from openerp import tools
from openerp.osv import osv, fields


class TestMassMailing(osv.TransientModel):
    """Wizard that sends a mass-mailing to a hand-typed list of test
    addresses, rendered exactly as a real campaign mail (including the
    unsubscribe footer)."""
    _name = 'mail.mass_mailing.test'
    _description = 'Sample Mail Wizard'

    _columns = {
        'email_to': fields.char(
            'Recipients', required=True,
            help='Comma-separated list of email addresses.'),
        'mass_mailing_id': fields.many2one(
            'mail.mass_mailing', 'Mailing', required=True),
    }

    _defaults = {
        'email_to': lambda self, cr, uid, ctx=None: self.pool['mail.message']._get_default_from(cr, uid, context=ctx),
    }

    def send_mail_test(self, cr, uid, ids, context=None):
        """Create and immediately send one mail.mail per test recipient.

        Returns True (standard wizard-button convention)."""
        Mail = self.pool['mail.mail']
        for wizard in self.browse(cr, uid, ids, context=context):
            mailing = wizard.mass_mailing_id
            test_emails = tools.email_split(wizard.email_to)
            mail_ids = []
            for test_mail in test_emails:
                # body_html is filled after creation, once the mail id is
                # known (the unsubscribe URL needs the mail record)
                mail_values = {
                    'email_from': mailing.email_from,
                    'reply_to': mailing.reply_to,
                    'email_to': test_mail,
                    'subject': mailing.name,
                    'body_html': '',
                    'notification': True,
                    'mailing_id': mailing.id,
                    'attachment_ids': [(4, attachment.id) for attachment in mailing.attachment_ids],
                }
                mail_mail_obj = Mail.browse(cr, uid, Mail.create(cr, uid, mail_values, context=context), context=context)
                unsubscribe_url = Mail._get_unsubscribe_url(cr, uid, mail_mail_obj, test_mail, context=context)
                body = tools.append_content_to_html(
                    mailing.body_html, unsubscribe_url,
                    plaintext=False, container_tag='p')
                # BUG FIX: the body with the appended unsubscribe footer
                # was computed but then discarded -- the mail was written
                # with the raw mailing body instead.  Write the footer-ed
                # body so test mails match real campaign mails.
                Mail.write(cr, uid, mail_mail_obj.id, {'body_html': body}, context=context)
                mail_ids.append(mail_mail_obj.id)
            Mail.send(cr, uid, mail_ids, context=context)
        return True
gpl-3.0
sniemi/SamPy
bolshoi/collecthalomasses.py
1
3662
""" Collects dark matter halo information from Bolshoi merger tree files. This one uses parallel python: http://www.parallelpython.com """ import glob as g import pp import smnIO.write as write def findDMhaloes(file, times, columns): """ Find all dark matter haloes from a given file that are in self.times. Capture the data that are specified in self.columns. Returns a dictionary where each key is a single time. Each line of data is a string so it's easy to write out to a file. """ output = {} print 'Now processing file {0:>s}\n'.format(file) fh = open(file, 'r') for line in iter(fh): if not line.startswith('#'): tmp = line.split() for key in times: if round(float(tmp[columns['scale']]), 4) == round(key, 4): a = float(tmp[columns['mvir']]) b = float(tmp[columns['orig_mvir']]) c = float(tmp[columns['phantom']]) #make a string from out data string = '%f %f %f\n' % (a, b, c) #save the string to dictionary if output.has_key(key): output[key] += [string, ] else: output[key] = [string, ] return output def writeOutput(data, file, times): """ Writes the output data to ascii files. Each time step is recorded to a single file. 
The filename will contain the output *redshift* """ print 'Outputting data from %s' % file print 'File contains {0:d} timesteps'.format(len(data.keys())) for key in data: tmp = file.split('/')[-1] filename = 'DMhaloz{0:>s}{1:>s}'.format(str(times[key]), tmp) print 'Now outputting to {0:>s} '.format(filename) fh = open(filename, 'a') for line in data[key]: fh.write(line) fh.close() if __name__ == '__main__': #number of cores to use ncpus = 6 #inpute merger tree files inputs = g.glob('/Users/niemi/Desktop/Research/Bolshoi/bolshoi_newisotrees/*.dat') #this is for Bolshoi's tree outputs #scale : redshift times = {0.9973: 0.00270730973629, 0.9073: 0.102171277417, 0.8323: 0.201489847411, 0.7663: 0.304971943103, 0.7123: 0.403902849923, 0.6643: 0.505343971097, 0.6223: 0.606941989394, 0.5863: 0.705611461709, 0.5563: 0.797591227755, 0.5283: 0.892863903085, 0.4983: 1.00682319888, 0.3303: 2.02755071147, 0.2463: 3.06008932197, 0.1983: 4.04286434695, 0.1683: 4.94177064765, 0.1443: 5.93000693001} columns = {'mvir': 9, 'orig_mvir': 10, 'phantom': 8, 'scale': 0} #tuple of all parallel python servers to connect with ppservers = () #creates a job server jobServer = pp.Server(ncpus, ppservers=ppservers) print 'Processing with', jobServer.get_ncpus(), 'workers' #submits jobs jobs = [(input, jobServer.submit(findDMhaloes, (input, times, columns))) for input in inputs] #write the output of each job for input, job in jobs: writeOutput(job(), input, times) print 'Finished writing the individual outputs' jobServer.print_stats() print 'will write the combined output' for key in times: out = 'dmMFz%s.txt' % str(times[key]) print 'Will write', out write.combineFiles(g.glob('*%s*.dat' % str(times[key])), out) print 'All done, check the output file(s)'
bsd-2-clause
Zanzibar82/plugin.video.pelisalacarta
pelisalacarta/channels/mcanime.py
12
21380
# -*- coding: utf-8 -*- #------------------------------------------------------------ # pelisalacarta - XBMC Plugin # Canal para mcanime # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/ #------------------------------------------------------------ import urlparse,urllib2,urllib,re import os,sys from core import logger from core import config from core import scrapertools from core.item import Item from servers import servertools __channel__ = "mcanime" __category__ = "A" __type__ = "generic" __title__ = "MCAnime" __language__ = "ES" DEBUG = config.get_setting("debug") def isGeneric(): return True def mainlist(item): logger.info("[gnula.py] mainlist") itemlist = [] itemlist.append( Item(channel=__channel__, title="Novedades" , action="home" ,url="http://www.mcanime.net/")) itemlist.append( Item(channel=__channel__, title="Foro anime en línea" , action="forum" ,url="http://www.mcanime.net/foro/viewforum.php?f=113")) itemlist.append( Item(channel=__channel__, title="Descarga directa - Novedades" , action="ddnovedades",url="http://www.mcanime.net/descarga_directa/anime")) itemlist.append( Item(channel=__channel__, title="Descarga directa - Listado alfabético", action="ddalpha" ,url="http://www.mcanime.net/descarga_directa/anime")) itemlist.append( Item(channel=__channel__, title="Descarga directa - Categorías" , action="ddcat" ,url="http://www.mcanime.net/descarga_directa/anime")) itemlist.append( Item(channel=__channel__, title="Enciclopedia - Estrenos" , action="estrenos" ,url="http://www.mcanime.net/enciclopedia/estrenos/anime")) return itemlist def estrenos(item): logger.info("[mcanime.py] estrenos") itemlist=[] # Descarga la p·gina data = scrapertools.cache_page(item.url) #logger.info(data) # Extrae las entradas (carpetas) ''' <dl id="addRow9203" class="min row1"> <dd class="thumb"> <img src="/images/anime/th_9203.jpg" width="75" height="100" alt="" /> </dd> <dt><a href="/enciclopedia/anime/cobra_the_animation_rokunin_no_yuushi/9203">Cobra The Animation: 
Rokunin no Yuushi</a> <i>(Serie)</i></dt> <dd>Cobra es un conocido pirata espacial, pero decide cambiar su cara y borrar todas sus memorias. El ahora es un hombre normal, con un trabajo normal y una vida aburrida, pero comienza a recordar su verdadera identidad y sus aventuras comienzan de nuevo. <a href="/enciclopedia/anime/cobra_the_animation_rokunin_no_yuushi/9203">leer m·s.</a></dd> <dd class="small mgn"><a href="/descarga_directa/anime/cobra_the_animation_rokunin_no_yuushi/9203" class="srch_dd">Descargar&nbsp;&nbsp;<img width="14" height="14" src="/images/dds/download_icon.gif" alt="[DD]" /></a></dd> </dl> ''' patron = '<dl id="[^"]+" class="min row.">(.*?)</dl>' matches = re.compile(patron,re.DOTALL).findall(data) for match in matches: data = match patron = '<dd class="thumb">[^<]+' patron += '<img src="([^"]+)"[^>]+>[^<]+' patron += '</dd>[^<]+' patron += '<dt><a href="[^"]+">([^<]+)</a>\s*<i>([^<]+)</i>\s*</dt>[^<]+' matches2 = re.compile(patron,re.DOTALL).findall(data) if len(matches2)>0: scrapedtitle = matches2[0][1].strip() + " " + matches2[0][2].strip() scrapedthumbnail = urlparse.urljoin(item.url,matches2[0][0]) scrapedplot = "" scrapedurl = "" if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") patron = '</dt>(.*?)<dd class="small mgn"><a href="([^"]+)"' matches2 = re.compile(patron,re.DOTALL).findall(data) if len(matches2)>0: try: scrapedplot = unicode( matches2[0][0].strip(), "utf-8" ).encode("iso-8859-1") except: scrapedplot = matches2[0][0].strip() scrapedplot = scrapertools.htmlclean(scrapedplot) scrapedplot = scrapedplot.replace("\n"," ") scrapedplot = scrapedplot.replace("\r"," ") scrapedplot = scrapedplot.replace("\r\n"," ") scrapedurl = urlparse.urljoin(item.url,matches2[0][1]) # AÒade al listado de XBMC itemlist.append( Item(channel=__channel__, action='ddseriedetail', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True ) ) return itemlist 
def home(item): logger.info("[mcanime.py] home") itemlist=[] # Descarga la p·gina data = scrapertools.cache_page(item.url) #logger.info(data) # Extrae las entradas (carpetas) patronvideos = '<div class="release" style="background-image.url\(\'([^\']+)\'\)\;">[^<]+' patronvideos += '<h4>([^<]+)<a href="([^"]+)">([^<]+)</a> <span class="date">([^<]+)</span></h4>[^<]+' patronvideos += '<div class="rimg"><img src="([^"]+)"[^>]+></div>[^<]+' patronvideos += '<div class="rtext">(.*?)</div>[^<]+' patronvideos += '<div class="rfinfo">(.*?)</div>[^<]+' patronvideos += '<div class="rflinks">(.*?)</div>[^<]+' patronvideos += '<div class="rinfo">(.*?)</div>' matches = re.compile(patronvideos,re.DOTALL).findall(data) scrapertools.printMatches(matches) for match in matches: if match[0].endswith("anime.gif"): scrapedtitle = match[3].strip() + " " + match[1].strip() + " (" + match[4] + ")" scrapedurl = urlparse.urljoin(item.url,match[2]) scrapedthumbnail = urlparse.urljoin(item.url,match[5]) scrapedplot = scrapertools.htmlclean(match[6]) scrapedextra = match[8] scrapedtitle = scrapedtitle.replace("[CR]"," CR ") if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") itemlist.append( Item(channel=__channel__, action='findvideos', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra = scrapedextra , folder=True ) ) # Extrae la marca de siguiente p·gina patronvideos = '<span class="next"><a href="([^"]+)">Anteriores</a>...</span>' matches = re.compile(patronvideos,re.DOTALL).findall(data) scrapertools.printMatches(matches) if len(matches)>0: scrapedtitle = "P·gina siguiente" scrapedurl = urlparse.urljoin(item.url,matches[0]) scrapedthumbnail = "" scrapedplot = "" itemlist.append( Item(channel=__channel__, action='home', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True ) ) return itemlist def ddnovedades(item): logger.info("[mcanime.py] 
ddnovedades") itemlist=[] # Descarga la p·gina data = scrapertools.cache_page(item.url) # Extrae las entradas (carpetas) patronvideos = '<ul class="dd_row">[^<]+' patronvideos += '<li class="dd_type dd_anime"><img[^>]+></li>[^<]+' patronvideos += '<li class="dd_update"><img[^>]+>([^<]+)</li>[^<]+' patronvideos += '<li class="dd_update"><a[^>]+>[^<]+</a></li>[^<]+' patronvideos += '<li class="dd_title">[^<]+' patronvideos += '<h5><a href="([^"]+)">([^<]+)</a></h5>' matches = re.compile(patronvideos,re.DOTALL).findall(data) scrapertools.printMatches(matches) for match in matches: # Atributos scrapedtitle = match[2].strip() + " ("+match[0].strip()+")" scrapedurl = urlparse.urljoin(item.url,match[1]) scrapedthumbnail = "" scrapedplot = "" if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") itemlist.append( Item(channel=__channel__, action='ddpostdetail', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True ) ) # Extrae la marca de siguiente p·gina patronvideos = '<span class="current">[^<]+</span><a href="([^"]+)">[^<]+</a>' matches = re.compile(patronvideos,re.DOTALL).findall(data) scrapertools.printMatches(matches) if len(matches)>0: scrapedtitle = ">> Pagina siguiente" scrapedurl = urlparse.urljoin(item.url,matches[0]) scrapedthumbnail = "" scrapedplot = "" itemlist.append( Item(channel=__channel__, action='ddnovedades', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True ) ) return itemlist def ddalpha(item): logger.info("[mcanime.py] ddalpha") itemlist=[] # Descarga la p·gina data = scrapertools.cache_page(item.url) # Extrae las entradas (carpetas) patronvideos = '<a href="(/descarga_directa/anime/lista/[^"]+)">([^<]+)</a>' matches = re.compile(patronvideos,re.DOTALL).findall(data) scrapertools.printMatches(matches) for match in matches: # Atributos scrapedtitle = match[1] scrapedurl = 
urlparse.urljoin(item.url,match[0]) scrapedthumbnail = "" scrapedplot = "" if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") itemlist.append( Item(channel=__channel__, action='ddlist', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True ) ) return itemlist def ddcat(item): logger.info("[mcanime.py] ddcat") itemlist=[] # Descarga la p·gina data = scrapertools.cache_page(item.url) # Extrae las entradas (carpetas) patronvideos = '<a href="(/descarga_directa/anime/genero/[^"]+)">([^<]+)</a>' matches = re.compile(patronvideos,re.DOTALL).findall(data) scrapertools.printMatches(matches) for match in matches: # Atributos scrapedtitle = match[1] scrapedurl = urlparse.urljoin(item.url,match[0]) scrapedthumbnail = "" scrapedplot = "" if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") # AÒade al listado de XBMC itemlist.append( Item(channel=__channel__, action='ddlist', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True ) ) return itemlist def ddlist(item): logger.info("[mcanime.py] ddlist") itemlist=[] # Descarga la p·gina data = scrapertools.cache_page(item.url) # Extrae las entradas (carpetas) patronvideos = '<li class="dd_title"><h5><a href="([^"]+)">(.*?)</a>\s*<i>([^<]+)</i>\s*</h5></li>' matches = re.compile(patronvideos,re.DOTALL).findall(data) scrapertools.printMatches(matches) for match in matches: # Atributos scrapedtitle = match[1].strip().replace("<b>","").replace("</b>","") scrapedurl = urlparse.urljoin(item.url,match[0]) scrapedthumbnail = "" scrapedplot = "" if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") # AÒade al listado de XBMC itemlist.append( Item(channel=__channel__, action='ddseriedetail', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True ) ) 
return itemlist def ddseriedetail(item): logger.info("[mcanime.py] ddseriedetail") itemlist=[] # Descarga la p·gina data = scrapertools.cache_page(item.url) # Foto de la serie de la enciclopedia patron = '<img src="([^"]+)" width="300".*?class="title_pic" />' matches = re.compile(patron,re.DOTALL).findall(data) if len(matches)>0: item.thumbnail = matches[0] # Argumento patron = '<h6>Sinopsis.*?</h6>(.*?)<h6>' matches = re.compile(patron,re.DOTALL).findall(data) if len(matches)>0: item.plot = matches[0] item.plot = item.plot.replace("\n"," ") item.plot = item.plot.replace("\r"," ") item.plot = item.plot.replace("\r\n"," ") item.plot = item.plot.strip() item.plot = scrapertools.htmlclean(matches[0]) # Fansubs patron = '<h6 class="m">Fansubs que trabajan esta serie</h6>[^<]+' patron += '<div id="user_actions">(.*?)</div>' matches = re.compile(patron,re.DOTALL).findall(data) if len(matches)>0: data = matches[0] #logger.info("[mcanime.py] data="+data) patron = '<ul class="dd_row">[^<]+' patron += '<li class="dd_type"><img[^>]+></li>[^<]+' patron += '<li class="dd_update"> <img[^>]+>([^<]+)</li>[^<]+' patron += '<li class="dd_title">[^<]+' patron += '<h5><a href="([^"]+)">([^<]+)</a></h5>' matches = re.compile(patron,re.DOTALL).findall(data) for match in matches: # Atributos scrapedtitle = match[2].strip()+" ("+match[0].strip()+")" scrapedurl = urlparse.urljoin(item.url,match[1]) scrapedthumbnail = item.thumbnail scrapedplot = item.plot if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") # AÒade al listado de XBMC itemlist.append( Item(channel=__channel__, action='ddpostdetail', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True ) ) # Aportaciones de los usuarios patron = '<h6 class="m">Por los Usuarios</h6>[^<]+' patron += '<div id="user_actions">(.*?)</div>' matches = re.compile(patron,re.DOTALL).findall(data) if len(matches)>0: data = matches[0] 
#logger.info("[mcanime.py] data="+data) patron = '<ul class="dd_row">[^<]+' patron += '<li class="dd_type"><img[^>]+></li>[^<]+' patron += '<li class="dd_update"> <img[^>]+>([^<]+)</li>[^<]+' patron += '<li class="dd_title">[^<]+' patron += '<h5><a href="([^"]+)">([^<]+)</a></h5>' matches = re.compile(patron,re.DOTALL).findall(data) for match in matches: # Atributos scrapedtitle = match[2].strip()+" ("+match[0].strip()+")" scrapedurl = urlparse.urljoin(item.url,match[1]) scrapedthumbnail = item.thumbnail scrapedplot = item.plot if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") # AÒade al listado de XBMC itemlist.append( Item(channel=__channel__, action='ddpostdetail', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True ) ) return itemlist def ddpostdetail(item): logger.info("[mcanime.py] ddpostdetail") itemlist=[] # Descarga la p·gina data = scrapertools.cache_page(item.url) #logger.info(data) # Foto de la serie de la enciclopedia patron = '<img src="([^"]+)" width="300".*?class="title_pic" />' matches = re.compile(patron,re.DOTALL).findall(data) if len(matches)>0: item.thumbnail = matches[0] # Argumento - texto del post patron = '<div id="download_detail">(.*?)</div>' matches = re.compile(patron,re.DOTALL).findall(data) if len(matches)>0: item.plot = scrapertools.htmlclean(matches[0]) item.plot = item.plot.replace("\r\n"," ") item.plot = item.plot.replace("\r"," ") item.plot = item.plot.replace("\n"," ") item.plot = item.plot.strip() # ------------------------------------------------------------------------------------ # Busca los enlaces a los videos # ------------------------------------------------------------------------------------ i = 1 itemlist = servertools.find_video_items(data=data) for videoitem in itemlist: videoitem.channel = __channel__ videoitem.action="play" videoitem.folder=False try: fulltitle = unicode( item.title.strip() + " (%d) " + 
videoitem.title, "utf-8" ).encode("iso-8859-1") fulltitle = fulltitle % i except: validchars = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!#$&'()-@^_`." stripped = ''.join(c for c in item.title if c in validchars) fulltitle = stripped.strip() + " (%d) " + videoitem.title fulltitle = fulltitle % i i = i + 1 return itemlist def forum(item): logger.info("[mcanime.py] forum") itemlist=[] # Descarga la p·gina data = scrapertools.cache_page(item.url) #logger.info(data) # Extrae las entradas del foro (series / pelis) patronvideos = '<ul class="topic_row">[^<]+<li class="topic_type"><img.*?' patronvideos += '<li class="topic_title"><h5><a href="([^"]+)">([^<]+)</a>' matches = re.compile(patronvideos,re.DOTALL).findall(data) scrapertools.printMatches(matches) for match in matches: # Extrae try: scrapedtitle = unicode( match[1], "utf-8" ).encode("iso-8859-1") except: scrapedtitle = match[1] scrapedurl = urlparse.urljoin(item.url,match[0].replace("&amp;","&")) scrapedthumbnail = "" scrapedplot = "" # Depuracion if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") # AÒade al listado de XBMC itemlist.append( Item(channel=__channel__, action='forumdetail', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True ) ) # Extrae la siguiente p·gina patronvideos = '<a href="([^"]+)" class="next">(Siguiente &raquo;)</a>' matches = re.compile(patronvideos,re.DOTALL).findall(data) scrapertools.printMatches(matches) for match in matches: scrapedtitle = "P·gina siguiente" scrapedurl = urlparse.urljoin(item.url,match[0].replace("&amp;","&")) scrapedthumbnail = "" scrapedplot = "" # Depuracion if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") # AÒade al listado de XBMC itemlist.append( Item(channel=__channel__, action='forum', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , 
folder=True ) ) return itemlist def forumdetail(item): logger.info("[mcanime.py] forumdetail") itemlist=[] # Descarga la p·gina data = scrapertools.cache_page(item.url) #logger.info(data) # ------------------------------------------------------------------------------------ # Busca los enlaces a los mirrors, paginas, o capÌtulos de las series... # ------------------------------------------------------------------------------------ patronvideos = '([^"]+)" class="next">Siguiente' matches = re.compile(patronvideos,re.DOTALL).findall(data) for match in matches: logger.info("Encontrada pagina siguiente") itemlist.append( Item(channel=__channel__, action='list', title=">> Página siguiente" , url=urlparse.urljoin(item.url,match).replace("&amp;","&") , folder=True ) ) # ------------------------------------------------------------------------------------ # Busca los enlaces a los videos # ------------------------------------------------------------------------------------ # Saca el cuerpo del post #logFile.info("data="+data) #patronvideos = '<div class="content">.*?<div class="poster">.*?</div>(.*?)</div>' patronvideos = '<div class="content">(.*?)<div class="content">' matches = re.compile(patronvideos,re.DOTALL).findall(data) datapost="" if len(matches)>0: datapost=matches[0] else: datapost = "" #logFile.info("dataPost="+dataPost) # Saca el thumbnail patronvideos = '<img src="([^"]+)"' matches = re.compile(patronvideos,re.DOTALL).findall(datapost) thumbnailurl="" logger.info("thumbnails") for match in matches: logger.info(match) if len(matches)>0: thumbnailurl=matches[0] patronvideos = '<img.*?>(.*?)<a' matches = re.compile(patronvideos,re.DOTALL).findall(datapost) descripcion = "" if len(matches)>0: descripcion = matches[0] descripcion = descripcion.replace("<br />","") descripcion = descripcion.replace("<br/>","") descripcion = descripcion.replace("\r","") descripcion = descripcion.replace("\n"," ") descripcion = re.sub("<[^>]+>"," ",descripcion) 
logger.info("descripcion="+descripcion) itemlist.extend( servertools.find_video_items(data=datapost) ) for video in itemlist: if video.folder==False: video.channel = __channel__ video.title = re.sub("<[^>]+>","",item.title) video.action = "play" return itemlist # Verificación automática de canales: Esta función debe devolver "True" si está ok el canal. def test(): from servers import servertools # mainlist mainlist_items = mainlist(Item()) # Da por bueno el canal si alguno de los vídeos de "Novedades" devuelve mirrors novedades_items = home(mainlist_items[0]) bien = False for novedad_item in novedades_items: mirrors = servertools.find_video_items( item=novedad_item ) if len(mirrors)>0: bien = True break return bien
gpl-3.0
jorsea/odoomrp-wip
mrp_product_variants_types/__openerp__.py
17
1592
# -*- encoding: utf-8 -*- ############################################################################## # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## { "name": "MRP product variants types", "version": "1.0", "depends": [ "mrp_product_variants", "product_attribute_types", ], "author": "OdooMRP team," "AvanzOSC," "Serv. Tecnol. Avanzados - Pedro M. Baeza", "contributors": [ "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>", ], "category": "Hidden/Dependency", "website": "http://www.odoomrp.com", "summary": "", "description": """ This module extends product variants on MRP. It adds the possibility of defining a custom value when the attribute is of range type. """, "data": [ "views/mrp_view.xml", ], "installable": True, "auto_install": True, }
agpl-3.0
ondrokrc/gramps
gramps/gen/errors.py
2
4668
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2003-2007 Donald N. Allingham # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Provide Error objects """ class FilterError(Exception): """Error used to report Filter errors""" def __init__(self, value, value2=""): Exception.__init__(self) self.value = value self.value2 = value2 def __str__(self): "Return string representation" return self.value def messages(self): "Return the messages" return (self.value, self.value2) class DateError(Exception): """Error used to report Date errors Might have a .date attribute holding an invalid Date object that triggered the error. 
""" def __init__(self, value=""): Exception.__init__(self) self.value = value def __str__(self): "Return string representation" return self.value class DatabaseError(Exception): """Error used to report database errors""" def __init__(self, value=""): Exception.__init__(self) self.value = value def __str__(self): "Return string representation" return self.value class ReportError(Exception): """Error used to report Report errors.""" def __init__(self, value, value2=""): Exception.__init__(self) self.value = value self.value2 = value2 def __str__(self): "Return string representation" return self.value def messages(self): "Return the messages" return (self.value, self.value2) class GedcomError(Exception): """Error used to report GEDCOM errors""" def __init__(self, value): Exception.__init__(self) self.value = value def __str__(self): "Return string representation" return self.value class GrampsImportError(Exception): """Error used to report mistakes during import of files into Gramps""" def __init__(self, value, value2=""): Exception.__init__(self) self.value = value self.value2 = value2 def __str__(self): "Return string representation" return self.value def messages(self): "Return the messages" return (self.value, self.value2) class PluginError(Exception): """Error used to report plugin errors""" def __init__(self, value): Exception.__init__(self) self.value = value def __str__(self): "Return string representation" return self.value class HandleError(Exception): """Error used to report wrong database handle errors""" def __init__(self, value): Exception.__init__(self) self.value = value def __str__(self): "Return string representation" return self.value class WindowActiveError(Exception): """Error used to report that the request window is already displayed.""" def __init__(self, value): Exception.__init__(self) self.value = value def __str__(self): "Return string representation" return self.value class UnavailableError(Exception): def __init__(self, value): 
Exception.__init__(self) self.value = value def __str__(self): "Return string representation" return self.value class MaskError(Exception): pass class ValidationError(Exception): pass class DbError(Exception): """Error used to report BerkeleyDB errors.""" def __init__(self, value): Exception.__init__(self) try: (errnum, errmsg) = value self.value = errmsg except: self.value = value def __str__(self): "Return string representation" return self.value class MergeError(Exception): """Error used to report merge errors""" def __init__(self, value=""): Exception.__init__(self) self.value = value def __str__(self): "Return string representation" return self.value
gpl-2.0
noodle-learns-programming/wagtail
wagtail/contrib/wagtailapi/utils.py
13
1299
from django.conf import settings from django.utils.six.moves.urllib.parse import urlparse from wagtail.wagtailcore.models import Page class BadRequestError(Exception): pass class URLPath(object): """ This class represents a URL path that should be converted to a full URL. It is used when the domain that should be used is not known at the time the URL was generated. It will get resolved to a full URL during serialisation in api.py. One example use case is the documents endpoint adding download URLs into the JSON. The endpoint does not know the domain name to use at the time so returns one of these instead. """ def __init__(self, path): self.path = path class ObjectDetailURL(object): def __init__(self, model, pk): self.model = model self.pk = pk def get_base_url(request=None): base_url = getattr(settings, 'WAGTAILAPI_BASE_URL', request.site.root_url if request else None) if base_url: # We only want the scheme and netloc base_url_parsed = urlparse(base_url) return base_url_parsed.scheme + '://' + base_url_parsed.netloc def pages_for_site(site): pages = Page.objects.public().live() pages = pages.descendant_of(site.root_page, inclusive=True) return pages
bsd-3-clause
tardyp/buildbot
worker/buildbot_worker/interfaces.py
17
3673
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members # disable pylint warnings triggered by interface definitions # pylint: disable=no-self-argument # pylint: disable=no-method-argument # pylint: disable=inherit-non-class from __future__ import absolute_import from __future__ import print_function from zope.interface import Interface class IWorkerCommand(Interface): """This interface is implemented by all of the worker's Command subclasses. It specifies how the worker can start, interrupt, and query the various Commands running on behalf of the buildmaster.""" def __init__(builder, stepId, args): """Create the Command. 'builder' is a reference to the parent buildbot_worker.base.WorkerForBuilderBase instance, which will be used to send status updates (by calling builder.sendStatus). 'stepId' is a random string which helps correlate worker logs with the master. 'args' is a dict of arguments that comes from the master-side BuildStep, with contents that are specific to the individual Command subclass. This method is not intended to be subclassed.""" def setup(args): """This method is provided for subclasses to override, to extract parameters from the 'args' dictionary. The default implementation does nothing. It will be called from __init__""" def start(): """Begin the command, and return a Deferred. 
While the command runs, it should send status updates to the master-side BuildStep by calling self.sendStatus(status). The 'status' argument is typically a dict with keys like 'stdout', 'stderr', and 'rc'. When the step completes, it should fire the Deferred (the results are not used). If an exception occurs during execution, it may also errback the deferred, however any reasonable errors should be trapped and indicated with a non-zero 'rc' status rather than raising an exception. Exceptions should indicate problems within the buildbot itself, not problems in the project being tested. """ def interrupt(): """This is called to tell the Command that the build is being stopped and therefore the command should be terminated as quickly as possible. The command may continue to send status updates, up to and including an 'rc' end-of-command update (which should indicate an error condition). The Command's deferred should still be fired when the command has finally completed. If the build is being stopped because the worker it shutting down or because the connection to the buildmaster has been lost, the status updates will simply be discarded. The Command does not need to be aware of this. Child shell processes should be killed. Simple ShellCommand classes can just insert a header line indicating that the process will be killed, then os.kill() the child."""
gpl-2.0
aspidites/django
tests/m2m_intermediary/models.py
282
1292
""" Many-to-many relationships via an intermediary table For many-to-many relationships that need extra fields on the intermediary table, use an intermediary model. In this example, an ``Article`` can have multiple ``Reporter`` objects, and each ``Article``-``Reporter`` combination (a ``Writer``) has a ``position`` field, which specifies the ``Reporter``'s position for the given article (e.g. "Staff writer"). """ from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Reporter(models.Model): first_name = models.CharField(max_length=30) last_name = models.CharField(max_length=30) def __str__(self): return "%s %s" % (self.first_name, self.last_name) @python_2_unicode_compatible class Article(models.Model): headline = models.CharField(max_length=100) pub_date = models.DateField() def __str__(self): return self.headline @python_2_unicode_compatible class Writer(models.Model): reporter = models.ForeignKey(Reporter, models.CASCADE) article = models.ForeignKey(Article, models.CASCADE) position = models.CharField(max_length=100) def __str__(self): return '%s (%s)' % (self.reporter, self.position)
bsd-3-clause
shastikk/youtube-dl
youtube_dl/extractor/sohu.py
92
7209
# encoding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..compat import ( compat_str, compat_urllib_request, compat_urllib_parse, ) from ..utils import ( ExtractorError, ) class SohuIE(InfoExtractor): _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?' _TESTS = [{ 'note': 'This video is available only in Mainland China', 'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super', 'md5': '29175c8cadd8b5cc4055001e85d6b372', 'info_dict': { 'id': '382479172', 'ext': 'mp4', 'title': 'MV:Far East Movement《The Illest》', }, 'skip': 'On available in China', }, { 'url': 'http://tv.sohu.com/20150305/n409385080.shtml', 'md5': '699060e75cf58858dd47fb9c03c42cfb', 'info_dict': { 'id': '409385080', 'ext': 'mp4', 'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》', } }, { 'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml', 'md5': '9bf34be48f2f4dadcb226c74127e203c', 'info_dict': { 'id': '78693464', 'ext': 'mp4', 'title': '【爱范品】第31期:MWC见不到的奇葩手机', } }, { 'note': 'Multipart video', 'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml', 'info_dict': { 'id': '78910339', 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', }, 'playlist': [{ 'md5': 'bdbfb8f39924725e6589c146bc1883ad', 'info_dict': { 'id': '78910339_part1', 'ext': 'mp4', 'duration': 294, 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', } }, { 'md5': '3e1f46aaeb95354fd10e7fca9fc1804e', 'info_dict': { 'id': '78910339_part2', 'ext': 'mp4', 'duration': 300, 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', } }, { 'md5': '8407e634175fdac706766481b9443450', 'info_dict': { 'id': '78910339_part3', 'ext': 'mp4', 'duration': 150, 'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆', } }] }, { 'note': 'Video with title containing dash', 'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml', 'info_dict': { 'id': '78932792', 'ext': 'mp4', 'title': 'youtube-dl testing video', }, 'params': { 'skip_download': True } }] def _real_extract(self, url): def _fetch_data(vid_id, mytv=False): if mytv: base_data_url = 
'http://my.tv.sohu.com/play/videonew.do?vid=' else: base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid=' req = compat_urllib_request.Request(base_data_url + vid_id) cn_verification_proxy = self._downloader.params.get('cn_verification_proxy') if cn_verification_proxy: req.add_header('Ytdl-request-proxy', cn_verification_proxy) return self._download_json( req, video_id, 'Downloading JSON data for %s' % vid_id) mobj = re.match(self._VALID_URL, url) video_id = mobj.group('id') mytv = mobj.group('mytv') is not None webpage = self._download_webpage(url, video_id) title = re.sub(r' - 搜狐视频$', '', self._og_search_title(webpage)) vid = self._html_search_regex( r'var vid ?= ?["\'](\d+)["\']', webpage, 'video path') vid_data = _fetch_data(vid, mytv) if vid_data['play'] != 1: if vid_data.get('status') == 12: raise ExtractorError( 'Sohu said: There\'s something wrong in the video.', expected=True) else: raise ExtractorError( 'Sohu said: The video is only licensed to users in Mainland China.', expected=True) formats_json = {} for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'): vid_id = vid_data['data'].get('%sVid' % format_id) if not vid_id: continue vid_id = compat_str(vid_id) formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv) part_count = vid_data['data']['totalBlocks'] playlist = [] for i in range(part_count): formats = [] for format_id, format_data in formats_json.items(): allot = format_data['allot'] data = format_data['data'] clips_url = data['clipsURL'] su = data['su'] video_url = 'newflv.sohu.ccgslb.net' cdnId = None retries = 0 while 'newflv.sohu.ccgslb.net' in video_url: params = { 'prot': 9, 'file': clips_url[i], 'new': su[i], 'prod': 'flash', } if cdnId is not None: params['idc'] = cdnId download_note = 'Downloading %s video URL part %d of %d' % ( format_id, i + 1, part_count) if retries > 0: download_note += ' (retry #%d)' % retries part_info = self._parse_json(self._download_webpage( 'http://%s/?%s' % 
(allot, compat_urllib_parse.urlencode(params)), video_id, download_note), video_id) video_url = part_info['url'] cdnId = part_info.get('nid') retries += 1 if retries > 5: raise ExtractorError('Failed to get video URL') formats.append({ 'url': video_url, 'format_id': format_id, 'filesize': data['clipsBytes'][i], 'width': data['width'], 'height': data['height'], 'fps': data['fps'], }) self._sort_formats(formats) playlist.append({ 'id': '%s_part%d' % (video_id, i + 1), 'title': title, 'duration': vid_data['data']['clipsDuration'][i], 'formats': formats, }) if len(playlist) == 1: info = playlist[0] info['id'] = video_id else: info = { '_type': 'multi_video', 'entries': playlist, 'id': video_id, 'title': title, } return info
unlicense
kalbasit/eve
examples/security/hmac.py
19
2772
# -*- coding: utf-8 -*- """ Auth-HMAC ~~~~~~~~~ Securing an Eve-powered API with HMAC based Authentication. The ``eve.auth.HMACAuth`` class allows for custom Amazon S3-like authentication, which is basically a very secure custom authentication scheme built around the `Authorization` header. The server provides the client with a user id and a secret key through some out-of-band technique (e.g., the service sends the client an e-mail containing the user id and secret key). The client will use the supplied secret key to sign all requests. When the client wants to send a request he builds the complete request and then using the secret key computes a hash over the complete message body (and optionally some of the message headers if required) Next the client add the computed hash and his userid to the message in the Authorization header: Authorization: johndoe:uCMfSzkjue+HSDygYB5aEg== and sends it to the service. The service retrieves the userid from the message header and searches the private key for that user in its own database. Next he computes the hash over the message body (and selected headers) using the key to generate its hash. If the hash the client sends matches the hash the server computes the server knows the message was send by the real client and was not altered in any way. Really the only tricky part is sharing a secret key with the user and keeping that secure. That is why some services allow for generation of shared keys with a limited life time so you can give the key to a third party to temporarily work on your behalf. This is also the reason why the secret key is generally provided through out-of-band channels (often a webpage or, as said above, an email or plain old paper). The HMACAuth class also supports access roles. Checkout Eve at https://github.com/nicolaiarocci/eve This snippet by Nicola Iarocci can be used freely for anything you like. Consider it public domain. 
""" from eve import Eve from eve.auth import HMACAuth from hashlib import sha1 import hmac class HMACAuth(HMACAuth): def check_auth(self, userid, hmac_hash, headers, data, allowed_roles): # use Eve's own db driver; no additional connections/resources are used accounts = app.data.driver.db['accounts'] user = accounts.find_one({'userid': userid}) if user: secret_key = user['secret_key'] # in this implementation we only hash request data, ignoring the # headers. return user and \ hmac.new(str(secret_key), str(data), sha1).hexdigest() == hmac_hash if __name__ == '__main__': app = Eve(auth=HMACAuth) app.run()
bsd-3-clause
antonow/concept-to-clinic
prediction/src/tests/test_identify_trained_model_predict.py
1
1414
import numpy as np import pytest from ..algorithms.identify import trained_model from ..tests.test_endpoints import skip_slow_test @pytest.fixture def dicom_path_001(): yield '../images/LIDC-IDRI-0001/1.3.6.1.4.1.14519.5.2.1.6279.6001.298806137288633453246975630178/' \ '1.3.6.1.4.1.14519.5.2.1.6279.6001.179049373636438705059720603192' @pytest.fixture def dicom_path_003(): yield "/images/LIDC-IDRI-0003/1.3.6.1.4.1.14519.5.2.1.6279.6001.101370605276577556143013894866/" \ "1.3.6.1.4.1.14519.5.2.1.6279.6001.170706757615202213033480003264" @pytest.fixture def nodule_locations_001(): yield {"x": 317, "y": 367, "z": 7} @pytest.fixture def nodule_locations_003(): yield {"x": 369, "y": 347, "z": 6} @pytest.mark.skipif(skip_slow_test, reason='Takes very long') def test_identify_nodules_001(dicom_path_001, nodule_locations_001): predicted = trained_model.predict(dicom_path_001) first = predicted[0] dist = np.sqrt(np.sum([(first[s] - nodule_locations_001[s]) ** 2 for s in ["x", "y", "z"]])) assert (dist < 10) @pytest.mark.skipif(skip_slow_test, reason='Takes very long') def test_identify_nodules_003(dicom_path_003, nodule_locations_003): predicted = trained_model.predict(dicom_path_003) first = predicted[0] dist = np.sqrt(np.sum([(first[s] - nodule_locations_003[s]) ** 2 for s in ["x", "y", "z"]])) assert (dist < 10)
mit
yangsiy/shadowsocks
tests/nose_plugin.py
1072
1164
#!/usr/bin/env python # # Copyright 2015 clowwindy # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import nose from nose.plugins.base import Plugin class ExtensionPlugin(Plugin): name = "ExtensionPlugin" def options(self, parser, env): Plugin.options(self, parser, env) def configure(self, options, config): Plugin.configure(self, options, config) self.enabled = True def wantFile(self, file): return file.endswith('.py') def wantDirectory(self, directory): return True def wantModule(self, file): return True if __name__ == '__main__': nose.main(addplugins=[ExtensionPlugin()])
apache-2.0
odejesush/tensorflow
tensorflow/python/training/sync_replicas_optimizer.py
16
19827
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Synchronize replicas for training.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import types_pb2 from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import optimizer from tensorflow.python.training import queue_runner from tensorflow.python.training import session_manager from tensorflow.python.training import session_run_hook # Please note that the gradients from replicas are averaged instead of summed # (as in the old sync_replicas_optimizer) so you need to increase the learning # rate according to the number of replicas. This change is introduced to be # consistent with how gradients are aggregated (averaged) within a batch in a # replica. class SyncReplicasOptimizer(optimizer.Optimizer): """Class to synchronize, aggregate gradients and pass them to the optimizer. In a typical asynchronous training environment, it's common to have some stale gradients. 
For example, with a N-replica asynchronous training, gradients will be applied to the variables N times independently. Depending on each replica's training speed, some gradients might be calculated from copies of the variable from several steps back (N-1 steps on average). This optimizer avoids stale gradients by collecting gradients from all replicas, averaging them, then applying them to the variables in one shot, after which replicas can fetch the new variables and continue. The following accumulators/queue are created: <empty line> * N `gradient accumulators`, one per variable to train. Gradients are pushed to them and the chief worker will wait until enough gradients are collected and then average them before applying to variables. The accumulator will drop all stale gradients (more details in the accumulator op). * 1 `token` queue where the optimizer pushes the new global_step value after all variables are updated. The following local variable is created: * `sync_rep_local_step`, one per replica. Compared against the global_step in each accumulator to check for staleness of the gradients. The optimizer adds nodes to the graph to collect gradients and pause the trainers until variables are updated. For the Parameter Server job: <empty line> 1. An accumulator is created for each variable, and each replica pushes the gradients into the accumulators instead of directly applying them to the variables. 2. Each accumulator averages once enough gradients (replicas_to_aggregate) have been accumulated. 3. Apply the averaged gradients to the variables. 4. Only after all variables have been updated, increment the global step. 5. Only after step 4, pushes `global_step` in the `token_queue`, once for each worker replica. The workers can now fetch the global step, use it to update its local_step variable and start the next batch. For the replicas: <empty line> 1. Start a step: fetch variables and compute gradients. 2. 
Once the gradients have been computed, push them into gradient accumulators. Each accumulator will check the staleness and drop the stale. 3. After pushing all the gradients, dequeue an updated value of global_step from the token queue and record that step to its local_step variable. Note that this is effectively a barrier. 4. Start the next batch. ### Usage ```python # Create any optimizer to update the variables, say a simple SGD: opt = GradientDescentOptimizer(learning_rate=0.1) # Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each # step the optimizer collects 50 gradients before applying to variables. # Note that if you want to have 2 backup replicas, you can change # total_num_replicas=52 and make sure this number matches how many physical # replicas you started in your job. opt = tf.SyncReplicasOptimizer(opt, replicas_to_aggregate=50, total_num_replicas=50) # Some models have startup_delays to help stabilize the model but when using # sync_replicas training, set it to 0. # Now you can call `minimize()` or `compute_gradients()` and # `apply_gradients()` normally training_op = opt.minimize(total_loss, global_step=self.global_step) # You can create the hook which handles initialization and queues. sync_replicas_hook = opt.make_session_run_hook(is_chief) ``` In the training program, every worker will run the train_op as if not synchronized. ```python with training.MonitoredTrainingSession( master=workers[worker_id].target, is_chief=is_chief, hooks=[sync_replicas_hook]) as mon_sess: while not mon_sess.should_stop(): mon_sess.run(training_op) ``` @@__init__ @@compute_gradients @@apply_gradients @@get_chief_queue_runner @@get_init_tokens_op """ def __init__(self, opt, replicas_to_aggregate, total_num_replicas=None, variable_averages=None, variables_to_average=None, use_locking=False, name="sync_replicas"): """Construct a sync_replicas optimizer. Args: opt: The actual optimizer that will be used to compute and apply the gradients. 
Must be one of the Optimizer classes. replicas_to_aggregate: number of replicas to aggregate for each variable update. total_num_replicas: Total number of tasks/workers/replicas, could be different from replicas_to_aggregate. If total_num_replicas > replicas_to_aggregate: it is backup_replicas + replicas_to_aggregate. If total_num_replicas < replicas_to_aggregate: Replicas compute multiple batches per update to variables. variable_averages: Optional `ExponentialMovingAverage` object, used to maintain moving averages for the variables passed in `variables_to_average`. variables_to_average: a list of variables that need to be averaged. Only needed if variable_averages is passed in. use_locking: If True use locks for update operation. name: string. Optional name of the returned operation. """ if total_num_replicas is None: total_num_replicas = replicas_to_aggregate super(SyncReplicasOptimizer, self).__init__(use_locking, name) logging.info( "SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s", replicas_to_aggregate, total_num_replicas) self._opt = opt self._replicas_to_aggregate = replicas_to_aggregate self._gradients_applied = False self._variable_averages = variable_averages self._variables_to_average = variables_to_average self._total_num_replicas = total_num_replicas self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate) self._global_step = None self._sync_token_queue = None # The synchronization op will be executed in a queue runner which should # only be executed by one of the replicas (usually the chief). self._chief_queue_runner = None # Remember which accumulator is on which device to set the initial step in # the accumulator to be global step. This list contains list of the # following format: (accumulator, device). self._accumulator_list = [] def compute_gradients(self, *args, **kwargs): """Compute gradients of "loss" for the variables in "var_list". This simply wraps the compute_gradients() from the real optimizer. 
The gradients will be aggregated in the apply_gradients() so that user can modify the gradients like clipping with per replica global norm if needed. The global norm with aggregated gradients can be bad as one replica's huge gradients can hurt the gradients from other replicas. Args: *args: Arguments for compute_gradients(). **kwargs: Keyword arguments for compute_gradients(). Returns: A list of (gradient, variable) pairs. """ return self._opt.compute_gradients(*args, **kwargs) def apply_gradients(self, grads_and_vars, global_step=None, name=None): """Apply gradients to variables. This contains most of the synchronization implementation and also wraps the apply_gradients() from the real optimizer. Args: grads_and_vars: List of (gradient, variable) pairs as returned by compute_gradients(). global_step: Optional Variable to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the Optimizer constructor. Returns: train_op: The op to dequeue a token so the replicas can exit this batch and start the next one. This is executed by each replica. Raises: ValueError: If the grads_and_vars is empty. ValueError: If global step is not provided, the staleness cannot be checked. 
""" if not grads_and_vars: raise ValueError("Must supply at least one variable") if global_step is None: raise ValueError("Global step is required to check staleness") self._global_step = global_step train_ops = [] aggregated_grad = [] var_list = [] self._local_step = variables.Variable( initial_value=0, trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES], dtype=global_step.dtype.base_dtype, name="sync_rep_local_step") self.local_step_init_op = state_ops.assign(self._local_step, global_step) chief_init_ops = [self.local_step_init_op] self.ready_for_local_init_op = variables.report_uninitialized_variables( variables.global_variables()) with ops.name_scope(None, self._name): for grad, var in grads_and_vars: var_list.append(var) with ops.device(var.device): # Dense gradients. if grad is None: aggregated_grad.append(None) # pass-through. continue elif isinstance(grad, ops.Tensor): grad_accum = data_flow_ops.ConditionalAccumulator( grad.dtype, shape=var.get_shape(), shared_name=var.name + "/grad_accum") train_ops.append(grad_accum.apply_grad( grad, local_step=self._local_step)) aggregated_grad.append(grad_accum.take_grad( self._replicas_to_aggregate)) else: if not isinstance(grad, ops.IndexedSlices): raise ValueError("Unknown grad type!") grad_accum = data_flow_ops.SparseConditionalAccumulator( grad.dtype, shape=(), shared_name=var.name + "/grad_accum") train_ops.append(grad_accum.apply_indexed_slices_grad( grad, local_step=self._local_step)) aggregated_grad.append(grad_accum.take_indexed_slices_grad( self._replicas_to_aggregate)) self._accumulator_list.append((grad_accum, var.device)) aggregated_grads_and_vars = zip(aggregated_grad, var_list) # sync_op will be assigned to the same device as the global step. with ops.device(global_step.device), ops.name_scope(""): update_op = self._opt.apply_gradients(aggregated_grads_and_vars, global_step) # Create token queue. 
with ops.device(global_step.device), ops.name_scope(""): sync_token_queue = ( data_flow_ops.FIFOQueue(-1, global_step.dtype.base_dtype, shapes=(), name="sync_token_q", shared_name="sync_token_q")) self._sync_token_queue = sync_token_queue # dummy_queue is passed to the queue runner. Don't use the real queues # because the queue runner doesn't automatically reopen it once it # closed queues in PS devices. dummy_queue = ( data_flow_ops.FIFOQueue(1, types_pb2.DT_INT32, shapes=(), name="dummy_queue", shared_name="dummy_queue")) with ops.device(global_step.device), ops.name_scope(""): # Replicas have to wait until they can get a token from the token queue. with ops.control_dependencies(train_ops): token = sync_token_queue.dequeue() train_op = state_ops.assign(self._local_step, token) with ops.control_dependencies([update_op]): # Sync_op needs to insert tokens to the token queue at the end of the # step so the replicas can fetch them to start the next step. tokens = array_ops.fill([self._tokens_per_step], global_step) sync_op = sync_token_queue.enqueue_many((tokens,)) if self._variable_averages is not None: with ops.control_dependencies([sync_op]), ops.name_scope(""): sync_op = self._variable_averages.apply( self._variables_to_average) self._chief_queue_runner = queue_runner.QueueRunner(dummy_queue, [sync_op]) for accum, dev in self._accumulator_list: with ops.device(dev): chief_init_ops.append( accum.set_global_step( global_step, name="SetGlobalStep")) self.chief_init_op = control_flow_ops.group(*(chief_init_ops)) self._gradients_applied = True return train_op def get_chief_queue_runner(self): """Returns the QueueRunner for the chief to execute. This includes the operations to synchronize replicas: aggregate gradients, apply to variables, increment global step, insert tokens to token queue. Note that this can only be called after calling apply_gradients() which actually generates this queuerunner. Returns: A `QueueRunner` for chief to execute. 
Raises: ValueError: If this is called before apply_gradients(). """ if self._gradients_applied is False: raise ValueError("Should be called after apply_gradients().") return self._chief_queue_runner def get_slot(self, *args, **kwargs): """Return a slot named "name" created for "var" by the Optimizer. This simply wraps the get_slot() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: The `Variable` for the slot if it was created, `None` otherwise. """ return self._opt.get_slot(*args, **kwargs) def get_slot_names(self, *args, **kwargs): """Return a list of the names of slots created by the `Optimizer`. This simply wraps the get_slot_names() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: A list of strings. """ return self._opt.get_slot_names(*args, **kwargs) def get_init_tokens_op(self, num_tokens=-1): """Returns the op to fill the sync_token_queue with the tokens. This is supposed to be executed in the beginning of the chief/sync thread so that even if the total_num_replicas is less than replicas_to_aggregate, the model can still proceed as the replicas can compute multiple steps per variable update. Make sure: `num_tokens >= replicas_to_aggregate - total_num_replicas`. Args: num_tokens: Number of tokens to add to the queue. Returns: An op for the chief/sync replica to fill the token queue. Raises: ValueError: If this is called before apply_gradients(). ValueError: If num_tokens are smaller than replicas_to_aggregate - total_num_replicas. 
""" if self._gradients_applied is False: raise ValueError( "get_init_tokens_op() should be called after apply_gradients().") tokens_needed = self._replicas_to_aggregate - self._total_num_replicas if num_tokens == -1: num_tokens = self._replicas_to_aggregate elif num_tokens < tokens_needed: raise ValueError( "Too few tokens to finish the first step: %d (given) vs %d (needed)" % (num_tokens, tokens_needed)) if num_tokens > 0: with ops.device(self._global_step.device), ops.name_scope(""): tokens = array_ops.fill([num_tokens], self._global_step) init_tokens = self._sync_token_queue.enqueue_many((tokens,)) else: init_tokens = control_flow_ops.no_op(name="no_init_tokens") return init_tokens def make_session_run_hook(self, is_chief, num_tokens=-1): """Creates a hook to handle SyncReplicasHook ops such as initialization.""" if is_chief: return _SyncReplicasOptimizerHook(self.chief_init_op, self.ready_for_local_init_op, self.get_chief_queue_runner(), self.get_init_tokens_op(num_tokens)) return _SyncReplicasOptimizerHook(self.local_step_init_op, self.ready_for_local_init_op, None, None) class _SyncReplicasOptimizerHook(session_run_hook.SessionRunHook): """A SessionRunHook handles ops related to SyncReplicasOptimizer.""" def __init__(self, local_init_op, ready_for_local_init_op, q_runner, init_tokens_op): """Creates hook to handle SyncReplicaOptimizer initialization ops. Args: local_init_op: Either `SyncReplicasOptimizer.chief_init_op` or `SyncReplicasOptimizer.local_step_init_op`. 
ready_for_local_init_op: `SyncReplicasOptimizer.ready_for_local_init_op` q_runner: Either `SyncReplicasOptimizer.get_chief_queue_runner` or `None` init_tokens_op: `SyncReplicasOptimizer.get_init_tokens_op` or None """ self._local_init_op = local_init_op self._ready_for_local_init_op = ready_for_local_init_op self._q_runner = q_runner self._init_tokens_op = init_tokens_op def after_create_session(self, session, coord): """Runs SyncReplicasOptimizer initialization ops.""" local_init_success, msg = session_manager._ready( # pylint: disable=protected-access self._ready_for_local_init_op, session, "Model is not ready for SyncReplicasOptimizer local init.") if not local_init_success: raise RuntimeError( "Init operations did not make model ready for SyncReplicasOptimizer " "local_init. Init op: %s, error: %s" % (self._local_init_op.name, msg)) session.run(self._local_init_op) if self._init_tokens_op is not None: session.run(self._init_tokens_op) if self._q_runner is not None: self._q_runner.create_threads( session, coord=coord, daemon=True, start=True)
apache-2.0
HybridF5/nova
nova/tests/unit/objects/test_instance_numa_topology.py
8
7427
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid import mock from oslo_serialization import jsonutils from nova import exception from nova import objects from nova.objects import fields from nova.tests.unit.objects import test_objects fake_instance_uuid = str(uuid.uuid4()) fake_obj_numa_topology = objects.InstanceNUMATopology( instance_uuid = fake_instance_uuid, cells=[ objects.InstanceNUMACell( id=0, cpuset=set([1, 2]), memory=512, pagesize=2048), objects.InstanceNUMACell( id=1, cpuset=set([3, 4]), memory=512, pagesize=2048) ]) fake_numa_topology = fake_obj_numa_topology._to_dict() fake_db_topology = { 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': 0, 'id': 1, 'instance_uuid': fake_instance_uuid, 'numa_topology': fake_obj_numa_topology._to_json() } fake_old_db_topology = dict(fake_db_topology) # copy fake_old_db_topology['numa_topology'] = jsonutils.dumps(fake_numa_topology) def get_fake_obj_numa_topology(context): fake_obj_numa_topology_cpy = fake_obj_numa_topology.obj_clone() fake_obj_numa_topology_cpy._context = context return fake_obj_numa_topology_cpy class _TestInstanceNUMATopology(object): @mock.patch('nova.db.instance_extra_update_by_uuid') def test_create(self, mock_update): topo_obj = get_fake_obj_numa_topology(self.context) topo_obj.instance_uuid = fake_db_topology['instance_uuid'] topo_obj.create() self.assertEqual(1, len(mock_update.call_args_list)) @mock.patch('nova.db.instance_extra_update_by_uuid') def test_save(self, 
mock_update): topo_obj = get_fake_obj_numa_topology(self.context) topo_obj.instance_uuid = fake_db_topology['instance_uuid'] topo_obj._save() self.assertEqual(1, len(mock_update.call_args_list)) def _test_get_by_instance_uuid(self): numa_topology = objects.InstanceNUMATopology.get_by_instance_uuid( self.context, fake_db_topology['instance_uuid']) self.assertEqual(fake_db_topology['instance_uuid'], numa_topology.instance_uuid) for obj_cell, topo_cell in zip( numa_topology.cells, fake_obj_numa_topology['cells']): self.assertIsInstance(obj_cell, objects.InstanceNUMACell) self.assertEqual(topo_cell.id, obj_cell.id) self.assertEqual(topo_cell.cpuset, obj_cell.cpuset) self.assertEqual(topo_cell.memory, obj_cell.memory) self.assertEqual(topo_cell.pagesize, obj_cell.pagesize) @mock.patch('nova.db.instance_extra_get_by_instance_uuid') def test_get_by_instance_uuid(self, mock_get): mock_get.return_value = fake_db_topology self._test_get_by_instance_uuid() @mock.patch('nova.db.instance_extra_get_by_instance_uuid') def test_get_by_instance_uuid_old(self, mock_get): mock_get.return_value = fake_old_db_topology self._test_get_by_instance_uuid() @mock.patch('nova.db.instance_extra_get_by_instance_uuid') def test_get_by_instance_uuid_missing(self, mock_get): mock_get.return_value = None self.assertRaises( exception.NumaTopologyNotFound, objects.InstanceNUMATopology.get_by_instance_uuid, self.context, 'fake_uuid') def test_siblings(self): inst_cell = objects.InstanceNUMACell( cpuset=set([0, 1, 2])) self.assertEqual([], inst_cell.siblings) topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=0) inst_cell = objects.InstanceNUMACell( cpuset=set([0, 1, 2]), cpu_topology=topo) self.assertEqual([], inst_cell.siblings) # One thread actually means no threads topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1) inst_cell = objects.InstanceNUMACell( cpuset=set([0, 1, 2]), cpu_topology=topo) self.assertEqual([], inst_cell.siblings) topo = objects.VirtCPUTopology(sockets=1, 
cores=2, threads=2) inst_cell = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), cpu_topology=topo) self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings) topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4) inst_cell = objects.InstanceNUMACell( cpuset=set([0, 1, 2, 3]), cpu_topology=topo) self.assertEqual([set([0, 1, 2, 3])], inst_cell.siblings) def test_pin(self): inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), cpu_pinning=None) inst_cell.pin(0, 14) self.assertEqual({0: 14}, inst_cell.cpu_pinning) inst_cell.pin(12, 14) self.assertEqual({0: 14}, inst_cell.cpu_pinning) inst_cell.pin(1, 16) self.assertEqual({0: 14, 1: 16}, inst_cell.cpu_pinning) def test_pin_vcpus(self): inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), cpu_pinning=None) inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17)) self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning) def test_default_behavior(self): inst_cell = objects.InstanceNUMACell() self.assertEqual(0, len(inst_cell.obj_get_changes())) def test_cpu_pinning_requested_cell(self): inst_cell = objects.InstanceNUMACell(cpuset=set([0, 1, 2, 3]), cpu_pinning=None) self.assertFalse(inst_cell.cpu_pinning_requested) inst_cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED self.assertTrue(inst_cell.cpu_pinning_requested) def test_cpu_pinning_requested(self): fake_topo_obj = copy.deepcopy(fake_obj_numa_topology) self.assertFalse(fake_topo_obj.cpu_pinning_requested) for cell in fake_topo_obj.cells: cell.cpu_policy = fields.CPUAllocationPolicy.DEDICATED self.assertTrue(fake_topo_obj.cpu_pinning_requested) def test_clear_host_pinning(self): topo_obj = get_fake_obj_numa_topology(self.context) topo_obj.cells[0].pin_vcpus((1, 10), (2, 11)) topo_obj.cells[0].id = 3 topo_obj.cells[1].pin_vcpus((3, 0), (4, 1)) topo_obj.cells[1].id = 0 topo_obj.clear_host_pinning() self.assertEqual({}, topo_obj.cells[0].cpu_pinning) self.assertEqual(-1, topo_obj.cells[0].id) self.assertEqual({}, 
topo_obj.cells[1].cpu_pinning) self.assertEqual(-1, topo_obj.cells[1].id) class TestInstanceNUMATopology(test_objects._LocalTest, _TestInstanceNUMATopology): pass class TestInstanceNUMATopologyRemote(test_objects._RemoteTest, _TestInstanceNUMATopology): pass
apache-2.0
dnlove/ns3
src/topology-read/bindings/modulegen__gcc_LP64.py
2
156760
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.topology_read', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] 
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## node-container.h (module 'network'): ns3::NodeContainer [class] module.add_class('NodeContainer', import_from_module='ns.network') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## topology-reader-helper.h (module 'topology-read'): ns3::TopologyReaderHelper [class] module.add_class('TopologyReaderHelper') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', 
outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 
'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## topology-reader.h (module 'topology-read'): ns3::TopologyReader [class] module.add_class('TopologyReader', parent=root_module['ns3::Object']) ## topology-reader.h (module 'topology-read'): ns3::TopologyReader::Link [class] module.add_class('Link', outer_class=root_module['ns3::TopologyReader']) ## 
trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## inet-topology-reader.h (module 'topology-read'): ns3::InetTopologyReader [class] 
module.add_class('InetTopologyReader', parent=root_module['ns3::TopologyReader']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## net-device.h (module 'network'): ns3::NetDevice [class] module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object']) ## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration] module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], 
outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network') ## node.h (module 'network'): ns3::Node [class] module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object']) ## orbis-topology-reader.h (module 'topology-read'): ns3::OrbisTopologyReader [class] module.add_class('OrbisTopologyReader', parent=root_module['ns3::TopologyReader']) ## rocketfuel-topology-reader.h (module 'topology-read'): ns3::RocketfuelTopologyReader [class] module.add_class('RocketfuelTopologyReader', parent=root_module['ns3::TopologyReader']) ## nstime.h (module 'core'): ns3::TimeChecker [class] module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) module.add_container('std::map< std::string, std::string >', ('std::string', 'std::string'), container_type='map') ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_methods(root_module): register_Ns3Address_methods(root_module, 
root_module['ns3::Address']) register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address']) register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask']) register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address']) register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix']) register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer']) register_Ns3TopologyReaderHelper_methods(root_module, root_module['ns3::TopologyReaderHelper']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t']) register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< 
ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3Time_methods(root_module, root_module['ns3::Time']) register_Ns3TopologyReader_methods(root_module, root_module['ns3::TopologyReader']) register_Ns3TopologyReaderLink_methods(root_module, root_module['ns3::TopologyReader::Link']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3EmptyAttributeValue_methods(root_module, 
root_module['ns3::EmptyAttributeValue']) register_Ns3InetTopologyReader_methods(root_module, root_module['ns3::InetTopologyReader']) register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker']) register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue']) register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker']) register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue']) register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker']) register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue']) register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker']) register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue']) register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice']) register_Ns3Node_methods(root_module, root_module['ns3::Node']) register_Ns3OrbisTopologyReader_methods(root_module, root_module['ns3::OrbisTopologyReader']) register_Ns3RocketfuelTopologyReader_methods(root_module, root_module['ns3::RocketfuelTopologyReader']) register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker']) register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker']) register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue']) return def register_Ns3Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## address.h (module 'network'): ns3::Address::Address() [constructor] cls.add_constructor([]) ## address.h (module 'network'): 
ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor] cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor] cls.add_constructor([param('ns3::Address const &', 'address')]) ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function] cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function] cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 'uint32_t', [param('uint8_t *', 'buffer')], is_const=True) ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')]) ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function] cls.add_method('GetLength', 'uint8_t', [], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## address.h (module 'network'): bool 
ns3::Address::IsInvalid() const [member function] cls.add_method('IsInvalid', 'bool', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function] cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True) ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function] cls.add_method('Register', 'uint8_t', [], is_static=True) ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True) return def register_Ns3AttributeConstructionList_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function] cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')]) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function] cls.add_method('End', 
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True) return def register_Ns3AttributeConstructionListItem_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable] cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h 
(module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## 
ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member 
# NOTE(review): auto-generated PyBindGen binding registrations (ns-3 Python
# bindings). Do not hand-edit logic — regenerate with the bindings scanner.
# The statements immediately below are the tail of
# register_Ns3Ipv4Address_methods, whose `def` line precedes this chunk.
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
    cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'address')])
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    return

def register_Ns3Ipv4Mask_methods(root_module, cls):
    # Register operators, constructors and member functions of ns3::Ipv4Mask.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
    cls.add_constructor([param('uint32_t', 'mask')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
    cls.add_constructor([param('char const *', 'mask')])
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
    cls.add_method('Get', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
    cls.add_method('GetInverse', 'uint32_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True)
    ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
    cls.add_method('Set', 'void', [param('uint32_t', 'mask')])
    return

def register_Ns3Ipv6Address_methods(root_module, cls):
    # Register operators, constructors and member functions of ns3::Ipv6Address.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
    cls.add_method('IsLinkLocal', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
    cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
    cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
    cls.add_method('Set', 'void', [param('uint8_t *', 'address')])
    return

def register_Ns3Ipv6Prefix_methods(root_module, cls):
    # Register operators, constructors and member functions of ns3::Ipv6Prefix.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
    cls.add_constructor([param('uint8_t *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
    cls.add_constructor([param('char const *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
    cls.add_constructor([param('uint8_t', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    return

def register_Ns3NodeContainer_methods(root_module, cls):
    # Register constructors and member functions of ns3::NodeContainer.
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls.add_constructor([])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
    cls.add_constructor([param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
    cls.add_method('Add', 'void', [param('ns3::NodeContainer', 'other')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
    cls.add_method('Begin', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
    cls.add_method('Create', 'void', [param('uint32_t', 'n')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
    cls.add_method('Create', 'void', [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
    cls.add_method('End', '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >', [], is_const=True)
    ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get', 'ns3::Ptr< ns3::Node >', [param('uint32_t', 'i')], is_const=True)
    ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
    cls.add_method('GetGlobal', 'ns3::NodeContainer', [], is_static=True)
    ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
    cls.add_method('GetN', 'uint32_t', [], is_const=True)
    return

def register_Ns3ObjectBase_methods(root_module, cls):
    # Register constructors and member functions of ns3::ObjectBase
    # (attribute get/set and trace connect/disconnect API).
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')], is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3ObjectDeleter_methods(root_module, cls):
    # Register constructors and member functions of ns3::ObjectDeleter.
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    # Register constructors and member functions of the
    # SimpleRefCount<Object, ObjectBase, ObjectDeleter> template instantiation.
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3TagBuffer_methods(root_module, cls):
    # Register constructors and member functions of ns3::TagBuffer
    # (raw read/write accessors over a byte range).
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble', 'double', [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return

def register_Ns3TopologyReaderHelper_methods(root_module, cls):
    # Register constructors and member functions of ns3::TopologyReaderHelper.
    ## topology-reader-helper.h (module 'topology-read'): ns3::TopologyReaderHelper::TopologyReaderHelper(ns3::TopologyReaderHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TopologyReaderHelper const &', 'arg0')])
    ## topology-reader-helper.h (module 'topology-read'): ns3::TopologyReaderHelper::TopologyReaderHelper() [constructor]
    cls.add_constructor([])
    ## topology-reader-helper.h (module 'topology-read'): ns3::Ptr<ns3::TopologyReader> ns3::TopologyReaderHelper::GetTopologyReader() [member function]
    cls.add_method('GetTopologyReader', 'ns3::Ptr< ns3::TopologyReader >', [])
    ## topology-reader-helper.h (module 'topology-read'): void ns3::TopologyReaderHelper::SetFileName(std::string const fileName) [member function]
    cls.add_method('SetFileName', 'void', [param('std::string const', 'fileName')])
    ## topology-reader-helper.h (module 'topology-read'): void ns3::TopologyReaderHelper::SetFileType(std::string const fileType) [member function]
    cls.add_method('SetFileType', 'void', [param('std::string const', 'fileType')])
    return

def register_Ns3TypeId_methods(root_module, cls):
    # Register operators, constructors and member functions of ns3::TypeId.
    # NOTE(review): this function continues past the end of this chunk.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', 
[param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True) ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function] 
cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info')], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 
'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'tid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable] cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] 
cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) 
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('+', 
root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], 
root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_unary_numeric_operator('-') cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], 
root_module['ns3::int64x64_t'], param('signed char const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) 
[constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor] cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')]) ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True) ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True) ## object.h (module 'core'): ns3::TypeId 
ns3::Object::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object.h (module 'core'): void ns3::Object::Start() [member function] cls.add_method('Start', 'void', []) ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor] cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected') ## object.h (module 'core'): void ns3::Object::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::DoStart() [member function] cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True) ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True) return def register_Ns3ObjectAggregateIterator_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')]) ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor] cls.add_constructor([]) ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function] cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', []) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): 
ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## 
simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## 
simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function] cls.add_method('Cleanup', 'void', [], is_static=True) return def register_Ns3Time_methods(root_module, cls): cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('!=') cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right')) cls.add_output_stream_operator() cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): 
ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'value')]) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time 
ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## 
## NOTE(review): This region of the PyBindGen-generated ns-3 bindings was
## whitespace-mangled (newlines collapsed), which fused executable code onto
## the '##' provenance-comment lines and left the region unparseable.  The
## registration functions below are reconstructed from the visible content in
## the standard generated layout: one register_* function per wrapped C++
## class, each receiving the module root and the pybindgen CppClass wrapper
## ('cls') and declaring constructors/methods on it.
##
## Two fragments were cut off at the edges of the mangled region and are
## deliberately NOT reproduced here (their full text is not visible, so
## guessing would be wrong): the tail of register_Ns3Time_methods (a handful
## of ns3::Time accessor registrations) and everything of
## register_Ns3TimeChecker_methods after its default constructor.  Recover
## both by re-running the ns-3 bindings generator rather than by hand.

def register_Ns3TopologyReader_methods(root_module, cls):
    """Register bindings for ns3::TopologyReader (topology-reader.h)."""
    cls.add_constructor([])
    cls.add_method('AddLink', 'void', [param('ns3::TopologyReader::Link', 'link')])
    cls.add_method('GetFileName', 'std::string', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('LinksBegin', 'std::_List_const_iterator< ns3::TopologyReader::Link >', [], is_const=True)
    cls.add_method('LinksEmpty', 'bool', [], is_const=True)
    cls.add_method('LinksEnd', 'std::_List_const_iterator< ns3::TopologyReader::Link >', [], is_const=True)
    cls.add_method('LinksSize', 'int', [], is_const=True)
    ## Read() is pure virtual in C++; the concrete readers below override it.
    cls.add_method('Read', 'ns3::NodeContainer', [], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetFileName', 'void', [param('std::string const', 'fileName')])
    return

def register_Ns3TopologyReaderLink_methods(root_module, cls):
    """Register bindings for the nested ns3::TopologyReader::Link class."""
    cls.add_constructor([param('ns3::TopologyReader::Link const &', 'arg0')])
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'fromPtr'), param('std::string', 'fromName'), param('ns3::Ptr< ns3::Node >', 'toPtr'), param('std::string', 'toName')])
    ## The attribute iterators expose the underlying std::map<string,string>.
    cls.add_method('AttributesBegin', 'std::_Rb_tree_const_iterator< std::pair< std::basic_string< char, std::char_traits< char >, std::allocator< char > > const, std::basic_string< char, std::char_traits< char >, std::allocator< char > > > >', [])
    cls.add_method('AttributesEnd', 'std::_Rb_tree_const_iterator< std::pair< std::basic_string< char, std::char_traits< char >, std::allocator< char > > const, std::basic_string< char, std::char_traits< char >, std::allocator< char > > > >', [])
    cls.add_method('GetAttribute', 'std::string', [param('std::string', 'name')], is_const=True)
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('std::string &', 'value')], is_const=True)
    cls.add_method('GetFromNode', 'ns3::Ptr< ns3::Node >', [], is_const=True)
    cls.add_method('GetFromNodeName', 'std::string', [], is_const=True)
    cls.add_method('GetToNode', 'ns3::Ptr< ns3::Node >', [], is_const=True)
    cls.add_method('GetToNodeName', 'std::string', [], is_const=True)
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('std::string &', 'value')])
    return

def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register bindings for ns3::TraceSourceAccessor (trace-source-accessor.h)."""
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register bindings for ns3::AttributeAccessor (attribute.h)."""
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register bindings for ns3::AttributeChecker (attribute.h)."""
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## CreateValidValue is the only concrete (non-pure) method on this class.
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True)
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeValue_methods(root_module, cls):
    """Register bindings for ns3::AttributeValue (attribute.h)."""
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register bindings for ns3::CallbackChecker (callback.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return

def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register bindings for ns3::CallbackImplBase (callback.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackValue_methods(root_module, cls):
    """Register bindings for ns3::CallbackValue (callback.h)."""
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return

def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register bindings for ns3::EmptyAttributeValue (attribute.h).

    The three virtual overrides are private in C++, hence visibility='private'.
    """
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return

def register_Ns3InetTopologyReader_methods(root_module, cls):
    """Register bindings for ns3::InetTopologyReader (inet-topology-reader.h)."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('Read', 'ns3::NodeContainer', [], is_virtual=True)
    return

def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv4AddressChecker (ipv4-address.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv4AddressValue (ipv4-address.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return

def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv4MaskChecker (ipv4-address.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return

def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv4MaskValue (ipv4-address.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return

def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv6AddressChecker (ipv6-address.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv6AddressValue (ipv6-address.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return

def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv6PrefixChecker (ipv6-address.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return

def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv6PrefixValue (ipv6-address.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return

def register_Ns3NetDevice_methods(root_module, cls):
    """Register bindings for the abstract ns3::NetDevice interface (net-device.h)."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    cls.add_method('AddLinkChangeCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetAddress', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetBroadcast', 'ns3::Address', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetChannel', 'ns3::Ptr< ns3::Channel >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetIfIndex', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMtu', 'uint16_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## GetMulticast is overloaded on IPv4 vs IPv6 group addresses.
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv4Address', 'multicastGroup')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetMulticast', 'ns3::Address', [param('ns3::Ipv6Address', 'addr')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetNode', 'ns3::Ptr< ns3::Node >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsBridge', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsBroadcast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsLinkUp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsMulticast', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsPointToPoint', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('NeedsArp', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Send', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SendFrom', 'bool', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetAddress', 'void', [param('ns3::Address', 'address')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetIfIndex', 'void', [param('uint32_t const', 'index')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetMtu', 'bool', [param('uint16_t const', 'mtu')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetPromiscReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SupportsSendFrom', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Node_methods(root_module, cls):
    """Register bindings for ns3::Node (node.h)."""
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'systemId')])
    cls.add_method('AddApplication', 'uint32_t', [param('ns3::Ptr< ns3::Application >', 'application')])
    cls.add_method('AddDevice', 'uint32_t', [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    cls.add_method('ChecksumEnabled', 'bool', [], is_static=True)
    cls.add_method('GetApplication', 'ns3::Ptr< ns3::Application >', [param('uint32_t', 'index')], is_const=True)
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [param('uint32_t', 'index')], is_const=True)
    cls.add_method('GetId', 'uint32_t', [], is_const=True)
    cls.add_method('GetNApplications', 'uint32_t', [], is_const=True)
    cls.add_method('GetNDevices', 'uint32_t', [], is_const=True)
    cls.add_method('GetSystemId', 'uint32_t', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('RegisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    cls.add_method('RegisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    cls.add_method('UnregisterDeviceAdditionListener', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    cls.add_method('UnregisterProtocolHandler', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## Lifecycle hooks are protected virtuals in C++.
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoStart', 'void', [], visibility='protected', is_virtual=True)
    return

def register_Ns3OrbisTopologyReader_methods(root_module, cls):
    """Register bindings for ns3::OrbisTopologyReader (orbis-topology-reader.h)."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('Read', 'ns3::NodeContainer', [], is_virtual=True)
    return

def register_Ns3RocketfuelTopologyReader_methods(root_module, cls):
    """Register bindings for ns3::RocketfuelTopologyReader (rocketfuel-topology-reader.h)."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    cls.add_method('Read', 'ns3::NodeContainer', [], is_virtual=True)
    return

## NOTE(review): register_Ns3TimeChecker_methods continues beyond the visible
## region; only its default-constructor registration was visible here, so it
## is left to the following (out-of-view) portion of the file / regeneration.
ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')]) return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) 
[copy constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3AddressChecker_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) 
[copy constructor] cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')]) return def register_Ns3AddressValue_methods(root_module, cls): ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor] cls.add_constructor([param('ns3::AddressValue const &', 'arg0')]) ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor] cls.add_constructor([param('ns3::Address const &', 'value')]) ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function] cls.add_method('Get', 'ns3::Address', [], is_const=True) ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')]) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def main(): out 
= FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
gpl-2.0
SteveHNH/ansible
lib/ansible/modules/cloud/misc/virt_pool.py
29
21829
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Maciej Delmanowski <drybjed@gmail.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: virt_pool author: "Maciej Delmanowski (@drybjed)" version_added: "2.0" short_description: Manage libvirt storage pools description: - Manage I(libvirt) storage pools. options: name: required: false aliases: [ "pool" ] description: - name of the storage pool being managed. Note that pool must be previously defined with xml. state: required: false choices: [ "active", "inactive", "present", "absent", "undefined", "deleted" ] description: - specify which state you want a storage pool to be in. If 'active', pool will be started. If 'present', ensure that pool is present but do not change its state; if it's missing, you need to specify xml argument. If 'inactive', pool will be stopped. If 'undefined' or 'absent', pool will be removed from I(libvirt) configuration. If 'deleted', pool contents will be deleted and then pool undefined. command: required: false choices: [ "define", "build", "create", "start", "stop", "destroy", "delete", "undefine", "get_xml", "list_pools", "facts", "info", "status" ] description: - in addition to state management, various non-idempotent commands are available. See examples. autostart: required: false choices: ["yes", "no"] description: - Specify if a given storage pool should be started automatically on system boot. uri: required: false default: "qemu:///system" description: - I(libvirt) connection uri. xml: required: false description: - XML document used with the define command. 
mode: required: false choices: [ 'new', 'repair', 'resize', 'no_overwrite', 'overwrite', 'normal', 'zeroed' ] description: - Pass additional parameters to 'build' or 'delete' commands. requirements: - "python >= 2.6" - "python-libvirt" - "python-lxml" ''' EXAMPLES = ''' # Define a new storage pool - virt_pool: command: define name: vms xml: '{{ lookup("template", "pool/dir.xml.j2") }}' # Build a storage pool if it does not exist - virt_pool: command: build name: vms # Start a storage pool - virt_pool: command: create name: vms # List available pools - virt_pool: command: list_pools # Get XML data of a specified pool - virt_pool: command: get_xml name: vms # Stop a storage pool - virt_pool: command: destroy name: vms # Delete a storage pool (destroys contents) - virt_pool: command: delete name: vms # Undefine a storage pool - virt_pool: command: undefine name: vms # Gather facts about storage pools # Facts will be available as 'ansible_libvirt_pools' - virt_pool: command: facts # Gather information about pools managed by 'libvirt' remotely using uri - virt_pool: command: info uri: '{{ item }}' with_items: '{{ libvirt_uris }}' register: storage_pools # Ensure that a pool is active (needs to be defined and built first) - virt_pool: state: active name: vms # Ensure that a pool is inactive - virt_pool: state: inactive name: vms # Ensure that a given pool will be started at boot - virt_pool: autostart: yes name: vms # Disable autostart for a given pool - virt_pool: autostart: no name: vms ''' try: import libvirt except ImportError: HAS_VIRT = False else: HAS_VIRT = True try: from lxml import etree except ImportError: HAS_XML = False else: HAS_XML = True from ansible.module_utils.basic import AnsibleModule VIRT_FAILED = 1 VIRT_SUCCESS = 0 VIRT_UNAVAILABLE=2 ALL_COMMANDS = [] ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete', 'undefine', 'destroy', 'get_xml', 'define', 'refresh'] HOST_COMMANDS = [ 'list_pools', 'facts', 'info' ] 
# Commands that act on one named pool (ENTRY_COMMANDS) vs. the whole
# host (HOST_COMMANDS) are merged into the full list of accepted
# 'command' option values.
ALL_COMMANDS.extend(ENTRY_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)

# Mappings from libvirt's numeric states to the strings this module
# reports back to the user.
ENTRY_STATE_ACTIVE_MAP = {0: "inactive", 1: "active"}
ENTRY_STATE_AUTOSTART_MAP = {0: "no", 1: "yes"}
ENTRY_STATE_PERSISTENT_MAP = {0: "no", 1: "yes"}
ENTRY_STATE_INFO_MAP = {0: "inactive", 1: "building", 2: "running",
                        3: "degraded", 4: "inaccessible"}

# Values accepted by the 'mode' option, translated to the flag values
# libvirt expects for storagePoolBuild() / storagePoolDelete().
ENTRY_BUILD_FLAGS_MAP = {"new": 0, "repair": 1, "resize": 2,
                         "no_overwrite": 4, "overwrite": 8}
ENTRY_DELETE_FLAGS_MAP = {"normal": 0, "zeroed": 1}

ALL_MODES = []
ALL_MODES.extend(ENTRY_BUILD_FLAGS_MAP.keys())
ALL_MODES.extend(ENTRY_DELETE_FLAGS_MAP.keys())


class EntryNotFound(Exception):
    """Raised when a named storage pool does not exist on the hypervisor."""
    pass


class LibvirtConnection(object):
    """Thin, check-mode-aware wrapper around a libvirt connection.

    Mutating operations (create/destroy/build/delete/...) are executed
    for real only when the Ansible module is not in check mode;
    otherwise they predict whether a change would occur and exit early
    via ``module.exit_json(changed=True)``.
    """

    def __init__(self, uri, module):
        """Open a libvirt connection to *uri*; raise on failure."""
        self.module = module
        conn = libvirt.open(uri)
        if not conn:
            raise Exception("hypervisor connection failure")
        self.conn = conn

    def find_entry(self, entryid):
        """Return the pool named *entryid*, or all pools when entryid == -1.

        Both active and inactive (defined) pools are searched.
        Raises EntryNotFound when a specific name cannot be found.
        """
        results = []
        # Active pools.
        for name in self.conn.listStoragePools():
            entry = self.conn.storagePoolLookupByName(name)
            results.append(entry)
        # Inactive (defined but not started) pools.
        for name in self.conn.listDefinedStoragePools():
            entry = self.conn.storagePoolLookupByName(name)
            results.append(entry)
        if entryid == -1:
            return results
        for entry in results:
            if entry.name() == entryid:
                return entry
        raise EntryNotFound("storage pool %s not found" % entryid)

    def create(self, entryid):
        """Start (activate) the pool; in check mode only predict the change."""
        if not self.module.check_mode:
            return self.find_entry(entryid).create()
        else:
            try:
                state = self.find_entry(entryid).isActive()
            except Exception:
                # Pool not found / not queryable: starting it would be a change.
                return self.module.exit_json(changed=True)
            if not state:
                return self.module.exit_json(changed=True)

    def destroy(self, entryid):
        """Stop (deactivate) the pool; in check mode only predict the change."""
        if not self.module.check_mode:
            return self.find_entry(entryid).destroy()
        else:
            if self.find_entry(entryid).isActive():
                return self.module.exit_json(changed=True)

    def undefine(self, entryid):
        """Remove the pool definition; in check mode only predict the change."""
        if not self.module.check_mode:
            return self.find_entry(entryid).undefine()
        else:
            if not self.find_entry(entryid):
                return self.module.exit_json(changed=True)

    def get_status2(self, entry):
        """Return 'active'/'inactive' for an already-resolved pool object."""
        state = entry.isActive()
        return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")

    def get_status(self, entryid):
        """Return 'active'/'inactive' for the pool named *entryid*."""
        if not self.module.check_mode:
            state = self.find_entry(entryid).isActive()
            return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
        else:
            try:
                state = self.find_entry(entryid).isActive()
                return ENTRY_STATE_ACTIVE_MAP.get(state, "unknown")
            except Exception:
                # In check mode a pool that cannot be found is reported inactive.
                return ENTRY_STATE_ACTIVE_MAP.get("inactive", "unknown")

    def get_uuid(self, entryid):
        """Return the pool's UUID string."""
        return self.find_entry(entryid).UUIDString()

    def get_xml(self, entryid):
        """Return the pool's XML description."""
        return self.find_entry(entryid).XMLDesc(0)

    def get_info(self, entryid):
        """Return libvirt's (state, capacity, allocation, available) tuple."""
        return self.find_entry(entryid).info()

    def get_volume_count(self, entryid):
        """Return the number of volumes in the pool (pool must be active)."""
        return self.find_entry(entryid).numOfVolumes()

    def get_volume_names(self, entryid):
        """Return the list of volume names in the pool (pool must be active)."""
        return self.find_entry(entryid).listVolumes()

    def get_devices(self, entryid):
        """Return the list of source device paths, if any are defined.

        NOTE(review): when no <source><device> elements exist this
        returns None (the original behaviour); the try/except around the
        return is kept for compatibility but cannot trigger.
        """
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        if xml.xpath('/pool/source/device'):
            result = []
            for device in xml.xpath('/pool/source/device'):
                result.append(device.get('path'))
            try:
                return result
            except Exception:
                raise ValueError('No devices specified')

    def get_format(self, entryid):
        """Return the pool's source format type, or raise ValueError."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/pool/source/format')[0].get('type')
        except Exception:
            raise ValueError('Format not specified')
        return result

    def get_host(self, entryid):
        """Return the pool's source host name, or raise ValueError."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/pool/source/host')[0].get('name')
        except Exception:
            raise ValueError('Host not specified')
        return result

    def get_source_path(self, entryid):
        """Return the pool's source directory path, or raise ValueError."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        try:
            result = xml.xpath('/pool/source/dir')[0].get('path')
        except Exception:
            raise ValueError('Source path not specified')
        return result

    def get_path(self, entryid):
        """Return the pool's target path."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        return xml.xpath('/pool/target/path')[0].text

    def get_type(self, entryid):
        """Return the pool type attribute ('dir', 'logical', ...)."""
        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
        return xml.get('type')

    def build(self, entryid, flags):
        """Build the pool with the given flags; check-mode aware."""
        if not self.module.check_mode:
            return self.find_entry(entryid).build(flags)
        else:
            try:
                state = self.find_entry(entryid)
            except Exception:
                return self.module.exit_json(changed=True)
            if not state:
                return self.module.exit_json(changed=True)

    def delete(self, entryid, flags):
        """Delete the pool's contents with the given flags; check-mode aware."""
        if not self.module.check_mode:
            return self.find_entry(entryid).delete(flags)
        else:
            try:
                state = self.find_entry(entryid)
            except Exception:
                return self.module.exit_json(changed=True)
            if state:
                return self.module.exit_json(changed=True)

    def get_autostart(self, entryid):
        """Return 'yes'/'no' for the pool's autostart setting."""
        state = self.find_entry(entryid).autostart()
        return ENTRY_STATE_AUTOSTART_MAP.get(state, "unknown")

    def get_autostart2(self, entryid):
        """Return the raw autostart flag; check-mode aware."""
        if not self.module.check_mode:
            return self.find_entry(entryid).autostart()
        else:
            try:
                return self.find_entry(entryid).autostart()
            except Exception:
                return self.module.exit_json(changed=True)

    def set_autostart(self, entryid, val):
        """Set the autostart flag to *val*; check-mode aware."""
        if not self.module.check_mode:
            return self.find_entry(entryid).setAutostart(val)
        else:
            try:
                state = self.find_entry(entryid).autostart()
            except Exception:
                return self.module.exit_json(changed=True)
            if bool(state) != val:
                return self.module.exit_json(changed=True)

    def refresh(self, entryid):
        """Rescan the pool's contents."""
        return self.find_entry(entryid).refresh()

    def get_persistent(self, entryid):
        """Return 'yes'/'no' depending on whether the pool is persistent."""
        state = self.find_entry(entryid).isPersistent()
        return ENTRY_STATE_PERSISTENT_MAP.get(state, "unknown")

    def define_from_xml(self, entryid, xml):
        """Define a pool from an XML document; check-mode aware."""
        if not self.module.check_mode:
            return self.conn.storagePoolDefineXML(xml)
        else:
            try:
                self.find_entry(entryid)
            except Exception:
                return self.module.exit_json(changed=True)


class VirtStoragePool(object):
    """High-level storage-pool operations used by core().

    Mostly delegates to LibvirtConnection, translating the module's
    string 'mode' option into libvirt flag values and formatting
    fact/info result structures.
    """

    def __init__(self, uri, module):
        self.module = module
        self.uri = uri
        self.conn = LibvirtConnection(self.uri, self.module)

    def get_pool(self, entryid):
        """Return the libvirt pool object for *entryid* (may raise EntryNotFound)."""
        return self.conn.find_entry(entryid)

    def list_pools(self, state=None):
        """Return names of all pools, optionally filtered by state string."""
        results = []
        for entry in self.conn.find_entry(-1):
            if state:
                if state == self.conn.get_status2(entry):
                    results.append(entry.name())
            else:
                results.append(entry.name())
        return results

    def state(self):
        """Return a list of '<name> <state>' strings for every pool."""
        results = []
        for entry in self.list_pools():
            state_blurb = self.conn.get_status(entry)
            results.append("%s %s" % (entry, state_blurb))
        return results

    def autostart(self, entryid):
        """Enable autostart for the pool."""
        return self.conn.set_autostart(entryid, True)

    def get_autostart(self, entryid):
        """Return the pool's raw autostart flag."""
        return self.conn.get_autostart2(entryid)

    def set_autostart(self, entryid, state):
        """Set the pool's autostart flag."""
        return self.conn.set_autostart(entryid, state)

    def create(self, entryid):
        """Start the pool."""
        return self.conn.create(entryid)

    def start(self, entryid):
        """Alias for create(): start the pool."""
        return self.conn.create(entryid)

    def stop(self, entryid):
        """Alias for destroy(): stop the pool."""
        return self.conn.destroy(entryid)

    def destroy(self, entryid):
        """Stop the pool."""
        return self.conn.destroy(entryid)

    def undefine(self, entryid):
        """Remove the pool definition."""
        return self.conn.undefine(entryid)

    def status(self, entryid):
        """Return 'active'/'inactive' for the pool."""
        return self.conn.get_status(entryid)

    def get_xml(self, entryid):
        """Return the pool's XML description."""
        return self.conn.get_xml(entryid)

    def define(self, entryid, xml):
        """Define the pool from an XML document."""
        return self.conn.define_from_xml(entryid, xml)

    def build(self, entryid, flags):
        """Build the pool; *flags* is a 'mode' string mapped to libvirt flags."""
        return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags, 0))

    def delete(self, entryid, flags):
        """Delete pool contents; *flags* is a 'mode' string mapped to libvirt flags."""
        return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags, 0))

    def refresh(self, entryid):
        """Rescan the pool's contents."""
        return self.conn.refresh(entryid)

    def info(self):
        """Return pool details under the 'pools' key (command=info)."""
        return self.facts(facts_mode='info')

    def facts(self, facts_mode='facts'):
        """Collect per-pool details for the 'facts' or 'info' command.

        Returns either {'ansible_facts': {'ansible_libvirt_pools': ...}}
        or {'pools': ...} depending on *facts_mode*.
        """
        results = dict()
        for entry in self.list_pools():
            results[entry] = dict()
            if self.conn.find_entry(entry):
                data = self.conn.get_info(entry)
                # libvirt returns sizes as long()'s, which xmlrpclib tries
                # to convert to regular int's during serialization. This
                # throws exceptions, so convert them to strings here and
                # assume the other end of the xmlrpc connection can figure
                # things out or doesn't care.
                results[entry] = {
                    "status": ENTRY_STATE_INFO_MAP.get(data[0], "unknown"),
                    "size_total": str(data[1]),
                    "size_used": str(data[2]),
                    "size_available": str(data[3]),
                }
                results[entry]["autostart"] = self.conn.get_autostart(entry)
                results[entry]["persistent"] = self.conn.get_persistent(entry)
                results[entry]["state"] = self.conn.get_status(entry)
                results[entry]["path"] = self.conn.get_path(entry)
                results[entry]["type"] = self.conn.get_type(entry)
                results[entry]["uuid"] = self.conn.get_uuid(entry)
                # Volume listing only works on active pools.
                if self.conn.find_entry(entry).isActive():
                    results[entry]["volume_count"] = self.conn.get_volume_count(entry)
                    results[entry]["volumes"] = list()
                    for volume in self.conn.get_volume_names(entry):
                        results[entry]["volumes"].append(volume)
                else:
                    results[entry]["volume_count"] = -1
                # The remaining attributes are pool-type dependent and may
                # legitimately be absent from the XML.
                try:
                    results[entry]["host"] = self.conn.get_host(entry)
                except ValueError:
                    pass
                try:
                    results[entry]["source_path"] = self.conn.get_source_path(entry)
                except ValueError:
                    pass
                try:
                    results[entry]["format"] = self.conn.get_format(entry)
                except ValueError:
                    pass
                try:
                    devices = self.conn.get_devices(entry)
                    results[entry]["devices"] = devices
                except ValueError:
                    pass
            else:
                results[entry]["state"] = self.conn.get_status(entry)
        facts = dict()
        if facts_mode == 'facts':
            facts["ansible_facts"] = dict()
            facts["ansible_facts"]["ansible_libvirt_pools"] = results
        elif facts_mode == 'info':
            facts['pools'] = results
        return facts


def core(module):
    """Dispatch on the module's state/command/autostart parameters.

    Returns an (rc, result) tuple or calls module.fail_json() on error.
    """
    state = module.params.get('state', None)
    name = module.params.get('name', None)
    command = module.params.get('command', None)
    uri = module.params.get('uri', None)
    xml = module.params.get('xml', None)
    autostart = module.params.get('autostart', None)
    mode = module.params.get('mode', None)

    v = VirtStoragePool(uri, module)
    res = {}

    # state + list_pools combines filtering with listing.
    if state and command == 'list_pools':
        res = v.list_pools(state=state)
        if not isinstance(res, dict):
            res = {command: res}
        return VIRT_SUCCESS, res

    if state:
        if not name:
            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False

        if state in ['active']:
            # BUGFIX: was "is not 'active'" -- identity comparison against a
            # string literal; compare by value instead.
            if v.status(name) != 'active':
                res['changed'] = True
                res['msg'] = v.start(name)
        elif state in ['present']:
            try:
                v.get_pool(name)
            except EntryNotFound:
                if not xml:
                    module.fail_json(msg="storage pool '" + name + "' not present, but xml not specified")
                v.define(name, xml)
                res = {'changed': True, 'created': name}
        elif state in ['inactive']:
            entries = v.list_pools()
            if name in entries:
                # BUGFIX: was "is not 'inactive'" (identity comparison).
                if v.status(name) != 'inactive':
                    res['changed'] = True
                    res['msg'] = v.destroy(name)
        elif state in ['undefined', 'absent']:
            entries = v.list_pools()
            if name in entries:
                # An active pool must be stopped before it can be undefined.
                if v.status(name) != 'inactive':
                    v.destroy(name)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        elif state in ['deleted']:
            entries = v.list_pools()
            if name in entries:
                # Stop, wipe contents (honouring 'mode'), then undefine.
                if v.status(name) != 'inactive':
                    v.destroy(name)
                v.delete(name, mode)
                res['changed'] = True
                res['msg'] = v.undefine(name)
        else:
            module.fail_json(msg="unexpected state")

        return VIRT_SUCCESS, res

    if command:
        if command in ENTRY_COMMANDS:
            if not name:
                module.fail_json(msg="%s requires 1 argument: name" % command)
            if command == 'define':
                if not xml:
                    module.fail_json(msg="define requires xml argument")
                try:
                    v.get_pool(name)
                except EntryNotFound:
                    v.define(name, xml)
                    res = {'changed': True, 'created': name}
                return VIRT_SUCCESS, res
            elif command == 'build':
                res = v.build(name, mode)
                if not isinstance(res, dict):
                    res = {'changed': True, command: res}
                return VIRT_SUCCESS, res
            elif command == 'delete':
                res = v.delete(name, mode)
                if not isinstance(res, dict):
                    res = {'changed': True, command: res}
                return VIRT_SUCCESS, res
            res = getattr(v, command)(name)
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res
        elif hasattr(v, command):
            res = getattr(v, command)()
            if not isinstance(res, dict):
                res = {command: res}
            return VIRT_SUCCESS, res
        else:
            module.fail_json(msg="Command %s not recognized" % command)

    if autostart is not None:
        if not name:
            module.fail_json(msg="state change requires a specified name")

        res['changed'] = False
        if autostart:
            if not v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, True)
        else:
            if v.get_autostart(name):
                res['changed'] = True
                res['msg'] = v.set_autostart(name, False)

        return VIRT_SUCCESS, res

    module.fail_json(msg="expected state or command parameter to be specified")


def main():
    """Ansible entry point: parse arguments, run core(), emit the result."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['pool']),
            state=dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
            command=dict(choices=ALL_COMMANDS),
            uri=dict(default='qemu:///system'),
            xml=dict(),
            autostart=dict(type='bool'),
            mode=dict(choices=ALL_MODES),
        ),
        supports_check_mode=True
    )

    if not HAS_VIRT:
        module.fail_json(
            msg='The `libvirt` module is not importable. Check the requirements.'
        )

    if not HAS_XML:
        module.fail_json(
            msg='The `lxml` module is not importable. Check the requirements.'
        )

    rc = VIRT_SUCCESS
    try:
        rc, result = core(module)
    except Exception as e:
        module.fail_json(msg=str(e))

    if rc != 0:  # something went wrong emit the msg
        module.fail_json(rc=rc, msg=result)
    else:
        module.exit_json(**result)


if __name__ == '__main__':
    main()
gpl-3.0
vmanoria/bluemix-hue-filebrowser
hue-3.8.1-bluemix/desktop/core/ext-py/parquet-python/parquet/__main__.py
35
2065
"""Command-line interface for the parquet package: dump file metadata
and/or row data as CSV or JSON."""
import argparse
import logging
import sys


def setup_logging(options=None):
    """Attach a stderr handler to the 'parquet' logger.

    The level is DEBUG when *options* has a true ``debug`` attribute,
    WARNING otherwise.
    """
    level = logging.DEBUG if options is not None and options.debug \
        else logging.WARNING

    console = logging.StreamHandler()
    console.setLevel(level)
    formatter = logging.Formatter('%(name)s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)

    logger = logging.getLogger('parquet')
    # BUGFIX: the level must also be set on the logger itself -- its
    # effective level is otherwise WARNING (inherited from the root
    # logger), so DEBUG records were filtered out before they could
    # ever reach the handler and --debug had no effect.
    logger.setLevel(level)
    logger.addHandler(console)


def main(argv=None):
    """Parse command-line arguments and dump the requested parquet data.

    *argv* defaults to ``sys.argv[1:]`` only when not supplied.
    """
    # BUGFIX: was "argv = argv or sys.argv[1:]", which silently fell
    # back to sys.argv when an explicit empty list was passed in (e.g.
    # from a test); only None means "use the process arguments".
    if argv is None:
        argv = sys.argv[1:]

    parser = argparse.ArgumentParser('parquet',
                                     description='Read parquet files')
    parser.add_argument('--metadata', action='store_true',
                        help='show metadata on file')
    parser.add_argument('--row-group-metadata', action='store_true',
                        help="show per row group metadata")
    parser.add_argument('--no-data', action='store_true',
                        help="don't dump any data from the file")
    parser.add_argument('--limit', action='store', type=int, default=-1,
                        help='max records to output')
    parser.add_argument('--col', action='append', type=str,
                        help='only include this column (can be '
                             'specified multiple times)')
    parser.add_argument('--no-headers', action='store_true',
                        help='skip headers in output (only applies if '
                             'format=csv)')
    parser.add_argument('--format', action='store', type=str, default='csv',
                        help='format for the output data. can be csv or json.')
    parser.add_argument('--debug', action='store_true',
                        help='log debug info to stderr')
    parser.add_argument('file', help='path to the file to parse')

    args = parser.parse_args(argv)
    setup_logging(args)

    # Deferred import: keeps argument parsing (and --help) fast and
    # independent of the parquet package's own import-time dependencies.
    import parquet

    if args.metadata:
        parquet.dump_metadata(args.file, args.row_group_metadata)
    if not args.no_data:
        parquet.dump(args.file, args)


if __name__ == '__main__':
    main()
gpl-2.0
adazey/Muzez
libs/nltk/sem/hole.py
1
14326
# Natural Language Toolkit: Logic # # Author: Peter Wang # Updated by: Dan Garrette <dhgarrette@gmail.com> # # Copyright (C) 2001-2016 NLTK Project # URL: <http://nltk.org> # For license information, see LICENSE.TXT """ An implementation of the Hole Semantics model, following Blackburn and Bos, Representation and Inference for Natural Language (CSLI, 2005). The semantic representations are built by the grammar hole.fcfg. This module contains driver code to read in sentences and parse them according to a hole semantics grammar. After parsing, the semantic representation is in the form of an underspecified representation that is not easy to read. We use a "plugging" algorithm to convert that representation into first-order logic formulas. """ from __future__ import print_function, unicode_literals from functools import reduce from nltk import compat from nltk.parse import load_parser from nltk.sem.skolemize import skolemize from nltk.sem.logic import (AllExpression, AndExpression, ApplicationExpression, ExistsExpression, IffExpression, ImpExpression, LambdaExpression, NegatedExpression, OrExpression) # Note that in this code there may be multiple types of trees being referred to: # # 1. parse trees # 2. the underspecified representation # 3. first-order logic formula trees # 4. the search space when plugging (search tree) # class Constants(object): ALL = 'ALL' EXISTS = 'EXISTS' NOT = 'NOT' AND = 'AND' OR = 'OR' IMP = 'IMP' IFF = 'IFF' PRED = 'PRED' LEQ = 'LEQ' HOLE = 'HOLE' LABEL = 'LABEL' MAP = {ALL: lambda v, e: AllExpression(v.variable, e), EXISTS: lambda v, e: ExistsExpression(v.variable, e), NOT: NegatedExpression, AND: AndExpression, OR: OrExpression, IMP: ImpExpression, IFF: IffExpression, PRED: ApplicationExpression} class HoleSemantics(object): """ This class holds the broken-down components of a hole semantics, i.e. 
it extracts the holes, labels, logic formula fragments and constraints out of a big conjunction of such as produced by the hole semantics grammar. It then provides some operations on the semantics dealing with holes, labels and finding legal ways to plug holes with labels. """ def __init__(self, usr): """ Constructor. `usr' is a ``sem.Expression`` representing an Underspecified Representation Structure (USR). A USR has the following special predicates: ALL(l,v,n), EXISTS(l,v,n), AND(l,n,n), OR(l,n,n), IMP(l,n,n), IFF(l,n,n), PRED(l,v,n,v[,v]*) where the brackets and star indicate zero or more repetitions, LEQ(n,n), HOLE(n), LABEL(n) where l is the label of the node described by the predicate, n is either a label or a hole, and v is a variable. """ self.holes = set() self.labels = set() self.fragments = {} # mapping of label -> formula fragment self.constraints = set() # set of Constraints self._break_down(usr) self.top_most_labels = self._find_top_most_labels() self.top_hole = self._find_top_hole() def is_node(self, x): """ Return true if x is a node (label or hole) in this semantic representation. """ return x in (self.labels | self.holes) def _break_down(self, usr): """ Extract holes, labels, formula fragments and constraints from the hole semantics underspecified representation (USR). 
""" if isinstance(usr, AndExpression): self._break_down(usr.first) self._break_down(usr.second) elif isinstance(usr, ApplicationExpression): func, args = usr.uncurry() if func.variable.name == Constants.LEQ: self.constraints.add(Constraint(args[0], args[1])) elif func.variable.name == Constants.HOLE: self.holes.add(args[0]) elif func.variable.name == Constants.LABEL: self.labels.add(args[0]) else: label = args[0] assert label not in self.fragments self.fragments[label] = (func, args[1:]) else: raise ValueError(usr.label()) def _find_top_nodes(self, node_list): top_nodes = node_list.copy() for f in compat.itervalues(self.fragments): # the label is the first argument of the predicate args = f[1] for arg in args: if arg in node_list: top_nodes.discard(arg) return top_nodes def _find_top_most_labels(self): """ Return the set of labels which are not referenced directly as part of another formula fragment. These will be the top-most labels for the subtree that they are part of. """ return self._find_top_nodes(self.labels) def _find_top_hole(self): """ Return the hole that will be the top of the formula tree. """ top_holes = self._find_top_nodes(self.holes) assert len(top_holes) == 1 # it must be unique return top_holes.pop() def pluggings(self): """ Calculate and return all the legal pluggings (mappings of labels to holes) of this semantics given the constraints. """ record = [] self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {}, record) return record def _plug_nodes(self, queue, potential_labels, plug_acc, record): """ Plug the nodes in `queue' with the labels in `potential_labels'. Each element of `queue' is a tuple of the node to plug and the list of ancestor holes from the root of the graph to that node. `potential_labels' is a set of the labels which are still available for plugging. `plug_acc' is the incomplete mapping of holes to labels made on the current branch of the search tree so far. 
`record' is a list of all the complete pluggings that we have found in total so far. It is the only parameter that is destructively updated. """ if queue != []: (node, ancestors) = queue[0] if node in self.holes: # The node is a hole, try to plug it. self._plug_hole(node, ancestors, queue[1:], potential_labels, plug_acc, record) else: assert node in self.labels # The node is a label. Replace it in the queue by the holes and # labels in the formula fragment named by that label. args = self.fragments[node][1] head = [(a, ancestors) for a in args if self.is_node(a)] self._plug_nodes(head + queue[1:], potential_labels, plug_acc, record) else: raise Exception('queue empty') def _plug_hole(self, hole, ancestors0, queue, potential_labels0, plug_acc0, record): """ Try all possible ways of plugging a single hole. See _plug_nodes for the meanings of the parameters. """ # Add the current hole we're trying to plug into the list of ancestors. assert hole not in ancestors0 ancestors = [hole] + ancestors0 # Try each potential label in this hole in turn. for l in potential_labels0: # Is the label valid in this hole? if self._violates_constraints(l, ancestors): continue plug_acc = plug_acc0.copy() plug_acc[hole] = l potential_labels = potential_labels0.copy() potential_labels.remove(l) if len(potential_labels) == 0: # No more potential labels. That must mean all the holes have # been filled so we have found a legal plugging so remember it. # # Note that the queue might not be empty because there might # be labels on there that point to formula fragments with # no holes in them. _sanity_check_plugging will make sure # all holes are filled. self._sanity_check_plugging(plug_acc, self.top_hole, []) record.append(plug_acc) else: # Recursively try to fill in the rest of the holes in the # queue. The label we just plugged into the hole could have # holes of its own so at the end of the queue. 
Putting it on # the end of the queue gives us a breadth-first search, so that # all the holes at level i of the formula tree are filled # before filling level i+1. # A depth-first search would work as well since the trees must # be finite but the bookkeeping would be harder. self._plug_nodes(queue + [(l, ancestors)], potential_labels, plug_acc, record) def _violates_constraints(self, label, ancestors): """ Return True if the `label' cannot be placed underneath the holes given by the set `ancestors' because it would violate the constraints imposed on it. """ for c in self.constraints: if c.lhs == label: if c.rhs not in ancestors: return True return False def _sanity_check_plugging(self, plugging, node, ancestors): """ Make sure that a given plugging is legal. We recursively go through each node and make sure that no constraints are violated. We also check that all holes have been filled. """ if node in self.holes: ancestors = [node] + ancestors label = plugging[node] else: label = node assert label in self.labels for c in self.constraints: if c.lhs == label: assert c.rhs in ancestors args = self.fragments[label][1] for arg in args: if self.is_node(arg): self._sanity_check_plugging(plugging, arg, [label] + ancestors) def formula_tree(self, plugging): """ Return the first-order logic formula tree for this underspecified representation using the plugging given. """ return self._formula_tree(plugging, self.top_hole) def _formula_tree(self, plugging, node): if node in plugging: return self._formula_tree(plugging, plugging[node]) elif node in self.fragments: pred, args = self.fragments[node] children = [self._formula_tree(plugging, arg) for arg in args] return reduce(Constants.MAP[pred.variable.name], children) else: return node @compat.python_2_unicode_compatible class Constraint(object): """ This class represents a constraint of the form (L =< N), where L is a label and N is a node (a label or a hole). 
""" def __init__(self, lhs, rhs): self.lhs = lhs self.rhs = rhs def __eq__(self, other): if self.__class__ == other.__class__: return self.lhs == other.lhs and self.rhs == other.rhs else: return False def __ne__(self, other): return not (self == other) def __hash__(self): return hash(repr(self)) def __repr__(self): return '(%s < %s)' % (self.lhs, self.rhs) def hole_readings(sentence, grammar_filename=None, verbose=False): if not grammar_filename: grammar_filename = 'grammars/sample_grammars/hole.fcfg' if verbose: print('Reading grammar file', grammar_filename) parser = load_parser(grammar_filename) # Parse the sentence. tokens = sentence.split() trees = list(parser.parse(tokens)) if verbose: print('Got %d different parses' % len(trees)) all_readings = [] for tree in trees: # Get the semantic feature from the top of the parse tree. sem = tree.label()['SEM'].simplify() # Print the raw semantic representation. if verbose: print('Raw: ', sem) # Skolemize away all quantifiers. All variables become unique. while isinstance(sem, LambdaExpression): sem = sem.term skolemized = skolemize(sem) if verbose: print('Skolemized:', skolemized) # Break the hole semantics representation down into its components # i.e. holes, labels, formula fragments and constraints. hole_sem = HoleSemantics(skolemized) # Maybe show the details of the semantic representation. if verbose: print('Holes: ', hole_sem.holes) print('Labels: ', hole_sem.labels) print('Constraints: ', hole_sem.constraints) print('Top hole: ', hole_sem.top_hole) print('Top labels: ', hole_sem.top_most_labels) print('Fragments:') for l, f in hole_sem.fragments.items(): print('\t%s: %s' % (l, f)) # Find all the possible ways to plug the formulas together. pluggings = hole_sem.pluggings() # Build FOL formula trees using the pluggings. readings = list(map(hole_sem.formula_tree, pluggings)) # Print out the formulas in a textual format. if verbose: for i, r in enumerate(readings): print() print('%d. 
%s' % (i, r)) print() all_readings.extend(readings) return all_readings if __name__ == '__main__': for r in hole_readings('a dog barks'): print(r) print() for r in hole_readings('every girl chases a dog'): print(r)
gpl-3.0
lengtche/beets
test/test_mbsubmit.py
16
2726
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson and Diego Moreda.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

from __future__ import division, absolute_import, print_function

import unittest

from test.helper import capture_stdout, control_stdin, TestHelper
from test.test_importer import ImportHelper, AutotagStub
from test.test_ui_importer import TerminalImportSessionSetup


class MBSubmitPluginTest(TerminalImportSessionSetup, unittest.TestCase,
                         ImportHelper, TestHelper):
    """Exercise the ``mbsubmit`` plugin's "print tracks" prompt choice
    through a simulated terminal import session.
    """

    def setUp(self):
        # Fresh beets library with the plugin under test loaded.
        self.setup_beets()
        self.load_plugins('mbsubmit')

        # Two-track import directory, driven by a stubbed autotagger so the
        # session reaches the candidate prompt deterministically.
        self._create_import_dir(2)
        self._setup_import_session()
        self.matcher = AutotagStub().install()

    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
        self.matcher.restore()

    def test_print_tracks_output(self):
        """Test the output of the "print tracks" choice."""
        # BAD matching forces the manual prompt where 'p' is offered.
        self.matcher.matching = AutotagStub.BAD

        with capture_stdout() as output:
            with control_stdin('\n'.join(['p', 's'])):
                # Print tracks; Skip
                self.importer.run()

        # Manually build the string for comparing the output.
        tracklist = (u'Print tracks? '
                     u'01. Tag Title 1 - Tag Artist (0:01)\n'
                     u'02. Tag Title 2 - Tag Artist (0:01)')
        self.assertIn(tracklist, output.getvalue())

    def test_print_tracks_output_as_tracks(self):
        """Test the output of the "print tracks" choice, as singletons."""
        self.matcher.matching = AutotagStub.BAD

        with capture_stdout() as output:
            with control_stdin('\n'.join(['t', 's', 'p', 's'])):
                # as Tracks; Skip; Print tracks; Skip
                self.importer.run()

        # Manually build the string for comparing the output.
        tracklist = (u'Print tracks? '
                     u'02. Tag Title 2 - Tag Artist (0:01)')
        self.assertIn(tracklist, output.getvalue())


def suite():
    return unittest.TestLoader().loadTestsFromName(__name__)


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
mit
isohybrid/dotfile
vim/bundle/git:--github.com-klen-python-mode/pylibs/rope/refactor/importutils/module_imports.py
27
16583
import rope.base.pynames
from rope.base import ast, utils
from rope.refactor.importutils import importinfo
from rope.refactor.importutils import actions


class ModuleImports(object):
    """High-level operations on the import statements of one module.

    Wraps a ``pymodule`` and drives the visitor classes from
    ``rope.refactor.importutils.actions`` over its parsed import
    statements (removing unused imports, sorting, expanding stars, ...).
    """

    def __init__(self, pycore, pymodule, import_filter=None):
        self.pycore = pycore
        self.pymodule = pymodule
        self.separating_lines = 0
        self.filter = import_filter

    @property
    @utils.saveit
    def imports(self):
        # Parsed import statements of the module; computed once
        # (`utils.saveit` memoizes).  Statements rejected by the optional
        # filter are kept but flagged read-only so visitors skip them.
        finder = _GlobalImportFinder(self.pymodule, self.pycore)
        result = finder.find_import_statements()
        self.separating_lines = finder.get_separating_line_count()
        if self.filter is not None:
            for import_stmt in result:
                if not self.filter(import_stmt):
                    import_stmt.readonly = True
        return result

    def _get_unbound_names(self, defined_pyobject):
        # Names referenced in `defined_pyobject` that are not bound in it,
        # i.e. the names that imports have to provide.
        visitor = _GlobalUnboundNameFinder(self.pymodule, defined_pyobject)
        ast.walk(self.pymodule.get_ast(), visitor)
        return visitor.unbound

    def remove_unused_imports(self):
        """Drop imported names that the module never uses."""
        can_select = _OneTimeSelector(self._get_unbound_names(self.pymodule))
        visitor = actions.RemovingVisitor(
            self.pycore, self._current_folder(), can_select)
        for import_statement in self.imports:
            import_statement.accept(visitor)

    def get_used_imports(self, defined_pyobject):
        """Return the non-empty import statements used by `defined_pyobject`."""
        result = []
        can_select = _OneTimeSelector(self._get_unbound_names(defined_pyobject))
        visitor = actions.FilteringVisitor(
            self.pycore, self._current_folder(), can_select)
        for import_statement in self.imports:
            new_import = import_statement.accept(visitor)
            if new_import is not None and not new_import.is_empty():
                result.append(new_import)
        return result

    def get_changed_source(self):
        """Return the module source with the (possibly modified) import
        statements written back in their computed locations."""
        imports = self.imports
        after_removing = self._remove_imports(imports)
        imports = [stmt for stmt in imports
                   if not stmt.import_info.is_empty()]

        first_non_blank = self._first_non_blank_line(after_removing, 0)
        first_import = self._first_import_line() - 1
        result = []
        # Writing module docs
        result.extend(after_removing[first_non_blank:first_import])
        # Writing imports
        sorted_imports = sorted(imports, self._compare_import_locations)
        for stmt in sorted_imports:
            # NOTE(review): `start` is assigned but never used below.
            start = self._get_import_location(stmt)
            if stmt != sorted_imports[0]:
                result.append('\n' * stmt.blank_lines)
            result.append(stmt.get_import_statement() + '\n')
        if sorted_imports and first_non_blank < len(after_removing):
            result.append('\n' * self.separating_lines)

        # Writing the body
        first_after_imports = self._first_non_blank_line(after_removing,
                                                         first_import)
        result.extend(after_removing[first_after_imports:])
        return ''.join(result)

    def _get_import_location(self, stmt):
        # Prefer the statement's new (moved) location, falling back to where
        # it originally appeared.
        start = stmt.get_new_start()
        if start is None:
            start = stmt.get_old_location()[0]
        return start

    def _compare_import_locations(self, stmt1, stmt2):
        # Python 2 cmp-style comparator ordering statements by their new or
        # old line numbers.
        def get_location(stmt):
            if stmt.get_new_start() is not None:
                return stmt.get_new_start()
            else:
                return stmt.get_old_location()[0]
        return cmp(get_location(stmt1), get_location(stmt2))

    def _remove_imports(self, imports):
        # Blank out the source lines occupied by `imports`, preserving the
        # list-index <-> line-number correspondence.
        lines = self.pymodule.source_code.splitlines(True)
        after_removing = []
        last_index = 0
        for stmt in imports:
            start, end = stmt.get_old_location()
            after_removing.extend(lines[last_index:start - 1])
            last_index = end - 1
            for i in range(start, end):
                after_removing.append('')
        after_removing.extend(lines[last_index:])
        return after_removing

    def _first_non_blank_line(self, lines, lineno):
        # Index of the first line at or after `lineno` that is not blank.
        result = lineno
        for line in lines[lineno:]:
            if line.strip() == '':
                result += 1
            else:
                break
        return result

    def add_import(self, import_info):
        """Add `import_info`, merging into an existing statement when one
        accepts it, otherwise appending a new statement."""
        visitor = actions.AddingVisitor(self.pycore, [import_info])
        for import_statement in self.imports:
            if import_statement.accept(visitor):
                break
        else:
            # No existing statement absorbed it; append a fresh one.
            lineno = self._get_new_import_lineno()
            blanks = self._get_new_import_blanks()
            self.imports.append(importinfo.ImportStatement(
                import_info, lineno, lineno,
                blank_lines=blanks))

    def _get_new_import_blanks(self):
        return 0

    def _get_new_import_lineno(self):
        if self.imports:
            return self.imports[-1].end_line
        return 1

    def filter_names(self, can_select):
        visitor = actions.RemovingVisitor(
            self.pycore, self._current_folder(), can_select)
        for import_statement in self.imports:
            import_statement.accept(visitor)

    def expand_stars(self):
        """Replace ``from mod import *`` with the names actually used."""
        can_select = _OneTimeSelector(self._get_unbound_names(self.pymodule))
        visitor = actions.ExpandStarsVisitor(
            self.pycore, self._current_folder(), can_select)
        for import_statement in self.imports:
            import_statement.accept(visitor)

    def remove_duplicates(self):
        """Empty import statements that duplicate an earlier one."""
        added_imports = []
        for import_stmt in self.imports:
            visitor = actions.AddingVisitor(self.pycore,
                                            [import_stmt.import_info])
            for added_import in added_imports:
                if added_import.accept(visitor):
                    import_stmt.empty_import()
            else:
                # NOTE(review): this `else` binds to the inner `for`, which
                # has no `break`, so it always runs and every statement --
                # duplicate or not -- is appended.  Looks suspicious;
                # preserved as-is.
                added_imports.append(import_stmt)

    def get_relative_to_absolute_list(self):
        # Collect relative imports that should be rewritten as absolute.
        visitor = rope.refactor.importutils.actions.RelativeToAbsoluteVisitor(
            self.pycore, self._current_folder())
        for import_stmt in self.imports:
            if not import_stmt.readonly:
                import_stmt.accept(visitor)
        return visitor.to_be_absolute

    def get_self_import_fix_and_rename_list(self):
        # Collect imports of the module itself that need fixing/renaming.
        visitor = rope.refactor.importutils.actions.SelfImportVisitor(
            self.pycore, self._current_folder(), self.pymodule.get_resource())
        for import_stmt in self.imports:
            if not import_stmt.readonly:
                import_stmt.accept(visitor)
        return visitor.to_be_fixed, visitor.to_be_renamed

    def _current_folder(self):
        return self.pymodule.get_resource().parent

    def sort_imports(self):
        """Group imports as future/standard/third-party/project and sort
        each group, rewriting their target line numbers."""
        # IDEA: Sort from import list
        visitor = actions.SortingVisitor(self.pycore, self._current_folder())
        for import_statement in self.imports:
            import_statement.accept(visitor)
        in_projects = sorted(visitor.in_project, self._compare_imports)
        third_party = sorted(visitor.third_party, self._compare_imports)
        standards = sorted(visitor.standard, self._compare_imports)
        future = sorted(visitor.future, self._compare_imports)
        # NOTE(review): blank_lines is never used below.
        blank_lines = 0
        last_index = self._first_import_line()
        last_index = self._move_imports(future, last_index, 0)
        last_index = self._move_imports(standards, last_index, 1)
        last_index = self._move_imports(third_party, last_index, 1)
        last_index = self._move_imports(in_projects, last_index, 1)
        self.separating_lines = 2

    def _first_import_line(self):
        # First line where an import may be placed: after the module
        # docstring, backing up over any preceding blank lines.
        nodes = self.pymodule.get_ast().body
        lineno = 0
        if self.pymodule.get_doc() is not None:
            lineno = 1
        if len(nodes) > lineno:
            lineno = self.pymodule.logical_lines.logical_line_in(
                nodes[lineno].lineno)[0]
        else:
            lineno = self.pymodule.lines.length()
        while lineno > 1:
            line = self.pymodule.lines.get_line(lineno - 1)
            if line.strip() == '':
                lineno -= 1
            else:
                break
        return lineno

    def _compare_imports(self, stmt1, stmt2):
        # cmp-style comparator: plain imports sort before `from` imports,
        # then alphabetically by statement text.
        str1 = stmt1.get_import_statement()
        str2 = stmt2.get_import_statement()
        if str1.startswith('from ') and not str2.startswith('from '):
            return 1
        if not str1.startswith('from ') and str2.startswith('from '):
            return -1
        return cmp(str1, str2)

    def _move_imports(self, imports, index, blank_lines):
        # Assign consecutive target lines to `imports` starting at `index`;
        # only the first statement of the group carries the blank lines.
        if imports:
            imports[0].move(index, blank_lines)
            index += 1
            if len(imports) > 1:
                for stmt in imports[1:]:
                    stmt.move(index)
                    index += 1
        return index

    def handle_long_imports(self, maxdots, maxlength):
        """Shorten imports exceeding `maxdots` dots or `maxlength` chars;
        return the renames that callers must perform."""
        visitor = actions.LongImportVisitor(
            self._current_folder(), self.pycore, maxdots, maxlength)
        for import_statement in self.imports:
            if not import_statement.readonly:
                import_statement.accept(visitor)
        for import_info in visitor.new_imports:
            self.add_import(import_info)
        return visitor.to_be_renamed

    def remove_pyname(self, pyname):
        """Removes pyname when imported in ``from mod import x``"""
        visitor = actions.RemovePyNameVisitor(self.pycore, self.pymodule,
                                              pyname, self._current_folder())
        for import_stmt in self.imports:
            import_stmt.accept(visitor)


class _OneTimeSelector(object):
    """Callable predicate that selects a dotted name (and its prefixes) at
    most once; later queries for an already-selected name return False.
    """

    def __init__(self, names):
        self.names = names
        self.selected_names = set()

    def __call__(self, imported_primary):
        if self._can_name_be_added(imported_primary):
            for name in self._get_dotted_tokens(imported_primary):
                self.selected_names.add(name)
            return True
        return False

    def _get_dotted_tokens(self, imported_primary):
        # 'a.b.c' -> 'a', 'a.b', 'a.b.c'
        tokens = imported_primary.split('.')
        for i in range(len(tokens)):
            yield '.'.join(tokens[:i + 1])

    def _can_name_be_added(self, imported_primary):
        for name in self._get_dotted_tokens(imported_primary):
            if name in self.names and name not in self.selected_names:
                return True
        return False


class _UnboundNameFinder(object):
    """AST-visitor base class collecting names that are used but not bound.

    Subclasses define the scope lookup (`is_bound`) and where found names
    accumulate (`add_unbound`); nested scopes are walked with
    `_LocalUnboundNameFinder` children.
    """

    def __init__(self, pyobject):
        self.pyobject = pyobject

    def _visit_child_scope(self, node):
        pyobject = self.pyobject.get_module().get_scope().\
                   get_inner_scope_for_line(node.lineno).pyobject
        visitor = _LocalUnboundNameFinder(pyobject, self)
        for child in ast.get_child_nodes(node):
            ast.walk(child, visitor)

    def _FunctionDef(self, node):
        self._visit_child_scope(node)

    def _ClassDef(self, node):
        self._visit_child_scope(node)

    def _Name(self, node):
        if self._get_root()._is_node_interesting(node) and \
           not self.is_bound(node.id):
            self.add_unbound(node.id)

    def _Attribute(self, node):
        # Collapse an attribute chain like a.b.c into one dotted primary and
        # test the whole primary for boundness.
        result = []
        while isinstance(node, ast.Attribute):
            result.append(node.attr)
            node = node.value
        if isinstance(node, ast.Name):
            result.append(node.id)
            primary = '.'.join(reversed(result))
            if self._get_root()._is_node_interesting(node) and \
               not self.is_bound(primary):
                self.add_unbound(primary)
        else:
            ast.walk(node, self)

    def _get_root(self):
        # Overridden by subclasses.
        pass

    def is_bound(self, name, propagated=False):
        pass

    def add_unbound(self, name):
        pass


class _GlobalUnboundNameFinder(_UnboundNameFinder):
    """Finds unbound names at module level, restricted to the line span of
    one wanted pyobject."""

    def __init__(self, pymodule, wanted_pyobject):
        super(_GlobalUnboundNameFinder, self).__init__(pymodule)
        self.unbound = set()
        self.names = set()
        # Module-level names bound by something other than an import.
        for name, pyname in pymodule._get_structural_attributes().items():
            if not isinstance(pyname, (rope.base.pynames.ImportedName,
                                       rope.base.pynames.ImportedModule)):
                self.names.add(name)
        wanted_scope = wanted_pyobject.get_scope()
        self.start = wanted_scope.get_start()
        self.end = wanted_scope.get_end() + 1

    def _get_root(self):
        return self

    def is_bound(self, primary, propagated=False):
        name = primary.split('.')[0]
        if name in self.names:
            return True
        return False

    def add_unbound(self, name):
        # Record every dotted prefix of the name as unbound.
        names = name.split('.')
        for i in range(len(names)):
            self.unbound.add('.'.join(names[:i + 1]))

    def _is_node_interesting(self, node):
        return self.start <= node.lineno < self.end


class _LocalUnboundNameFinder(_UnboundNameFinder):
    """Finds unbound names inside a nested (function/class) scope and
    forwards them to its parent finder."""

    def __init__(self, pyobject, parent):
        super(_LocalUnboundNameFinder, self).__init__(pyobject)
        self.parent = parent

    def _get_root(self):
        return self.parent._get_root()

    def is_bound(self, primary, propagated=False):
        name = primary.split('.')[0]
        if propagated:
            names = self.pyobject.get_scope().get_propagated_names()
        else:
            names = self.pyobject.get_scope().get_names()
        if name in names or self.parent.is_bound(name, propagated=True):
            return True
        return False

    def add_unbound(self, name):
        self.parent.add_unbound(name)


class _GlobalImportFinder(object):
    """Scans a module's AST for top-level import statements and wraps them
    in `importinfo.ImportStatement` objects."""

    def __init__(self, pymodule, pycore):
        self.current_folder = None
        if pymodule.get_resource():
            self.current_folder = pymodule.get_resource().parent
            # NOTE(review): pymodule/pycore are also assigned inside this
            # branch, duplicating the unconditional assignments below;
            # preserved as-is.
            self.pymodule = pymodule
            self.pycore = pycore
        self.imports = []
        self.pymodule = pymodule
        self.lines = self.pymodule.lines

    def visit_import(self, node, end_line):
        # Wrap a plain `import a, b` statement.
        start_line = node.lineno
        import_statement = importinfo.ImportStatement(
            importinfo.NormalImport(self._get_names(node.names)),
            start_line, end_line,
            self._get_text(start_line, end_line),
            blank_lines=self._count_empty_lines_before(start_line))
        self.imports.append(import_statement)

    def _count_empty_lines_before(self, lineno):
        result = 0
        for current in range(lineno - 1, 0, -1):
            line = self.lines.get_line(current)
            if line.strip() == '':
                result += 1
            else:
                break
        return result

    def _count_empty_lines_after(self, lineno):
        result = 0
        for current in range(lineno + 1, self.lines.length()):
            line = self.lines.get_line(current)
            if line.strip() == '':
                result += 1
            else:
                break
        return result

    def get_separating_line_count(self):
        # Blank lines separating the last import from the module body.
        if not self.imports:
            return 0
        return self._count_empty_lines_after(self.imports[-1].end_line - 1)

    def _get_text(self, start_line, end_line):
        result = []
        for index in range(start_line, end_line):
            result.append(self.lines.get_line(index))
        return '\n'.join(result)

    def visit_from(self, node, end_line):
        # Wrap a `from mod import ...` statement, preserving its relative
        # import level.
        level = 0
        if node.level:
            level = node.level
        import_info = importinfo.FromImport(
            node.module or '',  # see comment at rope.base.ast.walk
            level, self._get_names(node.names))
        start_line = node.lineno
        self.imports.append(importinfo.ImportStatement(
            import_info, node.lineno, end_line,
            self._get_text(start_line, end_line),
            blank_lines=self._count_empty_lines_before(start_line)))

    def _get_names(self, alias_names):
        # [(name, asname), ...] pairs for the aliases of one statement.
        result = []
        for alias in alias_names:
            result.append((alias.name, alias.asname))
        return result

    def find_import_statements(self):
        nodes = self.pymodule.get_ast().body
        for index, node in enumerate(nodes):
            if isinstance(node, (ast.Import, ast.ImportFrom)):
                lines = self.pymodule.logical_lines
                end_line = lines.logical_line_in(node.lineno)[1] + 1
                if isinstance(node, ast.Import):
                    self.visit_import(node, end_line)
                if isinstance(node, ast.ImportFrom):
                    self.visit_from(node, end_line)
        return self.imports
bsd-2-clause
hal0x2328/neo-python
neo/Storage/Common/DebugStorage.py
1
1150
from neo.Blockchain import GetBlockchain
from neo.Storage.Common.DBPrefix import DBPrefix
import neo.Storage.Implementation.DBFactory as DBFactory
from neo.Storage.Interface.DBProperties import DBProperties
from neo.logging import log_manager

logger = log_manager.getLogger('db')


class DebugStorage:
    """Singleton wrapper around a clone of the blockchain's database,
    used as a scratch storage area for debugging.
    """

    # Class-level singleton holder (name-mangled to _DebugStorage__instance).
    __instance = None

    @property
    def db(self):
        # Read-only access to the underlying cloned database.
        return self._db

    def reset(self):
        # Delete every key under the ST_Storage prefix, emptying the debug
        # storage without touching keys under other prefixes.
        with self._db.openIter(
                DBProperties(prefix=DBPrefix.ST_Storage,
                             include_value=False)) as it:
            for key in it:
                self._db.delete(key)

    def __init__(self):
        # Clone the live blockchain DB into a dedicated debug-storage DB.
        # Fails (and re-raises) if the debug leveldb cannot be opened, e.g.
        # when another process already holds it.
        try:
            self._db = GetBlockchain().Default().GetDB().cloneDatabaseStorage(
                DBFactory.getDebugStorageDB())
        except Exception as e:
            logger.info("DEBUG leveldb unavailable, you may already be running this process: %s " % e)
            raise Exception('DEBUG Leveldb Unavailable %s ' % e)

    @staticmethod
    def instance():
        # Lazily create and cache the singleton instance.
        if not DebugStorage.__instance:
            DebugStorage.__instance = DebugStorage()
        return DebugStorage.__instance
mit
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-3.3.0/Lib/test/test_py_compile.py
42
2122
import imp
import os
import py_compile
import shutil
import tempfile
import unittest

from test import support, script_helper


class PyCompileTests(unittest.TestCase):
    """Tests for py_compile.compile()'s handling of explicit, cached and
    relative bytecode target paths.
    """

    def setUp(self):
        # A scratch directory holding a one-line source module, plus the two
        # places its bytecode may land: the legacy side-by-side .pyc and the
        # PEP 3147 __pycache__ location.
        self.directory = tempfile.mkdtemp()
        self.source_path = os.path.join(self.directory, '_test.py')
        self.pyc_path = self.source_path + 'c'
        self.cache_path = imp.cache_from_source(self.source_path)
        self.cwd_drive = os.path.splitdrive(os.getcwd())[0]
        # In these tests we compute relative paths.  When using Windows, the
        # current working directory path and the 'self.source_path' might be
        # on different drives.  Therefore we need to switch to the drive where
        # the temporary source file lives.
        drive = os.path.splitdrive(self.source_path)[0]
        if drive:
            os.chdir(drive)
        with open(self.source_path, 'w') as file:
            file.write('x = 123\n')

    def tearDown(self):
        shutil.rmtree(self.directory)
        if self.cwd_drive:
            os.chdir(self.cwd_drive)

    def test_absolute_path(self):
        # An explicit cfile target must be honoured and the __pycache__
        # location left untouched.
        py_compile.compile(self.source_path, self.pyc_path)
        self.assertTrue(os.path.exists(self.pyc_path))
        self.assertFalse(os.path.exists(self.cache_path))

    def test_cache_path(self):
        # Without cfile, bytecode goes to the __pycache__ location.
        py_compile.compile(self.source_path)
        self.assertTrue(os.path.exists(self.cache_path))

    def test_cwd(self):
        # Compiling with paths relative to the current directory.
        cwd = os.getcwd()
        os.chdir(self.directory)
        py_compile.compile(os.path.basename(self.source_path),
                           os.path.basename(self.pyc_path))
        os.chdir(cwd)
        self.assertTrue(os.path.exists(self.pyc_path))
        self.assertFalse(os.path.exists(self.cache_path))

    def test_relative_path(self):
        # Explicit relative source and target paths are honoured as given.
        py_compile.compile(os.path.relpath(self.source_path),
                           os.path.relpath(self.pyc_path))
        self.assertTrue(os.path.exists(self.pyc_path))
        self.assertFalse(os.path.exists(self.cache_path))


def test_main():
    support.run_unittest(PyCompileTests)


if __name__ == "__main__":
    test_main()
mit
kailIII/emaresa
trunk.cl/honorarios/honorarios.py
1
83496
# -*- coding: utf-8 -*- ############################################################################## # # Author: OpenDrive Ltda # Copyright (c) 2013 Opendrive Ltda # # WARNING: This program as such is intended to be used by professional # programmers who take the whole responsibility of assessing all potential # consequences resulting from its eventual inadequacies and bugs # End users who are looking for a ready-to-use solution with commercial # guarantees and support are strongly advised to contract a Free Software # Service Company # # This program is Free Software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
# ############################################################################## import time from lxml import etree import openerp.addons.decimal_precision as dp import openerp.exceptions from openerp import netsvc #from openerp import pooler from openerp.osv import fields, osv, orm from openerp.tools.translate import _ class account_honorarios(osv.osv): def _amount_all(self, cr, uid, ids, name, args, context=None): res = {} for fees in self.browse(cr, uid, ids, context=context): res[fees.id] = { 'amount_untaxed': 0.0, 'amount_tax': 0.0, 'amount_total': 0.0 } for line in fees.fees_line: res[fees.id]['amount_untaxed'] += line.price_subtotal for line in fees.tax_line: res[fees.id]['amount_tax'] += line.amount res[fees.id]['amount_total'] = res[fees.id]['amount_untaxed'] - res[fees.id]['amount_tax'] return res def _get_journal(self, cr, uid, context=None): if context is None: context = {} user = self.pool.get('res.users').browse(cr, uid, uid, context=context) company_id = context.get('company_id', user.company_id.id) journal_obj = self.pool.get('account.journal') domain = [('company_id', '=', company_id)] res = journal_obj.search(cr, uid, domain, limit=1) return res and res[0] or False def _get_currency(self, cr, uid, context=None): res = False journal_id = self._get_journal(cr, uid, context=context) if journal_id: journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context) res = journal.currency and journal.currency.id or journal.company_id.currency_id.id return res def _get_journal_analytic(self, cr, uid, type_inv, context=None): ########################################################################################################################### ########################################################################################################################### #########################Revisar en account/project/project.py por los tipos############################################### 
########################################################################################################################### ########################################################################################################################### result = self.pool.get('account.analytic.journal').search(cr, uid, ['type','=','purchase'], context=context) if not result: raise osv.except_osv(_('No Analytic Journal!'),_("You must define an analytic journal (purchase) for ballot fees!")) return result[0] # def _get_type(self, cr, uid, context=None): # if context is None: # context = {} # return context.get('type', 'out_invoice') def _reconciled(self, cr, uid, ids, name, args, context=None): res = {} wf_service = netsvc.LocalService("workflow") for fee in self.browse(cr, uid, ids, context=context): res[fee.id] = self.test_paid(cr, uid, [fee.id]) if not res[fee.id] and fee.state == 'paid': wf_service.trg_validate(uid, 'account.fees', fee.id, 'open_test', cr) return res def _amount_residual(self, cr, uid, ids, name, args, context=None): """Function of the field residua. 
It computes the residual amount (balance) for each invoice""" if context is None: context = {} ctx = context.copy() result = {} currency_obj = self.pool.get('res.currency') for fees in self.browse(cr, uid, ids, context=context): nb_fee_in_partial_rec = max_fees_id = 0 result[fees.id] = 0.0 if fees.move_id: for aml in fees.move_id.line_id: if aml.account_id.type in ('receivable','payable'): if aml.currency_id and aml.currency_id.id == fees.currency_id.id: result[fees.id] += aml.amount_residual_currency else: ctx['date'] = aml.date result[fees.id] += currency_obj.compute(cr, uid, aml.company_id.currency_id.id, fees.currency_id.id, aml.amount_residual, context=ctx) if aml.reconcile_partial_id.line_partial_ids: #we check if the ballot fees is partially reconciled and if there are other ballot fees #involved in this partial reconciliation (and we sum these ballot fees) for line in aml.reconcile_partial_id.line_partial_ids: if line.fees: nb_fee_in_partial_rec += 1 #store the max ballot fees id as for this ballot fees we will make a balance instead of a simple division max_fees_id = max(max_fees_id, line.fees.id) if nb_fee_in_partial_rec: #if there are several ballot fees in a partial reconciliation, we split the residual by the number #of ballot fees to have a sum of residual amounts that matches the partner balance new_value = currency_obj.round(cr, uid, fees.currency_id, result[fees.id] / nb_fee_in_partial_rec) if fees.id == max_fees_id: #if it's the last the ballo fees of the bunch of ballot fees partially reconciled together, we make a #balance to avoid rounding errors result[fees.id] = result[fees.id] - ((nb_fee_in_partial_rec - 1) * new_value) else: result[fees.id] = new_value #prevent the residual amount on the ballot fees to be less than 0 result[fees.id] = max(result[fees.id], 0.0) return result # Give Journal Items related to the payment reconciled to this ballot fees # Return ids of partial and total payments related to the selected ballot fees def 
_get_lines(self, cr, uid, ids, name, arg, context=None): res = {} for fees in self.browse(cr, uid, ids, context=context): id = fees.id res[id] = [] if not fees.move_id: continue data_lines = [x for x in fees.move_id.line_id if x.account_id.id == fees.account_id.id] partial_ids = [] for line in data_lines: ids_line = [] if line.reconcile_id: ids_line = line.reconcile_id.line_id elif line.reconcile_partial_id: ids_line = line.reconcile_partial_id.line_partial_ids l = map(lambda x: x.id, ids_line) partial_ids.append(line.id) res[id] =[x for x in l if x <> line.id and x not in partial_ids] return res def _get_fees_line(self, cr, uid, ids, context=None): result = {} for line in self.pool.get('account.fees.line').browse(cr, uid, ids, context=context): result[line.fees_id.id] = True return result.keys() def _get_fees_tax(self, cr, uid, ids, context=None): result = {} for tax in self.pool.get('account.fees.tax').browse(cr, uid, ids, context=context): result[tax.fees_id.id] = True return result.keys() def _compute_lines(self, cr, uid, ids, name, args, context=None): result = {} for fees in self.browse(cr, uid, ids, context=context): src = [] lines = [] if fees.move_id: for m in fees.move_id.line_id: temp_lines = [] if m.reconcile_id: temp_lines = map(lambda x: x.id, m.reconcile_id.line_id) elif m.reconcile_partial_id: temp_lines = map(lambda x: x.id, m.reconcile_partial_id.line_partial_ids) lines += [x for x in temp_lines if x not in lines] src.append(m.id) lines = filter(lambda x: x not in src, lines) result[fees.id] = lines return result def _get_fees_from_line(self, cr, uid, ids, context=None): move = {} for line in self.pool.get('account.move.line').browse(cr, uid, ids, context=context): if line.reconcile_partial_id: for line2 in line.reconcile_partial_id.line_partial_ids: move[line2.move_id.id] = True if line.reconcile_id: for line2 in line.reconcile_id.line_id: move[line2.move_id.id] = True fees_ids = [] if move: fees_ids = self.pool.get('account.fees').search(cr, 
uid, [('move_id','in',move.keys())], context=context) return fees_ids def _get_fees_from_reconcile(self, cr, uid, ids, context=None): move = {} for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context): for line in r.line_partial_ids: move[line.move_id.id] = True for line in r.line_id: move[line.move_id.id] = True fees_ids = [] if move: fees_ids = self.pool.get('account.fees').search(cr, uid, [('move_id','in',move.keys())], context=context) return fees_ids _name = "account.fees" _inherit = ['mail.thread'] _description = 'Honorarios' _order = "id desc" # _track = { # 'type': { # }, # 'state': { # 'account.mt_invoice_paid': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'paid' and obj['type'] in ('out_invoice', 'out_refund'), # 'account.mt_invoice_validated': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'open' and obj['type'] in ('out_invoice', 'out_refund'), # }, # } _columns = { 'name': fields.char('Description', size=64, select=True, readonly=True),# states={'draft':[('readonly',False)]}), 'origin': fields.char('Source Document', size=64, help="Reference of the document that produced this ballot fees.",\ readonly=True), #states={'draft':[('readonly',False)]}), 'fees_number':fields.char('Fees Number', size=64, help="The reference of this ballot fees as provided by the lender.",\ readonly=True),# states={'draft':[('readonly',False)]}), 'number':fields.related('move_id','name', type='char', readonly=True, size=64, relation='account.move',\ store=True, string='Number'), 'internal_number':fields.char('Ballot Fees Number', size=32, readonly=True,\ help="Unique number of the ballot fees, computed automatically when the ballot fees is created."), 'comment': fields.text('Additional Information'), 'state':fields.selection([ ('draft','Draft'),\ ('open','Open'),\ ('paid','Paid'),\ ('cancel','Cancelled'),\ ],'Status', select=True, readonly=True, track_visibility='onchange',\ help=' * The \'Draft\' status is used when a user is 
encoding a new and unconfirmed Invoice. \ \n* The \'Open\' status is used when user create ballot fees,a ballot fees number is generated.Its in open status till user does not pay ballot fees. \ \n* The \'Paid\' status is set automatically when the ballot fees is paid. Its related journal entries may or may not be reconciled. \ \n* The \'Cancelled\' status is used when user cancel ballot fees.'), 'sent': fields.boolean('Sent', readonly=True, help="It indicates that the ballot fees has been sent."), 'date_fees':fields.date('Ballor Fees Date', readonly=True,\ select=True, help="Keep empty to use the current date"), 'partner_id':fields.many2one('res.partner', 'Partner', change_default=True, readonly=True, required=True,\ track_visibility='always'), 'payment_term': fields.many2one('account.payment.term', 'Payment Terms',readonly=True,\ help="If you use payment terms, the due date will be computed automatically at the generation "\ "of accounting entries. If you keep the payment term and the due date empty, it means direct payment. 
"\ "The payment term may compute several due dates, for example 50% now, 50% in one month."), 'period_id': fields.many2one('account.period', 'Force Period', \ domain=[('state','<>','done')],\ help="Keep empty to use the period of the validation(ballot fees) date.", readonly=True), 'account_id':fields.many2one('account.account', 'Account', required=True, readonly=True,\ help="The partner account used for this ballot fees."), 'fees_line':fields.one2many('account.fees.line', 'fees_id', 'Ballot Fees Lines', readonly=True), 'tax_line':fields.one2many('account.fees.tax', 'fees_id', 'Tax Lines', readonly=True), 'move_id':fields.many2one('account.move', 'Journal Entry', readonly=True, select=1, ondelete='restrict',\ help="Link to the automatically generated Journal Items."), 'amount_untaxed': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Bruto', \ track_visibility='always', store={ 'account.fees': (lambda self, cr, uid, ids, c={}: ids, ['fees_line'], 20), 'account.fees.tax': (_get_fees_tax, None, 20), 'account.fees.line': (_get_fees_line,\ ['price_unit','fees_line_tax_id','quantity','discount','fees_id'], 20), }, multi='all'), 'amount_tax':fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Retencion',\ store={ 'account.fees': (lambda self, cr, uid, ids, c={}: ids, ['fees_line'], 20), 'account.fees.tax': (_get_fees_tax, None, 20), 'account.fees.line': (_get_fees_line, ['price_unit','fees_line_tax_id','quantity','discount','fees_id'], 20), }, multi='all'), 'amount_total': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Total a Pagar',\ store={ 'account.fees': (lambda self, cr, uid, ids, c={}: ids, ['fees_line'], 20), 'account.fees.tax': (_get_fees_tax, None, 20), 'account.fees.line': (_get_fees_line, ['price_unit','fees_line_tax_id','quantity','discount','fees_id'], 20), }, multi='all'), 'currency_id':fields.many2one('res.currency', 'Currency', required=True, readonly=True,\ 
track_visibility='always'), 'journal_id':fields.many2one('account.journal', 'Journal', required=True, readonly=True), 'check_total': fields.float('Verification Total', digits_compute=dp.get_precision('Account'), readonly=True), 'company_id': fields.many2one('res.company', 'Company', required=True, change_default=True, readonly=True), 'reconciled': fields.function(_reconciled, string='Paid/Reconciled', type='boolean',\ store={ 'account.fees': (lambda self, cr, uid, ids, c={}: ids, None, 50), # Check if we can remove ? 'account.move.line': (_get_fees_from_line, None, 50), 'account.move.reconcile': (_get_fees_from_reconcile, None, 50), },\ help="It indicates that the ballot fees has been paid and the journal entry of the ballot fees has been reconciled\ with one or several journal entries of payment."), 'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',\ help='Bank Account Number to which the ballot fees will be paid. A Company bank account if this is a Customer Ballot Fees,\ otherwise a Partner bank account number.', readonly=True),# states={'draft':[('readonly',False)]}), 'move_lines':fields.function(_get_lines, type='many2many', relation='account.move.line', string='Entry Lines'), 'residual': fields.function(_amount_residual, digits_compute=dp.get_precision('Account'), string='Balance',\ store={ 'account.fees': (lambda self, cr, uid, ids, c={}: ids, ['fees_line','move_id'], 50), 'account.fees.tax': (_get_fees_tax, None, 50), 'account.fees.line': (_get_fees_line, ['price_unit','fees_line_tax_id','quantity','discount','fees_id'], 50), 'account.move.line': (_get_fees_from_line, None, 50), 'account.move.reconcile': (_get_fees_from_reconcile, None, 50), }, help="Remaining amount due."), 'payment_ids': fields.function(_compute_lines, relation='account.move.line', type="many2many", string='Payments'), # 'move_name': fields.char('Journal Entry', size=64, readonly=True, states={'draft':[('readonly',False)]}), 'user_id': fields.many2one('res.users', 
'Salesperson', readonly=True, track_visibility='onchange'), 'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True), } _defaults = { 'state': 'draft', 'journal_id': _get_journal, 'currency_id': _get_currency, 'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.invoice', context=c), 'check_total': 0.0, 'internal_number': False, 'user_id': lambda s, cr, u, c: u, 'sent': False, } _sql_constraints = [ ('number_uniq', 'unique(number, company_id, journal_id)', 'Ballot Fees Number must be unique per Company!'), ] def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False): journal_obj = self.pool.get('account.journal') if context is None: context = {} if context.get('active_model', '') in ['res.partner'] and context.get('active_ids', False) and context['active_ids']: partner = self.pool.get(context['active_model']).read(cr, uid, context['active_ids'], ['supplier','customer'])[0] if not view_type: view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.fees.tree')]) view_type = 'tree' if view_type == 'form': if partner['supplier'] and not partner['customer']: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.fees.supplier.form')]) elif partner['customer'] and not partner['supplier']: view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.fees.form')]) if view_id and isinstance(view_id, (list, tuple)): view_id = view_id[0] res = super(account_honorarios,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu) #######################################OJOOOOOOOOOOOOOOO############################## type = context.get('journal_type', False) for field in res['fields']: if field == 'journal_id' and type: journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', type)], context=context, limit=None, 
name_get_uid=1) res['fields'][field]['selection'] = journal_select doc = etree.XML(res['arch']) return res def get_log_context(self, cr, uid, context=None): if context is None: context = {} res = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'fees_form') view_id = res and res[1] or False context['view_id'] = view_id return context def ballot_fees_print(self, cr, uid, ids, context=None): ''' This function prints the ballot fees and mark it as sent, so that we can see more easily the next step of the workflow ''' assert len(ids) == 1, 'This option should only be used for a single id at a time.' self.write(cr, uid, ids, {'sent': True}, context=context) datas = { 'ids': ids, 'model': 'account.fees', 'form': self.read(cr, uid, ids[0], context=context) } return { 'type': 'ir.actions.report.xml', 'report_name': 'account.fees', 'datas': datas, 'nodestroy' : True } def action_fees_sent(self, cr, uid, ids, context=None): ''' This function opens a window to compose an email, with the edi invoice template message loaded by default ''' assert len(ids) == 1, 'This option should only be used for a single id at a time.' 
ir_model_data = self.pool.get('ir.model.data') try: ##############################OJO AKIIIIIII###################### template_id = ir_model_data.get_object_reference(cr, uid, 'account', 'email_template_edi_invoice')[1] ############################################################################################ except ValueError: template_id = False try: compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1] except ValueError: compose_form_id = False ctx = dict(context) ctx.update({ 'default_model': 'account.fees', 'default_res_id': ids[0], 'default_use_template': bool(template_id), 'default_template_id': template_id, 'default_composition_mode': 'comment', 'mark_fees_as_sent': True, }) return { 'type': 'ir.actions.act_window', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'mail.compose.message', 'views': [(compose_form_id, 'form')], 'view_id': compose_form_id, 'target': 'new', 'context': ctx, } def onchange_journal_id(self, cr, uid, ids, journal_id=False, context=None): result = {} if journal_id: journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context) currency_id = journal.currency and journal.currency.id or journal.company_id.currency_id.id company_id = journal.company_id.id result = { 'value': { 'currency_id': currency_id, 'company_id': company_id, } } return result def onchange_company_id(self, cr, uid, ids, company_id, part_id, fees_line, currency_id): #TODO: add the missing context parameter when forward-porting in trunk so we can remove # this hack! 
context = self.pool['res.users'].context_get(cr, uid) val = {} dom = {} obj_journal = self.pool.get('account.journal') account_obj = self.pool.get('account.account') fee_line_obj = self.pool.get('account.fees.line') if company_id and part_id and type: acc_id = False partner_obj = self.pool.get('res.partner').browse(cr,uid,part_id) if partner_obj.property_account_payable and partner_obj.property_account_receivable: if partner_obj.property_account_payable.company_id.id != company_id and partner_obj.property_account_receivable.company_id.id != company_id: property_obj = self.pool.get('ir.property') rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(part_id)+''),('company_id','=',company_id)]) pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('res_id','=','res.partner,'+str(part_id)+''),('company_id','=',company_id)]) if not rec_pro_id: rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('company_id','=',company_id)]) if not pay_pro_id: pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('company_id','=',company_id)]) rec_line_data = property_obj.read(cr, uid, rec_pro_id, ['name','value_reference','res_id']) pay_line_data = property_obj.read(cr, uid, pay_pro_id, ['name','value_reference','res_id']) rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False if not rec_res_id and not pay_res_id: raise osv.except_osv(_('Configuration Error!'), _('Cannot find a chart of account, you should create one from Settings\Configuration\Accounting menu.')) acc_id = rec_res_id val= {'account_id': acc_id} if ids: if company_id: fee_obj = self.browse(cr,uid,ids) for line in 
fee_obj[0].fees_line: if line.account_id: if line.account_id.company_id.id != company_id: result_id = account_obj.search(cr, uid, [('name','=',line.account_id.name),('company_id','=',company_id)]) if not result_id: raise osv.except_osv(_('Configuration Error!'), _('Cannot find a chart of account, you should create one from Settings\Configuration\Accounting menu.')) fee_line_obj.write(cr, uid, [line.id], {'account_id': result_id[-1]}) else: if fees_line: for fee_line in fees_line: obj_l = account_obj.browse(cr, uid, fee_line[2]['account_id']) if obj_l.company_id.id != company_id: raise osv.except_osv(_('Configuration Error!'), _('Ballot fees line account\'s company and ballot fees\'s company does not match.')) else: continue if company_id: journal_ids = obj_journal.search(cr, uid, [('company_id','=',company_id), ('type', '=', 'purchase')])#AGREGADA CONDICION A MANO if journal_ids: val['journal_id'] = journal_ids[0] ir_values_obj = self.pool.get('ir.values') ################## OJO AKIIII!!!!! 
##################### res_journal_default = ir_values_obj.get(cr, uid, 'default', 'type=%s' % ('purchase'), ['account.fees']) for r in res_journal_default: if r[1] == 'journal_id' and r[2] in journal_ids: val['journal_id'] = r[2] if not val.get('journal_id', False): journal_type_map = dict(obj_journal._columns['type'].selection) journal_type_label = self.pool['ir.translation']._get_source(cr, uid, None, ('code','selection'), context.get('lang'), #OJO AK TAMBIEN 'purchase') raise osv.except_osv(_('Configuration Error!'), _('Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.') % ('"%s"' % journal_type_label)) dom = {'journal_id': [('id', 'in', journal_ids)]} else: journal_ids = obj_journal.search(cr, uid, []) return {'value': val, 'domain': dom} def unlink(self, cr, uid, ids, context=None): if context is None: context = {} fees = self.read(cr, uid, ids, ['state','internal_number'], context=context) unlink_ids = [] for t in fees: if t['state'] not in ('draft', 'cancel'): raise openerp.exceptions.Warning(\ _('You cannot delete an ballot fees which is not draft or cancelled. You should refund it instead.')) elif t['internal_number']: raise openerp.exceptions.Warning(\ _('You cannot delete an ballot fees after it has been validated (and received a number). 
You can set it back to "Draft" state and modify its content, then re-confirm it.')) else: unlink_ids.append(t['id']) osv.osv.unlink(self, cr, uid, unlink_ids, context=context) return True def onchange_partner_id(self, cr, uid, ids, partner_id, date_fees=False, payment_term=False,\ partner_bank_id=False, company_id=False): partner_payment_term = False acc_id = False bank_id = False fiscal_position = False opt = [('uid', str(uid))] if partner_id: opt.insert(0, ('id', partner_id)) p = self.pool.get('res.partner').browse(cr, uid, partner_id) if company_id: if (p.property_account_receivable.company_id and\ (p.property_account_receivable.company_id.id != company_id)) and\ (p.property_account_payable.company_id and\ (p.property_account_payable.company_id.id != company_id)): property_obj = self.pool.get('ir.property') rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)]) pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)]) if not rec_pro_id: rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('company_id','=',company_id)]) if not pay_pro_id: pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('company_id','=',company_id)]) rec_line_data = property_obj.read(cr,uid,rec_pro_id,['name','value_reference','res_id']) pay_line_data = property_obj.read(cr,uid,pay_pro_id,['name','value_reference','res_id']) rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False if not rec_res_id and not pay_res_id: raise osv.except_osv(_('Configuration Error!'), _('Cannot find a chart of 
accounts for this company, you should create one.')) account_obj = self.pool.get('account.account') rec_obj_acc = account_obj.browse(cr, uid, [rec_res_id]) pay_obj_acc = account_obj.browse(cr, uid, [pay_res_id]) p.property_account_receivable = rec_obj_acc[0] p.property_account_payable = pay_obj_acc[0] acc_id = p.property_account_payable.id partner_payment_term = p.property_supplier_payment_term and p.property_supplier_payment_term.id or False fiscal_position = p.property_account_position and p.property_account_position.id or False if p.bank_ids: bank_id = p.bank_ids[0].id result = { 'value': { 'account_id': acc_id, 'payment_term': partner_payment_term, 'fiscal_position': fiscal_position } } result['value']['partner_bank_id'] = bank_id if payment_term != partner_payment_term: if partner_payment_term: to_update = self.onchange_payment_term_date_fees(cr, uid, ids, partner_payment_term, date_fees) result['value'].update(to_update['value']) if partner_bank_id != bank_id: to_update = self.onchange_partner_bank(cr, uid, ids, bank_id) result['value'].update(to_update['value']) return result def onchange_payment_term_date_fees(self, cr, uid, ids, payment_term_id, date_fees): res = {} if not date_fees: res = { 'value':{ 'date_fees': time.strftime('%Y-%m-%d') } } return res # def onchang_invoice_line(self, cr, uid, ids, lines): # return {} def onchange_partner_bank(self, cursor, user, ids, partner_bank_id=False): return {'value': {}} def test_paid(self, cr, uid, ids, *args): res = self.move_line_id_payment_get(cr, uid, ids) if not res: return False ok = True for id in res: cr.execute('select reconcile_id from account_move_line where id=%s', (id,)) ok = ok and bool(cr.fetchone()[0]) return ok # Workflow stuff ################# # go from canceled state to draft state def action_cancel_draft(self, cr, uid, ids, *args): self.write(cr, uid, ids, {'state':'draft'}) wf_service = netsvc.LocalService("workflow") for fee_id in ids: wf_service.trg_delete(uid, 'account.fees', fee_id, cr) 
wf_service.trg_create(uid, 'account.fees', fee_id, cr) return True # return the ids of the move lines which has the same account than the invoice # whose id is in ids def move_line_id_payment_get(self, cr, uid, ids, *args): if not ids: return [] result = self.move_line_id_payment_gets(cr, uid, ids, *args) return result.get(ids[0], []) def move_line_id_payment_gets(self, cr, uid, ids, *args): res = {} if not ids: return res cr.execute('SELECT i.id, l.id '\ 'FROM account_move_line l '\ 'LEFT JOIN account_fees i ON (i.move_id=l.move_id) '\ 'WHERE i.id IN %s '\ 'AND l.account_id=i.account_id', (tuple(ids),)) for r in cr.fetchall(): res.setdefault(r[0], []) res[r[0]].append( r[1] ) return res def button_reset_taxes(self, cr, uid, ids, context=None): if context is None: context = {} ctx = context.copy() ait_obj = self.pool.get('account.fees.tax') for id in ids: cr.execute("DELETE FROM account_fees_tax WHERE fees_id=%s AND manual is False", (id,)) partner = self.browse(cr, uid, id, context=ctx).partner_id if partner.lang: ctx.update({'lang': partner.lang}) for taxe in ait_obj.compute(cr, uid, id, context=ctx).values(): ait_obj.create(cr, uid, taxe) # Update the stored value (fields.function), so we write to trigger recompute self.pool.get('account.fees').write(cr, uid, ids, {'fees_line':[]}, context=ctx) return True def confirm_paid(self, cr, uid, ids, context=None): if context is None: context = {} self.write(cr, uid, ids, {'state':'paid'}, context=context) return True def copy(self, cr, uid, id, default=None, context=None): default = default or {} default.update({ 'state':'draft', 'number':False, 'move_id':False, 'move_name':False, 'internal_number': False, 'period_id': False, 'sent': False, }) if 'date_fees' not in default: default.update({ 'date_fees':False }) return super(account_invoice, self).copy(cr, uid, id, default, context) def action_move_create(self, cr, uid, ids, context=None): """Creates ballot fees related analytics and financial move lines""" ait_obj = 
self.pool.get('account.fees.tax') cur_obj = self.pool.get('res.currency') period_obj = self.pool.get('account.period') payment_term_obj = self.pool.get('account.payment.term') journal_obj = self.pool.get('account.journal') move_obj = self.pool.get('account.move') if context is None: context = {} for fee in self.browse(cr, uid, ids, context=context): if not fee.journal_id.sequence_id: raise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this ballot fees.')) if not fee.fees_line: raise osv.except_osv(_('No Ballot Fees Lines!'), _('Please create some ballot fees lines.')) if fee.move_id: continue ctx = context.copy() ctx.update({'lang': fee.partner_id.lang}) if not fee.date_fees: self.write(cr, uid, [fee.id], {'date_fees': fields.date.context_today(self,cr,uid,context=context)}, context=ctx) company_currency = self.pool['res.company'].browse(cr, uid, fee.company_id.id).currency_id.id # create the analytical lines # one move line per ballot fees line iml = self._get_analytic_lines(cr, uid, fee.id, context=ctx) # check if taxes are all computed compute_taxes = ait_obj.compute(cr, uid, fee.id, context=ctx) self.check_tax_lines(cr, uid, fee, compute_taxes, ait_obj) # I disabled the check_total feature group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1] group_check_total = self.pool.get('res.groups').browse(cr, uid, group_check_total_id, context=context) if group_check_total and uid in [x.id for x in group_check_total.users]: if (abs(fee.check_total - fee.amount_total) >= (fee.currency_id.rounding/2.0)): raise osv.except_osv(_('Bad Total!'), _('Please verify the price of the ballot fees!\nThe encoded total does not match the computed total.')) if fee.payment_term: total_fixed = total_percent = 0 for line in fee.payment_term.line_ids: if line.value == 'fixed': total_fixed += line.value_amount if line.value == 'procent': total_percent += line.value_amount 
total_fixed = (total_fixed * 100) / (fee.amount_total or 1.0) if (total_fixed + total_percent) > 100: raise osv.except_osv(_('Error!'), _("Cannot create the ballot fees.\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total ballot fees amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.")) # one move line per tax line iml += ait_obj.move_line_get(cr, uid, fee.id) entry_type = 'journal_pur_voucher' diff_currency_p = fee.currency_id.id <> company_currency # create one move line for the total and possibly adjust the other lines amount total = 0 total_currency = 0 total, total_currency, iml = self.compute_fees_totals(cr, uid, fee, company_currency, iml, context=ctx) acc_id = fee.account_id.id name = fee['name'] or 'servicio de honorarios' totlines = False if fee.payment_term: totlines = payment_term_obj.compute(cr, uid, fee.payment_term.id, total, fee.date_fees or False, context=ctx) if totlines: res_amount_currency = total_currency i = 0 ctx.update({'date': fee.date_fees}) for t in totlines: if fee.currency_id.id != company_currency: amount_currency = cur_obj.compute(cr, uid, company_currency, fee.currency_id.id, t[1], context=ctx) else: amount_currency = False # last line add the diff res_amount_currency -= amount_currency or 0 i += 1 if i == len(totlines): amount_currency += res_amount_currency iml.append({ 'type': 'dest', 'name': name, 'price': t[1], 'account_id': acc_id, 'date_maturity': t[0], 'amount_currency': diff_currency_p \ and amount_currency or False, 'currency_id': diff_currency_p \ and fee.currency_id.id or False, }) else: iml.append({ 'type': 'dest', 'name': name, 'price': total, 'account_id': acc_id, 'date_maturity': fee.date_fees or False, 'amount_currency': diff_currency_p \ and total_currency or False, 'currency_id': diff_currency_p \ and fee.currency_id.id or False, }) #Guarda el monto en moneda de la empresa monto_nacional = 0 tax_nacional = 0 
#Guarda el monto de la unidad monto_unidad = 0 tax_unidad = 0 #Guarda el monto de la divisa de la boleta monto_divisa = 0 tax_divisa = 0 for fees_line in iml: if 'type' in fees_line and 'src' in fees_line.get('type'): monto_nacional += fees_line['price'] monto_unidad += fees_line['price_unit'] if fees_line['amount_currency']: monto_divisa += fees_line['amount_currency'] if 'type' in fees_line and 'tax' in fees_line.get('type'): tax_nacional = fees_line['price'] tax_unidad = fees_line['price_unit'] if fees_line['amount_currency']: tax_divisa = fees_line['amount_currency'] fees_line['price'] *= -1 if 'type' in fees_line and 'dest' in fees_line.get('type'): fees_line['price'] = (monto_nacional - tax_nacional)*-1 fees_line['price_unit'] = monto_unidad - tax_unidad if monto_divisa: fees_line['amount_currency'] = (monto_divisa - tax_divisa)*-1 date = fee.date_fees or time.strftime('%Y-%m-%d') part = self.pool.get("res.partner")._find_accounting_partner(fee.partner_id) line = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, date, context=ctx)),iml) line = self.group_lines(cr, uid, iml, line, fee) journal_id = fee.journal_id.id journal = journal_obj.browse(cr, uid, journal_id, context=ctx) if journal.centralisation: raise osv.except_osv(_('User Error!'), _('You cannot create an ballot fees on a centralized journal. 
Uncheck the centralized counterpart box in the related journal from the configuration menu.')) line = self.finalize_fees_move_lines(cr, uid, fee, line) move = { 'line_id': line, 'journal_id': journal_id, 'date': date, 'narration': fee.comment, 'company_id': fee.company_id.id, } period_id = fee.period_id and fee.period_id.id or False ctx.update(company_id=fee.company_id.id, account_period_prefer_normal=True) if not period_id: period_ids = period_obj.find(cr, uid, fee.date_fees, context=ctx) period_id = period_ids and period_ids[0] or False if period_id: move['period_id'] = period_id for i in line: i[2]['period_id'] = period_id ctx.update(fees=fee) move_id = move_obj.create(cr, uid, move, context=ctx) new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name # make the ballot fees point to that move self.write(cr, uid, [fee.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx) # Pass ballot fees in context in method post: used if you want to get the same # account move reference when creating the same ballot fees after a cancelled one: move_obj.post(cr, uid, [move_id], context=ctx) self._log_event(cr, uid, ids) return True def fees_validate(self, cr, uid, ids, context=None): self.write(cr, uid, ids, {'state':'open'}, context=context) return True def action_number(self, cr, uid, ids, context=None): if context is None: context = {} #TODO: not correct fix but required a frech values before reading it. 
self.write(cr, uid, ids, {}) for obj_fee in self.browse(cr, uid, ids, context=context): number = obj_fee.number move_id = obj_fee.move_id and obj_fee.move_id.id or False self.write(cr, uid, ids, {'internal_number': number}) ref = self._convert_ref(cr, uid, number) cr.execute('UPDATE account_move SET ref=%s ' \ 'WHERE id=%s AND (ref is null OR ref = \'\')', (ref, move_id)) cr.execute('UPDATE account_move_line SET ref=%s ' \ 'WHERE move_id=%s AND (ref is null OR ref = \'\')', (ref, move_id)) cr.execute('UPDATE account_analytic_line SET ref=%s ' \ 'FROM account_move_line ' \ 'WHERE account_move_line.move_id = %s ' \ 'AND account_analytic_line.move_id = account_move_line.id', (ref, move_id)) return True def action_cancel(self, cr, uid, ids, context=None): if context is None: context = {} account_move_obj = self.pool.get('account.move') fees = self.read(cr, uid, ids, ['move_id', 'payment_ids']) move_ids = [] # ones that we will need to remove for i in fees: if i['move_id']: move_ids.append(i['move_id'][0]) if i['payment_ids']: account_move_line_obj = self.pool.get('account.move.line') pay_ids = account_move_line_obj.browse(cr, uid, i['payment_ids']) for move_line in pay_ids: if move_line.reconcile_partial_id and move_line.reconcile_partial_id.line_partial_ids: raise osv.except_osv(_('Error!'), _('You cannot cancel an ballot fees which is partially paid. 
You need to unreconcile related payment entries first.')) # First, set the ballot fees as cancelled and detach the move ids self.write(cr, uid, ids, {'state':'cancel', 'move_id':False}) if move_ids: # second, invalidate the move(s) account_move_obj.button_cancel(cr, uid, move_ids, context=context) # delete the move this ballot fees was pointing to # Note that the corresponding move_lines and move_reconciles # will be automatically deleted too account_move_obj.unlink(cr, uid, move_ids, context=context) self._log_event(cr, uid, ids, -1.0, 'Cancel Ballot Fees') return True def action_date_assign(self, cr, uid, ids, *args): for fee in self.browse(cr, uid, ids): res = self.onchange_payment_term_date_fees(cr, uid, fee.id, fee.payment_term.id, fee.date_fees) if res and res['value']: self.write(cr, uid, [fee.id], res['value']) return True def _get_analytic_lines(self, cr, uid, id, context=None): if context is None: context = {} fee = self.browse(cr, uid, id) cur_obj = self.pool.get('res.currency') company_currency = self.pool['res.company'].browse(cr, uid, fee.company_id.id).currency_id.id sign = -1 iml = self.pool.get('account.fees.line').move_line_get(cr, uid, fee.id, context=context) for il in iml: if il['account_analytic_id']: if not fee.journal_id.analytic_journal_id: raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (fee.journal_id.name,)) il['analytic_lines'] = [(0,0, { 'name': il['name'], 'date': fee['date_fees'], 'account_id': il['account_analytic_id'], 'unit_amount': il['quantity'], 'amount': cur_obj.compute(cr, uid, fee.currency_id.id, company_currency, il['price'],\ context={'date': fee.date_fees}) * sign, 'product_id': il['product_id'], 'product_uom_id': il['uos_id'], 'general_account_id': il['account_id'], 'journal_id': fee.journal_id.analytic_journal_id.id, })] return iml def check_tax_lines(self, cr, uid, fee, compute_taxes, ait_obj): company_currency = self.pool['res.company'].browse(cr, uid, 
fee.company_id.id).currency_id if not fee.tax_line: for tax in compute_taxes.values(): ait_obj.create(cr, uid, tax) else: tax_key = [] for tax in fee.tax_line: if tax.manual: continue key = (tax.tax_code_id.id, tax.base_code_id.id, tax.account_id.id, tax.account_analytic_id.id) tax_key.append(key) if not key in compute_taxes: raise osv.except_osv(_('Warning!'), _('Global taxes defined, but they are not in ballot fees lines !')) base = compute_taxes[key]['base'] if abs(base - tax.base) > company_currency.rounding: raise osv.except_osv(_('Warning!'), _('Tax base different!\nClick on compute to update the tax base.')) for key in compute_taxes: if not key in tax_key: raise osv.except_osv(_('Warning!'), _('Taxes are missing!\nClick on compute button.')) def compute_fees_totals(self, cr, uid, fee, company_currency, fees_move_lines, context=None): if context is None: context={} total = 0 total_currency = 0 cur_obj = self.pool.get('res.currency') for i in fees_move_lines: if fee.currency_id.id != company_currency: context.update({'date': fee.date_fees or time.strftime('%Y-%m-%d')}) i['currency_id'] = fee.currency_id.id i['amount_currency'] = i['price'] i['price'] = cur_obj.compute(cr, uid, fee.currency_id.id, company_currency, i['price'], context=context) else: i['amount_currency'] = False i['currency_id'] = False total -= i['price'] total_currency -= i['amount_currency'] or i['price'] return total, total_currency, fees_move_lines def line_get_convert(self, cr, uid, x, part, date, context=None): return { 'date_maturity': x.get('date_maturity', False), 'partner_id': part, 'name': x['name'][:64], 'date': date, 'debit': x['price']>0 and x['price'], 'credit': x['price']<0 and -x['price'], 'account_id': x['account_id'], 'analytic_lines': x.get('analytic_lines', []), 'amount_currency': x['price']>0 and abs(x.get('amount_currency', False)) or -abs(x.get('amount_currency', False)), 'currency_id': x.get('currency_id', False), 'tax_code_id': x.get('tax_code_id', False), 
'tax_amount': x.get('tax_amount', False), 'quantity': x.get('quantity',1.00), 'product_id': x.get('product_id', False), 'product_uom_id': x.get('uos_id', False), 'analytic_account_id': x.get('account_analytic_id', False), } def group_lines(self, cr, uid, iml, line, fee): """Merge account move lines (and hence analytic lines) if ballot fees line hashcodes are equals""" if fee.journal_id.group_invoice_lines:#OJO CON ESTO ;) line2 = {} for x, y, l in line: tmp = self.fee_line_characteristic_hashcode(fee, l) if tmp in line2: am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit']) line2[tmp]['debit'] = (am > 0) and am or 0.0 line2[tmp]['credit'] = (am < 0) and -am or 0.0 line2[tmp]['tax_amount'] += l['tax_amount'] line2[tmp]['analytic_lines'] += l['analytic_lines'] else: line2[tmp] = l line = [] for key, val in line2.items(): line.append((0,0,val)) return line def fee_line_characteristic_hashcode(self, fees, fees_line): """Overridable hashcode generation for invoice lines. Lines having the same hashcode will be grouped together if the journal has the 'group line' option. Of course a module can add fields to invoice lines that would need to be tested too before merging lines or not.""" return "%s-%s-%s-%s-%s"%( fees_line['account_id'], fees_line.get('tax_code_id',"False"), fees_line.get('product_id',"False"), fees_line.get('analytic_account_id',"False"), fees_line.get('date_maturity',"False")) def finalize_fees_move_lines(self, cr, uid, fees_browse, move_lines): """finalize_invoice_move_lines(cr, uid, fees, move_lines) -> move_lines Hook method to be overridden in additional modules to verify and possibly alter the move lines to be created by an ballot fees, for special cases. 
:param fees_browse: browsable record of the ballot fees that is generating the move lines :param move_lines: list of dictionaries with the account.move.lines (as for create()) :return: the (possibly updated) final move_lines to create for this ballot fees """ return move_lines def _log_event(self, cr, uid, ids, factor=1.0, name='Open Ballot Fees'): #TODO: implement messages system return True def _convert_ref(self, cr, uid, ref): return (ref or '').replace('/','') ################### """ def button_compute(self, cr, uid, ids, context=None, set_total=False): self.button_reset_taxes(cr, uid, ids, context) for fee in self.browse(cr, uid, ids, context=context): if set_total: self.pool.get('account.fees').write(cr, uid, [inv.id], {'check_total':feee.amount_total}) return True def list_distinct_taxes(self, cr, uid, ids): invoices = self.browse(cr, uid, ids) taxes = {} for inv in invoices: for tax in inv.tax_line: if not tax['name'] in taxes: taxes[tax['name']] = {'name': tax['name']} return taxes.values() def name_get(self, cr, uid, ids, context=None): if not ids: return [] types = { 'out_invoice': _('Invoice'), 'in_invoice': _('Supplier Invoice'), 'out_refund': _('Refund'), 'in_refund': _('Supplier Refund'), } return [(r['id'], '%s %s' % (r['number'] or types[r['type']], r['name'] or '')) for r in self.read(cr, uid, ids, ['type', 'number', 'name'], context, load='_classic_write')] def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100): if not args: args = [] if context is None: context = {} ids = [] if name: ids = self.search(cr, user, [('number','=',name)] + args, limit=limit, context=context) if not ids: ids = self.search(cr, user, [('name',operator,name)] + args, limit=limit, context=context) return self.name_get(cr, user, ids, context) def _refund_cleanup_lines(self, cr, uid, lines, context=None): """ # ""Convert records to dict of values suitable for one2many line creation # :param list(browse_record) lines: records to convert # 
:return: list of command tuple for one2many line creation [(0, 0, dict of valueis), ...] # "" """ clean_lines = [] for line in lines: clean_line = {} for field in line._all_columns.keys(): if line._all_columns[field].column._type == 'many2one': clean_line[field] = line[field].id elif line._all_columns[field].column._type not in ['many2many','one2many']: clean_line[field] = line[field] elif field == 'invoice_line_tax_id': tax_list = [] for tax in line[field]: tax_list.append(tax.id) clean_line[field] = [(6,0, tax_list)] clean_lines.append(clean_line) return map(lambda x: (0,0,x), clean_lines) def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None): """ # ""Prepare the dict of values to create the new refund from the invoice. # This method may be overridden to implement custom # refund generation (making sure to call super() to establish # a clean extension chain). # :param integer invoice_id: id of the invoice to refund # :param dict invoice: read of the invoice to refund # :param string date: refund creation date from the wizard # :param integer period_id: force account.period from the wizard # :param string description: description of the refund from the wizard # :param integer journal_id: account.journal from the wizard # :return: dict of value to create() the refund # "" """ obj_journal = self.pool.get('account.journal') type_dict = { 'out_invoice': 'out_refund', # Customer Invoice 'in_invoice': 'in_refund', # Supplier Invoice 'out_refund': 'out_invoice', # Customer Refund 'in_refund': 'in_invoice', # Supplier Refund } invoice_data = {} for field in ['name', 'reference', 'comment', 'date_due', 'partner_id', 'company_id', 'account_id', 'currency_id', 'payment_term', 'user_id', 'fiscal_position']: if invoice._all_columns[field].column._type == 'many2one': invoice_data[field] = invoice[field].id else: invoice_data[field] = invoice[field] if invoice[field] else False invoice_lines = 
self._refund_cleanup_lines(cr, uid, invoice.invoice_line, context=context) tax_lines = filter(lambda l: l['manual'], invoice.tax_line) tax_lines = self._refund_cleanup_lines(cr, uid, tax_lines, context=context) if journal_id: refund_journal_ids = [journal_id] elif invoice['type'] == 'in_invoice': refund_journal_ids = obj_journal.search(cr, uid, [('type','=','purchase_refund')], context=context) else: refund_journal_ids = obj_journal.search(cr, uid, [('type','=','sale_refund')], context=context) if not date: date = time.strftime('%Y-%m-%d') invoice_data.update({ 'type': type_dict[invoice['type']], 'date_invoice': date, 'state': 'draft', 'number': False, 'invoice_line': invoice_lines, 'tax_line': tax_lines, 'journal_id': refund_journal_ids and refund_journal_ids[0] or False, }) if period_id: invoice_data['period_id'] = period_id if description: invoice_data['name'] = description return invoice_data def refund(self, cr, uid, ids, date=None, period_id=None, description=None, journal_id=None, context=None): new_ids = [] for invoice in self.browse(cr, uid, ids, context=context): invoice = self._prepare_refund(cr, uid, invoice, date=date, period_id=period_id, description=description, journal_id=journal_id, context=context) # create the new invoice new_ids.append(self.create(cr, uid, invoice, context=context)) return new_ids def pay_and_reconcile(self, cr, uid, ids, pay_amount, pay_account_id, period_id, pay_journal_id, writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context=None, name=''): if context is None: context = {} #TODO check if we can use different period for payment and the writeoff line assert len(ids)==1, "Can only pay one invoice at a time." 
invoice = self.browse(cr, uid, ids[0], context=context) src_account_id = invoice.account_id.id # Take the seq as name for move types = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1} direction = types[invoice.type] #take the choosen date if 'date_p' in context and context['date_p']: date=context['date_p'] else: date=time.strftime('%Y-%m-%d') # Take the amount in currency and the currency of the payment if 'amount_currency' in context and context['amount_currency'] and 'currency_id' in context and context['currency_id']: amount_currency = context['amount_currency'] currency_id = context['currency_id'] else: amount_currency = False currency_id = False pay_journal = self.pool.get('account.journal').read(cr, uid, pay_journal_id, ['type'], context=context) if invoice.type in ('in_invoice', 'out_invoice'): if pay_journal['type'] == 'bank': entry_type = 'bank_pay_voucher' # Bank payment else: entry_type = 'pay_voucher' # Cash payment else: entry_type = 'cont_voucher' if invoice.type in ('in_invoice', 'in_refund'): ref = invoice.reference else: ref = self._convert_ref(cr, uid, invoice.number) partner = self.pool['res.partner']._find_accounting_partner(invoice.partner_id) # Pay attention to the sign for both debit/credit AND amount_currency l1 = { 'debit': direction * pay_amount>0 and direction * pay_amount, 'credit': direction * pay_amount<0 and - direction * pay_amount, 'account_id': src_account_id, 'partner_id': partner.id, 'ref':ref, 'date': date, 'currency_id':currency_id, 'amount_currency':amount_currency and direction * amount_currency or 0.0, 'company_id': invoice.company_id.id, } l2 = { 'debit': direction * pay_amount<0 and - direction * pay_amount, 'credit': direction * pay_amount>0 and direction * pay_amount, 'account_id': pay_account_id, 'partner_id': partner.id, 'ref':ref, 'date': date, 'currency_id':currency_id, 'amount_currency':amount_currency and - direction * amount_currency or 0.0, 'company_id': invoice.company_id.id, } if not name: 
name = invoice.invoice_line and invoice.invoice_line[0].name or invoice.number l1['name'] = name l2['name'] = name lines = [(0, 0, l1), (0, 0, l2)] move = {'ref': ref, 'line_id': lines, 'journal_id': pay_journal_id, 'period_id': period_id, 'date': date} move_id = self.pool.get('account.move').create(cr, uid, move, context=context) line_ids = [] total = 0.0 line = self.pool.get('account.move.line') move_ids = [move_id,] if invoice.move_id: move_ids.append(invoice.move_id.id) cr.execute('SELECT id FROM account_move_line '\ 'WHERE move_id IN %s', ((move_id, invoice.move_id.id),)) lines = line.browse(cr, uid, map(lambda x: x[0], cr.fetchall()) ) for l in lines+invoice.payment_ids: if l.account_id.id == src_account_id: line_ids.append(l.id) total += (l.debit or 0.0) - (l.credit or 0.0) inv_id, name = self.name_get(cr, uid, [invoice.id], context=context)[0] if (not round(total,self.pool.get('decimal.precision').precision_get(cr, uid, 'Account'))) or writeoff_acc_id: self.pool.get('account.move.line').reconcile(cr, uid, line_ids, 'manual', writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context) else: code = invoice.currency_id.symbol # TODO: use currency's formatting function msg = _("Invoice partially paid: %s%s of %s%s (%s%s remaining).") % \ (pay_amount, code, invoice.amount_total, code, total, code) self.message_post(cr, uid, [inv_id], body=msg, context=context) self.pool.get('account.move.line').reconcile_partial(cr, uid, line_ids, 'manual', context) # Update the stored value (fields.function), so we write to trigger recompute self.pool.get('account.invoice').write(cr, uid, ids, {}, context=context) return True """ account_honorarios() class account_honorarios_line(osv.osv): def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict): # CAMBIO PARA QUE FUNCIONE EL ASIENTO CON MONTOS DE IMPUESTO INCLUIDO EN VALOR DE LA LINEA res = {} cur_obj = self.pool.get('res.currency') for line in self.browse(cr, uid, ids): res[line.id] = line.price_unit if 
line.fees_id: cur = line.fees_id.currency_id res[line.id] = cur_obj.round(cr, uid, cur, res[line.id]) return res def _price_unit_default(self, cr, uid, context=None): if context is None: context = {} if context.get('check_total', False): t = context['check_total'] for l in context.get('fees_line', {}): if isinstance(l, (list, tuple)) and len(l) >= 3 and l[2]: tax_obj = self.pool.get('account.tax') p = l[2].get('price_unit', 0) * (1-l[2].get('discount', 0)/100.0) t = t - (p * l[2].get('quantity')) taxes = l[2].get('fees_line_tax_id') if len(taxes[0]) >= 3 and taxes[0][2]: taxes = tax_obj.browse(cr, uid, list(taxes[0][2])) for tax in tax_obj.compute_all(cr, uid, taxes, p,l[2].get('quantity'), l[2].get('product_id', False), context.get('partner_id', False))['taxes']: t = t - tax['amount'] return t return 0 _name = "account.fees.line" _description = "Ballot Fees Line" _columns = { 'name': fields.text('Description', required=True), 'origin': fields.char('Source Document', size=256, help="Reference of the document that produced this invoice."), 'sequence': fields.integer('Sequence', help="Gives the sequence of this line when displaying the ballot fees."), 'fees_id': fields.many2one('account.fees', 'Ballot Fees Reference', ondelete='cascade', select=True), 'uos_id': fields.many2one('product.uom', 'Unit of Measure', ondelete='set null', select=True), 'product_id': fields.many2one('product.product', 'Product', ondelete='set null', select=True), 'account_id': fields.many2one('account.account', 'Account', required=True,\ domain=[('type','<>','view'), ('type', '<>', 'closed')],\ help="The income or expense account related to the selected product."), 'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')), 'price_subtotal': fields.function(_amount_line, string='Amount', type="float", digits_compute= dp.get_precision('Account'), store=True), 'quantity': fields.float('Quantity', digits_compute= dp.get_precision('Product Unit of 
Measure'), required=True), 'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount')), 'fees_line_tax_id': fields.many2many('account.tax', 'account_fees_line_tax', 'fees_line_id', 'tax_id', 'Retencion',\ domain=[('parent_id','=',False)]), 'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'), 'company_id': fields.related('fees_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True), 'partner_id': fields.related('fees_id','partner_id',type='many2one',relation='res.partner',string='Partner',store=True) } def _default_account_id(self, cr, uid, context=None): # XXX this gets the default account for the user's company, # it should get the default account for the invoice's company # however, the ballot fees's company does not reach this point if context is None: context = {} prop = self.pool.get('ir.property').get(cr, uid, 'property_account_expense_categ', 'product.category', context=context) return prop and prop.id or False _defaults = { 'quantity': 1, 'discount': 0.0, 'price_unit': _price_unit_default, 'account_id': _default_account_id, } def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False): if context is None: context = {} res = super(account_honorarios_line, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type,\ context=context, toolbar=toolbar, submenu=submenu) doc = etree.XML(res['arch']) for node in doc.xpath("//field[@name='product_id']"): ########################################################## ########################################################## ##########OJO DE NUEVO, CAMBIAR EN PRODUCTO############### ############AGREGAR BOLETAS DE HONORARIOS################# ########################################################## node.set('domain', "[('purchase_ok', '=', True)]") res['arch'] = etree.tostring(doc) return res def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, 
name='', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None): if context is None: context = {} company_id = company_id if company_id != None else context.get('company_id',False) context = dict(context) context.update({'company_id': company_id, 'force_company': company_id}) if not partner_id: raise osv.except_osv(_('No Partner Defined!'),_("You must first select a partner!") ) if not product: return {'value': {}, 'domain':{'product_uom':[]}} part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context) fpos_obj = self.pool.get('account.fiscal.position') fpos = fposition_id and fpos_obj.browse(cr, uid, fposition_id, context=context) or False if part.lang: context.update({'lang': part.lang}) result = {} res = self.pool.get('product.product').browse(cr, uid, product, context=context) a = res.property_account_expense.id if not a: a = res.categ_id.property_account_expense_categ.id a = fpos_obj.map_account(cr, uid, fpos, a) if a: result['account_id'] = a taxes = res.supplier_taxes_id and res.supplier_taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False) tax_id = fpos_obj.map_tax(cr, uid, fpos, taxes) result.update( {'price_unit': price_unit or res.standard_price,'fees_line_tax_id': tax_id} ) result['name'] = res.partner_ref result['uos_id'] = uom_id or res.uom_id.id if res.description: result['name'] += '\n'+res.description domain = {'uos_id':[('category_id','=',res.uom_id.category_id.id)]} res_final = {'value':result, 'domain':domain} if not company_id or not currency_id: return res_final company = self.pool.get('res.company').browse(cr, uid, company_id, context=context) currency = self.pool.get('res.currency').browse(cr, uid, currency_id, context=context) if company.currency_id.id != currency.id: res_final['value']['price_unit'] = res.standard_price new_price = res_final['value']['price_unit'] * currency.rate res_final['value']['price_unit'] = 
new_price if result['uos_id'] and result['uos_id'] != res.uom_id.id: selected_uom = self.pool.get('product.uom').browse(cr, uid, result['uos_id'], context=context) new_price = self.pool.get('product.uom')._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uos_id']) res_final['value']['price_unit'] = new_price return res_final def uos_id_change(self, cr, uid, ids, product, uom, qty=0, name='', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None): if context is None: context = {} company_id = company_id if company_id != None else context.get('company_id',False) context = dict(context) context.update({'company_id': company_id}) warning = {} res = self.product_id_change(cr, uid, ids, product, uom, qty, name, partner_id, fposition_id, price_unit, currency_id, context=context) if not uom: res['value']['price_unit'] = 0.0 if product and uom: prod = self.pool.get('product.product').browse(cr, uid, product, context=context) prod_uom = self.pool.get('product.uom').browse(cr, uid, uom, context=context) if prod.uom_id.category_id.id != prod_uom.category_id.id: warning = { 'title': _('Warning!'), 'message': _('The selected unit of measure is not compatible with the unit of measure of the product.') } res['value'].update({'uos_id': prod.uom_id.id}) return {'value': res['value'], 'warning': warning} return res def move_line_get(self, cr, uid, fees_id, context=None): ###################################################################### ###################################################################### ##############Aki calcula impuestos################################### ###################################################################### ###################################################################### res = [] tax_obj = self.pool.get('account.tax') cur_obj = self.pool.get('res.currency') if context is None: context = {} fee = self.pool.get('account.fees').browse(cr, uid, fees_id, 
context=context) company_currency = self.pool['res.company'].browse(cr, uid, fee.company_id.id).currency_id.id for line in fee.fees_line: mres = self.move_line_get_item(cr, uid, line, context) if not mres: continue res.append(mres) tax_code_found= False for tax in tax_obj.compute_all(cr, uid, line.fees_line_tax_id, (line.price_unit * (1.0 - (line['discount'] or 0.0) / 100.0)),\ line.quantity, line.product_id, fee.partner_id)['taxes']: tax_code_id = tax['ref_base_code_id'] tax_amount = line.price_subtotal * tax['ref_base_sign'] if tax_code_found: if not tax_code_id: continue res.append(self.move_line_get_item(cr, uid, line, context)) res[-1]['price'] = 0.0 res[-1]['account_analytic_id'] = False elif not tax_code_id: continue tax_code_found = True res[-1]['tax_code_id'] = tax_code_id res[-1]['tax_amount'] = cur_obj.compute(cr, uid, fee.currency_id.id, company_currency, tax_amount, context={'date': fee.date_fees}) return res def move_line_get_item(self, cr, uid, line, context=None): return { 'type':'src', 'name': line.name.split('\n')[0][:64], 'price_unit':line.price_unit, 'quantity':line.quantity, 'price':line.price_subtotal, 'account_id':line.account_id.id, 'product_id':line.product_id.id, 'uos_id':line.uos_id.id, 'account_analytic_id':line.account_analytic_id.id, 'taxes':line.fees_line_tax_id, } # # Set the tax field according to the account and the fiscal position # def onchange_account_id(self, cr, uid, ids, product_id, partner_id, fposition_id, account_id): if not account_id: return {} unique_tax_ids = [] fpos = fposition_id and self.pool.get('account.fiscal.position').browse(cr, uid, fposition_id) or False account = self.pool.get('account.account').browse(cr, uid, account_id) if not product_id: taxes = account.tax_ids unique_tax_ids = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, taxes) else: product_change_result = self.product_id_change(cr, uid, ids, product_id, False, partner_id=partner_id, fposition_id=fposition_id, 
company_id=account.company_id.id) if product_change_result and 'value' in product_change_result and 'fees_line_tax_id' in product_change_result['value']: unique_tax_ids = product_change_result['value']['fees_line_tax_id'] return {'value':{'fees_line_tax_id': unique_tax_ids}} account_honorarios_line() class account_honorarios_tax(osv.osv): _name = "account.fees.tax" _description = "Ballot Fees Tax" def _count_factor(self, cr, uid, ids, name, args, context=None): res = {} for fees_tax in self.browse(cr, uid, ids, context=context): res[fees_tax.id] = { 'factor_base': 1.0, 'factor_tax': 1.0, } if fees_tax.amount <> 0.0: factor_tax = fees_tax.tax_amount / fees_tax.amount res[fees_tax.id]['factor_tax'] = factor_tax if fees_tax.base <> 0.0: factor_base = fees_tax.base_amount / fees_tax.base res[fees_tax.id]['factor_base'] = factor_base return res _columns = { 'fees_id':fields.many2one('account.fees', 'Ballot Fees Line', ondelete='cascade', select=True), 'name':fields.char('Tax Description', size=64, required=True), 'account_id':fields.many2one('account.account', 'Tax Account', required=True,\ domain=[('type', '<>', 'closed')]), 'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic account'), 'base':fields.float('Base', digits_compute=dp.get_precision('Account')), 'amount':fields.float('Amount', digits_compute=dp.get_precision('Account')), 'manual':fields.boolean('Manual'), 'sequence':fields.integer('Sequence', help="Gives the sequence order when displaying a list of ballot fees tax."), 'base_code_id':fields.many2one('account.tax.code', 'Base Code', help="The account basis of the tax declaration."), 'base_amount':fields.float('Base Code Amount', digits_compute=dp.get_precision('Account')), 'tax_code_id':fields.many2one('account.tax.code', 'Tax Code', help="The tax basis of the tax declaration."), 'tax_amount':fields.float('Tax Code Amount', digits_compute=dp.get_precision('Account')), 'company_id':fields.related('account_id', 'company_id', 
type='many2one', relation='res.company', string='Company', store=True, readonly=True), 'factor_base':fields.function(_count_factor, string='Multipication factor for Base code', type='float', multi="all"), 'factor_tax':fields.function(_count_factor, string='Multipication factor Tax code', type='float', multi="all") } def base_change(self, cr, uid, ids, base, currency_id=False, company_id=False, date_fees=False): cur_obj = self.pool.get('res.currency') company_obj = self.pool.get('res.company') company_currency = False factor = 1 if ids: factor = self.read(cr, uid, ids[0], ['factor_base'])['factor_base'] if company_id: company_currency = company_obj.read(cr, uid, [company_id], ['currency_id'])[0]['currency_id'][0] if currency_id and company_currency: base = cur_obj.compute(cr, uid, currency_id, company_currency, base*factor, context={'date': date_fees or time.strftime('%Y-%m-%d')}, round=False) return {'value': {'base_amount':base}} def amount_change(self, cr, uid, ids, amount, currency_id=False, company_id=False, date_fees=False): cur_obj = self.pool.get('res.currency') company_obj = self.pool.get('res.company') company_currency = False factor = 1 if ids: factor = self.read(cr, uid, ids[0], ['factor_tax'])['factor_tax'] if company_id: company_currency = company_obj.read(cr, uid, [company_id], ['currency_id'])[0]['currency_id'][0] if currency_id and company_currency: amount = cur_obj.compute(cr, uid, currency_id, company_currency, amount*factor, context={'date': date_fees or time.strftime('%Y-%m-%d')}, round=False) return {'value': {'tax_amount': amount}} _order = 'sequence' _defaults = { 'manual': 1, 'base_amount': 0.0, 'tax_amount': 0.0, } def compute(self, cr, uid, fees_id, context=None): tax_grouped = {} tax_obj = self.pool.get('account.tax') cur_obj = self.pool.get('res.currency') fee = self.pool.get('account.fees').browse(cr, uid, fees_id, context=context) cur = fee.currency_id company_currency = self.pool['res.company'].browse(cr, uid, 
fee.company_id.id).currency_id.id for line in fee.fees_line: for tax in tax_obj.compute_all(cr, uid, line.fees_line_tax_id, (line.price_unit* (1-(line.discount or 0.0)/100.0)), line.quantity, line.product_id, fee.partner_id)['taxes']: val={} val['fees_id'] = fee.id val['name'] = tax['name'] val['amount'] = tax['amount'] val['manual'] = False val['sequence'] = tax['sequence'] val['base'] = cur_obj.round(cr, uid, cur, tax['price_unit'] * line['quantity']) val['base_code_id'] = tax['ref_base_code_id'] val['tax_code_id'] = tax['ref_tax_code_id'] val['base_amount'] = cur_obj.compute(cr, uid, fee.currency_id.id, company_currency, val['base'] * tax['ref_base_sign'],\ context={'date': fee.date_fees or time.strftime('%Y-%m-%d')}, round=False) val['tax_amount'] = cur_obj.compute(cr, uid, fee.currency_id.id, company_currency, val['amount'] * tax['ref_tax_sign'],\ context={'date': fee.date_fees or time.strftime('%Y-%m-%d')}, round=False) val['account_id'] = tax['account_paid_id'] or line.account_id.id val['account_analytic_id'] = tax['account_analytic_paid_id'] key = (val['tax_code_id'], val['base_code_id'], val['account_id'], val['account_analytic_id']) if not key in tax_grouped: tax_grouped[key] = val else: tax_grouped[key]['amount'] += val['amount'] tax_grouped[key]['base'] += val['base'] tax_grouped[key]['base_amount'] += val['base_amount'] tax_grouped[key]['tax_amount'] += val['tax_amount'] for t in tax_grouped.values(): t['base'] = cur_obj.round(cr, uid, cur, t['base']) t['amount'] = cur_obj.round(cr, uid, cur, t['amount']) t['base_amount'] = cur_obj.round(cr, uid, cur, t['base_amount']) t['tax_amount'] = cur_obj.round(cr, uid, cur, t['tax_amount']) return tax_grouped def move_line_get(self, cr, uid, fees_id): res = [] cr.execute('SELECT * FROM account_fees_tax WHERE fees_id=%s', (fees_id,)) for t in cr.dictfetchall(): if not t['amount'] \ and not t['tax_code_id'] \ and not t['tax_amount']: continue res.append({ 'type':'tax', 'name':t['name'], 'price_unit': t['amount'], 
'quantity': 1, 'price': t['amount'] or 0.0, 'account_id': t['account_id'], 'tax_code_id': t['tax_code_id'], 'tax_amount': t['tax_amount'], 'account_analytic_id': t['account_analytic_id'], }) return res account_honorarios_tax() class res_partner(osv.osv): """ Inherits partner and adds ballot fees information in the partner form """ _inherit = 'res.partner' _columns = { 'fees_ids': fields.one2many('account.fees.line', 'partner_id', 'Ballot Fees', readonly=True), } def _find_accounting_partner(self, partner): ''' Find the partner for which the accounting entries will be created ''' # FIXME: after 7.0, to replace by function field partner.commercial_partner_id #if the chosen partner is not a company and has a parent company, use the parent for the journal entries #because you want to ballot fees 'Agrolait, accounting department' but the journal items are for 'Agrolait' while not partner.is_company and partner.parent_id: partner = partner.parent_id return partner def copy(self, cr, uid, id, default=None, context=None): default = default or {} default.update({'fees_ids' : []}) return super(res_partner, self).copy(cr, uid, id, default, context) class mail_compose_message(osv.Model): _inherit = 'mail.compose.message' def send_mail(self, cr, uid, ids, context=None): context = context or {} if context.get('default_model') == 'account.fees' and context.get('default_res_id') and context.get('mark_fees_as_sent'): context = dict(context, mail_post_autofollow=True) self.pool.get('account.fees').write(cr, uid, [context['default_res_id']], {'sent': True}, context=context) return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
cloudbase/neutron-virtualbox
neutron/plugins/nec/db/api.py
10
6134
# Copyright 2012 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy as sa from neutron.db import api as db from neutron.db import models_v2 from neutron.db import securitygroups_db as sg_db from neutron.extensions import securitygroup as ext_sg from neutron.i18n import _LW from neutron import manager from neutron.openstack.common import log as logging from neutron.plugins.nec.common import config # noqa from neutron.plugins.nec.common import exceptions as nexc from neutron.plugins.nec.db import models as nmodels LOG = logging.getLogger(__name__) OFP_VLAN_NONE = 0xffff resource_map = {'ofc_tenant': nmodels.OFCTenantMapping, 'ofc_network': nmodels.OFCNetworkMapping, 'ofc_port': nmodels.OFCPortMapping, 'ofc_router': nmodels.OFCRouterMapping, 'ofc_packet_filter': nmodels.OFCFilterMapping} # utitlity methods def _get_resource_model(resource): return resource_map[resource] def get_ofc_item(session, resource, neutron_id): model = _get_resource_model(resource) if not model: return try: return session.query(model).filter_by(neutron_id=neutron_id).one() except sa.orm.exc.NoResultFound: return def get_ofc_id(session, resource, neutron_id): ofc_item = get_ofc_item(session, resource, neutron_id) if ofc_item: return ofc_item.ofc_id else: raise nexc.OFCMappingNotFound(resource=resource, neutron_id=neutron_id) def exists_ofc_item(session, resource, neutron_id): if get_ofc_item(session, resource, neutron_id): return True else: return False def 
find_ofc_item(session, resource, ofc_id): try: model = _get_resource_model(resource) params = dict(ofc_id=ofc_id) return (session.query(model).filter_by(**params).one()) except sa.orm.exc.NoResultFound: return None def add_ofc_item(session, resource, neutron_id, ofc_id): try: model = _get_resource_model(resource) params = dict(neutron_id=neutron_id, ofc_id=ofc_id) item = model(**params) with session.begin(subtransactions=True): session.add(item) session.flush() except Exception as exc: LOG.exception(exc) raise nexc.NECDBException(reason=exc.message) return item def del_ofc_item(session, resource, neutron_id): try: model = _get_resource_model(resource) with session.begin(subtransactions=True): item = session.query(model).filter_by(neutron_id=neutron_id).one() session.delete(item) return True except sa.orm.exc.NoResultFound: LOG.warning(_LW("del_ofc_item(): NotFound item " "(resource=%(resource)s, id=%(id)s) "), {'resource': resource, 'id': neutron_id}) return False def get_portinfo(session, id): try: return (session.query(nmodels.PortInfo). filter_by(id=id). one()) except sa.orm.exc.NoResultFound: return None def add_portinfo(session, id, datapath_id='', port_no=0, vlan_id=OFP_VLAN_NONE, mac=''): try: portinfo = nmodels.PortInfo(id=id, datapath_id=datapath_id, port_no=port_no, vlan_id=vlan_id, mac=mac) with session.begin(subtransactions=True): session.add(portinfo) except Exception as exc: LOG.exception(exc) raise nexc.NECDBException(reason=exc.message) return portinfo def del_portinfo(session, id): try: with session.begin(subtransactions=True): portinfo = session.query(nmodels.PortInfo).filter_by(id=id).one() session.delete(portinfo) except sa.orm.exc.NoResultFound: LOG.warning(_LW("del_portinfo(): NotFound portinfo for " "port_id: %s"), id) def get_active_ports_on_ofc(context, network_id, port_id=None): """Retrieve ports on OFC on a given network. It returns a list of tuple (neutron port_id, OFC id). 
""" query = context.session.query(nmodels.OFCPortMapping) query = query.join(models_v2.Port, nmodels.OFCPortMapping.neutron_id == models_v2.Port.id) query = query.filter(models_v2.Port.network_id == network_id) if port_id: query = query.filter(nmodels.OFCPortMapping.neutron_id == port_id) return [(p['neutron_id'], p['ofc_id']) for p in query] def get_port_from_device(port_id): """Get port from database.""" LOG.debug("get_port_with_securitygroups() called:port_id=%s", port_id) session = db.get_session() sg_binding_port = sg_db.SecurityGroupPortBinding.port_id query = session.query(models_v2.Port, sg_db.SecurityGroupPortBinding.security_group_id) query = query.outerjoin(sg_db.SecurityGroupPortBinding, models_v2.Port.id == sg_binding_port) query = query.filter(models_v2.Port.id == port_id) port_and_sgs = query.all() if not port_and_sgs: return None port = port_and_sgs[0][0] plugin = manager.NeutronManager.get_plugin() port_dict = plugin._make_port_dict(port) port_dict[ext_sg.SECURITYGROUPS] = [ sg_id for port_, sg_id in port_and_sgs if sg_id] port_dict['security_group_rules'] = [] port_dict['security_group_source_groups'] = [] port_dict['fixed_ips'] = [ip['ip_address'] for ip in port['fixed_ips']] return port_dict
apache-2.0
mgedmin/ansible-modules-core
cloud/amazon/rds_subnet_group.py
104
5446
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: rds_subnet_group version_added: "1.5" short_description: manage RDS database subnet groups description: - Creates, modifies, and deletes RDS database subnet groups. This module has a dependency on python-boto >= 2.5. options: state: description: - Specifies whether the subnet should be present or absent. required: true default: present aliases: [] choices: [ 'present' , 'absent' ] name: description: - Database subnet group identifier. required: true default: null aliases: [] description: description: - Database subnet group description. Only set when a new group is added. required: false default: null aliases: [] subnets: description: - List of subnet IDs that make up the database subnet group. required: false default: null aliases: [] region: description: - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used. 
required: true default: null aliases: ['aws_region', 'ec2_region'] author: "Scott Anderson (@tastychutney)" extends_documentation_fragment: aws ''' EXAMPLES = ''' # Add or change a subnet group - rds_subnet_group state: present name: norwegian-blue description: My Fancy Ex Parrot Subnet Group subnets: - subnet-aaaaaaaa - subnet-bbbbbbbb # Remove a subnet group - rds_subnet_group: state: absent name: norwegian-blue ''' try: import boto.rds from boto.exception import BotoServerError HAS_BOTO = True except ImportError: HAS_BOTO = False def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( state = dict(required=True, choices=['present', 'absent']), name = dict(required=True), description = dict(required=False), subnets = dict(required=False, type='list'), ) ) module = AnsibleModule(argument_spec=argument_spec) if not HAS_BOTO: module.fail_json(msg='boto required for this module') state = module.params.get('state') group_name = module.params.get('name').lower() group_description = module.params.get('description') group_subnets = module.params.get('subnets') or {} if state == 'present': for required in ['name', 'description', 'subnets']: if not module.params.get(required): module.fail_json(msg = str("Parameter %s required for state='present'" % required)) else: for not_allowed in ['description', 'subnets']: if module.params.get(not_allowed): module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed)) # Retrieve any AWS settings from the environment. 
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) if not region: module.fail_json(msg = str("Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")) try: conn = boto.rds.connect_to_region(region, **aws_connect_kwargs) except boto.exception.BotoServerError, e: module.fail_json(msg = e.error_message) try: changed = False exists = False try: matching_groups = conn.get_all_db_subnet_groups(group_name, max_records=100) exists = len(matching_groups) > 0 except BotoServerError, e: if e.error_code != 'DBSubnetGroupNotFoundFault': module.fail_json(msg = e.error_message) if state == 'absent': if exists: conn.delete_db_subnet_group(group_name) changed = True else: if not exists: new_group = conn.create_db_subnet_group(group_name, desc=group_description, subnet_ids=group_subnets) changed = True else: # Sort the subnet groups before we compare them matching_groups[0].subnet_ids.sort() group_subnets.sort() if ( (matching_groups[0].name != group_name) or (matching_groups[0].description != group_description) or (matching_groups[0].subnet_ids != group_subnets) ): changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets) changed = True except BotoServerError, e: module.fail_json(msg = e.error_message) module.exit_json(changed=changed) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.ec2 import * main()
gpl-3.0
ajayuranakar/django-blog
lib/python2.7/site-packages/pip/commands/search.py
84
4717
import sys import textwrap import pkg_resources import pip.download from pip.basecommand import Command, SUCCESS from pip.util import get_terminal_size from pip.log import logger from pip.backwardcompat import xmlrpclib, reduce, cmp from pip.exceptions import CommandError from pip.status_codes import NO_MATCHES_FOUND from distutils.version import StrictVersion, LooseVersion class SearchCommand(Command): """Search for PyPI packages whose name or summary contains <query>.""" name = 'search' usage = """ %prog [options] <query>""" summary = 'Search PyPI for packages.' def __init__(self, *args, **kw): super(SearchCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '--index', dest='index', metavar='URL', default='https://pypi.python.org/pypi', help='Base URL of Python Package Index (default %default)') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args: raise CommandError('Missing required argument (search query).') query = args index_url = options.index pypi_hits = self.search(query, index_url) hits = transform_hits(pypi_hits) terminal_width = None if sys.stdout.isatty(): terminal_width = get_terminal_size()[0] print_results(hits, terminal_width=terminal_width) if pypi_hits: return SUCCESS return NO_MATCHES_FOUND def search(self, query, index_url): pypi = xmlrpclib.ServerProxy(index_url) hits = pypi.search({'name': query, 'summary': query}, 'or') return hits def transform_hits(hits): """ The list from pypi is really a list of versions. We want a list of packages with the list of versions stored inline. This converts the list from pypi into one we can use. 
""" packages = {} for hit in hits: name = hit['name'] summary = hit['summary'] version = hit['version'] score = hit['_pypi_ordering'] if score is None: score = 0 if name not in packages.keys(): packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score} else: packages[name]['versions'].append(version) # if this is the highest version, replace summary and score if version == highest_version(packages[name]['versions']): packages[name]['summary'] = summary packages[name]['score'] = score # each record has a unique name now, so we will convert the dict into a list sorted by score package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True) return package_list def print_results(hits, name_column_width=25, terminal_width=None): installed_packages = [p.project_name for p in pkg_resources.working_set] for hit in hits: name = hit['name'] summary = hit['summary'] or '' if terminal_width is not None: # wrap and indent summary to fit terminal summary = textwrap.wrap(summary, terminal_width - name_column_width - 5) summary = ('\n' + ' ' * (name_column_width + 3)).join(summary) line = '%s - %s' % (name.ljust(name_column_width), summary) try: logger.notify(line) if name in installed_packages: dist = pkg_resources.get_distribution(name) logger.indent += 2 try: latest = highest_version(hit['versions']) if dist.version == latest: logger.notify('INSTALLED: %s (latest)' % dist.version) else: logger.notify('INSTALLED: %s' % dist.version) logger.notify('LATEST: %s' % latest) finally: logger.indent -= 2 except UnicodeEncodeError: pass def compare_versions(version1, version2): try: return cmp(StrictVersion(version1), StrictVersion(version2)) # in case of abnormal version number, fall back to LooseVersion except ValueError: pass try: return cmp(LooseVersion(version1), LooseVersion(version2)) except TypeError: # certain LooseVersion comparions raise due to unorderable types, # fallback to string comparison return cmp([str(v) for v in 
LooseVersion(version1).version], [str(v) for v in LooseVersion(version2).version]) def highest_version(versions): return reduce((lambda v1, v2: compare_versions(v1, v2) == 1 and v1 or v2), versions)
gpl-3.0
aboutsajjad/Bridge
app_packages/youtube_dl/extractor/ndtv.py
16
4606
# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urllib_parse_unquote_plus ) from ..utils import ( parse_duration, remove_end, unified_strdate, urljoin ) class NDTVIE(InfoExtractor): _VALID_URL = r'https?://(?:[^/]+\.)?ndtv\.com/(?:[^/]+/)*videos?/?(?:[^/]+/)*[^/?^&]+-(?P<id>\d+)' _TESTS = [ { 'url': 'https://khabar.ndtv.com/video/show/prime-time/prime-time-ill-system-and-poor-education-468818', 'md5': '78efcf3880ef3fd9b83d405ca94a38eb', 'info_dict': { 'id': '468818', 'ext': 'mp4', 'title': "प्राइम टाइम: सिस्टम बीमार, स्कूल बदहाल", 'description': 'md5:f410512f1b49672e5695dea16ef2731d', 'upload_date': '20170928', 'duration': 2218, 'thumbnail': r're:https?://.*\.jpg', } }, { # __filename is url 'url': 'http://movies.ndtv.com/videos/cracker-free-diwali-wishes-from-karan-johar-kriti-sanon-other-stars-470304', 'md5': 'f1d709352305b44443515ac56b45aa46', 'info_dict': { 'id': '470304', 'ext': 'mp4', 'title': "Cracker-Free Diwali Wishes From Karan Johar, Kriti Sanon & Other Stars", 'description': 'md5:f115bba1adf2f6433fa7c1ade5feb465', 'upload_date': '20171019', 'duration': 137, 'thumbnail': r're:https?://.*\.jpg', } }, { 'url': 'https://www.ndtv.com/video/news/news/delhi-s-air-quality-status-report-after-diwali-is-very-poor-470372', 'only_matching': True }, { 'url': 'https://auto.ndtv.com/videos/the-cnb-daily-october-13-2017-469935', 'only_matching': True }, { 'url': 'https://sports.ndtv.com/cricket/videos/2nd-t20i-rock-thrown-at-australia-cricket-team-bus-after-win-over-india-469764', 'only_matching': True }, { 'url': 'http://gadgets.ndtv.com/videos/uncharted-the-lost-legacy-review-465568', 'only_matching': True }, { 'url': 'http://profit.ndtv.com/videos/news/video-indian-economy-on-very-solid-track-international-monetary-fund-chief-470040', 'only_matching': True }, { 'url': 'http://food.ndtv.com/video-basil-seeds-coconut-porridge-419083', 'only_matching': True }, { 'url': 
'https://doctor.ndtv.com/videos/top-health-stories-of-the-week-467396', 'only_matching': True }, { 'url': 'https://swirlster.ndtv.com/video/how-to-make-friends-at-work-469324', 'only_matching': True } ] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) # '__title' does not contain extra words such as sub-site name, "Video" etc. title = compat_urllib_parse_unquote_plus( self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None) or self._og_search_title(webpage)) filename = self._search_regex( r"(?:__)?filename\s*[:=]\s*'([^']+)'", webpage, 'video filename') # in "movies" sub-site pages, filename is URL video_url = urljoin('https://ndtvod.bc-ssl.cdn.bitgravity.com/23372/ndtv/', filename.lstrip('/')) # "doctor" sub-site has MM:SS format duration = parse_duration(self._search_regex( r"(?:__)?duration\s*[:=]\s*'([^']+)'", webpage, 'duration', fatal=False)) # "sports", "doctor", "swirlster" sub-sites don't have 'publish-date' upload_date = unified_strdate(self._html_search_meta( 'publish-date', webpage, 'upload date', default=None) or self._html_search_meta( 'uploadDate', webpage, 'upload date', default=None) or self._search_regex( r'datePublished"\s*:\s*"([^"]+)"', webpage, 'upload date', fatal=False)) description = remove_end(self._og_search_description(webpage), ' (Read more)') return { 'id': video_id, 'url': video_url, 'title': title, 'description': description, 'thumbnail': self._og_search_thumbnail(webpage), 'duration': duration, 'upload_date': upload_date, }
mit
meredith-digops/ansible
lib/ansible/galaxy/role.py
10
15355
######################################################################## # # (C) 2015, Brian Coca <bcoca@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ######################################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import datetime import os import tarfile import tempfile import yaml from distutils.version import LooseVersion from shutil import rmtree import ansible.constants as C from ansible.errors import AnsibleError from ansible.module_utils.urls import open_url from ansible.playbook.role.requirement import RoleRequirement from ansible.galaxy.api import GalaxyAPI try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class GalaxyRole(object): SUPPORTED_SCMS = set(['git', 'hg']) META_MAIN = os.path.join('meta', 'main.yml') META_INSTALL = os.path.join('meta', '.galaxy_install_info') ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests') def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None): self._metadata = None self._install_info = None self._validate_certs = not galaxy.options.ignore_certs display.debug('Validate TLS certificates: %s' % self._validate_certs) self.options = galaxy.options self.galaxy = galaxy self.name = name self.version = 
version self.src = src or name self.scm = scm if path is not None: if self.name not in path: path = os.path.join(path, self.name) self.path = path else: for role_path_dir in galaxy.roles_paths: role_path = os.path.join(role_path_dir, self.name) if os.path.exists(role_path): self.path = role_path break else: # use the first path by default self.path = os.path.join(galaxy.roles_paths[0], self.name) # create list of possible paths self.paths = [x for x in galaxy.roles_paths] self.paths = [os.path.join(x, self.name) for x in self.paths] def __repr__(self): """ Returns "rolename (version)" if version is not null Returns "rolename" otherwise """ if self.version: return "%s (%s)" % (self.name, self.version) else: return self.name def __eq__(self, other): return self.name == other.name @property def metadata(self): """ Returns role metadata """ if self._metadata is None: meta_path = os.path.join(self.path, self.META_MAIN) if os.path.isfile(meta_path): try: f = open(meta_path, 'r') self._metadata = yaml.safe_load(f) except: display.vvvvv("Unable to load metadata for %s" % self.name) return False finally: f.close() return self._metadata @property def install_info(self): """ Returns role install info """ if self._install_info is None: info_path = os.path.join(self.path, self.META_INSTALL) if os.path.isfile(info_path): try: f = open(info_path, 'r') self._install_info = yaml.safe_load(f) except: display.vvvvv("Unable to load Galaxy install info for %s" % self.name) return False finally: f.close() return self._install_info def _write_galaxy_install_info(self): """ Writes a YAML-formatted file to the role's meta/ directory (named .galaxy_install_info) which contains some information we can use later for commands like 'list' and 'info'. 
""" info = dict( version=self.version, install_date=datetime.datetime.utcnow().strftime("%c"), ) if not os.path.exists(os.path.join(self.path, 'meta')): os.makedirs(os.path.join(self.path, 'meta')) info_path = os.path.join(self.path, self.META_INSTALL) with open(info_path, 'w+') as f: try: self._install_info = yaml.safe_dump(info, f) except: return False return True def remove(self): """ Removes the specified role from the roles path. There is a sanity check to make sure there's a meta/main.yml file at this path so the user doesn't blow away random directories. """ if self.metadata: try: rmtree(self.path) return True except: pass return False def fetch(self, role_data): """ Downloads the archived role from github to a temp location """ if role_data: # first grab the file and save it to a temp location if "github_user" in role_data and "github_repo" in role_data: archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version) else: archive_url = self.src display.display("- downloading role from %s" % archive_url) try: url_file = open_url(archive_url, validate_certs=self._validate_certs) temp_file = tempfile.NamedTemporaryFile(delete=False) data = url_file.read() while data: temp_file.write(data) data = url_file.read() temp_file.close() return temp_file.name except Exception as e: display.error("failed to download the file: %s" % str(e)) return False def install(self): # the file is a tar, so open it that way and extract it # to the specified (or default) roles directory local_file = False if self.scm: # create tar file from scm url tmp_file = RoleRequirement.scm_archive_role(**self.spec) elif self.src: if os.path.isfile(self.src): # installing a local tar.gz local_file = True tmp_file = self.src elif '://' in self.src: role_data = self.src tmp_file = self.fetch(role_data) else: api = GalaxyAPI(self.galaxy) role_data = api.lookup_role_by_name(self.src) if not role_data: raise AnsibleError("- sorry, %s 
was not found on %s." % (self.src, api.api_server)) if role_data.get('role_type') == 'CON' and not os.environ.get('ANSIBLE_CONTAINER'): # Container Enabled, running outside of a container display.warning("%s is a Container Enabled role and should only be installed using " "Ansible Container" % self.name) if role_data.get('role_type') == 'APP': # Container Role display.warning("%s is a Container App role and should only be installed using Ansible " "Container" % self.name) role_versions = api.fetch_role_related('versions', role_data['id']) if not self.version: # convert the version names to LooseVersion objects # and sort them to get the latest version. If there # are no versions in the list, we'll grab the head # of the master branch if len(role_versions) > 0: loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions] loose_versions.sort() self.version = str(loose_versions[-1]) elif role_data.get('github_branch', None): self.version = role_data['github_branch'] else: self.version = 'master' elif self.version != 'master': if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]: raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." 
% (self.version, self.name, role_versions)) tmp_file = self.fetch(role_data) else: raise AnsibleError("No valid role data found") if tmp_file: display.debug("installing from %s" % tmp_file) if not tarfile.is_tarfile(tmp_file): raise AnsibleError("the file downloaded was not a tar.gz") else: if tmp_file.endswith('.gz'): role_tar_file = tarfile.open(tmp_file, "r:gz") else: role_tar_file = tarfile.open(tmp_file, "r") # verify the role's meta file meta_file = None members = role_tar_file.getmembers() # next find the metadata file for member in members: if self.META_MAIN in member.name: # Look for parent of meta/main.yml # Due to possibility of sub roles each containing meta/main.yml # look for shortest length parent meta_parent_dir = os.path.dirname(os.path.dirname(member.name)) if not meta_file: archive_parent_dir = meta_parent_dir meta_file = member else: if len(meta_parent_dir) < len(archive_parent_dir): archive_parent_dir = meta_parent_dir meta_file = member if not meta_file: raise AnsibleError("this role does not appear to have a meta/main.yml file.") else: try: self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file)) except: raise AnsibleError("this role does not appear to have a valid meta/main.yml file.") # we strip off any higher-level directories for all of the files contained within # the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other # hand, does not have a parent directory at all. installed = False while not installed: display.display("- extracting %s to %s" % (self.name, self.path)) try: if os.path.exists(self.path): if not os.path.isdir(self.path): raise AnsibleError("the specified roles path exists and is not a directory.") elif not getattr(self.options, "force", False): raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." 
% self.name) else: # using --force, remove the old path if not self.remove(): raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really " "want to put the role here." % self.path) else: os.makedirs(self.path) # now we do the actual extraction to the path for member in members: # we only extract files, and remove any relative path # bits that might be in the file for security purposes # and drop any containing directory, as mentioned above if member.isreg() or member.issym(): parts = member.name.replace(archive_parent_dir, "", 1).split(os.sep) final_parts = [] for part in parts: if part != '..' and '~' not in part and '$' not in part: final_parts.append(part) member.name = os.path.join(*final_parts) role_tar_file.extract(member, self.path) # write out the install info file for later use self._write_galaxy_install_info() installed = True except OSError as e: error = True if e[0] == 13 and len(self.paths) > 1: current = self.paths.index(self.path) nextidx = current + 1 if len(self.paths) >= current: self.path = self.paths[nextidx] error = False if error: raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e))) # return the parsed yaml metadata display.display("- %s was installed successfully" % str(self)) if not local_file: try: os.unlink(tmp_file) except (OSError,IOError) as e: display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e))) return True return False @property def spec(self): """ Returns role spec info { 'scm': 'git', 'src': 'http://git.example.com/repos/repo.git', 'version': 'v1.0', 'name': 'repo' } """ return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
gpl-3.0
tzabian/fuego-pootle
local_apps/pootle_misc/dbinit.py
5
4585
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 Zuza Software Foundation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.

# Staggered first-run installer.  Each helper below produces an HTML
# fragment; staggered_install() yields them one by one so the browser can
# render progress while the Pootle database is being created and populated.

import sys

from django.core.management import call_command
from django.contrib.auth.models import User

from pootle.i18n.gettext import ugettext as _
from pootle_language.models import Language
from pootle_project.models import Project


def header(exception):
    """Return the opening HTML (doctype, inline CSS, error banner) of the
    install progress page.

    ``exception`` is the database error that triggered the staggered
    install; its text is shown to the user.

    NOTE(review): ``_`` is called here with a second positional argument
    for interpolation -- presumably Pootle's ugettext wrapper supports
    that; plain Django ugettext does not.  Confirm against
    pootle.i18n.gettext.
    """
    text = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta content="text/html; charset=utf-8" http-equiv="content-type" />
<style type="text/css">
body
{
background-color: #ffffff;
color: #000000;
font-family: Georgia, serif;
margin: 40px auto;
width: 740px;
}
h1
{
font-size: 185%%;
}
ul
{
list-style-type: square;
}
.error
{
background-color: inherit;
color: #d54e21;
font-weight: bold;
}
</style>
</head>
<body>
<h1>%(title)s</h1>
<p class="error">%(msg)s</p>
""" % {'title': _('Pootle: Install'),
       'msg': _('Error: "%s" while attempting to access the Pootle database, will try to initialize database.', exception)}
    return text


def syncdb():
    """Create the database tables via Django's syncdb and return a progress
    fragment.  Note the side effect happens when this function is called."""
    text = u"""
<p>%s</p>
""" % _('Creating database tables...')
    call_command('syncdb', interactive=False)
    return text


def initdb():
    """Populate default languages, projects and the admin user via the
    'initdb' management command; return a progress fragment."""
    text = u"""
<p>%s</p>
""" % _('Creating default languages, projects and admin user')
    call_command('initdb')
    return text


def stats_start():
    """Return the fragment that opens the statistics list."""
    text = u"""
<p>%s</p>
<ul>
""" % _('Calculating translation statistics, this will take a few minutes')
    return text


def stats_language(language):
    """Return a list item with the completion percentage of ``language``.

    Accessing ``translated_percentage()`` is what actually forces the
    (potentially slow) statistics calculation.
    """
    text = u"""
<li>%s</li>
""" % _('%(language)s is %(percent)d%% complete',
        {'language': language.localname(),
         'percent': language.translated_percentage()})
    return text


def stats_project(project):
    """Return a list item with the completion percentage of ``project``."""
    text = u"""
<li>%s</li>
""" % _('Project %(project)s is %(percent)d%% complete',
        {'project': project.fullname,
         'percent': project.translated_percentage()})
    return text


def stats_end():
    """Return the fragment that closes the statistics list."""
    text = u"""
</ul>
<p>%s</p>
""" % _('Done calculating statistics for default languages and projects')
    return text


def footer():
    """Return the closing HTML; the embedded script reloads the page after
    10 seconds so the user lands on the now-working front page."""
    text = """
<p>%(endmsg)s</p>
<div><script>setTimeout("location.reload()", 10000)</script></div>
</body></html>
""" % {'endmsg': _('Initialized database, you will be redirected to the front page in 10 seconds')}
    return text


def staggered_install(exception):
    """Initialize the pootle database while displaying progress reports
    for each step"""
    # django's syncdb command prints progress reports to stdout, but
    # mod_wsgi doesn't like stdout, so we reroute to stderr
    stdout = sys.stdout
    sys.stdout = sys.stderr
    yield header(exception)

    # try to build the database tables
    yield syncdb()

    # if this is a fresh install we should add some default languages
    # and projects and a default admin account to make pootle more
    # usable out of the box
    #
    # if there are no user accounts apart from defaults then assume
    # it's fresh install
    if User.objects.hide_defaults().count() == 0:
        yield initdb()

    # first time to visit the front page all stats for projects and
    # languages will be calculated which can take forever, since users
    # don't like webpages that take forever let's precalculate the
    # stats here
    yield stats_start()
    for language in Language.objects.iterator():
        yield stats_language(language)
    for project in Project.objects.iterator():
        yield stats_project(project)
    yield stats_end()
    yield footer()

    # bring back stdout
    sys.stdout = stdout
    return
gpl-2.0
DANCEcollaborative/forum-xblock
XBlock Integration Files/xdjangobb/xblock/lib/python2.7/site-packages/django/contrib/auth/tests/tokens.py
96
2441
from datetime import date, timedelta

from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.test import TestCase


class TokenGeneratorTest(TestCase):
    """Tests for the password-reset token generator
    (django.contrib.auth.tokens.PasswordResetTokenGenerator)."""

    def test_make_token(self):
        """
        Ensure that we can make a token and that it is valid
        """
        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        self.assertTrue(p0.check_token(user, tk1))

    def test_10265(self):
        """
        Ensure that the token generated for a user created in the same
        request will work correctly.
        """
        # See ticket #10265
        user = User.objects.create_user('comebackkid', 'test3@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        # Re-fetch the same user from the DB; the token must be identical,
        # i.e. not depend on in-memory state of the original instance.
        reload = User.objects.get(username='comebackkid')
        tk2 = p0.make_token(reload)
        self.assertEqual(tk1, tk2)

    def test_timeout(self):
        """
        Ensure we can use the token after n days, but no greater.
        """
        # Uses a mocked version of PasswordResetTokenGenerator so we can change
        # the value of 'today'
        class Mocked(PasswordResetTokenGenerator):
            # NOTE: deliberately does not call super().__init__();
            # only overrides the date used for timestamp comparison.
            def __init__(self, today):
                self._today_val = today

            def _today(self):
                return self._today_val

        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        # Exactly at the timeout boundary the token is still accepted...
        p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
        self.assertTrue(p1.check_token(user, tk1))
        # ...one day past it, it is rejected.
        p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
        self.assertFalse(p2.check_token(user, tk1))

    def test_date_length(self):
        """
        Make sure we don't allow overly long dates, causing a potential DoS.
        """
        user = User.objects.create_user('ima1337h4x0r', 'test4@example.com', 'p4ssw0rd')
        p0 = PasswordResetTokenGenerator()
        # This will put a 14-digit base36 timestamp into the token, which is too large.
        self.assertRaises(ValueError,
                          p0._make_token_with_timestamp,
                          user, 175455491841851871349)
mit
yoer/hue
desktop/core/ext-py/thrift-0.9.1/src/protocol/TMultiplexedProtocol.py
146
1417
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from thrift.Thrift import TMessageType from thrift.protocol import TProtocolDecorator SEPARATOR = ":" class TMultiplexedProtocol(TProtocolDecorator.TProtocolDecorator): def __init__(self, protocol, serviceName): TProtocolDecorator.TProtocolDecorator.__init__(self, protocol) self.serviceName = serviceName def writeMessageBegin(self, name, type, seqid): if (type == TMessageType.CALL or type == TMessageType.ONEWAY): self.protocol.writeMessageBegin( self.serviceName + SEPARATOR + name, type, seqid ) else: self.protocol.writeMessageBegin(name, type, seqid)
apache-2.0
allevin/PyGithub
tests/ExposeAllAttributes.py
2
7390
# -*- coding: utf-8 -*-

############################ Copyrights and license ############################
#                                                                              #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net>                 #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net>                 #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com>          #
# Copyright 2018 sfdye <tsfdye@gmail.com>                                      #
#                                                                              #
# This file is part of PyGithub.                                               #
# http://pygithub.readthedocs.io/                                              #
#                                                                              #
# PyGithub is free software: you can redistribute it and/or modify it under    #
# the terms of the GNU Lesser General Public License as published by the Free  #
# Software Foundation, either version 3 of the License, or (at your option)    #
# any later version.                                                           #
#                                                                              #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY  #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS    #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details.                                                                     #
#                                                                              #
# You should have received a copy of the GNU Lesser General Public License     #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>.             #
#                                                                              #
################################################################################

from . import Framework


class ExposeAllAttributes(Framework.TestCase):
    """Replay-based test verifying that every attribute present in the raw
    API payloads (``raw_data``) is exposed as a Python attribute on the
    corresponding PyGithub class.  Runs against recorded HTTP ReplayData,
    not the live GitHub API."""

    def testAllClasses(self):
        """Instantiate one object of (nearly) every PyGithub class from
        replay data, then assert no raw attribute is left unexposed."""
        # Fetch one representative instance of each class under test.
        authenticatedUser = self.g.get_user()
        namedUser = self.g.get_user("nvie")
        repository = authenticatedUser.get_repo("PyGithub")
        organization = self.g.get_organization("BeaverSoftware")
        plan = authenticatedUser.plan
        branch = repository.get_branch("master")
        commit = repository.get_commit("1292bf0e22c796e91cc3d6e24b544aece8c21f2a")
        commitStats = commit.stats
        commitStatus = commit.get_statuses()[0]
        milestone = repository.get_milestone(17)
        gist = self.g.get_gist("149016")
        gistComment = gist.get_comment(4565)
        gistFile = gist.files[".gitignore"]
        gistHistoryState = gist.history[0]
        gitCommit = repository.get_git_commit(
            "be37b8a7f3a68631c32672dcd84d9eba27438ee6"
        )
        gitAuthor = gitCommit.author
        gitTree = repository.get_git_tree("6f7c2d8c66d78863f7b91792deaead619799a1ce")
        gitTreeElement = gitTree.tree[0]
        gitBlob = repository.get_git_blob("681fb61f1761743a02f5c790f1c762cbfe8cfad1")
        gitRef = repository.get_git_ref("tags/v1.17.0")
        gitObject = gitRef.object
        issue = repository.get_issue(188)
        issueComment = issue.get_comment(22686536)
        issueEvent = issue.get_events()[0]
        issuePullRequest = issue.pull_request
        gitignoreTemplate = self.g.get_gitignore_template("Python")
        team = organization.get_team(141487)
        label = repository.get_label("Bug")
        pullRequest = repository.get_pull(31)
        pullRequestComment = pullRequest.get_review_comment(1580134)
        pullRequestPart = pullRequest.base
        file = pullRequest.get_files()[0]
        commitComment = repository.get_comment(3630301)
        rateLimit = self.g.get_rate_limit()
        hook = repository.get_hooks()[0]
        hookResponse = hook.last_response
        hookDescription = self.g.get_hooks()[0]
        comparison = repository.compare("master", "develop")
        contentFile = repository.get_contents("README.rst")
        permissions = repository.permissions
        event = repository.get_events()[0]
        notification = authenticatedUser.get_notification("8406712")
        notificationSubject = notification.subject

        # Commented-out entries are intentionally excluded (security-sensitive
        # replay data, deprecated endpoints, or objects only obtainable via
        # mutating API calls) -- see inline notes.
        missingAttributes = self.gatherMissingAttributes(
            [
                authenticatedUser,
                # authorization,  # Security issue if put as-is in ReplayData
                # authorizationApplication,  # Security issue if put as-is in ReplayData
                branch,
                commit,
                commitComment,
                commitStats,
                commitStatus,
                comparison,
                contentFile,
                # download,  # Deprecated: https://github.com/blog/1302-goodbye-uploads
                event,
                file,
                gist,
                gistComment,
                gistFile,
                gistHistoryState,
                gitAuthor,
                gitBlob,
                gitCommit,
                gitignoreTemplate,
                gitObject,
                gitRef,
                # gitTag,
                gitTree,
                gitTreeElement,
                hook,
                hookDescription,
                hookResponse,
                issue,
                issueComment,
                issueEvent,
                issuePullRequest,
                label,
                milestone,
                namedUser,
                notification,
                notificationSubject,
                organization,
                permissions,
                plan,
                pullRequest,
                pullRequestComment,
                # pullRequestMergeStatus,  # Only obtained when merging a pull request through the API
                pullRequestPart,
                rateLimit,
                repository,
                # repositoryKey,  # Security issue if put as-is in ReplayData
                # tag,
                team,
                # userKey,  # Security issue if put as-is in ReplayData
            ]
        )
        # Print every missing attribute before failing, so a single run
        # reports them all.
        for className, attributesMissingInClass in sorted(missingAttributes.items()):
            for attrName, value in sorted(attributesMissingInClass.items()):
                print(className, attrName, "->", repr(value))
        self.assertEqual(sum(len(attrs) for attrs in missingAttributes.values()), 0)

    def findMissingAttributes(self, obj):
        """Return ``(class_name, {attr: raw_value})`` for every key in
        ``obj.raw_data`` that is not exposed as an attribute on ``obj``.

        ``update()`` (when available) completes lazily-loaded objects so
        their full raw payload is present before checking.
        """
        if hasattr(obj, "update"):
            obj.update()
        className = obj.__class__.__name__
        missingAttributes = {}
        for attribute in obj.raw_data:
            # "_links" is hypermedia metadata, never exposed deliberately.
            if attribute != "_links":
                if not hasattr(obj, attribute):
                    missingAttributes[attribute] = obj.raw_data[attribute]
        return (className, missingAttributes)

    def gatherMissingAttributes(self, objs):
        """Aggregate findMissingAttributes() over ``objs`` into a dict of
        ``{class_name: {attr: raw_value}}``, keeping only classes that
        actually have missing attributes."""
        allMissingAttributes = dict()
        for obj in objs:
            className, attributesMissingInClass = self.findMissingAttributes(obj)
            if len(attributesMissingInClass) > 0:
                if className not in allMissingAttributes:
                    allMissingAttributes[className] = dict()
                allMissingAttributes[className].update(attributesMissingInClass)
        return allMissingAttributes
lgpl-3.0
LutzLange/openshift-ansible
utils/setup.py
9
2999
"""A setuptools based setup module. """
# Always prefer setuptools over distutils
from setuptools import setup

setup(
    name='ooinstall',

    # Versions should comply with PEP440.  For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version="3.0.0",

    description="Ansible wrapper for OpenShift Enterprise 3 installation.",

    # The project's main homepage.
    url="http://github.com/openshift/openshift-extras/tree/enterprise-3.0/oo-install",

    # Author details.
    # BUG FIX: the original file had these two values swapped -- the e-mail
    # address was passed as `author` and the name as `author_email`.
    author="OpenShift",
    author_email="openshift@redhat.com",

    # Choose your license
    license="Apache 2.0",

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Topic :: Utilities',
    ],

    # What does your project relate to?
    keywords='oo-install setuptools development',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    #packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    packages=['ooinstall'],
    package_dir={'ooinstall': 'src/ooinstall'},

    # List run-time dependencies here.  These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['click', 'PyYAML'],

    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    #extras_require={
    #    'dev': ['check-manifest'],
    #    'test': ['coverage'],
    #},

    # If there are data files included in your packages that need to be
    # installed, specify them here.  If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
        'ooinstall': ['ansible.cfg', 'ansible_plugins/*'],
    },

    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages. See:
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files  # noqa
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    #data_files=[('my_data', ['data/data_file'])],

    tests_require=['nose'],
    test_suite='nose.collector',

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'oo-install=ooinstall.cli_installer:cli',
        ],
    },
)
apache-2.0
thumbimigwe/echorizr
lib/python2.7/site-packages/django/db/models/__init__.py
24
1679
from functools import wraps from django.core.exceptions import ObjectDoesNotExist # NOQA from django.db.models import signals # NOQA from django.db.models.aggregates import * # NOQA from django.db.models.deletion import ( # NOQA CASCADE, DO_NOTHING, PROTECT, SET, SET_DEFAULT, SET_NULL, ProtectedError, ) from django.db.models.expressions import ( # NOQA Case, Expression, ExpressionWrapper, F, Func, Value, When, ) from django.db.models.fields import * # NOQA from django.db.models.fields.files import FileField, ImageField # NOQA from django.db.models.fields.proxy import OrderWrt # NOQA from django.db.models.fields.subclassing import SubfieldBase # NOQA from django.db.models.lookups import Lookup, Transform # NOQA from django.db.models.manager import Manager # NOQA from django.db.models.query import Prefetch, Q, QuerySet # NOQA # Imports that would create circular imports if sorted from django.db.models.base import Model # NOQA isort:skip from django.db.models.fields.related import ( # NOQA isort:skip ForeignKey, ForeignObject, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel, ) def permalink(func): """ Decorator that calls urlresolvers.reverse() to return a URL using parameters returned by the decorated function "func". "func" should be a function that returns a tuple in one of the following formats: (viewname, viewargs) (viewname, viewargs, viewkwargs) """ from django.core.urlresolvers import reverse @wraps(func) def inner(*args, **kwargs): bits = func(*args, **kwargs) return reverse(bits[0], None, *bits[1:3]) return inner
mit
javiergarridomellado/Empresa_django
devcodela/lib/python2.7/site-packages/django/contrib/gis/utils/srs.py
210
3235
from django.contrib.gis.gdal import SpatialReference


def add_srs_entry(srs, auth_name='EPSG', auth_srid=None, ref_sys_name=None,
                  database=None):
    """
    This function takes a GDAL SpatialReference system and adds its
    information to the `spatial_ref_sys` table of the spatial backend.
    Doing this enables database-level spatial transformations for the
    backend.  Thus, this utility is useful for adding spatial reference
    systems not included by default with the backend -- for example, the
    so-called "Google Maps Mercator Projection" is excluded in PostGIS 1.3
    and below, and the following adds it to the `spatial_ref_sys` table:

    >>> from django.contrib.gis.utils import add_srs_entry
    >>> add_srs_entry(900913)

    Keyword Arguments:
     auth_name:
       This keyword may be customized with the value of the `auth_name`
       field.  Defaults to 'EPSG'.

     auth_srid:
       This keyword may be customized with the value of the `auth_srid`
       field.  Defaults to the SRID determined by GDAL.

     ref_sys_name:
       For SpatiaLite users only, sets the value of the `ref_sys_name`
       field.  Defaults to the name determined by GDAL.

     database:
      The name of the database connection to use; the default is the value
      of `django.db.DEFAULT_DB_ALIAS` (at the time of this writing, it's
      value is 'default').
    """
    # Imported here rather than at module level to avoid requiring a
    # configured Django settings module just to import this module.
    from django.db import connections, DEFAULT_DB_ALIAS
    if not database:
        database = DEFAULT_DB_ALIAS
    connection = connections[database]

    # Only spatial backends expose `spatial_version`; fail early otherwise.
    if not hasattr(connection.ops, 'spatial_version'):
        raise Exception('The `add_srs_entry` utility only works '
                        'with spatial backends.')
    if connection.ops.oracle or connection.ops.mysql:
        raise Exception('This utility does not support the '
                        'Oracle or MySQL spatial backends.')

    SpatialRefSys = connection.ops.spatial_ref_sys()

    # If argument is not a `SpatialReference` instance, use it as parameter
    # to construct a `SpatialReference` instance.
    if not isinstance(srs, SpatialReference):
        srs = SpatialReference(srs)

    if srs.srid is None:
        raise Exception('Spatial reference requires an SRID to be '
                        'compatible with the spatial backend.')

    # Initializing the keyword arguments dictionary for both PostGIS
    # and SpatiaLite.
    kwargs = {'srid': srs.srid,
              'auth_name': auth_name,
              'auth_srid': auth_srid or srs.srid,
              'proj4text': srs.proj4,
              }

    # Backend-specific fields for the SpatialRefSys model.
    if connection.ops.postgis:
        kwargs['srtext'] = srs.wkt
    if connection.ops.spatialite:
        kwargs['ref_sys_name'] = ref_sys_name or srs.name

    # Creating the spatial_ref_sys model.
    try:
        # Try getting via SRID only, because using all kwargs may
        # differ from exact wkt/proj in database.
        sr = SpatialRefSys.objects.using(database).get(srid=srs.srid)
    except SpatialRefSys.DoesNotExist:
        sr = SpatialRefSys.objects.using(database).create(**kwargs)

# Alias is for backwards-compatibility purposes.
add_postgis_srs = add_srs_entry
gpl-2.0
agentxan/nzbToMedia
libs/unidecode/x023.py
165
4341
# Transliteration table for Unicode code points U+2300..U+23FE
# (Miscellaneous Technical block).  Index i holds the ASCII replacement
# for U+2300+i.  Only three code points have an ASCII approximation;
# every other entry is the '[?]' placeholder.
_REPLACEMENTS = {
    0x03: '^',    # U+2303 UP ARROWHEAD
    0x29: '<',    # U+2329 LEFT-POINTING ANGLE BRACKET
    0x2a: '> ',   # U+232A RIGHT-POINTING ANGLE BRACKET
}

data = tuple(_REPLACEMENTS.get(index, '[?]') for index in range(0xff))
gpl-3.0
varunkamra/kuma
vendor/packages/pygments/lexers/shell.py
72
15794
# -*- coding: utf-8 -*-
"""
    pygments.lexers.shell
    ~~~~~~~~~~~~~~~~~~~~~

    Lexers for various shells.

    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, include
from pygments.token import Punctuation, \
    Text, Comment, Operator, Keyword, Name, String, Number, Generic
from pygments.util import shebang_matches


__all__ = ['BashLexer', 'BashSessionLexer', 'TcshLexer', 'BatchLexer',
           'PowerShellLexer', 'ShellSessionLexer']

# Matches one line including its trailing newline; used to split session
# transcripts line by line.
line_re = re.compile('.*?\n')


class BashLexer(RegexLexer):
    """
    Lexer for (ba|k|)sh shell scripts.

    .. versionadded:: 0.6
    """

    name = 'Bash'
    aliases = ['bash', 'sh', 'ksh', 'shell']
    filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass',
                 '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'PKGBUILD']
    mimetypes = ['application/x-sh', 'application/x-shellscript']

    tokens = {
        'root': [
            include('basic'),
            (r'`', String.Backtick, 'backticks'),
            include('data'),
            include('interp'),
        ],
        # $(( )), $( ), ${ } and plain $var interpolation forms.
        'interp': [
            (r'\$\(\(', Keyword, 'math'),
            (r'\$\(', Keyword, 'paren'),
            (r'\$\{#?', String.Interpol, 'curly'),
            (r'\$#?(\w+|.)', Name.Variable),
        ],
        'basic': [
            (r'\b(if|fi|else|while|do|done|for|then|return|function|case|'
             r'select|continue|until|esac|elif)(\s*)\b',
             bygroups(Keyword, Text)),
            (r'\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|'
             r'complete|declare|dirs|disown|echo|enable|eval|exec|exit|'
             r'export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|'
             r'local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|'
             r'shopt|source|suspend|test|time|times|trap|true|type|typeset|'
             r'ulimit|umask|unalias|unset|wait)\s*\b(?!\.)',
             Name.Builtin),
            (r'#.*\n', Comment),
            (r'\\[\w\W]', String.Escape),
            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
            (r'[\[\]{}()=]', Operator),
            (r'<<<', Operator),  # here-string
            (r'<<-?\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
            (r'&&|\|\|', Operator),
        ],
        'data': [
            (r'(?s)\$?"(\\\\|\\[0-7]+|\\.|[^"\\$])*"', String.Double),
            (r'"', String.Double, 'string'),
            (r"(?s)\$'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r"(?s)'.*?'", String.Single),
            (r';', Punctuation),
            (r'&', Punctuation),
            (r'\|', Punctuation),
            (r'\s+', Text),
            (r'\d+(?= |\Z)', Number),
            (r'[^=\s\[\]{}()$"\'`\\<&|;]+', Text),
            (r'<', Text),
        ],
        # Double-quoted string with interpolation support.
        'string': [
            (r'"', String.Double, '#pop'),
            (r'(?s)(\\\\|\\[0-7]+|\\.|[^"\\$])+', String.Double),
            include('interp'),
        ],
        # Inside ${ ... }.
        'curly': [
            (r'\}', String.Interpol, '#pop'),
            (r':-', Keyword),
            (r'\w+', Name.Variable),
            (r'[^}:"\'`$\\]+', Punctuation),
            (r':', Punctuation),
            include('root'),
        ],
        # Inside $( ... ).
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
        # Inside $(( ... )) arithmetic expansion.
        'math': [
            (r'\)\)', Keyword, '#pop'),
            (r'[-+*/%^|&]|\*\*|\|\|', Operator),
            (r'\d+#\d+', Number),
            (r'\d+#(?! )', Number),
            (r'\d+', Number),
            include('root'),
        ],
        'backticks': [
            (r'`', String.Backtick, '#pop'),
            include('root'),
        ],
    }

    def analyse_text(text):
        # Strong signal: a sh-family shebang; weak signal: a "$ " prompt.
        if shebang_matches(text, r'(ba|z|)sh'):
            return 1
        if text.startswith('$ '):
            return 0.2


class BashSessionLexer(Lexer):
    """
    Lexer for simplistic shell sessions.

    .. versionadded:: 1.1
    """

    name = 'Bash Session'
    aliases = ['console']
    filenames = ['*.sh-session']
    mimetypes = ['application/x-shell-session']

    def get_tokens_unprocessed(self, text):
        # Lines after a recognized prompt are buffered as code and lexed
        # with BashLexer; everything else is emitted as Generic.Output.
        bashlexer = BashLexer(**self.options)

        pos = 0
        curcode = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()
            m = re.match(r'^((?:\(\S+\))?(?:|sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)'
                         r'?|\[\S+[@:][^\n]+\].+)[$#%])(.*\n?)' , line)
            if m:
                # To support output lexers (say diff output), the output
                # needs to be broken by prompts whenever the output lexer
                # changes.
                if not insertions:
                    pos = match.start()

                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, m.group(1))]))
                curcode += m.group(2)
            elif line.startswith('>'):
                # Continuation prompt ("> ").
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:1])]))
                curcode += line[1:]
            else:
                # Flush buffered prompt/code before emitting output.
                if insertions:
                    toks = bashlexer.get_tokens_unprocessed(curcode)
                    for i, t, v in do_insertions(insertions, toks):
                        yield pos+i, t, v
                yield match.start(), Generic.Output, line
                insertions = []
                curcode = ''
        if insertions:
            for i, t, v in do_insertions(insertions,
                                         bashlexer.get_tokens_unprocessed(curcode)):
                yield pos+i, t, v


class ShellSessionLexer(Lexer):
    """
    Lexer for shell sessions that works with different command prompts

    .. versionadded:: 1.6
    """

    name = 'Shell Session'
    aliases = ['shell-session']
    filenames = ['*.shell-session']
    mimetypes = ['application/x-sh-session']

    def get_tokens_unprocessed(self, text):
        # Same strategy as BashSessionLexer but with a looser prompt regex
        # (e.g. "[user@host dir]$ ").
        bashlexer = BashLexer(**self.options)

        pos = 0
        curcode = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()
            m = re.match(r'^((?:\[?\S+@[^$#%]+\]?\s*)[$#%])(.*\n?)', line)
            if m:
                # To support output lexers (say diff output), the output
                # needs to be broken by prompts whenever the output lexer
                # changes.
                if not insertions:
                    pos = match.start()

                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, m.group(1))]))
                curcode += m.group(2)
            else:
                if insertions:
                    toks = bashlexer.get_tokens_unprocessed(curcode)
                    for i, t, v in do_insertions(insertions, toks):
                        yield pos+i, t, v
                yield match.start(), Generic.Output, line
                insertions = []
                curcode = ''
        if insertions:
            for i, t, v in do_insertions(insertions,
                                         bashlexer.get_tokens_unprocessed(curcode)):
                yield pos+i, t, v


class BatchLexer(RegexLexer):
    """
    Lexer for the DOS/Windows Batch file format.

    .. versionadded:: 0.7
    """
    name = 'Batchfile'
    aliases = ['bat', 'batch', 'dosbatch', 'winbatch']
    filenames = ['*.bat', '*.cmd']
    mimetypes = ['application/x-dos-batch']

    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            # Lines can start with @ to prevent echo
            (r'^\s*@', Punctuation),
            (r'^(\s*)(rem\s.*)$', bygroups(Text, Comment)),
            (r'".*?"', String.Double),
            (r"'.*?'", String.Single),
            # If made more specific, make sure you still allow expansions
            # like %~$VAR:zlt
            (r'%%?[~$:\w]+%?', Name.Variable),
            (r'::.*', Comment),  # Technically :: only works at BOL
            (r'\b(set)(\s+)(\w+)', bygroups(Keyword, Text, Name.Variable)),
            (r'\b(call)(\s+)(:\w+)', bygroups(Keyword, Text, Name.Label)),
            (r'\b(goto)(\s+)(\w+)', bygroups(Keyword, Text, Name.Label)),
            (r'\b(set|call|echo|on|off|endlocal|for|do|goto|if|pause|'
             r'setlocal|shift|errorlevel|exist|defined|cmdextversion|'
             r'errorlevel|else|cd|md|del|deltree|cls|choice)\b', Keyword),
            (r'\b(equ|neq|lss|leq|gtr|geq)\b', Operator),
            include('basic'),
            (r'.', Text),
        ],
        # NOTE(review): no rule in 'root' appears to push this 'echo'
        # state, so it looks unreachable here -- confirm against upstream
        # pygments before relying on it.
        'echo': [
            # Escapes only valid within echo args?
            (r'\^\^|\^<|\^>|\^\|', String.Escape),
            (r'\n', Text, '#pop'),
            include('basic'),
            (r'[^\'"^]+', Text),
        ],
        'basic': [
            (r'".*?"', String.Double),
            (r"'.*?'", String.Single),
            (r'`.*?`', String.Backtick),
            (r'-?\d+', Number),
            (r',', Punctuation),
            (r'=', Operator),
            (r'/\S+', Name),
            (r':\w+', Name.Label),
            (r'\w:\w+', Text),
            (r'([<>|])(\s*)(\w+)', bygroups(Punctuation, Text, Name)),
        ],
    }


class TcshLexer(RegexLexer):
    """
    Lexer for tcsh scripts.

    .. versionadded:: 0.10
    """

    name = 'Tcsh'
    aliases = ['tcsh', 'csh']
    filenames = ['*.tcsh', '*.csh']
    mimetypes = ['application/x-csh']

    tokens = {
        'root': [
            include('basic'),
            (r'\$\(', Keyword, 'paren'),
            (r'\$\{#?', Keyword, 'curly'),
            (r'`', String.Backtick, 'backticks'),
            include('data'),
        ],
        'basic': [
            (r'\b(if|endif|else|while|then|foreach|case|default|'
             r'continue|goto|breaksw|end|switch|endsw)\s*\b',
             Keyword),
            (r'\b(alias|alloc|bg|bindkey|break|builtins|bye|caller|cd|chdir|'
             r'complete|dirs|echo|echotc|eval|exec|exit|fg|filetest|getxvers|'
             r'glob|getspath|hashstat|history|hup|inlib|jobs|kill|'
             r'limit|log|login|logout|ls-F|migrate|newgrp|nice|nohup|notify|'
             r'onintr|popd|printenv|pushd|rehash|repeat|rootnode|popd|pushd|'
             r'set|shift|sched|setenv|setpath|settc|setty|setxvers|shift|'
             r'source|stop|suspend|source|suspend|telltc|time|'
             r'umask|unalias|uncomplete|unhash|universe|unlimit|unset|unsetenv|'
             r'ver|wait|warp|watchlog|where|which)\s*\b',
             Name.Builtin),
            (r'#.*', Comment),
            (r'\\[\w\W]', String.Escape),
            (r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
            (r'[\[\]{}()=]+', Operator),
            (r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
            (r';', Punctuation),
        ],
        'data': [
            (r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
            (r'\s+', Text),
            (r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
            (r'\d+(?= |\Z)', Number),
            (r'\$#?(\w+|.)', Name.Variable),
        ],
        'curly': [
            (r'\}', Keyword, '#pop'),
            (r':-', Keyword),
            (r'\w+', Name.Variable),
            (r'[^}:"\'`$]+', Punctuation),
            (r':', Punctuation),
            include('root'),
        ],
        'paren': [
            (r'\)', Keyword, '#pop'),
            include('root'),
        ],
        'backticks': [
            (r'`', String.Backtick, '#pop'),
            include('root'),
        ],
    }


class PowerShellLexer(RegexLexer):
    """
    For Windows PowerShell code.

    .. versionadded:: 1.5
    """
    name = 'PowerShell'
    aliases = ['powershell', 'posh', 'ps1', 'psm1']
    filenames = ['*.ps1','*.psm1']
    mimetypes = ['text/x-powershell']

    flags = re.DOTALL | re.IGNORECASE | re.MULTILINE

    keywords = (
        'while validateset validaterange validatepattern validatelength '
        'validatecount until trap switch return ref process param parameter in '
        'if global: function foreach for finally filter end elseif else '
        'dynamicparam do default continue cmdletbinding break begin alias \\? '
        '% #script #private #local #global mandatory parametersetname position '
        'valuefrompipeline valuefrompipelinebypropertyname '
        'valuefromremainingarguments helpmessage try catch throw').split()

    operators = (
        'and as band bnot bor bxor casesensitive ccontains ceq cge cgt cle '
        'clike clt cmatch cne cnotcontains cnotlike cnotmatch contains '
        'creplace eq exact f file ge gt icontains ieq ige igt ile ilike ilt '
        'imatch ine inotcontains inotlike inotmatch ireplace is isnot le like '
        'lt match ne not notcontains notlike notmatch or regex replace '
        'wildcard').split()

    verbs = (
        'write where wait use update unregister undo trace test tee take '
        'suspend stop start split sort skip show set send select scroll resume '
        'restore restart resolve resize reset rename remove register receive '
        'read push pop ping out new move measure limit join invoke import '
        'group get format foreach export expand exit enter enable disconnect '
        'disable debug cxnew copy convertto convertfrom convert connect '
        'complete compare clear checkpoint aggregate add').split()

    commenthelp = (
        'component description example externalhelp forwardhelpcategory '
        'forwardhelptargetname functionality inputs link '
        'notes outputs parameter remotehelprunspace role synopsis').split()

    tokens = {
        'root': [
            # we need to count pairs of parentheses for correct highlight
            # of '$(...)' blocks in strings
            (r'\(', Punctuation, 'child'),
            (r'\s+', Text),
            (r'^(\s*#[#\s]*)(\.(?:%s))([^\n]*$)' % '|'.join(commenthelp),
             bygroups(Comment, String.Doc, Comment)),
            (r'#[^\n]*?$', Comment),
            (r'(&lt;|<)#', Comment.Multiline, 'multline'),
            (r'@"\n', String.Heredoc, 'heredoc-double'),
            (r"@'\n.*?\n'@", String.Heredoc),
            # escaped syntax
            (r'`[\'"$@-]', Punctuation),
            (r'"', String.Double, 'string'),
            (r"'([^']|'')*'", String.Single),
            (r'(\$|@@|@)((global|script|private|env):)?\w+', Name.Variable),
            (r'(%s)\b' % '|'.join(keywords), Keyword),
            (r'-(%s)\b' % '|'.join(operators), Operator),
            (r'(%s)-[a-z_]\w*\b' % '|'.join(verbs), Name.Builtin),
            (r'\[[a-z_\[][\w. `,\[\]]*\]', Name.Constant),  # .net [type]s
            (r'-[a-z_]\w*', Name),
            (r'\w+', Name),
            (r'[.,;@{}\[\]$()=+*/\\&%!~?^`|<>-]|::', Punctuation),
        ],
        # Parenthesized sub-expression; nests to balance parens.
        'child': [
            (r'\)', Punctuation, '#pop'),
            include('root'),
        ],
        'multline': [
            (r'[^#&.]+', Comment.Multiline),
            (r'#(>|&gt;)', Comment.Multiline, '#pop'),
            (r'\.(%s)' % '|'.join(commenthelp), String.Doc),
            (r'[#&.]', Comment.Multiline),
        ],
        'string': [
            (r"`[0abfnrtv'\"$`]", String.Escape),
            (r'[^$`"]+', String.Double),
            (r'\$\(', Punctuation, 'child'),
            (r'""', String.Double),
            (r'[`$]', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'heredoc-double': [
            (r'\n"@', String.Heredoc, '#pop'),
            (r'\$\(', Punctuation, 'child'),
            # NOTE(review): the trailing '"]' in this pattern looks like
            # stray characters (it requires a literal '"]' after the run)
            # -- kept byte-identical; confirm against upstream pygments.
            (r'[^@\n]+"]', String.Heredoc),
            (r".", String.Heredoc),
        ]
    }
mpl-2.0
mathben/python_clang_parser
result/generate_uml.py
1
1943
#!/usr/bin/env python2 # -*- coding: utf-8 -*- import os import pygraphviz as pgv from ast import ast class GenerateUml(object): def __init__(self, _parser, _lst_obj_ast): self._parser = _parser self._lst_obj_ast = _lst_obj_ast self._name = os.path.split(_parser.working_path)[1] self.file_path = os.path.join(_parser.graph_path, self._name + ".dot") self._uml_name = "UML " + self._name file_path = os.path.join(_parser.graph_path, self._uml_name) self.g = pgv.AGraph(name=file_path, directed=True) def generate_uml(self): self.g.node_attr.update(shape='record') self._add_class_dot() self.g.layout(prog='dot') self.g.draw(path=self.file_path + "_dot.svgz", format='svgz') self.g.write(self.file_path) def _add_class_dot(self): # double loop to get all class dct_class_obj = ast.create_class_dict_from_lst_ast_obj(self._lst_obj_ast) for cls_obj in dct_class_obj.values(): self._add_class_node(cls_obj) self._add_class_base_edge(cls_obj) self._add_class_composition_edge(cls_obj) def _add_class_node(self, cls_obj): self.g.add_node(cls_obj.namespace_name, label=cls_obj.get_dot()) def _add_class_base_edge(self, cls_obj): for cls_base in cls_obj.derived_class: if not self.g.has_node(cls_base.type): # create a external node self.g.add_node(cls_base.type, color="red") self.g.add_edge(cls_obj.namespace_name, cls_base.type, arrowhead="empty") def _add_class_composition_edge(self, cls_obj): for var in cls_obj.variable: if not self.g.has_node(var.type): # create a external node # self.g.add_node(var.type, color="red") continue self.g.add_edge(cls_obj.namespace_name, var.type, arrowhead="normal")
gpl-3.0
duyetdev/openerp-6.1.1
openerp/tests/addons/test_limits/models.py
14
1052
# -*- coding: utf-8 -*- import time import openerp class m(openerp.osv.osv.Model): """ This model exposes a few methods that will consume between 'almost no resource' and 'a lot of resource'. """ _name = 'test.limits.model' def consume_nothing(self, cr, uid, context=None): return True def consume_memory(self, cr, uid, size, context=None): l = [0] * size return True def leak_memory(self, cr, uid, size, context=None): if not hasattr(self, 'l'): self.l = [] self.l.append([0] * size) return True def consume_time(self, cr, uid, seconds, context=None): time.sleep(seconds) return True def consume_cpu_time(self, cr, uid, seconds, context=None): import os t0 = time.clock() t1 = time.clock() while t1 - t0 < seconds: for i in xrange(10000000): x = i * i t1 = time.clock() return True # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
lwiecek/django
django/test/selenium.py
10
3100
from __future__ import unicode_literals import sys import unittest from django.test import LiveServerTestCase, tag from django.utils.module_loading import import_string from django.utils.six import with_metaclass from django.utils.text import capfirst class SeleniumTestCaseBase(type(LiveServerTestCase)): # List of browsers to dynamically create test classes for. browsers = [] # Sentinel value to differentiate browser-specific instances. browser = None def __new__(cls, name, bases, attrs): """ Dynamically create new classes and add them to the test module when multiple browsers specs are provided (e.g. --selenium=firefox,chrome). """ test_class = super(SeleniumTestCaseBase, cls).__new__(cls, name, bases, attrs) # If the test class is either browser-specific or a test base, return it. if test_class.browser or not any(name.startswith('test') and callable(value) for name, value in attrs.items()): return test_class elif test_class.browsers: # Reuse the created test class to make it browser-specific. # We can't rename it to include the browser name or create a # subclass like we do with the remaining browsers as it would # either duplicate tests or prevent pickling of its instances. first_browser = test_class.browsers[0] test_class.browser = first_browser # Create subclasses for each of the remaining browsers and expose # them through the test's module namespace. module = sys.modules[test_class.__module__] for browser in test_class.browsers[1:]: browser_test_class = cls.__new__( cls, str("%s%s" % (capfirst(browser), name)), (test_class,), {'browser': browser, '__module__': test_class.__module__} ) setattr(module, browser_test_class.__name__, browser_test_class) return test_class # If no browsers were specified, skip this class (it'll still be discovered). 
return unittest.skip('No browsers specified.')(test_class) @classmethod def import_webdriver(cls, browser): return import_string("selenium.webdriver.%s.webdriver.WebDriver" % browser) def create_webdriver(self): return self.import_webdriver(self.browser)() @tag('selenium') class SeleniumTestCase(with_metaclass(SeleniumTestCaseBase, LiveServerTestCase)): @classmethod def setUpClass(cls): cls.selenium = cls.create_webdriver() cls.selenium.implicitly_wait(10) super(SeleniumTestCase, cls).setUpClass() @classmethod def _tearDownClassInternal(cls): # quit() the WebDriver before attempting to terminate and join the # single-threaded LiveServerThread to avoid a dead lock if the browser # kept a connection alive. if hasattr(cls, 'selenium'): cls.selenium.quit() super(SeleniumTestCase, cls)._tearDownClassInternal()
bsd-3-clause
oscarolar/odoo
openerp/tools/import_email.py
105
6375
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import os, sys import re import smtplib import email, mimetypes from email.Header import decode_header from email.MIMEText import MIMEText import xmlrpclib warn_msg = """ Bonjour, Le message avec le sujet "%s" n'a pu être archivé dans l'ERP. 
""".decode('utf-8') class EmailParser(object): def __init__(self, headers, dispatcher): self.headers = headers self.dispatcher = dispatcher def parse(self, msg): dispatcher((self.headers, msg)) class CommandDispatcher(object): def __init__(self, receiver): self.receiver = receiver def __call__(self, request): return self.receiver(request) class RPCProxy(object): def __init__(self, uid, passwd, host='localhost', port=8069, path='object'): self.rpc = xmlrpclib.ServerProxy('http://%s:%s/%s' % (host, port, path)) self.user_id = uid self.passwd = passwd def __call__(self, request): return self.rpc.execute(self.user_id, self.passwd, *request) class ReceiverEmail2Event(object): email_re = re.compile(r""" ([a-zA-Z][\w\.-]*[a-zA-Z0-9] # username part @ # mandatory @ sign [a-zA-Z0-9][\w\.-]* # domain must start with a letter \. [a-z]{2,3} # TLD ) """, re.VERBOSE) project_re = re.compile(r"^ *\[?(\d{4}\.?\d{0,3})\]?", re.UNICODE) def __init__(self, rpc): self.rpc = rpc def get_addresses(self, headers, msg): hcontent = '' for header in [h for h in headers if msg.has_key(h)]: hcontent += msg[header] return self.email_re.findall(hcontent) def get_partners(self, headers, msg): alladdresses = self.get_addresses(headers, msg) address_ids = self.rpc(('res.partner', 'search', [('email', 'in', alladdresses)])) addresses = self.rpc(('res.partner', 'read', address_ids)) return [x['partner_id'][0] for x in addresses] def __call__(self, request): headers, msg = request partners = self.get_partners(headers, msg) subject = u'' for string, charset in decode_header(msg['Subject']): if charset: subject += string.decode(charset) else: subject += unicode(string) if partners: self.save_mail(msg, subject, partners) else: warning = MIMEText((warn_msg % (subject,)).encode('utf-8'), 'plain', 'utf-8') warning['Subject'] = 'Message de OpenERP' warning['From'] = 'erp@steel-sa.com' warning['To'] = msg['From'] s = smtplib.SMTP() s.connect() s.sendmail('erp@steel-sa.com', 
self.email_re.findall(msg['From']), warning.as_string()) s.close() if msg.is_multipart(): for message in [m for m in msg.get_payload() if m.get_content_type() == 'message/rfc822']: self((headers, message.get_payload()[0])) def save_mail(self, msg, subject, partners): counter, description = 1, u'' if msg.is_multipart(): for part in msg.get_payload(): stockdir = os.path.join('emails', msg['Message-Id'][1:-1]) newdir = os.path.join('/tmp', stockdir) filename = part.get_filename() if not filename: ext = mimetypes.guess_extension(part.get_type()) if not ext: ext = '.bin' filename = 'part-%03d%s' % (counter, ext) if part.get_content_maintype() == 'multipart': continue elif part.get_content_maintype() == 'text': if part.get_content_subtype() == 'plain': description += part.get_payload(decode=1).decode(part.get_charsets()[0]) description += u'\n\nVous trouverez les éventuels fichiers dans le répertoire: %s' % stockdir continue else: description += u'\n\nCe message est en "%s", vous trouverez ce texte dans le répertoire: %s' % (part.get_content_type(), stockdir) elif part.get_content_type() == 'message/rfc822': continue if not os.path.isdir(newdir): os.mkdir(newdir) counter += 1 fd = file(os.path.join(newdir, filename), 'w') fd.write(part.get_payload(decode=1)) fd.close() else: description = msg.get_payload(decode=1).decode(msg.get_charsets()[0]) project = self.project_re.search(subject) if project: project = project.groups()[0] else: project = '' for partner in partners: self.rpc(('res.partner.event', 'create', {'name' : subject, 'partner_id' : partner, 'description' : description, 'project' : project})) if __name__ == '__main__': rpc_dispatcher = CommandDispatcher(RPCProxy(4, 'admin')) dispatcher = CommandDispatcher(ReceiverEmail2Event(rpc_dispatcher)) parser = EmailParser(['To', 'Cc', 'From'], dispatcher) parser.parse(email.message_from_file(sys.stdin)) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ksya/kodi.screensaver.sonyscreenoff
resources/lib/requests/cookies.py
131
18291
# -*- coding: utf-8 -*- """ requests.cookies ~~~~~~~~~~~~~~~~ Compatibility code to be able to use `cookielib.CookieJar` with requests. requests.utils imports from here, so be careful with imports. """ import copy import time import calendar import collections from ._internal_utils import to_native_string from .compat import cookielib, urlparse, urlunparse, Morsel try: import threading # grr, pyflakes: this fixes "redefinition of unused 'threading'" threading except ImportError: import dummy_threading as threading class MockRequest(object): """Wraps a `requests.Request` to mimic a `urllib2.Request`. The code in `cookielib.CookieJar` expects this interface in order to correctly manage cookie policies, i.e., determine whether a cookie can be set, given the domains of the request and the cookie. The original request object is read-only. The client is responsible for collecting the new headers via `get_new_headers()` and interpreting them appropriately. You probably want `get_cookie_header`, defined below. 
""" def __init__(self, request): self._r = request self._new_headers = {} self.type = urlparse(self._r.url).scheme def get_type(self): return self.type def get_host(self): return urlparse(self._r.url).netloc def get_origin_req_host(self): return self.get_host() def get_full_url(self): # Only return the response's URL if the user hadn't set the Host # header if not self._r.headers.get('Host'): return self._r.url # If they did set it, retrieve it and reconstruct the expected domain host = to_native_string(self._r.headers['Host'], encoding='utf-8') parsed = urlparse(self._r.url) # Reconstruct the URL as we expect it return urlunparse([ parsed.scheme, host, parsed.path, parsed.params, parsed.query, parsed.fragment ]) def is_unverifiable(self): return True def has_header(self, name): return name in self._r.headers or name in self._new_headers def get_header(self, name, default=None): return self._r.headers.get(name, self._new_headers.get(name, default)) def add_header(self, key, val): """cookielib has no legitimate use for this method; add it back if you find one.""" raise NotImplementedError("Cookie headers should be added with add_unredirected_header()") def add_unredirected_header(self, name, value): self._new_headers[name] = value def get_new_headers(self): return self._new_headers @property def unverifiable(self): return self.is_unverifiable() @property def origin_req_host(self): return self.get_origin_req_host() @property def host(self): return self.get_host() class MockResponse(object): """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. ...what? Basically, expose the parsed HTTP headers from the server response the way `cookielib` expects to see them. """ def __init__(self, headers): """Make a MockResponse for `cookielib` to read. 
:param headers: a httplib.HTTPMessage or analogous carrying the headers """ self._headers = headers def info(self): return self._headers def getheaders(self, name): self._headers.getheaders(name) def extract_cookies_to_jar(jar, request, response): """Extract the cookies from the response into a CookieJar. :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar) :param request: our own requests.Request object :param response: urllib3.HTTPResponse object """ if not (hasattr(response, '_original_response') and response._original_response): return # the _original_response field is the wrapped httplib.HTTPResponse object, req = MockRequest(request) # pull out the HTTPMessage with the headers and put it in the mock: res = MockResponse(response._original_response.msg) jar.extract_cookies(res, req) def get_cookie_header(jar, request): """ Produce an appropriate Cookie header string to be sent with `request`, or None. :rtype: str """ r = MockRequest(request) jar.add_cookie_header(r) return r.get_new_headers().get('Cookie') def remove_cookie_by_name(cookiejar, name, domain=None, path=None): """Unsets a cookie by name, by default over all domains and paths. Wraps CookieJar.clear(), is O(n). """ clearables = [] for cookie in cookiejar: if cookie.name != name: continue if domain is not None and domain != cookie.domain: continue if path is not None and path != cookie.path: continue clearables.append((cookie.domain, cookie.path, cookie.name)) for domain, path, name in clearables: cookiejar.clear(domain, path, name) class CookieConflictError(RuntimeError): """There are two cookies that meet the criteria specified in the cookie jar. Use .get and .set and include domain and path args in order to be more specific. """ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping): """Compatibility class; is a cookielib.CookieJar, but exposes a dict interface. 
This is the CookieJar we create by default for requests and sessions that don't specify one, since some clients may expect response.cookies and session.cookies to support dict operations. Requests does not use the dict interface internally; it's just for compatibility with external client code. All requests code should work out of the box with externally provided instances of ``CookieJar``, e.g. ``LWPCookieJar`` and ``FileCookieJar``. Unlike a regular CookieJar, this class is pickleable. .. warning:: dictionary operations that are normally O(1) may be O(n). """ def get(self, name, default=None, domain=None, path=None): """Dict-like get() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. .. warning:: operation is O(n), not O(1). """ try: return self._find_no_duplicates(name, domain, path) except KeyError: return default def set(self, name, value, **kwargs): """Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. """ # support client code that unsets cookies by assignment of a None value: if value is None: remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) return if isinstance(value, Morsel): c = morsel_to_cookie(value) else: c = create_cookie(name, value, **kwargs) self.set_cookie(c) return c def iterkeys(self): """Dict-like iterkeys() that returns an iterator of names of cookies from the jar. .. seealso:: itervalues() and iteritems(). """ for cookie in iter(self): yield cookie.name def keys(self): """Dict-like keys() that returns a list of names of cookies from the jar. .. seealso:: values() and items(). """ return list(self.iterkeys()) def itervalues(self): """Dict-like itervalues() that returns an iterator of values of cookies from the jar. .. seealso:: iterkeys() and iteritems(). 
""" for cookie in iter(self): yield cookie.value def values(self): """Dict-like values() that returns a list of values of cookies from the jar. .. seealso:: keys() and items(). """ return list(self.itervalues()) def iteritems(self): """Dict-like iteritems() that returns an iterator of name-value tuples from the jar. .. seealso:: iterkeys() and itervalues(). """ for cookie in iter(self): yield cookie.name, cookie.value def items(self): """Dict-like items() that returns a list of name-value tuples from the jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a vanilla python dict of key value pairs. .. seealso:: keys() and values(). """ return list(self.iteritems()) def list_domains(self): """Utility method to list all the domains in the jar.""" domains = [] for cookie in iter(self): if cookie.domain not in domains: domains.append(cookie.domain) return domains def list_paths(self): """Utility method to list all the paths in the jar.""" paths = [] for cookie in iter(self): if cookie.path not in paths: paths.append(cookie.path) return paths def multiple_domains(self): """Returns True if there are multiple domains in the jar. Returns False otherwise. :rtype: bool """ domains = [] for cookie in iter(self): if cookie.domain is not None and cookie.domain in domains: return True domains.append(cookie.domain) return False # there is only one domain in jar def get_dict(self, domain=None, path=None): """Takes as an argument an optional domain and path and returns a plain old Python dict of name-value pairs of cookies that meet the requirements. 
:rtype: dict """ dictionary = {} for cookie in iter(self): if (domain is None or cookie.domain == domain) and (path is None or cookie.path == path): dictionary[cookie.name] = cookie.value return dictionary def __contains__(self, name): try: return super(RequestsCookieJar, self).__contains__(name) except CookieConflictError: return True def __getitem__(self, name): """Dict-like __getitem__() for compatibility with client code. Throws exception if there are more than one cookie with name. In that case, use the more explicit get() method instead. .. warning:: operation is O(n), not O(1). """ return self._find_no_duplicates(name) def __setitem__(self, name, value): """Dict-like __setitem__ for compatibility with client code. Throws exception if there is already a cookie of that name in the jar. In that case, use the more explicit set() method instead. """ self.set(name, value) def __delitem__(self, name): """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s ``remove_cookie_by_name()``. """ remove_cookie_by_name(self, name) def set_cookie(self, cookie, *args, **kwargs): if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'): cookie.value = cookie.value.replace('\\"', '') return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) def update(self, other): """Updates this jar with cookies from another CookieJar or dict-like""" if isinstance(other, cookielib.CookieJar): for cookie in other: self.set_cookie(copy.copy(cookie)) else: super(RequestsCookieJar, self).update(other) def _find(self, name, domain=None, path=None): """Requests uses this method internally to get cookie values. If there are conflicting cookies, _find arbitrarily chooses one. See _find_no_duplicates if you want an exception thrown if there are conflicting cookies. 
:param name: a string containing name of cookie :param domain: (optional) string containing domain of cookie :param path: (optional) string containing path of cookie :return: cookie.value """ for cookie in iter(self): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: return cookie.value raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def _find_no_duplicates(self, name, domain=None, path=None): """Both ``__get_item__`` and ``get`` call this function: it's never used elsewhere in Requests. :param name: a string containing name of cookie :param domain: (optional) string containing domain of cookie :param path: (optional) string containing path of cookie :raises KeyError: if cookie is not found :raises CookieConflictError: if there are multiple cookies that match name and optionally domain and path :return: cookie.value """ toReturn = None for cookie in iter(self): if cookie.name == name: if domain is None or cookie.domain == domain: if path is None or cookie.path == path: if toReturn is not None: # if there are multiple cookies that meet passed in criteria raise CookieConflictError('There are multiple cookies with name, %r' % (name)) toReturn = cookie.value # we will eventually return this as long as no cookie conflict if toReturn: return toReturn raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def __getstate__(self): """Unlike a normal CookieJar, this class is pickleable.""" state = self.__dict__.copy() # remove the unpickleable RLock object state.pop('_cookies_lock') return state def __setstate__(self, state): """Unlike a normal CookieJar, this class is pickleable.""" self.__dict__.update(state) if '_cookies_lock' not in self.__dict__: self._cookies_lock = threading.RLock() def copy(self): """Return a copy of this RequestsCookieJar.""" new_cj = RequestsCookieJar() new_cj.update(self) return new_cj def _copy_cookie_jar(jar): if jar is None: return None if 
hasattr(jar, 'copy'): # We're dealing with an instance of RequestsCookieJar return jar.copy() # We're dealing with a generic CookieJar instance new_jar = copy.copy(jar) new_jar.clear() for cookie in jar: new_jar.set_cookie(copy.copy(cookie)) return new_jar def create_cookie(name, value, **kwargs): """Make a cookie from underspecified parameters. By default, the pair of `name` and `value` will be set for the domain '' and sent on every request (this is sometimes called a "supercookie"). """ result = dict( version=0, name=name, value=value, port=None, domain='', path='/', secure=False, expires=None, discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False,) badargs = set(kwargs) - set(result) if badargs: err = 'create_cookie() got unexpected keyword arguments: %s' raise TypeError(err % list(badargs)) result.update(kwargs) result['port_specified'] = bool(result['port']) result['domain_specified'] = bool(result['domain']) result['domain_initial_dot'] = result['domain'].startswith('.') result['path_specified'] = bool(result['path']) return cookielib.Cookie(**result) def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" expires = None if morsel['max-age']: try: expires = int(time.time() + int(morsel['max-age'])) except ValueError: raise TypeError('max-age: %s must be integer' % morsel['max-age']) elif morsel['expires']: time_template = '%a, %d-%b-%Y %H:%M:%S GMT' expires = calendar.timegm( time.strptime(morsel['expires'], time_template) ) return create_cookie( comment=morsel['comment'], comment_url=bool(morsel['comment']), discard=False, domain=morsel['domain'], expires=expires, name=morsel.key, path=morsel['path'], port=None, rest={'HttpOnly': morsel['httponly']}, rfc2109=False, secure=bool(morsel['secure']), value=morsel.value, version=morsel['version'] or 0, ) def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): """Returns a CookieJar from a key/value dictionary. 
:param cookie_dict: Dict of key/values to insert into CookieJar. :param cookiejar: (optional) A cookiejar to add the cookies to. :param overwrite: (optional) If False, will not replace cookies already in the jar with new ones. """ if cookiejar is None: cookiejar = RequestsCookieJar() if cookie_dict is not None: names_from_jar = [cookie.name for cookie in cookiejar] for name in cookie_dict: if overwrite or (name not in names_from_jar): cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) return cookiejar def merge_cookies(cookiejar, cookies): """Add cookies to cookiejar and returns a merged CookieJar. :param cookiejar: CookieJar object to add the cookies to. :param cookies: Dictionary or CookieJar object to be added. """ if not isinstance(cookiejar, cookielib.CookieJar): raise ValueError('You can only merge into CookieJar') if isinstance(cookies, dict): cookiejar = cookiejar_from_dict( cookies, cookiejar=cookiejar, overwrite=False) elif isinstance(cookies, cookielib.CookieJar): try: cookiejar.update(cookies) except AttributeError: for cookie_in_jar in cookies: cookiejar.set_cookie(cookie_in_jar) return cookiejar
gpl-3.0
CaliOpen/CaliOpen
src/backend/components/py.pi/caliopen_pi/qualifiers/mastodon.py
1
3411
# -*- coding: utf-8 -*- """Caliopen user message qualification logic.""" from __future__ import absolute_import, print_function, unicode_literals import logging from caliopen_main.message.parameters import NewInboundMessage from caliopen_main.message.parsers.mastodon import MastodonStatus from caliopen_main.discussion.core import Discussion from caliopen_main.participant.store import HashLookup from caliopen_main.common.helpers.normalize import clean_mastodon_address from ..features import marshal_features from .base import BaseQualifier log = logging.getLogger(__name__) class UserMastodonQualifier(BaseQualifier): """Process a Mastodon direct message to unmarshal it in our stack.""" _lookups = { 'hash': HashLookup, } def lookup_discussion_sequence(self, message, *args, **kwargs): """Return list of lookup type, value from a tweet.""" seq = list() participants = message.hash_participants seq.append(('hash', participants)) return seq, seq[0][1] def process_inbound(self, raw): """ Process inbound message. @param raw: a RawMessage object which should be a json conforming to https://docs.joinmastodon.org/api/entities/#status @rtype: NewMessage """ toot = MastodonStatus(raw.raw_data) new_message = NewInboundMessage() new_message.raw_msg_id = raw.raw_msg_id new_message.body_html = toot.body_html new_message.date = toot.date new_message.protocol = toot.protocol new_message.is_unread = True new_message.is_draft = False new_message.is_answered = False new_message.is_received = True new_message.importance_level = 0 # XXX tofix on parser new_message.external_references = toot.external_references participants = [] for p in toot.participants: p.address = p.address participant, contact = self.get_participant(toot, p) new_message.participants.append(participant) participants.append((participant, contact)) if not participants: raise Exception("no participant found in raw tweet {}".format( raw.raw_msg_id)) # Compute PI !! 
# TODO # compute tags self._get_tags(new_message) if new_message.tags: log.debug('Resolved tags {}'.format(new_message.tags)) # build discussion_id from lookup_sequence lookup_sequence, discussion_id = self.lookup_discussion_sequence( new_message) log.debug('Lookup with sequence {} gives {}'.format(lookup_sequence, discussion_id)) new_message.discussion_id = discussion_id # upsert lookup tables discuss = Discussion(self.user) discuss.upsert_lookups_for_participants(new_message.participants) # Format features new_message.privacy_features = \ marshal_features(new_message.privacy_features) try: new_message.validate() except Exception as exc: log.error( "validation failed with error : « {} » \ for new_message {}[dump : {}]".format( exc, new_message, vars(new_message))) raise exc return new_message
gpl-3.0
redhat-openstack/nova
nova/cmd/idmapshift.py
68
7093
# Copyright 2014 Rackspace, Andrew Melton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
##########
IDMapShift
##########

IDMapShift is a tool that properly sets the ownership of a filesystem
for use with linux user namespaces.

=====
Usage
=====

nova-idmapshift -i -u 0:10000:2000 -g 0:10000:2000 path

This command will idempotently shift `path` to proper ownership using
the provided uid and gid mappings.

=========
Arguments
=========

nova-idmapshift -i -c -d -v -u [[guest-uid:host-uid:count],...]
                -g [[guest-gid:host-gid:count],...]
                -n [nobody-id] path

path: Root path of the filesystem to be shifted

-i, --idempotent: Shift operation will only be performed if filesystem
                  appears unshifted

-c, --confirm: Will perform check on filesystem
               Returns 0 when filesystem appears shifted
               Returns 1 when filesystem appears unshifted

-d, --dry-run: Print chown operations, but won't perform them

-v, --verbose: Print chown operations while performing them

-u, --uid: User ID mappings, maximum of 3 ranges

-g, --gid: Group ID mappings, maximum of 3 ranges

-n, --nobody: ID to map all unmapped uid and gids to.

=======
Purpose
=======

When using user namespaces with linux containers, the filesystem of the
container must be owned by the targeted user and group ids being applied
to that container. Otherwise, processes inside the container won't be
able to access the filesystem.

For example, when using the id map string '0:10000:2000', this means
that user ids inside the container between 0 and 1999 will map to user
ids on the host between 10000 and 11999. Root (0) becomes 10000, user 1
becomes 10001, user 50 becomes 10050 and user 1999 becomes 11999. This
means that files that are owned by root need to actually be owned by
user 10000, and files owned by 50 need to be owned by 10050, and so on.

IDMapShift will take the uid and gid strings used for user namespaces
and properly set up the filesystem for use by those users. Uids and gids
outside of provided ranges will be mapped to nobody (max uid/gid) so
that they are inaccessible inside the container.
"""

import argparse
import os
import sys

# Conventional max uid/gid; ids outside all mappings are squashed to this.
NOBODY_ID = 65534


def find_target_id(fsid, mappings, nobody, memo):
    """Translate *fsid* through *mappings*, memoizing the result.

    :param fsid: uid or gid found on the filesystem
    :param mappings: list of (guest-start, host-target, count) tuples
    :param nobody: id to use when *fsid* falls outside every mapping
    :param memo: dict shared across calls so repeated ids cost one lookup
    :returns: the shifted id
    """
    if fsid not in memo:
        for start, target, count in mappings:
            if start <= fsid < start + count:
                memo[fsid] = (fsid - start) + target
                break
        else:
            # No mapping covered fsid; squash it to nobody.
            memo[fsid] = nobody
    return memo[fsid]


def print_chown(path, uid, gid, target_uid, target_gid):
    """Print a single ownership transition in 'path old -> new' form."""
    print('%s %s:%s -> %s:%s' % (path, uid, gid, target_uid, target_gid))


def shift_path(path, uid_mappings, gid_mappings, nobody,
               uid_memo, gid_memo, dry_run=False, verbose=False):
    """Chown a single path according to the uid/gid mappings.

    Uses lstat/lchown so symlinks themselves are shifted, not their
    targets.
    """
    stat = os.lstat(path)
    uid = stat.st_uid
    gid = stat.st_gid
    target_uid = find_target_id(uid, uid_mappings, nobody, uid_memo)
    target_gid = find_target_id(gid, gid_mappings, nobody, gid_memo)
    if verbose:
        print_chown(path, uid, gid, target_uid, target_gid)
    if not dry_run:
        os.lchown(path, target_uid, target_gid)


def shift_dir(fsdir, uid_mappings, gid_mappings, nobody,
              dry_run=False, verbose=False):
    """Recursively shift ownership of *fsdir* and everything under it."""
    uid_memo = dict()
    gid_memo = dict()

    def shift_path_short(p):
        shift_path(p, uid_mappings, gid_mappings, nobody,
                   dry_run=dry_run, verbose=verbose,
                   uid_memo=uid_memo, gid_memo=gid_memo)

    shift_path_short(fsdir)
    for root, dirs, files in os.walk(fsdir):
        for d in dirs:
            path = os.path.join(root, d)
            shift_path_short(path)
        for f in files:
            path = os.path.join(root, f)
            shift_path_short(path)


def confirm_path(path, uid_ranges, gid_ranges, nobody):
    """Return True if *path* already has shifted (in-range) ownership."""
    stat = os.lstat(path)
    uid = stat.st_uid
    gid = stat.st_gid

    # nobody is always considered "already shifted".
    uid_in_range = uid == nobody
    gid_in_range = gid == nobody

    if not uid_in_range or not gid_in_range:
        for (start, end) in uid_ranges:
            if start <= uid <= end:
                uid_in_range = True
                break

        for (start, end) in gid_ranges:
            if start <= gid <= end:
                gid_in_range = True
                break

    return uid_in_range and gid_in_range


def get_ranges(maps):
    """Convert (start, target, count) mappings to inclusive host ranges."""
    return [(target, target + count - 1) for (start, target, count) in maps]


def confirm_dir(fsdir, uid_mappings, gid_mappings, nobody):
    """Return True if every path under *fsdir* appears already shifted."""
    uid_ranges = get_ranges(uid_mappings)
    gid_ranges = get_ranges(gid_mappings)

    if not confirm_path(fsdir, uid_ranges, gid_ranges, nobody):
        return False

    for root, dirs, files in os.walk(fsdir):
        for d in dirs:
            path = os.path.join(root, d)
            if not confirm_path(path, uid_ranges, gid_ranges, nobody):
                return False
        for f in files:
            path = os.path.join(root, f)
            if not confirm_path(path, uid_ranges, gid_ranges, nobody):
                return False

    return True


def id_map_type(val):
    """argparse type: parse 'guest:host:count[,...]' into tuples.

    :raises argparse.ArgumentTypeError: on malformed or non-integer maps
    """
    maps = val.split(',')
    id_maps = []
    for m in maps:
        map_vals = m.split(':')

        if len(map_vals) != 3:
            msg = ('Invalid id map %s, correct syntax is '
                   'guest-id:host-id:count.')
            raise argparse.ArgumentTypeError(msg % val)

        try:
            vals = [int(i) for i in map_vals]
        except ValueError:
            msg = 'Invalid id map %s, values must be integers' % val
            raise argparse.ArgumentTypeError(msg)

        id_maps.append(tuple(vals))
    return id_maps


def main():
    """CLI entry point; see the module docstring for flag semantics."""
    parser = argparse.ArgumentParser('User Namespace FS Owner Shift')
    parser.add_argument('path')
    parser.add_argument('-u', '--uid', type=id_map_type, default=[])
    parser.add_argument('-g', '--gid', type=id_map_type, default=[])
    parser.add_argument('-n', '--nobody', default=NOBODY_ID, type=int)
    parser.add_argument('-i', '--idempotent', action='store_true')
    parser.add_argument('-c', '--confirm', action='store_true')
    parser.add_argument('-d', '--dry-run', action='store_true')
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()

    if args.idempotent or args.confirm:
        if confirm_dir(args.path, args.uid, args.gid, args.nobody):
            # Already shifted: nothing to do (exit 0).
            sys.exit(0)
        else:
            if args.confirm:
                # --confirm only reports; unshifted filesystem -> exit 1.
                sys.exit(1)

    shift_dir(args.path, args.uid, args.gid, args.nobody,
              dry_run=args.dry_run, verbose=args.verbose)
apache-2.0
klim-iv/phantomjs-qt5
src/webkit/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests.py
4
5337
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1.  Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
# 2.  Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE COMPUTER, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import glob
import logging
import optparse
import os
import re
import sys

from webkitpy.common.checkout import scm
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.executive import Executive

_log = logging.getLogger(__name__)

# Leading HTML comment (typically the Khronos license block) at the very
# start of a test file; DOTALL so the comment may span multiple lines.
_FIRST_COMMENT_RE = re.compile(r'^<!--.*?-->\s*', re.DOTALL)


def remove_first_line_comment(text):
    """Strip a leading <!-- ... --> comment (and trailing whitespace)."""
    return _FIRST_COMMENT_RE.sub('', text)


def translate_includes(text):
    """Rewrite script/include paths from Khronos layout to WebKit layout."""
    # Mapping of single filename to relative path under WebKit root.
    # Assumption: these filenames are globally unique.
    include_mapping = {
        "js-test-pre.js": "../../../resources",
        "js-test-post.js": "../../../resources",
        "desktop-gl-constants.js": "resources",
    }

    for filename, path in include_mapping.items():
        # Match the filename with any leading path component (but stop at
        # quotes/equals/spaces so we stay inside a single attribute value).
        search = r'(?:[^"\'= ]*/)?' + re.escape(filename)
        # We use '/' instead of os.path.join in order to produce consistent
        # output cross-platform.
        replace = path + '/' + filename
        text = re.sub(search, replace, text)

    return text


def translate_khronos_test(text):
    """Translate the contents of a Khronos test to a WebKit test."""
    translateFuncs = [
        remove_first_line_comment,
        translate_includes,
    ]

    for f in translateFuncs:
        text = f(text)

    return text


def update_file(in_filename, out_dir):
    """Translate one test file and write it into *out_dir*."""
    # check in_filename exists
    # check out_dir exists
    out_filename = os.path.join(out_dir, os.path.basename(in_filename))

    _log.debug("Processing " + in_filename)
    with open(in_filename, 'r') as in_file:
        with open(out_filename, 'w') as out_file:
            out_file.write(translate_khronos_test(in_file.read()))


def update_directory(in_dir, out_dir):
    """Translate every *.html test in *in_dir* into *out_dir*."""
    # glob.glob already returns paths prefixed with in_dir; joining in_dir
    # onto them again (as the previous code did) duplicated the directory
    # component and broke the open() in update_file.
    for filename in glob.glob(os.path.join(in_dir, '*.html')):
        update_file(filename, out_dir)


def default_out_dir():
    """Locate LayoutTests/fast/canvas/webgl in the checkout, else cwd."""
    detector = scm.SCMDetector(FileSystem(), Executive())
    current_scm = detector.detect_scm_system(os.path.dirname(sys.argv[0]))
    if not current_scm:
        return os.getcwd()
    root_dir = current_scm.checkout_root
    if not root_dir:
        return os.getcwd()
    out_dir = os.path.join(root_dir, "LayoutTests/fast/canvas/webgl")
    if os.path.isdir(out_dir):
        return out_dir
    return os.getcwd()


def configure_logging(options):
    """Configures the logging system."""
    log_fmt = '%(levelname)s: %(message)s'
    log_datefmt = '%y%m%d %H:%M:%S'
    log_level = logging.INFO
    if options.verbose:
        log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
                   '%(message)s')
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level, format=log_fmt,
                        datefmt=log_datefmt)


def option_parser():
    """Build the optparse parser for this script."""
    usage = "usage: %prog [options] (input file or directory)"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-v', '--verbose',
                      action='store_true',
                      default=False,
                      help='include debug-level logging')
    parser.add_option('-o', '--output',
                      action='store', type='string',
                      default=default_out_dir(),
                      metavar='DIR',
                      help='specify an output directory to place files '
                           'in [default: %default]')
    return parser


def main():
    """Entry point: translate a single file or a whole directory."""
    parser = option_parser()
    (options, args) = parser.parse_args()
    configure_logging(options)

    if len(args) == 0:
        _log.error("Must specify an input directory or filename.")
        parser.print_help()
        return 1

    in_name = args[0]
    if os.path.isfile(in_name):
        update_file(in_name, options.output)
    elif os.path.isdir(in_name):
        update_directory(in_name, options.output)
    else:
        _log.error("'%s' is not a directory or a file.", in_name)
        return 2

    return 0
bsd-3-clause
CKrawczyk/densityplot
densityplot/hex_bin_subtract.py
1
4732
import matplotlib as mpl
import pylab as pl
from mpl_toolkits.axes_grid1.inset_locator import inset_axes


def hex_difference(xy1, xy2, show_all=False, hkwargs=None,
                   color_bar=True, fignum=1):
    """A function that plots the difference between two hexbin plots.

    Parameters
    ----------
    xy1 : A tuple of (x,y) coordinates for the first hexbin. A tuple of
        (x,y,C) can also be passed in where C is the value for each
        (x,y) point.
    xy2 : A tuple of (x,y) coordinates for the second hexbin. A tuple of
        (x,y,C) can also be passed in where C is the value for each
        (x,y) point.
        NOTE: the 'C' functionality is untested and may not work as
        expected.

    Keywords
    --------
    show_all : bool (optional)
        If True all intermediate hexbin plots are returned.
        Default: show_all=False
    color_bar : bool (optional)
        If True a colorbar is placed on the plot(s).
        Default: color_bar=True
    fignum : int (optional)
        The number to give the resulting figure(s). If show_all=True,
        the intermediate plots will be fignum+1 and fignum+2 while the
        difference will be fignum.
        Default: fignum=1

    Passed Keywords
    ---------------
    hkwargs : a dictionary of keywords passed to hexbin (see
        http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin
        for additional keywords that can be set).

    Returns
    -------
    d : pylab.hexbin object returned by the difference hexbin.
    h1 : pylab.hexbin object for the first data set
        (only returned if show_all=True).
    h2 : pylab.hexbin object for the second data set
        (only returned if show_all=True).
    c : matplotlib.colorbar.Colorbar instance
        (only returned if color_bar=True).
    """
    # NOTE(fix): hkwargs used to default to a shared mutable dict ({});
    # use a None sentinel so callers can never mutate a shared default.
    if hkwargs is None:
        hkwargs = {}

    if show_all:
        # If showing all hexbins then draw them as you go (you can't
        # change the drawing axis object after creation).
        pl.figure(fignum + 1)
        hex1 = pl.hexbin(*xy1, **hkwargs)
        if color_bar:
            pl.colorbar()
        pl.figure(fignum + 2)
        hex2 = pl.hexbin(*xy2, **hkwargs)
        if color_bar:
            pl.colorbar()
    else:
        # Make but don't draw the 2 hexbins (visible=False avoids
        # drawing them to a plot).
        hex1 = pl.hexbin(*xy1, visible=False, **hkwargs)
        hex2 = pl.hexbin(*xy2, visible=False, **hkwargs)

    pl.figure(fignum)
    # This will have the counts overwritten (so don't draw yet).
    hex_dif = pl.hexbin(*xy1, visible=False, **hkwargs)
    c1 = hex1.get_array()  # the counts for hex1
    c2 = hex2.get_array()  # the counts for hex2
    c_dif = c1 - c2  # difference between plots
    # The bins to draw (removes where both hists had no counts).
    # NOTE: if the 'C' values are set checking against 0 is NOT a good idea...
    gdx = ~((c1 == 0) & (c2 == 0))
    hex_dif.set_array(c_dif[gdx])  # set the differences into hex_dif
    h = hex_dif.get_paths()  # get the hexagon Path object(s)
    if len(h) > 1:
        # You have an old version of matplotlib: each bin has its own
        # Path, so drop the blank ones explicitly.
        rem_me = pl.array(h)[~gdx]  # bins to remove
        for r in rem_me:
            h.remove(r)  # remove blank bins
    else:
        # Either a boring hexbin or a newer matplotlib: bins share one
        # Path and are positioned via offsets instead.
        h = hex_dif.get_offsets()
        hex_dif.set_offsets(h[gdx])

    hex_dif.set_visible(True)  # this draws the new hex_dif
    ret = [hex_dif]
    if show_all:
        ret.append(hex1)
        ret.append(hex2)
    if color_bar:
        # TODO: externalize colorbar keywords
        ains = inset_axes(pl.gca(), width='80%', height='5%', loc=9)
        c = pl.colorbar(hex_dif, cax=ains, orientation='horizontal')
        ret.append(c)
    return tuple(ret)


if __name__ == '__main__':
    import numpy as np
    n = 100000
    x1 = np.random.standard_normal(n)  # random x points
    y1 = 2 + 3 * x1 + 4 * np.random.standard_normal(n)  # random y points
    x2 = np.random.standard_normal(n)  # random x points
    y2 = 2 - 3 * x2 + 4 * np.random.standard_normal(n)  # random y points
    hex_difference((x1, y1), (x2, y2), show_all=True, color_bar=True,
                   hkwargs={'gridsize': 100,
                            'extent': [-4.5, 4.5, -25, 25],
                            'vmin': -180, 'vmax': 180})
    pl.show()
mit
dims/neutron
neutron/agent/dhcp/config.py
5
5371
# Copyright 2015 OpenStack Foundation
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

from neutron._i18n import _

# Options controlling the DHCP agent's own behaviour (sync, driver,
# metadata handling).
DHCP_AGENT_OPTS = [
    cfg.IntOpt('resync_interval', default=5,
               help=_("The DHCP agent will resync its state with Neutron to "
                      "recover from any transient notification or RPC errors. "
                      "The interval is number of seconds between attempts.")),
    cfg.StrOpt('dhcp_driver',
               default='neutron.agent.linux.dhcp.Dnsmasq',
               help=_("The driver used to manage the DHCP server.")),
    cfg.BoolOpt('enable_isolated_metadata', default=False,
                help=_("The DHCP server can assist with providing metadata "
                       "support on isolated networks. Setting this value to "
                       "True will cause the DHCP server to append specific "
                       "host routes to the DHCP request. The metadata service "
                       "will only be activated when the subnet does not "
                       "contain any router port. The guest instance must be "
                       "configured to request host routes via DHCP (Option "
                       "121). This option doesn't have any effect when "
                       "force_metadata is set to True.")),
    cfg.BoolOpt('force_metadata', default=False,
                help=_("In some cases the Neutron router is not present to "
                       "provide the metadata IP but the DHCP server can be "
                       "used to provide this info. Setting this value will "
                       "force the DHCP server to append specific host routes "
                       "to the DHCP request. If this option is set, then the "
                       "metadata service will be activated for all the "
                       "networks.")),
    cfg.BoolOpt('enable_metadata_network', default=False,
                help=_("Allows for serving metadata requests coming from a "
                       "dedicated metadata access network whose CIDR is "
                       "169.254.169.254/16 (or larger prefix), and is "
                       "connected to a Neutron router from which the VMs send "
                       "metadata:1 request. In this case DHCP Option 121 will "
                       "not be injected in VMs, as they will be able to reach "
                       "169.254.169.254 through a router. This option "
                       "requires enable_isolated_metadata = True.")),
    cfg.IntOpt('num_sync_threads', default=4,
               help=_('Number of threads to use during sync process. '
                      'Should not exceed connection pool size configured on '
                      'server.'))
]

# Options shared by DHCP drivers (file locations, hostname domain).
DHCP_OPTS = [
    cfg.StrOpt('dhcp_confs',
               default='$state_path/dhcp',
               help=_('Location to store DHCP server config files.')),
    cfg.StrOpt('dhcp_domain',
               default='openstacklocal',
               help=_('Domain to use for building the hostnames. '
                      'This option is deprecated. It has been moved to '
                      'neutron.conf as dns_domain. It will be removed '
                      'in a future release.'),
               deprecated_for_removal=True),
]

# Options specific to the dnsmasq-based DHCP driver.
DNSMASQ_OPTS = [
    cfg.StrOpt('dnsmasq_config_file',
               default='',
               help=_('Override the default dnsmasq settings '
                      'with this file.')),
    cfg.ListOpt('dnsmasq_dns_servers',
                help=_('Comma-separated list of the DNS servers which will be '
                       'used as forwarders.'),
                deprecated_name='dnsmasq_dns_server'),
    cfg.StrOpt('dnsmasq_base_log_dir',
               help=_("Base log dir for dnsmasq logging. "
                      "The log contains DHCP and DNS log information and "
                      "is useful for debugging issues with either DHCP or "
                      "DNS. If this section is null, disable dnsmasq log.")),
    cfg.BoolOpt('dnsmasq_local_resolv', default=False,
                help=_("Enables the dnsmasq service to provide name "
                       "resolution for instances via DNS resolvers on the "
                       "host running the DHCP agent. Effectively removes the "
                       "'--no-resolv' option from the dnsmasq process "
                       "arguments. Adding custom DNS resolvers to the "
                       "'dnsmasq_dns_servers' option disables this feature.")),
    cfg.IntOpt(
        'dnsmasq_lease_max',
        default=(2 ** 24),
        help=_('Limit number of leases to prevent a denial-of-service.')),
    cfg.BoolOpt('dhcp_broadcast_reply', default=False,
                help=_("Use broadcast in DHCP replies.")),
]
apache-2.0
StormTrooper/osmc
package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x08f.py
252
4651
# Unidecode transliteration table for Unicode block U+8Fxx (CJK).
# Index into the tuple is the low byte of the code point; each entry is
# the ASCII transliteration (with a trailing space), '[?]' for unmapped.
data = (
    # 0x00-0x0f
    'Er ', 'Qiong ', 'Ju ', 'Jiao ', 'Guang ', 'Lu ', 'Kai ', 'Quan ',
    'Zhou ', 'Zai ', 'Zhi ', 'She ', 'Liang ', 'Yu ', 'Shao ', 'You ',
    # 0x10-0x1f
    'Huan ', 'Yun ', 'Zhe ', 'Wan ', 'Fu ', 'Qing ', 'Zhou ', 'Ni ',
    'Ling ', 'Zhe ', 'Zhan ', 'Liang ', 'Zi ', 'Hui ', 'Wang ', 'Chuo ',
    # 0x20-0x2f
    'Guo ', 'Kan ', 'Yi ', 'Peng ', 'Qian ', 'Gun ', 'Nian ', 'Pian ',
    'Guan ', 'Bei ', 'Lun ', 'Pai ', 'Liang ', 'Ruan ', 'Rou ', 'Ji ',
    # 0x30-0x3f
    'Yang ', 'Xian ', 'Chuan ', 'Cou ', 'Qun ', 'Ge ', 'You ', 'Hong ',
    'Shu ', 'Fu ', 'Zi ', 'Fu ', 'Wen ', 'Ben ', 'Zhan ', 'Yu ',
    # 0x40-0x4f
    'Wen ', 'Tao ', 'Gu ', 'Zhen ', 'Xia ', 'Yuan ', 'Lu ', 'Jiu ',
    'Chao ', 'Zhuan ', 'Wei ', 'Hun ', 'Sori ', 'Che ', 'Jiao ', 'Zhan ',
    # 0x50-0x5f
    'Pu ', 'Lao ', 'Fen ', 'Fan ', 'Lin ', 'Ge ', 'Se ', 'Kan ',
    'Huan ', 'Yi ', 'Ji ', 'Dui ', 'Er ', 'Yu ', 'Xian ', 'Hong ',
    # 0x60-0x6f
    'Lei ', 'Pei ', 'Li ', 'Li ', 'Lu ', 'Lin ', 'Che ', 'Ya ',
    'Gui ', 'Xuan ', 'Di ', 'Ren ', 'Zhuan ', 'E ', 'Lun ', 'Ruan ',
    # 0x70-0x7f
    'Hong ', 'Ku ', 'Ke ', 'Lu ', 'Zhou ', 'Zhi ', 'Yi ', 'Hu ',
    'Zhen ', 'Li ', 'Yao ', 'Qing ', 'Shi ', 'Zai ', 'Zhi ', 'Jiao ',
    # 0x80-0x8f
    'Zhou ', 'Quan ', 'Lu ', 'Jiao ', 'Zhe ', 'Fu ', 'Liang ', 'Nian ',
    'Bei ', 'Hui ', 'Gun ', 'Wang ', 'Liang ', 'Chuo ', 'Zi ', 'Cou ',
    # 0x90-0x9f
    'Fu ', 'Ji ', 'Wen ', 'Shu ', 'Pei ', 'Yuan ', 'Xia ', 'Zhan ',
    'Lu ', 'Che ', 'Lin ', 'Xin ', 'Gu ', 'Ci ', 'Ci ', 'Pi ',
    # 0xa0-0xaf
    'Zui ', 'Bian ', 'La ', 'La ', 'Ci ', 'Xue ', 'Ban ', 'Bian ',
    'Bian ', 'Bian ', '[?] ', 'Bian ', 'Ban ', 'Ci ', 'Bian ', 'Bian ',
    # 0xb0-0xbf
    'Chen ', 'Ru ', 'Nong ', 'Nong ', 'Zhen ', 'Chuo ', 'Chuo ', 'Suberu ',
    'Reng ', 'Bian ', 'Bian ', 'Sip ', 'Ip ', 'Liao ', 'Da ', 'Chan ',
    # 0xc0-0xcf
    'Gan ', 'Qian ', 'Yu ', 'Yu ', 'Qi ', 'Xun ', 'Yi ', 'Guo ',
    'Mai ', 'Qi ', 'Za ', 'Wang ', 'Jia ', 'Zhun ', 'Ying ', 'Ti ',
    # 0xd0-0xdf
    'Yun ', 'Jin ', 'Hang ', 'Ya ', 'Fan ', 'Wu ', 'Da ', 'E ',
    'Huan ', 'Zhe ', 'Totemo ', 'Jin ', 'Yuan ', 'Wei ', 'Lian ', 'Chi ',
    # 0xe0-0xef
    'Che ', 'Ni ', 'Tiao ', 'Zhi ', 'Yi ', 'Jiong ', 'Jia ', 'Chen ',
    'Dai ', 'Er ', 'Di ', 'Po ', 'Wang ', 'Die ', 'Ze ', 'Tao ',
    # 0xf0-0xff
    'Shu ', 'Tuo ', 'Kep ', 'Jing ', 'Hui ', 'Tong ', 'You ', 'Mi ',
    'Beng ', 'Ji ', 'Nai ', 'Yi ', 'Jie ', 'Zhui ', 'Lie ', 'Xun ',
)
gpl-2.0
simonwydooghe/ansible
lib/ansible/module_utils/network/nxos/facts/bfd_interfaces/bfd_interfaces.py
20
3476
# # -*- coding: utf-8 -*- # Copyright 2019 Cisco and/or its affiliates. # GNU General Public License v3.0+ # (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type """ The nxos bfd_interfaces fact class Populate the facts tree based on the current device configuration. """ import re from copy import deepcopy from ansible.module_utils.network.common import utils from ansible.module_utils.network.nxos.argspec.bfd_interfaces.bfd_interfaces import Bfd_interfacesArgs from ansible.module_utils.network.nxos.utils.utils import get_interface_type class Bfd_interfacesFacts(object): """ The nxos_bfd_interfaces fact class """ def __init__(self, module, subspec='config', options='options'): self._module = module self.argument_spec = Bfd_interfacesArgs.argument_spec spec = deepcopy(self.argument_spec) if subspec: if options: facts_argument_spec = spec[subspec][options] else: facts_argument_spec = spec[subspec] else: facts_argument_spec = spec self.generated_spec = utils.generate_dict(facts_argument_spec) def populate_facts(self, connection, ansible_facts, data=None): """ Populate the facts for bfd_interfaces :param connection: the device connection :param ansible_facts: Facts dictionary :param data: previously collected conf :rtype: dictionary :returns: facts """ objs = [] if not data: data = connection.get("show running-config | section '^interface|^feature bfd'") # Some of the bfd attributes if 'feature bfd' in data.split('\n'): resources = data.split('interface ') resources.pop(0) else: resources = [] for resource in resources: if resource: obj = self.render_config(self.generated_spec, resource) if obj and len(obj.keys()) > 1: objs.append(obj) ansible_facts['ansible_network_resources'].pop('bfd_interfaces', None) facts = {} if objs: facts['bfd_interfaces'] = [] params = utils.validate_config(self.argument_spec, {'config': objs}) for cfg in params['config']: 
facts['bfd_interfaces'].append(utils.remove_empties(cfg)) ansible_facts['ansible_network_resources'].update(facts) return ansible_facts def render_config(self, spec, conf): """ Render config as dictionary structure and delete keys from spec for null values :param spec: The facts tree, generated from the argspec :param conf: The configuration :rtype: dictionary :returns: The generated config """ config = deepcopy(spec) match = re.search(r'^(\S+)', conf) intf = match.group(1) if get_interface_type(intf) == 'unknown': return {} config['name'] = intf # 'bfd'/'bfd echo' do not nvgen when enabled thus set to 'enable' when None. # 'bfd' is not supported on some platforms config['bfd'] = utils.parse_conf_cmd_arg(conf, 'bfd', 'enable', 'disable') or 'enable' config['echo'] = utils.parse_conf_cmd_arg(conf, 'bfd echo', 'enable', 'disable') or 'enable' return utils.remove_empties(config)
gpl-3.0
capitalDIGI/DIGI
contrib/testgen/gen_base58_test_vectors.py
1064
4344
#!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.

Usage:
    gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
    gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
# NOTE: this script is Python 2 code (str(bytearray(...)) relies on
# py2 byte-string semantics).
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex

# Version-byte / key-type prefixes.
PUBKEY_ADDRESS = 48
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 176
PRIVKEY_TEST = 239

metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
    # prefix, payload_size, suffix, metadata
    # None = N/A
    ((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
    ((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
    ((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
    ((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
    ((PRIVKEY,), 32, (), (True, False, None, False)),
    ((PRIVKEY,), 32, (1,), (True, False, None, True)),
    ((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
    ((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]


def is_valid(v):
    '''Check vector v for validity'''
    result = b58decode_chk(v)
    if result is None:
        return False
    for template in templates:
        prefix = str(bytearray(template[0]))
        suffix = str(bytearray(template[2]))
        if result.startswith(prefix) and result.endswith(suffix):
            if (len(result) - len(prefix) - len(suffix)) == template[1]:
                return True
    return False


def gen_valid_vectors():
    '''Generate valid test vectors'''
    while True:
        for template in templates:
            prefix = str(bytearray(template[0]))
            payload = os.urandom(template[1])
            suffix = str(bytearray(template[2]))
            rv = b58encode_chk(prefix + payload + suffix)
            assert is_valid(rv)
            metadata = dict([(x, y) for (x, y) in
                             zip(metadata_keys, template[3])
                             if y is not None])
            yield (rv, b2a_hex(payload), metadata)


def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size,
                       corrupt_suffix):
    '''Generate possibly invalid vector'''
    if corrupt_prefix:
        prefix = os.urandom(1)
    else:
        prefix = str(bytearray(template[0]))

    if randomize_payload_size:
        payload = os.urandom(max(int(random.expovariate(0.5)), 50))
    else:
        payload = os.urandom(template[1])

    if corrupt_suffix:
        suffix = os.urandom(len(template[2]))
    else:
        suffix = str(bytearray(template[2]))

    return b58encode_chk(prefix + payload + suffix)


def randbool(p=0.5):
    '''Return True with P(p)'''
    return random.random() < p


def gen_invalid_vectors():
    '''Generate invalid test vectors'''
    # start with some manual edge-cases
    yield "",
    yield "x",
    while True:
        # kinds of invalid vectors:
        #   invalid prefix
        #   invalid payload length
        #   invalid (randomized) suffix (add random data)
        #   corrupt checksum
        for template in templates:
            val = gen_invalid_vector(template, randbool(0.2),
                                     randbool(0.2), randbool(0.2))
            if random.randint(0, 10) < 1:  # line corruption
                if randbool():
                    # add random character to end
                    val += random.choice(b58chars)
                else:
                    # replace random character in the middle
                    n = random.randint(0, len(val))
                    val = val[0:n] + random.choice(b58chars) + val[n + 1:]
            if not is_valid(val):
                yield val,


if __name__ == '__main__':
    import sys
    import json
    iters = {'valid': gen_valid_vectors, 'invalid': gen_invalid_vectors}
    try:
        uiter = iters[sys.argv[1]]
    except IndexError:
        uiter = gen_valid_vectors
    try:
        count = int(sys.argv[2])
    except IndexError:
        count = 0
    data = list(islice(uiter(), count))
    json.dump(data, sys.stdout, sort_keys=True, indent=4)
    sys.stdout.write('\n')
mit
ofer43211/unisubs
utils/subtitles.py
5
2063
# Amara, universalsubtitles.org
#
# Copyright (C) 2014 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.

"""utils.subtitles -- Subtitle-related functions.

This module creates a custom SubtitleLoader which allows us to control
the styling/layout.
"""

from babelsubs.loader import SubtitleLoader

# Shared loader configured with Amara's default caption style and the
# two standard caption regions (bottom is the usual placement).
subtitle_loader = SubtitleLoader()
subtitle_loader.add_style('amara-style',
                          color="white",
                          fontFamily="proportionalSansSerif",
                          fontSize="18px",
                          backgroundColor="transparent",
                          textOutline="black 1px 0px",
                          textAlign="center")
subtitle_loader.add_region('bottom', 'amara-style',
                           extent='100% 20%', origin='0 80%')
subtitle_loader.add_region('top', 'amara-style',
                           extent='100% 20%', origin='0 0')


def create_new_subtitles(language_code, title='', description=''):
    """Create an empty subtitle set with Amara's default styling."""
    return subtitle_loader.create_new(language_code, title, description)


def load_subtitles(language_code, content, file_type):
    """Parse subtitle *content* (a string) of the given *file_type*."""
    return subtitle_loader.loads(language_code, content, file_type)


def load_subtitles_from_file(language_code, path):
    """Parse the subtitle file at *path*."""
    return subtitle_loader.load(language_code, path)


def dfxp_merge(subtitle_sets):
    """Merge multiple subtitle sets into a single DFXP document."""
    return subtitle_loader.dfxp_merge(subtitle_sets)
agpl-3.0
openprocurement/openprocurement.auth
setup.py
1
1214
from setuptools import setup, find_packages
import os

version = '3.0.0'

setup(name='openprocurement.auth',
      version=version,
      description="",
      # Long description is README plus the changelog from docs/.
      long_description=open("README.txt").read() + "\n" +
                       open(os.path.join("docs", "HISTORY.txt")).read(),
      # Get more strings from
      # http://pypi.python.org/pypi?:action=list_classifiers
      classifiers=[
          "License :: OSI Approved :: Apache Software License",
          "Programming Language :: Python",
      ],
      keywords='',
      author='Quintagroup, Ltd.',
      author_email='info@quintagroup.com',
      license='Apache License 2.0',
      url='https://github.com/openprocurement/openprocurement.auth',
      packages=find_packages(exclude=['ez_setup']),
      namespace_packages=['openprocurement'],
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'setuptools',
          'Flask',
          'Flask-SQLAlchemy',
          'werkzeug',
          'Flask-OAuthlib',
          'retrying'
      ],
      entry_points={
          'paste.app_factory': [
              'oauth_provider = openprocurement.auth.provider:make_oath_provider_app'
          ]
      },
      )
apache-2.0
scalyr/scalyr-agent-2
scalyr_agent/third_party/requests/status_codes.py
148
3323
# -*- coding: utf-8 -*-

from .structures import LookupDict

# Maps each HTTP status code to the attribute aliases exposed on `codes`.
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('already_reported',),
    226: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('permanent_redirect',
          'resume_incomplete', 'resume',),  # These 2 to be removed in 3.0

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth',
          'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range',
          'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    421: ('misdirected_request',),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    451: ('unavailable_for_legal_reasons', 'legal_reasons'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
    511: ('network_authentication_required', 'network_auth',
          'network_authentication'),
}

codes = LookupDict(name='status_codes')

# Expose every alias as an attribute on `codes`; also add an UPPERCASE
# variant for names that are valid identifiers (skipping ascii-art ones).
for code, titles in _codes.items():
    for title in titles:
        setattr(codes, title, code)
        if not title.startswith(('\\', '/')):
            setattr(codes, title.upper(), code)
apache-2.0
kamyu104/django
django/conf/locale/sr/formats.py
1008
2011
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y.', '%d.%m.%y.',           # '25.10.2006.', '25.10.06.'
    '%d. %m. %Y.', '%d. %m. %y.',       # '25. 10. 2006.', '25. 10. 06.'
    # '%d. %b %y.', '%d. %B %y.',       # '25. Oct 06.', '25. October 06.'
    # '%d. %b \'%y.', '%d. %B \'%y.',   # '25. Oct '06.', '25. October '06.'
    # '%d. %b %Y.', '%d. %B %Y.',       # '25. Oct 2006.', '25. October 2006.'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y. %H:%M:%S',               # '25.10.2006. 14:30:59'
    '%d.%m.%Y. %H:%M:%S.%f',            # '25.10.2006. 14:30:59.000200'
    '%d.%m.%Y. %H:%M',                  # '25.10.2006. 14:30'
    '%d.%m.%Y.',                        # '25.10.2006.'
    '%d.%m.%y. %H:%M:%S',               # '25.10.06. 14:30:59'
    '%d.%m.%y. %H:%M:%S.%f',            # '25.10.06. 14:30:59.000200'
    '%d.%m.%y. %H:%M',                  # '25.10.06. 14:30'
    '%d.%m.%y.',                        # '25.10.06.'
    '%d. %m. %Y. %H:%M:%S',             # '25. 10. 2006. 14:30:59'
    '%d. %m. %Y. %H:%M:%S.%f',          # '25. 10. 2006. 14:30:59.000200'
    '%d. %m. %Y. %H:%M',                # '25. 10. 2006. 14:30'
    '%d. %m. %Y.',                      # '25. 10. 2006.'
    '%d. %m. %y. %H:%M:%S',             # '25. 10. 06. 14:30:59'
    '%d. %m. %y. %H:%M:%S.%f',          # '25. 10. 06. 14:30:59.000200'
    '%d. %m. %y. %H:%M',                # '25. 10. 06. 14:30'
    '%d. %m. %y.',                      # '25. 10. 06.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
bsd-3-clause
ddico/odoo
addons/website_sale_stock/tests/test_website_sale_stock_product_warehouse.py
12
3311
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo.addons.website_sale.tests.test_website_sale_product_attribute_value_config import TestWebsiteSaleProductAttributeValueConfig


class TestWebsiteSaleStockProductWarehouse(TestWebsiteSaleProductAttributeValueConfig):
    # Verifies that the website reports stock from the warehouse configured on
    # the current website, not from all warehouses combined.

    def test_get_combination_info(self):
        """ Checked that correct product quantity is shown in website according
        to the warehouse which is set in current website.

        - Create two warehouse
        - Create two stockable products
        - Update quantity of Product A in Warehouse 1
        - Update quantity of Product B in Warehouse 2
        - Set Warehouse 1 in website
        - Check available quantity of Product A and Product B in website

        Product A should be available in the website as it is available in
        warehouse 1 but Product B should not be available in website as it is
        stored in warehouse 2.
        """
        # Create two warehouses
        warehouse_1 = self.env['stock.warehouse'].create({
            'name': 'Warehouse 1',
            'code': 'WH1'
        })
        warehouse_2 = self.env['stock.warehouse'].create({
            'name': 'Warehouse 2',
            'code': 'WH2'
        })

        # Create two stockable products
        product_1 = self.env['product.product'].create({
            'name': 'Product A',
            'inventory_availability': 'always',
            'type': 'product',
            'default_code': 'E-COM1',
        })
        product_2 = self.env['product.product'].create({
            'name': 'Product B',
            'inventory_availability': 'always',
            'type': 'product',
            'default_code': 'E-COM2',
        })

        # Update quantity of Product A in Warehouse 1
        # (inventory_mode lets us set quantities directly via stock.quant)
        self.env['stock.quant'].with_context(inventory_mode=True).create({
            'product_id': product_1.id,
            'inventory_quantity': 10.0,
            'location_id': warehouse_1.lot_stock_id.id,
        })

        # Update quantity of Product B in Warehouse 2
        self.env['stock.quant'].with_context(inventory_mode=True).create({
            'product_id': product_2.id,
            'inventory_quantity': 10.0,
            'location_id': warehouse_2.lot_stock_id.id,
        })

        # Get current website and set warehouse_id of Warehouse 1
        current_website = self.env['website'].get_current_website()
        current_website.warehouse_id = warehouse_1

        product = product_1.with_context(website_id=current_website.id)
        combination_info = product.product_tmpl_id.with_context(website_sale_stock_get_quantity=True)._get_combination_info()

        # Check available quantity of product is according to warehouse
        self.assertEqual(combination_info['virtual_available'], 10, "10 units of Product A should be available in warehouse 1.")

        product = product_2.with_context(website_id=current_website.id)
        combination_info = product.product_tmpl_id.with_context(website_sale_stock_get_quantity=True)._get_combination_info()

        # Check available quantity of product is according to warehouse
        self.assertEqual(combination_info['virtual_available'], 0, "Product B should not be available in warehouse 1.")
agpl-3.0
andnovar/kivy
kivy/core/window/window_egl_rpi.py
17
3086
'''
EGL Rpi Window: EGL Window provider, specialized for the Pi

Inspired by: rpi_vid_core + JF002 rpi kivy repo
'''

__all__ = ('WindowEglRpi', )

from kivy.logger import Logger
from kivy.core.window import WindowBase
from kivy.base import EventLoop
from kivy.lib.vidcore_lite import bcm, egl

from os import environ

# Default display IDs.
(DISPMANX_ID_MAIN_LCD,
 DISPMANX_ID_AUX_LCD,
 DISPMANX_ID_HDMI,
 DISPMANX_ID_SDTV,
 DISPMANX_ID_FORCE_LCD,
 DISPMANX_ID_FORCE_TV,
 DISPMANX_ID_FORCE_OTHER) = range(7)


class WindowEglRpi(WindowBase):
    # Dispmanx display selector, overridable through the environment.
    _rpi_dispmanx_id = int(environ.get("KIVY_BCM_DISPMANX_ID", "0"))

    def create_window(self):
        # Query the physical display size and build a window covering it.
        bcm.host_init()

        w, h = bcm.graphics_get_display_size(self._rpi_dispmanx_id)
        Logger.debug('Window: Actual display size: {}x{}'.format(w, h))
        self._size = w, h

        self._create_window(w, h)
        self._create_egl_context(self.win, 0)
        super(WindowEglRpi, self).create_window()

    def _create_window(self, w, h):
        # Create the native dispmanx element backing the EGL window.
        dst = bcm.Rect(0, 0, w, h)
        # NOTE(review): source rect is shifted by 16 bits — presumably the
        # dispmanx 16.16 fixed-point convention; confirm against the VC docs.
        src = bcm.Rect(0, 0, w << 16, h << 16)
        display = egl.bcm_display_open(self._rpi_dispmanx_id)
        update = egl.bcm_update_start(0)
        element = egl.bcm_element_add(update, display, 0, dst, src)
        self.win = egl.NativeWindow(element, w, h)
        egl.bcm_update_submit_sync(update)

    def _create_egl_context(self, win, flags):
        # Configure EGL for an OpenGL ES 2 context on the native window.
        api = egl._constants.EGL_OPENGL_ES_API
        c = egl._constants

        attribs = [
            c.EGL_RED_SIZE, 8,
            c.EGL_GREEN_SIZE, 8,
            c.EGL_BLUE_SIZE, 8,
            c.EGL_ALPHA_SIZE, 8,
            c.EGL_DEPTH_SIZE, 16,
            c.EGL_STENCIL_SIZE, 8,
            c.EGL_SURFACE_TYPE, c.EGL_WINDOW_BIT,
            c.EGL_NONE]

        attribs_context = [c.EGL_CONTEXT_CLIENT_VERSION, 2, c.EGL_NONE]

        display = egl.GetDisplay(c.EGL_DEFAULT_DISPLAY)
        egl.Initialise(display)
        egl.BindAPI(c.EGL_OPENGL_ES_API)
        egl.GetConfigs(display)
        config = egl.ChooseConfig(display, attribs, 1)[0]
        surface = egl.CreateWindowSurface(display, config, win)
        context = egl.CreateContext(display, config, None, attribs_context)
        egl.MakeCurrent(display, surface, surface, context)
        self.egl_info = (display, surface, context)
        # NOTE(review): MakeCurrent is invoked a second time with the same
        # arguments — appears redundant; kept as-is to preserve behavior.
        egl.MakeCurrent(display, surface, surface, context)

    def close(self):
        egl.Terminate(self.egl_info[0])

    def flip(self):
        # egl_info is (display, surface, context)
        egl.SwapBuffers(self.egl_info[0], self.egl_info[1])

    def _mainloop(self):
        EventLoop.idle()

    def mainloop(self):
        while not EventLoop.quit and EventLoop.status == 'started':
            try:
                self._mainloop()
            except BaseException as inst:
                raise
        # Disabled exception-manager handling kept from upstream as a string
        # literal (evaluates to a no-op).
        '''
        # use exception manager first
        r = ExceptionManager.handle_exception(inst)
        if r == ExceptionManager.RAISE:
            #stopTouchApp()
            raise
        else:
            pass
        '''
mit
quamilek/django
django/conf/locale/ru/formats.py
1059
1267
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # from __future__ import unicode_literals # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j E Y г.' TIME_FORMAT = 'G:i' DATETIME_FORMAT = 'j E Y г. G:i' YEAR_MONTH_FORMAT = 'F Y г.' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'd.m.Y' SHORT_DATETIME_FORMAT = 'd.m.Y H:i' FIRST_DAY_OF_WEEK = 1 # Monday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d.%m.%Y', # '25.10.2006' '%d.%m.%y', # '25.10.06' ] DATETIME_INPUT_FORMATS = [ '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200' '%d.%m.%Y %H:%M', # '25.10.2006 14:30' '%d.%m.%Y', # '25.10.2006' '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59' '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200' '%d.%m.%y %H:%M', # '25.10.06 14:30' '%d.%m.%y', # '25.10.06' ] DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '\xa0' # non-breaking space NUMBER_GROUPING = 3
bsd-3-clause
anjos/rrpack
rr/algorithm.py
1
10237
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Wed 17 Jun 2015 17:51:02 CEST

"""Logistic regression machines and trainers (binary and one-vs-all
multiclass), optimized with scipy's L-BFGS-B."""

import logging
logger = logging.getLogger()

import numpy
import scipy.optimize


def make_labels(X):
    """Helper function that generates a single 1D numpy.ndarray with labels
    which are good targets for stock logistic regression.

    Parameters:

      X (numpy.ndarray): The input data matrix. This must be a numpy.ndarray
        with 3 dimensions or an iterable containing 2 numpy.ndarrays with 2
        dimensions each. Each correspond to the data for one of the two
        classes, every row corresponds to one example of the data set, every
        column, one different feature.

    Returns:

      numpy.ndarray: With a single dimension, containing suitable labels for
        all rows and for all classes defined in X (depth).
    """
    # Class index k becomes the label for every row of X[k].
    return numpy.hstack([k*numpy.ones(len(X[k]), dtype=int) for k in range(len(X))])


class Machine:
    """A class to handle all run-time aspects for Logistic Regression

    Parameters:

      theta (numpy.ndarray): A set of parameters for the Logistic Regression
        model. This must be an iterable (or numpy.ndarray) with all parameters
        for the model, including the bias term, which must be on entry 0 (the
        first entry at the iterable).
    """

    def __init__(self, theta):
        # Copy so later optimizer updates don't alias the caller's array.
        self.theta = numpy.array(theta).copy()

    def __call__(self, X):
        """Spits out the hypothesis given the data.

        Parameters:

          X (numpy.ndarray): The input data matrix. This must be a
            numpy.ndarray with 2 dimensions. Every row corresponds to one
            example of the data set, every column, one different feature.

        Returns:

          numpy.ndarray: A 1D numpy.ndarray with as many entries as rows in
            the input 2D array ``X``, representing g(x), the sigmoidal
            hypothesis.
        """
        Xp = numpy.hstack((numpy.ones((len(X), 1)), X))  # add bias term
        # Sigmoid of the linear model.
        return 1. / (1. + numpy.exp(-numpy.dot(Xp, self.theta)))

    def predict(self, X):
        """Predicts the class of each row of X

        Parameters:

          X (numpy.ndarray): The input data matrix. This must be a
            numpy.ndarray with 2 dimensions. Every row corresponds to one
            example of the data set, every column, one different feature.

        Returns:

          numpy.ndarray: A 1D numpy.ndarray with as many entries as rows in
            the input 2D array ``X``, representing the class predictions for
            the current machine (0 or 1, thresholded at 0.5).
        """
        retval = self(X)
        retval[retval < 0.5] = 0.
        retval[retval >= 0.5] = 1.
        return retval.astype(int)

    def J(self, X, regularizer=0.0):
        """Calculates the logistic regression cost

        Parameters:

          X (numpy.ndarray): The input data matrix. This must be a
            numpy.ndarray with 3 dimensions or an iterable containing 2
            numpy.ndarrays with 2 dimensions each. Each correspond to the data
            for one of the two classes, every row corresponds to one example
            of the data set, every column, one different feature.

          regularizer (float): A regularization parameter

        Returns:

          float: The averaged (regularized) cost for the whole dataset
        """
        # NOTE(review): the (0, 1) indices hard-code the two-class case; the
        # multiclass path goes through MultiClassTrainer instead.
        h = numpy.hstack([self(X[k]) for k in (0, 1)])
        y = make_labels(X)
        # nan_to_num guards against log(0) when the hypothesis saturates.
        logh = numpy.nan_to_num(numpy.log(h))
        log1h = numpy.nan_to_num(numpy.log(1-h))
        # The bias term (entry 0) is conventionally not regularized.
        regularization_term = regularizer*(self.theta[1:]**2).sum()
        main_term = -(y*logh + ((1-y)*log1h)).mean()
        return main_term + regularization_term

    def dJ(self, X, regularizer=0.0):
        """Calculates the logistic regression first derivative of the cost
        w.r.t. each parameter theta

        Parameters:

          X (numpy.ndarray): The input data matrix. This must be a
            numpy.ndarray with 3 dimensions or an iterable containing 2
            numpy.ndarrays with 2 dimensions each. Each correspond to the data
            for one of the two classes, every row corresponds to one example
            of the data set, every column, one different feature.

          regularizer (float): A regularization parameter, if the solution
            should be regularized.

        Returns:

          numpy.ndarray: A 1D numpy.ndarray with as many entries as columns on
            the input matrix ``X`` plus 1 (the bias term). It denotes the
            average gradient of the cost w.r.t. to each machine parameter
            theta.
        """
        Xflat = numpy.vstack([k for k in X])
        Xp = numpy.hstack((numpy.ones((len(Xflat), 1)), Xflat))  # add bias term
        y = make_labels(X)
        retval = ((self(Xflat) - y) * Xp.T).T.mean(axis=0)
        # Regularization gradient, again skipping the bias entry.
        retval[1:] += (regularizer*self.theta[1:])/len(X)
        return retval


class Trainer:
    """A class to handle all training aspects for Logistic Regression

    Parameters:

      regularizer (float): A regularization parameter
    """

    def __init__(self, regularizer=0.0):
        self.regularizer = regularizer

    def J(self, theta, machine, X):
        """Calculates the vectorized cost *J* (adapter for the scipy
        optimizer: loads ``theta`` into the machine first)."""
        machine.theta = theta
        return machine.J(X, self.regularizer)

    def dJ(self, theta, machine, X):
        """Calculates the vectorized partial derivative of the cost *J*
        w.r.t. **all** thetas. Uses the training dataset."""
        machine.theta = theta
        return machine.dJ(X, self.regularizer)

    def train(self, X):
        """Optimizes the machine parameters to fit the input data, using
        ``scipy.optimize.fmin_l_bfgs_b``.

        Parameters:

          X (numpy.ndarray): The input data matrix. This must be a
            numpy.ndarray with 3 dimensions or an iterable containing 2
            numpy.ndarrays with 2 dimensions each. Each correspond to the data
            for one of the two classes, every row corresponds to one example
            of the data set, every column, one different feature.

        Returns:

          Machine: A trained machine.

        Raises:

          RuntimeError: In case problems exist with the design matrix ``X`` or
            with convergence.
        """
        # check data dimensionality if not organized in a matrix
        if not isinstance(X, numpy.ndarray):
            baseline = X[0].shape[1]
            for k in X:
                if k.shape[1] != baseline:
                    raise RuntimeError("Mismatch on the dimensionality of input `X`")

        # prepare the machine
        theta0 = numpy.zeros(X[0].shape[1]+1)  # include bias terms
        machine = Machine(theta0)

        logger.debug('Settings:')
        logger.debug(' * initial guess = %s', [k for k in theta0])
        logger.debug(' * cost (J) = %g', machine.J(X, self.regularizer))
        logger.debug('Training using scipy.optimize.fmin_l_bfgs_b()...')

        # Fill in the right parameters so that the minimization can take place
        theta, cost, d = scipy.optimize.fmin_l_bfgs_b(
            self.J,
            theta0,
            self.dJ,
            (machine, X),
        )

        if d['warnflag'] == 0:
            logger.info("** LBFGS converged successfuly **")
            machine.theta = theta
            logger.debug('Final settings:')
            logger.debug(' * theta = %s', [k for k in theta])
            logger.debug(' * cost (J) = %g', cost)
            return machine
        else:
            message = "LBFGS did **not** converged:"
            if d['warnflag'] == 1:
                message += " Too many function evaluations"
            elif d['warnflag'] == 2:
                message += " %s" % d['task']
            raise RuntimeError(message)


class MultiClassMachine:
    """A class to handle all run-time aspects for Multiclass Log. Regression

    Parameters:

      machines (iterable): An iterable over any number of machines that will
        be stored.
    """

    def __init__(self, machines):
        self.machines = machines

    def __call__(self, X):
        """Spits out the hypothesis for each machine given the data

        Parameters:

          X (numpy.ndarray): The input data matrix. This must be a
            numpy.ndarray with 2 dimensions. Every row corresponds to one
            example of the data set, every column, one different feature.

        Returns:

          numpy.ndarray: A 2D numpy.ndarray with as many rows as in the input
            2D array ``X``; each column holds the sigmoidal hypothesis g(x)
            from one of the stored logistic regression machines.
        """
        return numpy.vstack([m(X) for m in self.machines]).T

    def predict(self, X):
        """Predicts the class of each row of X

        Parameters:

          X (numpy.ndarray): The input data matrix. This must be a
            numpy.ndarray with 3 dimensions or an iterable containing 2
            numpy.ndarrays with 2 dimensions each. Each correspond to the data
            for one of the two classes, every row corresponds to one example
            of the data set, every column, one different feature.

        Returns:

          numpy.ndarray: A 1D numpy.ndarray with as many entries as rows in
            the input 2D array ``X``, with the index of the machine producing
            the largest hypothesis for each row.
        """
        return self(X).argmax(axis=1)


class MultiClassTrainer:
    """A class to handle all training aspects for Multiclass Log. Regression

    Parameters:

      regularizer (float): A regularization parameter
    """

    def __init__(self, regularizer=0.0):
        self.regularizer = regularizer

    def train(self, X):
        """Trains multiple logistic regression classifiers to handle the
        multiclass problem posed by ``X``

          X (numpy.ndarray): The input data matrix. This must be a
            numpy.ndarray with 3 dimensions or an iterable containing 2
            numpy.ndarrays with 2 dimensions each. Each correspond to the data
            for one of the input classes, every row corresponds to one example
            of the data set, every column, one different feature.

        Returns:

          Machine or MultiClassMachine: a single binary machine when ``X`` has
            exactly two classes, otherwise a one-vs-all multiclass machine.
        """
        _trainer = Trainer(self.regularizer)

        if len(X) == 2:  # trains and returns a single logistic regression classifer
            return _trainer.train(X)
        else:  # trains and returns a multi-class logistic regression classifier
            # use one-versus-all strategy
            machines = []
            for k in range(len(X)):
                # All class indices except k form the "negative" class.
                NC_range = list(range(0, k)) + list(range(k+1, len(X)))
                Xp = numpy.array([numpy.vstack(X[NC_range]), X[k]])
                machines.append(_trainer.train(Xp))
            return MultiClassMachine(machines)
bsd-3-clause
js850/pele
pele/potentials/lj.py
1
2183
from math import *
import numpy as np  # to access np.exp() not built int exp

from pele.potentials import BasePotential
import fortran.lj as ljf

__all__ = ["LJ"]


class LJ(BasePotential):
    """ simple lennard jones potential

    Thin Python wrapper over the fortran implementation in ``fortran.lj``.
    """

    def __init__(self, eps=1.0, sig=1.0, boxl=None):
        # eps: well depth; sig: length scale; boxl: periodic box length.
        self.sig = sig
        self.eps = eps
        self.boxl = boxl
        if self.boxl is None:
            self.periodic = False
            # Large sentinel box so the fortran code behaves non-periodically.
            self.boxl = 10000.
        else:
            self.periodic = True

    def getEnergy(self, coords):
        """Return the total LJ energy for the flattened coordinate array."""
        E = ljf.ljenergy(
            coords, self.eps, self.sig, self.periodic, self.boxl)
        return E

    def getEnergyGradient(self, coords):
        """Return (energy, gradient) computed by the fortran backend."""
        E, grad = ljf.ljenergy_gradient(
            coords, self.eps, self.sig, self.periodic, self.boxl)
        return E, grad

    def getEnergyList(self, coords, ilist):
        """Energy restricted to the interaction pairs in ``ilist``."""
        #ilist = ilist_i.getNPilist()
        #ilist += 1 #fortran indexing
        E = ljf.energy_ilist(
            coords, self.eps, self.sig, ilist.reshape(-1), self.periodic,
            self.boxl)
        #ilist -= 1
        return E

    def getEnergyGradientList(self, coords, ilist):
        """Energy and gradient restricted to the pairs in ``ilist``."""
        #ilist = ilist_i.getNPilist()
        #ilist += 1 #fortran indexing
        E, grad = ljf.energy_gradient_ilist(
            coords, self.eps, self.sig, ilist.reshape(-1), self.periodic,
            self.boxl)
        #ilist -= 1
        return E, grad

    def getEnergyGradientHessian(self, coords):
        """Return (energy, gradient, hessian); periodic boxes unsupported."""
        if self.periodic:
            raise Exception("Hessian not implemented for periodic boundaries")
        from fortran.lj_hess import ljdiff
        g, energy, hess = ljdiff(coords, True, True)
        return energy, g, hess


def main():
    # Smoke test: random cluster, energy/gradient, then a quench.
    # NOTE: Python 2 print statements — this module is Python 2 only.
    natoms = 12
    coords = np.random.uniform(-1, 1, natoms*3)*2

    lj = LJ()
    E = lj.getEnergy(coords)
    print "E", E
    E, V = lj.getEnergyGradient(coords)
    print "E", E
    print "V"
    print V

    print "try a quench"
    from pele.optimize import mylbfgs as quench
    quench(coords, lj, iprint=1)
    #quench( coords, lj.getEnergyGradientNumerical, iprint=1 )


if __name__ == "__main__":
    main()
gpl-3.0
moneymaker365/script.module.stem
lib/stem/descriptor/microdescriptor.py
13
9866
# Copyright 2013-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information

"""
Parsing for Tor microdescriptors, which contain a distilled version of a
relay's server descriptor. As of Tor version 0.2.3.3-alpha Tor no longer
downloads server descriptors by default, opting for microdescriptors instead.

Unlike most descriptor documents these aren't available on the metrics site
(since they don't contain any information that the server descriptors don't).

The limited information in microdescriptors make them rather clunky to use
compared with server descriptors. For instance microdescriptors lack the
relay's fingerprint, making it difficult to use them to look up the relay's
other descriptors. To do so you need to match the microdescriptor's digest
against its corresponding router status entry. For added fun as of this
writing the controller doesn't even surface those router status entries
(:trac:`7953`).

For instance, here's an example that prints the nickname and fingerprints of
the exit relays.

::

  import os

  from stem.control import Controller
  from stem.descriptor import parse_file

  with Controller.from_port(port = 9051) as controller:
    controller.authenticate()

    exit_digests = set()
    data_dir = controller.get_conf('DataDirectory')

    for desc in controller.get_microdescriptors():
      if desc.exit_policy.is_exiting_allowed():
        exit_digests.add(desc.digest)

    print 'Exit Relays:'

    for desc in parse_file(os.path.join(data_dir, 'cached-microdesc-consensus')):
      if desc.digest in exit_digests:
        print '  %s (%s)' % (desc.nickname, desc.fingerprint)

Doing the same is trivial with server descriptors...

::

  from stem.descriptor import parse_file

  print 'Exit Relays:'

  for desc in parse_file('/home/atagar/.tor/cached-descriptors'):
    if desc.exit_policy.is_exiting_allowed():
      print '  %s (%s)' % (desc.nickname, desc.fingerprint)

**Module Overview:**

::

  Microdescriptor - Tor microdescriptor.
"""

import hashlib

import stem.exit_policy

from stem.descriptor import (
  Descriptor,
  _get_descriptor_components,
  _read_until_keywords,
  _value,
  _parse_simple_line,
  _parse_key_block,
)

from stem.descriptor.router_status_entry import (
  _parse_a_line,
  _parse_p_line,
)

try:
  # added in python 3.2
  from functools import lru_cache
except ImportError:
  from stem.util.lru_cache import lru_cache

# Keywords that every microdescriptor must contain.
REQUIRED_FIELDS = (
  'onion-key',
)

# Keywords that may appear at most once.
SINGLE_FIELDS = (
  'onion-key',
  'ntor-onion-key',
  'family',
  'p',
  'p6',
)


def _parse_file(descriptor_file, validate = False, **kwargs):
  """
  Iterates over the microdescriptors in a file.

  :param file descriptor_file: file with descriptor content
  :param bool validate: checks the validity of the descriptor's content if
    **True**, skips these checks otherwise
  :param dict kwargs: additional arguments for the descriptor constructor

  :returns: iterator for Microdescriptor instances in the file

  :raises:
    * **ValueError** if the contents is malformed and validate is True
    * **IOError** if the file can't be read
  """

  while True:
    # 'onion-key' opens each microdescriptor, so anything before it is
    # treated as annotations for the upcoming descriptor
    annotations = _read_until_keywords('onion-key', descriptor_file)

    # read until we reach an annotation or onion-key line
    descriptor_lines = []

    # read the onion-key line, done if we're at the end of the document
    onion_key_line = descriptor_file.readline()

    if onion_key_line:
      descriptor_lines.append(onion_key_line)
    else:
      break

    while True:
      last_position = descriptor_file.tell()
      line = descriptor_file.readline()

      if not line:
        break  # EOF
      elif line.startswith(b'@') or line.startswith(b'onion-key'):
        # start of the next descriptor - rewind so it's read next iteration
        descriptor_file.seek(last_position)
        break
      else:
        descriptor_lines.append(line)

    if descriptor_lines:
      if descriptor_lines[0].startswith(b'@type'):
        descriptor_lines = descriptor_lines[1:]

      # strip newlines from annotations
      annotations = list(map(bytes.strip, annotations))

      descriptor_text = bytes.join(b'', descriptor_lines)

      yield Microdescriptor(descriptor_text, validate, annotations, **kwargs)
    else:
      break  # done parsing descriptors


def _parse_id_line(descriptor, entries):
  # "id" lines are "<key type> <base64 digest>"
  value = _value('id', entries)
  value_comp = value.split()

  if len(value_comp) >= 2:
    descriptor.identifier_type = value_comp[0]
    descriptor.identifier = value_comp[1]
  else:
    raise ValueError("'id' lines should contain both the key type and digest: id %s" % value)


# digest is the uppercase hex sha256 of the descriptor's raw bytes
_parse_digest = lambda descriptor, entries: setattr(descriptor, 'digest', hashlib.sha256(descriptor.get_bytes()).hexdigest().upper())
_parse_onion_key_line = _parse_key_block('onion-key', 'onion_key', 'RSA PUBLIC KEY')
_parse_ntor_onion_key_line = _parse_simple_line('ntor-onion-key', 'ntor_onion_key')
_parse_family_line = lambda descriptor, entries: setattr(descriptor, 'family', _value('family', entries).split(' '))
_parse_p6_line = lambda descriptor, entries: setattr(descriptor, 'exit_policy_v6', stem.exit_policy.MicroExitPolicy(_value('p6', entries)))


class Microdescriptor(Descriptor):
  """
  Microdescriptor (`descriptor specification
  <https://gitweb.torproject.org/torspec.git/tree/dir-spec.txt>`_)

  :var str digest: **\\*** hex digest for this microdescriptor, this can be
    used to match against the corresponding digest attribute of a
    :class:`~stem.descriptor.router_status_entry.RouterStatusEntryMicroV3`
  :var str onion_key: **\\*** key used to encrypt EXTEND cells
  :var str ntor_onion_key: base64 key used to encrypt EXTEND in the ntor protocol
  :var list or_addresses: **\\*** alternative for our address/or_port
    attributes, each entry is a tuple of the form (address (**str**), port
    (**int**), is_ipv6 (**bool**))
  :var list family: **\\*** nicknames or fingerprints of declared family
  :var stem.exit_policy.MicroExitPolicy exit_policy: **\\*** relay's exit policy
  :var stem.exit_policy.MicroExitPolicy exit_policy_v6: **\\*** exit policy for IPv6
  :var str identifier_type: identity digest key type
  :var str identifier: base64 encoded identity digest, this is only used for
    collision prevention (:trac:`11743`)

  **\\*** attribute is required when we're parsed with validation

  .. versionchanged:: 1.1.0
     Added the identifier and identifier_type attributes.
  """

  # Maps attribute name to (default value, parser function).
  ATTRIBUTES = {
    'onion_key': (None, _parse_onion_key_line),
    'ntor_onion_key': (None, _parse_ntor_onion_key_line),
    'or_addresses': ([], _parse_a_line),
    'family': ([], _parse_family_line),
    'exit_policy': (stem.exit_policy.MicroExitPolicy('reject 1-65535'), _parse_p_line),
    'exit_policy_v6': (None, _parse_p6_line),
    'identifier_type': (None, _parse_id_line),
    'identifier': (None, _parse_id_line),
    'digest': (None, _parse_digest),
  }

  # Maps document keyword to the parser handling that line.
  PARSER_FOR_LINE = {
    'onion-key': _parse_onion_key_line,
    'ntor-onion-key': _parse_ntor_onion_key_line,
    'a': _parse_a_line,
    'family': _parse_family_line,
    'p': _parse_p_line,
    'p6': _parse_p6_line,
    'id': _parse_id_line,
  }

  def __init__(self, raw_contents, validate = False, annotations = None):
    super(Microdescriptor, self).__init__(raw_contents, lazy_load = not validate)
    self._annotation_lines = annotations if annotations else []
    entries = _get_descriptor_components(raw_contents, validate)

    if validate:
      # eagerly parse and sanity-check everything
      self.digest = hashlib.sha256(self.get_bytes()).hexdigest().upper()
      self._parse(entries, validate)
      self._check_constraints(entries)
    else:
      # defer parsing until an attribute is requested
      self._entries = entries

  @lru_cache()
  def get_annotations(self):
    """
    Provides content that appeared prior to the descriptor. If this comes from
    the cached-microdescs then this commonly contains content like...

    ::

      @last-listed 2013-02-24 00:18:30

    :returns: **dict** with the key/value pairs in our annotations
    """

    annotation_dict = {}

    for line in self._annotation_lines:
      if b' ' in line:
        key, value = line.split(b' ', 1)
        annotation_dict[key] = value
      else:
        annotation_dict[line] = None

    return annotation_dict

  def get_annotation_lines(self):
    """
    Provides the lines of content that appeared prior to the descriptor. This
    is the same as the
    :func:`~stem.descriptor.microdescriptor.Microdescriptor.get_annotations`
    results, but with the unparsed lines and ordering retained.

    :returns: **list** with the lines of annotation that came before this descriptor
    """

    return self._annotation_lines

  def _check_constraints(self, entries):
    """
    Does a basic check that the entries conform to this descriptor type's
    constraints.

    :param dict entries: keyword => (value, pgp key) entries

    :raises: **ValueError** if an issue arises in validation
    """

    for keyword in REQUIRED_FIELDS:
      if keyword not in entries:
        raise ValueError("Microdescriptor must have a '%s' entry" % keyword)

    for keyword in SINGLE_FIELDS:
      if keyword in entries and len(entries[keyword]) > 1:
        raise ValueError("The '%s' entry can only appear once in a microdescriptor" % keyword)

    if 'onion-key' != list(entries.keys())[0]:
      raise ValueError("Microdescriptor must start with a 'onion-key' entry")

  def _name(self, is_plural = False):
    return 'microdescriptors' if is_plural else 'microdescriptor'

  def _compare(self, other, method):
    # comparisons are by normalized string content, and only meaningful
    # against other Microdescriptor instances
    if not isinstance(other, Microdescriptor):
      return False

    return method(str(self).strip(), str(other).strip())

  def __hash__(self):
    return hash(str(self).strip())

  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)

  def __lt__(self, other):
    return self._compare(other, lambda s, o: s < o)

  def __le__(self, other):
    return self._compare(other, lambda s, o: s <= o)
lgpl-3.0
DaniilLeksin/theblog
env/lib/python2.7/site-packages/django/contrib/flatpages/tests/test_csrf.py
56
3486
import os

from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase, Client
from django.test import override_settings


@override_settings(
    LOGIN_URL='/accounts/login/',
    MIDDLEWARE_CLASSES=(
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.middleware.csrf.CsrfViewMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
        'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    ),
    CSRF_FAILURE_VIEW='django.views.csrf.csrf_failure',
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(__file__), 'templates'),
    ),
    SITE_ID=1,
)
class FlatpageCSRFTests(TestCase):
    # Exercises flatpage serving (view and fallback middleware) with CSRF
    # checks enforced on the test client.
    fixtures = ['sample_flatpages', 'example_site']
    urls = 'django.contrib.flatpages.tests.urls'

    def setUp(self):
        # Enforce CSRF so POSTs without a token are rejected, as in production.
        self.client = Client(enforce_csrf_checks=True)

    def test_view_flatpage(self):
        "A flatpage can be served through a view, even when the middleware is in use"
        response = self.client.get('/flatpage_root/flatpage/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it flat!</p>")

    def test_view_non_existent_flatpage(self):
        "A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
        response = self.client.get('/flatpage_root/no_such_flatpage/')
        self.assertEqual(response.status_code, 404)

    @skipIfCustomUser
    def test_view_authenticated_flatpage(self):
        "A flatpage served through a view can require authentication"
        response = self.client.get('/flatpage_root/sekrit/')
        # Anonymous access redirects to LOGIN_URL with ?next=...
        self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
        User.objects.create_user('testuser', 'test@example.com', 's3krit')
        self.client.login(username='testuser', password='s3krit')
        response = self.client.get('/flatpage_root/sekrit/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it sekrit!</p>")

    def test_fallback_flatpage(self):
        "A flatpage can be served by the fallback middleware"
        response = self.client.get('/flatpage/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<p>Isn't it flat!</p>")

    def test_fallback_non_existent_flatpage(self):
        "A non-existent flatpage raises a 404 when served by the fallback middleware"
        response = self.client.get('/no_such_flatpage/')
        self.assertEqual(response.status_code, 404)

    def test_post_view_flatpage(self):
        "POSTing to a flatpage served through a view will raise a CSRF error if no token is provided (Refs #14156)"
        response = self.client.post('/flatpage_root/flatpage/')
        self.assertEqual(response.status_code, 403)

    def test_post_fallback_flatpage(self):
        "POSTing to a flatpage served by the middleware will raise a CSRF error if no token is provided (Refs #14156)"
        response = self.client.post('/flatpage/')
        self.assertEqual(response.status_code, 403)

    def test_post_unknown_page(self):
        "POSTing to an unknown page isn't caught as a 403 CSRF error"
        response = self.client.post('/no_such_page/')
        self.assertEqual(response.status_code, 404)
gpl-2.0
petesburgh/or-tools
examples/python/knapsack.py
32
1764
# Copyright 2010-2014 Google # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Bi-dimensional knapsack problem.""" from google.apputils import app import gflags from ortools.algorithms import pywrapknapsack_solver FLAGS = gflags.FLAGS def main(unused_argv): # Create the solver. solver = pywrapknapsack_solver.KnapsackSolver( pywrapknapsack_solver.KnapsackSolver. KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER, 'test') profits = [360, 83, 59, 130, 431, 67, 230, 52, 93, 125, 670, 892, 600, 38, 48, 147, 78, 256, 63, 17, 120, 164, 432, 35, 92, 110, 22, 42, 50, 323, 514, 28, 87, 73, 78, 15, 26, 78, 210, 36, 85, 189, 274, 43, 33, 10, 19, 389, 276, 312] weights = [[7, 0, 30, 22, 80, 94, 11, 81, 70, 64, 59, 18, 0, 36, 3, 8, 15, 42, 9, 0, 42, 47, 52, 32, 26, 48, 55, 6, 29, 84, 2, 4, 18, 56, 7, 29, 93, 44, 71, 3, 86, 66, 31, 65, 0, 79, 20, 65, 52, 13]] capacities = [850] optimal_profit = 7534 solver.Init(profits, weights, capacities) computed_profit = solver.Solve() print 'optimal profit = ' + str(computed_profit) + '/' + str(optimal_profit) if __name__ == '__main__': app.run()
apache-2.0
funkypawz/MakerRobot
peewee-master/playhouse/tests/models.py
1
10438
# Model definitions shared by the peewee playhouse test suite.  Every model
# inherits from playhouse.tests.base.TestModel, so all tables are bound to
# the suite's configured test database.
import datetime
import sys

from peewee import *
from playhouse.tests.base import TestModel
from playhouse.tests.base import test_db

# Python 3 has no `long`; alias it so UInt32Field below works on both majors.
if sys.version_info[0] == 3:
    long = int


class User(TestModel):
    username = CharField()

    class Meta:
        db_table = 'users'

    def prepared(self):
        # Hook invoked by peewee after a row is loaded; used by tests to
        # observe that preparation ran.
        self.foo = self.username

    @classmethod
    def create_users(cls, n):
        """Create `n` users named u1..un (test fixture helper)."""
        for i in range(n):
            cls.create(username='u%d' % (i + 1))


class Blog(TestModel):
    user = ForeignKeyField(User)
    title = CharField(max_length=25)
    content = TextField(default='')
    pub_date = DateTimeField(null=True)
    pk = PrimaryKeyField()

    def __unicode__(self):
        return '%s: %s' % (self.user.username, self.title)

    def prepared(self):
        # See User.prepared.
        self.foo = self.title


class Comment(TestModel):
    blog = ForeignKeyField(Blog, related_name='comments')
    comment = CharField()


class Relationship(TestModel):
    # Two FKs to the same model; exercises distinct related_names.
    from_user = ForeignKeyField(User, related_name='relationships')
    to_user = ForeignKeyField(User, related_name='related_to')


class NullModel(TestModel):
    # One nullable column of every supported field type.
    char_field = CharField(null=True)
    text_field = TextField(null=True)
    datetime_field = DateTimeField(null=True)
    int_field = IntegerField(null=True)
    float_field = FloatField(null=True)
    decimal_field1 = DecimalField(null=True)
    decimal_field2 = DecimalField(decimal_places=2, null=True)
    double_field = DoubleField(null=True)
    bigint_field = BigIntegerField(null=True)
    date_field = DateField(null=True)
    time_field = TimeField(null=True)
    boolean_field = BooleanField(null=True)
    fixed_char_field = FixedCharField(null=True)
    ts_field = TimestampField(null=True, default=None, resolution=1000000)
    ts_field2 = TimestampField(null=True, default=None, resolution=1000,
                               utc=True)


class TimestampModel(TestModel):
    # Timestamps at microsecond (local), millisecond (UTC) and second
    # resolution.
    local_us = TimestampField(null=True, default=None, resolution=1000000)
    utc_ms = TimestampField(null=True, default=None, resolution=1000,
                            utc=True)
    local = TimestampField(null=True)


class UniqueModel(TestModel):
    name = CharField(unique=True)


class UniqueMultiField(TestModel):
    name = CharField(unique=True)
    field_a = CharField(default='')
    field_b = IntegerField(default=0)


class OrderedModel(TestModel):
    title = CharField()
    created = DateTimeField(default=datetime.datetime.now)

    class Meta:
        # Default ordering: newest first.
        order_by = ('-created',)


class Category(TestModel):
    # Self-referential FK for tree-structure tests.
    parent = ForeignKeyField('self', related_name='children', null=True)
    name = CharField()


class UserCategory(TestModel):
    user = ForeignKeyField(User)
    category = ForeignKeyField(Category)


class NonIntModel(TestModel):
    # Non-integer primary key.
    pk = CharField(primary_key=True)
    data = CharField()


class NonIntRelModel(TestModel):
    non_int_model = ForeignKeyField(NonIntModel, related_name='nr')


class DBUser(TestModel):
    # Explicit db_column names differing from the attribute names.
    user_id = PrimaryKeyField(db_column='db_user_id')
    username = CharField(db_column='db_username')


class DBBlog(TestModel):
    blog_id = PrimaryKeyField(db_column='db_blog_id')
    title = CharField(db_column='db_title')
    user = ForeignKeyField(DBUser, db_column='db_user')


class SeqModelA(TestModel):
    # SeqModelA and SeqModelB deliberately share one database sequence.
    id = IntegerField(primary_key=True, sequence='just_testing_seq')
    num = IntegerField()


class SeqModelB(TestModel):
    id = IntegerField(primary_key=True, sequence='just_testing_seq')
    other_num = IntegerField()


class MultiIndexModel(TestModel):
    f1 = CharField()
    f2 = CharField()
    f3 = CharField()

    class Meta:
        # (columns, unique?) pairs: one unique and one non-unique composite
        # index.
        indexes = (
            (('f1', 'f2'), True),
            (('f2', 'f3'), False),
        )


class BlogTwo(Blog):
    # Model inheritance: overrides `title` and adds a column.
    title = TextField()
    extra_field = CharField()


class Parent(TestModel):
    data = CharField()


class Child(TestModel):
    parent = ForeignKeyField(Parent)
    data = CharField(default='')


class Orphan(TestModel):
    parent = ForeignKeyField(Parent, null=True)
    data = CharField(default='')


class ChildPet(TestModel):
    child = ForeignKeyField(Child)
    data = CharField(default='')


class OrphanPet(TestModel):
    orphan = ForeignKeyField(Orphan)
    data = CharField(default='')


class ChildNullableData(TestModel):
    child = ForeignKeyField(Child, null=True)
    data = CharField()


class CSVField(TextField):
    """Custom field storing a list of strings as a comma-separated string."""

    def db_value(self, value):
        if value:
            return ','.join(value)
        return value or ''

    def python_value(self, value):
        return value.split(',') if value else []


class CSVRow(TestModel):
    data = CSVField()


class BlobModel(TestModel):
    data = BlobField()


class Job(TestModel):
    name = CharField()


class JobExecutionRecord(TestModel):
    # FK doubling as the primary key (one record per job).
    job = ForeignKeyField(Job, primary_key=True)
    status = CharField()


class JERRelated(TestModel):
    jer = ForeignKeyField(JobExecutionRecord)


class TestModelA(TestModel):
    field = CharField(primary_key=True)
    data = CharField()


class TestModelB(TestModel):
    field = CharField(primary_key=True)
    data = CharField()


class TestModelC(TestModel):
    field = CharField(primary_key=True)
    data = CharField()


class Post(TestModel):
    title = CharField()


class Tag(TestModel):
    tag = CharField()


class TagPostThrough(TestModel):
    # Many-to-many through-model with a composite primary key.
    tag = ForeignKeyField(Tag, related_name='posts')
    post = ForeignKeyField(Post, related_name='tags')

    class Meta:
        primary_key = CompositeKey('tag', 'post')


class TagPostThroughAlt(TestModel):
    # Same relation without the composite key (surrogate id instead).
    tag = ForeignKeyField(Tag, related_name='posts_alt')
    post = ForeignKeyField(Post, related_name='tags_alt')


class Manufacturer(TestModel):
    name = CharField()


class CompositeKeyModel(TestModel):
    f1 = CharField()
    f2 = IntegerField()
    f3 = FloatField()

    class Meta:
        primary_key = CompositeKey('f1', 'f2')


class UserThing(TestModel):
    thing = CharField()
    user = ForeignKeyField(User, related_name='things')

    class Meta:
        primary_key = CompositeKey('thing', 'user')


class Component(TestModel):
    name = CharField()
    manufacturer = ForeignKeyField(Manufacturer, null=True)


class Computer(TestModel):
    # Three FKs to the same model.
    hard_drive = ForeignKeyField(Component, related_name='c1')
    memory = ForeignKeyField(Component, related_name='c2')
    processor = ForeignKeyField(Component, related_name='c3')


class CheckModel(TestModel):
    # Column-level CHECK constraint.
    value = IntegerField(constraints=[Check('value > 0')])


# Deferred foreign keys.
# Language -> Snippet -> Language is circular, so the Language.selected_snippet
# FK is declared against a DeferredRelation and resolved after Snippet exists.
SnippetDeferred = DeferredRelation()


class Language(TestModel):
    name = CharField()
    selected_snippet = ForeignKeyField(SnippetDeferred, null=True)


class Snippet(TestModel):
    code = TextField()
    language = ForeignKeyField(Language, related_name='snippets')


SnippetDeferred.set_model(Snippet)


class _UpperField(CharField):
    # Upper-cases values on the way out of the database.
    def python_value(self, value):
        return value.upper() if value else value


class UpperUser(TestModel):
    # Reads the same table as User but through the upper-casing field.
    username = _UpperField()

    class Meta:
        db_table = User._meta.db_table


class Package(TestModel):
    barcode = CharField(unique=True)


class PackageItem(TestModel):
    title = CharField()
    # FK targeting a non-primary-key column (to_field).
    package = ForeignKeyField(
        Package,
        related_name='items',
        to_field=Package.barcode)


class PGSchema(TestModel):
    data = CharField()

    class Meta:
        # Postgres-only: table lives in the 'huey' schema.
        schema = 'huey'


class UpperCharField(CharField):
    # Upper-cases values on the way *into* the database (coerce), unlike
    # _UpperField above which converts on read.
    def coerce(self, value):
        value = super(UpperCharField, self).coerce(value)
        if value:
            value = value.upper()
        return value


class UpperModel(TestModel):
    data = UpperCharField()


class CommentCategory(TestModel):
    category = ForeignKeyField(Category)
    comment = ForeignKeyField(Comment)
    sort_order = IntegerField(default=0)

    class Meta:
        primary_key = CompositeKey('comment', 'category')


class BlogData(TestModel):
    blog = ForeignKeyField(Blog)


class ServerDefaultModel(TestModel):
    # Defaults supplied by the database server via raw SQL constraints.
    name = CharField(constraints=[SQL("DEFAULT 'foo'")])
    timestamp = DateTimeField(constraints=[
        SQL('DEFAULT CURRENT_TIMESTAMP')])


class SpecialComment(TestModel):
    user = ForeignKeyField(User, related_name='special_comments')
    blog = ForeignKeyField(Blog, null=True, related_name='special_comments')
    name = CharField()


class EmptyModel(TestModel):
    pass


class NoPKModel(TestModel):
    data = TextField()

    class Meta:
        # Table with no primary key at all.
        primary_key = False


class TestingID(TestModel):
    uniq = UUIDField()


class UUIDData(TestModel):
    id = UUIDField(primary_key=True)
    data = CharField()


class UUIDRelatedModel(TestModel):
    data = ForeignKeyField(UUIDData, null=True,
                           related_name='related_models')
    value = IntegerField(default=0)


class UInt32Field(Field):
    """Store an unsigned 32-bit value in a signed INT column.

    Shifts values by 2**31 in each direction so the full unsigned range fits
    in a signed column; db_value/python_value are exact inverses.
    """
    db_field = 'int'

    def db_value(self, value):
        return long(value - (1 << 31))

    def python_value(self, value):
        return long(value + (1 << 31))


class UIntModel(TestModel):
    data = UInt32Field()


class UIntRelModel(TestModel):
    uint_model = ForeignKeyField(UIntModel, to_field='data')


class Note(TestModel):
    user = ForeignKeyField(User, related_name='notes')
    text = TextField()


class Flag(TestModel):
    label = TextField()


class NoteFlag(TestModel):
    note = ForeignKeyField(Note, related_name='flags')
    flag = ForeignKeyField(Flag, related_name='notes')


class NoteFlagNullable(TestModel):
    note = ForeignKeyField(Note, null=True, related_name='nullable_flags')
    flag = ForeignKeyField(Flag, null=True, related_name='nullable_notes')


# Models created/dropped by the default test harness.  NOTE(review): a few
# models defined above (e.g. ChildNullableData, CSVRow, UpperUser,
# NoteFlagNullable) are absent from this list — presumably managed by the
# individual tests that use them; confirm before "fixing".
MODELS = [
    User,
    Blog,
    Comment,
    Relationship,
    NullModel,
    TimestampModel,
    UniqueModel,
    OrderedModel,
    Category,
    UserCategory,
    NonIntModel,
    NonIntRelModel,
    DBUser,
    DBBlog,
    SeqModelA,
    SeqModelB,
    MultiIndexModel,
    BlogTwo,
    Parent,
    Child,
    Orphan,
    ChildPet,
    OrphanPet,
    BlobModel,
    Job,
    JobExecutionRecord,
    JERRelated,
    TestModelA,
    TestModelB,
    TestModelC,
    Tag,
    Post,
    TagPostThrough,
    TagPostThroughAlt,
    Language,
    Snippet,
    Manufacturer,
    CompositeKeyModel,
    UserThing,
    Component,
    Computer,
    CheckModel,
    Package,
    PackageItem,
    PGSchema,
    UpperModel,
    CommentCategory,
    BlogData,
    ServerDefaultModel,
    SpecialComment,
    EmptyModel,
    NoPKModel,
    TestingID,
    UUIDData,
    UUIDRelatedModel,
    UIntModel,
    UIntRelModel,
    Note,
    Flag,
    NoteFlag,
]
gpl-3.0
shtouff/django
django/contrib/auth/models.py
13
14582
from __future__ import unicode_literals

from django.contrib import auth
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.core import validators
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils import six, timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _


def update_last_login(sender, user, **kwargs):
    """
    A signal receiver which updates the last_login date for
    the user logging in.
    """
    user.last_login = timezone.now()
    # Only the last_login column is written; other pending changes on the
    # instance are not saved.
    user.save(update_fields=['last_login'])
user_logged_in.connect(update_last_login)


class PermissionManager(models.Manager):
    use_in_migrations = True

    def get_by_natural_key(self, codename, app_label, model):
        # Natural key = (codename, app_label, model); the latter two resolve
        # to a ContentType on the same database as this manager.
        return self.get(
            codename=codename,
            content_type=ContentType.objects.db_manager(self.db).get_by_natural_key(app_label, model),
        )


@python_2_unicode_compatible
class Permission(models.Model):
    """
    The permissions system provides a way to assign permissions to specific
    users and groups of users.

    The permission system is used by the Django admin site, but may also be
    useful in your own code. The Django admin site uses permissions as
    follows:

        - The "add" permission limits the user's ability to view the "add"
          form and add an object.
        - The "change" permission limits a user's ability to view the change
          list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.

    Permissions are set globally per type of object, not per specific object
    instance. It is possible to say "Mary may change news stories," but it's
    not currently possible to say "Mary may change news stories, but only the
    ones she created herself" or "Mary may only change news stories that have
    a certain status or publication date."

    Three basic permissions -- add, change and delete -- are automatically
    created for each Django model.
    """
    name = models.CharField(_('name'), max_length=255)
    content_type = models.ForeignKey(ContentType)
    codename = models.CharField(_('codename'), max_length=100)
    objects = PermissionManager()

    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # A codename is unique per model type, not globally.
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type__app_label', 'content_type__model',
                    'codename')

    def __str__(self):
        return "%s | %s | %s" % (
            six.text_type(self.content_type.app_label),
            six.text_type(self.content_type),
            six.text_type(self.name))

    def natural_key(self):
        return (self.codename,) + self.content_type.natural_key()
    # Serialization must dump ContentType rows before Permission rows.
    natural_key.dependencies = ['contenttypes.contenttype']


class GroupManager(models.Manager):
    """
    The manager for the auth's Group model.
    """
    use_in_migrations = True

    def get_by_natural_key(self, name):
        return self.get(name=name)


@python_2_unicode_compatible
class Group(models.Model):
    """
    Groups are a generic way of categorizing users to apply permissions, or
    some other label, to those users. A user can belong to any number of
    groups.

    A user in a group automatically has all the permissions granted to that
    group. For example, if the group Site editors has the permission
    can_edit_home_page, any user in that group will have that permission.

    Beyond permissions, groups are a convenient way to categorize users to
    apply some label, or extended functionality, to them. For example, you
    could create a group 'Special users', and you could write code that would
    do special things to those users -- such as giving them access to a
    members-only portion of your site, or sending them members-only email
    messages.
    """
    name = models.CharField(_('name'), max_length=80, unique=True)
    permissions = models.ManyToManyField(
        Permission,
        verbose_name=_('permissions'),
        blank=True,
    )

    objects = GroupManager()

    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')

    def __str__(self):
        return self.name

    def natural_key(self):
        return (self.name,)


class UserManager(BaseUserManager):
    use_in_migrations = True

    def _create_user(self, username, email, password,
                     is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        if not username:
            raise ValueError('The given username must be set')
        email = self.normalize_email(email)
        user = self.model(username=username, email=email,
                          is_staff=is_staff, is_superuser=is_superuser,
                          **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, username, email=None, password=None,
                    **extra_fields):
        # Regular user: not staff, not superuser.
        return self._create_user(username, email, password, False, False,
                                 **extra_fields)

    def create_superuser(self, username, email, password, **extra_fields):
        # Superuser: staff and superuser flags both set.
        return self._create_user(username, email, password, True, True,
                                 **extra_fields)


# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
    # Union of permissions reported by every configured auth backend.
    permissions = set()
    for backend in auth.get_backends():
        if hasattr(backend, "get_all_permissions"):
            permissions.update(backend.get_all_permissions(user, obj))
    return permissions


def _user_has_perm(user, perm, obj):
    """
    A backend can raise `PermissionDenied` to short-circuit permission
    checking.
    """
    for backend in auth.get_backends():
        if not hasattr(backend, 'has_perm'):
            continue
        try:
            # First backend to grant wins.
            if backend.has_perm(user, perm, obj):
                return True
        except PermissionDenied:
            # Explicit denial stops the chain immediately.
            return False
    return False


def _user_has_module_perms(user, app_label):
    """
    A backend can raise `PermissionDenied` to short-circuit permission
    checking.
    """
    for backend in auth.get_backends():
        if not hasattr(backend, 'has_module_perms'):
            continue
        try:
            if backend.has_module_perms(user, app_label):
                return True
        except PermissionDenied:
            return False
    return False


class PermissionsMixin(models.Model):
    """
    A mixin class that adds the fields and methods necessary to support
    Django's Group and Permission model using the ModelBackend.
    """
    is_superuser = models.BooleanField(
        _('superuser status'),
        default=False,
        help_text=_(
            'Designates that this user has all permissions without '
            'explicitly assigning them.'
        ),
    )
    groups = models.ManyToManyField(
        Group,
        verbose_name=_('groups'),
        blank=True,
        help_text=_(
            'The groups this user belongs to. A user will get all permissions '
            'granted to each of their groups.'
        ),
        related_name="user_set",
        related_query_name="user",
    )
    user_permissions = models.ManyToManyField(
        Permission,
        verbose_name=_('user permissions'),
        blank=True,
        help_text=_('Specific permissions for this user.'),
        related_name="user_set",
        related_query_name="user",
    )

    class Meta:
        abstract = True

    def get_group_permissions(self, obj=None):
        """
        Returns a list of permission strings that this user has through their
        groups. This method queries all available auth backends. If an object
        is passed in, only permissions matching this object are returned.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                permissions.update(backend.get_group_permissions(self, obj))
        return permissions

    def get_all_permissions(self, obj=None):
        return _user_get_all_permissions(self, obj)

    def has_perm(self, perm, obj=None):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general. If an object is
        provided, permissions for this specific object are checked.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True

        # Otherwise we need to check the backends.
        return _user_has_perm(self, perm, obj)

    def has_perms(self, perm_list, obj=None):
        """
        Returns True if the user has each of the specified permissions. If
        object is passed, it checks if the user has all required perms for
        this object.
        """
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True

    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app label.
        Uses pretty much the same logic as has_perm, above.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True

        return _user_has_module_perms(self, app_label)


class AbstractUser(AbstractBaseUser, PermissionsMixin):
    """
    An abstract base class implementing a fully featured User model with
    admin-compliant permissions.

    Username, password and email are required. Other fields are optional.
    """
    username = models.CharField(
        _('username'),
        max_length=30,
        unique=True,
        help_text=_('Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.'),
        validators=[
            validators.RegexValidator(
                r'^[\w.@+-]+$',
                _('Enter a valid username. This value may contain only '
                  'letters, numbers '
                  'and @/./+/-/_ characters.')
            ),
        ],
        error_messages={
            'unique': _("A user with that username already exists."),
        },
    )
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('email address'), blank=True)
    is_staff = models.BooleanField(
        _('staff status'),
        default=False,
        help_text=_('Designates whether the user can log into this admin site.'),
    )
    is_active = models.BooleanField(
        _('active'),
        default=True,
        help_text=_(
            'Designates whether this user should be treated as active. '
            'Unselect this instead of deleting accounts.'
        ),
    )
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    objects = UserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
        abstract = True

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name

    def email_user(self, subject, message, from_email=None, **kwargs):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email], **kwargs)


class User(AbstractUser):
    """
    Users within the Django authentication system are represented by this
    model.

    Username, password and email are required. Other fields are optional.
    """
    class Meta(AbstractUser.Meta):
        # Allows the project to substitute a custom user model via
        # settings.AUTH_USER_MODEL.
        swappable = 'AUTH_USER_MODEL'


@python_2_unicode_compatible
class AnonymousUser(object):
    # Stand-in for an unauthenticated user; mirrors the User API without any
    # database representation.
    id = None
    pk = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    _groups = EmptyManager(Group)
    _user_permissions = EmptyManager(Permission)

    def __init__(self):
        pass

    def __str__(self):
        return 'AnonymousUser'

    def __eq__(self, other):
        # All AnonymousUser instances compare equal.
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return 1  # instances always return the same hash value

    def save(self):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")

    def delete(self):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")

    def set_password(self, raw_password):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")

    def check_password(self, raw_password):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")

    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)

    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)

    def get_group_permissions(self, obj=None):
        # No groups, hence no group permissions.
        return set()

    def get_all_permissions(self, obj=None):
        return _user_get_all_permissions(self, obj=obj)

    def has_perm(self, perm, obj=None):
        return _user_has_perm(self, perm, obj=obj)

    def has_perms(self, perm_list, obj=None):
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True

    def has_module_perms(self, module):
        return _user_has_module_perms(self, module)

    def is_anonymous(self):
        return True

    def is_authenticated(self):
        return False

    def get_username(self):
        return self.username
bsd-3-clause
logzio/logzio-python-handler
tests/mockLogzioListener/listener.py
1
2722
# noinspection PyUnresolvedReferences
import future

import socket
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread

from .logsList import logs_list
from .persistentFlags import persistent_flags


class ListenerHandler(BaseHTTPRequestHandler):
    """HTTP handler that mimics the Logz.io listener endpoint for tests.

    Accepts newline-separated log lines via POST, appending each non-empty
    line to the shared logs_list. When the persistent server-error flag is
    set, responds 500 instead of accepting logs.
    """

    def do_POST(self):
        try:
            # Fix: a request without a Content-Length header used to crash
            # with TypeError (int(None)); treat a missing header as an
            # empty body instead.
            content_length = int(self.headers.get("Content-Length") or 0)
            all_logs = self.rfile.read(content_length).decode("utf-8").split('\n')
            # NOTE: str.split always yields at least one element, so the
            # original `len(all_logs) == 0` check was unreachable and has
            # been removed; an empty body simply appends nothing.
            for log in all_logs:
                if log != "":
                    if persistent_flags.get_server_error():
                        # Simulated outage requested by the test harness.
                        self._set_response(500, "Issue!!!!!!!", b"Not good, not good at all.")
                        return
                    logs_list.list.append(log)
            self._set_response(200, "OK", b"Shabam! got logs.")
            return
        except IndexError:
            self._set_response(400, "Bad Request", b"Bad request you got there, pal")
            return

    def _set_response(self, http_code, http_description, byte_body):
        """Send a complete response: status line, headers and body."""
        self.send_response(http_code, http_description)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(byte_body)


class MockLogzioListener:
    """In-process HTTP server posing as the Logz.io ingestion endpoint.

    Listens on an OS-assigned localhost port in a daemon thread; received
    log lines are collected in logs_list for assertions.
    """

    def __init__(self):
        self.port = _find_available_port()
        self.host = "localhost"
        self.server = HTTPServer((self.host, self.port), ListenerHandler)
        # Daemon thread so the test process can exit without an explicit
        # shutdown.
        self.listening_thread = Thread(target=self._start_listening)
        self.listening_thread.daemon = True
        self.listening_thread.name = "mock-logzio-listener"
        self.listening_thread.start()
        self.logs_list = logs_list.list
        self.persistent_flags = persistent_flags

    def _start_listening(self):
        self.server.serve_forever()

    def get_port(self):
        return self.port

    def get_host(self):
        return self.host

    def find_log(self, search_log):
        """Return True if any received log line contains search_log."""
        for current_log in self.logs_list:
            if search_log in current_log:
                return True
        return False

    def get_number_of_logs(self):
        return len(self.logs_list)

    def clear_logs_buffer(self):
        # Clear in place so external references to the list stay valid.
        self.logs_list[:] = []

    def set_server_error(self):
        self.persistent_flags.set_server_error()

    def clear_server_error(self):
        self.persistent_flags.clear_server_error()


def _find_available_port():
    """Ask the OS for a free TCP port and return its number.

    NOTE(review): the port is released before the caller binds it, so there
    is a small race window — acceptable for a single-process test helper.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("", 0))
    sock.listen(1)
    port = sock.getsockname()[1]
    sock.close()
    return port
apache-2.0
veridiam/Madcow-Waaltz
build/lib/madcow/include/chardet/constants.py
237
1484
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### _debug = 0 eDetecting = 0 eFoundIt = 1 eNotMe = 2 eStart = 0 eError = 1 eItsMe = 2 SHORTCUT_THRESHOLD = 0.95 import __builtin__ if not hasattr(__builtin__, 'False'): False = 0 True = 1 else: False = __builtin__.False True = __builtin__.True
gpl-3.0