code
stringlengths
1
25.8M
language
stringclasses
18 values
source
stringclasses
4 values
repo
stringclasses
78 values
path
stringlengths
0
268
from __future__ import unicode_literals from copy import deepcopy from django.db import models from django.core.exceptions import FieldError from django.db.models.query_utils import DeferredAttribute class FieldInstanceTracker(object): def __init__(self, instance, fields, field_map): self.instance = instance self.fields = fields self.field_map = field_map self.init_deferred_fields() def get_field_value(self, field): return getattr(self.instance, self.field_map[field]) def set_saved_fields(self, fields=None): if not self.instance.pk: self.saved_data = {} elif fields is None: self.saved_data = self.current() else: self.saved_data.update(**self.current(fields=fields)) # preventing mutable fields side effects for field, field_value in self.saved_data.items(): self.saved_data[field] = deepcopy(field_value) def current(self, fields=None): """Returns dict of current values for all tracked fields""" if fields is None: if self.instance._deferred_fields: fields = [ field for field in self.fields if field not in self.instance._deferred_fields ] else: fields = self.fields return dict((f, self.get_field_value(f)) for f in fields) def has_changed(self, field): """Returns ``True`` if field has changed from currently saved value""" if field in self.fields: return self.previous(field) != self.get_field_value(field) else: raise FieldError('field "%s" not tracked' % field) def previous(self, field): """Returns currently saved value of given field""" return self.saved_data.get(field) def changed(self): """Returns dict of fields that changed since save (with old values)""" return dict( (field, self.previous(field)) for field in self.fields if self.has_changed(field) ) def init_deferred_fields(self): self.instance._deferred_fields = [] if not self.instance._deferred: return class DeferredAttributeTracker(DeferredAttribute): def __get__(field, instance, owner): data = instance.__dict__ if data.get(field.field_name, field) is field: instance._deferred_fields.remove(field.field_name) value 
= super(DeferredAttributeTracker, field).__get__( instance, owner) self.saved_data[field.field_name] = deepcopy(value) return data[field.field_name] for field in self.fields: field_obj = self.instance.__class__.__dict__.get(field) if isinstance(field_obj, DeferredAttribute): self.instance._deferred_fields.append(field) # Django 1.4 model = None if hasattr(field_obj, 'model_ref'): model = field_obj.model_ref() field_tracker = DeferredAttributeTracker( field_obj.field_name, model) setattr(self.instance.__class__, field, field_tracker) class FieldTracker(object): tracker_class = FieldInstanceTracker def __init__(self, fields=None): self.fields = fields def get_field_map(self, cls): """Returns dict mapping fields names to model attribute names""" field_map = dict((field, field) for field in self.fields) all_fields = dict((f.name, f.attname) for f in cls._meta.local_fields) field_map.update(**dict((k, v) for (k, v) in all_fields.items() if k in field_map)) return field_map def contribute_to_class(self, cls, name): self.name = name self.attname = '_%s' % name models.signals.class_prepared.connect(self.finalize_class, sender=cls) def finalize_class(self, sender, **kwargs): if self.fields is None: self.fields = (field.attname for field in sender._meta.local_fields) self.fields = set(self.fields) self.field_map = self.get_field_map(sender) models.signals.post_init.connect(self.initialize_tracker) self.model_class = sender setattr(sender, self.name, self) def initialize_tracker(self, sender, instance, **kwargs): if not isinstance(instance, self.model_class): return # Only init instances of given model (including children) tracker = self.tracker_class(instance, self.fields, self.field_map) setattr(instance, self.attname, tracker) tracker.set_saved_fields() self.patch_save(instance) def patch_save(self, instance): original_save = instance.save def save(**kwargs): ret = original_save(**kwargs) update_fields = kwargs.get('update_fields') if not update_fields and update_fields is 
not None: # () or [] fields = update_fields elif update_fields is None: fields = None else: fields = ( field for field in update_fields if field in self.fields ) getattr(instance, self.attname).set_saved_fields( fields=fields ) return ret instance.save = save def __get__(self, instance, owner): if instance is None: return self else: return getattr(instance, self.attname) class ModelInstanceTracker(FieldInstanceTracker): def has_changed(self, field): """Returns ``True`` if field has changed from currently saved value""" if not self.instance.pk: return True elif field in self.saved_data: return self.previous(field) != self.get_field_value(field) else: raise FieldError('field "%s" not tracked' % field) def changed(self): """Returns dict of fields that changed since save (with old values)""" if not self.instance.pk: return {} saved = self.saved_data.items() current = self.current() return dict((k, v) for k, v in saved if v != current[k]) class ModelTracker(FieldTracker): tracker_class = ModelInstanceTracker def get_field_map(self, cls): return dict((field, field) for field in self.fields)
unknown
codeparrot/codeparrot-clean
#-------------------------------------------------------------------------------
# Name:        vertex
# Purpose:     class for 3D vertices
#
# Author:      bencesomogyi
#
# Created:     19.10.2013
# Copyright:   (c) bencesomogyi 2013
# Licence:     <your licence>
#-------------------------------------------------------------------------------
import numpy


class Vertex:
    """Class for 3D vertices."""

    def __init__(self, X=0.0, Y=0.0, Z=0.0):
        """**Constructor**

        :param X: default: 0.0, x coordinate of the vertex
        :type X: float
        :param Y: default: 0.0, y coordinate of the vertex
        :type Y: float
        :param Z: default: 0.0, z coordinate of the vertex
        :type Z: float
        """
        self.X = X  # vertex X coordinate
        self.Y = Y  # vertex Y coordinate
        self.Z = Z  # vertex Z coordinate
        # Vector of vertex coordinates. Kept in sync only by the
        # setX/setY/setZ setters -- direct writes to X/Y/Z bypass it.
        self.coords = numpy.array([X, Y, Z])
        self.father = []  # reference to father object
        self.faces = []   # references to connected faces
        self.cells = []   # references to connected cells
        self.id = 0.      # vertex id (float to preserve legacy behavior)

    def get_coords(self):
        """Return coordinates as a fresh numpy array."""
        return numpy.array([self.X, self.Y, self.Z])

    def setX(self, newX):
        """Setter for X coordinate of vertex (also updates ``coords``)."""
        self.X = newX
        self.coords[0] = newX

    def setY(self, newY):
        """Setter for Y coordinate of vertex (also updates ``coords``)."""
        self.Y = newY
        self.coords[1] = newY

    def setZ(self, newZ):
        """Setter for Z coordinate of vertex (also updates ``coords``)."""
        self.Z = newZ
        self.coords[2] = newZ

    def print_coordinates(self):
        """Print id and coordinates (debug helper)."""
        # FIX: single-argument print() call form works on both Python 2
        # (as a parenthesized expression) and Python 3; the previous
        # ``print "..."`` statement was a SyntaxError on Python 3.
        print("id: " + str(self.id) + " " + str(self.X) + " " +
              str(self.Y) + " " + str(self.Z))

    def get_cell_ids(self):
        """Return the ids of all cells connected to this vertex."""
        cell_ids = []
        for cell_ in self.cells:
            cell_ids.append(cell_.id)
        return cell_ids

    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 -- add a matching __hash__ if vertices are
    # ever stored in sets or used as dict keys.
    def __eq__(self, other):
        return (self.X == other.X) and (self.Y == other.Y) and \
               (self.Z == other.Z)


def are_vertices_equal(vertex1, vertex2):
    """Return True when two vertices have identical coordinates."""
    return (vertex1.X == vertex2.X) and (vertex1.Y == vertex2.Y) and \
           (vertex1.Z == vertex2.Z)


def get_independent_vertices(vertex_list):
    """Return the list of independent vertices.

    Vertices comparing equal (same coordinates) are collapsed to the first
    occurrence.

    :param vertex_list: list of vertices
    :type vertex_list: list
    :return: list of independent vertices
    :rtype: list
    """
    indep_vertices = []
    for vertex in vertex_list:
        match = False
        for indep_vertex in indep_vertices:
            if vertex == indep_vertex:
                match = True
                break
        if not match:
            indep_vertices.append(vertex)
    return indep_vertices


def get_list_of_ids(vertex_list):
    """Return the id / list of ids for a vertex / list of vertices."""
    id_list = []
    if isinstance(vertex_list, Vertex):
        id_list.append(vertex_list.id)
    else:
        for vertex_ in vertex_list:
            id_list.append(vertex_.id)
    return id_list
unknown
codeparrot/codeparrot-clean
from django.apps import apps
from django.test import SimpleTestCase


class NoModelTests(SimpleTestCase):
    def test_no_models(self):
        """It's possible to load an app with no models.py file."""
        # AppConfig.models_module stays None when the app package provides
        # no models module for the registry to import.
        app_config = apps.get_app_config("no_models")
        self.assertIsNone(app_config.models_module)
python
github
https://github.com/django/django
tests/no_models/tests.py
#!/usr/bin/env python3
# Generates __multiarray_api.h / __multiarray_api.c: the NumPy C-API
# function-pointer table and the header macros client modules use to call
# into it via import_array().
import argparse
import os

import genapi
import numpy_api
from genapi import BoolValuesApi, FunctionApi, GlobalVarApi, TypeApi

# use annotated api when running under cpychecker

# Header template: first %s receives the internal (module-side) declarations,
# second %s the client-side #define wrappers over the PyArray_API table.
h_template = r"""
#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)

typedef struct {
        PyObject_HEAD
        npy_bool obval;
} PyBoolScalarObject;

extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];

%s

#else

#if defined(PY_ARRAY_UNIQUE_SYMBOL)
    #define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
    #define _NPY_VERSION_CONCAT_HELPER2(x, y) x ## y
    #define _NPY_VERSION_CONCAT_HELPER(arg) \
        _NPY_VERSION_CONCAT_HELPER2(arg, PyArray_RUNTIME_VERSION)
    #define PyArray_RUNTIME_VERSION \
        _NPY_VERSION_CONCAT_HELPER(PY_ARRAY_UNIQUE_SYMBOL)
#endif

/* By default do not export API in an .so (was never the case on windows) */
#ifndef NPY_API_SYMBOL_ATTRIBUTE
    #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN
#endif

#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
extern NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API;
extern NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION;
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API;
NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION;
#else
static void **PyArray_API = NULL;
static int PyArray_RUNTIME_VERSION = 0;
#endif
#endif

%s

/*
 * The DType classes are inconvenient for the Python generation so exposed
 * manually in the header below (may be moved).
 */
#include "numpy/_public_dtype_api_table.h"

#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
static int
_import_array(void)
{
  int st;
  PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath");
  PyObject *c_api;

  if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) {
      PyErr_Clear();
      numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
  }

  if (numpy == NULL) {
      return -1;
  }

  c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
  Py_DECREF(numpy);
  if (c_api == NULL) {
      return -1;
  }

  if (!PyCapsule_CheckExact(c_api)) {
      PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
      Py_DECREF(c_api);
      return -1;
  }
  PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
  Py_DECREF(c_api);
  if (PyArray_API == NULL) {
      PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
      return -1;
  }

  /*
   * On exceedingly few platforms these sizes may not match, in which case
   * We do not support older NumPy versions at all.
   */
  if (sizeof(Py_ssize_t) != sizeof(Py_intptr_t) &&
        PyArray_RUNTIME_VERSION < NPY_2_0_API_VERSION) {
      PyErr_Format(PyExc_RuntimeError,
          "module compiled against NumPy 2.0 but running on NumPy 1.x. "
          "Unfortunately, this is not supported on niche platforms where "
          "`sizeof(size_t) != sizeof(inptr_t)`.");
  }
  /*
   * Perform runtime check of C API version. As of now NumPy 2.0 is ABI
   * backwards compatible (in the exposed feature subset!) for all practical
   * purposes.
   */
  if (NPY_VERSION < PyArray_GetNDArrayCVersion()) {
      PyErr_Format(PyExc_RuntimeError, "module compiled against "\
             "ABI version 0x%%x but this version of numpy is 0x%%x", \
             (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
      return -1;
  }
  PyArray_RUNTIME_VERSION = (int)PyArray_GetNDArrayCFeatureVersion();
  if (NPY_FEATURE_VERSION > PyArray_RUNTIME_VERSION) {
      PyErr_Format(PyExc_RuntimeError,
          "module was compiled against NumPy C-API version 0x%%x "
          "(NumPy " NPY_FEATURE_VERSION_STRING ") "
          "but the running NumPy has C-API version 0x%%x. "
          "Check the section C-API incompatibility at the "
          "Troubleshooting ImportError section at "
          "https://numpy.org/devdocs/user/troubleshooting-importerror.html"
          "#c-api-incompatibility "
          "for indications on how to solve this problem.",
          (int)NPY_FEATURE_VERSION, PyArray_RUNTIME_VERSION);
      return -1;
  }

  /*
   * Perform runtime check of endianness and check it matches the one set by
   * the headers (npy_endian.h) as a safeguard
   */
  st = PyArray_GetEndianness();
  if (st == NPY_CPU_UNKNOWN_ENDIAN) {
      PyErr_SetString(PyExc_RuntimeError,
                      "FATAL: module compiled as unknown endian");
      return -1;
  }
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
  if (st != NPY_CPU_BIG) {
      PyErr_SetString(PyExc_RuntimeError,
                      "FATAL: module compiled as big endian, but "
                      "detected different endianness at runtime");
      return -1;
  }
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
  if (st != NPY_CPU_LITTLE) {
      PyErr_SetString(PyExc_RuntimeError,
                      "FATAL: module compiled as little endian, but "
                      "detected different endianness at runtime");
      return -1;
  }
#endif

  return 0;
}

#if (SWIG_VERSION < 0x040400)
#define _RETURN_VALUE NULL
#else
#define _RETURN_VALUE 0
#endif

#define import_array() { \
    if (_import_array() < 0) { \
        PyErr_Print(); \
        PyErr_SetString( \
            PyExc_ImportError, \
            "numpy._core.multiarray failed to import" \
        ); \
        return _RETURN_VALUE; \
    } \
}

#define import_array1(ret) { \
    if (_import_array() < 0) { \
        PyErr_Print(); \
        PyErr_SetString( \
            PyExc_ImportError, \
            "numpy._core.multiarray failed to import" \
        ); \
        return ret; \
    } \
}

#define import_array2(msg, ret) { \
    if (_import_array() < 0) { \
        PyErr_Print(); \
        PyErr_SetString(PyExc_ImportError, msg); \
        return ret; \
    } \
}

#endif

#endif
"""  # noqa: E501


c_template = r"""
/* These pointers will be stored in the C-object for use in other
   extension modules
*/

void *PyArray_API[] = {
%s
};
"""


def generate_api(output_dir, force=False):
    """Generate the multiarray API header/source pair into ``output_dir``.

    Returns the tuple of generated file paths.
    """
    # NOTE(review): ``force`` is currently unused -- generation always runs.
    basename = 'multiarray_api'

    h_file = os.path.join(output_dir, f'__{basename}.h')
    c_file = os.path.join(output_dir, f'__{basename}.c')
    targets = (h_file, c_file)

    sources = numpy_api.multiarray_api
    do_generate_api(targets, sources)
    return targets


def do_generate_api(targets, sources):
    """Render ``h_template``/``c_template`` from the API description.

    ``sources`` is the numpy_api.multiarray_api sequence: global vars,
    scalar bool values, type objects and functions, each mapping a name to
    its fixed slot index in the PyArray_API pointer table.
    """
    header_file = targets[0]
    c_file = targets[1]

    global_vars = sources[0]
    scalar_bool_values = sources[1]
    types_api = sources[2]
    multiarray_funcs = sources[3]

    multiarray_api = sources[:]

    module_list = []
    extension_list = []
    init_list = []

    # Check multiarray api indexes
    multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
    # NOTE(review): if "__unused_indices__" were ever absent this would call
    # max(0) and raise TypeError -- relies on genapi always providing the
    # key; confirm upstream.
    unused_index_max = max(multiarray_api_index.get("__unused_indices__", 0))
    genapi.check_api_dict(multiarray_api_index)

    numpyapi_list = genapi.get_api_functions('NUMPY_API', multiarray_funcs)

    # Create dict name -> *Api instance
    api_name = 'PyArray_API'
    multiarray_api_dict = {}
    for f in numpyapi_list:
        name = f.name
        index = multiarray_funcs[name][0]
        annotations = multiarray_funcs[name][1:]
        multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,
                                                  f.return_type,
                                                  f.args, api_name)

    for name, val in global_vars.items():
        index, type = val
        multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)

    for name, val in scalar_bool_values.items():
        index = val[0]
        multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)

    for name, val in types_api.items():
        index = val[0]
        internal_type = None if len(val) == 1 else val[1]
        multiarray_api_dict[name] = TypeApi(
            name, index, 'PyTypeObject', api_name, internal_type)

    # Every indexed name must have exactly one Api instance and vice versa.
    if len(multiarray_api_dict) != len(multiarray_api_index):
        keys_dict = set(multiarray_api_dict.keys())
        keys_index = set(multiarray_api_index.keys())
        keys_index_dict = keys_index - keys_dict
        keys_dict_index = keys_dict - keys_index
        raise AssertionError(
            f"Multiarray API size mismatch - index has extra keys {keys_index_dict}, "
            f"dict has extra keys {keys_dict_index}"
        )

    extension_list = []
    for name, index in genapi.order_dict(multiarray_api_index):
        api_item = multiarray_api_dict[name]
        # In NumPy 2.0 the API may have holes (which may be filled again)
        # in that case, add `NULL` to fill it.
        while len(init_list) < api_item.index:
            init_list.append("        NULL")

        extension_list.append(api_item.define_from_array_api_string())
        init_list.append(api_item.array_api_define())
        module_list.append(api_item.internal_define())

    # In case we end with a "hole", append more NULLs
    while len(init_list) <= unused_index_max:
        init_list.append("        NULL")

    # Write to header
    s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
    genapi.write_file(header_file, s)

    # Write to c-code
    s = c_template % ',\n'.join(init_list)
    genapi.write_file(c_file, s)

    return targets


def main():
    """CLI entry point: parse the output directory and generate the API."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-o",
        "--outdir",
        type=str,
        help="Path to the output directory"
    )
    parser.add_argument(
        "-i",
        "--ignore",
        type=str,
        help="An ignored input - may be useful to add a "
             "dependency between custom targets"
    )
    args = parser.parse_args()

    outdir_abs = os.path.join(os.getcwd(), args.outdir)

    generate_api(outdir_abs)


if __name__ == "__main__":
    main()
python
github
https://github.com/numpy/numpy
numpy/_core/code_generators/generate_numpy_api.py
import collections
import copy
import heapq
import traceback
import warnings
import weakref

import numpy
import six

import chainer
from chainer import cuda
from chainer import initializers
from chainer.initializers import constant
from chainer import utils
from chainer.utils import argument


def _check_grad_type(func, x, gx):
    # Validate that gradient ``gx`` matches variable ``x`` in array type,
    # dtype and shape; raises TypeError/ValueError with a bug-report hint
    # naming ``func`` when provided.
    def make_message(message):
        if func:
            detail = 'Function `{0}` ({1}) has a bug.\n'.format(
                type(func).__name__, func.label)

            stack = func.stack
            if stack:
                detail += 'Stacktrace of the function is below:\n'
                # NOTE(review): checks ``func.stack`` but formats
                # ``func._stack`` -- looks inconsistent; confirm both refer
                # to the same attribute.
                for line in traceback.format_list(func._stack):
                    detail += line

            detail += '''
Please report this error to the issue tracker with the stack trace,
the information of your environment, and your script:
https://github.com/pfnet/chainer/issues/new.
'''.format(type(func).__name__, func.label)

        else:
            detail = ''

        detail += message
        return detail

    if x.data is None or gx is None:
        # ``x.data is None`` implies that the data array is not retained
        return
    if not isinstance(gx, type(x.data)):
        msg = ('Type of data and grad mismatch\n%s != %s' %
               (type(x.data), type(gx)))
        raise TypeError(make_message(msg))
    if gx.dtype != x.data.dtype:
        msg = ('Dtype of data and grad mismatch\n%s != %s' %
               (x.data.dtype, gx.dtype))
        raise TypeError(make_message(msg))
    if gx.shape != x.data.shape:
        msg = ('Shape of data and grad mismatch\n%s != %s' %
               (x.data.shape, gx.shape))
        raise ValueError(make_message(msg))


def variable_repr(var):
    """Return the string representation of a variable.

    Args:
        var (~chainer.Variable): Input Variable.
    .. seealso:: numpy.array_repr
    """
    xp = cuda.get_array_module(var)
    if xp is numpy:
        arr = var.data
    else:
        # GPU array: copy to host for printing.
        arr = var.data.get()

    if var.name:
        prefix = 'variable ' + var.name
    else:
        prefix = 'variable'

    if arr.size > 0 or arr.shape == (0,):
        lst = numpy.array2string(arr, None, None, None, ', ', prefix + '(')
    else:  # show zero-length shape unless it is (0,)
        lst = '[], shape=%s' % (repr(arr.shape),)

    return '%s(%s)' % (prefix, lst)


def variable_str(var):
    """Return the string representation of a variable.

    Args:
        var (~chainer.Variable): Input Variable.
    .. seealso:: numpy.array_str
    """
    xp = cuda.get_array_module(var)
    if xp is numpy:
        arr = var.data
    else:
        # GPU array: copy to host for printing.
        arr = var.data.get()

    if var.name:
        prefix = 'variable ' + var.name + '('
    else:
        prefix = 'variable('

    return (prefix + numpy.array2string(arr, None, None, None, ' ', prefix) +
            ')')


class VariableNode(object):

    """Node in the backward computational graph representing a variable.

    This object represents a variable node in a computational graph. The node
    is used in error backpropagation (a.k.a. backprop) to determine which
    gradient to be passed to each function.

    A variable node is held by the corresponding :class:`Variable` object,
    which is managed by users. :class:`Function` objects that take the
    variable as an input also hold references to the variable node.

    Note that the node does not hold a reference to the corresponding data
    array in general. The data array is actually accessible by the node in the
    following cases.

    1. If there exists a :class:`Variable` object that holds a reference to
       the variable node, the variable node holds a weak reference to the
       variable object, and thus the data array is accessible via the weak
       reference.
    2. If :meth:`retain_data` is called, the node holds a reference to the
       data array. It is mainly called by a function that needs the input or
       output data array in its backprop procedure. See
       :meth:`Function.retain_inputs` and :meth:`Function.retain_outputs`
       for more details.

    Users usually do not need to touch this variable node object. The
    computational graph is automatically managed by Chainer, and any
    interface that is beneficial for users is also provided by
    :class:`Variable`.

    Args:
        variable (Variable): The corresponding variable object.
        name (str): Name of the variable node.

    Attributes:
        dtype: Data type of the data array.
        shape: Shape of the data array.
        name (str): Name of the variable node.

    """

    def __init__(self, variable, name, grad=None):
        # Weak ref: the node must not keep the user-facing Variable alive.
        self._variable = weakref.ref(variable)
        self._creator = None
        self._data = None
        self._rank = 0
        self.name = name
        self._requires_grad = variable.requires_grad

        vdata = variable.data
        self._set_data_type(vdata)

        # Goes through the ``grad`` setter, which validates via
        # _check_grad_type.
        self.grad = grad

    @property
    def creator(self):
        """Function node that created this variable node."""
        return self._creator

    @creator.setter
    def creator(self, func):
        self._creator = func
        if func is not None:
            # Rank orders nodes topologically for backprop scheduling.
            self._rank = func.rank + 1

    @property
    def data(self):
        """Data array of the corresponding variable.

        If the data is not available, it returns ``None``.

        """
        return self._data

    @data.setter
    def data(self, d):
        self._data = d
        self._set_data_type(d)

    @property
    def grad(self):
        """Gradient array of the corresponding variable."""
        return self._grad

    @grad.setter
    def grad(self, g):
        _check_grad_type(None, self, g)
        self._grad = g

    @property
    def label(self):
        """Short text that represents the variable node."""
        if self.shape == ():
            return str(self.dtype)
        return '(%s), %s' % (', '.join(map(str, self.shape)),
                             str(self.dtype))

    @property
    def rank(self):
        return self._rank

    @property
    def requires_grad(self):
        """It indicates that ``grad`` will be set in backward calculation."""
        return self._requires_grad

    def set_creator(self, creator):
        """Sets a :class:`Function` object that created this node.

        This method is equivalent to ``self.creator = creator``.

        Args:
            creator (Function): Function object that created this node.

        """
        self.creator = creator

    def unchain(self):
        """Deletes the reference to the creator of this variable node.

        This method is equivalent to ``self.creator = None``.

        """
        self.creator = None

    def retain_data(self):
        """Lets the node hold a reference to the underlying data array.

        This method gets the data array of the corresponding variable and
        keeps it. If the weak reference to the corresponding variable is
        dead, it raises an error.

        """
        variable = self._variable()
        if variable is not None:
            self.data = variable.data
        else:
            raise RuntimeError('cannot retain variable data: the variable has '
                               'been already released')

    def _set_data_type(self, d):
        # Mirror the array's dtype/shape on the node (both None when no
        # data is set).
        if d is None:
            self.dtype = None
            self.shape = None
        else:
            self.dtype = d.dtype
            self.shape = d.shape

    def _set_grad_with_check(self, g, func, var):
        _check_grad_type(func, var, g)
        self._grad = g


def _create_variable(data, name, grad, requires_grad):
    # Module-level factory used by Variable.__reduce__ for pickling.
    return Variable(
        data, name=name, grad=grad, requires_grad=requires_grad)


class Variable(object):

    """__init__(data=None, *, name=None, grad=None, requires_grad=True)

    Array with a structure to keep track of computation.

    Every variable holds a data array of type either :class:`numpy.ndarray`
    or :class:`cupy.ndarray`.

    A variable object holds a data array and a :class:`VariableNode` object
    of a computational graph. If the variable is constructed by the user,
    the node is _root_ and does not hold any parent. If the variable is
    constructed by a :class:`Function` object, the node holds a reference
    to its parent called `creator`. This reference is used in
    backpropagation to backtrack the graph.

    Users can disable (resp. enable) this chaining behavior by calling
    :func:`~chainer.no_backprop_mode` (resp.
    :func:`~chainer.force_backprop_mode`). In the former context, a variable
    never creates a computational graph, whereas in the latter context, it
    is forced to create.

    .. warning::

       ``volatile`` argument is not supported anymore since v2.
       Instead, use :func:`chainer.no_backprop_mode`.

    Args:
        data (numpy.ndarray or cupy.ndarray): Initial data array.
        name (str): Name of the variable.
grad (numpy.ndarray or cupy.ndarray): Initial gradient array. requires_grad (bool): Boolean indicating whether ``grad`` will be set in backward calculation. Attributes: data: Data array of type either :class:`numpy.ndarray` or :class:`cupy.ndarray`. If it is None, the variable is left in an uninitialized state. grad: Gradient array. creator: The function who creates this variable. It is ``None`` if the variable is not created by any function. """ # NOQA def __init__(self, data=None, **kwargs): argument.check_unexpected_kwargs( kwargs, volatile='volatile argument is not supported anymore. ' 'Use chainer.using_config') name, grad, requires_grad \ = argument.parse_kwargs( kwargs, ('name', None), ('grad', None), ('requires_grad', True)) if (data is not None and not isinstance(data, (numpy.ndarray, cuda.ndarray))): msg = '''numpy.ndarray or cuda.ndarray are expected. Actual: {0}'''.format(type(data)) raise TypeError(msg) # Use a list as a data structure to hold the data array indirectly to # abstract its initialized/uninitialized state. 
self._data = [data] self._requires_grad = requires_grad self._node = VariableNode(self, name, grad) def __copy__(self): return self._copy_to(Variable()) def _copy_to(self, target): target.__dict__ = copy.copy(self.__dict__) target._node = VariableNode(target, self.name) return target def __reduce__(self): return _create_variable, (self.data, self.name, self._node._grad, self._requires_grad) def __repr__(self): return variable_repr(self) def __str__(self): return variable_str(self) @property def name(self): return self._node.name @name.setter def name(self, n): self._node.name = n def summary(self): if self.name: return '<variable %s>' % self.name else: return '<variable at 0x%x>' % id(self) def debug_print(self): """Display a summary of the stored data and location of the Variable""" msg = """{summary} - device: {device} - backend: {background} - shape: {shape} - dtype: {dtype} - statistics: {stats} - grad: {grad}""" stats_msg = 'mean={0:.8f}, std={1:.8f}' try: device = self.data.device except AttributeError: device = 'CPU' with cuda.get_device_from_array(self.data) as dev: xp = numpy if int(dev) == -1 else cuda.cupy if self.grad is None: grad = None elif xp.all(self.grad == 0): grad = 0 else: grad = stats_msg.format(float(xp.mean(self.grad)), float(xp.std(self.grad))) stats = stats_msg.format(float(xp.mean(self.data)), float(xp.std(self.data))) return msg.format(summary=self.summary(), grad=grad, shape=self.data.shape, background=type(self.data), dtype=self.data.dtype, device=device, stats=stats) def __pos__(self): return self def __len__(self): """Returns the first dimension of the data array. Returns: int: Number of the first dimension of the data array. """ return len(self.data) @property def label(self): """Short text that represents the variable.""" return self._node.label @property def creator(self): """:meth:`Function` object that created this variable. This property has a setter to which ``None`` can be set. 
Setting ``None`` to this property is equivalent to call :meth:`unchain`; it purges the variable from the function that created this variable. The setter also accepts the original :meth:`Function` object that created this variable. For example, you can once set ``None`` to this property and then set the original value again. .. note:: Setting an irrelevant :meth:`Function` object does not emit any error immediately, whereas the behavior is undefined. Do not set a :meth:`Function` object that did not create this variable object. """ return self._node._creator @creator.setter def creator(self, func): self._node.creator = func @property def data(self): return self._data[0] @data.setter def data(self, d): self._data[0] = d self._node._set_data_type(d) @property def grad(self): return self._node._grad @grad.setter def grad(self, g): self._node._set_grad_with_check(g, None, self) @property def shape(self): return self.data.shape @property def ndim(self): return self.data.ndim @property def size(self): return self.data.size @property def dtype(self): return self.data.dtype @property def rank(self): return self._node.rank @property def node(self): return self._node @property def requires_grad(self): """It indicates that ``grad`` will be set in backward calculation.""" return self._requires_grad def to_cpu(self): """Copies the data and gradient arrays to CPU.""" if self.data is None: return self._data = [cuda.to_cpu(self.data)] # ensure that the node tracks the device migration node = self._node if node._data is not None: node.retain_data() if node._grad is not None: node._grad = cuda.to_cpu(node._grad) def to_gpu(self, device=None): """Copies the data and gradient arrays to specified GPU. Args: device: Target device specifier. If omitted, the current device is used. 
""" if self.data is None: current = cuda.Device().id self._initial_device = current if device is None else device else: self._data = [cuda.to_gpu(self.data, device)] # ensure that the node tracks the device migration node = self._node if node._data is not None: node.retain_data() if node._grad is not None: node._grad = cuda.to_gpu(node._grad, device) def cleargrad(self): """Clears the gradient array.""" self._node._grad = None def zerograd(self): """Initializes the gradient array by zeros. .. deprecated:: v1.15 Use :meth:`cleargrad` instead. """ warnings.warn( 'Variable.zerograd is deprecated. Use Variable.cleargrad instead.', DeprecationWarning) if self.data is None: return with cuda.get_device_from_array(self.data) as dev: node = self._node if node._grad is None: xp = numpy if int(dev) == -1 else cuda.cupy node._grad = xp.zeros_like(self.data) else: node._grad.fill(0) def copydata(self, var): """Copies the data array from given source variable. This method copies the data array from given variable to this variable. The copy is done even if the arrays reside on different devices, including across the host and a GPU device. If this variable has an uninitialized data array, this method initializes it by the data array of the given variable. Similarly, if the given variable has an uninitialized data array, this method initializes it by the data array of this variable (``self``). If both are uninitialized, this method does nothing. Args: var (Variable): Source variable. """ src = var.data dst = self.data if src is None: if dst is None: return var.initialize(self.shape) src = var.data elif dst is None: self.initialize(src.shape) dst = self.data src_xp = cuda.get_array_module(src) dst_xp = cuda.get_array_module(dst) if dst_xp is src_xp: dst_xp.copyto(dst, src) elif dst_xp is numpy: dst_xp.copyto(dst, src.get()) else: dst.set(src) def addgrad(self, var): """Accumulates the gradient array from given source variable. 
This method adds the gradient of a given variable to the gradient of this variable. The accumulation is even done across the host and different devices. If this variable has uninitialized data/grad arrays, this method initializes it with the shape of the given varaible and then accumulates the gradient. Args: var (Variable): Source variable. """ src = var._node._grad if src is None: return if self.data is None: self.initialize(var.shape) dst = self._node._grad src_dev = cuda.get_device_from_array(src) dst_dev = cuda.get_device_from_array(self.data) if src_dev.id == dst_dev.id: with dst_dev: if dst is None: xp = cuda.get_array_module(src) self._node.grad = xp.copy(src) else: dst += src return if dst_dev.id < 0: src_grad = cuda.to_cpu(src) else: src_grad = cuda.to_gpu(src, device=dst_dev) if dst is None: self._node.grad = src_grad else: with dst_dev: dst += src_grad def set_creator(self, gen_func): """Notifies the variable that the given function is its creator. Args: gen_func (Function): Function object that creates this variable as one of its outputs. """ self._node.set_creator(gen_func) def backward(self, retain_grad=False): """Runs error backpropagation (a.k.a. backprop) from this variable. On backprop, :meth:`Function.backward` is called on each :class:`Function` object appearing in the backward graph starting from this variable. The backward graph is represented by backward references from variable nodes to their creators, and from functions to their input variable nodes. The backprop stops at all root nodes. Some functions set ``None`` as gradients of some inputs, where further backprop does not take place at such inputs. This method uses :data:`grad` as the initial error array. User can manually set a gradient array before calling this method. If :data:`data` contains only one element (i.e., it is scalar) and :data:`grad` is ``None``, then this method automatically complements 1.0 as the initial error. 
This is useful on starting backprop from some scalar loss value. Args: retain_grad (bool): If ``True``, the gradient arrays of all intermediate variables are kept. Otherwise, :data:`grad` of the intermediate variables are set to ``None`` on appropriate timing, which may reduce the maximum memory consumption. In most cases of training some models, the purpose of backprop is to compute gradients of parameters, not of all variables, and therefore it is recommended to set this flag ``False``. """ if self.creator is None: return initial_device = None if cuda.available and isinstance(self.data, cuda.cupy.ndarray): try: initial_device = cuda.Device() except cuda.cupy.cuda.runtime.CUDARuntimeError as e: if e.status != 38: # cudaErrorNoDevice raise is_debug = chainer.is_debug() cand_funcs = [] seen_set = set() seen_vars = set() need_copy = set() # Initialize error by 1, if this is a loss variable if self.data.size == 1 and self.grad is None: with cuda.get_device_from_array(self.data) as device: if device is cuda.DummyDevice: self.grad = numpy.ones_like(self.data) else: self.grad = cuda.cupy.ones_like(self.data) def add_cand(cand): if cand not in seen_set: # Negate since heapq is min-heap heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand)) seen_set.add(cand) add_cand(self.creator) while cand_funcs: _, _, func = heapq.heappop(cand_funcs) outputs = [y() for y in func.outputs] # access via weak ref in_data = tuple([x.data for x in func.inputs]) out_grad = tuple([None if y is None else y.grad for y in outputs]) hooks = chainer.get_function_hooks() if func._n_local_function_hooks != 0: hooks = collections.OrderedDict(hooks) hooks.update(func.local_function_hooks) cuda.get_device_from_array(*(in_data + out_grad)).use() for hook in six.itervalues(hooks): hook.backward_preprocess(func, in_data, out_grad) func.output_data = tuple( [None if y is None else y.data for y in outputs]) gxs = func.backward(in_data, out_grad) assert len(gxs) == len(in_data) if not getattr(func, 
'_retain_after_backward', False): func.output_data = None for hook in six.itervalues(hooks): hook.backward_postprocess(func, in_data, out_grad) if is_debug: for gx in gxs: if gx is None: continue cuda.get_device_from_array(gx).use() if cuda.get_array_module(gx).isnan(gx).any(): msg = 'NaN is detected on backward computation' raise RuntimeError(msg) if not retain_grad: for y in outputs: if y is not None and y is not self.node: y.grad = None for x, gx in zip(func.inputs, gxs): if gx is None: continue if not x.requires_grad: continue _check_grad_type(func, x, gx) # Accumulate the gradient to x. It is a bit tricky to handle # branches and parameter gradient accumulation correctly. id_x = id(x) if x.creator is None: # leaf if x._grad is None: x.grad = gx need_copy.add(id_x) else: cuda.get_device_from_array(gx).use() if id_x in need_copy: x.grad = utils.force_array(x._grad + gx) # copy need_copy.remove(id_x) else: x._grad += gx else: # not a leaf add_cand(x.creator) if id_x not in seen_vars: # 1st visit x.grad = gx seen_vars.add(id_x) need_copy.add(id_x) else: cuda.get_device_from_array(gx).use() if id_x in need_copy: # 2nd visit x.grad = utils.force_array(gx + x._grad) # copied need_copy.remove(id_x) else: # 3rd or later visit x._grad += gx del gxs # to reduce memory usage if initial_device is not None: initial_device.use() def reshape(self, *shape): """Returns a variable of a different shape and the same content. .. seealso:: :func:`chainer.functions.reshape` for full documentation, """ if len(shape) == 1 and isinstance(shape[0], (tuple, list)): shape = shape[0] return chainer.functions.reshape(self, shape) def transpose(self, *axes): """Permute the dimensions of an input variable without copy. .. seealso:: :func:`chainer.functions.transpose` for full documentation. 
""" if len(axes) == 0: axes = None elif len(axes) == 1 and (isinstance(axes[0], (tuple, list)) or axes[0] is None): axes = axes[0] return chainer.functions.transpose(self, axes) def unchain(self): """Deletes the reference to the creator of this variable. This method deletes the reference to the creator from the corresponding variable node. Unlike :meth:`unchain_backward`, it does not backtrack the graph. This method is equivalent to ``self.creator = None``. """ self.creator = None def unchain_backward(self): """Deletes references between variable nodes and functions backward. After this method completes, intermediate variable nodes and functions that are not referenced from anywhere are deallocated by reference count GC. Also this variable itself deletes the reference to its creator function from the node, i.e. the node becomes root in the computation graph. It indicates that backprop after unchaining stops at this variable. This behavior is useful to implement truncated BPTT. """ cand_funcs = [] seen_set = set() def add_cand(cand): if cand is not None and cand not in seen_set: cand_funcs.append(cand) seen_set.add(cand) add_cand(self.creator) while cand_funcs: func = cand_funcs.pop() for var in func.inputs: add_cand(var.creator) func.unchain() def retain_data(self): """Lets the corresponding variable node keep the underlying array.""" self._node.data = self._data[0] def __lt__(self, other): raise NotImplementedError() def __le__(self, other): raise NotImplementedError() def __eq__(self, other): raise NotImplementedError() def __ne__(self, other): raise NotImplementedError() def __gt__(self, other): raise NotImplementedError() def __ge__(self, other): raise NotImplementedError() def __nonzero__(self): raise NotImplementedError() def __bool__(self): raise NotImplementedError() def __hash__(self): return super(Variable, self).__hash__() __array_priority__ = 200 class Parameter(Variable): """Parameter variable that can be registered to a link. 
Parameter is a subclass of :class:`Variable`. It almost behaves as same as a usual variable except that a parameter can be registered to a :class:`~chainer.Link` object just by assigning it to an attribute of the link within an :meth:`~chainer.Link.init_scope` context. Parameter also supports an initialization by an initializer. It can have two initializers: one for the data array, and the other for the gradient array. The initializer only specifies the way of filling the elements of these arrays, and the shape information is specified at the initialization point. When a link that the parameter has been registered to is passed to an :class:`~chainer.GradientMethod`, an update rule is set to the parameter. This update rule specifies how to update the data array of the parameter using its gradient array. Args: initializer (~chainer.Initializer or numpy.ndarray or cupy.ndarray): Initializer of the data array. If ``shape`` is given, this initializer is immediately used to initialize the data array. Otherwise, if it is an array, it is immediately used as the data array, and otherwise the data array is left uninitialized and will be initialized by this initializer in :meth:`initialize`. It can also be a scalar, in which case the data array will be filled by this scalar. Note that float32 is used in this case. shape (int or tuple of int or None): Shape of the parameter. If it is ``None``, the initialization is deferred to the call of :meth:`initialize`. name (str): Name of the parameter. Attributes: initializer: Initializer of the data array. It is used for initializing the data array of an uninitialized variable. update_rule: :class:`~chainer.optimizer.UpdateRule` instance that updates this variable as a parameter. This argument is set to :attr:`update_rule`. 
""" initializer = None _grad_initializer = None _initial_device = -1 def __init__(self, initializer=None, shape=None, name=None): if initializer is None: initializer = constant.NaN() elif numpy.isscalar(initializer): initializer = constant.Constant(initializer) if shape is None: if isinstance(initializer, (numpy.ndarray, cuda.ndarray)): # parameter initialized by the initial array super(Parameter, self).__init__(initializer, name=name) else: # uninitialized parameter super(Parameter, self).__init__(name=name) self.initializer = initializer dtype = getattr(initializer, 'dtype', numpy.float32) self._grad_initializer = constant.NaN(dtype) else: # parameter initialized with a given shape if isinstance(initializer, (numpy.ndarray, cuda.ndarray)): xp = cuda.get_array_module(initializer) initializer = constant.Constant(initializer) else: xp = numpy data = initializers.generate_array(initializer, shape, xp) grad = xp.full_like(data, numpy.nan) super(Parameter, self).__init__(data, name=name, grad=grad) self.update_rule = None def __copy__(self): return self._copy_to(Parameter()) def __reduce__(self): return _recover_parameter, (self.data, self.name, self.grad, self.initializer, self.update_rule) def to_cpu(self): super(Parameter, self).to_cpu() if self.data is None: self._initial_device = -1 def to_gpu(self, device=None): super(Parameter, self).to_gpu(device) if self.data is None: if device is None: device = cuda.Device().id self._initial_device = device def cleargrad(self): super(Parameter, self).cleargrad() if self.data is None: self._grad_initializer = None def zerograd(self): super(Parameter, self).zerograd() if self.data is None: dtype = getattr(self.initializer, 'dtype', None) self._grad_initializer = initializers.Zero(dtype) def initialize(self, shape): """Initializes the uninitialized variable. Uninitialized variable is a variable created with the data array set to None. This method creates and initializes the data array. 
The shape of the variable can be left unknown until this method is called. Args: shape (tuple of int): Shape of the data array. """ data = initializers.generate_array(self.initializer, shape, numpy) ginit = self._grad_initializer grad = None if ginit is None else initializers.generate_array( ginit, shape, numpy) if self._initial_device >= 0: data = cuda.to_gpu(data, device=self._initial_device) if grad is not None: grad = cuda.to_gpu(grad, device=self._initial_device) self._data[0] = data self._node._grad = grad def update(self): """Updates the data array using the gradient and the update rule. This method updates the parameter using the attached update rule. """ if self.update_rule is not None: self.update_rule.update(self) def _recover_parameter(data, name, grad, initializer, update_rule): p = Parameter(initializer=initializer, name=name) p.data = data p.grad = grad p.update_rule = update_rule return p
unknown
codeparrot/codeparrot-clean
# Copyright 2012 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import socket import SocketServer import StringIO from . import compression from . 
import constants def LocalQuery(query): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) code = sock.connect_ex(("localhost", constants.CLIENT_PORT)) if code != 0: return None compression.Send(query, sock) compression.Send(constants.END_OF_STREAM, sock) rec = compression.Receiver(sock) data = None while not rec.IsDone(): data = rec.Current() assert data[0] == query[0] data = data[1] rec.Advance() sock.close() return data class LocalHandler(SocketServer.BaseRequestHandler): def handle(self): rec = compression.Receiver(self.request) while not rec.IsDone(): data = rec.Current() action = data[0] if action == constants.REQUEST_PEERS: with self.server.daemon.peer_list_lock: response = [ p.Pack() for p in self.server.daemon.peers if p.trusting_me ] compression.Send([action, response], self.request) elif action == constants.UNRESPONSIVE_PEER: self.server.daemon.DeletePeer(data[1]) elif action == constants.REQUEST_PUBKEY_FINGERPRINT: compression.Send([action, self.server.daemon.pubkey_fingerprint], self.request) elif action == constants.REQUEST_STATUS: compression.Send([action, self._GetStatusMessage()], self.request) elif action == constants.ADD_TRUSTED: fingerprint = self.server.daemon.CopyToTrusted(data[1]) compression.Send([action, fingerprint], self.request) elif action == constants.INFORM_DURATION: test_key = data[1] test_duration = data[2] arch = data[3] mode = data[4] self.server.daemon.AddPerfData(test_key, test_duration, arch, mode) elif action == constants.UPDATE_PERF: address = data[1] perf = data[2] self.server.daemon.UpdatePeerPerformance(data[1], data[2]) rec.Advance() compression.Send(constants.END_OF_STREAM, self.request) def _GetStatusMessage(self): sio = StringIO.StringIO() sio.write("Peers:\n") with self.server.daemon.peer_list_lock: for p in self.server.daemon.peers: sio.write("%s\n" % p) sio.write("My own jobs: %d, relative performance: %.2f\n" % (self.server.daemon.jobs, self.server.daemon.relative_perf)) # Low-priority TODO: Return more 
information. Ideas: # - currently running anything, # - time since last job, # - time since last repository fetch # - number of workpackets/testcases handled since startup # - slowest test(s) result = sio.getvalue() sio.close() return result class LocalSocketServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): def __init__(self, daemon): SocketServer.TCPServer.__init__(self, ("localhost", constants.CLIENT_PORT), LocalHandler) self.daemon = daemon
unknown
codeparrot/codeparrot-clean
export type Writable<T> = T extends Readonly<infer U> ? U : T
typescript
github
https://github.com/tailwindlabs/tailwindcss
packages/tailwindcss/src/types.ts
# @(#) $Id: String.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $ # # Copyright (C) 2001 # Associated Universities, Inc. Washington DC, USA. # # Produced for the ALMA project # # This library is free software; you can redistribute it and/or modify it under # the terms of the GNU Library General Public License as published by the Free # Software Foundation; either version 2 of the License, or (at your option) any # later version. # # This library is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more # details. # # You should have received a copy of the GNU Library General Public License # along with this library; if not, write to the Free Software Foundation, Inc., # 675 Massachusetts Ave, Cambridge, MA 02139, USA. Correspondence concerning # ALMA should be addressed as follows: # # Internet email: alma-sw-admin@nrao.edu # "@(#) $Id: String.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $" # # who when what # -------- ---------- ---------------------------------------------- # dfugate 2004/07/21 Created. 
#------------------------------------------------------------------------------ ''' This module provides an implementation of the Pstring IDL interface ''' __version__ = "$Id: String.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $" #--REGULAR IMPORTS------------------------------------------------------------- from traceback import print_exc #--CORBA STUBS----------------------------------------------------------------- import ACS__POA #--ACS Imports----------------------------------------------------------------- from ACSImpl.GenericProperty import GenericProperty from ACSImpl.Monitors import Monitorstring #--GLOBALS--------------------------------------------------------------------- #------------------------------------------------------------------------------ #--P property------------------------------------------------------------------ #------------------------------------------------------------------------------ class Pstring(GenericProperty): ''' Properties can be derived from Pstring only if their IDL derives from ACS::Pstring. ''' #-------------------------------------------------------------------------- def __init__(self, name, charCompRef, devIORef): ''' Constructor Params: - name is the quite literally the name of the property - charCompRef is the characteristic component object which contains this property - devIORef is a reference to a DevIO to be used with this property Returns: Nothing Raises: Nothing. ''' GenericProperty.__init__(self, name, charCompRef, devIORef) return #-------------------------------------------------------------------------- def coerceToPropertyType(self, value=None): ''' Overriden. ''' #something went wrong. 
Return default value if value==None: return "" try: #coerce into an int type return eval("str(" + value + ")") except: #warn them about CDB access self.getLogger().logAlert("Unble to coerce '" + str(value) + "' into the correct type!") print_exc() #return an acceptable default value instead...an empty sequence return "" #-------------------------------------------------------------------------- def getMonitorObject(self, scheduler, timeoutID): ''' Helper method returns a monitor object of the correct type. ''' return Monitorstring(scheduler, timeoutID) #------------------------------------------------------------------------------ #--RO property----------------------------------------------------------------- #------------------------------------------------------------------------------ class ROstring(ACS__POA.ROstring, Pstring): ''' Properties can be derived from ROstring only if their IDL derives from ACS::ROstring. ''' #-------------------------------------------------------------------------- def __init__(self, name, charCompRef, devIORef=None): ''' Constructor Params: - name is the quite literally the name of the property - charCompRef is the characteristic component object which contains this property - devIORef is a reference to a DevIO to be used with this property Returns: Nothing Raises: Nothing. ''' Pstring.__init__(self, name, charCompRef, devIORef) return #----------------------------------------------------------------------------- #--RW property---------------------------------------------------------------- #----------------------------------------------------------------------------- class RWstring(ACS__POA.RWstring, ROstring): ''' Properties can be derived from ROstring only if their IDL derives from ACS::ROstring. 
''' #------------------------------------------------------------------------- def __init__(self, name, charCompRef, devIORef=None): ''' Constructor Params: - name is the quite literally the name of the property - charCompRef is the characteristic component object which contains this property - devIORef is a reference to a DevIO to be used with this property Returns: Nothing Raises: Nothing. ''' ROstring.__init__(self, name, charCompRef, devIORef) return #---------------------------------------------------------------------------
unknown
codeparrot/codeparrot-clean
""" Test computation of chi2 - fitting of 6 RVs on three spectra. This also shows that simplex wont get out of the local minimum easily, so more powerful fitting environments are needed if we want to get over big obstacles. """ import pyterpol rl = pyterpol.RegionList() rl.add_region(wmin=6325, wmax=6375) rl.add_region(wmin=6540, wmax=6600) sl = pyterpol.StarList() sl.add_component(component='primary', teff=17000., logg=4.0, rv=-100.0, z=1.0, vrot=40.0, lr=0.35) sl.add_component(component='secondary', teff=26000., logg=4.0, rv=100.0, z=1.0, vrot=140.0, lr=0.65) obs = [ dict(filename='a', error=0.001), dict(filename='b', error=0.001), dict(filename='c', error=0.001) ] ol = pyterpol.ObservedList() ol.add_observations(obs) # setup the class itf = pyterpol.Interface(sl=sl, ol=ol, rl=rl, debug=False, spectrum_by_spectrum=['rv']) itf.set_grid_properties(order=2) itf.setup() print itf print itf.list_comparisons() # setup fitted parameters # itf.set_parameter(parname='logg', component='secondary', fitted=True, vmin=3.5, vmax=4.5) # itf.set_parameter(parname='teff', component='secondary', fitted=True, vmin=20000., vmax=28000.) itf.set_parameter(parname='vrot', component='secondary', fitted=True, vmin=100., vmax=170.) itf.set_parameter(parname='rv', component='secondary', fitted=True, vmin=-100., vmax=100.) # itf.set_parameter(parname='lr', component='secondary', fitted=True, vmin=0.5, vmax=0.8) # itf.set_parameter(parname='logg', component='primary', fitted=True, vmin=3.5, vmax=4.5) # itf.set_parameter(parname='teff', component='primary', fitted=True, vmin=15000., vmax=20000.) itf.set_parameter(parname='vrot', component='primary', fitted=True, vmin=40., vmax=80.) itf.set_parameter(parname='rv', component='primary', fitted=True, vmin=-100., vmax=100.) 
# itf.set_parameter(parname='lr', component='primary', fitted=True, vmin=0.2, vmax=0.5) # fitpars = itf.get_fitted_parameters() # # # # choose a fitter itf.choose_fitter('nlopt_nelder_mead', fitparams=fitpars, ftol=1e-4) print itf # # first of all reduce the comparison list l = itf.get_comparisons() # # # have a look at the chi^2 init_pars = pyterpol.parlist_to_list(fitpars) init_chi2 = itf.compute_chi2(init_pars, l=l) print "Initial settings:", init_pars, init_chi2 # # # plot initial comparison itf.plot_all_comparisons(l=l, figname='initial') # # # # do the fitting itf.run_fit(l=l) # # # # # evaluate final parameters final_pars = pyterpol.parlist_to_list(itf.get_fitted_parameters()) final_chi2 = itf.compute_chi2(final_pars, l=l) print "Final settings:", final_pars, final_chi2 # # # # # # 3 plot initial comparison itf.plot_all_comparisons(l=l, figname='final_spectra') itf.accept_fit()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python import StringIO from xml.dom.minidom import Document import thedata def main(): with open("static/xml/clusters.xml", 'w') as opf: doc = Document() write_clusters(doc, opf) with open("static/xml/users.xml", 'w') as opf: doc = Document() write_users(doc, opf) opf.close def write_clusters(doc, outputfile): data = thedata.CLUSTER_DATA tags = thedata.CLUSTER_TAGS # First basenode clusters = doc.createElement("clusters") doc.appendChild(clusters) for k, datum in enumerate(data): cluster = doc.createElement("cluster") clusters.appendChild(cluster) cluster.setAttribute('id', "cluster{0:02d}".format(k+1)) # handling all properties other than hostnames for tag, d in zip(tags, datum[:-1]): elm = doc.createElement(tag) cluster.appendChild(elm) text = doc.createTextNode(str(d)) elm.appendChild(text) # start handling hostnames from here hostnames = doc.createElement("hostnames") cluster.appendChild(hostnames) for hostname in datum[-1]: hn = doc.createElement("hn") # hn: hostname hostnames.appendChild(hn) text = doc.createTextNode(hostname) hn.appendChild(text) # text node in xml should not contain redundant space or newlines # doc.writexml(outputfile, indent=" ", addindent=" ", newl="\n") doc.writexml(outputfile) # this is not pretty def write_users(doc, outputfile): data = thedata.USER_DATA tags = thedata.USER_TAGS users = doc.createElement("users") doc.appendChild(users) data = StringIO.StringIO(data) k = 0 for line in data: if line.strip(): user = doc.createElement("user") users.appendChild(user) user.setAttribute('id', "user{0:02d}".format(k+1)) sl = [i.strip() for i in line.split("=")] ssl = sl[1].split() for username in ssl: un = doc.createElement(tags[0]) # un: username user.appendChild(un) text = doc.createTextNode(username) un.appendChild(text) rn = doc.createElement(tags[1]) # rn: realname user.appendChild(rn) text = doc.createTextNode(sl[0]) rn.appendChild(text) k += 1 print doc.toprettyxml(indent=" ") doc.writexml(outputfile) # this is not 
pretty if __name__ == "__main__": main()
unknown
codeparrot/codeparrot-clean
#!/usr/bin/env python # Generates the syscall.h header by scanning for all system calls # defined in the syscalls.master file. MASTER_FILE='kernel/syscall/syscalls.master' OUTPUT_SRC='kernel/syscall/syscall_table.c' OUTPUT_HDR='include/sys/syscalls.h' def parse_master_file(fn): syscalls=dict() with open(fn) as f: lines = f.readlines() for line in lines: if line[0] is "#": continue words = line.split() syscalls[int(words[0])] = (words[1], int(words[2])) return syscalls def src_header(): return """#include <sys/syscalls.h> /* GENERATED -- DO NOT EDIT (see scripts/gen_syscalls.py) */ /* System call table for the kernel */ """ def hdr_header(): return """#ifndef _SYS_SYSCALLS_H_ #define _SYS_SYSCALLS_H_ #include <sys/syscall_constants.h> /* GENERATED -- DO NOT EDIT (see scripts/gen_syscalls.py) */ /* System call numbers */ """ def hdr_footer(): return "#endif" def syscalls_to_src_hdr(syscalls): if len(syscalls.keys()) is 0: return (None, None) table = 'const sysent_t syscalls[] =\n{\n' table += ' /* name, num_args, func_ptr */\n' defines = '/* System call numbers */\n' table_fmt = ' {{ "{name}", {num_args}, (void *)sys_{name} }}, /* {num} */\n' defines_fmt = '#define SYS_{name} {num}\n' max_num=-1 for num in syscalls.keys(): if num > max_num: max_num = num (name, num_args) = syscalls[num] if name is None or num_args is None: return (None, None) if num_args < 0 or num_args > 6: return (None, None) table += table_fmt.format(name=name, num=num, num_args=num_args) defines += defines_fmt.format(name=name, num=num) table += "};\n" defines += defines_fmt.format(name="MAXNR", num=max_num+1) src = (src_header() + "\n\n" +\ table) hdr = (hdr_header() + "\n\n" +\ defines + "\n\n" +\ hdr_footer() + "\n") return (src, hdr) def main(): syscalls = parse_master_file(MASTER_FILE) (source, header) = syscalls_to_src_hdr(syscalls) if header is None or source is None: print("ERROR: Failed to parse {}".format(MASTER_FILE)) sys.exit(1) with open(OUTPUT_HDR, 'w') as of: of.write(header) with 
open(OUTPUT_SRC, 'w') as of: of.write(source) if __name__ == "__main__": main()
unknown
codeparrot/codeparrot-clean
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function from thrift.protocol import TJSONProtocol from thrift.TSerialization import serialize from apache.aurora.client.cli import EXIT_OK, Noun, Verb from apache.aurora.client.cli.context import AuroraCommandContext from apache.aurora.client.cli.options import JSON_WRITE_OPTION, ROLE_ARGUMENT class GetQuotaCmd(Verb): @property def name(self): return 'get' @property def help(self): return "Print information about quotas for a role" def get_options(self): return [JSON_WRITE_OPTION, ROLE_ARGUMENT] def render_quota(self, write_json, quota_resp): def get_quota_json(quota): result = {} result['cpu'] = quota.numCpus result['ram'] = float(quota.ramMb) / 1024 result['disk'] = float(quota.diskMb) / 1024 return result def get_quota_str(quota): result = [] result.append(' CPU: %s' % quota.numCpus) result.append(' RAM: %f GB' % (float(quota.ramMb) / 1024)) result.append(' Disk: %f GB' % (float(quota.diskMb) / 1024)) return result if write_json: return serialize(quota_resp.result.getQuotaResult, protocol_factory=TJSONProtocol.TSimpleJSONProtocolFactory()) else: quota_result = quota_resp.result.getQuotaResult result = ['Allocated:'] result += get_quota_str(quota_result.quota) if quota_result.prodSharedConsumption: result.append('Production shared pool resources consumed:') result += get_quota_str(quota_result.prodSharedConsumption) if quota_result.prodDedicatedConsumption: result.append('Production dedicated pool 
resources consumed:') result += get_quota_str(quota_result.prodDedicatedConsumption) if quota_result.nonProdSharedConsumption: result.append('Non-production shared pool resources consumed:') result += get_quota_str(quota_result.nonProdSharedConsumption) if quota_result.nonProdDedicatedConsumption: result.append('Non-production dedicated pool resources consumed:') result += get_quota_str(quota_result.nonProdDedicatedConsumption) return '\n'.join(result) def execute(self, context): (cluster, role) = context.options.role api = context.get_api(cluster) resp = api.get_quota(role) context.log_response_and_raise( resp, err_msg='Error retrieving quota for role %s' % role) context.print_out(self.render_quota(context.options.write_json, resp)) return EXIT_OK class Quota(Noun): @property def name(self): return 'quota' @property def help(self): return "Work with quota settings for an Apache Aurora cluster" @classmethod def create_context(cls): return AuroraCommandContext() def __init__(self): super(Quota, self).__init__() self.register_verb(GetQuotaCmd())
unknown
codeparrot/codeparrot-clean
name: Labeling new issues on: issues: types: ['opened'] permissions: contents: read jobs: automate-issues-labels: permissions: issues: write runs-on: ubuntu-24.04 if: github.repository == 'llvm/llvm-project' steps: - uses: llvm/actions/issue-labeler@89a8cf80982d830faab019237860b344a6390c30 # main with: repo-token: ${{ secrets.ISSUE_SUBSCRIBER_TOKEN }} configuration-path: .github/new-issues-labeler.yml include-title: 1 include-body: 0 sync-labels: 0 enable-versioned-regex: 0
unknown
github
https://github.com/llvm/llvm-project
.github/workflows/new-issues.yml
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2015-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Tests for the LogFailHandler test helper.""" import logging import pytest import pytest_catchlog def test_log_debug(): logging.debug('foo') def test_log_warning(): with pytest.raises(pytest.fail.Exception): logging.warning('foo') def test_log_expected(caplog): with caplog.at_level(logging.ERROR): logging.error('foo') def test_log_expected_logger(caplog): logger = 'logfail_test_logger' with caplog.at_level(logging.ERROR, logger): logging.getLogger(logger).error('foo') def test_log_expected_wrong_level(caplog): with pytest.raises(pytest.fail.Exception): with caplog.at_level(logging.ERROR): logging.critical('foo') def test_log_expected_logger_wrong_level(caplog): logger = 'logfail_test_logger' with pytest.raises(pytest.fail.Exception): with caplog.at_level(logging.ERROR, logger): logging.getLogger(logger).critical('foo') def test_log_expected_wrong_logger(caplog): logger = 'logfail_test_logger' with pytest.raises(pytest.fail.Exception): with caplog.at_level(logging.ERROR, logger): logging.error('foo') @pytest.fixture def skipping_fixture(): pytest.skip("Skipping to test caplog workaround.") def test_caplog_bug_workaround_1(caplog, skipping_fixture): pass def 
test_caplog_bug_workaround_2(): """Make sure caplog_bug_workaround works correctly after a skipped test. There should be only one capturelog handler. """ caplog_handler = None for h in logging.getLogger().handlers: if isinstance(h, pytest_catchlog.LogCaptureHandler): assert caplog_handler is None caplog_handler = h
unknown
codeparrot/codeparrot-clean
# (C) 2012-2013, Michael DeHaan, <michael.dehaan@gmail.com> # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. class CallbackModule(object): """ this is an example ansible callback file that does nothing. You can drop other classes in the same directory to define your own handlers. Methods you do not use can be omitted. example uses include: logging, emailing, storing info, etc """ def on_any(self, *args, **kwargs): pass def runner_on_failed(self, host, res, ignore_errors=False): pass def runner_on_ok(self, host, res): pass def runner_on_error(self, host, msg): pass def runner_on_skipped(self, host, item=None): pass def runner_on_unreachable(self, host, res): pass def runner_on_no_hosts(self): pass def runner_on_async_poll(self, host, res, jid, clock): pass def runner_on_async_ok(self, host, res, jid): pass def runner_on_async_failed(self, host, res, jid): pass def playbook_on_start(self): pass def playbook_on_notify(self, host, handler): pass def playbook_on_no_hosts_matched(self): pass def playbook_on_no_hosts_remaining(self): pass def playbook_on_task_start(self, name, is_conditional): pass def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None): pass def playbook_on_setup(self): pass def playbook_on_import_for_host(self, host, imported_file): pass def playbook_on_not_import_for_host(self, 
host, missing_file): pass def playbook_on_play_start(self, pattern): pass def playbook_on_stats(self, stats): pass
unknown
codeparrot/codeparrot-clean
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Bring in all of the public TensorFlow interface into this module.""" from __future__ import absolute_import as _absolute_import from __future__ import division as _division from __future__ import print_function as _print_function import os as _os # pylint: disable=g-bad-import-order from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import # API IMPORTS PLACEHOLDER from tensorflow.python.tools import component_api_helper as _component_api_helper _component_api_helper.package_hook( parent_package_str=__name__, child_package_str=( 'tensorflow_estimator.python.estimator.api._v1.estimator')) _component_api_helper.package_hook( parent_package_str=__name__, child_package_str=('tensorflow.python.keras.api._v1.keras')) from tensorflow.python.platform import flags # pylint: disable=g-import-not-at-top app.flags = flags # pylint: disable=undefined-variable
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: linode short_description: create / delete / stop / restart an instance in Linode Public Cloud description: - creates / deletes a Linode Public Cloud instance and optionally waits for it to be 'running'. version_added: "1.3" options: state: description: - Indicate desired state of the resource choices: ['present', 'active', 'started', 'absent', 'deleted', 'stopped', 'restarted'] default: present api_key: description: - Linode API key default: null name: description: - Name to give the instance (alphanumeric, dashes, underscore) - To keep sanity on the Linode Web Console, name is prepended with LinodeID_ default: null type: string linode_id: description: - Unique ID of a linode server aliases: [ 'lid' ] default: null type: integer plan: description: - plan to use for the instance (Linode plan) default: null type: integer payment_term: description: - payment term to use for the instance (payment term in months) default: 1 type: integer choices: [1, 12, 24] password: description: - root password to apply to a new server (auto generated if missing) default: null type: string ssh_pub_key: description: - SSH public key applied to root user default: null type: string swap: description: - swap size in MB default: 512 type: integer distribution: description: - distribution to use for the 
instance (Linode Distribution) default: null type: integer datacenter: description: - datacenter to create an instance in (Linode Datacenter) default: null type: integer wait: description: - wait for the instance to be in state 'running' before returning default: "no" choices: [ "yes", "no" ] wait_timeout: description: - how long before wait gives up, in seconds default: 300 requirements: - "python >= 2.6" - "linode-python" - "pycurl" author: "Vincent Viallet (@zbal)" notes: - LINODE_API_KEY env variable can be used instead ''' EXAMPLES = ''' # Create a server - local_action: module: linode api_key: 'longStringFromLinodeApi' name: linode-test1 plan: 1 datacenter: 2 distribution: 99 password: 'superSecureRootPassword' ssh_pub_key: 'ssh-rsa qwerty' swap: 768 wait: yes wait_timeout: 600 state: present # Ensure a running server (create if missing) - local_action: module: linode api_key: 'longStringFromLinodeApi' name: linode-test1 linode_id: 12345678 plan: 1 datacenter: 2 distribution: 99 password: 'superSecureRootPassword' ssh_pub_key: 'ssh-rsa qwerty' swap: 768 wait: yes wait_timeout: 600 state: present # Delete a server - local_action: module: linode api_key: 'longStringFromLinodeApi' name: linode-test1 linode_id: 12345678 state: absent # Stop a server - local_action: module: linode api_key: 'longStringFromLinodeApi' name: linode-test1 linode_id: 12345678 state: stopped # Reboot a server - local_action: module: linode api_key: 'longStringFromLinodeApi' name: linode-test1 linode_id: 12345678 state: restarted ''' import time import os try: import pycurl HAS_PYCURL = True except ImportError: HAS_PYCURL = False try: from linode import api as linode_api HAS_LINODE = True except ImportError: HAS_LINODE = False def randompass(): ''' Generate a long random password that comply to Linode requirements ''' # Linode API currently requires the following: # It must contain at least two of these four character classes: # lower case letters - upper case letters - numbers - 
punctuation # we play it safe :) import random import string # as of python 2.4, this reseeds the PRNG from urandom random.seed() lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6)) upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6)) number = ''.join(random.choice(string.digits) for x in range(6)) punct = ''.join(random.choice(string.punctuation) for x in range(6)) p = lower + upper + number + punct return ''.join(random.sample(p, len(p))) def getInstanceDetails(api, server): ''' Return the details of an instance, populating IPs, etc. ''' instance = {'id': server['LINODEID'], 'name': server['LABEL'], 'public': [], 'private': []} # Populate with ips for ip in api.linode_ip_list(LinodeId=server['LINODEID']): if ip['ISPUBLIC'] and 'ipv4' not in instance: instance['ipv4'] = ip['IPADDRESS'] instance['fqdn'] = ip['RDNS_NAME'] if ip['ISPUBLIC']: instance['public'].append({'ipv4': ip['IPADDRESS'], 'fqdn': ip['RDNS_NAME'], 'ip_id': ip['IPADDRESSID']}) else: instance['private'].append({'ipv4': ip['IPADDRESS'], 'fqdn': ip['RDNS_NAME'], 'ip_id': ip['IPADDRESSID']}) return instance def linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id, payment_term, password, ssh_pub_key, swap, wait, wait_timeout): instances = [] changed = False new_server = False servers = [] disks = [] configs = [] jobs = [] # See if we can match an existing server details with the provided linode_id if linode_id: # For the moment we only consider linode_id as criteria for match # Later we can use more (size, name, etc.) 
and update existing servers = api.linode_list(LinodeId=linode_id) # Attempt to fetch details about disks and configs only if servers are # found with linode_id if servers: disks = api.linode_disk_list(LinodeId=linode_id) configs = api.linode_config_list(LinodeId=linode_id) # Act on the state if state in ('active', 'present', 'started'): # TODO: validate all the plan / distribution / datacenter are valid # Multi step process/validation: # - need linode_id (entity) # - need disk_id for linode_id - create disk from distrib # - need config_id for linode_id - create config (need kernel) # Any create step triggers a job that need to be waited for. if not servers: for arg in ('name', 'plan', 'distribution', 'datacenter'): if not eval(arg): module.fail_json(msg='%s is required for active state' % arg) # Create linode entity new_server = True try: res = api.linode_create(DatacenterID=datacenter, PlanID=plan, PaymentTerm=payment_term) linode_id = res['LinodeID'] # Update linode Label to match name api.linode_update(LinodeId=linode_id, Label='%s_%s' % (linode_id, name)) # Save server servers = api.linode_list(LinodeId=linode_id) except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) if not disks: for arg in ('name', 'linode_id', 'distribution'): if not eval(arg): module.fail_json(msg='%s is required for active state' % arg) # Create disks (1 from distrib, 1 for SWAP) new_server = True try: if not password: # Password is required on creation, if not provided generate one password = randompass() if not swap: swap = 512 # Create data disk size = servers[0]['TOTALHD'] - swap if ssh_pub_key: res = api.linode_disk_createfromdistribution( LinodeId=linode_id, DistributionID=distribution, rootPass=password, rootSSHKey=ssh_pub_key, Label='%s data disk (lid: %s)' % (name, linode_id), Size=size) else: res = api.linode_disk_createfromdistribution( LinodeId=linode_id, DistributionID=distribution, rootPass=password, Label='%s data disk (lid: %s)' % (name, 
linode_id), Size=size) jobs.append(res['JobID']) # Create SWAP disk res = api.linode_disk_create(LinodeId=linode_id, Type='swap', Label='%s swap disk (lid: %s)' % (name, linode_id), Size=swap) jobs.append(res['JobID']) except Exception as e: # TODO: destroy linode ? module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) if not configs: for arg in ('name', 'linode_id', 'distribution'): if not eval(arg): module.fail_json(msg='%s is required for active state' % arg) # Check architecture for distrib in api.avail_distributions(): if distrib['DISTRIBUTIONID'] != distribution: continue arch = '32' if distrib['IS64BIT']: arch = '64' break # Get latest kernel matching arch for kernel in api.avail_kernels(): if not kernel['LABEL'].startswith('Latest %s' % arch): continue kernel_id = kernel['KERNELID'] break # Get disk list disks_id = [] for disk in api.linode_disk_list(LinodeId=linode_id): if disk['TYPE'] == 'ext3': disks_id.insert(0, str(disk['DISKID'])) continue disks_id.append(str(disk['DISKID'])) # Trick to get the 9 items in the list while len(disks_id) < 9: disks_id.append('') disks_list = ','.join(disks_id) # Create config new_server = True try: api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id, Disklist=disks_list, Label='%s config' % name) configs = api.linode_config_list(LinodeId=linode_id) except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) # Start / Ensure servers are running for server in servers: # Refresh server state server = api.linode_list(LinodeId=server['LINODEID'])[0] # Ensure existing servers are up and running, boot if necessary if server['STATUS'] != 1: res = api.linode_boot(LinodeId=linode_id) jobs.append(res['JobID']) changed = True # wait here until the instances are up wait_timeout = time.time() + wait_timeout while wait and wait_timeout > time.time(): # refresh the server details server = api.linode_list(LinodeId=server['LINODEID'])[0] # status: # -2: Boot failed # 1: Running if server['STATUS'] in 
(-2, 1): break time.sleep(5) if wait and wait_timeout <= time.time(): # waiting took too long module.fail_json(msg = 'Timeout waiting on %s (lid: %s)' % (server['LABEL'], server['LINODEID'])) # Get a fresh copy of the server details server = api.linode_list(LinodeId=server['LINODEID'])[0] if server['STATUS'] == -2: module.fail_json(msg = '%s (lid: %s) failed to boot' % (server['LABEL'], server['LINODEID'])) # From now on we know the task is a success # Build instance report instance = getInstanceDetails(api, server) # depending on wait flag select the status if wait: instance['status'] = 'Running' else: instance['status'] = 'Starting' # Return the root password if this is a new box and no SSH key # has been provided if new_server and not ssh_pub_key: instance['password'] = password instances.append(instance) elif state in ('stopped'): for arg in ('name', 'linode_id'): if not eval(arg): module.fail_json(msg='%s is required for active state' % arg) if not servers: module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id)) for server in servers: instance = getInstanceDetails(api, server) if server['STATUS'] != 2: try: res = api.linode_shutdown(LinodeId=linode_id) except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) instance['status'] = 'Stopping' changed = True else: instance['status'] = 'Stopped' instances.append(instance) elif state in ('restarted'): for arg in ('name', 'linode_id'): if not eval(arg): module.fail_json(msg='%s is required for active state' % arg) if not servers: module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id)) for server in servers: instance = getInstanceDetails(api, server) try: res = api.linode_reboot(LinodeId=server['LINODEID']) except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) instance['status'] = 'Restarting' changed = True instances.append(instance) elif state in ('absent', 'deleted'): for server in servers: instance = getInstanceDetails(api, 
server) try: api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True) except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) instance['status'] = 'Deleting' changed = True instances.append(instance) # Ease parsing if only 1 instance if len(instances) == 1: module.exit_json(changed=changed, instance=instances[0]) module.exit_json(changed=changed, instances=instances) def main(): module = AnsibleModule( argument_spec = dict( state = dict(default='present', choices=['active', 'present', 'started', 'deleted', 'absent', 'stopped', 'restarted']), api_key = dict(), name = dict(type='str'), plan = dict(type='int'), distribution = dict(type='int'), datacenter = dict(type='int'), linode_id = dict(type='int', aliases=['lid']), payment_term = dict(type='int', default=1, choices=[1, 12, 24]), password = dict(type='str'), ssh_pub_key = dict(type='str'), swap = dict(type='int', default=512), wait = dict(type='bool', default=True), wait_timeout = dict(default=300), ) ) if not HAS_PYCURL: module.fail_json(msg='pycurl required for this module') if not HAS_LINODE: module.fail_json(msg='linode-python required for this module') state = module.params.get('state') api_key = module.params.get('api_key') name = module.params.get('name') plan = module.params.get('plan') distribution = module.params.get('distribution') datacenter = module.params.get('datacenter') linode_id = module.params.get('linode_id') payment_term = module.params.get('payment_term') password = module.params.get('password') ssh_pub_key = module.params.get('ssh_pub_key') swap = module.params.get('swap') wait = module.params.get('wait') wait_timeout = int(module.params.get('wait_timeout')) # Setup the api_key if not api_key: try: api_key = os.environ['LINODE_API_KEY'] except KeyError as e: module.fail_json(msg = 'Unable to load %s' % e.message) # setup the auth try: api = linode_api.Api(api_key) api.test_echo() except Exception as e: module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE']) 
linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id, payment_term, password, ssh_pub_key, swap, wait, wait_timeout) # import module snippets from ansible.module_utils.basic import * if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
// Need InlineOnly for efficient bytecode on Android @file:Suppress("INVISIBLE_REFERENCE", "INVISIBLE_MEMBER", "NOTHING_TO_INLINE") package kotlinx.coroutines import java.util.concurrent.locks.* import kotlin.internal.InlineOnly internal abstract class AbstractTimeSource { abstract fun currentTimeMillis(): Long abstract fun nanoTime(): Long abstract fun wrapTask(block: Runnable): Runnable abstract fun trackTask() abstract fun unTrackTask() abstract fun registerTimeLoopThread() abstract fun unregisterTimeLoopThread() abstract fun parkNanos(blocker: Any, nanos: Long) // should return immediately when nanos <= 0 abstract fun unpark(thread: Thread) } // For tests only // @JvmField: Don't use JvmField here to enable R8 optimizations via "assumenosideeffects" private var timeSource: AbstractTimeSource? = null // TODO: without this, there's a compilation error. Why? internal inline fun mockTimeSource(source: AbstractTimeSource?) { timeSource = source } @InlineOnly internal inline fun currentTimeMillis(): Long = timeSource?.currentTimeMillis() ?: System.currentTimeMillis() @InlineOnly internal actual inline fun nanoTime(): Long = timeSource?.nanoTime() ?: System.nanoTime() @InlineOnly internal inline fun wrapTask(block: Runnable): Runnable = timeSource?.wrapTask(block) ?: block @InlineOnly internal inline fun trackTask() { timeSource?.trackTask() } @InlineOnly internal inline fun unTrackTask() { timeSource?.unTrackTask() } @InlineOnly internal inline fun registerTimeLoopThread() { timeSource?.registerTimeLoopThread() } @InlineOnly internal inline fun unregisterTimeLoopThread() { timeSource?.unregisterTimeLoopThread() } @InlineOnly internal inline fun parkNanos(blocker: Any, nanos: Long) { timeSource?.parkNanos(blocker, nanos) ?: LockSupport.parkNanos(blocker, nanos) } @InlineOnly internal inline fun unpark(thread: Thread) { timeSource?.unpark(thread) ?: LockSupport.unpark(thread) }
kotlin
github
https://github.com/Kotlin/kotlinx.coroutines
kotlinx-coroutines-core/jvm/src/AbstractTimeSource.kt
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2015, Peter Mounce <public@neverrunwithscissors.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # this is a windows documentation stub. actual code lives in the .ps1 # file of the same name ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: win_webpicmd version_added: "2.0" short_description: Installs packages using Web Platform Installer command-line description: - Installs packages using Web Platform Installer command-line (U(http://www.iis.net/learn/install/web-platform-installer/web-platform-installer-v4-command-line-webpicmdexe-rtw-release)). - Must be installed and present in PATH (see M(win_chocolatey) module; 'webpicmd' is the package name, and you must install 'lessmsi' first too)? - Install IIS first (see M(win_feature) module). notes: - Accepts EULAs and suppresses reboot - you will need to check manage reboots yourself (see M(win_reboot) module) options: name: description: - Name of the package to be installed. required: yes author: - Peter Mounce (@petemounce) ''' EXAMPLES = r''' # Install URLRewrite2. win_webpicmd: name: URLRewrite2 '''
unknown
codeparrot/codeparrot-clean
//===--- DebugTypeInfo.h - Type Info for Debugging --------------*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// // // This file defines the data structure that holds all the debug info // we want to emit for types. // //===----------------------------------------------------------------------===// #ifndef SWIFT_IRGEN_DEBUGTYPEINFO_H #define SWIFT_IRGEN_DEBUGTYPEINFO_H #include "IRGen.h" #include "swift/AST/Decl.h" #include "swift/AST/Types.h" namespace llvm { class Type; } namespace swift { class SILDebugScope; class SILGlobalVariable; namespace irgen { class TypeInfo; class IRGenModule; /// This data structure holds everything needed to emit debug info /// for a type. class DebugTypeInfo { protected: /// The type we need to emit may be different from the type /// mentioned in the Decl, for example, stripped of qualifiers. TypeBase *Type = nullptr; /// Needed to determine the size of basic types and to determine /// the storage type for undefined variables. std::optional<uint32_t> NumExtraInhabitants; Alignment Align; bool DefaultAlignment = true; bool IsMetadataType = false; bool IsFixedBuffer = false; bool IsForwardDecl = false; public: DebugTypeInfo() = default; DebugTypeInfo(swift::Type Ty, Alignment AlignInBytes = Alignment(1), bool HasDefaultAlignment = true, bool IsMetadataType = false, bool IsFixedBuffer = false, std::optional<uint32_t> NumExtraInhabitants = {}); /// Create type for a local variable. static DebugTypeInfo getLocalVariable(VarDecl *Decl, swift::Type Ty, const TypeInfo &Info, IRGenModule &IGM); /// Create type for global type metadata. 
static DebugTypeInfo getGlobalMetadata(swift::Type Ty, Size size, Alignment align); /// Create type for an artificial metadata variable. static DebugTypeInfo getTypeMetadata(swift::Type Ty, Size size, Alignment align); /// Create a forward declaration for a type whose size is unknown. static DebugTypeInfo getForwardDecl(swift::Type Ty); /// Create a standalone type from a TypeInfo object. static DebugTypeInfo getFromTypeInfo(swift::Type Ty, const TypeInfo &Info, IRGenModule &IGM); /// Global variables. static DebugTypeInfo getGlobal(SILGlobalVariable *GV, IRGenModule &IGM); static DebugTypeInfo getGlobalFixedBuffer(SILGlobalVariable *GV, Alignment align, IRGenModule &IGM); /// ObjC classes. static DebugTypeInfo getObjCClass(ClassDecl *theClass, Size size, Alignment align); /// Error type. static DebugTypeInfo getErrorResult(swift::Type Ty, IRGenModule &IGM); TypeBase *getType() const { return Type; } TypeDecl *getDecl() const; // Determine whether this type is an Archetype dependent on a generic context. bool isContextArchetype() const { if (auto archetype = Type->getWithoutSpecifierType()->getAs<ArchetypeType>()) { return !isa<OpaqueTypeArchetypeType>(archetype); } return false; } Alignment getAlignment() const { return Align; } bool isNull() const { return !Type; } bool isMetadataType() const { return IsMetadataType; } bool hasDefaultAlignment() const { return DefaultAlignment; } bool isFixedBuffer() const { return IsFixedBuffer; } bool isForwardDecl() const { return IsForwardDecl; } std::optional<uint32_t> getNumExtraInhabitants() const { return NumExtraInhabitants; } bool operator==(DebugTypeInfo T) const; bool operator!=(DebugTypeInfo T) const; #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) LLVM_DUMP_METHOD void dump() const; #endif }; /// A DebugTypeInfo with a defined size (that may be 0). 
class CompletedDebugTypeInfo : public DebugTypeInfo { Size::int_type SizeInBits; CompletedDebugTypeInfo(DebugTypeInfo DbgTy, Size::int_type SizeInBits) : DebugTypeInfo(DbgTy), SizeInBits(SizeInBits) {} public: static std::optional<CompletedDebugTypeInfo> get(DebugTypeInfo DbgTy, std::optional<Size::int_type> SizeInBits) { if (!SizeInBits) return {}; return CompletedDebugTypeInfo(DbgTy, *SizeInBits); } static std::optional<CompletedDebugTypeInfo> getFromTypeInfo(swift::Type Ty, const TypeInfo &Info, IRGenModule &IGM, std::optional<Size::int_type> SizeInBits = {}); Size::int_type getSizeInBits() const { return SizeInBits; } }; } } namespace llvm { // Dense map specialization. template <> struct DenseMapInfo<swift::irgen::DebugTypeInfo> { static swift::irgen::DebugTypeInfo getEmptyKey() { return {}; } static swift::irgen::DebugTypeInfo getTombstoneKey() { return swift::irgen::DebugTypeInfo( llvm::DenseMapInfo<swift::TypeBase *>::getTombstoneKey(), swift::irgen::Alignment(), /* HasDefaultAlignment = */ false); } static unsigned getHashValue(swift::irgen::DebugTypeInfo Val) { return DenseMapInfo<swift::CanType>::getHashValue(Val.getType()); } static bool isEqual(swift::irgen::DebugTypeInfo LHS, swift::irgen::DebugTypeInfo RHS) { return LHS == RHS; } }; } #endif
c
github
https://github.com/apple/swift
lib/IRGen/DebugTypeInfo.h
use anyhow::Result; use async_trait::async_trait; use next_custom_transforms::transforms::react_server_components::*; use swc_core::{ common::FileName, ecma::{ast::Program, visit::VisitWith}, }; use turbo_tasks::Vc; use turbo_tasks_fs::FileSystemPath; use turbopack::module_options::ModuleRule; use turbopack_ecmascript::{CustomTransformer, TransformContext}; use super::get_ecma_transform_rule; use crate::{next_config::NextConfig, next_shared::transforms::EcmascriptTransformStage}; /// Returns a rule which applies the Next.js react server components transform. /// This transform owns responsibility to assert various import / usage /// conditions against each code's context. Refer below table how we are /// applying this rules against various contexts. /// /// +-----------------+---------+--------------------+ /// | Context\Enabled | Enabled | isReactServerLayer | /// +-----------------+---------+--------------------+ /// | SSR | true | false | /// | Client | true | false | /// | Middleware | false | false | /// | Api | false | false | /// | RSC | true | true | /// | Pages | true | false | /// +-----------------+---------+--------------------+ pub async fn get_next_react_server_components_transform_rule( next_config: Vc<NextConfig>, is_react_server_layer: bool, app_dir: Option<FileSystemPath>, ) -> Result<ModuleRule> { let enable_mdx_rs = next_config.mdx_rs().await?.is_some(); let cache_components_enabled = *next_config.enable_cache_components().await?; let use_cache_enabled = *next_config.enable_use_cache().await?; let taint_enabled = *next_config.enable_taint().await?; Ok(get_ecma_transform_rule( Box::new(NextJsReactServerComponents::new( is_react_server_layer, cache_components_enabled, use_cache_enabled, taint_enabled, app_dir, )), enable_mdx_rs, EcmascriptTransformStage::Preprocess, )) } #[derive(Debug)] struct NextJsReactServerComponents { is_react_server_layer: bool, cache_components_enabled: bool, use_cache_enabled: bool, taint_enabled: bool, app_dir: 
Option<FileSystemPath>, } impl NextJsReactServerComponents { fn new( is_react_server_layer: bool, cache_components_enabled: bool, use_cache_enabled: bool, taint_enabled: bool, app_dir: Option<FileSystemPath>, ) -> Self { Self { is_react_server_layer, cache_components_enabled, use_cache_enabled, taint_enabled, app_dir, } } } #[async_trait] impl CustomTransformer for NextJsReactServerComponents { #[tracing::instrument(level = tracing::Level::TRACE, name = "next_react_server_components", skip_all)] async fn transform(&self, program: &mut Program, ctx: &TransformContext<'_>) -> Result<()> { let file_name = if ctx.file_path_str.is_empty() { FileName::Anon } else { FileName::Real(ctx.file_path_str.into()) }; let mut visitor = server_components_assert( file_name, Config::WithOptions(Options { is_react_server_layer: self.is_react_server_layer, cache_components_enabled: self.cache_components_enabled, use_cache_enabled: self.use_cache_enabled, taint_enabled: self.taint_enabled, }), self.app_dir.as_ref().map(|path| path.path.clone().into()), ); program.visit_with(&mut visitor); Ok(()) } }
rust
github
https://github.com/vercel/next.js
crates/next-core/src/next_shared/transforms/next_react_server_components.rs
'use action'; export async function foo() {}
javascript
github
https://github.com/vercel/next.js
crates/next-custom-transforms/tests/errors/server-actions/server-graph/27/output.js
package network import ( "encoding" "fmt" "net" ) // A HardwareAddr represents a physical hardware address. // It implements [encoding.TextMarshaler] and [encoding.TextUnmarshaler] // in the absence of go.dev/issue/29678. type HardwareAddr net.HardwareAddr var ( _ encoding.TextMarshaler = (HardwareAddr)(nil) _ encoding.TextUnmarshaler = (*HardwareAddr)(nil) _ fmt.Stringer = (HardwareAddr)(nil) ) func (m *HardwareAddr) UnmarshalText(text []byte) error { if len(text) == 0 { *m = nil return nil } hw, err := net.ParseMAC(string(text)) if err != nil { return err } *m = HardwareAddr(hw) return nil } func (m HardwareAddr) MarshalText() ([]byte, error) { return []byte(net.HardwareAddr(m).String()), nil } func (m HardwareAddr) String() string { return net.HardwareAddr(m).String() }
go
github
https://github.com/moby/moby
api/types/network/hwaddr.go
#!/usr/bin/python ###################################### # Originally written by Ryan Hoffmann from Wolynes group, # Modified by Weihua Zheng 06/01/2011 # For oligomers, make sure the fasta file has all the chains. ###################################################### # The code has three main parts, BLAST fragment sequence # in each window and output, Find homologs and Write memories. ####################################################################### import sys import os import re import operator from IndexPdb import * from Pdb2GroLib import * from Bio import SeqIO from Bio.PDB.PDBParser import PDBParser if len(sys.argv) != 6: print "\n######################################################################" print "*.py database-prefix file.fasta N_mem brain_damage_flag (1/0.5/0 for yes/exclude self/no) frag_length > logfile" print "######################################################################" sys.exit() # read arguments into variables database = sys.argv[1] fasta = sys.argv[2] N_mem = int(sys.argv[3]) brain_damage = float(sys.argv[4]) fragmentLength = int(sys.argv[5]) handle = open(fasta, "rU") weight = 1 # weight of each fragment memoriesPerPosition = N_mem # can be any integer > 0 # needs to be large enough that PSI-BLAST returns at least memoriesPerPosition EvalueThreshold = 10000 cutoff_identical = 90 # Consider >90% identity as itself, < 90% as homologs. 
# set up directories # myhome = os.environ.get("HOME") # pdbDir = myhome + "/opt/script/PDBs/" # indexDir = myhome + "/opt/script/Indices/" # fLibDir = "fraglib/" # fLibDir = myhome + "/opt/script/Gros/" # pdbSeqres = "/Users/weilu/opt/script/pdb_seqres.txt" openawsem_location = os.environ.get("OPENAWSEM_LOCATION") pdbDir = openawsem_location + "/PDBs/" indexDir = openawsem_location + "/Indices/" fLibDir = "./fraglib/" fLibDir = openawsem_location + "/Gros/" pdbSeqres = openawsem_location + "/pdb_seqres.txt" if not os.path.exists(indexDir): os.makedirs(indexDir) if not os.path.exists(pdbDir): os.makedirs(pdbDir) if not os.path.exists(fLibDir): os.makedirs(fLibDir) if not os.path.exists(pdbDir) or not os.path.exists(fLibDir) or not os.path.exists(indexDir): print "Can't create necessary directories" sys.exit() # set up out LAMWmatch = open('frags.mem', 'w') LAMWmatch.write('[Target]' + "\n") LAMWmatch.write("query" + "\n\n" + '[Memories]' + "\n") log_match = open('log.mem', 'w') # loop1, fasta file includes one or multiple chains residue_base = 0 # a shift of residue index set for multiple chains. # loop1, multiple entries in fasta file for multiple chains. for record in SeqIO.parse(handle, "fasta"): # Part I, BLAST fragments if(len(record.seq) < fragmentLength): print "Exception::query sequence is shorter than " + str(fragmentLength) + " residues. Exit." 
sys.exit() query = str(record.name)[0:4] print 'processing sequence:', record.name match = open('prepFrags.match', 'w') match.write(query + "\n") # FRAGMENT GENERATION LOOP iterations = len(record.seq) - fragmentLength + \ 1 # number of sliding windows for i in range(1, iterations + 1): # loop2, run through all sliding windows in a given chain rangeStart = i - 1 rangeEnd = i + fragmentLength - 1 print "window position:::" + str(i) subrange = str(record[rangeStart:rangeEnd].seq) print "fragment subrange:::" + subrange fragment = open('fragment.fasta', 'w') fragment.write(subrange) fragment.close() # a temporary file for BLAST # submit PSI-BLAST, run "psiblast -help" for more details of output # format (outfmt) exeline = "psiblast -num_iterations 5 -comp_based_stats 0 -word_size 2 -evalue " + \ str(EvalueThreshold) #exeline+=" -outfmt '6 sseqid qstart qend sstart send qseq sseq length gaps bitscore evalue' -matrix PAM30 -threshold 9 -window_size 0" exeline += " -outfmt '6 sseqid qstart qend sstart send qseq sseq length gaps bitscore evalue' -matrix BLOSUM62 -threshold 9 -window_size 0" exeline += " -db " + database + " -query fragment.fasta" print "executing:::" + exeline psiblastOut = os.popen(exeline).read() psiblastOut = psiblastOut.splitlines() # now an array N_blast = len(psiblastOut) # print psiblastOut if psiblastOut[N_blast - 1] == 'Search has CONVERGED!': N_blast = N_blast - 2 # exclude last two lines for the text N_start_new = 1 line_count = 0 e_score_old = 0 pdbID_old = 'BBBBB' # set initial variables for processing PSI-BLAST output for line in psiblastOut: # For PSI-BLAST with multiple Iterations, find N_start_new for the starting position of the output alignments of the final round line_count += 1 if line_count >= N_blast: break that = line.split() pdbID = that[0] e_score = float(that[10]) if e_score < e_score_old: N_start_new = line_count if pdbID != pdbID_old: e_score_old = e_score pdbID_old = pdbID print "Number of searched PDBs: ", N_blast, 
N_start_new # convert psiblastOut to a list, sorted by evalue psilist = [None] * (N_blast - N_start_new + 1) line_count = 0 kk = 0 for line in psiblastOut: line_count += 1 if line_count < N_start_new: continue if line_count > N_blast: break that = line.split() list_tmp = list() for ii in range(0, 11): # PSI-BLAST output has 11 columns if not ii == 10: list_tmp.append(that[ii]) # column 10 is evalue else: list_tmp.append(float(that[ii])) psilist[kk] = list_tmp kk += 1 print list_tmp psilist.sort(lambda x, y: cmp(x[10], y[10])) # write output alignments to match file for jj in range(0, N_blast - N_start_new + 1): this = psilist[jj] this[10] = str(this[10]) this.append(str(i)) queryStart = int(this[1]) + rangeStart + residue_base queryEnd = int(this[2]) + rangeStart + residue_base this[1] = str(queryStart) this[2] = str(queryEnd) out = ' '.join(this) out += '\n' gaps = this[8] if(gaps == '0'): match.write(out) # skip gapped alignments # loop2 close match.close() match = open('prepFrags.match', 'r') # match is read-only now # list unique PDB IDs for downloading later matchlines = list() keys = {} for line in match.readlines(): matchlines.append(line) entries = line.split() pdbfull = str(entries[0]) keys[pdbfull] = 1 unique = keys.keys() # pdbparse=PDBParser(PERMISSIVE=1) # Part II, BLAST the whole sequence to find homologs print record.seq fragment = open('fragment.fasta', 'w') fragment.write(str(record.seq)) fragment.close() homo = {} failed_pdb = {} homo_count = {} for pdbfull in unique: pdbID = pdbfull[0:4].lower() pdbIDsecond = pdbfull[1:2].lower() pdbIDthird = pdbfull[2:3].lower() chainID = pdbfull[4:5].lower() failed_pdb[pdbID] = 0 homo[pdbID] = 0 homo_count[pdbID] = 0 # download PDBs if not exist ##from script 'pdbget' (original author # unknown) if not os.path.isfile(pdbDir + pdbID.upper() + ".pdb"): exeline = "wget ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/" exeline += pdbIDsecond + pdbIDthird + "/pdb" + pdbID + ".ent.gz" os.system(exeline) 
print(exeline) os.system("nice gunzip pdb" + pdbID + ".ent.gz; mv pdb" + pdbID + ".ent " + pdbDir + pdbID.upper() + ".pdb") if not os.path.isfile(pdbDir + pdbID.upper() + ".pdb"): failed_pdb[pdbID] = 1 print ":::Cannot build PDB for PDB ID, failed to download:" + pdbID.upper() # exit() # blast the whole sequence to identify homologs Evalue 0.005 exeline = "psiblast -num_iterations 1 -word_size 3 -evalue 0.005" exeline += " -outfmt '6 sseqid slen bitscore score evalue pident' -matrix BLOSUM62 -db " + \ database + " -query fragment.fasta" print "finding homologs" print "executing::: " + exeline homoOut = os.popen(exeline).read() homoOut = homoOut.splitlines() # now an array for line in homoOut: entries = line.split() print "homologues: ", entries if len(entries): pdbfull = entries[0] pdbID = pdbfull[0:4].lower() if brain_damage == 0.5: identity = float(entries[5]) # check identity, add only self (>90% identity) to homo[] if identity > cutoff_identical: homo[pdbID] = 1 homo_count[pdbID] = 0 else: homo[pdbID] = 1 homo_count[pdbID] = 0 # Part III, Write memories iter = 0 count = {} for i in range(1, iterations + 1): count[str(i)] = 0 # count number of mem per fragments Missing_count = 0 Missing_pdb = {} fastFile = "./tmp.fasta" for line in matchlines: # loop3 iter += 1 if not(iter == 1): entries = line.split() windows_index_str = entries[11] if count[windows_index_str] >= N_mem: continue pdbfull = str(entries[0]) pdbID = pdbfull[0:4].lower() pdbIDsecond = pdbfull[1:2].lower() pdbIDthird = pdbfull[2:3].lower() chainID = pdbfull[4:5].lower() groFile = fLibDir + pdbID + chainID + ".gro" groName = pdbID + chainID + ".gro" pdbFile = pdbDir + pdbID.upper() + ".pdb" indexFile = indexDir + pdbID + chainID + ".index" if failed_pdb[pdbID]: continue # failed-downloaded ones are still in matchlines, need to be ignored if homo[pdbID]: if brain_damage == 0: print pdbID, " Using a homolog." 
homo_count[pdbID] += 1 if brain_damage: print pdbID, " is a homolog, discard" continue residue_list = entries[6] # sseq res_Start = int(entries[3]) res_End = int(entries[4]) print pdbFile, "start: ", res_Start, "end: ", res_End # Do I have the index file? If No, write it if not os.path.isfile(indexFile): # generate fasta file if not os.path.isfile(pdbSeqres): print pdbSeqres print "Need to download pdb_seqres.txt from PDB!" print "ftp://ftp.wwpdb.org/pub/pdb/derived_data/pdb_seqres.txt" print "Copy to $HOME/opt/script/" exit() fastaFile = pdbID + '_' + chainID.upper() exeline = "grep -A1 " + fastaFile + " " + pdbSeqres + " > ./tmp.fasta" print "generating fastaFile: ", fastaFile os.popen(exeline) if os.path.getsize('tmp.fasta') > 0: writeIndexFile(fastFile, pdbFile, indexFile, chainID.upper()) print "Writing indexFile: ", indexFile else: print indexFile, "exist, no need to create." if not os.path.isfile(indexFile): print "Can't create index file, ignore and go on!" continue # Read index file index = open(indexFile, 'r') # create new_index for frag_seq starting position line_count = 0 flag = ' ' index_shift = 0 # read and get the flag indexlines = list() for index_line in index.readlines(): indexlines.append(index_line) line_count += 1 tmp_line = index_line.split() if line_count == 1: # first line is the flag flag = tmp_line[0] # index_line if flag == "SHIFT" and line_count == 2: index_shift = int(tmp_line[0]) print "shift: ", tmp_line[0] r_list = '' # list() if flag == "SKIP": Missing_pdb[pdbID] = 1 Missing_count += 1 print "***********", flag print "SKIP pdb:", pdbID + chainID continue elif flag == "FULLMATCH": new_index = int(entries[3]) r_list = residue_list print "***********", flag elif flag == "SHIFT": new_index = int(entries[3]) + index_shift r_list = residue_list print "***********", flag elif flag == "INDEXED": print "***********", flag # check if there is gaps for the fragment of sequence count_flag = 0 line_count1 = 0 for index_line in indexlines: 
line_count1 += 1 if not line_count1 == 1: index_entries = index_line.split() seq_id = int(index_entries[0]) res_id = int(index_entries[1]) if seq_id < res_Start: continue if seq_id > res_End: break if res_id == -1: print "Missing residues in PDB: ", pdbID + chainID break if count_flag == 0: new_index = res_id count_flag += 1 res_nm = index_entries[2] r_list += res_nm else: print "Skip wrongly written index file ", indexFile continue if r_list != residue_list: print "Missing residues: ", pdbID + chainID, residue_list, " incomplete: ", r_list Missing_pdb[pdbID] = 1 Missing_count += 1 continue if os.path.isfile(pdbFile): if not os.path.isfile(groFile): Pdb2Gro(pdbFile, groFile, chainID.upper()) print "converting...... " + pdbFile + " --> " + groFile else: print "Exist " + groFile count[windows_index_str] += 1 length = res_End - res_Start + 1 out = groFile + ' ' + entries[1] + ' ' # queue start out += str(new_index) + ' ' + str(length) + ' ' + \ str(weight) + "\n" # frag_seq start LAMWmatch.write(out) out1 = windows_index_str + ' ' + str(count[windows_index_str]) out1 += ' ' + entries[9] + ' ' + entries[10] + ' ' + groName out1 += ' ' + entries[1] + ' ' + str(new_index) + ' ' + str( length) + ' ' + str(weight) + ' 0' + entries[5] + ' 0' + entries[6] + "\n" log_match.write(out1) else: print pdbFile, "does not exist! Go figure..." 
# loop3 ends print "HOMOLOGS:::" total_homo_count = 0 for line in homoOut: entries = line.split() print "sseqid slen bitscore score evalue pident" print entries pdbfull = entries[0] pdbID = pdbfull[0:4].lower() if brain_damage == 0: total_homo_count += homo_count[pdbID] print "Homolog count =", homo_count[pdbID] if brain_damage == 0: print "Total homolog count = ", total_homo_count, round(total_homo_count / iterations, 2) print "memories per position that is fewer than expected:" for i in count: if count[i] < N_mem: print i, count[i] print "Number of blasted PDB: ", len(failed_pdb) print "Number of failed downloaded PDB: ", sum(failed_pdb.values()) print "Number of PDB with Missing atoms: ", len(Missing_pdb) print "Discarded fragments with Missing atoms: ", Missing_count residue_base += len(record.seq) for line in homoOut: entries = line.split() if len(entries): pdbfull = entries[0] pdbID = pdbfull[0:4].lower() print pdbID # loop1 close
unknown
codeparrot/codeparrot-clean
# ERPNext: Copyright 2013 Web Notes Technologies Pvt Ltd # GNU General Public License. See "license.txt" from __future__ import unicode_literals import webnotes import home def boot_session(bootinfo): """boot session - send website info if guest""" import webnotes import webnotes.model.doc bootinfo['custom_css'] = webnotes.conn.get_value('Style Settings', None, 'custom_css') or '' bootinfo['website_settings'] = webnotes.model.doc.getsingle('Website Settings') if webnotes.session['user']=='Guest': bootinfo['website_menus'] = webnotes.conn.sql("""select label, url, custom_page, parent_label, parentfield from `tabTop Bar Item` where parent='Website Settings' order by idx asc""", as_dict=1) bootinfo['startup_code'] = \ webnotes.conn.get_value('Website Settings', None, 'startup_code') else: bootinfo['letter_heads'] = get_letter_heads() import webnotes.model.doctype bootinfo['notification_settings'] = webnotes.doc("Notification Control", "Notification Control").get_values() # if no company, show a dialog box to create a new company bootinfo['setup_complete'] = webnotes.conn.sql("""select name from tabCompany limit 1""") and 'Yes' or 'No' # load subscription info import conf for key in ['max_users', 'expires_on', 'max_space', 'status', 'developer_mode']: if hasattr(conf, key): bootinfo[key] = getattr(conf, key) bootinfo['docs'] += webnotes.conn.sql("""select name, default_currency, cost_center, cost_center as 'cost_center_other_charges' from `tabCompany`""", as_dict=1, update={"doctype":":Company"}) def get_letter_heads(): """load letter heads with startup""" import webnotes ret = webnotes.conn.sql("""select name, content from `tabLetter Head` where ifnull(disabled,0)=0""") return dict(ret)
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- import fauxfactory import pytest from cfme.automate.explorer import Domain, Namespace, Class, Instance, Method from cfme.automate.simulation import simulate from cfme.common.vm import VM from cfme.fixtures import pytest_selenium as sel from cfme.infrastructure.virtual_machines import Vm # For Vm.Snapshot from utils import testgen from utils.conf import credentials from utils.log import logger from utils.path import data_path from utils.ssh import SSHClient from utils.wait import wait_for pytestmark = [pytest.mark.long_running, pytest.mark.tier(2)] pytest_generate_tests = testgen.generate(testgen.infra_providers, scope="module") @pytest.fixture(scope="module") def vm_name(): return "test_snpsht_" + fauxfactory.gen_alphanumeric() @pytest.fixture(scope="module") def domain(request): dom = Domain(fauxfactory.gen_alpha(), enabled=True) dom.create() request.addfinalizer(dom.delete) return dom @pytest.fixture(scope="module") def test_vm(setup_provider_modscope, provider, vm_name): """Fixture to provision appliance to the provider being tested if necessary""" vm = VM.factory(vm_name, provider, template_name=provider.data['full_template']['name']) if not provider.mgmt.does_vm_exist(vm_name): vm.create_on_provider(find_in_cfme=True, allow_skip="default") return vm def new_snapshot(test_vm): new_snapshot = Vm.Snapshot( name="snpshot_" + fauxfactory.gen_alphanumeric(8), description="snapshot", memory=False, parent_vm=test_vm) return new_snapshot @pytest.mark.uncollectif(lambda provider: provider.type != 'virtualcenter') def test_snapshot_crud(test_vm, provider): """Tests snapshot crud Metadata: test_flag: snapshot, provision """ snapshot = new_snapshot(test_vm) snapshot.create() snapshot.delete() @pytest.mark.uncollectif(lambda provider: provider.type != 'virtualcenter') def test_delete_all_snapshots(test_vm, provider): """Tests snapshot removal Metadata: test_flag: snapshot, provision """ snapshot1 = new_snapshot(test_vm) snapshot1.create() snapshot2 = 
new_snapshot(test_vm) snapshot2.create() snapshot2.delete_all() @pytest.mark.meta(blockers=[1333566]) @pytest.mark.uncollectif(lambda provider: provider.type != 'virtualcenter') def test_verify_revert_snapshot(test_vm, provider, soft_assert, register_event, request): """Tests revert snapshot Metadata: test_flag: snapshot, provision """ snapshot1 = new_snapshot(test_vm) ip = snapshot1.vm.provider.mgmt.get_ip_address(snapshot1.vm.name) ssh_kwargs = { 'username': credentials[provider.data['full_template']['creds']]['username'], 'password': credentials[provider.data['full_template']['creds']]['password'], 'hostname': ip } ssh = SSHClient(**ssh_kwargs) ssh.run_command('touch snapshot1.txt') snapshot1.create() ssh.run_command('touch snapshot2.txt') snapshot2 = new_snapshot(test_vm) snapshot2.create() snapshot1.revert_to() # Wait for the snapshot to become active logger.info('Waiting for vm %s to become active', snapshot1.name) wait_for(snapshot1.wait_for_snapshot_active, num_sec=300, delay=20, fail_func=sel.refresh) test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=720) register_event( test_vm.provider.type, "vm", test_vm.name, ["vm_power_on_req", "vm_power_on"]) test_vm.power_control_from_cfme(option=test_vm.POWER_ON, cancel=False) pytest.sel.force_navigate( 'infrastructure_provider', context={'provider': test_vm.provider}) test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_ON, timeout=900) soft_assert(test_vm.find_quadicon().state == 'currentstate-on') soft_assert( test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm not running") client = SSHClient(**ssh_kwargs) request.addfinalizer(test_vm.delete_from_provider) try: wait_for(lambda: client.run_command('test -e snapshot2.txt')[1] == 0, fail_condition=False) logger.info('Revert to snapshot %s successful', snapshot1.name) except: logger.info('Revert to snapshot %s Failed', snapshot1.name) @pytest.mark.uncollectif(lambda provider: provider.type != 'virtualcenter') 
@pytest.mark.meta(blockers=[1247664], automates=[1247664]) def test_create_snapshot_via_ae(request, domain, test_vm): """This test checks whether the vm.create_snapshot works in AE. Prerequisities: * A VMware provider * A VM that has been discovered by CFME Steps: * Clone the Request class inside the System namespace into a new domain * Add a method named ``snapshot`` and insert the provided code there. * Add an instance named ``snapshot`` and set the methd from previous step as ``meth5`` * Run the simulation of the method against the VM, preferably setting ``snap_name`` to something that can be checked * Wait until snapshot with such name appears. """ # PREPARE file = data_path.join("ui").join("automate").join("test_create_snapshot_via_ae.rb") with file.open("r") as f: method_contents = f.read() miq_domain = Domain("ManageIQ (Locked)") miq_class = Class("Request", namespace=Namespace("System", domain=miq_domain)) request_cls = miq_class.copy_to(domain) request.addfinalizer(request_cls.delete) method = Method("snapshot", data=method_contents, cls=request_cls) method.create() request.addfinalizer(method.delete) instance = Instance("snapshot", values={"meth5": "snapshot"}, cls=request_cls) instance.create() request.addfinalizer(instance.delete) # SIMULATE snap_name = fauxfactory.gen_alpha() snapshot = Vm.Snapshot(name=snap_name, parent_vm=test_vm) simulate( instance="Request", request="snapshot", attribute=["VM and Instance", test_vm.name], execute_methods=True, avp={"snap_name": snap_name}) wait_for(snapshot.does_snapshot_exist, timeout="2m", delay=10) # Clean up if it appeared snapshot.delete()
unknown
codeparrot/codeparrot-clean
--- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/reference/current/rest-api-compatibility.html applies_to: stack: all navigation_title: Compatibility --- # Elasticsearch API compatibility [rest-api-compatibility] To help REST clients mitigate the impact of non-compatible (breaking) API changes, {{es}} provides a per-request, opt-in API compatibility mode. {{es}} REST APIs are generally stable across versions. However, some improvements require changes that are not compatible with previous versions. When an API is targeted for removal or is going to be changed in a non-compatible way, the original API is deprecated for one or more releases. Using the original API triggers a deprecation warning in the logs. This enables you to review the deprecation logs and take the appropriate actions before upgrading. However, in some cases it is difficult to identify all places where deprecated APIs are being used. This is where REST API compatibility can help. When you request REST API compatibility, {{es}} attempts to honor the previous REST API version. {{es}} attempts to apply the most compatible URL, request body, response body, and HTTP parameters. For compatible APIs, this has no effect— it only impacts calls to APIs that have breaking changes from the previous version. An error can still be returned in compatibility mode if {{es}} cannot automatically resolve the incompatibilities. ::::{important} REST API compatibility does not guarantee the same behavior as the prior version. It instructs {{es}} to automatically resolve any incompatibilities so the request can be processed instead of returning an error. :::: REST API compatibility should be a bridge to smooth out the upgrade process, not a long term strategy. REST API compatibility is only honored across one major version: honor 8.x requests/responses from 9.x. 
When you submit requests using REST API compatibility and {{es}} resolves the incompatibility, a message is written to the deprecation log with the category "compatible_api". Review the deprecation log to identify any gaps in usage and fully supported features. ## Requesting REST API compatibility [request-rest-api-compatibility] REST API compatibility is implemented per request via the Accept and/or Content-Type headers. For example: ```text Accept: "application/vnd.elasticsearch+json;compatible-with=8" Content-Type: "application/vnd.elasticsearch+json;compatible-with=8" ``` The Accept header is always required and the Content-Type header is only required when a body is sent with the request. The following values are valid when communicating with a 8.x or 9.x {{es}} server: ```text "application/vnd.elasticsearch+json;compatible-with=8" "application/vnd.elasticsearch+yaml;compatible-with=8" "application/vnd.elasticsearch+smile;compatible-with=8" "application/vnd.elasticsearch+cbor;compatible-with=8" ``` The [officially supported {{es}} clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html) can enable REST API compatibility for all requests. To enable REST API compatibility for all requests received by {{es}} set the environment variable `ELASTIC_CLIENT_APIVERSIONING` to true. ## REST API compatibility workflow [_rest_api_compatibility_workflow] To leverage REST API compatibility during an upgrade from the last 8.x to {{version.stack}}: 1. Upgrade your [{{es}} clients](https://www.elastic.co/guide/en/elasticsearch/client/index.html) to the latest 8.x version and enable REST API compatibility. 2. Use the [Upgrade Assistant](docs-content://deploy-manage/upgrade/prepare-to-upgrade/upgrade-assistant.md) to review all critical issues and explore the deprecation logs. Some critical issues might be mitigated by REST API compatibility. 3. Resolve all critical issues before proceeding with the upgrade. 4. Upgrade Elasticsearch to {{version.stack}}. 5. 
Review the deprecation logs for entries with the category `compatible_api`. Review the workflow associated with the requests that relied on compatibility mode. 6. Upgrade your {{es}} clients to 9.x and resolve compatibility issues manually where needed.
unknown
github
https://github.com/elastic/elasticsearch
docs/reference/elasticsearch/rest-apis/compatibility.md
import ast
import json
import logging

from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.file import TemporaryDirectory
from docker import Client, tls


class DockerOperator(BaseOperator):
    """
    Execute a command inside a docker container.

    A temporary directory is created on the host and mounted into a
    container to allow storing files that together exceed the default
    disk size of 10GB in a container. The path to the mounted directory
    can be accessed via the environment variable ``AIRFLOW_TMP_DIR``.

    :param image: Docker image from which to create the container.
    :type image: str
    :param api_version: Remote API version.
    :type api_version: str
    :param command: Command to be run in the container.
    :type command: str or list
    :param cpus: Number of CPUs to assign to the container.
        This value gets multiplied with 1024. See
        https://docs.docker.com/engine/reference/run/#cpu-share-constraint
    :type cpus: float
    :param docker_url: URL of the host running the docker daemon.
    :type docker_url: str
    :param environment: Environment variables to set in the container.
    :type environment: dict
    :param force_pull: Pull the docker image on every run.
    :type force_pull: bool
    :param mem_limit: Maximum amount of memory the container can use. Either
        a float value, which represents the limit in bytes, or a string like
        ``128m`` or ``1g``.
    :type mem_limit: float or str
    :param network_mode: Network mode for the container.
    :type network_mode: str
    :param tls_ca_cert: Path to a PEM-encoded certificate authority to secure
        the docker connection.
    :type tls_ca_cert: str
    :param tls_client_cert: Path to the PEM-encoded certificate used to
        authenticate docker client.
    :type tls_client_cert: str
    :param tls_client_key: Path to the PEM-encoded key used to authenticate
        docker client.
    :type tls_client_key: str
    :param tls_hostname: Hostname to match against the docker server
        certificate or False to disable the check.
    :type tls_hostname: str or bool
    :param tls_ssl_version: Version of SSL to use when communicating with
        docker daemon.
    :type tls_ssl_version: str
    :param tmp_dir: Mount point inside the container to a temporary directory
        created on the host by the operator. The path is also made available
        via the environment variable ``AIRFLOW_TMP_DIR`` inside the container.
    :type tmp_dir: str
    :param user: Default user inside the docker container.
    :type user: int or str
    :param volumes: List of volumes to mount into the container, e.g.
        ``['/host/path:/container/path', '/host/path2:/container/path2:ro']``.
    :param xcom_push: Does the stdout will be pushed to the next step using
        XCom. The default is False.
    :type xcom_push: bool
    :param xcom_all: Push all the stdout or just the last line. The default
        is False (last line).
    :type xcom_all: bool
    """
    template_fields = ('command',)
    template_ext = ('.sh', '.bash',)

    @apply_defaults
    def __init__(
            self,
            image,
            api_version=None,
            command=None,
            cpus=1.0,
            docker_url='unix://var/run/docker.sock',
            environment=None,
            force_pull=False,
            mem_limit=None,
            network_mode=None,
            tls_ca_cert=None,
            tls_client_cert=None,
            tls_client_key=None,
            tls_hostname=None,
            tls_ssl_version=None,
            tmp_dir='/tmp/airflow',
            user=None,
            volumes=None,
            xcom_push=False,
            xcom_all=False,
            *args,
            **kwargs):
        super(DockerOperator, self).__init__(*args, **kwargs)
        self.api_version = api_version
        self.command = command
        self.cpus = cpus
        self.docker_url = docker_url
        self.environment = environment or {}
        self.force_pull = force_pull
        self.image = image
        self.mem_limit = mem_limit
        self.network_mode = network_mode
        self.tls_ca_cert = tls_ca_cert
        self.tls_client_cert = tls_client_cert
        self.tls_client_key = tls_client_key
        self.tls_hostname = tls_hostname
        self.tls_ssl_version = tls_ssl_version
        self.tmp_dir = tmp_dir
        self.user = user
        self.volumes = volumes or []
        self.xcom_push = xcom_push
        self.xcom_all = xcom_all
        # Populated in execute(); on_kill() checks them before use.
        self.cli = None
        self.container = None

    def execute(self, context):
        """Pull the image if needed, run the container and stream its logs.

        Raises AirflowException when the container exits non-zero. When
        ``xcom_push`` is enabled, returns either all container stdout
        (``xcom_all``) or the last log line.
        """
        # Lazy %-style args so the message is only formatted when emitted.
        logging.info('Starting docker container from image %s', self.image)

        tls_config = None
        if self.tls_ca_cert and self.tls_client_cert and self.tls_client_key:
            tls_config = tls.TLSConfig(
                ca_cert=self.tls_ca_cert,
                client_cert=(self.tls_client_cert, self.tls_client_key),
                verify=True,
                ssl_version=self.tls_ssl_version,
                assert_hostname=self.tls_hostname
            )
            # TLS requires an https:// scheme instead of tcp://.
            self.docker_url = self.docker_url.replace('tcp://', 'https://')

        self.cli = Client(base_url=self.docker_url, version=self.api_version,
                          tls=tls_config)

        # Default to the ':latest' tag when none was given.
        if ':' not in self.image:
            image = self.image + ':latest'
        else:
            image = self.image

        if self.force_pull or len(self.cli.images(name=image)) == 0:
            logging.info('Pulling docker image %s', image)
            # Each streamed line is a JSON document describing pull progress.
            for pull_line in self.cli.pull(image, stream=True):
                output = json.loads(pull_line)
                logging.info(output['status'])

        # Docker expects CPU shares as an integer multiple of 1024.
        cpu_shares = int(round(self.cpus * 1024))

        with TemporaryDirectory(prefix='airflowtmp') as host_tmp_dir:
            self.environment['AIRFLOW_TMP_DIR'] = self.tmp_dir
            self.volumes.append('{0}:{1}'.format(host_tmp_dir, self.tmp_dir))

            self.container = self.cli.create_container(
                command=self.get_command(),
                cpu_shares=cpu_shares,
                environment=self.environment,
                host_config=self.cli.create_host_config(
                    binds=self.volumes,
                    network_mode=self.network_mode),
                image=image,
                mem_limit=self.mem_limit,
                user=self.user
            )
            self.cli.start(self.container['Id'])

            # Keep the last streamed line around for the xcom return below.
            line = ''
            for line in self.cli.logs(container=self.container['Id'],
                                      stream=True):
                logging.info(line.strip())

            exit_code = self.cli.wait(self.container['Id'])
            if exit_code != 0:
                raise AirflowException('docker container failed')

            if self.xcom_push:
                return self.cli.logs(container=self.container['Id']) \
                    if self.xcom_all else str(line.strip())

    def get_command(self):
        """Return the command, parsing a string shaped like a Python list
        (e.g. ``"['echo', 'hi']"``) into an actual list via a safe literal
        eval."""
        if self.command is not None and self.command.strip().startswith('['):
            commands = ast.literal_eval(self.command)
        else:
            commands = self.command
        return commands

    def on_kill(self):
        """Stop the running container, if one was started."""
        if self.cli is not None:
            logging.info('Stopping docker container')
            self.cli.stop(self.container['Id'])
unknown
codeparrot/codeparrot-clean
# coding: utf-8 """ MailMojo API v1 of the MailMojo API # noqa: E501 OpenAPI spec version: 1.1.0 Contact: hjelp@mailmojo.no Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import io import json import logging import re import ssl import certifi # python 2 and python 3 compatibility library import six from six.moves.urllib.parse import urlencode try: import urllib3 except ImportError: raise ImportError('Swagger python client requires urllib3.') logger = logging.getLogger(__name__) class RESTResponse(io.IOBase): def __init__(self, resp): self.urllib3_response = resp self.status = resp.status self.reason = resp.reason self.data = resp.data def getheaders(self): """Returns a dictionary of the response headers.""" return self.urllib3_response.getheaders() def getheader(self, name, default=None): """Returns a given response header.""" return self.urllib3_response.getheader(name, default) class RESTClientObject(object): def __init__(self, configuration, pools_size=4, maxsize=None): # urllib3.PoolManager will pass all kw parameters to connectionpool # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501 # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501 # maxsize is the number of requests to host that are allowed in parallel # noqa: E501 # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501 # cert_reqs if configuration.verify_ssl: cert_reqs = ssl.CERT_REQUIRED else: cert_reqs = ssl.CERT_NONE # ca_certs if configuration.ssl_ca_cert: ca_certs = configuration.ssl_ca_cert else: # if not set certificate file, use Mozilla's root certificates. 
ca_certs = certifi.where() addition_pool_args = {} if configuration.assert_hostname is not None: addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501 if maxsize is None: if configuration.connection_pool_maxsize is not None: maxsize = configuration.connection_pool_maxsize else: maxsize = 4 # https pool manager if configuration.proxy: self.pool_manager = urllib3.ProxyManager( num_pools=pools_size, maxsize=maxsize, cert_reqs=cert_reqs, ca_certs=ca_certs, cert_file=configuration.cert_file, key_file=configuration.key_file, proxy_url=configuration.proxy, **addition_pool_args ) else: self.pool_manager = urllib3.PoolManager( num_pools=pools_size, maxsize=maxsize, cert_reqs=cert_reqs, ca_certs=ca_certs, cert_file=configuration.cert_file, key_file=configuration.key_file, **addition_pool_args ) def request(self, method, url, query_params=None, headers=None, body=None, post_params=None, _preload_content=True, _request_timeout=None): """Perform requests. :param method: http request method :param url: http request url :param query_params: query parameters in the url :param headers: http request headers :param body: request json body, for `application/json` :param post_params: request post parameters, `application/x-www-form-urlencoded` and `multipart/form-data` :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. """ method = method.upper() assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', 'PATCH', 'OPTIONS'] if post_params and body: raise ValueError( "body parameter cannot be used with post_params parameter." 
) post_params = post_params or {} headers = headers or {} timeout = None if _request_timeout: if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821 timeout = urllib3.Timeout(total=_request_timeout) elif (isinstance(_request_timeout, tuple) and len(_request_timeout) == 2): timeout = urllib3.Timeout( connect=_request_timeout[0], read=_request_timeout[1]) if 'Content-Type' not in headers: headers['Content-Type'] = 'application/json' try: # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE` if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']: if query_params: url += '?' + urlencode(query_params) if re.search('json', headers['Content-Type'], re.IGNORECASE): request_body = '{}' if body is not None: request_body = json.dumps(body) r = self.pool_manager.request( method, url, body=request_body, preload_content=_preload_content, timeout=timeout, headers=headers) elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501 r = self.pool_manager.request( method, url, fields=post_params, encode_multipart=False, preload_content=_preload_content, timeout=timeout, headers=headers) elif headers['Content-Type'] == 'multipart/form-data': # must del headers['Content-Type'], or the correct # Content-Type which generated by urllib3 will be # overwritten. del headers['Content-Type'] r = self.pool_manager.request( method, url, fields=post_params, encode_multipart=True, preload_content=_preload_content, timeout=timeout, headers=headers) # Pass a `string` parameter directly in the body to support # other content types than Json when `body` argument is # provided in serialized form elif isinstance(body, str): request_body = body r = self.pool_manager.request( method, url, body=request_body, preload_content=_preload_content, timeout=timeout, headers=headers) else: # Cannot generate the request from given parameters msg = """Cannot prepare a request message for provided arguments. 
Please check that your arguments match declared content type.""" raise ApiException(status=0, reason=msg) # For `GET`, `HEAD` else: r = self.pool_manager.request(method, url, fields=query_params, preload_content=_preload_content, timeout=timeout, headers=headers) except urllib3.exceptions.SSLError as e: msg = "{0}\n{1}".format(type(e).__name__, str(e)) raise ApiException(status=0, reason=msg) if _preload_content: r = RESTResponse(r) # In the python 3, the response.data is bytes. # we need to decode it to string. if six.PY3: r.data = r.data.decode('utf8') # log response body logger.debug("response body: %s", r.data) if not 200 <= r.status <= 299: raise ApiException(http_resp=r) return r def GET(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None): return self.request("GET", url, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, query_params=query_params) def HEAD(self, url, headers=None, query_params=None, _preload_content=True, _request_timeout=None): return self.request("HEAD", url, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, query_params=query_params) def OPTIONS(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): return self.request("OPTIONS", url, headers=headers, query_params=query_params, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) def DELETE(self, url, headers=None, query_params=None, body=None, _preload_content=True, _request_timeout=None): return self.request("DELETE", url, headers=headers, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) def POST(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): return self.request("POST", url, headers=headers, query_params=query_params, 
post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) def PUT(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): return self.request("PUT", url, headers=headers, query_params=query_params, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) def PATCH(self, url, headers=None, query_params=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): return self.request("PATCH", url, headers=headers, query_params=query_params, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) class ApiException(Exception): def __init__(self, status=None, reason=None, http_resp=None): if http_resp: self.status = http_resp.status self.reason = http_resp.reason self.body = http_resp.data self.headers = http_resp.getheaders() else: self.status = status self.reason = reason self.body = None self.headers = None def __str__(self): """Custom error messages for exception""" error_message = "({0})\n"\ "Reason: {1}\n".format(self.status, self.reason) if self.headers: error_message += "HTTP response headers: {0}\n".format( self.headers) if self.body: error_message += "HTTP response body: {0}\n".format(self.body) return error_message
unknown
codeparrot/codeparrot-clean
/**
 * Copyright 2013, GitHub, Inc
 * Copyright 2009-2013, Daniel Lemire, Cliff Moon,
 *	David McIntosh, Robert Becho, Google Inc. and Veronika Zenz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __EWOK_BITMAP_H__
#define __EWOK_BITMAP_H__

struct strbuf;
typedef uint64_t eword_t;
#define BITS_IN_EWORD (sizeof(eword_t) * 8)

/**
 * Do not use __builtin_popcountll. The GCC implementation
 * is notoriously slow on all platforms.
 *
 * See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36041
 */
static inline uint32_t ewah_bit_popcount64(uint64_t x)
{
	x = (x & 0x5555555555555555ULL) + ((x >> 1) & 0x5555555555555555ULL);
	x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
	x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x >> 4) & 0x0F0F0F0F0F0F0F0FULL);
	return (x * 0x0101010101010101ULL) >> 56;
}

/*
 * __builtin_ctzll was not available until 3.4.0.
 *
 * BUGFIX: the version test previously used the bare identifier
 * __GNUC_MINOR (no trailing underscores), which is undefined and thus
 * evaluates to 0 in the preprocessor, making the minor-version check
 * always false. The predefined macro is __GNUC_MINOR__.
 */
#if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 3))
#define ewah_bit_ctz64(x) __builtin_ctzll(x)
#else
static inline int ewah_bit_ctz64(uint64_t x)
{
	int n = 0;
	if ((x & 0xffffffff) == 0) { x >>= 32; n += 32; }
	if ((x &     0xffff) == 0) { x >>= 16; n += 16; }
	if ((x &       0xff) == 0) { x >>=  8; n +=  8; }
	if ((x &        0xf) == 0) { x >>=  4; n +=  4; }
	if ((x &        0x3) == 0) { x >>=  2; n +=  2; }
	if ((x &        0x1) == 0) { x >>=  1; n +=  1; }
	return n + !x;
}
#endif

struct ewah_bitmap {
	eword_t *buffer;
	size_t buffer_size;
	size_t alloc_size;
	size_t bit_size;
	eword_t *rlw;
};

typedef void (*ewah_callback)(size_t pos, void *);

struct ewah_bitmap *ewah_pool_new(void);
void ewah_pool_free(struct ewah_bitmap *self);

/**
 * Allocate a new EWAH Compressed bitmap
 */
struct ewah_bitmap *ewah_new(void);

/**
 * Free all the memory of the bitmap
 */
void ewah_free(struct ewah_bitmap *self);

int ewah_serialize_to(struct ewah_bitmap *self,
		      int (*write_fun)(void *out, const void *buf, size_t len),
		      void *out);
int ewah_serialize_strbuf(struct ewah_bitmap *self, struct strbuf *);

ssize_t ewah_read_mmap(struct ewah_bitmap *self, const void *map, size_t len);

uint32_t ewah_checksum(struct ewah_bitmap *self);

/**
 * Call the given callback with the position of every single bit
 * that has been set on the bitmap.
 *
 * This is an efficient operation that does not fully decompress
 * the bitmap.
 */
void ewah_each_bit(struct ewah_bitmap *self, ewah_callback callback,
		   void *payload);

/**
 * Set a given bit on the bitmap.
 *
 * The bit at position `pos` will be set to true. Because of the
 * way that the bitmap is compressed, a set bit cannot be unset
 * later on.
 *
 * Furthermore, since the bitmap uses streaming compression, bits
 * can only set incrementally.
 *
 * E.g.
 *		ewah_set(bitmap, 1); // ok
 *		ewah_set(bitmap, 76); // ok
 *		ewah_set(bitmap, 77); // ok
 *		ewah_set(bitmap, 8712800127); // ok
 *		ewah_set(bitmap, 25); // failed, assert raised
 */
void ewah_set(struct ewah_bitmap *self, size_t i);

struct ewah_iterator {
	const eword_t *buffer;
	size_t buffer_size;

	size_t pointer;
	eword_t compressed, literals;
	eword_t rl, lw;
	int b;
};

/**
 * Initialize a new iterator to run through the bitmap in uncompressed form.
 *
 * The iterator can be stack allocated. The underlying bitmap must not be freed
 * before the iteration is over.
 *
 * E.g.
 *
 *		struct ewah_bitmap *bitmap = ewah_new();
 *		struct ewah_iterator it;
 *
 *		ewah_iterator_init(&it, bitmap);
 */
void ewah_iterator_init(struct ewah_iterator *it, struct ewah_bitmap *parent);

/**
 * Yield every single word in the bitmap in uncompressed form. This is:
 * yield single words (32-64 bits) where each bit represents an actual
 * bit from the bitmap.
 *
 * Return: true if a word was yield, false if there are no words left
 */
int ewah_iterator_next(eword_t *next, struct ewah_iterator *it);

struct ewah_or_iterator {
	struct ewah_iterator *its;
	size_t nr;
};

void ewah_or_iterator_init(struct ewah_or_iterator *it,
			   struct ewah_bitmap **parents, size_t nr);

int ewah_or_iterator_next(eword_t *next, struct ewah_or_iterator *it);

void ewah_or_iterator_release(struct ewah_or_iterator *it);

void ewah_xor(
	struct ewah_bitmap *ewah_i,
	struct ewah_bitmap *ewah_j,
	struct ewah_bitmap *out);

/**
 * Direct word access
 */
size_t ewah_add_empty_words(struct ewah_bitmap *self, int v, size_t number);
void ewah_add_dirty_words(
	struct ewah_bitmap *self, const eword_t *buffer,
	size_t number, int negate);
size_t ewah_add(struct ewah_bitmap *self, eword_t word);

/**
 * Uncompressed, old-school bitmap that can be efficiently compressed
 * into an `ewah_bitmap`.
 */
struct bitmap {
	eword_t *words;
	size_t word_alloc;
};

struct bitmap *bitmap_new(void);
struct bitmap *bitmap_word_alloc(size_t word_alloc);
struct bitmap *bitmap_dup(const struct bitmap *src);
void bitmap_set(struct bitmap *self, size_t pos);
void bitmap_unset(struct bitmap *self, size_t pos);
int bitmap_get(struct bitmap *self, size_t pos);
void bitmap_free(struct bitmap *self);
int bitmap_equals(struct bitmap *self, struct bitmap *other);
int bitmap_equals_ewah(struct bitmap *self, struct ewah_bitmap *other);

/*
 * Both `bitmap_is_subset()` and `ewah_bitmap_is_subset()` return 1 if the set
 * of bits in 'self' are a subset of the bits in 'other'. Returns 0 otherwise.
 */
int bitmap_is_subset(struct bitmap *self, struct bitmap *other);
int ewah_bitmap_is_subset(struct ewah_bitmap *self, struct bitmap *other);

struct ewah_bitmap * bitmap_to_ewah(struct bitmap *bitmap);
struct bitmap *ewah_to_bitmap(struct ewah_bitmap *ewah);

void bitmap_and_not(struct bitmap *self, struct bitmap *other);
void bitmap_or_ewah(struct bitmap *self, struct ewah_bitmap *other);
void bitmap_or(struct bitmap *self, const struct bitmap *other);

size_t bitmap_popcount(struct bitmap *self);
size_t ewah_bitmap_popcount(struct ewah_bitmap *self);
int bitmap_is_empty(struct bitmap *self);

#endif
c
github
https://github.com/git/git
ewah/ewok.h
// Compiler fixture: a component type selected by a ternary and bound to a
// capitalized local (`Foo`) must stay usable as a JSX tag inside a render
// prop passed as a child. The exact code shape is the point of this test;
// do not restructure it.
import {RenderPropAsChild, StaticText1, StaticText2} from 'shared-runtime';

function Component(props: {showText1: boolean}) {
  // Capitalized local so JSX resolves `<Foo />` to this variable.
  const Foo = props.showText1 ? StaticText1 : StaticText2;
  return <RenderPropAsChild items={[() => <Foo key="0" />]} />;
}

export const FIXTURE_ENTRYPOINT = {
  fn: Component,
  params: [{showText1: false}],
};
typescript
github
https://github.com/facebook/react
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/jsx-ternary-local-variable.tsx
#!/usr/bin/python # -*- coding: utf-8 -*- """ Virt management features Copyright 2007, 2012 Red Hat, Inc Michael DeHaan <michael.dehaan@gmail.com> Seth Vidal <skvidal@fedoraproject.org> This software may be freely redistributed under the terms of the GNU general public license. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ DOCUMENTATION = ''' --- module: virt short_description: Manages virtual machines supported by libvirt description: - Manages virtual machines supported by I(libvirt). version_added: "0.2" options: name: description: - name of the guest VM being managed. Note that VM must be previously defined with xml. required: true default: null aliases: [] state: description: - Note that there may be some lag for state requests like C(shutdown) since these refer only to VM states. After starting a guest, it may not be immediately accessible. required: false choices: [ "running", "shutdown", "destroyed", "paused" ] default: "no" command: description: - in addition to state management, various non-idempotent commands are available. 
See examples required: false choices: ["create","status", "start", "stop", "pause", "unpause", "shutdown", "undefine", "destroy", "get_xml", "autostart", "freemem", "list_vms", "info", "nodeinfo", "virttype", "define"] uri: description: - libvirt connection uri required: false defaults: qemu:///system xml: description: - XML document used with the define command required: false default: null requirements: - "python >= 2.6" - "libvirt-python" author: - "Ansible Core Team" - "Michael DeHaan" - "Seth Vidal" ''' EXAMPLES = ''' # a playbook task line: - virt: name=alpha state=running # /usr/bin/ansible invocations ansible host -m virt -a "name=alpha command=status" ansible host -m virt -a "name=alpha command=get_xml" ansible host -m virt -a "name=alpha command=create uri=lxc:///" # a playbook example of defining and launching an LXC guest tasks: - name: define vm virt: name=foo command=define xml="{{ lookup('template', 'container-template.xml.j2') }}" uri=lxc:/// - name: start vm virt: name=foo state=running uri=lxc:/// ''' RETURN = ''' # for list_vms command list_vms: description: The list of vms defined on the remote system type: dictionary returned: success sample: [ "build.example.org", "dev.example.org" ] # for status command status: description: The status of the VM, among running, crashed, paused and shutdown type: string sample: "success" returned: success ''' VIRT_FAILED = 1 VIRT_SUCCESS = 0 VIRT_UNAVAILABLE=2 import sys try: import libvirt except ImportError: HAS_VIRT = False else: HAS_VIRT = True ALL_COMMANDS = [] VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause', 'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define'] HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype'] ALL_COMMANDS.extend(VM_COMMANDS) ALL_COMMANDS.extend(HOST_COMMANDS) VIRT_STATE_NAME_MAP = { 0 : "running", 1 : "running", 2 : "running", 3 : "paused", 4 : "shutdown", 5 : "shutdown", 6 : "crashed" } class VMNotFound(Exception): pass class 
LibvirtConnection(object): def __init__(self, uri, module): self.module = module cmd = "uname -r" rc, stdout, stderr = self.module.run_command(cmd) if "xen" in stdout: conn = libvirt.open(None) elif "esx" in uri: auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None] conn = libvirt.openAuth(uri, auth) else: conn = libvirt.open(uri) if not conn: raise Exception("hypervisor connection failure") self.conn = conn def find_vm(self, vmid): """ Extra bonus feature: vmid = -1 returns a list of everything """ conn = self.conn vms = [] # this block of code borrowed from virt-manager: # get working domain's name ids = conn.listDomainsID() for id in ids: vm = conn.lookupByID(id) vms.append(vm) # get defined domain names = conn.listDefinedDomains() for name in names: vm = conn.lookupByName(name) vms.append(vm) if vmid == -1: return vms for vm in vms: if vm.name() == vmid: return vm raise VMNotFound("virtual machine %s not found" % vmid) def shutdown(self, vmid): return self.find_vm(vmid).shutdown() def pause(self, vmid): return self.suspend(self.conn,vmid) def unpause(self, vmid): return self.resume(self.conn,vmid) def suspend(self, vmid): return self.find_vm(vmid).suspend() def resume(self, vmid): return self.find_vm(vmid).resume() def create(self, vmid): return self.find_vm(vmid).create() def destroy(self, vmid): return self.find_vm(vmid).destroy() def undefine(self, vmid): return self.find_vm(vmid).undefine() def get_status2(self, vm): state = vm.info()[0] return VIRT_STATE_NAME_MAP.get(state,"unknown") def get_status(self, vmid): state = self.find_vm(vmid).info()[0] return VIRT_STATE_NAME_MAP.get(state,"unknown") def nodeinfo(self): return self.conn.getInfo() def get_type(self): return self.conn.getType() def get_xml(self, vmid): vm = self.conn.lookupByName(vmid) return vm.XMLDesc(0) def get_maxVcpus(self, vmid): vm = self.conn.lookupByName(vmid) return vm.maxVcpus() def get_maxMemory(self, vmid): vm = self.conn.lookupByName(vmid) return 
vm.maxMemory() def getFreeMemory(self): return self.conn.getFreeMemory() def get_autostart(self, vmid): vm = self.conn.lookupByName(vmid) return vm.autostart() def set_autostart(self, vmid, val): vm = self.conn.lookupByName(vmid) return vm.setAutostart(val) def define_from_xml(self, xml): return self.conn.defineXML(xml) class Virt(object): def __init__(self, uri, module): self.module = module self.uri = uri def __get_conn(self): self.conn = LibvirtConnection(self.uri, self.module) return self.conn def get_vm(self, vmid): self.__get_conn() return self.conn.find_vm(vmid) def state(self): vms = self.list_vms() state = [] for vm in vms: state_blurb = self.conn.get_status(vm) state.append("%s %s" % (vm,state_blurb)) return state def info(self): vms = self.list_vms() info = dict() for vm in vms: data = self.conn.find_vm(vm).info() # libvirt returns maxMem, memory, and cpuTime as long()'s, which # xmlrpclib tries to convert to regular int's during serialization. # This throws exceptions, so convert them to strings here and # assume the other end of the xmlrpc connection can figure things # out or doesn't care. 
info[vm] = { "state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"), "maxMem" : str(data[1]), "memory" : str(data[2]), "nrVirtCpu" : data[3], "cpuTime" : str(data[4]), } info[vm]["autostart"] = self.conn.get_autostart(vm) return info def nodeinfo(self): self.__get_conn() info = dict() data = self.conn.nodeinfo() info = { "cpumodel" : str(data[0]), "phymemory" : str(data[1]), "cpus" : str(data[2]), "cpumhz" : str(data[3]), "numanodes" : str(data[4]), "sockets" : str(data[5]), "cpucores" : str(data[6]), "cputhreads" : str(data[7]) } return info def list_vms(self, state=None): self.conn = self.__get_conn() vms = self.conn.find_vm(-1) results = [] for x in vms: try: if state: vmstate = self.conn.get_status2(x) if vmstate == state: results.append(x.name()) else: results.append(x.name()) except: pass return results def virttype(self): return self.__get_conn().get_type() def autostart(self, vmid): self.conn = self.__get_conn() return self.conn.set_autostart(vmid, True) def freemem(self): self.conn = self.__get_conn() return self.conn.getFreeMemory() def shutdown(self, vmid): """ Make the machine with the given vmid stop running. Whatever that takes. """ self.__get_conn() self.conn.shutdown(vmid) return 0 def pause(self, vmid): """ Pause the machine with the given vmid. """ self.__get_conn() return self.conn.suspend(vmid) def unpause(self, vmid): """ Unpause the machine with the given vmid. """ self.__get_conn() return self.conn.resume(vmid) def create(self, vmid): """ Start the machine via the given vmid """ self.__get_conn() return self.conn.create(vmid) def start(self, vmid): """ Start the machine via the given id/name """ self.__get_conn() return self.conn.create(vmid) def destroy(self, vmid): """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """ self.__get_conn() return self.conn.destroy(vmid) def undefine(self, vmid): """ Stop a domain, and then wipe it from the face of the earth. 
(delete disk/config file) """ self.__get_conn() return self.conn.undefine(vmid) def status(self, vmid): """ Return a state suitable for server consumption. Aka, codes.py values, not XM output. """ self.__get_conn() return self.conn.get_status(vmid) def get_xml(self, vmid): """ Receive a Vm id as input Return an xml describing vm config returned by a libvirt call """ self.__get_conn() return self.conn.get_xml(vmid) def get_maxVcpus(self, vmid): """ Gets the max number of VCPUs on a guest """ self.__get_conn() return self.conn.get_maxVcpus(vmid) def get_max_memory(self, vmid): """ Gets the max memory on a guest """ self.__get_conn() return self.conn.get_MaxMemory(vmid) def define(self, xml): """ Define a guest with the given xml """ self.__get_conn() return self.conn.define_from_xml(xml) def core(module): state = module.params.get('state', None) guest = module.params.get('name', None) command = module.params.get('command', None) uri = module.params.get('uri', None) xml = module.params.get('xml', None) v = Virt(uri, module) res = {} if state and command=='list_vms': res = v.list_vms(state=state) if type(res) != dict: res = { command: res } return VIRT_SUCCESS, res if state: if not guest: module.fail_json(msg = "state change requires a guest specified") res['changed'] = False if state == 'running': if v.status(guest) is 'paused': res['changed'] = True res['msg'] = v.unpause(guest) elif v.status(guest) is not 'running': res['changed'] = True res['msg'] = v.start(guest) elif state == 'shutdown': if v.status(guest) is not 'shutdown': res['changed'] = True res['msg'] = v.shutdown(guest) elif state == 'destroyed': if v.status(guest) is not 'shutdown': res['changed'] = True res['msg'] = v.destroy(guest) elif state == 'paused': if v.status(guest) is 'running': res['changed'] = True res['msg'] = v.pause(guest) else: module.fail_json(msg="unexpected state") return VIRT_SUCCESS, res if command: if command in VM_COMMANDS: if not guest: module.fail_json(msg = "%s requires 1 
argument: guest" % command) if command == 'define': if not xml: module.fail_json(msg = "define requires xml argument") try: v.get_vm(guest) except VMNotFound: v.define(xml) res = {'changed': True, 'created': guest} return VIRT_SUCCESS, res res = getattr(v, command)(guest) if type(res) != dict: res = { command: res } return VIRT_SUCCESS, res elif hasattr(v, command): res = getattr(v, command)() if type(res) != dict: res = { command: res } return VIRT_SUCCESS, res else: module.fail_json(msg="Command %s not recognized" % basecmd) module.fail_json(msg="expected state or command parameter to be specified") def main(): module = AnsibleModule(argument_spec=dict( name = dict(aliases=['guest']), state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']), command = dict(choices=ALL_COMMANDS), uri = dict(default='qemu:///system'), xml = dict(), )) if not HAS_VIRT: module.fail_json( msg='The `libvirt` module is not importable. Check the requirements.' ) rc = VIRT_SUCCESS try: rc, result = core(module) except Exception, e: module.fail_json(msg=str(e)) if rc != 0: # something went wrong emit the msg module.fail_json(rc=rc, msg=result) else: module.exit_json(**result) # import module snippets from ansible.module_utils.basic import * main()
unknown
codeparrot/codeparrot-clean
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """variables tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile import numpy as np from tensorflow.contrib.framework.python.ops import arg_scope from tensorflow.contrib.framework.python.ops import variables as variables_lib2 from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import partitioned_variables from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.training import device_setter from tensorflow.python.training import saver as saver_lib class LocalVariableTest(test.TestCase): def test_local_variable(self): with self.test_session() as sess: self.assertEquals([], variables_lib.local_variables()) value0 = 42 
variables_lib2.local_variable(value0) value1 = 43 variables_lib2.local_variable(value1) variables = variables_lib.local_variables() self.assertEquals(2, len(variables)) self.assertRaises(errors_impl.OpError, sess.run, variables) variables_lib.variables_initializer(variables).run() self.assertAllEqual(set([value0, value1]), set(sess.run(variables))) def testLocalVariableNameAndShape(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.local_variable([1, 1, 1, 1, 1], name='a') self.assertEquals(a.op.name, 'A/a') self.assertListEqual(a.get_shape().as_list(), [5]) self.assertListEqual([a], variables_lib2.get_local_variables()) def testLocalVariableNotInAllVariables(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.local_variable(0) self.assertFalse(a in variables_lib.global_variables()) self.assertTrue(a in variables_lib.local_variables()) def testLocalVariableNotInVariablesToRestore(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.local_variable(0) self.assertFalse(a in variables_lib2.get_variables_to_restore()) self.assertTrue(a in variables_lib.local_variables()) def testGetVariablesDontReturnsTransients(self): with self.test_session(): with variable_scope.variable_scope('A'): variables_lib2.local_variable(0) with variable_scope.variable_scope('B'): variables_lib2.local_variable(0) self.assertEquals([], variables_lib2.get_variables('A')) self.assertEquals([], variables_lib2.get_variables('B')) def testGetLocalVariablesReturnsTransients(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.local_variable(0) with variable_scope.variable_scope('B'): b = variables_lib2.local_variable(0) self.assertEquals([a], variables_lib2.get_local_variables('A')) self.assertEquals([b], variables_lib2.get_local_variables('B')) def testInitializedVariableValue(self): with self.test_session() as sess: a = 
variables_lib2.local_variable([0, 0, 0, 0, 0], name='a') sess.run(variables_lib.local_variables_initializer()) self.assertAllEqual(a.eval(), [0] * 5) def testResourceVariable(self): a = variables_lib2.local_variable(0) b = variables_lib2.local_variable(0, use_resource=True) self.assertEqual(type(a), variables_lib.Variable) self.assertEqual(type(b), resource_variable_ops.ResourceVariable) class GlobalVariableTest(test.TestCase): def test_global_variable(self): with self.test_session() as sess: self.assertEquals([], variables_lib.global_variables()) value0 = 42 variables_lib2.global_variable(value0) value1 = 43 variables_lib2.global_variable(value1) variables = variables_lib.global_variables() self.assertEquals(2, len(variables)) with self.assertRaisesOpError( 'Attempting to use uninitialized value Variable'): sess.run(variables) variables_lib.variables_initializer(variables).run() self.assertAllEqual(set([value0, value1]), set(sess.run(variables))) def testVariableNameAndShape(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.global_variable([1, 1, 1, 1, 1], name='a') self.assertEquals(a.op.name, 'A/a') self.assertListEqual(a.get_shape().as_list(), [5]) self.assertListEqual([a], variables_lib.global_variables()) def testGlobalVariableNotInLocalVariables(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.global_variable(0) self.assertFalse(a in variables_lib.local_variables()) self.assertTrue(a in variables_lib.global_variables()) def testGlobalVariableInVariablesToRestore(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.global_variable(0) self.assertFalse(a in variables_lib.local_variables()) self.assertTrue(a in variables_lib2.get_variables_to_restore()) def testGetVariablesReturnsThem(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.global_variable(0) with variable_scope.variable_scope('B'): b 
= variables_lib2.global_variable(0) self.assertEquals([a], variables_lib2.get_variables('A')) self.assertEquals([b], variables_lib2.get_variables('B')) def testGetLocalVariablesDontReturnsThem(self): with self.test_session(): with variable_scope.variable_scope('A'): variables_lib2.global_variable(0) with variable_scope.variable_scope('B'): variables_lib2.global_variable(0) self.assertEquals([], variables_lib2.get_local_variables('A')) self.assertEquals([], variables_lib2.get_local_variables('B')) def testInitializedVariableValue(self): with self.test_session() as sess: a = variables_lib2.global_variable([0, 0, 0, 0, 0], name='a') sess.run(variables_lib.global_variables_initializer()) self.assertAllEqual(a.eval(), [0] * 5) def testResourceVariable(self): a = variables_lib2.global_variable(0) b = variables_lib2.global_variable(0, use_resource=True) self.assertEqual(type(a), variables_lib.Variable) self.assertEqual(type(b), resource_variable_ops.ResourceVariable) class GlobalStepTest(test.TestCase): def _assert_global_step(self, global_step, expected_dtype=dtypes.int64): self.assertEquals('%s:0' % ops.GraphKeys.GLOBAL_STEP, global_step.name) self.assertEquals(expected_dtype, global_step.dtype.base_dtype) self.assertEquals([], global_step.get_shape().as_list()) def test_invalid_dtype(self): with ops.Graph().as_default() as g: self.assertEquals(None, variables_lib2.get_global_step()) variables_lib.Variable( 0.0, trainable=False, dtype=dtypes.float32, name=ops.GraphKeys.GLOBAL_STEP) self.assertRaisesRegexp(TypeError, 'does not have integer type', variables_lib2.get_global_step) self.assertRaisesRegexp(TypeError, 'does not have integer type', variables_lib2.get_global_step, g) def test_invalid_shape(self): with ops.Graph().as_default() as g: self.assertEquals(None, variables_lib2.get_global_step()) variables_lib.Variable( [0], trainable=False, dtype=dtypes.int32, name=ops.GraphKeys.GLOBAL_STEP) self.assertRaisesRegexp(TypeError, 'not scalar', 
variables_lib2.get_global_step) self.assertRaisesRegexp(TypeError, 'not scalar', variables_lib2.get_global_step, g) def test_create_global_step(self): self.assertEquals(None, variables_lib2.get_global_step()) with ops.Graph().as_default() as g: global_step = variables_lib2.create_global_step() self._assert_global_step(global_step) self.assertRaisesRegexp(ValueError, 'already exists', variables_lib2.create_global_step) self.assertRaisesRegexp(ValueError, 'already exists', variables_lib2.create_global_step, g) self._assert_global_step(variables_lib2.create_global_step(ops.Graph())) def test_get_global_step(self): with ops.Graph().as_default() as g: self.assertEquals(None, variables_lib2.get_global_step()) variables_lib.Variable( 0, trainable=False, dtype=dtypes.int32, name=ops.GraphKeys.GLOBAL_STEP) self._assert_global_step( variables_lib2.get_global_step(), expected_dtype=dtypes.int32) self._assert_global_step( variables_lib2.get_global_step(g), expected_dtype=dtypes.int32) def test_get_or_create_global_step(self): with ops.Graph().as_default() as g: self.assertEquals(None, variables_lib2.get_global_step()) self._assert_global_step(variables_lib2.get_or_create_global_step()) self._assert_global_step(variables_lib2.get_or_create_global_step(g)) class VariablesTest(test.TestCase): def testCreateVariable(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) self.assertEquals(a.op.name, 'A/a') self.assertListEqual(a.get_shape().as_list(), [5]) self.assertTrue(a in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)) self.assertFalse(a in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)) self.assertFalse(a in variables_lib.local_variables()) def testGetVariables(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) with variable_scope.variable_scope('B'): b = variables_lib2.variable('a', [5]) self.assertEquals([a, b], variables_lib2.get_variables()) 
self.assertEquals([a], variables_lib2.get_variables('A')) self.assertEquals([b], variables_lib2.get_variables('B')) def testGetVariablesWithScope(self): with self.test_session(): with variable_scope.variable_scope('A') as var_scope: a = variables_lib2.variable('a', [5]) b = variables_lib2.variable('b', [5]) self.assertSetEqual( set([a, b]), set(variables_lib2.get_variables(var_scope))) def testGetVariablesSuffix(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) with variable_scope.variable_scope('A'): b = variables_lib2.variable('b', [5]) self.assertEquals([a], variables_lib2.get_variables(suffix='a')) self.assertEquals([b], variables_lib2.get_variables(suffix='b')) def testGetVariableWithSingleVar(self): with self.test_session(): with variable_scope.variable_scope('parent'): a = variables_lib2.variable('child', [5]) self.assertEquals(a, variables_lib2.get_unique_variable('parent/child')) def testGetVariableWithDistractors(self): with self.test_session(): with variable_scope.variable_scope('parent'): a = variables_lib2.variable('child', [5]) with variable_scope.variable_scope('child'): variables_lib2.variable('grandchild1', [7]) variables_lib2.variable('grandchild2', [9]) self.assertEquals(a, variables_lib2.get_unique_variable('parent/child')) def testGetVariableThrowsExceptionWithNoMatch(self): var_name = 'cant_find_me' with self.test_session(): with self.assertRaises(ValueError): variables_lib2.get_unique_variable(var_name) def testGetThrowsExceptionWithChildrenButNoMatch(self): var_name = 'parent/child' with self.test_session(): with variable_scope.variable_scope(var_name): variables_lib2.variable('grandchild1', [7]) variables_lib2.variable('grandchild2', [9]) with self.assertRaises(ValueError): variables_lib2.get_unique_variable(var_name) def testGetVariablesToRestore(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) with 
variable_scope.variable_scope('B'): b = variables_lib2.variable('a', [5]) self.assertEquals([a, b], variables_lib2.get_variables_to_restore()) def testIncludeGetVariablesToRestore(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) with variable_scope.variable_scope('B'): b = variables_lib2.variable('a', [5]) self.assertEquals([a, b], variables_lib2.get_variables()) self.assertEquals([a], variables_lib2.get_variables_to_restore(['A'])) def testExcludeGetVariablesToRestore(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) with variable_scope.variable_scope('B'): b = variables_lib2.variable('a', [5]) self.assertEquals([a, b], variables_lib2.get_variables()) self.assertEquals( [a], variables_lib2.get_variables_to_restore(exclude=['B'])) def testWrongIncludeGetVariablesToRestore(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) with variable_scope.variable_scope('B'): b = variables_lib2.variable('a', [5]) self.assertEquals([a, b], variables_lib2.get_variables()) self.assertEquals([], variables_lib2.get_variables_to_restore(['a'])) def testGetMixedVariablesToRestore(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) b = variables_lib2.variable('b', [5]) with variable_scope.variable_scope('B'): c = variables_lib2.variable('c', [5]) d = variables_lib2.variable('d', [5]) self.assertEquals([a, b, c, d], variables_lib2.get_variables()) self.assertEquals( [a, c], variables_lib2.get_variables_to_restore(include=['A/a', 'B/c'])) def testExcludeGetMixedVariablesToRestore(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) b = variables_lib2.variable('b', [5]) with variable_scope.variable_scope('B'): c = variables_lib2.variable('c', [5]) d = variables_lib2.variable('d', [5]) 
self.assertEquals([a, b, c, d], variables_lib2.get_variables()) self.assertEquals( [b, d], variables_lib2.get_variables_to_restore(exclude=['A/a', 'B/c'])) def testReuseVariable(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', []) with variable_scope.variable_scope('A', reuse=True): b = variables_lib2.variable('a', []) self.assertEquals(a, b) self.assertListEqual([a], variables_lib2.get_variables()) def testVariableWithRegularizer(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [], regularizer=nn_ops.l2_loss) loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0] self.assertDeviceEqual(loss.device, a.device) def testVariableWithRegularizerColocate(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable( 'a', [], device='gpu:0', regularizer=nn_ops.l2_loss) loss = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)[0] self.assertDeviceEqual(loss.device, a.device) def testVariableWithDevice(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [], device='cpu:0') b = variables_lib2.variable('b', [], device='cpu:1') self.assertDeviceEqual(a.device, 'cpu:0') self.assertDeviceEqual(b.device, 'cpu:1') def testVariableWithDeviceFromScope(self): with self.test_session(): with ops.device('/cpu:0'): a = variables_lib2.variable('a', []) b = variables_lib2.variable('b', [], device='cpu:1') self.assertDeviceEqual(a.device, 'cpu:0') self.assertDeviceEqual(b.device, 'cpu:1') def testVariableWithDeviceFunction(self): class DevFn(object): def __init__(self): self.counter = -1 def __call__(self, op): self.counter += 1 return 'cpu:%d' % self.counter with self.test_session(): with arg_scope([variables_lib2.variable], device=DevFn()): a = variables_lib2.variable('a', []) b = variables_lib2.variable('b', []) c = variables_lib2.variable('c', [], device='cpu:12') 
d = variables_lib2.variable('d', []) with ops.device('cpu:99'): e_init = constant_op.constant(12) e = variables_lib2.variable('e', initializer=e_init) self.assertDeviceEqual(a.device, 'cpu:0') self.assertEqual(a.initial_value.op.colocation_groups(), a.op.colocation_groups()) self.assertDeviceEqual(b.device, 'cpu:1') self.assertEqual(b.initial_value.op.colocation_groups(), b.op.colocation_groups()) self.assertDeviceEqual(c.device, 'cpu:12') self.assertEqual(c.initial_value.op.colocation_groups(), c.op.colocation_groups()) self.assertDeviceEqual(d.device, 'cpu:2') self.assertEqual(d.initial_value.op.colocation_groups(), d.op.colocation_groups()) self.assertDeviceEqual(e.device, 'cpu:3') self.assertDeviceEqual(e.initial_value.device, 'cpu:99') def testVariableWithReplicaDeviceSetter(self): with self.test_session(): with ops.device(device_setter.replica_device_setter(ps_tasks=2)): a = variables_lib2.variable('a', []) b = variables_lib2.variable('b', []) c = variables_lib2.variable('c', [], device='cpu:12') d = variables_lib2.variable('d', []) with ops.device('cpu:99'): e_init = constant_op.constant(12) e = variables_lib2.variable('e', initializer=e_init) # The values below highlight how the replica_device_setter puts initial # values on the worker job, and how it merges explicit devices. 
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0') self.assertEqual(a.initial_value.op.colocation_groups(), a.op.colocation_groups()) self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0') self.assertEqual(b.initial_value.op.colocation_groups(), b.op.colocation_groups()) self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12') self.assertEqual(c.initial_value.op.colocation_groups(), c.op.colocation_groups()) self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0') self.assertEqual(d.initial_value.op.colocation_groups(), d.op.colocation_groups()) self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0') self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99') def testVariableWithVariableDeviceChooser(self): with ops.Graph().as_default(): device_fn = variables_lib2.VariableDeviceChooser(num_tasks=2) with arg_scope([variables_lib2.variable], device=device_fn): a = variables_lib2.variable('a', []) b = variables_lib2.variable('b', []) c = variables_lib2.variable('c', [], device='cpu:12') d = variables_lib2.variable('d', []) with ops.device('cpu:99'): e_init = constant_op.constant(12) e = variables_lib2.variable('e', initializer=e_init) # The values below highlight how the VariableDeviceChooser puts initial # values on the same device as the variable job. 
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0') self.assertEqual(a.initial_value.op.colocation_groups(), a.op.colocation_groups()) self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0') self.assertEqual(b.initial_value.op.colocation_groups(), b.op.colocation_groups()) self.assertDeviceEqual(c.device, '/cpu:12') self.assertEqual(c.initial_value.op.colocation_groups(), c.op.colocation_groups()) self.assertDeviceEqual(d.device, '/job:ps/task:0/cpu:0') self.assertEqual(d.initial_value.op.colocation_groups(), d.op.colocation_groups()) self.assertDeviceEqual(e.device, '/job:ps/task:1/cpu:0') self.assertDeviceEqual(e.initial_value.device, '/cpu:99') def testVariableWithVariableDeviceChooserWithReplica(self): with ops.Graph().as_default(): device_fn = variables_lib2.VariableDeviceChooser(replica=3, num_tasks=2) with arg_scope([variables_lib2.variable], device=device_fn): a = variables_lib2.variable('a', []) b = variables_lib2.variable('b', []) c = variables_lib2.variable('c', [], device='cpu:12') d = variables_lib2.variable('d', []) with ops.device('cpu:99'): e_init = constant_op.constant(12) e = variables_lib2.variable('e', initializer=e_init) # The values below highlight how the VariableDeviceChooser puts initial # values on the same device as the variable job. 
self.assertDeviceEqual(a.device, '/job:ps/replica:3/task:0/cpu:0') self.assertEqual(a.initial_value.op.colocation_groups(), a.op.colocation_groups()) self.assertDeviceEqual(b.device, '/job:ps/replica:3/task:1/cpu:0') self.assertEqual(b.initial_value.op.colocation_groups(), b.op.colocation_groups()) self.assertDeviceEqual(c.device, '/cpu:12') self.assertEqual(c.initial_value.op.colocation_groups(), c.op.colocation_groups()) self.assertDeviceEqual(d.device, '/job:ps/replica:3/task:0/cpu:0') self.assertEqual(d.initial_value.op.colocation_groups(), d.op.colocation_groups()) self.assertDeviceEqual(e.device, '/job:ps/replica:3/task:1/cpu:0') self.assertDeviceEqual(e.initial_value.device, '/cpu:99') def testVariableGPUPlacement(self): with ops.Graph().as_default(): device_fn = variables_lib2.VariableDeviceChooser(device_type='GPU') with arg_scope([variables_lib2.variable], device=device_fn): a = variables_lib2.variable('a', []) b = variables_lib2.variable('b', []) c = variables_lib2.variable('c', [], device='cpu:12') d = variables_lib2.variable('d', []) with ops.device('cpu:99'): e_init = constant_op.constant(12) e = variables_lib2.variable('e', initializer=e_init) # The values below highlight how the VariableDeviceChooser puts initial # values on the same device as the variable job. 
self.assertDeviceEqual(a.device, '/device:GPU:0') self.assertEqual(a.initial_value.op.colocation_groups(), a.op.colocation_groups()) self.assertDeviceEqual(b.device, '/device:GPU:0') self.assertEqual(b.initial_value.op.colocation_groups(), b.op.colocation_groups()) self.assertDeviceEqual(c.device, '/cpu:12') self.assertEqual(c.initial_value.op.colocation_groups(), c.op.colocation_groups()) self.assertDeviceEqual(d.device, '/device:GPU:0') self.assertEqual(d.initial_value.op.colocation_groups(), d.op.colocation_groups()) self.assertDeviceEqual(e.device, '/device:GPU:0') self.assertDeviceEqual(e.initial_value.device, '/cpu:99') class ModelVariablesTest(test.TestCase): def testNameAndShape(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.model_variable('a', [5]) self.assertEquals(a.op.name, 'A/a') self.assertListEqual(a.get_shape().as_list(), [5]) self.assertListEqual([a], variables_lib2.get_model_variables('A')) def testNotInLocalVariables(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.model_variable('a', [5]) self.assertTrue(a in variables_lib.global_variables()) self.assertTrue(a in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES)) self.assertFalse(a in variables_lib.local_variables()) def testGetVariablesReturns(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.model_variable('a', [5]) with variable_scope.variable_scope('B'): b = variables_lib2.model_variable('a', [5]) self.assertEquals([a], variables_lib2.get_variables('A')) self.assertEquals([b], variables_lib2.get_variables('B')) def testGetModelVariables(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.model_variable('a', [5]) with variable_scope.variable_scope('B'): b = variables_lib2.model_variable('a', [5]) self.assertEquals([a], variables_lib2.get_model_variables('A')) self.assertEquals([b], variables_lib2.get_model_variables('B')) 
def testGetTrainableVariables(self): with self.test_session(): with variable_scope.variable_scope('A'): variables_lib2.local_variable([5]) a = variables_lib.Variable([5]) with variable_scope.variable_scope('B'): variables_lib2.local_variable([5]) b = variables_lib.Variable([5]) self.assertEquals([a], variables_lib2.get_trainable_variables('A')) self.assertEquals([b], variables_lib2.get_trainable_variables('B')) def testGetLocalVariables(self): with self.test_session(): with variable_scope.variable_scope('A'): _ = variables_lib2.model_variable('a', [5]) with variable_scope.variable_scope('B'): _ = variables_lib2.model_variable('a', [5]) self.assertEquals([], variables_lib2.get_local_variables('A')) self.assertEquals([], variables_lib2.get_local_variables('B')) def testInitializedVariableValue(self): with self.test_session() as sess: a = variables_lib2.model_variable( 'a', [5], initializer=init_ops.ones_initializer()) sess.run(variables_lib.global_variables_initializer()) self.assertAllEqual(a.eval(), [1] * 5) def testDeviceFn(self): class DevFn(object): def __init__(self): self.counter = -1 def __call__(self, op): self.counter += 1 return '/cpu:%d' % self.counter with ops.Graph().as_default(): with arg_scope([variables_lib2.model_variable], device=DevFn()): a = variables_lib2.model_variable('a', [5]) b = variables_lib2.model_variable('b', [20]) self.assertDeviceEqual(a.device, '/cpu:0') self.assertEqual(a.initial_value.op.colocation_groups(), a.op.colocation_groups()) self.assertDeviceEqual(b.device, '/cpu:1') self.assertEqual(b.initial_value.op.colocation_groups(), b.op.colocation_groups()) def testVariableWithVariableDeviceChooser(self): with ops.Graph().as_default(): device_fn = variables_lib2.VariableDeviceChooser() with arg_scope([variables_lib2.model_variable], device=device_fn): a = variables_lib2.model_variable('a', [5]) b = variables_lib2.model_variable('b', [20]) self.assertDeviceEqual(a.device, 'cpu:0') 
self.assertEqual(a.initial_value.op.colocation_groups(), a.op.colocation_groups()) self.assertDeviceEqual(b.device, 'cpu:0') self.assertEqual(a.initial_value.op.colocation_groups(), a.op.colocation_groups()) class GetVariablesCollections(test.TestCase): def testVariableCollection(self): with self.test_session(): a = variables_lib2.variable('a', [], collections='A') b = variables_lib2.variable('b', [], collections='B') self.assertEquals(a, ops.get_collection('A')[0]) self.assertEquals(b, ops.get_collection('B')[0]) def testVariableCollections(self): with self.test_session(): a = variables_lib2.variable('a', [], collections=['A', 'C']) b = variables_lib2.variable('b', [], collections=['B', 'C']) self.assertEquals(a, ops.get_collection('A')[0]) self.assertEquals(b, ops.get_collection('B')[0]) self.assertListEqual([a, b], ops.get_collection('C')) def testVariableCollectionsWithArgScope(self): with self.test_session(): with arg_scope([variables_lib2.variable], collections='A'): a = variables_lib2.variable('a', []) b = variables_lib2.variable('b', []) self.assertListEqual([a, b], ops.get_collection('A')) def testVariableCollectionsWithArgScopeNested(self): with self.test_session(): with arg_scope([variables_lib2.variable], collections='A'): a = variables_lib2.variable('a', []) with arg_scope([variables_lib2.variable], collections='B'): b = variables_lib2.variable('b', []) self.assertEquals(a, ops.get_collection('A')[0]) self.assertEquals(b, ops.get_collection('B')[0]) def testVariableCollectionsWithArgScopeNonNested(self): with self.test_session(): with arg_scope([variables_lib2.variable], collections='A'): a = variables_lib2.variable('a', []) with arg_scope([variables_lib2.variable], collections='B'): b = variables_lib2.variable('b', []) variables_lib2.variable('c', []) self.assertListEqual([a], ops.get_collection('A')) self.assertListEqual([b], ops.get_collection('B')) def testVariableRestoreWithArgScopeNested(self): with self.test_session(): a = 
variables_lib2.variable('a', []) with arg_scope( [variables_lib2.variable], trainable=False, collections=['A', 'B']): b = variables_lib2.variable('b', []) c = variables_lib2.variable('c', [], trainable=False) self.assertEquals([a, c], variables_lib2.get_variables_to_restore()) self.assertEquals([a], variables_lib.trainable_variables()) self.assertEquals([b], ops.get_collection('A')) self.assertEquals([b], ops.get_collection('B')) class GetVariablesBySuffixTest(test.TestCase): def testGetVariableGivenNameScoped(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) b = variables_lib2.variable('b', [5]) self.assertEquals([a], variables_lib2.get_variables_by_suffix('a')) self.assertEquals([b], variables_lib2.get_variables_by_suffix('b')) def testGetVariableWithScope(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) fooa = variables_lib2.variable('fooa', [5]) with variable_scope.variable_scope('B'): a2 = variables_lib2.variable('a', [5]) matched_variables = variables_lib2.get_variables_by_suffix('a') self.assertEquals([a, fooa, a2], matched_variables) matched_variables = variables_lib2.get_variables_by_suffix('/a') self.assertEquals([a, a2], matched_variables) matched_variables = variables_lib2.get_variables_by_suffix('a', scope='A') self.assertEquals([a, fooa], matched_variables) def testGetVariableWithoutScope(self): with self.test_session(): a = variables_lib2.variable('a', [5]) fooa = variables_lib2.variable('fooa', [5]) b_a = variables_lib2.variable('B/a', [5]) matched_variables = variables_lib2.get_variables_by_suffix('a') self.assertEquals([a, fooa, b_a], matched_variables) matched_variables = variables_lib2.get_variables_by_suffix('fooa') self.assertEquals([fooa], matched_variables) class GetVariablesByNameTest(test.TestCase): def testGetVariableGivenNameScoped(self): with self.test_session(): with variable_scope.variable_scope('A'): a = 
variables_lib2.variable('a', [5]) b = variables_lib2.variable('b', [5]) self.assertEquals([a], variables_lib2.get_variables_by_name('a')) self.assertEquals([b], variables_lib2.get_variables_by_name('b')) def testGetVariableWithScope(self): with self.test_session(): with variable_scope.variable_scope('A'): a = variables_lib2.variable('a', [5]) fooa = variables_lib2.variable('fooa', [5]) with variable_scope.variable_scope('B'): a2 = variables_lib2.variable('a', [5]) matched_variables = variables_lib2.get_variables_by_name('a') self.assertEquals([a, a2], matched_variables) matched_variables = variables_lib2.get_variables_by_name('fooa') self.assertEquals([fooa], matched_variables) matched_variables = variables_lib2.get_variables_by_name('/a') self.assertEquals([], matched_variables) matched_variables = variables_lib2.get_variables_by_name('a', scope='A') self.assertEquals([a], matched_variables) def testGetVariableWithoutScope(self): with self.test_session(): a = variables_lib2.variable('a', [5]) fooa = variables_lib2.variable('fooa', [5]) b_a = variables_lib2.variable('B/a', [5]) matched_variables = variables_lib2.get_variables_by_name('a') self.assertEquals([a, b_a], matched_variables) matched_variables = variables_lib2.get_variables_by_name('fooa') self.assertEquals([fooa], matched_variables) class GetVariableFullNameTest(test.TestCase): def testVariable(self): my_var0 = variables_lib2.variable('my_var0', shape=[]) full_name = variables_lib2.get_variable_full_name(my_var0) self.assertEquals(full_name, my_var0.op.name) def testPartitionedVariable(self): input_full_name = 'my_var0' partitioner = partitioned_variables.variable_axis_size_partitioner(2) my_var0 = variables_lib2.variable( 'my_var0', shape=[2, 2], partitioner=partitioner) for part_var in list(my_var0): computed_full_name = variables_lib2.get_variable_full_name(part_var) self.assertEquals(input_full_name, computed_full_name) class AssignFromValuesTest(test.TestCase): def testNoScopes(self): init_value0 = 
np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1)) init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2)) with self.test_session() as sess: initializer = init_ops.truncated_normal_initializer(stddev=.1) var0 = variables_lib2.variable( 'my_var0', shape=[1, 3, 1], initializer=initializer) var1 = variables_lib2.variable( 'my_var1', shape=[2, 1, 2], initializer=initializer) var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1} assign_op, feed_dict = variables_lib2.assign_from_values( var_names_to_values) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. sess.run(assign_op, feed_dict) # Request and test the variable values: var0, var1 = sess.run([var0, var1]) self.assertAllEqual(init_value0, var0) self.assertAllEqual(init_value1, var1) def testWithScopes(self): init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1)) init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2)) with self.test_session() as sess: initializer = init_ops.truncated_normal_initializer(stddev=.1) with variable_scope.variable_scope('my_model/my_layer0'): var0 = variables_lib2.variable( 'my_var0', shape=[1, 3, 1], initializer=initializer) with variable_scope.variable_scope('my_model/my_layer1'): var1 = variables_lib2.variable( 'my_var1', shape=[2, 1, 2], initializer=initializer) var_names_to_values = { 'my_model/my_layer0/my_var0': init_value0, 'my_model/my_layer1/my_var1': init_value1 } assign_op, feed_dict = variables_lib2.assign_from_values( var_names_to_values) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. 
sess.run(assign_op, feed_dict) # Request and test the variable values: var0, var1 = sess.run([var0, var1]) self.assertAllEqual(init_value0, var0) self.assertAllEqual(init_value1, var1) class AssignFromValuesFnTest(test.TestCase): def testNoScopes(self): init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1)) init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2)) with self.test_session() as sess: initializer = init_ops.truncated_normal_initializer(stddev=.1) var0 = variables_lib2.variable( 'my_var0', shape=[1, 3, 1], initializer=initializer) var1 = variables_lib2.variable( 'my_var1', shape=[2, 1, 2], initializer=initializer) var_names_to_values = {'my_var0': init_value0, 'my_var1': init_value1} init_fn = variables_lib2.assign_from_values_fn(var_names_to_values) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. init_fn(sess) # Request and test the variable values: var0, var1 = sess.run([var0, var1]) self.assertAllEqual(init_value0, var0) self.assertAllEqual(init_value1, var1) def testWithScopes(self): init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1)) init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2)) with self.test_session() as sess: initializer = init_ops.truncated_normal_initializer(stddev=.1) with variable_scope.variable_scope('my_model/my_layer0'): var0 = variables_lib2.variable( 'my_var0', shape=[1, 3, 1], initializer=initializer) with variable_scope.variable_scope('my_model/my_layer1'): var1 = variables_lib2.variable( 'my_var1', shape=[2, 1, 2], initializer=initializer) var_names_to_values = { 'my_model/my_layer0/my_var0': init_value0, 'my_model/my_layer1/my_var1': init_value1 } init_fn = variables_lib2.assign_from_values_fn(var_names_to_values) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. 
init_fn(sess) # Request and test the variable values: var0, var1 = sess.run([var0, var1]) self.assertAllEqual(init_value0, var0) self.assertAllEqual(init_value1, var1) class AssignFromCheckpointTest(test.TestCase): def create_checkpoint_from_values(self, var_names_to_values, checkpoint_dir, global_step=None): """Creates a checkpoint from a mapping of name to values in model_dir. Args: var_names_to_values: a map from variable names to values. checkpoint_dir: the directory where the checkpoint will be saved. global_step: the global step used to save the checkpoint. Returns: the model_path to the checkpoint. """ var_list = [] with session.Session('', graph=ops.Graph()) as sess: # Create a set of variables to save in the checkpoint. for var_name in var_names_to_values: var_value = var_names_to_values[var_name] var_list.append(variables_lib.Variable(var_value, name=var_name)) saver = saver_lib.Saver(var_list) init_op = variables_lib.variables_initializer(var_list) sess.run(init_op) # Save the initialized values in the file at 'checkpoint_dir' return saver.save(sess, checkpoint_dir, global_step=global_step) def testLoadExistingVariables(self): model_dir = tempfile.mkdtemp( prefix=os.path.join(self.get_temp_dir(), 'load_existing_variables')) init_value0 = 10.0 init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = variables_lib2.variable('my_var0', shape=[]) var1 = variables_lib2.variable('my_var1', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1} op, feed_dict = variables_lib2.assign_from_checkpoint( model_path, vars_to_restore) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. 
sess.run(op, feed_dict) # Request and test the variable values: self.assertEqual(init_value0, var0.eval()) self.assertEqual(init_value1, var1.eval()) # Tests restoring PartitionedVariables and tests using a dictionary # of lists as the assign_from_checkpoint() var_list param. def testLoadPartitionedVariables(self): model_dir = tempfile.mkdtemp( prefix=os.path.join(self.get_temp_dir(), 'load_partitioned_variables')) init_value0 = np.array([[10.0, 11.0], [12.0, 13.0]]) init_value1 = np.array([20.0]) # Partitioned into 1 part, edge case. var_names_to_values = {'var0': init_value0, 'var1': init_value1} with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) # var0 and var1 are PartitionedVariables. partitioner = partitioned_variables.variable_axis_size_partitioner(2) var0 = variables_lib2.variable( 'var0', shape=init_value0.shape, partitioner=partitioner) var0full = variables_lib2.variable('var0full', shape=init_value0.shape) var1 = variables_lib2.variable( 'var1', shape=init_value1.shape, partitioner=partitioner) # Convert var0 and var1 into a list of underlying variables. vars_to_restore = {'var0': list(var0) + [var0full], 'var1': list(var1)} op, feed_dict = variables_lib2.assign_from_checkpoint( model_path, vars_to_restore) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. sess.run(op, feed_dict) # Request and test the variable values. PartitionedVariables can't # be evaled so we wrap them in an identity. 
self.assertTrue( np.array_equal(init_value0, array_ops.identity(var0).eval())) self.assertTrue(np.array_equal(init_value0, var0full.eval())) self.assertTrue( np.array_equal(init_value1, array_ops.identity(var1).eval())) def testRaisesValueErrorIfAVariableIsntFound(self): model_dir = tempfile.mkdtemp( prefix=os.path.join(self.get_temp_dir(), 'raises_value_error_if_var_isnt_found')) init_value0 = 10.0 init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} with self.test_session(): model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = variables_lib2.variable('my_var0', shape=[]) var1 = variables_lib2.variable('my_var1', shape=[]) vars_to_restore = {'v0_fake': var0, 'v1': var1} with self.assertRaises(ValueError): variables_lib2.assign_from_checkpoint(model_path, vars_to_restore) def testInitFromCheckpointWithScopes(self): model_dir = tempfile.mkdtemp( prefix=os.path.join(self.get_temp_dir(), 'init_from_checkpoint_with_scopes')) init_value0 = np.asarray( [1.0, 3.0, 9.0], dtype=np.float32).reshape((1, 3, 1)) init_value1 = np.asarray( [2.0, 4.0, 6.0, 8.0], dtype=np.float32).reshape((2, 1, 2)) var_names_to_values = {'layer0/v0': init_value0, 'layer1/v1': init_value1} with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) with variable_scope.variable_scope('my_model/my_layer0'): var0 = variables_lib2.variable('my_var0', shape=init_value0.shape) with variable_scope.variable_scope('my_model/my_layer1'): var1 = variables_lib2.variable('my_var1', shape=init_value1.shape) vars_to_restore = {'layer0/v0': var0, 'layer1/v1': var1} op, feed_dict = variables_lib2.assign_from_checkpoint( model_path, vars_to_restore) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. 
sess.run(op, feed_dict) # Request and test the variable values: self.assertAllEqual(init_value0, var0.eval()) self.assertAllEqual(init_value1, var1.eval()) class AssignFromCheckpointFnTest(test.TestCase): def create_checkpoint_from_values(self, var_names_to_values, checkpoint_dir, global_step=None): """Creates a checkpoint from a mapping of name to values in model_dir. Args: var_names_to_values: a map from variable names to values. checkpoint_dir: the directory where the checkpoint will be saved. global_step: the global step used to save the checkpoint. Returns: the model_path to the checkpoint. """ var_list = [] with session.Session('', graph=ops.Graph()) as sess: # Create a set of variables to save in the checkpoint. for var_name in var_names_to_values: var_value = var_names_to_values[var_name] var_list.append(variables_lib.Variable(var_value, name=var_name)) saver = saver_lib.Saver(var_list) init_op = variables_lib.variables_initializer(var_list) sess.run(init_op) # Save the initialized values in the file at 'checkpoint_dir' return saver.save(sess, checkpoint_dir, global_step=global_step) def testLoadExistingVariables(self): model_dir = tempfile.mkdtemp( prefix=os.path.join(self.get_temp_dir(), 'load_existing_variables')) if gfile.Exists(model_dir): gfile.DeleteRecursively(model_dir) init_value0 = 10.0 init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = variables_lib2.variable('my_var0', shape=[]) var1 = variables_lib2.variable('my_var1', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1} init_fn = variables_lib2.assign_from_checkpoint_fn( model_path, vars_to_restore) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. 
init_fn(sess) # Request and test the variable values: self.assertEqual(init_value0, var0.eval()) self.assertEqual(init_value1, var1.eval()) def testLoadExistingVariablesDifferentShapeDefaultDoesNotAllowReshape(self): model_dir = tempfile.mkdtemp( prefix=os.path.join(self.get_temp_dir(), 'load_existing_vars_no_reshape')) if gfile.Exists(model_dir): gfile.DeleteRecursively(model_dir) init_value0 = [[10.0, 11.0]] init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = variables_lib2.variable('my_var0', shape=[2, 1]) var1 = variables_lib2.variable('my_var1', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1} init_fn = variables_lib2.assign_from_checkpoint_fn( model_path, vars_to_restore) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. with self.assertRaises(errors_impl.InvalidArgumentError): init_fn(sess) def testLoadExistingVariablesDifferentShapeAllowReshape(self): model_dir = tempfile.mkdtemp( prefix=os.path.join( self.get_temp_dir(), 'load_existing_variables_different_shape_allow_reshape')) if gfile.Exists(model_dir): gfile.DeleteRecursively(model_dir) init_value0 = [[10.0, 11.0]] init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = variables_lib2.variable('my_var0', shape=[2, 1]) var1 = variables_lib2.variable('my_var1', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1} init_fn = variables_lib2.assign_from_checkpoint_fn( model_path, vars_to_restore, reshape_variables=True) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. 
init_fn(sess) # Request and test the variable values: self.assertAllEqual(np.transpose(np.array(init_value0)), var0.eval()) self.assertEqual(init_value1, var1.eval()) def testNotFoundError(self): model_dir = tempfile.mkdtemp( prefix=os.path.join(self.get_temp_dir(), 'not_found_error')) if gfile.Exists(model_dir): gfile.DeleteRecursively(model_dir) init_value0 = 10.0 init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = variables_lib2.variable('my_var0', shape=[]) var1 = variables_lib2.variable('my_var1', shape=[]) var2 = variables_lib2.variable('my_var2', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1, 'v2': var2} init_fn = variables_lib2.assign_from_checkpoint_fn( model_path, vars_to_restore) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. with self.assertRaises(errors_impl.NotFoundError): init_fn(sess) def testMissingVariablesList(self): model_dir = tempfile.mkdtemp( prefix=os.path.join(self.get_temp_dir(), 'missing_variables_list')) if gfile.Exists(model_dir): gfile.DeleteRecursively(model_dir) init_value0 = 10.0 init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = variables_lib2.variable('v0', shape=[]) var1 = variables_lib2.variable('v1', shape=[]) var2 = variables_lib2.variable('v2', shape=[]) vars_to_restore = [var0, var1, var2] init_fn = variables_lib2.assign_from_checkpoint_fn( model_path, vars_to_restore, ignore_missing_vars=True) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. 
init_fn(sess) # Request and test the variable values: self.assertEqual(init_value0, var0.eval()) self.assertEqual(init_value1, var1.eval()) def testMissingVariablesDict(self): model_dir = tempfile.mkdtemp( prefix=os.path.join(self.get_temp_dir(), 'missing_variables_dict')) if gfile.Exists(model_dir): gfile.DeleteRecursively(model_dir) init_value0 = 10.0 init_value1 = 20.0 var_names_to_values = {'v0': init_value0, 'v1': init_value1} with self.test_session() as sess: model_path = self.create_checkpoint_from_values(var_names_to_values, model_dir) var0 = variables_lib2.variable('my_var0', shape=[]) var1 = variables_lib2.variable('my_var1', shape=[]) var2 = variables_lib2.variable('my_var2', shape=[]) vars_to_restore = {'v0': var0, 'v1': var1, 'v2': var2} init_fn = variables_lib2.assign_from_checkpoint_fn( model_path, vars_to_restore, ignore_missing_vars=True) # Initialize the variables. sess.run(variables_lib.global_variables_initializer()) # Perform the assignment. init_fn(sess) # Request and test the variable values: self.assertEqual(init_value0, var0.eval()) self.assertEqual(init_value1, var1.eval()) class ZeroInitializerOpTest(test.TestCase): def _testZeroInitializer(self, shape, initializer, use_init): var = variables_lib.Variable(initializer) var_zero = variables_lib2.zero_initializer(var) with self.test_session() as sess: with self.assertRaisesOpError('Attempting to use uninitialized value'): var.eval() if use_init: sess.run(var.initializer) with self.assertRaisesOpError('input is already initialized'): var_zero.eval() self.assertAllClose(np.ones(shape), var.eval()) else: var_zero.eval() self.assertAllClose(np.zeros(shape), var.eval()) def testZeroInitializer(self): for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64): for use_init in (False, True): self._testZeroInitializer([10, 20], array_ops.ones( [10, 20], dtype=dtype), use_init) class ZeroVarInitializerOpTest(test.TestCase): def _testZeroVarInitializer(self, shape, initializer, 
use_init): var = resource_variable_ops.ResourceVariable(initializer) var_zero = variables_lib2.zero_initializer(var) with self.test_session() as sess: with self.assertRaisesOpError('Error while reading resource variable'): var.eval() if use_init: sess.run(var.initializer) with self.assertRaisesOpError('input is already initialized'): var_zero.eval() self.assertAllClose(np.ones(shape), var.eval()) else: var_zero.eval() self.assertAllClose(np.zeros(shape), var.eval()) def testZeroVarInitializer(self): for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64): for use_init in (False, True): self._testZeroVarInitializer([10, 20], array_ops.ones([10, 20], dtype=dtype), use_init) class FilterVariablesTest(test.TestCase): def setUp(self): g = ops.Graph() with g.as_default(): var_list = [] var_list.append(variables_lib.Variable(0, name='conv1/weights')) var_list.append(variables_lib.Variable(0, name='conv1/biases')) var_list.append(variables_lib.Variable(0, name='conv2/weights')) var_list.append(variables_lib.Variable(0, name='conv2/biases')) var_list.append(variables_lib.Variable(0, name='clfs/weights')) var_list.append(variables_lib.Variable(0, name='clfs/biases')) self._var_list = var_list def _test_filter_variables(self, expected_var_names, include_patterns=None, exclude_patterns=None, reg_search=True): filtered_var_list = variables_lib2.filter_variables( self._var_list, include_patterns=include_patterns, exclude_patterns=exclude_patterns, reg_search=reg_search) filtered_var_names = [var.op.name for var in filtered_var_list] for name in filtered_var_names: self.assertIn(name, expected_var_names) for name in expected_var_names: self.assertIn(name, filtered_var_names) self.assertEqual(len(filtered_var_names), len(expected_var_names)) def testNoFiltering(self): self._test_filter_variables(expected_var_names=[ 'conv1/weights', 'conv1/biases', 'conv2/weights', 'conv2/biases', 'clfs/weights', 'clfs/biases' ]) def testIncludeBiases(self): 
self._test_filter_variables( expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'], include_patterns=['biases']) def testExcludeWeights(self): self._test_filter_variables( expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'], exclude_patterns=['weights']) def testExcludeWeightsAndConv1(self): self._test_filter_variables( expected_var_names=['conv2/biases', 'clfs/biases'], exclude_patterns=['weights', 'conv1']) def testTwoIncludePatternsEnsureNoVariablesTwiceInFilteredList(self): self._test_filter_variables( expected_var_names=[ 'conv1/weights', 'conv1/biases', 'conv2/weights', 'clfs/weights' ], include_patterns=['conv1', 'weights']) def testIncludeConv1ExcludeBiases(self): self._test_filter_variables( expected_var_names=['conv1/weights'], include_patterns=['conv1'], exclude_patterns=['biases']) def testRegMatchIncludeBiases(self): self._test_filter_variables( expected_var_names=['conv1/biases', 'conv2/biases', 'clfs/biases'], include_patterns=['.*biases'], reg_search=False) def testRegMatchIncludeBiasesWithIncompleteRegExpHasNoMatches(self): self._test_filter_variables( expected_var_names=[], include_patterns=['biases'], reg_search=False) if __name__ == '__main__': test.main()
unknown
codeparrot/codeparrot-clean
# Copyright 2016 Rackspace Inc. # # Author: Tim Simmons <tim.simmons@rackspace.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg WORKER_GROUP = cfg.OptGroup( name='service:worker', title="Configuration for the Worker Service" ) WORKER_OPTS = [ cfg.IntOpt('workers', help='Number of Worker worker processes to spawn'), cfg.IntOpt('threads', default=200, help='Number of Worker threads to spawn per process'), # cfg.ListOpt('enabled_tasks', # help='Enabled tasks to run'), cfg.StrOpt('storage_driver', default='sqlalchemy', help='The storage driver to use'), cfg.IntOpt('threshold-percentage', default=100, help='The percentage of servers requiring a successful update ' 'for a domain change to be considered active'), cfg.IntOpt('poll_timeout', default=30, help='The time to wait for a response from a server'), cfg.IntOpt('poll_retry_interval', default=15, help='The time between retrying to send a request and ' 'waiting for a response from a server'), cfg.IntOpt('poll_max_retries', default=10, help='The maximum number of times to retry sending a request ' 'and wait for a response from a server'), cfg.IntOpt('poll_delay', default=5, help='The time to wait before sending the first request ' 'to a server'), cfg.BoolOpt('notify', default=True, deprecated_for_removal=True, deprecated_reason='This option is being removed to reduce ' 'complexity', help='Whether to allow worker to send NOTIFYs, this will ' 'noop NOTIFYs in mdns if true'), 
cfg.BoolOpt('export_synchronous', default=True, help='Whether to allow synchronous zone exports'), cfg.StrOpt('topic', default='worker', help='RPC topic name for worker'), ] def register_opts(conf): conf.register_group(WORKER_GROUP) conf.register_opts(WORKER_OPTS, group=WORKER_GROUP) def list_opts(): return { WORKER_GROUP: WORKER_OPTS, }
unknown
codeparrot/codeparrot-clean
#ifndef Py_BLTINMODULE_H #define Py_BLTINMODULE_H #ifdef __cplusplus extern "C" { #endif PyAPI_DATA(PyTypeObject) PyFilter_Type; PyAPI_DATA(PyTypeObject) PyMap_Type; PyAPI_DATA(PyTypeObject) PyZip_Type; #ifdef __cplusplus } #endif #endif /* !Py_BLTINMODULE_H */
c
github
https://github.com/python/cpython
Include/bltinmodule.h
from __future__ import print_function, division from sympy import zeros, Matrix, diff, solve_linear_system_LU, eye from sympy.core.compatibility import range from sympy.utilities import default_sort_key from sympy.physics.vector import (ReferenceFrame, dynamicsymbols, partial_velocity) from sympy.physics.mechanics.particle import Particle from sympy.physics.mechanics.rigidbody import RigidBody from sympy.physics.mechanics.functions import (msubs, find_dynamicsymbols, _f_list_parser) from sympy.physics.mechanics.linearize import Linearizer from sympy.utilities.exceptions import SymPyDeprecationWarning from sympy.utilities.iterables import iterable __all__ = ['KanesMethod'] class KanesMethod(object): """Kane's method object. This object is used to do the "book-keeping" as you go through and form equations of motion in the way Kane presents in: Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill The attributes are for equations in the form [M] udot = forcing. Attributes ========== q, u : Matrix Matrices of the generalized coordinates and speeds bodylist : iterable Iterable of Point and RigidBody objects in the system. forcelist : iterable Iterable of (Point, vector) or (ReferenceFrame, vector) tuples describing the forces on the system. auxiliary : Matrix If applicable, the set of auxiliary Kane's equations used to solve for non-contributing forces. mass_matrix : Matrix The system's mass matrix forcing : Matrix The system's forcing vector mass_matrix_full : Matrix The "mass matrix" for the u's and q's forcing_full : Matrix The "forcing vector" for the u's and q's Examples ======== This is a simple example for a one degree of freedom translational spring-mass-damper. In this example, we first need to do the kinematics. This involves creating generalized speeds and coordinates and their derivatives. Then we create a point and set its velocity in a frame. 
>>> from sympy import symbols >>> from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame >>> from sympy.physics.mechanics import Point, Particle, KanesMethod >>> q, u = dynamicsymbols('q u') >>> qd, ud = dynamicsymbols('q u', 1) >>> m, c, k = symbols('m c k') >>> N = ReferenceFrame('N') >>> P = Point('P') >>> P.set_vel(N, u * N.x) Next we need to arrange/store information in the way that KanesMethod requires. The kinematic differential equations need to be stored in a dict. A list of forces/torques must be constructed, where each entry in the list is a (Point, Vector) or (ReferenceFrame, Vector) tuple, where the Vectors represent the Force or Torque. Next a particle needs to be created, and it needs to have a point and mass assigned to it. Finally, a list of all bodies and particles needs to be created. >>> kd = [qd - u] >>> FL = [(P, (-k * q - c * u) * N.x)] >>> pa = Particle('pa', P, m) >>> BL = [pa] Finally we can generate the equations of motion. First we create the KanesMethod object and supply an inertial frame, coordinates, generalized speeds, and the kinematic differential equations. Additional quantities such as configuration and motion constraints, dependent coordinates and speeds, and auxiliary speeds are also supplied here (see the online documentation). Next we form FR* and FR to complete: Fr + Fr* = 0. We have the equations of motion at this point. It makes sense to rearrnge them though, so we calculate the mass matrix and the forcing terms, for E.o.M. in the form: [MM] udot = forcing, where MM is the mass matrix, udot is a vector of the time derivatives of the generalized speeds, and forcing is a vector representing "forcing" terms. 
>>> KM = KanesMethod(N, q_ind=[q], u_ind=[u], kd_eqs=kd) >>> (fr, frstar) = KM.kanes_equations(BL, FL) >>> MM = KM.mass_matrix >>> forcing = KM.forcing >>> rhs = MM.inv() * forcing >>> rhs Matrix([[(-c*u(t) - k*q(t))/m]]) >>> KM.linearize(A_and_B=True, new_method=True)[0] Matrix([ [ 0, 1], [-k/m, -c/m]]) Please look at the documentation pages for more information on how to perform linearization and how to deal with dependent coordinates & speeds, and how do deal with bringing non-contributing forces into evidence. """ def __init__(self, frame, q_ind, u_ind, kd_eqs=None, q_dependent=None, configuration_constraints=None, u_dependent=None, velocity_constraints=None, acceleration_constraints=None, u_auxiliary=None): """Please read the online documentation. """ if not isinstance(frame, ReferenceFrame): raise TypeError('An intertial ReferenceFrame must be supplied') self._inertial = frame self._fr = None self._frstar = None self._forcelist = None self._bodylist = None self._initialize_vectors(q_ind, q_dependent, u_ind, u_dependent, u_auxiliary) self._initialize_kindiffeq_matrices(kd_eqs) self._initialize_constraint_matrices(configuration_constraints, velocity_constraints, acceleration_constraints) def _initialize_vectors(self, q_ind, q_dep, u_ind, u_dep, u_aux): """Initialize the coordinate and speed vectors.""" none_handler = lambda x: Matrix(x) if x else Matrix() # Initialize generalized coordinates q_dep = none_handler(q_dep) if not iterable(q_ind): raise TypeError('Generalized coordinates must be an iterable.') if not iterable(q_dep): raise TypeError('Dependent coordinates must be an iterable.') q_ind = Matrix(q_ind) self._qdep = q_dep self._q = Matrix([q_ind, q_dep]) self._qdot = self.q.diff(dynamicsymbols._t) # Initialize generalized speeds u_dep = none_handler(u_dep) if not iterable(u_ind): raise TypeError('Generalized speeds must be an iterable.') if not iterable(u_dep): raise TypeError('Dependent speeds must be an iterable.') u_ind = Matrix(u_ind) self._udep = 
u_dep self._u = Matrix([u_ind, u_dep]) self._udot = self.u.diff(dynamicsymbols._t) self._uaux = none_handler(u_aux) def _initialize_constraint_matrices(self, config, vel, acc): """Initializes constraint matrices.""" # Define vector dimensions o = len(self.u) m = len(self._udep) p = o - m none_handler = lambda x: Matrix(x) if x else Matrix() # Initialize configuration constraints config = none_handler(config) if len(self._qdep) != len(config): raise ValueError('There must be an equal number of dependent ' 'coordinates and configuration constraints.') self._f_h = none_handler(config) # Initialize velocity and acceleration constraints vel = none_handler(vel) acc = none_handler(acc) if len(vel) != m: raise ValueError('There must be an equal number of dependent ' 'speeds and velocity constraints.') if acc and (len(acc) != m): raise ValueError('There must be an equal number of dependent ' 'speeds and acceleration constraints.') if vel: u_zero = dict((i, 0) for i in self.u) udot_zero = dict((i, 0) for i in self._udot) # When calling kanes_equations, another class instance will be # created if auxiliary u's are present. In this case, the # computation of kinetic differential equation matrices will be # skipped as this was computed during the original KanesMethod # object, and the qd_u_map will not be available. if self._qdot_u_map is not None: vel = msubs(vel, self._qdot_u_map) self._f_nh = msubs(vel, u_zero) self._k_nh = (vel - self._f_nh).jacobian(self.u) # If no acceleration constraints given, calculate them. if not acc: self._f_dnh = (self._k_nh.diff(dynamicsymbols._t) * self.u + self._f_nh.diff(dynamicsymbols._t)) self._k_dnh = self._k_nh else: if self._qdot_u_map is not None: acc = msubs(acc, self._qdot_u_map) self._f_dnh = msubs(acc, udot_zero) self._k_dnh = (acc - self._f_dnh).jacobian(self._udot) # Form of non-holonomic constraints is B*u + C = 0. 
# We partition B into independent and dependent columns: # Ars is then -B_dep.inv() * B_ind, and it relates dependent speeds # to independent speeds as: udep = Ars*uind, neglecting the C term. B_ind = self._k_nh[:, :p] B_dep = self._k_nh[:, p:o] self._Ars = -B_dep.LUsolve(B_ind) else: self._f_nh = Matrix() self._k_nh = Matrix() self._f_dnh = Matrix() self._k_dnh = Matrix() self._Ars = Matrix() def _initialize_kindiffeq_matrices(self, kdeqs): """Initialize the kinematic differential equation matrices.""" if kdeqs: if len(self.q) != len(kdeqs): raise ValueError('There must be an equal number of kinematic ' 'differential equations and coordinates.') kdeqs = Matrix(kdeqs) u = self.u qdot = self._qdot # Dictionaries setting things to zero u_zero = dict((i, 0) for i in u) uaux_zero = dict((i, 0) for i in self._uaux) qdot_zero = dict((i, 0) for i in qdot) f_k = msubs(kdeqs, u_zero, qdot_zero) k_ku = (msubs(kdeqs, qdot_zero) - f_k).jacobian(u) k_kqdot = (msubs(kdeqs, u_zero) - f_k).jacobian(qdot) f_k = k_kqdot.LUsolve(f_k) k_ku = k_kqdot.LUsolve(k_ku) k_kqdot = eye(len(qdot)) self._qdot_u_map = solve_linear_system_LU( Matrix([k_kqdot.T, -(k_ku * u + f_k).T]).T, qdot) self._f_k = msubs(f_k, uaux_zero) self._k_ku = msubs(k_ku, uaux_zero) self._k_kqdot = k_kqdot else: self._qdot_u_map = None self._f_k = Matrix() self._k_ku = Matrix() self._k_kqdot = Matrix() def _form_fr(self, fl): """Form the generalized active force.""" if fl != None and (len(fl) == 0 or not iterable(fl)): raise ValueError('Force pairs must be supplied in an ' 'non-empty iterable or None.') N = self._inertial # pull out relevant velocities for constructing partial velocities vel_list, f_list = _f_list_parser(fl, N) vel_list = [msubs(i, self._qdot_u_map) for i in vel_list] # Fill Fr with dot product of partial velocities and forces o = len(self.u) b = len(f_list) FR = zeros(o, 1) partials = partial_velocity(vel_list, self.u, N) for i in range(o): FR[i] = sum(partials[j][i] & f_list[j] for j in range(b)) # In 
case there are dependent speeds if self._udep: p = o - len(self._udep) FRtilde = FR[:p, 0] FRold = FR[p:o, 0] FRtilde += self._Ars.T * FRold FR = FRtilde self._forcelist = fl self._fr = FR return FR def _form_frstar(self, bl): """Form the generalized inertia force.""" if not iterable(bl): raise TypeError('Bodies must be supplied in an iterable.') t = dynamicsymbols._t N = self._inertial # Dicts setting things to zero udot_zero = dict((i, 0) for i in self._udot) uaux_zero = dict((i, 0) for i in self._uaux) uauxdot = [diff(i, t) for i in self._uaux] uauxdot_zero = dict((i, 0) for i in uauxdot) # Dictionary of q' and q'' to u and u' q_ddot_u_map = dict((k.diff(t), v.diff(t)) for (k, v) in self._qdot_u_map.items()) q_ddot_u_map.update(self._qdot_u_map) # Fill up the list of partials: format is a list with num elements # equal to number of entries in body list. Each of these elements is a # list - either of length 1 for the translational components of # particles or of length 2 for the translational and rotational # components of rigid bodies. The inner most list is the list of # partial velocities. 
def get_partial_velocity(body): if isinstance(body, RigidBody): vlist = [body.masscenter.vel(N), body.frame.ang_vel_in(N)] elif isinstance(body, Particle): vlist = [body.point.vel(N),] else: raise TypeError('The body list may only contain either ' 'RigidBody or Particle as list elements.') v = [msubs(vel, self._qdot_u_map) for vel in vlist] return partial_velocity(v, self.u, N) partials = [get_partial_velocity(body) for body in bl] # Compute fr_star in two components: # fr_star = -(MM*u' + nonMM) o = len(self.u) MM = zeros(o, o) nonMM = zeros(o, 1) zero_uaux = lambda expr: msubs(expr, uaux_zero) zero_udot_uaux = lambda expr: msubs(msubs(expr, udot_zero), uaux_zero) for i, body in enumerate(bl): if isinstance(body, RigidBody): M = zero_uaux(body.mass) I = zero_uaux(body.central_inertia) vel = zero_uaux(body.masscenter.vel(N)) omega = zero_uaux(body.frame.ang_vel_in(N)) acc = zero_udot_uaux(body.masscenter.acc(N)) inertial_force = (M.diff(t) * vel + M * acc) inertial_torque = zero_uaux((I.dt(body.frame) & omega) + msubs(I & body.frame.ang_acc_in(N), udot_zero) + (omega ^ (I & omega))) for j in range(o): tmp_vel = zero_uaux(partials[i][0][j]) tmp_ang = zero_uaux(I & partials[i][1][j]) for k in range(o): # translational MM[j, k] += M * (tmp_vel & partials[i][0][k]) # rotational MM[j, k] += (tmp_ang & partials[i][1][k]) nonMM[j] += inertial_force & partials[i][0][j] nonMM[j] += inertial_torque & partials[i][1][j] else: M = zero_uaux(body.mass) vel = zero_uaux(body.point.vel(N)) acc = zero_udot_uaux(body.point.acc(N)) inertial_force = (M.diff(t) * vel + M * acc) for j in range(o): temp = zero_uaux(partials[i][0][j]) for k in range(o): MM[j, k] += M * (temp & partials[i][0][k]) nonMM[j] += inertial_force & partials[i][0][j] # Compose fr_star out of MM and nonMM MM = zero_uaux(msubs(MM, q_ddot_u_map)) nonMM = msubs(msubs(nonMM, q_ddot_u_map), udot_zero, uauxdot_zero, uaux_zero) fr_star = -(MM * msubs(Matrix(self._udot), uauxdot_zero) + nonMM) # If there are dependent 
speeds, we need to find fr_star_tilde if self._udep: p = o - len(self._udep) fr_star_ind = fr_star[:p, 0] fr_star_dep = fr_star[p:o, 0] fr_star = fr_star_ind + (self._Ars.T * fr_star_dep) # Apply the same to MM MMi = MM[:p, :] MMd = MM[p:o, :] MM = MMi + (self._Ars.T * MMd) self._bodylist = bl self._frstar = fr_star self._k_d = MM self._f_d = -msubs(self._fr + self._frstar, udot_zero) return fr_star def to_linearizer(self): """Returns an instance of the Linearizer class, initiated from the data in the KanesMethod class. This may be more desirable than using the linearize class method, as the Linearizer object will allow more efficient recalculation (i.e. about varying operating points).""" if (self._fr is None) or (self._frstar is None): raise ValueError('Need to compute Fr, Fr* first.') # Get required equation components. The Kane's method class breaks # these into pieces. Need to reassemble f_c = self._f_h if self._f_nh and self._k_nh: f_v = self._f_nh + self._k_nh*Matrix(self.u) else: f_v = Matrix() if self._f_dnh and self._k_dnh: f_a = self._f_dnh + self._k_dnh*Matrix(self._udot) else: f_a = Matrix() # Dicts to sub to zero, for splitting up expressions u_zero = dict((i, 0) for i in self.u) ud_zero = dict((i, 0) for i in self._udot) qd_zero = dict((i, 0) for i in self._qdot) qd_u_zero = dict((i, 0) for i in Matrix([self._qdot, self.u])) # Break the kinematic differential eqs apart into f_0 and f_1 f_0 = msubs(self._f_k, u_zero) + self._k_kqdot*Matrix(self._qdot) f_1 = msubs(self._f_k, qd_zero) + self._k_ku*Matrix(self.u) # Break the dynamic differential eqs into f_2 and f_3 f_2 = msubs(self._frstar, qd_u_zero) f_3 = msubs(self._frstar, ud_zero) + self._fr f_4 = zeros(len(f_2), 1) # Get the required vector components q = self.q u = self.u if self._qdep: q_i = q[:-len(self._qdep)] else: q_i = q q_d = self._qdep if self._udep: u_i = u[:-len(self._udep)] else: u_i = u u_d = self._udep # Form dictionary to set auxiliary speeds & their derivatives to 0. 
uaux = self._uaux uauxdot = uaux.diff(dynamicsymbols._t) uaux_zero = dict((i, 0) for i in Matrix([uaux, uauxdot])) # Checking for dynamic symbols outside the dynamic differential # equations; throws error if there is. sym_list = set(Matrix([q, self._qdot, u, self._udot, uaux, uauxdot])) if any(find_dynamicsymbols(i, sym_list) for i in [self._k_kqdot, self._k_ku, self._f_k, self._k_dnh, self._f_dnh, self._k_d]): raise ValueError('Cannot have dynamicsymbols outside dynamic \ forcing vector.') # Find all other dynamic symbols, forming the forcing vector r. # Sort r to make it canonical. r = list(find_dynamicsymbols(msubs(self._f_d, uaux_zero), sym_list)) r.sort(key=default_sort_key) # Check for any derivatives of variables in r that are also found in r. for i in r: if diff(i, dynamicsymbols._t) in r: raise ValueError('Cannot have derivatives of specified \ quantities when linearizing forcing terms.') return Linearizer(f_0, f_1, f_2, f_3, f_4, f_c, f_v, f_a, q, u, q_i, q_d, u_i, u_d, r) def linearize(self, **kwargs): """ Linearize the equations of motion about a symbolic operating point. If kwarg A_and_B is False (default), returns M, A, B, r for the linearized form, M*[q', u']^T = A*[q_ind, u_ind]^T + B*r. If kwarg A_and_B is True, returns A, B, r for the linearized form dx = A*x + B*r, where x = [q_ind, u_ind]^T. Note that this is computationally intensive if there are many symbolic parameters. For this reason, it may be more desirable to use the default A_and_B=False, returning M, A, and B. Values may then be substituted in to these matrices, and the state space form found as A = P.T*M.inv()*A, B = P.T*M.inv()*B, where P = Linearizer.perm_mat. In both cases, r is found as all dynamicsymbols in the equations of motion that are not part of q, u, q', or u'. They are sorted in canonical form. The operating points may be also entered using the ``op_point`` kwarg. This takes a dictionary of {symbol: value}, or a an iterable of such dictionaries. 
The values may be numberic or symbolic. The more values you can specify beforehand, the faster this computation will run. As part of the deprecation cycle, the new method will not be used unless the kwarg ``new_method`` is set to True. If the kwarg is missing, or set to false, the old linearization method will be used. After next release the need for this kwarg will be removed. For more documentation, please see the ``Linearizer`` class.""" if 'new_method' not in kwargs or not kwargs['new_method']: # User is still using old code. SymPyDeprecationWarning('The linearize class method has changed ' 'to a new interface, the old method is deprecated. To ' 'use the new method, set the kwarg `new_method=True`. ' 'For more information, read the docstring ' 'of `linearize`.').warn() return self._old_linearize() # Remove the new method flag, before passing kwargs to linearize kwargs.pop('new_method') linearizer = self.to_linearizer() result = linearizer.linearize(**kwargs) return result + (linearizer.r,) def _old_linearize(self): """Old method to linearize the equations of motion. Returns a tuple of (f_lin_A, f_lin_B, y) for forming [M]qudot = [f_lin_A]qu + [f_lin_B]y. Deprecated in favor of new method using Linearizer class. Please change your code to use the new `linearize` method.""" if (self._fr is None) or (self._frstar is None): raise ValueError('Need to compute Fr, Fr* first.') # Note that this is now unneccessary, and it should never be # encountered; I still think it should be in here in case the user # manually sets these matrices incorrectly. 
for i in self.q: if self._k_kqdot.diff(i) != 0 * self._k_kqdot: raise ValueError('Matrix K_kqdot must not depend on any q.') t = dynamicsymbols._t uaux = self._uaux uauxdot = [diff(i, t) for i in uaux] # dictionary of auxiliary speeds & derivatives which are equal to zero subdict = dict(zip(uaux[:] + uauxdot[:], [0] * (len(uaux) + len(uauxdot)))) # Checking for dynamic symbols outside the dynamic differential # equations; throws error if there is. insyms = set(self.q[:] + self._qdot[:] + self.u[:] + self._udot[:] + uaux[:] + uauxdot) if any(find_dynamicsymbols(i, insyms) for i in [self._k_kqdot, self._k_ku, self._f_k, self._k_dnh, self._f_dnh, self._k_d]): raise ValueError('Cannot have dynamicsymbols outside dynamic \ forcing vector.') other_dyns = list(find_dynamicsymbols(msubs(self._f_d, subdict), insyms)) # make it canonically ordered so the jacobian is canonical other_dyns.sort(key=default_sort_key) for i in other_dyns: if diff(i, dynamicsymbols._t) in other_dyns: raise ValueError('Cannot have derivatives of specified ' 'quantities when linearizing forcing terms.') o = len(self.u) # number of speeds n = len(self.q) # number of coordinates l = len(self._qdep) # number of configuration constraints m = len(self._udep) # number of motion constraints qi = Matrix(self.q[: n - l]) # independent coords qd = Matrix(self.q[n - l: n]) # dependent coords; could be empty ui = Matrix(self.u[: o - m]) # independent speeds ud = Matrix(self.u[o - m: o]) # dependent speeds; could be empty qdot = Matrix(self._qdot) # time derivatives of coordinates # with equations in the form MM udot = forcing, expand that to: # MM_full [q,u].T = forcing_full. This combines coordinates and # speeds together for the linearization, which is necessary for the # linearization process, due to dependent coordinates. f1 is the rows # from the kinematic differential equations, f2 is the rows from the # dynamic differential equations (and differentiated non-holonomic # constraints). 
f1 = self._k_ku * Matrix(self.u) + self._f_k f2 = self._f_d # Only want to do this if these matrices have been filled in, which # occurs when there are dependent speeds if m != 0: f2 = self._f_d.col_join(self._f_dnh) fnh = self._f_nh + self._k_nh * Matrix(self.u) f1 = msubs(f1, subdict) f2 = msubs(f2, subdict) fh = msubs(self._f_h, subdict) fku = msubs(self._k_ku * Matrix(self.u), subdict) fkf = msubs(self._f_k, subdict) # In the code below, we are applying the chain rule by hand on these # things. All the matrices have been changed into vectors (by # multiplying the dynamic symbols which it is paired with), so we can # take the jacobian of them. The basic operation is take the jacobian # of the f1, f2 vectors wrt all of the q's and u's. f1 is a function of # q, u, and t; f2 is a function of q, qdot, u, and t. In the code # below, we are not considering perturbations in t. So if f1 is a # function of the q's, u's but some of the q's or u's could be # dependent on other q's or u's (qd's might be dependent on qi's, ud's # might be dependent on ui's or qi's), so what we do is take the # jacobian of the f1 term wrt qi's and qd's, the jacobian wrt the qd's # gets multiplied by the jacobian of qd wrt qi, this is extended for # the ud's as well. dqd_dqi is computed by taking a taylor expansion of # the holonomic constraint equations about q*, treating q* - q as dq, # separating into dqd (depedent q's) and dqi (independent q's) and the # rearranging for dqd/dqi. This is again extended for the speeds. 
# First case: configuration and motion constraints if (l != 0) and (m != 0): fh_jac_qi = fh.jacobian(qi) fh_jac_qd = fh.jacobian(qd) fnh_jac_qi = fnh.jacobian(qi) fnh_jac_qd = fnh.jacobian(qd) fnh_jac_ui = fnh.jacobian(ui) fnh_jac_ud = fnh.jacobian(ud) fku_jac_qi = fku.jacobian(qi) fku_jac_qd = fku.jacobian(qd) fku_jac_ui = fku.jacobian(ui) fku_jac_ud = fku.jacobian(ud) fkf_jac_qi = fkf.jacobian(qi) fkf_jac_qd = fkf.jacobian(qd) f1_jac_qi = f1.jacobian(qi) f1_jac_qd = f1.jacobian(qd) f1_jac_ui = f1.jacobian(ui) f1_jac_ud = f1.jacobian(ud) f2_jac_qi = f2.jacobian(qi) f2_jac_qd = f2.jacobian(qd) f2_jac_ui = f2.jacobian(ui) f2_jac_ud = f2.jacobian(ud) f2_jac_qdot = f2.jacobian(qdot) dqd_dqi = - fh_jac_qd.LUsolve(fh_jac_qi) dud_dqi = fnh_jac_ud.LUsolve(fnh_jac_qd * dqd_dqi - fnh_jac_qi) dud_dui = - fnh_jac_ud.LUsolve(fnh_jac_ui) dqdot_dui = - self._k_kqdot.inv() * (fku_jac_ui + fku_jac_ud * dud_dui) dqdot_dqi = - self._k_kqdot.inv() * (fku_jac_qi + fkf_jac_qi + (fku_jac_qd + fkf_jac_qd) * dqd_dqi + fku_jac_ud * dud_dqi) f1_q = f1_jac_qi + f1_jac_qd * dqd_dqi + f1_jac_ud * dud_dqi f1_u = f1_jac_ui + f1_jac_ud * dud_dui f2_q = (f2_jac_qi + f2_jac_qd * dqd_dqi + f2_jac_qdot * dqdot_dqi + f2_jac_ud * dud_dqi) f2_u = f2_jac_ui + f2_jac_ud * dud_dui + f2_jac_qdot * dqdot_dui # Second case: configuration constraints only elif l != 0: dqd_dqi = - fh.jacobian(qd).LUsolve(fh.jacobian(qi)) dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui) dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) + fkf.jacobian(qi) + (fku.jacobian(qd) + fkf.jacobian(qd)) * dqd_dqi) f1_q = (f1.jacobian(qi) + f1.jacobian(qd) * dqd_dqi) f1_u = f1.jacobian(ui) f2_jac_qdot = f2.jacobian(qdot) f2_q = (f2.jacobian(qi) + f2.jacobian(qd) * dqd_dqi + f2.jac_qdot * dqdot_dqi) f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui # Third case: motion constraints only elif m != 0: dud_dqi = fnh.jacobian(ud).LUsolve(- fnh.jacobian(qi)) dud_dui = - fnh.jacobian(ud).LUsolve(fnh.jacobian(ui)) dqdot_dui = - 
self._k_kqdot.inv() * (fku.jacobian(ui) + fku.jacobian(ud) * dud_dui) dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) + fkf.jacobian(qi) + fku.jacobian(ud) * dud_dqi) f1_jac_ud = f1.jacobian(ud) f2_jac_qdot = f2.jacobian(qdot) f2_jac_ud = f2.jacobian(ud) f1_q = f1.jacobian(qi) + f1_jac_ud * dud_dqi f1_u = f1.jacobian(ui) + f1_jac_ud * dud_dui f2_q = (f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi + f2_jac_ud * dud_dqi) f2_u = (f2.jacobian(ui) + f2_jac_ud * dud_dui + f2_jac_qdot * dqdot_dui) # Fourth case: No constraints else: dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui) dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) + fkf.jacobian(qi)) f1_q = f1.jacobian(qi) f1_u = f1.jacobian(ui) f2_jac_qdot = f2.jacobian(qdot) f2_q = f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui f_lin_A = -(f1_q.row_join(f1_u)).col_join(f2_q.row_join(f2_u)) if other_dyns: f1_oths = f1.jacobian(other_dyns) f2_oths = f2.jacobian(other_dyns) f_lin_B = -f1_oths.col_join(f2_oths) else: f_lin_B = Matrix() return (f_lin_A, f_lin_B, Matrix(other_dyns)) def kanes_equations(self, bodies, loads=None): """ Method to form Kane's equations, Fr + Fr* = 0. Returns (Fr, Fr*). In the case where auxiliary generalized speeds are present (say, s auxiliary speeds, o generalized speeds, and m motion constraints) the length of the returned vectors will be o - m + s in length. The first o - m equations will be the constrained Kane's equations, then the s auxiliary Kane's equations. These auxiliary equations can be accessed with the auxiliary_eqs(). Parameters ========== bodies : iterable An iterable of all RigidBody's and Particle's in the system. A system must have at least one body. loads : iterable Takes in an iterable of (Particle, Vector) or (ReferenceFrame, Vector) tuples which represent the force at a point or torque on a frame. Must be either a non-empty iterable of tuples or None which corresponds to a system with no constraints. 
""" if (bodies is None and loads != None) or isinstance(bodies[0], tuple): # This switches the order if they use the old way. bodies, loads = loads, bodies SymPyDeprecationWarning(value='The API for kanes_equations() has changed such ' 'that the loads (forces and torques) are now the second argument ' 'and is optional with None being the default.', feature='The kanes_equation() argument order', useinstead='switched argument order to update your code, For example: ' 'kanes_equations(loads, bodies) > kanes_equations(bodies, loads).', issue=10945, deprecated_since_version="1.1").warn() if not self._k_kqdot: raise AttributeError('Create an instance of KanesMethod with ' 'kinematic differential equations to use this method.') fr = self._form_fr(loads) frstar = self._form_frstar(bodies) if self._uaux: if not self._udep: km = KanesMethod(self._inertial, self.q, self._uaux, u_auxiliary=self._uaux) else: km = KanesMethod(self._inertial, self.q, self._uaux, u_auxiliary=self._uaux, u_dependent=self._udep, velocity_constraints=(self._k_nh * self.u + self._f_nh)) km._qdot_u_map = self._qdot_u_map self._km = km fraux = km._form_fr(loads) frstaraux = km._form_frstar(bodies) self._aux_eq = fraux + frstaraux self._fr = fr.col_join(fraux) self._frstar = frstar.col_join(frstaraux) return (self._fr, self._frstar) def rhs(self, inv_method=None): """Returns the system's equations of motion in first order form. The output is the right hand side of:: x' = |q'| =: f(q, u, r, p, t) |u'| The right hand side is what is needed by most numerical ODE integrators. Parameters ========== inv_method : str The specific sympy inverse matrix calculation method to use. 
For a list of valid methods, see :meth:`~sympy.matrices.matrices.MatrixBase.inv` """ rhs = zeros(len(self.q) + len(self.u), c=1) kdes = self.kindiffdict() for i, q_i in enumerate(self.q): rhs[i] = kdes[q_i.diff()] if inv_method is None: rhs[len(self.q):, 0] = self.mass_matrix.LUsolve(self.forcing) else: rhs[len(self.q):, 0] = (self.mass_matrix.inv(inv_method, try_block_diag=True) * self.forcing) return rhs def kindiffdict(self): """Returns a dictionary mapping q' to u.""" if not self._qdot_u_map: raise AttributeError('Create an instance of KanesMethod with ' 'kinematic differential equations to use this method.') return self._qdot_u_map @property def auxiliary_eqs(self): """A matrix containing the auxiliary equations.""" if not self._fr or not self._frstar: raise ValueError('Need to compute Fr, Fr* first.') if not self._uaux: raise ValueError('No auxiliary speeds have been declared.') return self._aux_eq @property def mass_matrix(self): """The mass matrix of the system.""" if not self._fr or not self._frstar: raise ValueError('Need to compute Fr, Fr* first.') return Matrix([self._k_d, self._k_dnh]) @property def mass_matrix_full(self): """The mass matrix of the system, augmented by the kinematic differential equations.""" if not self._fr or not self._frstar: raise ValueError('Need to compute Fr, Fr* first.') o = len(self.u) n = len(self.q) return ((self._k_kqdot).row_join(zeros(n, o))).col_join((zeros(o, n)).row_join(self.mass_matrix)) @property def forcing(self): """The forcing vector of the system.""" if not self._fr or not self._frstar: raise ValueError('Need to compute Fr, Fr* first.') return -Matrix([self._f_d, self._f_dnh]) @property def forcing_full(self): """The forcing vector of the system, augmented by the kinematic differential equations.""" if not self._fr or not self._frstar: raise ValueError('Need to compute Fr, Fr* first.') f1 = self._k_ku * Matrix(self.u) + self._f_k return -Matrix([f1, self._f_d, self._f_dnh]) @property def q(self): return self._q 
@property def u(self): return self._u @property def bodylist(self): return self._bodylist @property def forcelist(self): return self._forcelist
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from openerp import _, api, models class Issue(models.Model): _name = "project.issue" _inherit = ['project.issue'] @api.multi def get_access_action(self): """ Override method that generated the link to access the document. Instead of the classic form view, redirect to the post on the website directly """ self.ensure_one() return { 'type': 'ir.actions.act_url', 'url': '/my/issues/%s' % self.id, 'target': 'self', 'res_id': self.id, } @api.multi def _notification_get_recipient_groups(self, message, recipients): """ Override to set the access button: everyone can see an access button on their notification email. It will lead on the website view of the post. """ res = super(Issue, self)._notification_get_recipient_groups(message, recipients) access_action = self._notification_link_helper('view', model=message.model, res_id=message.res_id) for category, data in res.iteritems(): res[category]['button_access'] = {'url': access_action, 'title': _('View Issue')} return res
unknown
codeparrot/codeparrot-clean
import { test } from '../../test'; export default test({ html: ` Default <p>B slot</p> ` });
javascript
github
https://github.com/sveltejs/svelte
packages/svelte/tests/runtime-legacy/samples/component-svelte-fragment-nested/_config.js
#!/usr/bin/env python # encoding: utf-8 # pylint: disable=W0110, W0614, W0141, C0103 import sys import re import textwrap from fn import F from lib.parse_args import add_common_arguments from lib.d3_fn import cross_flist from lib.d3_io import load_datasets, save_datasets from lib.d3_utils import pct_change from math import * def get_argument_settings(): """@todo: Docstring for get_argument_settings. :returns: @todo """ description = textwrap.dedent(""" Joins two delimited files by common keys. """) epilog = textwrap.dedent(""" Examples: """) m_overrides = [] parser = add_common_arguments(m_overrides, description=description, epilog=epilog) parser.add_argument('-x', '--expresssion', dest='expression', default=None, required=True, action='append', help='XXX' 'XXX') return parser def main(): """@todo: Docstring for main. :returns: @todo """ def apply_add_column(x, expression): """@todo: Docstring for apply_add_column. :dataset: @todo :expression: @todo :returns: @todo """ x[expression[0]] = eval(expression[1]) return x parser = get_argument_settings() args = parser.parse_args() delimiter = args.delimiter expression_list = map(lambda x: re.split(r'\s*[:=]\s*', x, 1), re.split(r'\s*;\s*', ';'.join(args.expression))) # TODO: Apply built in functions here. eval_list = map(lambda x: (x[0], re.sub(r'<(\S+)>', "x['\\1']", re.sub(r'\[(\S+)\]', "x['\\1']", x[1]))), expression_list) input_files = args.input_files or [sys.stdin] data = map(F(load_datasets, delimiter), input_files) transformed = cross_flist(apply_add_column, data, eval_list) result = save_datasets(transformed, args.output_file, delimiter=delimiter) return result if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
""" This module houses the GEOS ctypes prototype functions for the topological operations on geometries. """ __all__ = ['geos_boundary', 'geos_buffer', 'geos_centroid', 'geos_convexhull', 'geos_difference', 'geos_envelope', 'geos_intersection', 'geos_linemerge', 'geos_pointonsurface', 'geos_preservesimplify', 'geos_simplify', 'geos_symdifference', 'geos_union', 'geos_relate'] from ctypes import c_char_p, c_double, c_int from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_string from django.contrib.gis.geos.prototypes.geom import geos_char_p from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc def topology(func, *args): "For GEOS unary topology functions." argtypes = [GEOM_PTR] if args: argtypes += args func.argtypes = argtypes func.restype = GEOM_PTR func.errcheck = check_geom return func ### Topology Routines ### geos_boundary = topology(GEOSFunc('GEOSBoundary')) geos_buffer = topology(GEOSFunc('GEOSBuffer'), c_double, c_int) geos_centroid = topology(GEOSFunc('GEOSGetCentroid')) geos_convexhull = topology(GEOSFunc('GEOSConvexHull')) geos_difference = topology(GEOSFunc('GEOSDifference'), GEOM_PTR) geos_envelope = topology(GEOSFunc('GEOSEnvelope')) geos_intersection = topology(GEOSFunc('GEOSIntersection'), GEOM_PTR) geos_linemerge = topology(GEOSFunc('GEOSLineMerge')) geos_pointonsurface = topology(GEOSFunc('GEOSPointOnSurface')) geos_preservesimplify = topology(GEOSFunc('GEOSTopologyPreserveSimplify'), c_double) geos_simplify = topology(GEOSFunc('GEOSSimplify'), c_double) geos_symdifference = topology(GEOSFunc('GEOSSymDifference'), GEOM_PTR) geos_union = topology(GEOSFunc('GEOSUnion'), GEOM_PTR) # GEOSRelate returns a string, not a geometry. 
geos_relate = GEOSFunc('GEOSRelate') geos_relate.argtypes = [GEOM_PTR, GEOM_PTR] geos_relate.restype = geos_char_p geos_relate.errcheck = check_string # Routines only in GEOS 3.1+ if GEOS_PREPARE: geos_cascaded_union = GEOSFunc('GEOSUnionCascaded') geos_cascaded_union.argtypes = [GEOM_PTR] geos_cascaded_union.restype = GEOM_PTR __all__.append('geos_cascaded_union')
unknown
codeparrot/codeparrot-clean
package kotlinx.coroutines.flow import kotlinx.coroutines.testing.* import kotlinx.coroutines.* import kotlinx.coroutines.channels.* import kotlin.math.* import kotlin.test.* /** * A _behavioral_ test for buffering that is introduced by the [buffer] operator to test that it is * implemented properly and that adjacent [buffer] calls are fused properly. */ class BufferTest : TestBase() { private val n = 200 // number of elements to emit for test private val defaultBufferSize = 64 // expected default buffer size (per docs) // Use capacity == -1 to check case of "no buffer" private fun checkBuffer(capacity: Int, op: suspend Flow<Int>.() -> Flow<Int>) = runTest { expect(1) /* Channels perform full rendezvous. Sender does not suspend when there is a suspended receiver and vice-versa. Thus, perceived batch size is +2 from capacity. */ val batchSize = capacity + 2 flow { repeat(n) { i -> val batchNo = i / batchSize val batchIdx = i % batchSize expect(batchNo * batchSize * 2 + batchIdx + 2) emit(i) } } .op() // insert user-defined operator .collect { i -> val batchNo = i / batchSize val batchIdx = i % batchSize // last batch might have smaller size val k = min((batchNo + 1) * batchSize, n) - batchNo * batchSize expect(batchNo * batchSize * 2 + k + batchIdx + 2) } finish(2 * n + 2) } @Test // capacity == -1 to checkBuffer means "no buffer" -- emits / collects are sequentially ordered fun testBaseline() = checkBuffer(-1) { this } @Test fun testBufferDefault() = checkBuffer(defaultBufferSize) { buffer() } @Test fun testBufferRendezvous() = checkBuffer(0) { buffer(0) } @Test fun testBuffer1() = checkBuffer(1) { buffer(1) } @Test fun testBuffer2() = checkBuffer(2) { buffer(2) } @Test fun testBuffer3() = checkBuffer(3) { buffer(3) } @Test fun testBuffer00Fused() = checkBuffer(0) { buffer(0).buffer(0) } @Test fun testBuffer01Fused() = checkBuffer(1) { buffer(0).buffer(1) } @Test fun testBuffer11Fused() = checkBuffer(2) { buffer(1).buffer(1) } @Test fun testBuffer111Fused() = 
checkBuffer(3) { buffer(1).buffer(1).buffer(1) } @Test fun testBuffer123Fused() = checkBuffer(6) { buffer(1).buffer(2).buffer(3) } @Test // multiple calls to buffer() create one channel of default size fun testBufferDefaultTwiceFused() = checkBuffer(defaultBufferSize) { buffer().buffer() } @Test // explicit buffer takes precedence of default buffer on fuse fun testBufferDefaultBufferFused() = checkBuffer(7) { buffer().buffer(7) } @Test // explicit buffer takes precedence of default buffer on fuse fun testBufferBufferDefaultFused() = checkBuffer(8) { buffer(8).buffer() } @Test // flowOn operator does not use buffer when dispatches does not change fun testFlowOnNameNoBuffer() = checkBuffer(-1) { flowOn(CoroutineName("Name")) } @Test // flowOn operator uses default buffer size when dispatcher changes fun testFlowOnDispatcherBufferDefault() = checkBuffer(defaultBufferSize) { flowOn(wrapperDispatcher()) } @Test // flowOn(...).buffer(n) sets explicit buffer size to n fun testFlowOnDispatcherBufferFused() = checkBuffer(5) { flowOn(wrapperDispatcher()).buffer(5) } @Test // buffer(n).flowOn(...) sets explicit buffer size to n fun testBufferFlowOnDispatcherFused() = checkBuffer(6) { buffer(6).flowOn(wrapperDispatcher()) } @Test // flowOn(...).buffer(n) sets explicit buffer size to n fun testFlowOnNameBufferFused() = checkBuffer(7) { flowOn(CoroutineName("Name")).buffer(7) } @Test // buffer(n).flowOn(...) 
sets explicit buffer size to n fun testBufferFlowOnNameFused() = checkBuffer(8) { buffer(8).flowOn(CoroutineName("Name")) } @Test // multiple flowOn/buffer all fused together fun testBufferFlowOnMultipleFused() = checkBuffer(12) { flowOn(wrapperDispatcher()).buffer(3) .flowOn(CoroutineName("Name")).buffer(4) .flowOn(wrapperDispatcher()).buffer(5) } @Test fun testCancellation() = runTest { val result = flow { emit(1) emit(2) emit(3) expectUnreached() emit(4) }.buffer(0) .take(2) .toList() assertEquals(listOf(1, 2), result) } @Test fun testFailsOnIllegalArguments() { val flow = emptyFlow<Int>() assertFailsWith<IllegalArgumentException> { flow.buffer(capacity = -3) } assertFailsWith<IllegalArgumentException> { flow.buffer(capacity = Int.MIN_VALUE) } assertFailsWith<IllegalArgumentException> { flow.buffer(capacity = Channel.CONFLATED, onBufferOverflow = BufferOverflow.DROP_LATEST) } assertFailsWith<IllegalArgumentException> { flow.buffer(capacity = Channel.CONFLATED, onBufferOverflow = BufferOverflow.DROP_OLDEST) } } }
kotlin
github
https://github.com/Kotlin/kotlinx.coroutines
kotlinx-coroutines-core/common/test/flow/operators/BufferTest.kt
#!/usr/bin/env python3 # Copyright (c) 2014-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the RBF code.""" from decimal import Decimal from test_framework.blocktools import COINBASE_MATURITY from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, BIP125_SEQUENCE_NUMBER from test_framework.script import CScript, OP_DROP from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, satoshi_round from test_framework.script_util import DUMMY_P2WPKH_SCRIPT, DUMMY_2_P2WPKH_SCRIPT from test_framework.wallet import MiniWallet MAX_REPLACEMENT_LIMIT = 100 def txToHex(tx): return tx.serialize().hex() def make_utxo(node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT): """Create a txout with a given amount and scriptPubKey Mines coins as needed. confirmed - txouts created will be confirmed in the blockchain; unconfirmed otherwise. """ fee = 1 * COIN while node.getbalance() < satoshi_round((amount + fee) / COIN): node.generate(COINBASE_MATURITY) new_addr = node.getnewaddress() txid = node.sendtoaddress(new_addr, satoshi_round((amount + fee) / COIN)) tx1 = node.getrawtransaction(txid, 1) txid = int(txid, 16) i, _ = next(filter(lambda vout: new_addr == vout[1]['scriptPubKey']['address'], enumerate(tx1['vout']))) tx2 = CTransaction() tx2.vin = [CTxIn(COutPoint(txid, i))] tx2.vout = [CTxOut(amount, scriptPubKey)] tx2.rehash() signed_tx = node.signrawtransactionwithwallet(txToHex(tx2)) txid = node.sendrawtransaction(signed_tx['hex'], 0) # If requested, ensure txouts are confirmed. if confirmed: mempool_size = len(node.getrawmempool()) while mempool_size > 0: node.generate(1) new_size = len(node.getrawmempool()) # Error out if we have something stuck in the mempool, as this # would likely be a bug. 
assert new_size < mempool_size mempool_size = new_size return COutPoint(int(txid, 16), 0) class ReplaceByFeeTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [ [ "-acceptnonstdtxn=1", "-maxorphantx=1000", "-limitancestorcount=50", "-limitancestorsize=101", "-limitdescendantcount=200", "-limitdescendantsize=101", ], ] self.supports_cli = False def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): make_utxo(self.nodes[0], 1 * COIN) # Ensure nodes are synced self.sync_all() self.log.info("Running test simple doublespend...") self.test_simple_doublespend() self.log.info("Running test doublespend chain...") self.test_doublespend_chain() self.log.info("Running test doublespend tree...") self.test_doublespend_tree() self.log.info("Running test replacement feeperkb...") self.test_replacement_feeperkb() self.log.info("Running test spends of conflicting outputs...") self.test_spends_of_conflicting_outputs() self.log.info("Running test new unconfirmed inputs...") self.test_new_unconfirmed_inputs() self.log.info("Running test too many replacements...") self.test_too_many_replacements() self.log.info("Running test opt-in...") self.test_opt_in() self.log.info("Running test RPC...") self.test_rpc() self.log.info("Running test prioritised transactions...") self.test_prioritised_transactions() self.log.info("Running test no inherited signaling...") self.test_no_inherited_signaling() self.log.info("Passed") def test_simple_doublespend(self): """Simple doublespend""" tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) # make_utxo may have generated a bunch of blocks, so we need to sync # before we can spend the coins generated, or else the resulting # transactions might not be accepted by our peers. 
self.sync_all() tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0) self.sync_all() # Should fail because we haven't changed the fee tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(1 * COIN, DUMMY_2_P2WPKH_SCRIPT)] tx1b_hex = txToHex(tx1b) # This will raise an exception due to insufficient fee assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0) # Extra 0.1 BTC fee tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)] tx1b_hex = txToHex(tx1b) # Works when enabled tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0) mempool = self.nodes[0].getrawmempool() assert tx1a_txid not in mempool assert tx1b_txid in mempool assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid)) def test_doublespend_chain(self): """Doublespend of a long chain""" initial_nValue = 50 * COIN tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) prevout = tx0_outpoint remaining_value = initial_nValue chain_txids = [] while remaining_value > 10 * COIN: remaining_value -= 1 * COIN tx = CTransaction() tx.vin = [CTxIn(prevout, nSequence=0)] tx.vout = [CTxOut(remaining_value, CScript([1, OP_DROP] * 15 + [1]))] tx_hex = txToHex(tx) txid = self.nodes[0].sendrawtransaction(tx_hex, 0) chain_txids.append(txid) prevout = COutPoint(int(txid, 16), 0) # Whether the double-spend is allowed is evaluated by including all # child fees - 40 BTC - so this attempt is rejected. 
dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - 30 * COIN, DUMMY_P2WPKH_SCRIPT)] dbl_tx_hex = txToHex(dbl_tx) # This will raise an exception due to insufficient fee assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0) # Accepted with sufficient fee dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] dbl_tx_hex = txToHex(dbl_tx) self.nodes[0].sendrawtransaction(dbl_tx_hex, 0) mempool = self.nodes[0].getrawmempool() for doublespent_txid in chain_txids: assert doublespent_txid not in mempool def test_doublespend_tree(self): """Doublespend of a big tree of transactions""" initial_nValue = 50 * COIN tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001 * COIN, _total_txs=None): if _total_txs is None: _total_txs = [0] if _total_txs[0] >= max_txs: return txout_value = (initial_value - fee) // tree_width if txout_value < fee: return vout = [CTxOut(txout_value, CScript([i+1])) for i in range(tree_width)] tx = CTransaction() tx.vin = [CTxIn(prevout, nSequence=0)] tx.vout = vout tx_hex = txToHex(tx) assert len(tx.serialize()) < 100000 txid = self.nodes[0].sendrawtransaction(tx_hex, 0) yield tx _total_txs[0] += 1 txid = int(txid, 16) for i, txout in enumerate(tx.vout): for x in branch(COutPoint(txid, i), txout_value, max_txs, tree_width=tree_width, fee=fee, _total_txs=_total_txs): yield x fee = int(0.0001 * COIN) n = MAX_REPLACEMENT_LIMIT tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) assert_equal(len(tree_txs), n) # Attempt double-spend, will fail because too little fee paid dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - fee * n, DUMMY_P2WPKH_SCRIPT)] dbl_tx_hex = txToHex(dbl_tx) # This will raise an exception due to insufficient fee 
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0) # 1 BTC fee is enough dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - fee * n - 1 * COIN, DUMMY_P2WPKH_SCRIPT)] dbl_tx_hex = txToHex(dbl_tx) self.nodes[0].sendrawtransaction(dbl_tx_hex, 0) mempool = self.nodes[0].getrawmempool() for tx in tree_txs: tx.rehash() assert tx.hash not in mempool # Try again, but with more total transactions than the "max txs # double-spent at once" anti-DoS limit. for n in (MAX_REPLACEMENT_LIMIT + 1, MAX_REPLACEMENT_LIMIT * 2): fee = int(0.0001 * COIN) tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) assert_equal(len(tree_txs), n) dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - 2 * fee * n, DUMMY_P2WPKH_SCRIPT)] dbl_tx_hex = txToHex(dbl_tx) # This will raise an exception assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0) for tx in tree_txs: tx.rehash() self.nodes[0].getrawtransaction(tx.hash) def test_replacement_feeperkb(self): """Replacement requires fee-per-KB to be higher""" tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] tx1a_hex = txToHex(tx1a) self.nodes[0].sendrawtransaction(tx1a_hex, 0) # Higher fee, but the fee per KB is much lower, so the replacement is # rejected. 
tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 999000]))] tx1b_hex = txToHex(tx1b) # This will raise an exception due to insufficient fee assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0) def test_spends_of_conflicting_outputs(self): """Replacements that spend conflicting tx outputs are rejected""" utxo1 = make_utxo(self.nodes[0], int(1.2 * COIN)) utxo2 = make_utxo(self.nodes[0], 3 * COIN) tx1a = CTransaction() tx1a.vin = [CTxIn(utxo1, nSequence=0)] tx1a.vout = [CTxOut(int(1.1 * COIN), DUMMY_P2WPKH_SCRIPT)] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0) tx1a_txid = int(tx1a_txid, 16) # Direct spend an output of the transaction we're replacing. tx2 = CTransaction() tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)] tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)) tx2.vout = tx1a.vout tx2_hex = txToHex(tx2) # This will raise an exception assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0) # Spend tx1a's output to test the indirect case. 
tx1b = CTransaction() tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] tx1b.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] tx1b_hex = txToHex(tx1b) tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0) tx1b_txid = int(tx1b_txid, 16) tx2 = CTransaction() tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0), CTxIn(COutPoint(tx1b_txid, 0))] tx2.vout = tx1a.vout tx2_hex = txToHex(tx2) # This will raise an exception assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0) def test_new_unconfirmed_inputs(self): """Replacements that add new unconfirmed inputs are rejected""" confirmed_utxo = make_utxo(self.nodes[0], int(1.1 * COIN)) unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1 * COIN), False) tx1 = CTransaction() tx1.vin = [CTxIn(confirmed_utxo)] tx1.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] tx1_hex = txToHex(tx1) self.nodes[0].sendrawtransaction(tx1_hex, 0) tx2 = CTransaction() tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)] tx2.vout = tx1.vout tx2_hex = txToHex(tx2) # This will raise an exception assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, 0) def test_too_many_replacements(self): """Replacements that evict too many transactions are rejected""" # Try directly replacing more than MAX_REPLACEMENT_LIMIT # transactions # Start by creating a single transaction with many outputs initial_nValue = 10 * COIN utxo = make_utxo(self.nodes[0], initial_nValue) fee = int(0.0001 * COIN) split_value = int((initial_nValue - fee) / (MAX_REPLACEMENT_LIMIT + 1)) outputs = [] for _ in range(MAX_REPLACEMENT_LIMIT + 1): outputs.append(CTxOut(split_value, CScript([1]))) splitting_tx = CTransaction() splitting_tx.vin = [CTxIn(utxo, nSequence=0)] splitting_tx.vout = outputs splitting_tx_hex = txToHex(splitting_tx) txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, 0) txid = int(txid, 16) # Now spend each of those outputs individually 
for i in range(MAX_REPLACEMENT_LIMIT + 1): tx_i = CTransaction() tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)] tx_i.vout = [CTxOut(split_value - fee, DUMMY_P2WPKH_SCRIPT)] tx_i_hex = txToHex(tx_i) self.nodes[0].sendrawtransaction(tx_i_hex, 0) # Now create doublespend of the whole lot; should fail. # Need a big enough fee to cover all spending transactions and have # a higher fee rate double_spend_value = (split_value - 100 * fee) * (MAX_REPLACEMENT_LIMIT + 1) inputs = [] for i in range(MAX_REPLACEMENT_LIMIT + 1): inputs.append(CTxIn(COutPoint(txid, i), nSequence=0)) double_tx = CTransaction() double_tx.vin = inputs double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] double_tx_hex = txToHex(double_tx) # This will raise an exception assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, 0) # If we remove an input, it should pass double_tx = CTransaction() double_tx.vin = inputs[0:-1] double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] double_tx_hex = txToHex(double_tx) self.nodes[0].sendrawtransaction(double_tx_hex, 0) def test_opt_in(self): """Replacing should only work if orig tx opted in""" tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) # Create a non-opting in transaction tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)] tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0) # This transaction isn't shown as replaceable assert_equal(self.nodes[0].getmempoolentry(tx1a_txid)['bip125-replaceable'], False) # Shouldn't be able to double-spend tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)] tx1b_hex = txToHex(tx1b) # This will raise an exception assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, 0) tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * 
COIN)) # Create a different non-opting in transaction tx2a = CTransaction() tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)] tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] tx2a_hex = txToHex(tx2a) tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, 0) # Still shouldn't be able to double-spend tx2b = CTransaction() tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)] tx2b_hex = txToHex(tx2b) # This will raise an exception assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, 0) # Now create a new transaction that spends from tx1a and tx2a # opt-in on one of the inputs # Transaction should be replaceable on either input tx1a_txid = int(tx1a_txid, 16) tx2a_txid = int(tx2a_txid, 16) tx3a = CTransaction() tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff), CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)] tx3a.vout = [CTxOut(int(0.9 * COIN), CScript([b'c'])), CTxOut(int(0.9 * COIN), CScript([b'd']))] tx3a_hex = txToHex(tx3a) tx3a_txid = self.nodes[0].sendrawtransaction(tx3a_hex, 0) # This transaction is shown as replaceable assert_equal(self.nodes[0].getmempoolentry(tx3a_txid)['bip125-replaceable'], True) tx3b = CTransaction() tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] tx3b.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)] tx3b_hex = txToHex(tx3b) tx3c = CTransaction() tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)] tx3c.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)] tx3c_hex = txToHex(tx3c) self.nodes[0].sendrawtransaction(tx3b_hex, 0) # If tx3b was accepted, tx3c won't look like a replacement, # but make sure it is accepted anyway self.nodes[0].sendrawtransaction(tx3c_hex, 0) def test_prioritised_transactions(self): # Ensure that fee deltas used via prioritisetransaction are # correctly used by replacement logic # 1. 
Check that feeperkb uses modified fees tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0) # Higher fee, but the actual fee per KB is much lower. tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 740000]))] tx1b_hex = txToHex(tx1b) # Verify tx1b cannot replace tx1a. assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0) # Use prioritisetransaction to set tx1a's fee to 0. self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1 * COIN)) # Now tx1b should be able to replace tx1a tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0) assert tx1b_txid in self.nodes[0].getrawmempool() # 2. Check that absolute fee checks use modified fee. tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) tx2a = CTransaction() tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] tx2a_hex = txToHex(tx2a) self.nodes[0].sendrawtransaction(tx2a_hex, 0) # Lower fee, but we'll prioritise it tx2b = CTransaction() tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2b.vout = [CTxOut(int(1.01 * COIN), DUMMY_P2WPKH_SCRIPT)] tx2b.rehash() tx2b_hex = txToHex(tx2b) # Verify tx2b cannot replace tx2a. 
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, 0) # Now prioritise tx2b to have a higher modified fee self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1 * COIN)) # tx2b should now be accepted tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, 0) assert tx2b_txid in self.nodes[0].getrawmempool() def test_rpc(self): us0 = self.nodes[0].listunspent()[0] ins = [us0] outs = {self.nodes[0].getnewaddress(): Decimal(1.0000000)} rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True) rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False) json0 = self.nodes[0].decoderawtransaction(rawtx0) json1 = self.nodes[0].decoderawtransaction(rawtx1) assert_equal(json0["vin"][0]["sequence"], 4294967293) assert_equal(json1["vin"][0]["sequence"], 4294967295) rawtx2 = self.nodes[0].createrawtransaction([], outs) frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True}) frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False}) json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex']) json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex']) assert_equal(json0["vin"][0]["sequence"], 4294967293) assert_equal(json1["vin"][0]["sequence"], 4294967294) def test_no_inherited_signaling(self): wallet = MiniWallet(self.nodes[0]) wallet.scan_blocks(start=76, num=1) confirmed_utxo = wallet.get_utxo() # Create an explicitly opt-in parent transaction optin_parent_tx = wallet.send_self_transfer( from_node=self.nodes[0], utxo_to_spend=confirmed_utxo, sequence=BIP125_SEQUENCE_NUMBER, fee_rate=Decimal('0.01'), ) assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable']) replacement_parent_tx = wallet.create_self_transfer( from_node=self.nodes[0], utxo_to_spend=confirmed_utxo, sequence=BIP125_SEQUENCE_NUMBER, fee_rate=Decimal('0.02'), ) # Test if parent tx can be replaced. 
res = self.nodes[0].testmempoolaccept(rawtxs=[replacement_parent_tx['hex']])[0] # Parent can be replaced. assert_equal(res['allowed'], True) # Create an opt-out child tx spending the opt-in parent parent_utxo = wallet.get_utxo(txid=optin_parent_tx['txid']) optout_child_tx = wallet.send_self_transfer( from_node=self.nodes[0], utxo_to_spend=parent_utxo, sequence=0xffffffff, fee_rate=Decimal('0.01'), ) # Reports true due to inheritance assert_equal(True, self.nodes[0].getmempoolentry(optout_child_tx['txid'])['bip125-replaceable']) replacement_child_tx = wallet.create_self_transfer( from_node=self.nodes[0], utxo_to_spend=parent_utxo, sequence=0xffffffff, fee_rate=Decimal('0.02'), mempool_valid=False, ) # Broadcast replacement child tx # BIP 125 : # 1. The original transactions signal replaceability explicitly or through inheritance as described in the above # Summary section. # The original transaction (`optout_child_tx`) doesn't signal RBF but its parent (`optin_parent_tx`) does. # The replacement transaction (`replacement_child_tx`) should be able to replace the original transaction. # See CVE-2021-31876 for further explanations. assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable']) assert_raises_rpc_error(-26, 'txn-mempool-conflict', self.nodes[0].sendrawtransaction, replacement_child_tx["hex"], 0) if __name__ == '__main__': ReplaceByFeeTest().main()
unknown
codeparrot/codeparrot-clean
// MODULE: dependency // MODULE_KIND: Source // FILE: dependency.kt fun f<caret>oo() { } // MODULE: app(dependency) // MODULE_KIND: Source // USE_SITE_MODULE // FILE: app.kt fun bar() { }
kotlin
github
https://github.com/JetBrains/kotlin
analysis/analysis-api/testData/components/analysisScopeProvider/canBeAnalysed/dependencySourceElement_appSourceModule.kt
//// [tests/cases/conformance/expressions/binaryOperators/additionOperator/additionOperatorWithUndefinedValueAndInvalidOperands.ts] //// //// [additionOperatorWithUndefinedValueAndInvalidOperands.ts] // If one operand is the null or undefined value, it is treated as having the type of the other operand. function foo(): void { return undefined } declare var a: boolean; declare var b: Object; declare var c: void; declare var d: Number; // undefined + boolean/Object var r1 = undefined + a; var r2 = undefined + b; var r3 = undefined + c; var r4 = a + undefined; var r5 = b + undefined; var r6 = undefined + c; // other cases var r7 = undefined + d; var r8 = undefined + true; var r9 = undefined + { a: '' }; var r10 = undefined + foo(); var r11 = undefined + (() => { }); //// [additionOperatorWithUndefinedValueAndInvalidOperands.js] "use strict"; // If one operand is the null or undefined value, it is treated as having the type of the other operand. function foo() { return undefined; } // undefined + boolean/Object var r1 = undefined + a; var r2 = undefined + b; var r3 = undefined + c; var r4 = a + undefined; var r5 = b + undefined; var r6 = undefined + c; // other cases var r7 = undefined + d; var r8 = undefined + true; var r9 = undefined + { a: '' }; var r10 = undefined + foo(); var r11 = undefined + (() => { });
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/additionOperatorWithUndefinedValueAndInvalidOperands.js
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) %YAML 1.2 --- $id: http://devicetree.org/schemas/cpufreq/mediatek,mt8196-cpufreq-hw.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: MediaTek Hybrid CPUFreq for MT8196/MT6991 series SoCs maintainers: - Nicolas Frattaroli <nicolas.frattaroli@collabora.com> description: MT8196 uses CPUFreq management hardware that supports dynamic voltage frequency scaling (dvfs), and can support several performance domains. properties: compatible: const: mediatek,mt8196-cpufreq-hw reg: items: - description: FDVFS control register region - description: OPP tables and control for performance domain 0 - description: OPP tables and control for performance domain 1 - description: OPP tables and control for performance domain 2 "#performance-domain-cells": const: 1 required: - compatible - reg - "#performance-domain-cells" additionalProperties: false examples: - | cpus { #address-cells = <1>; #size-cells = <0>; cpu0: cpu@0 { device_type = "cpu"; compatible = "arm,cortex-a720"; enable-method = "psci"; performance-domains = <&performance 0>; reg = <0x000>; }; /* ... */ cpu6: cpu@600 { device_type = "cpu"; compatible = "arm,cortex-x4"; enable-method = "psci"; performance-domains = <&performance 1>; reg = <0x600>; }; cpu7: cpu@700 { device_type = "cpu"; compatible = "arm,cortex-x925"; enable-method = "psci"; performance-domains = <&performance 2>; reg = <0x700>; }; }; /* ... */ soc { #address-cells = <2>; #size-cells = <2>; performance: performance-controller@c2c2034 { compatible = "mediatek,mt8196-cpufreq-hw"; reg = <0 0xc220400 0 0x20>, <0 0xc2c0f20 0 0x120>, <0 0xc2c1040 0 0x120>, <0 0xc2c1160 0 0x120>; #performance-domain-cells = <1>; }; };
unknown
github
https://github.com/torvalds/linux
Documentation/devicetree/bindings/cpufreq/mediatek,mt8196-cpufreq-hw.yaml
#! /usr/bin/env python # -*- coding: utf-8 -*- # Last Change: Tue Jul 17 05:00 PM 2007 J # The code and descriptive text is copyrighted and offered under the terms of # the BSD License from the authors; see below. However, the actual dataset may # have a different origin and intellectual property status. See the SOURCE and # COPYRIGHT variables for this information. # Copyright (c) 2007 David Cournapeau <cournape@gmail.com> # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the author nor the names of any contributors may be used # to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
__all__ = ['COPYRIGHT','TITLE','SOURCE','DESCRSHORT','DESCRLONG','NOTE', 'load'] """US Macroeconomic data.""" __docformat__ = 'restructuredtext' COPYRIGHT = """This is public domain.""" TITLE = "" SOURCE = """ Compiled by Skipper Seabold. All data are from the Federal Reserve Bank of St. Louis [1] except the unemployment rate which was taken from the National Bureau of Labor Statistics [2]. [1] Data Source: FRED, Federal Reserve Economic Data, Federal Reserve Bank of St. Louis; http://research.stlouisfed.org/fred2/; accessed December 15, 2009. [2] Data Source: Bureau of Labor Statistics, U.S. Department of Labor; http://www.bls.gov/data/; accessed December 15, 2009. """ DESCRSHORT = """US Macroeconomic Data for 1959Q1 - 2009Q3""" DESCRLONG = DESCRSHORT NOTE = """ Number of Observations: 203 Number of Variables: 14 Variable name definitions: year - 1959q1 - 2009q3 quarter - 1-4 realgdp - Real gross domestic product (Bil. of chained 2005 US$, seasonally adjusted annual rate) realcons - Real personal consumption expenditures (Bil. of chained 2005 US$, seasonally adjusted annual rate) realinv - Real gross private domestic investment (Bil. of chained 2005 US$, seasonally adjusted annual rate) realgovt - Real federal consumption expenditures & gross investment (Bil. of chained 2005 US$, seasonally adjusted annual rate) realdpi - Real gross private domestic investment (Bil. of chained 2005 US$, seasonally adjusted annual rate) cpi - End of the quarter consumer price index for all urban consumers: all items (1982-84 = 100, seasonally adjusted). m1 - End of the quarter M1 nominal money stock (Seasonally adjusted) tbilrate - Quarterly monthly average of the monthly 3-month treasury bill: secondary market rate unemp - Seasonally adjusted unemployment rate (%) pop - End of the quarter total population: all ages incl. 
armed forces over seas infl - Inflation rate (ln(cpi_{t}/cpi_{t-1}) * 400) realint - Real interest rate (tbilrate - infl) """ from numpy import recfromtxt, column_stack, array from scikits.statsmodels.datasets import Dataset from os.path import dirname, abspath def load(): """ Load the US macro data and return a Dataset class. Returns ------- Dataset instance: See DATASET_PROPOSAL.txt for more information. Notes ----- The macrodata Dataset instance does not contain endog and exog attributes. """ filepath = dirname(abspath(__file__)) data = recfromtxt(filepath + '/macrodata.csv', delimiter=",", names=True, dtype=float) names = data.dtype.names dataset = Dataset(data=data, names=names) return dataset
unknown
codeparrot/codeparrot-clean
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Project.repo_type' db.add_column('projects_project', 'repo_type', self.gf('django.db.models.fields.CharField')(default='git', max_length=10), keep_default=False) def backwards(self, orm): # Deleting field 'Project.repo_type' db.delete_column('projects_project', 'repo_type') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 
'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'projects.file': { 'Meta': {'ordering': "('denormalized_path',)", 'object_name': 'File'}, 'content': ('django.db.models.fields.TextField', [], {}), 'denormalized_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'heading': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ordering': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['projects.File']"}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['projects.Project']"}), 'slug': ('django.db.models.fields.SlugField', [], 
{'max_length': '50', 'db_index': 'True'}), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}) }, 'projects.filerevision': { 'Meta': {'ordering': "('-revision_number',)", 'object_name': 'FileRevision'}, 'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'diff': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['projects.File']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_reverted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'revision_number': ('django.db.models.fields.IntegerField', [], {}) }, 'projects.project': { 'Meta': {'ordering': "('-modified_date', 'name')", 'object_name': 'Project'}, 'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'docs_directory': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'extensions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 
'max_length': '10'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}), 'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}), 'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'to': "orm['auth.User']"}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) } } complete_apps = ['projects']
unknown
codeparrot/codeparrot-clean
//// [tests/cases/compiler/ParameterList6.ts] //// //// [ParameterList6.ts] class C { constructor(C: (public A) => any) { } } //// [ParameterList6.js] "use strict"; class C { constructor(C) { } }
javascript
github
https://github.com/microsoft/TypeScript
tests/baselines/reference/ParameterList6.js
/* * Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors. * Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file. */ package org.jetbrains.kotlin.analysis.api.impl.base.symbols.pointers import org.jetbrains.kotlin.analysis.api.KaImplementationDetail import org.jetbrains.kotlin.analysis.api.symbols.KaSymbolLocation import kotlin.reflect.KClass @KaImplementationDetail class KaCannotCreateSymbolPointerForLocalLibraryDeclarationException(identifier: String) : IllegalStateException("Could not create a symbol pointer for local symbol $identifier") { constructor(klass: KClass<*>) : this(klass.java.simpleName) } @KaImplementationDetail class KaUnsupportedSymbolLocation(identifier: String, location: KaSymbolLocation) : IllegalStateException( "For symbol with kind = KaSymbolLocation.${location.name} was $identifier" ) { constructor(clazz: KClass<*>, location: KaSymbolLocation) : this(clazz.java.simpleName, location) }
kotlin
github
https://github.com/JetBrains/kotlin
analysis/analysis-api-impl-base/src/org/jetbrains/kotlin/analysis/api/impl/base/symbols/pointers/exceptions.kt
'use strict'; const { join } = require('node:path'); const common = require('../common.js'); const tmpdir = require('../../test/common/tmpdir'); let cnt = 0; tmpdir.refresh(); function nextLocalStorage() { return join(tmpdir.path, `${++cnt}.localstorage`); } const options = { flags: [`--localstorage-file=${nextLocalStorage()}`], }; const bench = common.createBenchmark(main, { type: ['localStorage-setItem', 'localStorage-setter', 'sessionStorage-setItem', 'sessionStorage-setter'], // Note: web storage has only 10mb quota n: [1e5], }, options); function main({ n, type }) { const localStorage = globalThis.localStorage; const sessionStorage = globalThis.sessionStorage; switch (type) { case 'localStorage-setItem': bench.start(); for (let i = 0; i < n; i++) { localStorage.setItem(i, i); } bench.end(n); break; case 'localStorage-setter': bench.start(); for (let i = 0; i < n; i++) { localStorage[i] = i; } bench.end(n); break; case 'sessionStorage-setItem': bench.start(); for (let i = 0; i < n; i++) { sessionStorage.setItem(i, i); } bench.end(n); break; case 'sessionStorage-setter': bench.start(); for (let i = 0; i < n; i++) { sessionStorage[i] = i; } bench.end(n); break; default: new Error('Invalid type'); } }
javascript
github
https://github.com/nodejs/node
benchmark/webstorage/setItem.js
#!/usr/bin/env python # # Use the raw transactions API to spend bitcoins received on particular addresses, # and send any change back to that same address. # # Example usage: # spendfrom.py # Lists available funds # spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00 # # Assumes it will talk to a bitcoind or Bitcoin-Qt running # on localhost. # # Depends on jsonrpc # from decimal import * import getpass import math import os import os.path import platform import sys import time from jsonrpc import ServiceProxy, json BASE_FEE=Decimal("0.001") def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def determine_db_dir(): """Return the default location of the bitcoin data directory""" if platform.system() == "Darwin": return os.path.expanduser("~/Library/Application Support/Bitcoin/") elif platform.system() == "Windows": return os.path.join(os.environ['APPDATA'], "Bitcoin") return os.path.expanduser("~/.bitcoin") def read_bitcoin_config(dbdir): """Read the bitcoin.conf file from dbdir, returns dictionary of settings""" from ConfigParser import SafeConfigParser class FakeSecHead(object): def __init__(self, fp): self.fp = fp self.sechead = '[all]\n' def readline(self): if self.sechead: try: return self.sechead finally: self.sechead = None else: s = self.fp.readline() if s.find('#') != -1: s = s[0:s.find('#')].strip() +"\n" return s config_parser = SafeConfigParser() config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf")))) return dict(config_parser.items("all")) def connect_JSON(config): """Connect to a bitcoin JSON-RPC server""" testnet = config.get('testnet', '0') testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False if not 'rpcport' in config: config['rpcport'] = 19546 if testnet else 9546 
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport']) try: result = ServiceProxy(connect) # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors, # but also make sure the bitcoind we're talking to is/isn't testnet: if result.getmininginfo()['testnet'] != testnet: sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n") sys.exit(1) return result except: sys.stderr.write("Error connecting to RPC server at "+connect+"\n") sys.exit(1) def unlock_wallet(bitcoind): info = bitcoind.getinfo() if 'unlocked_until' not in info: return True # wallet is not encrypted t = int(info['unlocked_until']) if t <= time.time(): try: passphrase = getpass.getpass("Wallet is locked; enter passphrase: ") bitcoind.walletpassphrase(passphrase, 5) except: sys.stderr.write("Wrong passphrase\n") info = bitcoind.getinfo() return int(info['unlocked_until']) > time.time() def list_available(bitcoind): address_summary = dict() address_to_account = dict() for info in bitcoind.listreceivedbyaddress(0): address_to_account[info["address"]] = info["account"] unspent = bitcoind.listunspent(0) for output in unspent: # listunspent doesn't give addresses, so: rawtx = bitcoind.getrawtransaction(output['txid'], 1) vout = rawtx["vout"][output['vout']] pk = vout["scriptPubKey"] # This code only deals with ordinary pay-to-bitcoin-address # or pay-to-script-hash outputs right now; anything exotic is ignored. 
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash": continue address = pk["addresses"][0] if address in address_summary: address_summary[address]["total"] += vout["value"] address_summary[address]["outputs"].append(output) else: address_summary[address] = { "total" : vout["value"], "outputs" : [output], "account" : address_to_account.get(address, "") } return address_summary def select_coins(needed, inputs): # Feel free to improve this, this is good enough for my simple needs: outputs = [] have = Decimal("0.0") n = 0 while have < needed and n < len(inputs): outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]}) have += inputs[n]["amount"] n += 1 return (outputs, have-needed) def create_tx(bitcoind, fromaddresses, toaddress, amount, fee): all_coins = list_available(bitcoind) total_available = Decimal("0.0") needed = amount+fee potential_inputs = [] for addr in fromaddresses: if addr not in all_coins: continue potential_inputs.extend(all_coins[addr]["outputs"]) total_available += all_coins[addr]["total"] if total_available < needed: sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed)); sys.exit(1) # # Note: # Python's json/jsonrpc modules have inconsistent support for Decimal numbers. # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode # Decimals, I'm casting amounts to float before sending them to bitcoind. 
# outputs = { toaddress : float(amount) } (inputs, change_amount) = select_coins(needed, potential_inputs) if change_amount > BASE_FEE: # don't bother with zero or tiny change change_address = fromaddresses[-1] if change_address in outputs: outputs[change_address] += float(change_amount) else: outputs[change_address] = float(change_amount) rawtx = bitcoind.createrawtransaction(inputs, outputs) signed_rawtx = bitcoind.signrawtransaction(rawtx) if not signed_rawtx["complete"]: sys.stderr.write("signrawtransaction failed\n") sys.exit(1) txdata = signed_rawtx["hex"] return txdata def compute_amount_in(bitcoind, txinfo): result = Decimal("0.0") for vin in txinfo['vin']: in_info = bitcoind.getrawtransaction(vin['txid'], 1) vout = in_info['vout'][vin['vout']] result = result + vout['value'] return result def compute_amount_out(txinfo): result = Decimal("0.0") for vout in txinfo['vout']: result = result + vout['value'] return result def sanity_test_fee(bitcoind, txdata_hex, max_fee): class FeeError(RuntimeError): pass try: txinfo = bitcoind.decoderawtransaction(txdata_hex) total_in = compute_amount_in(bitcoind, txinfo) total_out = compute_amount_out(txinfo) if total_in-total_out > max_fee: raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out)) tx_size = len(txdata_hex)/2 kb = tx_size/1000 # integer division rounds down if kb > 1 and fee < BASE_FEE: raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes") if total_in < 0.01 and fee < BASE_FEE: raise FeeError("Rejecting no-fee, tiny-amount transaction") # Exercise for the reader: compute transaction priority, and # warn if this is a very-low-priority transaction except FeeError as err: sys.stderr.write((str(err)+"\n")) sys.exit(1) def main(): import optparse parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--from", dest="fromaddresses", default=None, help="addresses to get bitcoins from") parser.add_option("--to", dest="to", default=None, help="address to 
get send bitcoins to") parser.add_option("--amount", dest="amount", default=None, help="amount to send") parser.add_option("--fee", dest="fee", default="0.0", help="fee to include") parser.add_option("--datadir", dest="datadir", default=determine_db_dir(), help="location of bitcoin.conf file with RPC username/password (default: %default)") parser.add_option("--testnet", dest="testnet", default=False, action="store_true", help="Use the test network") parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true", help="Don't broadcast the transaction, just create and print the transaction data") (options, args) = parser.parse_args() check_json_precision() config = read_bitcoin_config(options.datadir) if options.testnet: config['testnet'] = True bitcoind = connect_JSON(config) if options.amount is None: address_summary = list_available(bitcoind) for address,info in address_summary.iteritems(): n_transactions = len(info['outputs']) if n_transactions > 1: print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions)) else: print("%s %.8f %s"%(address, info['total'], info['account'])) else: fee = Decimal(options.fee) amount = Decimal(options.amount) while unlock_wallet(bitcoind) == False: pass # Keep asking for passphrase until they get it right txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee) sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01")) if options.dry_run: print(txdata) else: txid = bitcoind.sendrawtransaction(txdata) print(txid) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
import logging from flask import jsonify, url_for from betterapis.app import api, db from betterapis.models import Talk def get(talk_id): # find the talk by id or return a 404 talk = Talk.query.get_or_404(talk_id) return talk.dump() def post(talk): # create a new talk from the post data new_talk = Talk(**talk) # save the new talk to the database db.session.add(new_talk) db.session.commit() # find the url of the new talk new_talk_url = url_for('.betterapis_controllers_talks_get', talk_id=new_talk.id) # build a 201 Created response with a location header and empty body response = jsonify(talk_id=new_talk.id) response.status_code = 201 response.headers = {'Location': new_talk_url} return response def put(talk_id, talk): orig_talk = Talk.query.get_or_404(talk_id) orig_talk.update(**talk) db.session.add(orig_talk) db.session.commit() updated_talk = Talk.query.get_or_404(talk_id) return updated_talk.dump() def delete(talk_id): talk = Talk.query.get_or_404(talk_id) db.session.delete(talk) db.session.commit() return jsonify() def search(): talks = Talk.query.all() talks_response = [talk.dump() for talk in talks] return jsonify(talks_response)
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.replica; import org.apache.kafka.common.Node; import java.util.Comparator; import java.util.Objects; /** * View of a replica used by {@link ReplicaSelector} to determine a preferred replica. */ public interface ReplicaView { /** * The endpoint information for this replica (hostname, port, rack, etc) */ Node endpoint(); /** * The log end offset for this replica */ long logEndOffset(); /** * The number of milliseconds (if any) since the last time this replica was caught up to the high watermark. * For a leader replica, this is always zero. */ long timeSinceLastCaughtUpMs(); /** * Comparator for ReplicaView that returns in the order of "most caught up". This is used for deterministic * selection of a replica when there is a tie from a selector. 
*/ static Comparator<ReplicaView> comparator() { return Comparator.comparingLong(ReplicaView::logEndOffset) .thenComparing(Comparator.comparingLong(ReplicaView::timeSinceLastCaughtUpMs).reversed()) .thenComparing(replicaInfo -> replicaInfo.endpoint().id()); } class DefaultReplicaView implements ReplicaView { private final Node endpoint; private final long logEndOffset; private final long timeSinceLastCaughtUpMs; public DefaultReplicaView(Node endpoint, long logEndOffset, long timeSinceLastCaughtUpMs) { this.endpoint = endpoint; this.logEndOffset = logEndOffset; this.timeSinceLastCaughtUpMs = timeSinceLastCaughtUpMs; } @Override public Node endpoint() { return endpoint; } @Override public long logEndOffset() { return logEndOffset; } @Override public long timeSinceLastCaughtUpMs() { return timeSinceLastCaughtUpMs; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; DefaultReplicaView that = (DefaultReplicaView) o; return logEndOffset == that.logEndOffset && Objects.equals(endpoint, that.endpoint) && Objects.equals(timeSinceLastCaughtUpMs, that.timeSinceLastCaughtUpMs); } @Override public int hashCode() { return Objects.hash(endpoint, logEndOffset, timeSinceLastCaughtUpMs); } @Override public String toString() { return "DefaultReplicaView{" + "endpoint=" + endpoint + ", logEndOffset=" + logEndOffset + ", timeSinceLastCaughtUpMs=" + timeSinceLastCaughtUpMs + '}'; } } }
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/common/replica/ReplicaView.java
<?php namespace Illuminate\Testing\Constraints; use Illuminate\Database\Connection; use PHPUnit\Framework\Constraint\Constraint; class NotSoftDeletedInDatabase extends Constraint { /** * Number of records that will be shown in the console in case of failure. * * @var int */ protected $show = 3; /** * The database connection. * * @var \Illuminate\Database\Connection */ protected $database; /** * The data that will be used to narrow the search in the database table. * * @var array */ protected $data; /** * The name of the column that indicates soft deletion has occurred. * * @var string */ protected $deletedAtColumn; /** * Create a new constraint instance. * * @param \Illuminate\Database\Connection $database * @param array $data * @param string $deletedAtColumn */ public function __construct(Connection $database, array $data, string $deletedAtColumn) { $this->database = $database; $this->data = $data; $this->deletedAtColumn = $deletedAtColumn; } /** * Check if the data is found in the given table. * * @param string $table * @return bool */ public function matches($table): bool { return $this->database->table($table) ->where($this->data) ->whereNull($this->deletedAtColumn) ->exists(); } /** * Get the description of the failure. * * @param string $table * @return string */ public function failureDescription($table): string { return sprintf( "any existing row in the table [%s] matches the attributes %s.\n\n%s", $table, $this->toString(), $this->getAdditionalInfo($table) ); } /** * Get additional info about the records found in the database table. 
* * @param string $table * @return string */ protected function getAdditionalInfo($table) { $query = $this->database->table($table); $results = $query->limit($this->show)->get(); if ($results->isEmpty()) { return 'The table is empty'; } $description = 'Found: '.json_encode($results, JSON_PRETTY_PRINT); if ($query->count() > $this->show) { $description .= sprintf(' and %s others', $query->count() - $this->show); } return $description; } /** * Get a string representation of the object. * * @return string */ public function toString(): string { return json_encode($this->data); } }
php
github
https://github.com/laravel/framework
src/Illuminate/Testing/Constraints/NotSoftDeletedInDatabase.php
#! /usr/bin/env python3

"""Freeze a Python script into a binary.

usage: freeze [options...] script [module]...

Options:
-p prefix:    This is the prefix used when you ran ``make install''
              in the Python build directory.
              (If you never ran this, freeze won't work.)
              The default is whatever sys.prefix evaluates to.
              It can also be the top directory
              of the Python source tree; then -P must point to
              the build tree.

-P exec_prefix: Like -p but this is the 'exec_prefix', used to
                install objects etc.  The default is whatever sys.exec_prefix
                evaluates to, or the -p argument if given.
                If -p points to the Python source tree, -P must point
                to the build tree, if different.

-e extension: A directory containing additional .o files that
              may be used to resolve modules.  This directory
              should also have a Setup file describing the .o files.
              On Windows, the name of a .INI file describing one
              or more extensions is passed.
              More than one -e option may be given.

-o dir:       Directory where the output files are created; default '.'.

-m:           Additional arguments are module names instead of filenames.

-a package=dir: Additional directories to be added to the package's
                __path__.  Used to simulate directories added by the
                package at runtime (eg, by OpenGL and win32com).
                More than one -a option may be given for each package.

-l file:      Pass the file to the linker (windows only)

-d:           Debugging mode for the module finder.

-q:           Make the module finder totally quiet.

-h:           Print this help message.

-x module     Exclude the specified module. It will still be imported
              by the frozen binary if it exists on the host system.

-X module     Like -x, except the module can never be imported by
              the frozen binary.

-E:           Freeze will fail if any modules can't be found (that
              were not excluded using -x or -X).

-i filename:  Include a file with additional command line options.  Used
              to prevent command lines growing beyond the capabilities of
              the shell/OS.  All arguments specified in filename
              are read and the -i option replaced with the parsed
              params (note - quoting args in this file is NOT supported)

-s subsystem: Specify the subsystem (For Windows only.);
              'console' (default), 'windows', 'service' or 'com_dll'

-w:           Toggle Windows (NT or 95) behavior.
              (For debugging only -- on a win32 platform, win32 behavior
              is automatic.)

-r prefix=f:  Replace path prefix.
              Replace prefix with f in the source path references
              contained in the resulting binary.

Arguments:

script:       The Python script to be executed by the resulting binary.

module ...:   Additional Python modules (referenced by pathname)
              that will be included in the resulting binary.  These
              may be .py or .pyc files.  If -m is specified, these are
              module names that are search in the path instead.

NOTES:

In order to use freeze successfully, you must have built Python and
installed it ("make install").

The script should not use modules provided only as shared libraries;
if it does, the resulting binary is not self-contained.
"""


# Import standard modules

import modulefinder
import getopt
import os
import sys


# Import the freeze-private modules

import checkextensions
import makeconfig
import makefreeze
import makemakefile
import parsesetup
import bkfile


# Main program

def main():
    """Parse the command line, collect the script's modules and emit the
    frozen.c/config.c/Makefile (or Windows equivalents) needed to build
    a self-contained binary."""

    # overridable context
    prefix = None                       # settable with -p option
    exec_prefix = None                  # settable with -P option
    extensions = []
    exclude = []                        # settable with -x option
    addn_link = []      # settable with -l, but only honored under Windows.
    path = sys.path[:]
    modargs = 0
    debug = 1
    odir = ''
    win = sys.platform[:3] == 'win'
    replace_paths = []                  # settable with -r option
    error_if_any_missing = 0

    # default the exclude list for each platform
    if win:
        exclude = exclude + [
            'dos', 'dospath', 'mac', 'macpath', 'macfs', 'MACFS', 'posix',
            ]

    fail_import = exclude[:]

    # output files
    frozen_c = 'frozen.c'
    config_c = 'config.c'
    target = 'a.out'                    # normally derived from script name
    makefile = 'Makefile'
    subsystem = 'console'

    # parse command line by first replacing any "-i" options with the
    # file contents.
    pos = 1
    while pos < len(sys.argv)-1:
        # last option can not be "-i", so this ensures "pos+1" is in range!
        if sys.argv[pos] == '-i':
            try:
                # BUGFIX: close the options file instead of leaking the
                # file object.
                with open(sys.argv[pos+1]) as opts_file:
                    options = opts_file.read().split()
            except IOError as why:
                usage("File name '%s' specified with the -i option "
                      "can not be read - %s" % (sys.argv[pos+1], why) )
            # Replace the '-i' and the filename with the read params.
            sys.argv[pos:pos+2] = options
            pos = pos + len(options) - 1 # Skip the name and the included args.
        pos = pos + 1

    # Now parse the command line with the extras inserted.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'r:a:dEe:hmo:p:P:qs:wX:x:l:')
    except getopt.error as msg:
        usage('getopt error: ' + str(msg))

    # process option arguments
    for o, a in opts:
        if o == '-h':
            print(__doc__)
            return
        if o == '-d':
            debug = debug + 1
        if o == '-e':
            extensions.append(a)
        if o == '-m':
            modargs = 1
        if o == '-o':
            odir = a
        if o == '-p':
            prefix = a
        if o == '-P':
            exec_prefix = a
        if o == '-q':
            debug = 0
        if o == '-w':
            win = not win
        if o == '-s':
            if not win:
                usage("-s subsystem option only on Windows")
            subsystem = a
        if o == '-x':
            exclude.append(a)
        if o == '-X':
            exclude.append(a)
            fail_import.append(a)
        if o == '-E':
            error_if_any_missing = 1
        if o == '-l':
            addn_link.append(a)
        if o == '-a':
            modulefinder.AddPackagePath(*a.split("=", 2))
        if o == '-r':
            f,r = a.split("=", 2)
            replace_paths.append( (f,r) )

    # modules that are imported by the Python runtime
    implicits = []
    for module in ('site', 'warnings', 'encodings.utf_8', 'encodings.latin_1'):
        if module not in exclude:
            implicits.append(module)

    # default prefix and exec_prefix
    if not exec_prefix:
        if prefix:
            exec_prefix = prefix
        else:
            exec_prefix = sys.exec_prefix
    if not prefix:
        prefix = sys.prefix

    # determine whether -p points to the Python source tree
    ishome = os.path.exists(os.path.join(prefix, 'Python', 'ceval.c'))

    # locations derived from options
    version = '%d.%d' % sys.version_info[:2]
    flagged_version = version + sys.abiflags
    if win:
        extensions_c = 'frozen_extensions.c'
    if ishome:
        print("(Using Python source directory)")
        binlib = exec_prefix
        incldir = os.path.join(prefix, 'Include')
        config_h_dir = exec_prefix
        config_c_in = os.path.join(prefix, 'Modules', 'config.c.in')
        frozenmain_c = os.path.join(prefix, 'Python', 'frozenmain.c')
        makefile_in = os.path.join(exec_prefix, 'Makefile')
        if win:
            frozendllmain_c = os.path.join(exec_prefix, 'Pc\\frozen_dllmain.c')
    else:
        binlib = os.path.join(exec_prefix,
                              'lib', 'python%s' % version,
                              'config-%s' % flagged_version)
        incldir = os.path.join(prefix, 'include', 'python%s' % flagged_version)
        config_h_dir = os.path.join(exec_prefix, 'include',
                                    'python%s' % flagged_version)
        config_c_in = os.path.join(binlib, 'config.c.in')
        frozenmain_c = os.path.join(binlib, 'frozenmain.c')
        makefile_in = os.path.join(binlib, 'Makefile')
        frozendllmain_c = os.path.join(binlib, 'frozen_dllmain.c')
    supp_sources = []
    defines = []
    includes = ['-I' + incldir, '-I' + config_h_dir]

    # sanity check of directories and files
    check_dirs = [prefix, exec_prefix, binlib, incldir]
    if not win:
        # These are not directories on Windows.
        check_dirs = check_dirs + extensions
    for dir in check_dirs:
        if not os.path.exists(dir):
            usage('needed directory %s not found' % dir)
        if not os.path.isdir(dir):
            usage('%s: not a directory' % dir)
    if win:
        files = supp_sources + extensions # extensions are files on Windows.
    else:
        files = [config_c_in, makefile_in] + supp_sources
    # BUGFIX: iterate over the just-computed `files` list (which includes
    # config.c.in and the Makefile template), not `supp_sources` (always
    # empty here) -- otherwise these existence checks never run.
    for file in files:
        if not os.path.exists(file):
            usage('needed file %s not found' % file)
        if not os.path.isfile(file):
            usage('%s: not a plain file' % file)
    if not win:
        for dir in extensions:
            setup = os.path.join(dir, 'Setup')
            if not os.path.exists(setup):
                usage('needed file %s not found' % setup)
            if not os.path.isfile(setup):
                usage('%s: not a plain file' % setup)

    # check that enough arguments are passed
    if not args:
        usage('at least one filename argument required')

    # check that file arguments exist
    for arg in args:
        if arg == '-m':
            break
        # if user specified -m on the command line before _any_
        # file names, then nothing should be checked (as the
        # very first file should be a module name)
        if modargs:
            break
        if not os.path.exists(arg):
            usage('argument %s not found' % arg)
        if not os.path.isfile(arg):
            usage('%s: not a plain file' % arg)

    # process non-option arguments
    scriptfile = args[0]
    modules = args[1:]

    # derive target name from script name
    base = os.path.basename(scriptfile)
    base, ext = os.path.splitext(base)
    if base:
        if base != scriptfile:
            target = base
        else:
            target = base + '.bin'

    # handle -o option
    base_frozen_c = frozen_c
    base_config_c = config_c
    base_target = target
    if odir and not os.path.isdir(odir):
        try:
            os.mkdir(odir)
            print("Created output directory", odir)
        except OSError as msg:
            usage('%s: mkdir failed (%s)' % (odir, str(msg)))
    base = ''
    if odir:
        base = os.path.join(odir, '')
        frozen_c = os.path.join(odir, frozen_c)
        config_c = os.path.join(odir, config_c)
        target = os.path.join(odir, target)
        makefile = os.path.join(odir, makefile)
        if win:
            extensions_c = os.path.join(odir, extensions_c)

    # Handle special entry point requirements
    # (on Windows, some frozen programs do not use __main__, but
    # import the module directly.  Eg, DLLs, Services, etc
    custom_entry_point = None  # Currently only used on Windows
    python_entry_is_main = 1   # Is the entry point called __main__?
    # handle -s option on Windows
    if win:
        import winmakemakefile
        try:
            custom_entry_point, python_entry_is_main = \
                winmakemakefile.get_custom_entry_point(subsystem)
        except ValueError as why:
            usage(why)

    # Actual work starts here...

    # collect all modules of the program
    dir = os.path.dirname(scriptfile)
    path[0] = dir
    mf = modulefinder.ModuleFinder(path, debug, exclude, replace_paths)

    if win and subsystem=='service':
        # If a Windows service, then add the "built-in" module.
        mod = mf.add_module("servicemanager")
        mod.__file__="dummy.pyd" # really built-in to the resulting EXE

    for mod in implicits:
        mf.import_hook(mod)
    for mod in modules:
        if mod == '-m':
            modargs = 1
            continue
        if modargs:
            if mod[-2:] == '.*':
                mf.import_hook(mod[:-2], None, ["*"])
            else:
                mf.import_hook(mod)
        else:
            mf.load_file(mod)

    # Alias "importlib._bootstrap" to "_frozen_importlib" so that the
    # import machinery can bootstrap.  Do the same for
    # importlib._bootstrap_external.
    mf.modules["_frozen_importlib"] = mf.modules["importlib._bootstrap"]
    mf.modules["_frozen_importlib_external"] = mf.modules["importlib._bootstrap_external"]

    # Add the main script as either __main__, or the actual module name.
    if python_entry_is_main:
        mf.run_script(scriptfile)
    else:
        mf.load_file(scriptfile)

    if debug > 0:
        mf.report()
        print()
    dict = mf.modules

    if error_if_any_missing:
        missing = mf.any_missing()
        if missing:
            sys.exit("There are some missing modules: %r" % missing)

    # generate output for frozen modules
    files = makefreeze.makefreeze(base, dict, debug, custom_entry_point,
                                  fail_import)

    # look for unfrozen modules (builtin and of unknown origin)
    builtins = []
    unknown = []
    mods = sorted(dict.keys())
    for mod in mods:
        if dict[mod].__code__:
            continue
        if not dict[mod].__file__:
            builtins.append(mod)
        else:
            unknown.append(mod)

    # search for unknown modules in extensions directories (not on Windows)
    addfiles = []
    frozen_extensions = [] # Windows list of modules.
    if unknown or (not win and builtins):
        if not win:
            addfiles, addmods = \
                      checkextensions.checkextensions(unknown+builtins,
                                                      extensions)
            for mod in addmods:
                if mod in unknown:
                    unknown.remove(mod)
                    builtins.append(mod)
        else:
            # Do the windows thang...
            import checkextensions_win32
            # Get a list of CExtension instances, each describing a module
            # (including its source files)
            frozen_extensions = checkextensions_win32.checkextensions(
                unknown, extensions, prefix)
            for mod in frozen_extensions:
                unknown.remove(mod.name)

    # report unknown modules
    if unknown:
        sys.stderr.write('Warning: unknown modules remain: %s\n' %
                         ' '.join(unknown))

    # windows gets different treatment
    if win:
        # Taking a shortcut here...
        import winmakemakefile, checkextensions_win32
        checkextensions_win32.write_extension_table(extensions_c,
                                                    frozen_extensions)
        # Create a module definition for the bootstrap C code.
        xtras = [frozenmain_c, os.path.basename(frozen_c),
                 frozendllmain_c, os.path.basename(extensions_c)] + files
        maindefn = checkextensions_win32.CExtension( '__main__', xtras )
        frozen_extensions.append( maindefn )
        with open(makefile, 'w') as outfp:
            winmakemakefile.makemakefile(outfp,
                                         locals(),
                                         frozen_extensions,
                                         os.path.basename(target))
        return

    # generate config.c and Makefile
    builtins.sort()
    with open(config_c_in) as infp, bkfile.open(config_c, 'w') as outfp:
        makeconfig.makeconfig(infp, outfp, builtins)

    cflags = ['$(OPT)']
    cppflags = defines + includes
    libs = [os.path.join(binlib, '$(LDLIBRARY)')]

    somevars = {}
    if os.path.exists(makefile_in):
        makevars = parsesetup.getmakevars(makefile_in)
        for key in makevars:
            somevars[key] = makevars[key]

    somevars['CFLAGS'] = ' '.join(cflags) # override
    somevars['CPPFLAGS'] = ' '.join(cppflags) # override
    files = [base_config_c, base_frozen_c] + \
            files + supp_sources + addfiles + libs + \
            ['$(MODLIBS)', '$(LIBS)', '$(SYSLIBS)']

    with bkfile.open(makefile, 'w') as outfp:
        makemakefile.makemakefile(outfp, somevars, files, base_target)

    # Done!

    if odir:
        print('Now run "make" in', odir, end=' ')
        print('to build the target:', base_target)
    else:
        print('Now run "make" to build the target:', base_target)


# Print usage message and exit

def usage(msg):
    """Write the error to stderr, point at -h for help, and exit(2)."""
    sys.stdout = sys.stderr
    print("Error:", msg)
    print("Use ``%s -h'' for help" % sys.argv[0])
    sys.exit(2)


main()
unknown
codeparrot/codeparrot-clean
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from pylib.base import environment
from pylib.device import adb_wrapper
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.utils import parallelizer


class LocalDeviceEnvironment(environment.Environment):
  """Test environment backed by devices attached to the local adb server."""

  def __init__(self, args, _error_func):
    super(LocalDeviceEnvironment, self).__init__()
    # Serial explicitly requested on the command line, or None for "all".
    self._device = args.test_device
    self._devices = []
    self._max_tries = 1 + args.num_retries
    self._tool_name = args.tool

  #override
  def SetUp(self):
    # TODO(jbudorick): This can be refined to support filters etc.
    attached = adb_wrapper.AdbWrapper.GetDevices()
    if not attached:
      raise device_errors.NoDevicesError
    if not self._device:
      # No specific serial requested: use every attached device.
      self._devices = [device_utils.DeviceUtils(serial) for serial in attached]
      return
    if self._device not in attached:
      raise device_errors.DeviceUnreachableError(
          'Could not find device %r' % self._device)
    self._devices = [device_utils.DeviceUtils(self._device)]

  @property
  def devices(self):
    return self._devices

  @property
  def parallel_devices(self):
    return parallelizer.SyncParallelizer(self._devices)

  @property
  def max_tries(self):
    return self._max_tries

  @property
  def tool(self):
    return self._tool_name

  #override
  def TearDown(self):
    # Nothing to release; devices are not held exclusively.
    pass
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The event extraction worker."""

import logging
import os
import pdb
import threading

from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.resolver import context
from dfvfs.resolver import resolver as path_spec_resolver

from plaso.lib import classifier
from plaso.lib import errors
from plaso.lib import queue
from plaso.lib import utils


class EventExtractionWorker(queue.PathSpecQueueConsumer):
  """Class that extracts events for files and directories.

  This class is designed to watch a queue for path specifications
  of files and directories (file entries) for which events need to
  be extracted. The event extraction worker needs to determine if
  a parser suitable for parsing a particular file is available. All
  extracted event objects are pushed on a storage queue for further
  processing.
  """

  def __init__(
      self, identifier, process_queue, storage_queue_producer, pre_obj,
      parsers, rpc_proxy=None):
    """Initializes the event extraction worker object.

    Args:
      identifier: A thread identifier, usually an incrementing integer.
      process_queue: the process queue (instance of Queue).
      storage_queue_producer: the storage queue producer (instance of
                              EventObjectQueueProducer).
      pre_obj: A preprocess object containing information collected from
               image (instance of PreprocessObject).
      parsers: A list of parser objects to use for processing.
      rpc_proxy: A proxy object (instance of proxy.ProxyServer) that can be
                 used to setup RPC functionality for the worker. This is
                 optional and if not provided the worker will not listen to RPC
                 requests. The default value is None.
    """
    super(EventExtractionWorker, self).__init__(process_queue)
    self._debug_mode = False
    self._filter_object = None
    self._identifier = identifier
    self._mount_path = None
    self._open_files = False
    self._parsers = parsers
    self._pre_obj = pre_obj
    self._rpc_proxy = rpc_proxy

    # We need a resolver context per process to prevent multi processing
    # issues with file objects stored in images.
    self._resolver_context = context.Context()
    self._single_process_mode = False
    self._storage_queue_producer = storage_queue_producer
    self._text_prepend = None

    # Few attributes that contain the current status of the worker.
    self._counter_of_extracted_events = 0
    self._current_working_file = u''
    self._is_running = False

    # Mapping of user SIDs to user names, used to annotate events with a
    # human-readable username (see _ParseEvent).
    if pre_obj:
      self._user_mapping = pre_obj.GetUserMappings()
    else:
      self._user_mapping = {}

  def _ConsumePathSpec(self, path_spec):
    """Consumes a path specification callback for ConsumePathSpecs.

    Resolves the path specification to a file entry, parses it, and --
    when the "open files" mode is enabled -- also parses any file entries
    found nested inside it (e.g. inside archives).
    """
    file_entry = path_spec_resolver.Resolver.OpenFileEntry(
        path_spec, resolver_context=self._resolver_context)

    if file_entry is None:
      # Unresolvable path specifications are logged and skipped, not fatal.
      logging.warning(u'Unable to open file entry: {0:s}'.format(
          path_spec.comparable))
      return

    try:
      self.ParseFile(file_entry)
    except IOError as exception:
      logging.warning(u'Unable to parse file: {0:s} with error: {1:s}'.format(
          path_spec.comparable, exception))

    if self._open_files:
      try:
        for sub_file_entry in classifier.Classifier.SmartOpenFiles(file_entry):
          self.ParseFile(sub_file_entry)
      except IOError as exception:
        logging.warning(
            u'Unable to parse file: {0:s} with error: {1:s}'.format(
                file_entry.path_spec.comparable, exception))

  def _ParseEvent(self, event_object, file_entry, parser_name, stat_obj):
    """Adjust value of an extracted EventObject before storing it.

    Annotates the event with display name, filename, path spec, parser
    name, hostname, inode and username, then pushes it to the storage
    queue (unless it is rejected by the configured filter).
    """
    # TODO: Make some more adjustments to the event object.
    # Need to apply time skew, and other information extracted from
    # the configuration of the tool.

    # TODO: deperecate text_prepend in favor of an event tag.
    if self._text_prepend:
      event_object.text_prepend = self._text_prepend

    file_path = getattr(file_entry.path_spec, 'location', file_entry.name)
    # If we are parsing a mount point we don't want to include the full
    # path to file's location here, we are only interested in the relative
    # path to the mount point.

    # TODO: Solve this differently, quite possibly inside dfVFS using mount
    # path spec.
    type_indicator = file_entry.path_spec.type_indicator
    if (type_indicator == dfvfs_definitions.TYPE_INDICATOR_OS and
        self._mount_path):
      if self._mount_path:
        # Strip everything up to and including the mount path prefix.
        _, _, file_path = file_path.partition(self._mount_path)

    # TODO: dfVFS refactor: move display name to output since the path
    # specification contains the full information.
    event_object.display_name = u'{0:s}:{1:s}'.format(
        file_entry.path_spec.type_indicator, file_path)

    if not getattr(event_object, 'filename', None):
      event_object.filename = file_path
    event_object.pathspec = file_entry.path_spec
    event_object.parser = parser_name

    if hasattr(self._pre_obj, 'hostname'):
      event_object.hostname = self._pre_obj.hostname
    if not hasattr(event_object, 'inode') and hasattr(stat_obj, 'ino'):
      event_object.inode = utils.GetInodeValue(stat_obj.ino)

    # Set the username that is associated to the record.
    if getattr(event_object, 'user_sid', None) and self._user_mapping:
      username = self._user_mapping.get(event_object.user_sid, None)
      if username:
        event_object.username = username

    # A missing filter means "accept everything".
    if not self._filter_object or self._filter_object.Matches(event_object):
      self._storage_queue_producer.ProduceEventObject(event_object)
      self._counter_of_extracted_events += 1

  def ParseFile(self, file_entry):
    """Run through classifier and appropriate parsers.

    Args:
      file_entry: A file entry object.
    """
    logging.debug(u'[ParseFile] Parsing: {0:s}'.format(
        file_entry.path_spec.comparable))

    self._current_working_file = getattr(
        file_entry.path_spec, u'location', file_entry.name)

    # TODO: Not go through all parsers, just the ones
    # that the classifier classifies the file as.
    # Do this when classifier is ready.
    # The classifier will return a "type" back, which refers
    # to a key in the self._parsers dict. If the results are
    # inconclusive the "all" key is used, or the key is not found.
    # key = self._parsers.get(classification, 'all')
    stat_obj = file_entry.GetStat()
    for parsing_object in self._parsers['all']:
      logging.debug(u'Checking [{0:s}] against: {1:s}'.format(
          file_entry.name, parsing_object.parser_name))
      try:
        for event_object in parsing_object.Parse(file_entry):
          if not event_object:
            continue

          self._ParseEvent(
              event_object, file_entry, parsing_object.parser_name, stat_obj)

      except errors.UnableToParseFile as exception:
        # Expected outcome when probing a parser that does not match the
        # file type; logged at debug level only.
        logging.debug(u'Not a {0:s} file ({1:s}) - {2:s}'.format(
            parsing_object.parser_name, file_entry.name, exception))

      except IOError as exception:
        logging.debug(
            u'[{0:s}] Unable to parse: {1:s} with error: {2:s}'.format(
                parsing_object.parser_name, file_entry.path_spec.comparable,
                exception))

      # Casting a wide net, catching all exceptions. Done to keep the worker
      # running, despite the parser hitting errors, so the worker doesn't die
      # if a single file is corrupted or there is a bug in a parser.
      except Exception as exception:
        logging.warning(
            u'[{0:s}] Unable to process file: {1:s} with error: {2:s}.'.format(
                parsing_object.parser_name, file_entry.path_spec.comparable,
                exception))
        logging.debug(
            u'The path specification that caused the error: {0:s}'.format(
                file_entry.path_spec.comparable))
        logging.exception(exception)

        # Check for debug mode and single process mode, then we would like
        # to debug this problem.
        if self._single_process_mode and self._debug_mode:
          pdb.post_mortem()

    logging.debug(u'Done parsing: {0:s}'.format(
        file_entry.path_spec.comparable))

  def GetStatus(self):
    """Returns a status dictionary for the worker process."""
    return {
        'is_running': self._is_running,
        'identifier': u'Worker_{0:d}'.format(self._identifier),
        'current_file': self._current_working_file,
        'counter': self._counter_of_extracted_events}

  def Run(self):
    """Start the worker, monitor the queue and parse files."""
    self.pid = os.getpid()
    logging.info(
        u'Worker {0:d} (PID: {1:d}) started monitoring process queue.'.format(
            self._identifier, self.pid))

    self._counter_of_extracted_events = 0
    self._is_running = True

    if self._rpc_proxy:
      try:
        self._rpc_proxy.SetListeningPort(self.pid)
        self._rpc_proxy.Open()
        self._rpc_proxy.RegisterFunction('status', self.GetStatus)

        # The RPC proxy is served from its own thread so status requests
        # can be answered while this thread consumes the queue.
        proxy_thread = threading.Thread(
            name='rpc_proxy', target=self._rpc_proxy.StartProxy)
        proxy_thread.start()
      except errors.ProxyFailedToStart as exception:
        # NOTE(review): if this except path is taken before proxy_thread is
        # assigned, the proxy_thread reference in the shutdown code below
        # would raise NameError -- confirm the intended behavior upstream.
        logging.error(
            u'Unable to setup a RPC server for the worker: {0:d} [PID {1:d}] '
            u'with error: {2:s}'.format(self._identifier, self.pid, exception))

    self.ConsumePathSpecs()

    logging.info(
        u'Worker {0:d} (PID: {1:d}) stopped monitoring process queue.'.format(
            self._identifier, os.getpid()))

    self._is_running = False
    self._current_working_file = u''

    self._resolver_context.Empty()

    if self._rpc_proxy:
      # Close the proxy, free up resources so we can shut down the thread.
      self._rpc_proxy.Close()

      if proxy_thread.isAlive():
        proxy_thread.join()

  def SetDebugMode(self, debug_mode):
    """Sets the debug mode.

    Args:
      debug_mode: boolean value to indicate if the debug mode should
                  be enabled.
    """
    self._debug_mode = debug_mode

  def SetFilterObject(self, filter_object):
    """Sets the filter object.

    Args:
      filter_object: the filter object (instance of objectfilter.Filter).
    """
    self._filter_object = filter_object

  def SetMountPath(self, mount_path):
    """Sets the mount path.

    Args:
      mount_path: string containing the mount path.
    """
    # Remove a trailing path separator from the mount path so the relative
    # paths will start with a path separator.
    if mount_path and mount_path.endswith(os.sep):
      mount_path = mount_path[:-1]

    self._mount_path = mount_path

  # TODO: rename this mode.
  def SetOpenFiles(self, open_files):
    """Sets the open files mode.

    Args:
      open_files: boolean value to indicate if the worker should scan for
                  sub file entries inside files.
    """
    self._open_files = open_files

  def SetSingleProcessMode(self, single_process_mode):
    """Sets the single process mode.

    Args:
      single_process_mode: boolean value to indicate if the single process
                           mode should be enabled.
    """
    self._single_process_mode = single_process_mode

  def SetTextPrepend(self, text_prepend):
    """Sets the text prepend.

    Args:
      text_prepend: string that contains the text to prepend to every
                    event object.
    """
    self._text_prepend = text_prepend
unknown
codeparrot/codeparrot-clean
package runtime import "github.com/moby/moby/api/types/swarm" func privilegesFromAPI(privs []*swarm.RuntimePrivilege) []*PluginPrivilege { var out []*PluginPrivilege for _, p := range privs { out = append(out, &PluginPrivilege{ Name: p.Name, Description: p.Description, Value: p.Value, }) } return out } // FromAPI converts an API RuntimeSpec to a PluginSpec, // which can be proto encoded. func FromAPI(spec swarm.RuntimeSpec) PluginSpec { return PluginSpec{ Name: spec.Name, Remote: spec.Remote, Privileges: privilegesFromAPI(spec.Privileges), Disabled: spec.Disabled, Env: spec.Env, } } func privilegesToAPI(privs []*PluginPrivilege) []*swarm.RuntimePrivilege { var out []*swarm.RuntimePrivilege for _, p := range privs { out = append(out, &swarm.RuntimePrivilege{ Name: p.Name, Description: p.Description, Value: p.Value, }) } return out } func ToAPI(spec PluginSpec) swarm.RuntimeSpec { return swarm.RuntimeSpec{ Name: spec.Name, Remote: spec.Remote, Privileges: privilegesToAPI(spec.Privileges), Disabled: spec.Disabled, Env: spec.Env, } }
go
github
https://github.com/moby/moby
daemon/cluster/internal/runtime/convert.go
# This file is part of Indico. # Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN). # # Indico is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # Indico is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Indico; if not, see <http://www.gnu.org/licenses/>. import os import warnings from flask import current_app as app from persistent import Persistent from persistent.dict import PersistentDict from BTrees import OOBTree from indico.core.config import Config from indico.core import db DEFAULT_PROTECTION_DISCLAINER_RESTRICTED = ("Circulation to people other than the intended audience is not authorized. " "You are obliged to treat the information with the appropriate level of " "confidentiality.") DEFAULT_PROTECTION_DISCLAINER_PROTECTED = ("As such, this information is intended for an internal audience only. 
" "You are obliged to treat the information with the appropriate level of " "confidentiality.") class MaKaCInfo(Persistent): """Holds and manages general information and data concerning each system """ def __init__(self): # server description fields self._title = "" self._organisation = "" self._city = "" self._country = "" self._lang = "en_GB" # Account-related features self._authorisedAccountCreation = True self._notifyAccountCreation = False self._moderateAccountCreation = False self._moderators = [] # Global poster/badge templates self._defaultConference = None # News management self._news = "" # special features self._newsActive = False self._debugActive = False # template set self._defaultTemplateSet = None # social sites self.getSocialAppConfig() # Define if Indico use a proxy (load balancing) self. _proxy = False # Define the volume used in archiving: # e.g. /afs/cern.ch/project/indico/XXXX/2008/... # XXXX will be replaced by self._archivingVolume # if self._archivingVolume is empty the path would be: # /afs/cern.ch/project/indico/2008/... 
self._archivingVolume = "" self._ip_based_acl_mgr = IPBasedACLMgr() # http api self._apiHTTPSRequired = False self._apiPersistentAllowed = False self._apiMode = 0 self._apiCacheTTL = 600 self._apiSignatureTTL = 600 # Event display style manager self._styleMgr = StyleManager() self._protectionDisclaimerRestricted = DEFAULT_PROTECTION_DISCLAINER_RESTRICTED self._protectionDisclaimerProtected = DEFAULT_PROTECTION_DISCLAINER_PROTECTED def getStyleManager( self ): try: return self._styleMgr except: self._styleMgr = StyleManager() return self._styleMgr def getSocialAppConfig( self ): if not hasattr(self, '_socialAppConfig'): self._socialAppConfig = PersistentDict({'active': False, 'facebook': {}}) return self._socialAppConfig def isDebugActive(self): msg = 'MaKaCinfo.isDebugActive() is deprecated; use app.debug or Config.getInstance().getDebug() instead' warnings.warn(msg, DeprecationWarning, 2) return app.debug def getNews( self ): try: return self._news except: self._news = "" return "" def setNews( self, news ): self._news = news def getModerators( self ): try: return self._moderators except: self._moderators = [] return self._moderators def addModerator( self, av ): if av not in self.getModerators(): self._moderators.append(av) self._p_changed=1 def removeModerator( self, av ): if av in self.getModerators(): self._moderators.remove( av ) self._p_changed=1 def isModerator( self, av ): if av in self.getModerators(): return True else: return False def getAuthorisedAccountCreation( self ): try: return self._authorisedAccountCreation except: self.setAuthorisedAccountCreation() return self._authorisedAccountCreation def setAuthorisedAccountCreation( self, flag=True ): self._authorisedAccountCreation = flag def getNotifyAccountCreation( self ): try: return self._notifyAccountCreation except: self.setNotifyAccountCreation() return self._notifyAccountCreation def setNotifyAccountCreation( self, flag=False ): self._notifyAccountCreation = flag def getModerateAccountCreation( 
self ): try: return self._moderateAccountCreation except: self.setModerateAccountCreation() return self._moderateAccountCreation def setModerateAccountCreation( self, flag=False ): self._moderateAccountCreation = flag def getTitle( self ): return self._title def isNewsActive( self ): if hasattr( self, "_newsActive" ): return self._newsActive else: self._newsActive = False return False def setNewsActive( self, bool=True ): self._newsActive = bool def setTitle( self, newTitle ): self._title = newTitle.strip() def getOrganisation( self ): return self._organisation def getAffiliation( self ): return self._organisation def setOrganisation( self, newOrg ): self._organisation = newOrg.strip() def getCity( self ): return self._city def setCity( self, newCity ): self._city = newCity.strip() def getCountry( self ): return self._country def setCountry( self, newCountry ): self._country = newCountry def getTimezone(self): msg = 'MaKaCinfo.getTimezone() is deprecated; use Config.getInstance().getDefaultTimezone() instead' warnings.warn(msg, DeprecationWarning, 2) return Config.getInstance().getDefaultTimezone() def getDefaultConference( self ): try: self._defaultConference except AttributeError: self._defaultConference = None return self._defaultConference def setDefaultConference( self, dConf ): self._defaultConference = dConf return self._defaultConference def getDefaultTemplateSet( self ): try: return self._defaultTemplateSet except: self._defaultTemplateSet = None return None def setDefaultTemplateSet( self, defTemp ): self._defaultTemplateSet = defTemp return self._defaultTemplateSet def getLang( self ): try: return self._lang except: self._lang = "en_GB" #Logger.get('i18n').warning('No language set in MaKaCInfo... 
using %s by default' % self._lang) return self._lang def setLang( self, lang ): self._lang = lang def setArchivingVolume(self, volume): if isinstance(volume, str): self._archivingVolume = volume def getArchivingVolume(self): if not hasattr(self, "_archivingVolume"): self._archivingVolume = "" return self._archivingVolume def getIPBasedACLMgr(self): return self._ip_based_acl_mgr def getProtectionDisclaimerProtected(self): if not hasattr(self, '_protectionDisclaimerProtected'): self._protectionDisclaimerProtected = DEFAULT_PROTECTION_DISCLAINER_PROTECTED return self._protectionDisclaimerProtected def setProtectionDisclaimerProtected(self, v): self._protectionDisclaimerProtected = v def getProtectionDisclaimerRestricted(self): if not hasattr(self, '_protectionDisclaimerRestricted'): self._protectionDisclaimerRestricted = DEFAULT_PROTECTION_DISCLAINER_RESTRICTED return self._protectionDisclaimerRestricted def setProtectionDisclaimerRestricted(self, v): self._protectionDisclaimerRestricted = v class HelperMaKaCInfo: """Helper class used for getting and instance of MaKaCInfo. It will be migrated into a static method in MaKaCInfo class once the ZODB4 is released and the Persistent classes are no longer Extension ones. 
""" @classmethod def getMaKaCInfoInstance(cls): dbmgr = db.DBMgr.getInstance() root = dbmgr.getDBConnection().root() try: minfo = root['MaKaCInfo']['main'] except KeyError: minfo = MaKaCInfo() root['MaKaCInfo'] = OOBTree.OOBTree() root['MaKaCInfo']['main'] = minfo return minfo class StyleManager(Persistent): """This class manages the styles used by the server for the display of events timetables """ def __init__( self ): self._styles = Config.getInstance().getStyles() self._eventStylesheets = Config.getInstance().getEventStylesheets() self._defaultEventStylesheet = Config.getInstance().getDefaultEventStylesheet() def getStyles(self): try: return self._styles except AttributeError: self._styles = Config.getInstance().getStyles() return self._styles def setStyles(self, newStyles): self._styles = newStyles def getEventStyles(self): """gives back the entire style/event association list. """ return self._eventStylesheets def setEventStyles(self, sDict={}): self._eventStylesheets = sDict def getDefaultEventStyles(self): """gives back the default styles/event association """ return self._defaultEventStylesheet def setDefaultEventStyle(self, sDict={}): self._defaultEventStylesheet = sDict def getDefaultStyleForEventType(self, eventType): """gives back the default stylesheet for the given type of event """ return self._defaultEventStylesheet.get(eventType, "") def removeStyle(self, styleId, type=""): if type == "": # style globally removed styles = self.getStyles() if styles.has_key(styleId): del styles[styleId] self.setStyles(styles) self.removeStyleFromAllTypes(styleId) else: # style removed only in the type list self.removeStyleFromEventType(styleId, type) def addStyleToEventType( self, style, type ): dict = self.getEventStyles() styles = dict.get(type,[]) if style not in styles: styles.append(style) dict[type] = styles self.setEventStyles(dict) def removeStyleFromEventType( self, style, type ): # style removed only in the type list dict = self.getEventStyles() styles = 
dict.get(type,[]) defaultStyle = self.getDefaultStyleForEventType(type) if style != "" and style in styles: styles.remove(style) dict[type] = styles self.setEventStyles(dict) if style.strip() == defaultStyle.strip(): if len(styles) > 0: newDefaultStyle = styles[0] else: newDefaultStyle = None self.setDefaultStyle( newDefaultStyle, type ) def setDefaultStyle( self, style, type ): if style != "": dict = self.getDefaultEventStyles() dict[type] = style self.setDefaultEventStyle(dict) def removeStyleFromAllTypes( self, style ): for type in ["simple_event", "meeting", "conference"]: self.removeStyleFromEventType(style, type) def getStyleListForEventType(self, eventType): """gives back the style list associated to a given type of event. If no event was specified it returns the empty list. Params: type -- unique identifier of the event type """ return self._eventStylesheets.get(eventType, []) def isCorrectStyle(self, styleId): if styleId not in self.getStyles(): return False correctCSS = self.existsCSSFile(styleId) or not self.getCSSFilename(styleId) return styleId == 'static' or (self.existsTPLFile(styleId) and correctCSS) or (self.existsXSLFile(styleId) and correctCSS) def getExistingStylesForEventType(self, eventType): result = [] for style in self.getStyleListForEventType(eventType): if self.isCorrectStyle(style): result.append(style) return result def getStyleDictForEventType(self, type): """gives back the style list associated to a given type of event. If no event was specified it returns the empty list. 
Params: type -- unique identifier of the event type """ styles = self.getStyles() return dict((styleID, styles[styleID]) for styleID in self._eventStylesheets.get(type, [])) def getStyleName(self, styleId): styles = self.getStyles() if styleId in styles: return styles[styleId][0] else: return "" def getBaseTPLPath(self): tplDir = Config.getInstance().getTPLDir() return os.path.join(tplDir, "events") def getBaseXSLPath(self): xslDir = Config.getInstance().getStylesheetsDir() return os.path.join(xslDir, "events") def existsTPLFile(self, styleId): if styleId.strip() != "": tplFile = self.getTemplateFilename(styleId) if not tplFile: return False path = os.path.join(self.getBaseTPLPath(), tplFile) if os.path.exists(path): return True return False def existsXSLFile(self, styleId): if styleId.strip() != "": xslFile = self.getTemplateFilename(styleId) if not xslFile: return False path = os.path.join(self.getBaseXSLPath(), xslFile) if os.path.exists(path): return True return False def getXSLPath(self, styleId): if styleId.strip() != "": xslFile = self.getTemplateFilename(styleId) if not xslFile: return False path = os.path.join(self.getBaseXSLPath(), xslFile) if os.path.exists(path): return path return None def getTemplateFilename(self, styleId): styles = self.getStyles() if styleId in styles: fileName = styles[styleId][1] return fileName else: return None def getBaseCSSPath( self ): return os.path.join(Config.getInstance().getHtdocsDir(),"css", "events") def existsCSSFile(self, styleId): if styleId.strip() != "": basepath = self.getBaseCSSPath() filename = self.getCSSFilename(styleId) if not filename: return False path = os.path.join(basepath, filename) if os.path.exists(path): return True return False def getCSSFilename( self, stylesheet ): if stylesheet.strip() != "" and stylesheet in self.getStyles(): return self._styles[stylesheet][2] return None def getXSLStyles(self): xslStyles = [] for style in self.getStyles(): if self._styles[style][1]!= None and 
self._styles[style][1].endswith(".xsl"): xslStyles.append(style) return xslStyles def existsStyle(self, styleId): styles = self.getStyles() if styleId in styles: return True else: return False class IPBasedACLMgr(Persistent): def __init__(self): self._full_access_acl = set() def get_full_access_acl(self): return self._full_access_acl def grant_full_access(self, ip): self._full_access_acl.add(ip) self.notify_modification() def revoke_full_access(self, ip): if ip in self._full_access_acl: self._full_access_acl.remove(ip) self.notify_modification() def notify_modification(self): self._p_changed = 1
unknown
codeparrot/codeparrot-clean
import pickle import cv2 import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg # Read in the saved camera matrix and distortion coefficients # These are the arrays you calculated using cv2.calibrateCamera() dist_pickle = pickle.load(open("wide_dist_pickle.p", "rb")) mtx = dist_pickle["mtx"] dist = dist_pickle["dist"] # Read in an image img = cv2.imread('test_image2.png') nx = 8 # the number of inside corners in x ny = 6 # the number of inside corners in y # MODIFY THIS FUNCTION TO GENERATE OUTPUT # THAT LOOKS LIKE THE IMAGE ABOVE # def corners_unwarp(img, nx, ny, mtx, dist): # # Pass in your image into this function # # Write code to do the following steps # # 1) Undistort using mtx and dist # # 2) Convert to grayscale # # 3) Find the chessboard corners # # 4) If corners found: # # a) draw corners # # b) define 4 source points src = np.float32([[,],[,],[,],[,]]) # # Note: you could pick any four of the detected corners # # as long as those four corners define a rectangle # # One especially smart way to do this would be to use four well-chosen # # corners that were automatically detected during the undistortion steps # # We recommend using the automatic detection of corners in your code # # c) define 4 destination points dst = np.float32([[,],[,],[,],[,]]) # # d) use cv2.getPerspectiveTransform() to get M, the transform matrix # # e) use cv2.warpPerspective() to warp your image to a top-down view # # delete the next two lines # M = None # warped = np.copy(img) # return warped, M def corners_unwarp(img, nx, ny, mtx, dist): # Use the OpenCV undistort() function to remove distortion undist = cv2.undistort(img, mtx, dist, None, mtx) # Convert undistorted image to grayscale gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY) # Search for corners in the grayscaled image ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None) if ret == True: # If we found corners, draw them! 
(just for fun) cv2.drawChessboardCorners(undist, (nx, ny), corners, ret) # Choose offset from image corners to plot detected corners # This should be chosen to present the result at the proper aspect ratio # My choice of 100 pixels is not exact, but close enough for our purpose here offset = 100 # offset for dst points # Grab the image shape img_size = (gray.shape[1], gray.shape[0]) # For source points I'm grabbing the outer four detected corners src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]]) # For destination points, I'm arbitrarily choosing some points to be # a nice fit for displaying our warped result # again, not exact, but close enough for our purposes dst = np.float32([[offset, offset], [img_size[0]-offset, offset], [img_size[0]-offset, img_size[1]-offset], [offset, img_size[1]-offset]]) # Given src and dst points, calculate the perspective transform matrix M = cv2.getPerspectiveTransform(src, dst) # Warp the image using OpenCV warpPerspective() warped = cv2.warpPerspective(undist, M, img_size) # Return the resulting image and matrix return warped, M top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist) f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9)) f.tight_layout() ax1.imshow(img) ax1.set_title('Original Image', fontsize=50) ax2.imshow(top_down) ax2.set_title('Undistorted and Warped Image', fontsize=50) plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
unknown
codeparrot/codeparrot-clean
/* MIT License http://www.opensource.org/licenses/mit-license.php Author Tobias Koppers @sokra */ "use strict"; const ModuleDependency = require("./ModuleDependency"); /** @typedef {import("webpack-sources").ReplaceSource} ReplaceSource */ /** @typedef {import("../Dependency")} Dependency */ /** @typedef {import("../DependencyTemplate").DependencyTemplateContext} DependencyTemplateContext */ class ModuleDependencyTemplateAsRequireId extends ModuleDependency.Template { /** * @param {Dependency} dependency the dependency for which the template should be applied * @param {ReplaceSource} source the current replace source which can be modified * @param {DependencyTemplateContext} templateContext the context object * @returns {void} */ apply( dependency, source, { runtimeTemplate, moduleGraph, chunkGraph, runtimeRequirements } ) { const dep = /** @type {ModuleDependency} */ (dependency); if (!dep.range) return; const content = runtimeTemplate.moduleExports({ module: moduleGraph.getModule(dep), chunkGraph, request: dep.request, weak: dep.weak, runtimeRequirements }); source.replace(dep.range[0], dep.range[1] - 1, content); } } module.exports = ModuleDependencyTemplateAsRequireId;
javascript
github
https://github.com/webpack/webpack
lib/dependencies/ModuleDependencyTemplateAsRequireId.js
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Response.answer_number' db.add_column(u'survey_response', 'answer_number', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=7, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'Response.answer_number' db.delete_column(u'survey_response', 'answer_number') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'survey.block': { 'Meta': {'object_name': 'Block'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'skip_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'skip_question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}) }, u'survey.gridanswer': { 'Meta': {'object_name': 'GridAnswer'}, 'answer_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'col_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 
'col_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"}), 'row_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'row_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, u'survey.location': { 'Meta': {'object_name': 'Location'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'lat': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}), 'lng': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '7'}), 'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']", 'null': 'True', 'blank': 'True'}), 'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"}) }, u'survey.locationanswer': { 'Meta': {'object_name': 'LocationAnswer'}, 'answer': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Location']"}) }, u'survey.multianswer': { 'Meta': {'object_name': 'MultiAnswer'}, 'answer_label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'answer_text': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'response': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Response']"}) }, u'survey.option': { 'Meta': {'object_name': 'Option'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'label': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'max': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'min': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'rows': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'text': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'integer'", 'max_length': '20'}) }, u'survey.page': { 'Meta': {'ordering': "['survey', 'question__order']", 'object_name': 'Page'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']", 'null': 'True', 'blank': 'True'}) }, u'survey.question': { 'Meta': {'ordering': "['order']", 'object_name': 'Question'}, 'allow_other': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'blocks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Block']", 'null': 'True', 'blank': 'True'}), 'cols': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'filterBy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'filter_questions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'filter_questions_rel_+'", 'null': 'True', 'to': u"orm['survey.Question']"}), 'foreach_question': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'foreach'", 'null': 'True', 'to': 
u"orm['survey.Question']"}), 'grid_cols': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'grid_cols'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['survey.Option']"}), 'hoist_answers': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'hoisted'", 'null': 'True', 'to': u"orm['survey.Question']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'info': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'integer_max': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'integer_min': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'lat': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}), 'lng': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}), 'min_zoom': ('django.db.models.fields.IntegerField', [], {'default': '10', 'null': 'True', 'blank': 'True'}), 'modalQuestion': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modal_question'", 'null': 'True', 'to': u"orm['survey.Question']"}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Option']", 'null': 'True', 'blank': 'True'}), 'options_from_previous_answer': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'options_json': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'randomize_groups': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'report_type': 
('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '20', 'null': 'True'}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'rows': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'skip_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'skip_question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']", 'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64'}), 'term_condition': ('django.db.models.fields.CharField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.TextField', [], {}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}), 'visualize': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'zoom': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}) }, u'survey.respondant': { 'Meta': {'object_name': 'Respondant'}, 'complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'county': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'default': 'None', 'max_length': '254', 'null': 'True', 'blank': 'True'}), 'last_question': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}), 'locations': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'responses': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'responses'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['survey.Response']"}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '240', 'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': 'None', 
'max_length': '20', 'null': 'True', 'blank': 'True'}), 'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"}), 'surveyor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 12, 0, 0)'}), 'uuid': ('django.db.models.fields.CharField', [], {'default': "'ddec2809-7f56-44c8-adf6-d609312f8e15'", 'max_length': '36', 'primary_key': 'True'}) }, u'survey.response': { 'Meta': {'object_name': 'Response'}, 'answer': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'answer_number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}), 'answer_raw': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'respondant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Respondant']", 'null': 'True', 'blank': 'True'}), 'ts': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 12, 0, 0)'}) }, u'survey.survey': { 'Meta': {'object_name': 'Survey'}, 'anon': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Question']", 'null': 'True', 'through': u"orm['survey.Page']", 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '254'}), 'states': 
('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) } } complete_apps = ['survey']
unknown
codeparrot/codeparrot-clean
{ "description": "tests handling of multiline strings when updating", "include_files": [], "ignore_fields": {} }
json
github
https://github.com/hashicorp/terraform
testing/equivalence-tests/tests/basic_multiline_string_update/spec.json
#!/usr/bin/python # Copyright 2015 Google Inc. All Rights Reserved. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. """An Ansible module to utilize GCE image resources.""" DOCUMENTATION = ''' --- module: gce_img version_added: "1.9" short_description: utilize GCE image resources description: - This module can create and delete GCE private images from gzipped compressed tarball containing raw disk data or from existing detached disks in any zone. 
U(https://cloud.google.com/compute/docs/images) options: name: description: - the name of the image to create or delete required: true default: null aliases: [] description: description: - an optional description required: false default: null aliases: [] source: description: - the source disk or the Google Cloud Storage URI to create the image from required: false default: null aliases: [] state: description: - desired state of the image required: false default: "present" choices: ["present", "absent"] aliases: [] zone: description: - the zone of the disk specified by source required: false default: "us-central1-a" aliases: [] service_account_email: description: - service account email required: false default: null aliases: [] pem_file: description: - path to the pem file associated with the service account email required: false default: null aliases: [] project_id: description: - your GCE project ID required: false default: null aliases: [] requirements: - "python >= 2.6" - "apache-libcloud" author: "Peter Tan (@tanpeter)" ''' EXAMPLES = ''' # Create an image named test-image from the disk 'test-disk' in zone us-central1-a. - gce_img: name: test-image source: test-disk zone: us-central1-a state: present # Create an image named test-image from a tarball in Google Cloud Storage. - gce_img: name: test-image source: https://storage.googleapis.com/bucket/path/to/image.tgz # Alternatively use the gs scheme - gce_img: name: test-image source: gs://bucket/path/to/image.tgz # Delete an image named test-image. 
- gce_img: name: test-image state: absent ''' import sys try: from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver from libcloud.common.google import GoogleBaseError from libcloud.common.google import ResourceExistsError from libcloud.common.google import ResourceNotFoundError _ = Provider.GCE has_libcloud = True except ImportError: has_libcloud = False GCS_URI = 'https://storage.googleapis.com/' def create_image(gce, name, module): """Create an image with the specified name.""" source = module.params.get('source') zone = module.params.get('zone') desc = module.params.get('description') if not source: module.fail_json(msg='Must supply a source', changed=False) if source.startswith(GCS_URI): # source is a Google Cloud Storage URI volume = source elif source.startswith('gs://'): # libcloud only accepts https URI. volume = source.replace('gs://', GCS_URI) else: try: volume = gce.ex_get_volume(source, zone) except ResourceNotFoundError: module.fail_json(msg='Disk %s not found in zone %s' % (source, zone), changed=False) except GoogleBaseError, e: module.fail_json(msg=str(e), changed=False) try: gce.ex_create_image(name, volume, desc, False) return True except ResourceExistsError: return False except GoogleBaseError, e: module.fail_json(msg=str(e), changed=False) def delete_image(gce, name, module): """Delete a specific image resource by name.""" try: gce.ex_delete_image(name) return True except ResourceNotFoundError: return False except GoogleBaseError, e: module.fail_json(msg=str(e), changed=False) def main(): module = AnsibleModule( argument_spec=dict( name=dict(required=True), description=dict(), source=dict(), state=dict(default='present', choices=['present', 'absent']), zone=dict(default='us-central1-a'), service_account_email=dict(), pem_file=dict(), project_id=dict(), ) ) if not has_libcloud: module.fail_json(msg='libcloud with GCE support is required.') gce = gce_connect(module) name = module.params.get('name') state = 
module.params.get('state') changed = False # user wants to create an image. if state == 'present': changed = create_image(gce, name, module) # user wants to delete the image. if state == 'absent': changed = delete_image(gce, name, module) module.exit_json(changed=changed, name=name) sys.exit(0) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.gce import * main()
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*-
# OceanBase performance-test orchestration: DSL attribute sets for the test
# harness, profiling on/off switches, profile-log parsing and HTML report
# generation.
# NOTE(review): this module relies on names injected by the surrounding
# deployment framework (sub2, all_do, ssh, sh, popen, info, read, find_attr,
# get_match_child, dict_add, dict_filter_out_special_attrs, re, os, copy,
# client_vars, ct_vars, role_vars, obi_vars, ...) -- none are imported here;
# it is not runnable standalone.
import time
import datetime
try:
    # matplotlib is optional: all graph drawing is skipped when unavailable.
    import matplotlib.pyplot as plt
except ImportError:
    plt = None


def perf_client_attrs():
    """DSL attribute strings for the stress-test client; returns locals()
    so the framework can merge them into its variable table."""
    prepare_data = '''sh: ssh $usr@$ip "${client_env_vars} client_idx=$idx $dir/$client/stress.sh prepare ${type} ${client_start_args}"'''
    return locals()


def perf_ct_attrs():
    """DSL attribute strings for the client group; returns locals()."""
    prepare_data = 'all: client prepare_data type=all'
    reboot_to_prepare_data = 'seq: stop conf rsync clear prepare_data'
    return locals()


def perf_role_attrs():
    """DSL attribute strings shared by every server role (profile/gprofiler
    retrieval, periodic pstack capture); returns locals()."""
    save_profile = '''sh: scp $usr@$ip:$dir/log/$role.profile $_rest_'''
    save_gprofile = '''sh: scp $usr@$ip:$gprofiler_output $_rest_'''
    start_periodic_pstack = '''sh: ssh $usr@$ip "sh $dir/tools/periodic_pstack.sh $__self__ \`pgrep -f ^$exe\` $dir/log/pstacks/ $ptime > /dev/null 2>&1 < /dev/zero &"'''
    save_periodic_pstack = '''sh: scp -r $usr@$ip:$dir/log/pstacks/$__self__ $_rest_'''
    return locals()


def perf_obi_attrs():
    """DSL attribute strings for the whole OB instance: load parameters,
    table creation, and the two top-level perf sequences."""
    perf_load_params = r'''call: obmysql extra="-e \"alter system set merge_delay_interval='1s' server_type=chunkserver;\""'''
    perf_create_tables = r'''call: obmysql extra="< perf/${case}.create"'''
    perf_init = 'seq: perf_load_params perf_create_tables sleep[sleep_time=2]'
    perf = 'seq: reboot sleep[sleep_time=10] perf_init perf_prepare.reboot_to_prepare_data turn_on_perf perf_run.reboot sleep[sleep_time=$ptime] perf_run.stop turn_off_perf collect_perf'
    perf_ups = 'seq: reboot sleep[sleep_time=10] perf_init turn_on_perf perf_run.reboot sleep[sleep_time=$ptime] perf_run.stop turn_off_perf collect_perf'
    # NOTE(review): unlike the other *_attrs() helpers, this copy of the
    # source shows no `return locals()` here, yet perf_install() feeds the
    # result to dict_filter_out_special_attrs(); a `return locals()` was
    # presumably lost -- confirm against the upstream file.


# Run timestamp: names both the published result directory and the local
# scratch directory for this run.
running_ts = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
local_tmp_dir = "tmp.%s" % (running_ts)


def get_cluster_ips(*args, **ob):
    """Return the deduplicated list of IPs of every server in the cluster,
    preserving first-seen order (mergeserver, chunkserver, updateserver,
    rootserver)."""
    def get_server_ips(server_role):
        # Collect the 'ip' attribute of every child entry matching the role.
        server_list = get_match_child(ob, server_role)
        return [find_attr(find_attr(ob, k), "ip") for k in server_list.keys()]
    cluster_ips = get_server_ips("mergeserver") + get_server_ips("chunkserver") + get_server_ips("updateserver") + get_server_ips("rootserver")
    # Order-preserving dedup: seen_add() returns None (falsy), so each ip
    # passes the condition exactly once.
    seen = set()
    seen_add = seen.add
    return [x for x in cluster_ips if x not in seen and not seen_add(x)]


def turn_on_perf(*args, **ob):
    """Enable profiling on all servers (signal -50: profile log, -60:
    gprofiler) and start background linuxmon stats on every machine."""
    # turn on profile log and gprofiler
    all_do(ob, 'mergeserver', 'kill', '-50')
    all_do(ob, 'chunkserver', 'kill', '-50')
    all_do(ob, 'mergeserver', 'kill', '-60')
    all_do(ob, 'chunkserver', 'kill', '-60')
    all_do(ob, 'updateserver', 'kill', '-60')
    #call_(ob, 'ms0.kill', '-50')
    #call_(ob, 'ms0.kill', '-60')
    #call_(ob, 'cs0.kill', '-60')
    #call_(ob, 'ups0.kill', '-60')
    for ip in get_cluster_ips(*args, **ob):
        # Launch the system monitor in the background for the whole run;
        # its output is fetched later by collect_perf().
        ssh(ip, sub2("$dir/tools/linuxmon_x64.bin time=${ptime}s wait=1 back=yes > $dir/server_stats 2>&1 < /dev/zero &", ob))
    #call_(ob, 'ms0.start_periodic_pstack')
    #call_(ob, 'cs0.start_periodic_pstack')
    #call_(ob, 'ups0.start_periodic_pstack')


def turn_off_perf(*args, **ob):
    """Disable profiling on all servers (signal -51: profile log, -61:
    gprofiler); mirror image of turn_on_perf()."""
    # turn off profile log and gprofiler
    all_do(ob, 'mergeserver', 'kill', '-51')
    all_do(ob, 'chunkserver', 'kill', '-51')
    all_do(ob, 'mergeserver', 'kill', '-61')
    all_do(ob, 'chunkserver', 'kill', '-61')
    all_do(ob, 'updateserver', 'kill', '-61')
    #call_(ob, 'ms0.kill', '-51')
    #call_(ob, 'ms0.kill', '-61')
    #call_(ob, 'cs0.kill', '-61')
    #call_(ob, 'ups0.kill', '-61')


def pprof_gprofile_output(server_bin, profile_output, perf_result_dir, *args, **ob):
    """Run gperftools' pprof over a collected CPU profile: extract the top-50
    functions (text mode) and render a PDF call graph, returning an HTML
    fragment for the report."""
    st = time.time()
    cost_graph_name = "${role}_${ip}_cost_graph.pdf"
    cost_graph_path = "%s/%s" % (perf_result_dir, cost_graph_name)
    # Two-stage substitution: first fill in local names, then the framework
    # variables from `ob` when the command is actually executed below.
    top50_cmd = sub2("pprof --text $server_bin $profile_output", locals())
    graph_cmd = sub2("pprof --pdf $server_bin --edgefraction=0 $profile_output > $cost_graph_path", locals())
    pro_res = popen(sub2(top50_cmd, ob)).splitlines()
    # Skip pprof's preamble: keep only lines after the 'Total: ' header.
    i = 0
    while i < len(pro_res):
        if pro_res[i].startswith('Total: '):
            i += 1
            break
        i += 1
    func_top50 = '\n'.join(pro_res[i:i + 50])
    sh(sub2(graph_cmd, ob))
    info('drawing %s profile graph costs %ss' % (server_bin, time.time() - st))
    output = """ <p>$role $ip 函数消耗排名前50: <pre>%s</pre></p> <p><a href="%s">函数消耗线框图</a></p> """ % (func_top50, cost_graph_name)
    return sub2(output, ob)


def parse_profile_log(profile_log, perf_result_dir, *args, **ob):
    """Parse a mergeserver SQL profile log, aggregate per-query latency/QPS/
    queue-wait/RPC statistics, draw graphs (when matplotlib is available),
    and return {'parse_res': <html fragment>, 'stat': <raw numbers>}."""
    time_format = "%Y-%m-%d %H:%M:%S"  # unused in this copy of the source
    # Sampling ratio: each logged query stands for `query_ratio` real ones.
    # NOTE(review): derived from $ptime here -- confirm that the sampling
    # interval really equals the run time in the harness configuration.
    query_ratio = int(sub2("$ptime", ob), 10)
    d = dict()  # unused in this copy of the source
    start_time = None
    end_time = None
    sql_count = 0
    real_sql_count = 0
    sql_time = 0
    sql_time_dist = dict()
    wait_time = 0
    # Mutable accumulator shared with the add_sql closure (Py2 has no
    # `nonlocal`, hence the dict).
    ud = dict(sql_count = 0, real_sql_count = 0, sql_time = 0, wait_time = 0)
    qps2time = dict()
    rpcs = dict()
    rpcs_html = ""
    wait_times = []
    parse_log_st = time.time()
    def get_packet_name(pcode):
        # Map known OB packet codes to their symbolic names.
        if pcode == 4002:
            return "OB_PHY_PLAN_EXECUTE"
        elif pcode == 409:
            return "OB_SQL_GET_REQUEST"
        elif pcode == 405:
            return "OB_SQL_SCAN_REQUEST"
        else:
            return "OB_UNKNOWN_PACKET"
    def add_sql(trace_id, ts, sqlt, waitt, rpc_list):
        # Fold one parsed log line into the aggregates.
        # NOTE(review): the first occurrence of a second (and of a latency
        # bucket) is initialized to 0 rather than query_ratio, slightly
        # undercounting -- confirm whether this is intentional.
        if qps2time.has_key(ts):
            qps2time[ts] += query_ratio
        else:
            qps2time[ts] = 0
        ud['sql_count'] += query_ratio
        ud['real_sql_count'] += 1
        ud['sql_time'] += sqlt
        if sql_time_dist.has_key(sqlt):
            sql_time_dist[sqlt] += query_ratio
        else:
            sql_time_dist[sqlt] = 0
        ud['wait_time'] += waitt
        wait_times.append(waitt)
        for rpc in rpc_list:
            if rpcs.has_key(rpc['pcode']):
                rpcs[rpc['pcode']]['rpc_time'] += rpc['latency']
                rpcs[rpc['pcode']]['rpc_times'].append(rpc['latency'])
            else:
                rpcs[rpc['pcode']] = dict(rpc_time = rpc['latency'], rpc_times = [rpc['latency']])
    # Each log line carries 1, 2 or 3 rpc segments; try the three patterns
    # from shortest to longest.
    with open(sub2(profile_log, ob)) as f:
        for l in f:
            m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] sql_queue_size=\[\d+\] print_time=\[(\d+)\]', l)
            if m is not None:
                end_time = int(m.group(6))
                trace_id = m.group(1)
                ts = m.group(6)[:-6]  # microsecond timestamp truncated to seconds
                sql_time = int(m.group(2))
                wait_time = int(m.group(3))
                rpc_list = [dict(pcode = int(m.group(5)), latency = int(m.group(4)))]
                add_sql(trace_id, ts, sql_time, wait_time, rpc_list)
            else:
                m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] sql_queue_size=\[\d+\] print_time=\[(\d+)\]', l)
                if m is not None:
                    end_time = int(m.group(8))
                    trace_id = m.group(1)
                    ts = m.group(8)[:-6]
                    sql_time = int(m.group(2))
                    wait_time = int(m.group(3))
                    rpc_list = [dict(pcode = int(m.group(5)), latency = int(m.group(4))), dict(pcode = int(m.group(7)), latency = int(m.group(6))),]
                    add_sql(trace_id, ts, sql_time, wait_time, rpc_list)
                else:
                    m = re.search(r'trace_id=\[(\d+)\] sql=\[.*\] sql_to_logicalplan=\[\d+\] logicalplan_to_physicalplan=\[\d+\] handle_sql_time=\[(\d+)\] wait_sql_queue_time=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] rpc:channel_id=\[\d+\] rpc_start=\[\d+\] latency=\[(\d+)\] pcode=\[(\d+)\] sql_queue_size=\[\d+\] print_time=\[(\d+)\]', l)
                    if m is not None:
                        end_time = int(m.group(10))
                        trace_id = m.group(1)
                        ts = m.group(10)[:-6]
                        sql_time = int(m.group(2))
                        wait_time = int(m.group(3))
                        rpc_list = [dict(pcode = int(m.group(5)), latency = int(m.group(4))), dict(pcode = int(m.group(7)), latency = int(m.group(6))), dict(pcode = int(m.group(9)), latency = int(m.group(8))),]
                        add_sql(trace_id, ts, sql_time, wait_time, rpc_list)
            # Remember the first matched line's print_time as the run start.
            if start_time is None and end_time is not None:
                start_time = end_time
    info("parsing log costs %ss" % (time.time() - parse_log_st))
    # Copy accumulated totals out of the closure dict.
    sql_time = ud['sql_time']
    sql_count = ud['sql_count']
    real_sql_count = ud['real_sql_count']
    wait_time = ud['wait_time']
    drawing_st = time.time()
    if end_time is None:
        # No line matched: report all-zero stats.
        elapsed_seconds = 0
        qps = 0
        avg_sql_time = 0
        avg_wait_time = 0
        for pcode, rpc in rpcs.items():
            rpc['avg_rpc_time'] = 0
    else:
        # print_time values are in microseconds.
        elapsed_seconds = (end_time - start_time) / 10**6
        if elapsed_seconds > 0:
            qps = sql_count / elapsed_seconds
            avg_sql_time = float(sql_time) / real_sql_count
            avg_wait_time = float(wait_time) / real_sql_count
        else:
            qps = 0
            avg_sql_time = 0
            avg_wait_time = 0
        for pcode, rpc in rpcs.items():
            rpc['avg_rpc_time'] = float(rpc['rpc_time']) / len(rpc['rpc_times'])
    if plt is not None:
        # QPS over time.
        plt.plot([x for k,x in sorted(qps2time.items())], '-')
        plt.xlabel('Timeline')
        plt.ylabel('QPS')
        plt.savefig(sub2("%s/${role}_${ip}_qps.png" % (perf_result_dir), ob))
        plt.clf()
        # Response-time histogram.
        plt.bar(sql_time_dist.keys(), sql_time_dist.values())
        plt.xlabel('Response Time (us)')
        plt.ylabel('Number of Requests')
        plt.savefig(sub2("%s/${role}_${ip}_total_time_dist.png" % (perf_result_dir), ob))
        plt.clf()
        # Queue wait time per request.
        plt.plot(wait_times, ',')
        plt.xlabel('Timeline')
        plt.ylabel('Wait Time in Mergeserver Queue (us)')
        plt.savefig(sub2("%s/${role}_${ip}_queue_time.png" % (perf_result_dir), ob))
        plt.clf()
        # One latency plot + HTML snippet per RPC packet type.
        for pcode, rpc in rpcs.items():
            plt.plot(rpc['rpc_times'], ',')
            plt.xlabel('Timeline')
            plt.ylabel('Response Time (us)')
            plt.savefig(sub2("%s/${role}_${ip}_%s.png" % (perf_result_dir, pcode), ob))
            plt.clf()
            rpcs_html += sub2("""<p>$role $ip %s(%s)请求次数:%s 平均响应延时:%sus<br><img src="${role}_${ip}_%s.png" /></p>""" % (pcode, get_packet_name(pcode), len(rpc['rpc_times']), rpc['avg_rpc_time'], pcode), ob)
    info("drawing performance graph costs %ss" % (time.time() - drawing_st))
    parse_perf = sub2(sub2(""" <p> ${role} ${ip} 测试运行时间:${elapsed_seconds}s<br> ${role} ${ip} SQL请求次数:$sql_count<br> ${role} ${ip} MS的QPS:$qps</p> <p>${role} ${ip} QPS:<br> <img src="${role}_${ip}_qps.png" /></p> <p>${role} ${ip} 平均响应延时:${avg_sql_time}us<br> <img src="${role}_${ip}_total_time_dist.png" /></p> <p>${role} ${ip} MS队列中平均花费的时间:${avg_wait_time}us<br> <img src="${role}_${ip}_queue_time.png" /></p> $rpcs_html """, locals()), ob)
    return dict(
        parse_res = parse_perf,
        stat = dict(
            sql_count = sql_count,
            real_sql_count = real_sql_count,
            elapsed_seconds = elapsed_seconds,
            sql_time = sql_time,
            wait_time = wait_time,
            qps2time = qps2time,
            sql_time_dist = sql_time_dist,
            wait_times = wait_times,
            rpcs = rpcs,
        )
    )


def collect_perf(*args, **ob):
    """Fetch profiles and monitor output from every server, aggregate the
    per-mergeserver stats into cluster totals, render all graphs and write
    the final index.html report; returns locals()."""
    def get_server_list(server_role):
        # Space-separated "${name.ip}:${name.port}" list for the report.
        server_list = get_match_child(ob, server_role)
        server_list_str = ' '.join('${%s.ip}:${%s.port}'%(k, k) for k in server_list.keys())
        return server_list_str
    # NOTE(review): hard-coded per-user publish directory.
    perf_result_dir = "/home/yanran.hfs/public_html/ob_perf/not_published/%s" % (running_ts)
    os.mkdir(perf_result_dir)
    os.mkdir(local_tmp_dir)
    ms_profile = '%s/$role.$ip.profile' % (local_tmp_dir)
    # Pull gprofiler output from every server, and profile logs from the MSes.
    all_do(ob, 'mergeserver', 'save_gprofile', local_tmp_dir)
    all_do(ob, 'chunkserver', 'save_gprofile', local_tmp_dir)
    all_do(ob, 'updateserver', 'save_gprofile', local_tmp_dir)
    all_do(ob, 'mergeserver', 'save_profile', ms_profile)
    #call_(ob, 'ms0.save_periodic_pstack', perf_result_dir)
    #call_(ob, 'cs0.save_periodic_pstack', perf_result_dir)
    #call_(ob, 'ups0.save_periodic_pstack', perf_result_dir)
    ms_gprofile_output = "%s/%s" % (local_tmp_dir, str.split(find_attr(ob, "ms0.gprofiler_output"), '/')[-1])
    cs_gprofile_output = "%s/%s" % (local_tmp_dir, str.split(find_attr(ob, "cs0.gprofiler_output"), '/')[-1])
    ups_gprofile_output = "%s/%s" % (local_tmp_dir, str.split(find_attr(ob, "ups0.gprofiler_output"), '/')[-1])
    # Render pprof text+graph HTML per role; all_do returns (name, result)
    # pairs, hence the x[1] extraction below.
    ms_gprof = all_do(ob, 'mergeserver', 'pprof_gprofile_output', "bin/mergeserver", ms_gprofile_output, perf_result_dir)
    cs_gprof = all_do(ob, 'chunkserver', 'pprof_gprofile_output', "bin/chunkserver", cs_gprofile_output, perf_result_dir)
    ups_gprof = all_do(ob, 'updateserver', 'pprof_gprofile_output', "bin/updateserver", ups_gprofile_output, perf_result_dir)
    ms_gprof = ''.join([x[1] for x in ms_gprof])
    cs_gprof = ''.join([x[1] for x in cs_gprof])
    ups_gprof = ''.join([x[1] for x in ups_gprof])
    ms_prof = all_do(ob, 'mergeserver', 'parse_profile_log', ms_profile, perf_result_dir)
    ms_prof_htmls = ''.join([x[1]['parse_res'] for x in ms_prof])
    # Aggregate per-MS stats into cluster-wide numbers.
    sql_count = 0
    real_sql_count = 0
    elapsed_seconds = 0
    sql_time = 0
    wait_time = 0
    qps2time = None
    sql_time_dist = None
    for ms_tuple in ms_prof:
        ms = ms_tuple[1]['stat']
        # Cluster elapsed time = max over mergeservers.
        if elapsed_seconds < ms['elapsed_seconds']:
            elapsed_seconds = ms['elapsed_seconds']
        sql_count += ms['sql_count']
        real_sql_count += ms['real_sql_count']
        sql_time += ms['sql_time']
        wait_time += ms['wait_time']
        qps2time = dict_add(qps2time, ms['qps2time'])
        sql_time_dist = dict_add(sql_time_dist, ms['sql_time_dist'])
    if elapsed_seconds == 0:
        qps = 0
        avg_sql_time = 0
    else:
        qps = sql_count / elapsed_seconds
        avg_sql_time = float(sql_time) / real_sql_count
    if plt is not None:
        # Cluster-level QPS and response-time-distribution graphs.
        plt.plot([x for k,x in sorted(qps2time.items())], '-')
        plt.xlabel('Timeline')
        plt.ylabel('QPS')
        plt.savefig("%s/cluster_qps.png" % (perf_result_dir))
        plt.clf()
        plt.bar(sql_time_dist.keys(), sql_time_dist.values())
        plt.xlabel('Response Time (us)')
        plt.ylabel('Number of Requests')
        plt.savefig("%s/cluster_total_time_dist.png" % (perf_result_dir))
        plt.clf()
    user_name = find_attr(ob, "usr")
    case_name = find_attr(ob, "case")
    rs_list = get_server_list("rootserver")
    ups_list = get_server_list("updateserver")
    ms_list = get_server_list("mergeserver")
    cs_list = get_server_list("chunkserver")
    # Fetch the linuxmon output started by turn_on_perf() from each machine.
    # NOTE(review): every scp overwrites the same local file before it is
    # read, so concurrent downloads are not possible here -- confirm `sh`
    # is synchronous.
    server_stats = ""
    for ip in get_cluster_ips(*args, **ob):
        sh(sub2('scp $usr@%s:$dir/server_stats %s/' % (ip, local_tmp_dir), ob))
        server_stats += "<p>%s监控信息:<pre>%s</pre></p>" % (ip, read("%s/server_stats" % (local_tmp_dir)))
    result_html_template = (""" <p>测试人员:${user_name}<br> 测试Case:${case_name}<br> 运行环境: <ul> <li>RootServer: ${rs_list}</li> <li>UpdateServer: ${ups_list}</li> <li>MergeServer: ${ms_list}</li> <li>ChunkServer: ${cs_list}</li> </ul> 测试运行时间:${elapsed_seconds}s<br> SQL请求次数:$sql_count<br> 集群QPS:$qps</p> <p>QPS:<br> <img src="cluster_qps.png" /></p> <p>平均响应延时:${avg_sql_time}us<br> <img src="cluster_total_time_dist.png" /></p> ${ms_prof_htmls} ${ms_gprof} ${cs_gprof} ${ups_gprof} ${server_stats} <p>Server栈信息: <ul> <li><a href="ms0">ms0</a></li> <li><a href="cs0">cs0</a></li> <li><a href="ups0">ups0</a></li> </ul></p> """)
    # Substitute both framework vars and everything computed above.
    all_vars = copy.copy(ob)
    all_vars.update(locals())
    with open("%s/index.html" % (perf_result_dir), "w") as f:
        f.write(sub2(result_html_template, all_vars))
    # Append this run to the global report index.
    with open("/home/yanran.hfs/public_html/ob_perf/not_published/index.html", "a") as f:
        f.write("""<li><a href="%s/">%s %s %s</a></li>\n""" % (running_ts, running_ts, user_name, case_name))
    sh("rm -r %s" % (local_tmp_dir))
    return locals()


def perf_environ_setup():
    """Point every server at the gperftools profiler helper library and its
    output path via obi-level variables (earlier per-role variants are kept
    below as commented-out reference)."""
    #ob['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
    #ob['gprofiler_output'] = "$dir/$ip.gprofiler.output"
    #ob['environ_extras'] = "PROFILE_SAMPLE_INTERVAL=$ptime"
    #ob['ms0']['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
    #ob['ms0']['gprofiler_output'] = "$dir/ms0.gprofiler.output"
    #ob['ms0']['environ_extras'] = sub2("PROFILEOUTPUT=$gprofiler_output PROFILE_SAMPLE_INTERVAL=$ptime", ob)
    #ob['cs0']['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
    #ob['cs0']['gprofiler_output'] = "$dir/cs0.gprofiler.output"
    #ob['cs0']['environ_extras'] = "PROFILEOUTPUT=$gprofiler_output"
    #ob['ups0']['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
    #ob['ups0']['gprofiler_output'] = "$dir/ups0.gprofiler.output"
    #ob['ups0']['environ_extras'] = "PROFILEOUTPUT=$gprofiler_output"
    #obi_vars.update(dict(environ_extras = "PROFILE_SAMPLE_INTERVAL=$ptime"))
    obi_vars['gprofiler_output'] = "$dir/$ip.$role.gprofiler.output"
    obi_vars['environ_extras'] = "PROFILEOUTPUT=$gprofiler_output PROFILE_SAMPLE_INTERVAL=$ptime"
    obi_vars['server_ld_preload'] = "$dir/lib/libprofiler_helper.so"
    #call_(ob, 'ms0.kill', '-50')


def perf_install():
    """Merge all perf attribute sets into the framework's variable tables
    and apply the profiler environment settings."""
    client_vars.update(dict_filter_out_special_attrs(perf_client_attrs()))
    ct_vars.update(dict_filter_out_special_attrs(perf_ct_attrs()))
    role_vars.update(dict_filter_out_special_attrs(perf_role_attrs()))
    obi_vars.update(dict_filter_out_special_attrs(perf_obi_attrs()))
    perf_environ_setup()

# Executed at import time: registers the perf attributes with the framework.
perf_install()
unknown
codeparrot/codeparrot-clean
from sqlagg import CountUniqueColumn, AliasColumn
from sqlagg.columns import SimpleColumn, SumColumn
from sqlagg.filters import LTE, AND, GTE, GT, EQ, NOTEQ, OR, IN
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.sqlreport import DatabaseColumn, AggregateColumn
from custom.world_vision.sqldata import BaseSqlData
from custom.world_vision.sqldata.main_sqldata import AnteNatalCareServiceOverview, DeliveryPlaceDetails


class MotherRegistrationDetails(BaseSqlData):
    """Registration counts for mother cases: totals plus either open/closed/
    recent counts (no date range configured) or period-based counts."""
    table_name = "fluff_WorldVisionMotherFluff"
    slug = 'mother_registration_details'
    title = 'Mother Registration Details'

    @property
    def filters(self):
        # Drop the first base filter (the date filter) -- registration
        # totals are not restricted to the selected period by default.
        return super(MotherRegistrationDetails, self).filters[1:]

    @property
    def rows(self):
        from custom.world_vision import MOTHER_INDICATOR_TOOLTIPS
        result = []
        for column in self.columns:
            result.append([
                {'sort_key': column.header, 'html': column.header,
                 'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['mother_registration_details'], column.slug)},
                {'sort_key': self.data[column.slug], 'html': self.data[column.slug]}
            ])
        return result

    @property
    def headers(self):
        return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Number')])

    @property
    def columns(self):
        columns = [
            DatabaseColumn("Total mothers registered ever", CountUniqueColumn('doc_id', alias="total")),
        ]
        # Equivalent to the original spelled-out condition
        # (no-startdate & no-enddate) or (no-startdate & enddate),
        # which reduces to: no startdate configured.
        if 'startdate' not in self.config:
            columns.extend([
                DatabaseColumn(
                    "Total open mother cases",
                    CountUniqueColumn(
                        'doc_id', alias="no_date_opened",
                        filters=self.filters + [EQ('closed_on', 'empty')]
                    )
                ),
                DatabaseColumn(
                    "Total closed mother cases",
                    CountUniqueColumn(
                        'doc_id', alias="no_date_closed",
                        filters=self.filters + [NOTEQ('closed_on', 'empty')]
                    )
                ),
                DatabaseColumn(
                    "New registrations during last 30 days",
                    CountUniqueColumn(
                        'doc_id', alias="no_date_new_registrations",
                        filters=self.filters + [AND([GTE('opened_on', "last_month"), LTE('opened_on', "today")])]
                    )
                )
            ])
        else:
            columns.extend([
                DatabaseColumn(
                    "Mother cases open at end of period",
                    CountUniqueColumn(
                        'doc_id', alias="opened",
                        filters=self.filters + [AND([LTE('opened_on', "stred"),
                                                     OR([EQ('closed_on', 'empty'), GT('closed_on', "stred")])])]
                    )
                ),
                DatabaseColumn(
                    "Mother cases closed during period",
                    CountUniqueColumn(
                        'doc_id', alias="closed",
                        filters=self.filters + [AND([GTE('closed_on', "strsd"), LTE('closed_on', "stred")])]
                    )
                ),
                DatabaseColumn(
                    "Total mothers followed during period",
                    CountUniqueColumn(
                        'doc_id', alias="followed",
                        filters=self.filters + [AND([LTE('opened_on', "stred"),
                                                     OR([EQ('closed_on', 'empty'), GTE('closed_on', "strsd")])])]
                    )
                ),
                DatabaseColumn(
                    "Total pregnant",
                    CountUniqueColumn(
                        'doc_id', alias="total_pregnant",
                        filters=self.filters + [AND([LTE('opened_on', "stred"),
                                                     OR([EQ('closed_on', 'empty'), GTE('closed_on', "strsd")]),
                                                     EQ('mother_state', 'pregnant_mother_type')])]
                    )
                ),
                DatabaseColumn(
                    "New registrations during time period",
                    CountUniqueColumn(
                        'doc_id', alias="new_registrations",
                        filters=self.filters + [AND([LTE('opened_on', "stred"), GTE('opened_on', "strsd")])]
                    )
                )
            ])
        return columns


class ClosedMotherCasesBreakdown(BaseSqlData):
    """Breakdown of closed mother cases by closure reason within the period."""
    table_name = "fluff_WorldVisionMotherFluff"
    slug = 'closed_mother_cases-breakdown'
    title = 'Closed Mother Cases Breakdown'
    show_total = True
    total_row_name = "Mother cases closed during the time period"
    chart_title = 'Closed Maternal Cases'
    show_charts = True
    chart_x_label = ''
    chart_y_label = ''
    chart_only = True

    @property
    def group_by(self):
        return ['reason_for_mother_closure']

    @property
    def rows(self):
        from custom.world_vision import REASON_FOR_CLOSURE_MAPPING
        return self._get_rows(REASON_FOR_CLOSURE_MAPPING, super(ClosedMotherCasesBreakdown, self).rows)

    @property
    def filters(self):
        # Replace the base date filter with a closed_on range.
        filter = super(ClosedMotherCasesBreakdown, self).filters[1:]
        if 'strsd' in self.config:
            filter.append(GTE('closed_on', 'strsd'))
        if 'stred' in self.config:
            filter.append(LTE('closed_on', 'stred'))
        return filter

    @property
    def headers(self):
        return DataTablesHeader(*[DataTablesColumn('Reason for closure'),
                                  DataTablesColumn('Number'), DataTablesColumn('Percentage')])

    @property
    def columns(self):
        return [
            DatabaseColumn("Reason for closure", SimpleColumn('reason_for_mother_closure')),
            DatabaseColumn("Number", CountUniqueColumn('doc_id'))
        ]


class PregnantMotherBreakdownByTrimester(BaseSqlData):
    """Currently pregnant mothers split by trimester (derived from EDD)."""
    table_name = "fluff_WorldVisionMotherFluff"
    slug = 'pregnant_mother_by_trimester'
    title = 'Pregnant Woman Breakdown by Trimester'
    chart_title = 'Pregnant Mother Visits'
    show_total = True
    total_row_name = "Total pregnant "
    show_charts = True
    chart_x_label = ''
    chart_y_label = ''
    chart_only = True

    def percent_fn(self, y):
        # Percentage of all pregnant mothers; denominator guarded against 0.
        x = self.data['trimester_1'] + self.data['trimester_2'] + self.data['trimester_3']
        return "%(p).0f%%" % {"p": (100 * float(y or 0) / float(x or 1))}

    @property
    def filters(self):
        filter = super(PregnantMotherBreakdownByTrimester, self).filters
        filter.append(EQ('mother_state', 'pregnant_mother_type'))
        filter.append(NOTEQ('edd', 'empty'))
        return filter

    @property
    def rows(self):
        result = []
        for column in self.columns:
            percent = self.percent_fn(self.data[column.slug])
            result.append([
                {'sort_key': column.header, 'html': column.header},
                {'sort_key': self.data[column.slug], 'html': self.data[column.slug]},
                {'sort_key': 'percentage', 'html': percent}
            ])
        return result

    @property
    def columns(self):
        # Trimester boundaries expressed as EDD relative to today:
        # >196 days out = 1st, 85-196 = 2nd, <=84 = 3rd trimester.
        return [
            DatabaseColumn("Trimester 1",
                CountUniqueColumn('doc_id', alias="trimester_1",
                    filters=self.filters + [GT('edd', "today_plus_196")])
            ),
            DatabaseColumn("Trimester 2",
                CountUniqueColumn('doc_id', alias="trimester_2",
                    filters=self.filters + [AND([LTE('edd', "today_plus_196"), GT('edd', "today_plus_84")])])
            ),
            DatabaseColumn("Trimester 3",
                CountUniqueColumn('doc_id', alias="trimester_3",
                    filters=self.filters + [LTE('edd', 'today_plus_84')])
            )
        ]


class AnteNatalCareServiceOverviewExtended(AnteNatalCareServiceOverview):
    """ANC service indicators plus their 'Total Eligible' denominators;
    columns[i + 14] is the eligible count matching indicator columns[i]."""
    slug = 'ante_natal_care_service_overview_extended'
    show_charts = True
    chart_x_label = ''
    chart_y_label = ''
    chart_only = True

    @property
    def rows(self):
        from custom.world_vision import MOTHER_INDICATOR_TOOLTIPS
        # First row: total pregnant, with no eligible/percentage columns.
        result = [[{'sort_key': self.columns[0].header, 'html': self.columns[0].header},
                   {'sort_key': self.data[self.columns[0].slug], 'html': self.data[self.columns[0].slug]},
                   {'sort_key': 'n/a', 'html': 'n/a'}, {'sort_key': 'n/a', 'html': 'n/a'}]]
        # Indicators 1..14 paired with eligible counts 15..28.
        for i in range(1, 15):
            result.append([
                {'sort_key': self.columns[i].header, 'html': self.columns[i].header,
                 'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['ante_natal_care_service_details'], self.columns[i].slug)},
                {'sort_key': self.data[self.columns[i].slug], 'html': self.data[self.columns[i].slug]},
                {'sort_key': self.data[self.columns[i + 14].slug], 'html': self.data[self.columns[i + 14].slug],
                 'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['ante_natal_care_service_details'], self.columns[i + 14].slug)},
                {'sort_key': self.percent_fn(self.data[self.columns[i + 14].slug], self.data[self.columns[i].slug]),
                 'html': self.percent_fn(self.data[self.columns[i + 14].slug], self.data[self.columns[i].slug])}
            ])
        return result

    @property
    def columns(self):
        return [
            DatabaseColumn("Total pregnant", CountUniqueColumn('doc_id', alias="total_pregnant")),
            DatabaseColumn("No ANC", CountUniqueColumn('doc_id', alias="no_anc",
                filters=self.filters + [NOTEQ('anc_1', 'yes')])),
            DatabaseColumn("ANC1", CountUniqueColumn('doc_id', alias="anc_1",
                filters=self.filters + [EQ('anc_1', 'yes')])),
            DatabaseColumn("ANC2", CountUniqueColumn('doc_id', alias="anc_2",
                filters=self.filters + [EQ('anc_2', 'yes')])),
            DatabaseColumn("ANC3", CountUniqueColumn('doc_id', alias="anc_3",
                filters=self.filters + [EQ('anc_3', 'yes')])),
            DatabaseColumn("ANC4", CountUniqueColumn('doc_id', alias="anc_4",
                filters=self.filters + [EQ('anc_4', 'yes')])),
            DatabaseColumn("TT1", CountUniqueColumn('doc_id', alias="tt_1",
                filters=self.filters + [EQ('tt_1', 'yes')])),
            DatabaseColumn("TT2", CountUniqueColumn('doc_id', alias="tt_2",
                filters=self.filters + [EQ('tt_2', 'yes')])),
            DatabaseColumn("TT Booster", CountUniqueColumn('doc_id', alias="tt_booster",
                filters=self.filters + [EQ('tt_booster', 'yes')])),
            DatabaseColumn("TT Complete", CountUniqueColumn('doc_id', alias="tt_completed",
                filters=self.filters + [OR([EQ('tt_2', 'yes'), EQ('tt_booster', 'yes')])])),
            DatabaseColumn("IFA received", CountUniqueColumn('doc_id', alias="ifa_tablets",
                filters=self.filters + [EQ('iron_folic', 'yes')])),
            # 100-IFA indicators use a delivery_date window instead of the
            # base date filter, hence filters[1:-1].
            DatabaseColumn("100 IFA consumed", CountUniqueColumn('doc_id', alias="100_tablets",
                filters=self.filters[1:-1] + [AND([EQ('completed_100_ifa', 'yes'),
                                                   GTE('delivery_date', 'strsd'),
                                                   LTE('delivery_date', 'stred')])])),
            DatabaseColumn("Clinically anemic mothers", CountUniqueColumn('doc_id', alias="clinically_anemic",
                filters=self.filters + [EQ('anemia_signs', 'yes')])),
            DatabaseColumn("Number of pregnant mother referrals due to danger signs",
                CountUniqueColumn('doc_id', alias="danger_signs",
                    filters=self.filters + [EQ('currently_referred', 'yes')])),
            DatabaseColumn("Knows closest health facility", CountUniqueColumn('doc_id', alias="knows_closest_facility",
                filters=self.filters + [EQ('knows_closest_facility', 'yes')])),
            DatabaseColumn("No ANC Total Eligible", CountUniqueColumn('doc_id', alias="no_anc_eligible",
                filters=self.filters + [LTE('edd', 'today_plus_196')])),
            DatabaseColumn("ANC1 Total Eligible", CountUniqueColumn('doc_id', alias="anc_1_eligible",
                filters=self.filters + [LTE('edd', 'today_plus_196')])),
            DatabaseColumn("ANC2 Total Eligible", CountUniqueColumn('doc_id', alias="anc_2_eligible",
                filters=self.filters + [AND([EQ('anc_1', 'yes'), LTE('edd', 'today_plus_112')])])),
            DatabaseColumn("ANC3 Total Eligible", CountUniqueColumn('doc_id', alias="anc_3_eligible",
                filters=self.filters + [AND([EQ('anc_2', 'yes'), LTE('edd', 'today_plus_56')])])),
            DatabaseColumn("ANC4 Total Eligible", CountUniqueColumn('doc_id', alias="anc_4_eligible",
                filters=self.filters + [AND([EQ('anc_3', 'yes'), LTE('edd', 'today_plus_35')])])),
            DatabaseColumn("TT1 Total Eligible", CountUniqueColumn('doc_id', alias="tt_1_eligible",
                filters=self.filters + [NOTEQ('previous_tetanus', 'yes')])),
            DatabaseColumn("TT2 Total Eligible", CountUniqueColumn('doc_id', alias="tt_2_eligible",
                filters=self.filters + [EQ('tt_1', 'yes')])),
            DatabaseColumn("TT Booster Total Eligible", CountUniqueColumn('doc_id', alias="tt_booster_eligible",
                filters=self.filters + [EQ('previous_tetanus', 'yes')])),
            DatabaseColumn("TT Completed (TT2 or Booster) Total Eligible",
                CountUniqueColumn('doc_id', alias="tt_completed_eligible",
                    filters=self.filters + [OR([EQ('tt_1', 'yes'), EQ('previous_tetanus', 'yes')])])),
            DatabaseColumn("Taking IFA tablets Total Eligible",
                CountUniqueColumn('doc_id', alias="ifa_tablets_eligible")),
            DatabaseColumn("Completed 100 IFA tablets Total Eligible",
                CountUniqueColumn('doc_id', alias="100_tablets_eligible",
                    filters=self.filters[1:-1] + [AND([GTE('delivery_date', 'strsd'),
                                                       LTE('delivery_date', 'stred')])])),
            DatabaseColumn("Clinically anemic mothers Total Eligible",
                CountUniqueColumn('doc_id', alias="clinically_anemic_eligible")),
            DatabaseColumn("Number of mother referrals due to danger signs Total Eligible",
                CountUniqueColumn('doc_id', alias="danger_signs_eligible")),
            DatabaseColumn("Know closest health facility Total Eligible",
                CountUniqueColumn('doc_id', alias="knows_closest_facility_eligible"))
        ]


class DeliveryMothersIds(BaseSqlData):
    """Doc ids of mothers who delivered within the configured period; used
    by child-table reports to scope to those mothers."""
    table_name = "fluff_WorldVisionMotherFluff"

    @property
    def filters(self):
        filter = super(DeliveryMothersIds, self).filters[1:]
        if 'strsd' in self.config:
            filter.append(GTE('delivery_date', 'strsd'))
        if 'stred' in self.config:
            filter.append(LTE('delivery_date', 'stred'))
        return filter

    @property
    def group_by(self):
        return ['doc_id']

    @property
    def columns(self):
        return [
            DatabaseColumn("Mother ID", SimpleColumn('doc_id'))
        ]


class DeliveryLiveBirthDetails(BaseSqlData):
    """Live births by sex for children of mothers who delivered in period."""
    table_name = "fluff_WorldVisionChildFluff"
    slug = 'delivery_live_birth_details'
    title = ''
    show_charts = True
    chart_x_label = ''
    chart_y_label = ''
    show_total = True
    total_row_name = "Total live births"
    chart_title = 'Live Births'
    accordion_start = False
    accordion_end = False
    chart_only = True

    @property
    def headers(self):
        return DataTablesHeader(*[DataTablesColumn('Entity'),
                                  DataTablesColumn('Number'), DataTablesColumn('Percentage')])

    @property
    def filters(self):
        # Side effect: stores the mother id list on self.config so the IN
        # filter can reference it; '' is appended so the tuple is never empty.
        self.config['mother_ids'] = tuple(DeliveryMothersIds(config=self.config).data.keys()) + ('',)
        return [IN('mother_id', 'mother_ids')]

    @property
    def columns(self):
        # BUGFIX: the sex labels were swapped relative to their aliases and
        # filters ("Live birth (Male)" was alias='girls' counting
        # gender == 'female', and vice versa). Pair each label with the
        # matching gender filter.
        return [
            DatabaseColumn("Live birth (Male)",
                CountUniqueColumn('doc_id', alias='boys',
                    filters=self.filters + [EQ('gender', 'male')])
            ),
            DatabaseColumn("Live birth (Female)",
                CountUniqueColumn('doc_id', alias='girls',
                    filters=self.filters + [EQ('gender', 'female')])
            )
        ]

    @property
    def rows(self):
        # Missing counts (None) are treated as 0 for both total and display.
        total = sum(v if v else 0 for v in self.data.values())
        result = []
        for column in self.columns:
            percent = self.percent_fn(total, self.data[column.slug])
            result.append([
                {'sort_key': column.header, 'html': column.header},
                {'sort_key': self.data[column.slug] if self.data[column.slug] else 0,
                 'html': self.data[column.slug] if self.data[column.slug] else 0},
                {'sort_key': 'percentage', 'html': percent}
            ])
        return result


class DeliveryStillBirthDetails(BaseSqlData):
    """Still births and abortions for deliveries within the period."""
    table_name = "fluff_WorldVisionMotherFluff"
    slug = 'delivery_still_birth_details'
    title = ''
    accordion_start = False
    accordion_end = True

    @property
    def filters(self):
        filter = super(DeliveryStillBirthDetails, self).filters[1:]
        if 'strsd' in self.config:
            filter.append(GTE('delivery_date', 'strsd'))
        if 'stred' in self.config:
            filter.append(LTE('delivery_date', 'stred'))
        return filter

    @property
    def headers(self):
        return DataTablesHeader(*[DataTablesColumn(''), DataTablesColumn('Number')])

    @property
    def columns(self):
        return [
            DatabaseColumn("Still births",
                SumColumn('number_of_children_born_dead_total')
            ),
            DatabaseColumn("Abortions",
                CountUniqueColumn('doc_id', alias="abortions",
                    filters=self.filters + [EQ('reason_for_mother_closure', 'abortion')]),
            ),
        ]

    @property
    def rows(self):
        from custom.world_vision import MOTHER_INDICATOR_TOOLTIPS
        result = []
        for column in self.columns:
            result.append([
                {'sort_key': column.header, 'html': column.header,
                 'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['delivery_details'], column.slug)},
                {'sort_key': self.data[column.slug] if self.data[column.slug] else 0,
                 'html': self.data[column.slug] if self.data[column.slug] else 0}
            ])
        return result


class PostnatalCareOverview(BaseSqlData):
    """PNC visit counts (48h / 2-4d / 5-7d / 21-42d) with eligible
    denominators; columns[i + 4] is the eligible count for columns[i]."""
    table_name = "fluff_WorldVisionMotherFluff"
    slug = 'postnatal_care_overview'
    title = 'Postnatal Care Overview'
    show_charts = True
    chart_title = 'PNC Visits'
    chart_x_label = ''
    chart_y_label = ''
    accordion_end = False
    chart_only = True

    @property
    def filters(self):
        filter = super(PostnatalCareOverview, self).filters[1:]
        if 'strsd' in self.config:
            filter.append(GTE('delivery_date', 'strsd'))
        if 'stred' in self.config:
            filter.append(LTE('delivery_date', 'stred'))
        return filter

    @property
    def headers(self):
        return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Number'),
                                  DataTablesColumn('Total Eligible'), DataTablesColumn('Percentage')])

    @property
    def rows(self):
        from custom.world_vision import MOTHER_INDICATOR_TOOLTIPS
        result = []
        for i in range(0, 4):
            result.append([
                {'sort_key': self.columns[i].header, 'html': self.columns[i].header,
                 'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['postnatal_care_details'], self.columns[i].slug)},
                {'sort_key': self.data[self.columns[i].slug], 'html': self.data[self.columns[i].slug]},
                {'sort_key': self.data[self.columns[i + 4].slug], 'html': self.data[self.columns[i + 4].slug],
                 'tooltip': self.get_tooltip(MOTHER_INDICATOR_TOOLTIPS['postnatal_care_details'], self.columns[i + 4].slug)},
                {'sort_key': self.percent_fn(self.data[self.columns[i + 4].slug], self.data[self.columns[i].slug]),
                 'html': self.percent_fn(self.data[self.columns[i + 4].slug], self.data[self.columns[i].slug])}
            ])
        return result

    @property
    def columns(self):
        # NOTE(review): pnc_3_eligible uses 'today_minus_25' while pnc_4 uses
        # 'today_minus_21' -- looks inconsistent with the 5-7 day window;
        # preserved as-is, confirm against report spec.
        return [
            DatabaseColumn(
                "PNC in 48 hours",
                CountUniqueColumn('doc_id', alias="pnc_1",
                    filters=self.filters + [EQ('pp_1_done', 'yes')]),
            ),
            DatabaseColumn(
                "PNC in 2-4 days",
                CountUniqueColumn('doc_id', alias="pnc_2",
                    filters=self.filters + [EQ('pp_2_done', 'yes')]),
            ),
            DatabaseColumn(
                "PNC in 5-7",
                CountUniqueColumn('doc_id', alias="pnc_3",
                    filters=self.filters + [EQ('pp_3_done', 'yes')]),
            ),
            DatabaseColumn(
                "PNC in 21-42 days",
                CountUniqueColumn('doc_id', alias="pnc_4",
                    filters=self.filters + [EQ('pp_4_done', 'yes')]),
            ),
            DatabaseColumn(
                "PNC 1 visits Total Eligible",
                CountUniqueColumn('doc_id', alias="pnc_1_eligible",
                    filters=self.filters + [AND([NOTEQ('delivery_date', 'empty'),
                                                 LTE('delivery_date', 'today')])]),
            ),
            DatabaseColumn("PNC 2 visits Total Eligible",
                CountUniqueColumn('doc_id', alias="pnc_2_eligible",
                    filters=self.filters + [AND([NOTEQ('delivery_date', 'empty'),
                                                 LTE('delivery_date', 'today_minus_2')])]),
            ),
            DatabaseColumn("PNC 3 visits Total Eligible",
                CountUniqueColumn('doc_id', alias="pnc_3_eligible",
                    filters=self.filters + [AND([NOTEQ('delivery_date', 'empty'),
                                                 LTE('delivery_date', 'today_minus_25')])]),
            ),
            DatabaseColumn("PNC 4 visits Total Eligible",
                CountUniqueColumn('doc_id', alias="pnc_4_eligible",
                    filters=self.filters + [AND([NOTEQ('delivery_date', 'empty'),
                                                 LTE('delivery_date', 'today_minus_21')])]),
            )
        ]


class CauseOfMaternalDeaths(BaseSqlData):
    """Maternal deaths in the period broken down by recorded cause."""
    table_name = "fluff_WorldVisionMotherFluff"
    slug = 'Cause_of_maternal_deaths'
    title = 'Cause of Maternal Deaths'
    show_total = True
    total_row_name = "Total Mother Deaths"
    show_charts = True
    chart_x_label = ''
    chart_y_label = ''
    chart_title = 'Mother Deaths'
    table_only = True

    @property
    def group_by(self):
        return ['cause_of_death_maternal']

    @property
    def rows(self):
        from custom.world_vision import MOTHER_DEATH_MAPPING
        return self._get_rows(MOTHER_DEATH_MAPPING, super(CauseOfMaternalDeaths, self).rows)

    @property
    def filters(self):
        filter = super(CauseOfMaternalDeaths, self).filters[1:]
        filter.append(EQ('reason_for_mother_closure', 'death'))
        if 'strsd' in self.config:
            filter.append(GTE('date_of_mother_death', 'strsd'))
        if 'stred' in self.config:
            filter.append(LTE('date_of_mother_death', 'stred'))
        return filter

    @property
    def headers(self):
        return DataTablesHeader(*[DataTablesColumn('Maternal Death'),
                                  DataTablesColumn('Number'), DataTablesColumn('Percentage')])

    @property
    def columns(self):
        return [
            DatabaseColumn("Reason", SimpleColumn('cause_of_death_maternal')),
            DatabaseColumn("Number", CountUniqueColumn('doc_id'))
        ]


class FamilyPlanningMethods(BaseSqlData):
    """Families using family planning, broken down by reported method."""
    table_name = "fluff_WorldVisionMotherFluff"
    slug = 'family_planning_methods'
    title = 'Family Planning Methods'
    show_total = True
    total_row_name = "Total Families who reported using Family Planning"
    show_charts = True
    chart_title = 'Family Planning Methods'
    chart_x_label = ''
    chart_y_label = ''

    @property
    def group_by(self):
        return ['fp_method']

    @property
    def rows(self):
        from custom.world_vision import FAMILY_PLANNING_METHODS
        return self._get_rows(FAMILY_PLANNING_METHODS, super(FamilyPlanningMethods, self).rows)

    @property
    def filters(self):
        filter = super(FamilyPlanningMethods, self).filters
        filter.append(NOTEQ('fp_method', 'empty'))
        return filter

    @property
    def headers(self):
        return DataTablesHeader(*[DataTablesColumn('Method'),
                                  DataTablesColumn('Number'), DataTablesColumn('Percentage')])

    @property
    def columns(self):
        return [
            DatabaseColumn("Method", SimpleColumn('fp_method')),
            DatabaseColumn("Number", CountUniqueColumn('doc_id'))
        ]


class DeliveryPlaceDetailsExtended(DeliveryPlaceDetails):
    """Delivery place breakdown extended with home/other-place columns."""
    show_charts = True
    chart_title = 'Delivery Place'
    chart_x_label = ''
    chart_y_label = ''
    slug = 'delivery_place_details_extended'

    @property
    def columns(self):
        columns = super(DeliveryPlaceDetailsExtended, self).columns
        additional_columns = [
            DatabaseColumn("Home deliveries",
                CountUniqueColumn('doc_id', alias="home_deliveries",
                    filters=self.filters + [OR([EQ('place_of_birth', 'home'),
                                                EQ('place_of_birth', 'on_route')])])),
            DatabaseColumn("Other places",
                CountUniqueColumn('doc_id', alias="other_places",
                    filters=self.filters + [OR([EQ('place_of_birth', 'empty'),
                                                EQ('place_of_birth', 'other')])]))
        ]
        columns.extend(additional_columns)
        return columns


class DeliveryPlaceMotherDetails(DeliveryPlaceDetails):
    """Delivery type breakdown (normal / caesarean / unknown) for mothers."""
    title = ''
    show_charts = True
    chart_x_label = ''
    chart_y_label = ''
    chart_title = 'Delivery Place Mother'
    slug = 'delivery_place_mother_details'
    accordion_start = False
    accordion_end = False

    @property
    def columns(self):
        return [
            DatabaseColumn("Total Deliveries (with/without outcome)",
                CountUniqueColumn('doc_id', alias="total_delivery", filters=self.filters),
            ),
            DatabaseColumn("Normal deliveries",
                CountUniqueColumn('doc_id', alias="normal_deliveries",
                    filters=self.filters + [EQ('type_of_delivery', 'normal_delivery')])),
            DatabaseColumn("Caesarean deliveries",
                CountUniqueColumn('doc_id', alias="caesarean_deliveries",
                    filters=self.filters + [EQ('type_of_delivery', 'cesarean_delivery')])),
            DatabaseColumn("Delivery type unknown",
                CountUniqueColumn('doc_id', alias="unknown",
                    filters=self.filters + [OR([EQ('type_of_delivery', 'empty'),
                                                EQ('type_of_delivery', 'unknown_delivery')])]))
        ]

    @property
    def rows(self):
        # Skip the first row (the total) -- only the breakdown is shown.
        return super(DeliveryPlaceMotherDetails, self).rows[1:]


class NumberOfPNCVisits(BaseSqlData):
    """Distribution of PNC visit counts (0-4) among mothers who delivered
    more than 42 days ago."""
    table_name = "fluff_WorldVisionMotherFluff"
    slug = 'number_of_pnc_visits'
    title = ''
    show_total = True
    total_row_name = "Total mothers who delivered more than 42 days ago"
    show_charts = True
    chart_title = 'PNC Visits'
    chart_x_label = ''
    chart_y_label = ''
    accordion_start = False
    accordion_end = True

    @property
    def rows(self):
        result = []
        rows = super(NumberOfPNCVisits, self).rows
        # Bucket mothers by their PNC visit count (last cell of each row).
        counter = {k: 0 for k in range(0, 5)}
        for row in rows:
            counter[row[-1]['html']] += 1
        for k, v in counter.iteritems():
            percent = self.percent_fn(len(rows), v)
            result.append([
                {'sort_key': "Mothers with %d PNC visits within 42 days of delivery" % k,
                 'html': "Mothers with %d PNC visits within 42 days of delivery" % k},
                {'sort_key': v, 'html': v},
                {'sort_key': 'percentage', 'html': percent}
            ])
        return result

    @property
    def group_by(self):
        return ['doc_id', 'pp_1_done', 'pp_2_done', 'pp_3_done', 'pp_4_done']

    @property
    def filters(self):
        filters = super(NumberOfPNCVisits, self).filters[1:]
        filters.append(AND([NOTEQ('delivery_date', 'empty'), LTE('delivery_date', 'today_minus_42')]))
        return filters

    @property
    def columns(self):
        def format_pnc_count(*args):
            # Number of pp_N_done flags equal to 'yes'.
            return sum([1 if arg == 'yes' else 0 for arg in args])
        return [
            DatabaseColumn("PP 1", SimpleColumn('pp_1_done', alias='pp_1_done')),
            DatabaseColumn("PP 2", SimpleColumn('pp_2_done', alias='pp_2_done')),
            DatabaseColumn("PP 3", SimpleColumn('pp_3_done', alias='pp_3_done')),
            DatabaseColumn("PP 4", SimpleColumn('pp_4_done', alias='pp_4_done')),
            AggregateColumn('PNC Count', format_pnc_count,
                            [AliasColumn('pp_1_done'), AliasColumn('pp_2_done'),
                             AliasColumn('pp_3_done'), AliasColumn('pp_4_done')])]
unknown
codeparrot/codeparrot-clean
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import time from metrics import smoothness from telemetry.page import page_measurement class StatsCollector(object): def __init__(self, timeline): """ Utility class for collecting rendering stats from timeline model. timeline -- The timeline model """ self.timeline = timeline self.total_best_rasterize_time = 0 self.total_best_record_time = 0 self.total_pixels_rasterized = 0 self.total_pixels_recorded = 0 self.trigger_event = self.FindTriggerEvent() self.renderer_process = self.trigger_event.start_thread.parent def FindTriggerEvent(self): events = [s for s in self.timeline.GetAllEventsOfName( 'measureNextFrame') if s.parent_slice == None] if len(events) != 1: raise LookupError, 'no measureNextFrame event found' return events[0] def FindFrameNumber(self, trigger_time): start_event = None for event in self.renderer_process.IterAllSlicesOfName( "LayerTreeHost::UpdateLayers"): if event.start > trigger_time: if start_event == None: start_event = event elif event.start < start_event.start: start_event = event if start_event is None: raise LookupError, \ 'no LayterTreeHost::UpdateLayers after measureNextFrame found' return start_event.args["source_frame_number"] def GatherRasterizeStats(self, frame_number): for event in self.renderer_process.IterAllSlicesOfName( "RasterWorkerPoolTaskImpl::RunRasterOnThread"): if event.args["data"]["source_frame_number"] == frame_number: for raster_loop_event in event.GetAllSubSlicesOfName("RasterLoop"): best_rasterize_time = float("inf") for raster_event in raster_loop_event.GetAllSubSlicesOfName( "Picture::Raster"): if "num_pixels_rasterized" in raster_event.args: best_rasterize_time = min(best_rasterize_time, raster_event.duration) self.total_pixels_rasterized += \ raster_event.args["num_pixels_rasterized"] if best_rasterize_time == float('inf'): best_rasterize_time = 0 
self.total_best_rasterize_time += best_rasterize_time def GatherRecordStats(self, frame_number): for event in self.renderer_process.IterAllSlicesOfName( "PictureLayer::Update"): if event.args["source_frame_number"] == frame_number: for record_loop_event in event.GetAllSubSlicesOfName("RecordLoop"): best_record_time = float('inf') for record_event in record_loop_event.GetAllSubSlicesOfName( "Picture::Record"): best_record_time = min(best_record_time, record_event.duration) self.total_pixels_recorded += ( record_event.args["data"]["width"] * record_event.args["data"]["height"]) if best_record_time == float('inf'): best_record_time = 0 self.total_best_record_time += best_record_time def GatherRenderingStats(self): trigger_time = self.trigger_event.start frame_number = self.FindFrameNumber(trigger_time) self.GatherRasterizeStats(frame_number) self.GatherRecordStats(frame_number) def DivideIfPossibleOrZero(numerator, denominator): if denominator == 0: return 0 return numerator / denominator class RasterizeAndRecord(page_measurement.PageMeasurement): def __init__(self): super(RasterizeAndRecord, self).__init__('', True) self._metrics = None def AddCommandLineOptions(self, parser): parser.add_option('--report-all-results', dest='report_all_results', action='store_true', help='Reports all data collected') parser.add_option('--raster-record-repeat', dest='raster_record_repeat', default=20, help='Repetitions in raster and record loops.' 
+ 'Higher values reduce variance, but can cause' + 'instability (timeouts, event buffer overflows, etc.).') parser.add_option('--start-wait-time', dest='start_wait_time', default=2, help='Wait time before the benchmark is started ' + '(must be long enought to load all content)') parser.add_option('--stop-wait-time', dest='stop_wait_time', default=5, help='Wait time before measurement is taken ' + '(must be long enough to render one frame)') def CustomizeBrowserOptions(self, options): options.extra_browser_args.append('--enable-gpu-benchmarking') # Run each raster task N times. This allows us to report the time for the # best run, effectively excluding cache effects and time when the thread is # de-scheduled. options.extra_browser_args.append( '--slow-down-raster-scale-factor=' + str(options.raster_record_repeat)) # Enable impl-side-painting. Current version of benchmark only works for # this mode. options.extra_browser_args.append('--enable-impl-side-painting') options.extra_browser_args.append('--force-compositing-mode') options.extra_browser_args.append('--enable-threaded-compositing') def MeasurePage(self, page, tab, results): self._metrics = smoothness.SmoothnessMetrics(tab) # Rasterize only what's visible. tab.ExecuteJavaScript( 'chrome.gpuBenchmarking.setRasterizeOnlyVisibleContent();') # Wait until the page has loaded and come to a somewhat steady state. # Needs to be adjusted for every device (~2 seconds for workstation). time.sleep(float(self.options.start_wait_time)) # Render one frame before we start gathering a trace. On some pages, the # first frame requested has more variance in the number of pixels # rasterized. 
tab.ExecuteJavaScript(""" window.__rafFired = false; window.webkitRequestAnimationFrame(function() { chrome.gpuBenchmarking.setNeedsDisplayOnAllLayers(); window.__rafFired = true; }); """) tab.browser.StartTracing('webkit,benchmark', 60) self._metrics.Start() tab.ExecuteJavaScript(""" console.time("measureNextFrame"); window.__rafFired = false; window.webkitRequestAnimationFrame(function() { chrome.gpuBenchmarking.setNeedsDisplayOnAllLayers(); window.__rafFired = true; }); """) # Wait until the frame was drawn. # Needs to be adjusted for every device and for different # raster_record_repeat counts. # TODO(ernstm): replace by call-back. time.sleep(float(self.options.stop_wait_time)) tab.ExecuteJavaScript('console.timeEnd("measureNextFrame")') tab.browser.StopTracing() self._metrics.Stop() timeline = tab.browser.GetTraceResultAndReset().AsTimelineModel() collector = StatsCollector(timeline) collector.GatherRenderingStats() rendering_stats = self._metrics.end_values results.Add('best_rasterize_time', 'seconds', collector.total_best_rasterize_time / 1.e3, data_type='unimportant') results.Add('best_record_time', 'seconds', collector.total_best_record_time / 1.e3, data_type='unimportant') results.Add('total_pixels_rasterized', 'pixels', collector.total_pixels_rasterized, data_type='unimportant') results.Add('total_pixels_recorded', 'pixels', collector.total_pixels_recorded, data_type='unimportant') if self.options.report_all_results: for k, v in rendering_stats.iteritems(): results.Add(k, '', v)
unknown
codeparrot/codeparrot-clean
"""distutils.ccompiler Contains CCompiler, an abstract base class that defines the interface for the Distutils compiler abstraction model.""" # This module should be kept compatible with Python 2.1. __revision__ = "$Id: ccompiler.py 46331 2006-05-26 14:07:23Z bob.ippolito $" import sys, os, re from types import * from copy import copy from distutils.errors import * from distutils.spawn import spawn from distutils.file_util import move_file from distutils.dir_util import mkpath from distutils.dep_util import newer_pairwise, newer_group from distutils.util import split_quoted, execute from distutils import log class CCompiler: """Abstract base class to define the interface that must be implemented by real compiler classes. Also has some utility methods used by several compiler classes. The basic idea behind a compiler abstraction class is that each instance can be used for all the compile/link steps in building a single project. Thus, attributes common to all of those compile and link steps -- include directories, macros to define, libraries to link against, etc. -- are attributes of the compiler instance. To allow for variability in how individual files are treated, most of those attributes may be varied on a per-compilation or per-link basis. """ # 'compiler_type' is a class attribute that identifies this class. It # keeps code that wants to know what kind of compiler it's dealing with # from having to import all possible compiler classes just to do an # 'isinstance'. In concrete CCompiler subclasses, 'compiler_type' # should really, really be one of the keys of the 'compiler_class' # dictionary (see below -- used by the 'new_compiler()' factory # function) -- authors of new compiler interface classes are # responsible for updating 'compiler_class'! compiler_type = None # XXX things not handled by this compiler abstraction model: # * client can't provide additional options for a compiler, # e.g. warning, optimization, debugging flags. 
Perhaps this # should be the domain of concrete compiler abstraction classes # (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base # class should have methods for the common ones. # * can't completely override the include or library searchg # path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2". # I'm not sure how widely supported this is even by Unix # compilers, much less on other platforms. And I'm even less # sure how useful it is; maybe for cross-compiling, but # support for that is a ways off. (And anyways, cross # compilers probably have a dedicated binary with the # right paths compiled in. I hope.) # * can't do really freaky things with the library list/library # dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against # different versions of libfoo.a in different locations. I # think this is useless without the ability to null out the # library search path anyways. # Subclasses that rely on the standard filename generation methods # implemented below should override these; see the comment near # those methods ('object_filenames()' et. al.) for details: src_extensions = None # list of strings obj_extension = None # string static_lib_extension = None shared_lib_extension = None # string static_lib_format = None # format string shared_lib_format = None # prob. same as static_lib_format exe_extension = None # string # Default language settings. language_map is used to detect a source # file or Extension target language, checking source filenames. # language_order is used to detect the language precedence, when deciding # what language to use when mixing source types. For example, if some # extension has two files with ".c" extension, and one with ".cpp", it # is still linked as c++. 
language_map = {".c" : "c", ".cc" : "c++", ".cpp" : "c++", ".cxx" : "c++", ".m" : "objc", } language_order = ["c++", "objc", "c"] def __init__ (self, verbose=0, dry_run=0, force=0): self.dry_run = dry_run self.force = force self.verbose = verbose # 'output_dir': a common output directory for object, library, # shared object, and shared library files self.output_dir = None # 'macros': a list of macro definitions (or undefinitions). A # macro definition is a 2-tuple (name, value), where the value is # either a string or None (no explicit value). A macro # undefinition is a 1-tuple (name,). self.macros = [] # 'include_dirs': a list of directories to search for include files self.include_dirs = [] # 'libraries': a list of libraries to include in any link # (library names, not filenames: eg. "foo" not "libfoo.a") self.libraries = [] # 'library_dirs': a list of directories to search for libraries self.library_dirs = [] # 'runtime_library_dirs': a list of directories to search for # shared libraries/objects at runtime self.runtime_library_dirs = [] # 'objects': a list of object files (or similar, such as explicitly # named library files) to include on any link self.objects = [] for key in self.executables.keys(): self.set_executable(key, self.executables[key]) # __init__ () def set_executables (self, **args): """Define the executables (and options for them) that will be run to perform the various stages of compilation. The exact set of executables that may be specified here depends on the compiler class (via the 'executables' class attribute), but most will have: compiler the C/C++ compiler linker_so linker used to create shared objects and libraries linker_exe linker used to create binary executables archiver static library creator On platforms with a command-line (Unix, DOS/Windows), each of these is a string that will be split into executable name and (optional) list of arguments. 
(Splitting the string is done similarly to how Unix shells operate: words are delimited by spaces, but quotes and backslashes can override this. See 'distutils.util.split_quoted()'.) """ # Note that some CCompiler implementation classes will define class # attributes 'cpp', 'cc', etc. with hard-coded executable names; # this is appropriate when a compiler class is for exactly one # compiler/OS combination (eg. MSVCCompiler). Other compiler # classes (UnixCCompiler, in particular) are driven by information # discovered at run-time, since there are many different ways to do # basically the same things with Unix C compilers. for key in args.keys(): if not self.executables.has_key(key): raise ValueError, \ "unknown executable '%s' for class %s" % \ (key, self.__class__.__name__) self.set_executable(key, args[key]) # set_executables () def set_executable(self, key, value): if type(value) is StringType: setattr(self, key, split_quoted(value)) else: setattr(self, key, value) def _find_macro (self, name): i = 0 for defn in self.macros: if defn[0] == name: return i i = i + 1 return None def _check_macro_definitions (self, definitions): """Ensures that every element of 'definitions' is a valid macro definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do nothing if all definitions are OK, raise TypeError otherwise. """ for defn in definitions: if not (type (defn) is TupleType and (len (defn) == 1 or (len (defn) == 2 and (type (defn[1]) is StringType or defn[1] is None))) and type (defn[0]) is StringType): raise TypeError, \ ("invalid macro definition '%s': " % defn) + \ "must be tuple (string,), (string, string), or " + \ "(string, None)" # -- Bookkeeping methods ------------------------------------------- def define_macro (self, name, value=None): """Define a preprocessor macro for all compilations driven by this compiler object. 
The optional parameter 'value' should be a string; if it is not supplied, then the macro will be defined without an explicit value and the exact outcome depends on the compiler used (XXX true? does ANSI say anything about this?) """ # Delete from the list of macro definitions/undefinitions if # already there (so that this one will take precedence). i = self._find_macro (name) if i is not None: del self.macros[i] defn = (name, value) self.macros.append (defn) def undefine_macro (self, name): """Undefine a preprocessor macro for all compilations driven by this compiler object. If the same macro is defined by 'define_macro()' and undefined by 'undefine_macro()' the last call takes precedence (including multiple redefinitions or undefinitions). If the macro is redefined/undefined on a per-compilation basis (ie. in the call to 'compile()'), then that takes precedence. """ # Delete from the list of macro definitions/undefinitions if # already there (so that this one will take precedence). i = self._find_macro (name) if i is not None: del self.macros[i] undefn = (name,) self.macros.append (undefn) def add_include_dir (self, dir): """Add 'dir' to the list of directories that will be searched for header files. The compiler is instructed to search directories in the order in which they are supplied by successive calls to 'add_include_dir()'. """ self.include_dirs.append (dir) def set_include_dirs (self, dirs): """Set the list of directories that will be searched to 'dirs' (a list of strings). Overrides any preceding calls to 'add_include_dir()'; subsequence calls to 'add_include_dir()' add to the list passed to 'set_include_dirs()'. This does not affect any list of standard include directories that the compiler may search by default. """ self.include_dirs = copy (dirs) def add_library (self, libname): """Add 'libname' to the list of libraries that will be included in all links driven by this compiler object. 
Note that 'libname' should *not* be the name of a file containing a library, but the name of the library itself: the actual filename will be inferred by the linker, the compiler, or the compiler class (depending on the platform). The linker will be instructed to link against libraries in the order they were supplied to 'add_library()' and/or 'set_libraries()'. It is perfectly valid to duplicate library names; the linker will be instructed to link against libraries as many times as they are mentioned. """ self.libraries.append (libname) def set_libraries (self, libnames): """Set the list of libraries to be included in all links driven by this compiler object to 'libnames' (a list of strings). This does not affect any standard system libraries that the linker may include by default. """ self.libraries = copy (libnames) def add_library_dir (self, dir): """Add 'dir' to the list of directories that will be searched for libraries specified to 'add_library()' and 'set_libraries()'. The linker will be instructed to search for libraries in the order they are supplied to 'add_library_dir()' and/or 'set_library_dirs()'. """ self.library_dirs.append (dir) def set_library_dirs (self, dirs): """Set the list of library search directories to 'dirs' (a list of strings). This does not affect any standard library search path that the linker may search by default. """ self.library_dirs = copy (dirs) def add_runtime_library_dir (self, dir): """Add 'dir' to the list of directories that will be searched for shared libraries at runtime. """ self.runtime_library_dirs.append (dir) def set_runtime_library_dirs (self, dirs): """Set the list of directories to search for shared libraries at runtime to 'dirs' (a list of strings). This does not affect any standard search path that the runtime linker may search by default. 
""" self.runtime_library_dirs = copy (dirs) def add_link_object (self, object): """Add 'object' to the list of object files (or analogues, such as explicitly named library files or the output of "resource compilers") to be included in every link driven by this compiler object. """ self.objects.append (object) def set_link_objects (self, objects): """Set the list of object files (or analogues) to be included in every link to 'objects'. This does not affect any standard object files that the linker may include by default (such as system libraries). """ self.objects = copy (objects) # -- Private utility methods -------------------------------------- # (here for the convenience of subclasses) # Helper method to prep compiler in subclass compile() methods def _setup_compile(self, outdir, macros, incdirs, sources, depends, extra): """Process arguments and decide which source files to compile. Merges _fix_compile_args() and _prep_compile(). """ if outdir is None: outdir = self.output_dir elif type(outdir) is not StringType: raise TypeError, "'output_dir' must be a string or None" if macros is None: macros = self.macros elif type(macros) is ListType: macros = macros + (self.macros or []) else: raise TypeError, "'macros' (if supplied) must be a list of tuples" if incdirs is None: incdirs = self.include_dirs elif type(incdirs) in (ListType, TupleType): incdirs = list(incdirs) + (self.include_dirs or []) else: raise TypeError, \ "'include_dirs' (if supplied) must be a list of strings" if extra is None: extra = [] # Get the list of expected output (object) files objects = self.object_filenames(sources, strip_dir=0, output_dir=outdir) assert len(objects) == len(sources) # XXX should redo this code to eliminate skip_source entirely. 
# XXX instead create build and issue skip messages inline if self.force: skip_source = {} # rebuild everything for source in sources: skip_source[source] = 0 elif depends is None: # If depends is None, figure out which source files we # have to recompile according to a simplistic check. We # just compare the source and object file, no deep # dependency checking involving header files. skip_source = {} # rebuild everything for source in sources: # no wait, rebuild nothing skip_source[source] = 1 n_sources, n_objects = newer_pairwise(sources, objects) for source in n_sources: # no really, only rebuild what's skip_source[source] = 0 # out-of-date else: # If depends is a list of files, then do a different # simplistic check. Assume that each object depends on # its source and all files in the depends list. skip_source = {} # L contains all the depends plus a spot at the end for a # particular source file L = depends[:] + [None] for i in range(len(objects)): source = sources[i] L[-1] = source if newer_group(L, objects[i]): skip_source[source] = 0 else: skip_source[source] = 1 pp_opts = gen_preprocess_options(macros, incdirs) build = {} for i in range(len(sources)): src = sources[i] obj = objects[i] ext = os.path.splitext(src)[1] self.mkpath(os.path.dirname(obj)) if skip_source[src]: log.debug("skipping %s (%s up-to-date)", src, obj) else: build[obj] = src, ext return macros, objects, extra, pp_opts, build def _get_cc_args(self, pp_opts, debug, before): # works for unixccompiler, emxccompiler, cygwinccompiler cc_args = pp_opts + ['-c'] if debug: cc_args[:0] = ['-g'] if before: cc_args[:0] = before return cc_args def _fix_compile_args (self, output_dir, macros, include_dirs): """Typecheck and fix-up some of the arguments to the 'compile()' method, and return fixed-up values. 
Specifically: if 'output_dir' is None, replaces it with 'self.output_dir'; ensures that 'macros' is a list, and augments it with 'self.macros'; ensures that 'include_dirs' is a list, and augments it with 'self.include_dirs'. Guarantees that the returned values are of the correct type, i.e. for 'output_dir' either string or None, and for 'macros' and 'include_dirs' either list or None. """ if output_dir is None: output_dir = self.output_dir elif type (output_dir) is not StringType: raise TypeError, "'output_dir' must be a string or None" if macros is None: macros = self.macros elif type (macros) is ListType: macros = macros + (self.macros or []) else: raise TypeError, "'macros' (if supplied) must be a list of tuples" if include_dirs is None: include_dirs = self.include_dirs elif type (include_dirs) in (ListType, TupleType): include_dirs = list (include_dirs) + (self.include_dirs or []) else: raise TypeError, \ "'include_dirs' (if supplied) must be a list of strings" return output_dir, macros, include_dirs # _fix_compile_args () def _prep_compile(self, sources, output_dir, depends=None): """Decide which souce files must be recompiled. Determine the list of object files corresponding to 'sources', and figure out which ones really need to be recompiled. Return a list of all object files and a dictionary telling which source files can be skipped. """ # Get the list of expected output (object) files objects = self.object_filenames(sources, output_dir=output_dir) assert len(objects) == len(sources) if self.force: skip_source = {} # rebuild everything for source in sources: skip_source[source] = 0 elif depends is None: # If depends is None, figure out which source files we # have to recompile according to a simplistic check. We # just compare the source and object file, no deep # dependency checking involving header files. 
skip_source = {} # rebuild everything for source in sources: # no wait, rebuild nothing skip_source[source] = 1 n_sources, n_objects = newer_pairwise(sources, objects) for source in n_sources: # no really, only rebuild what's skip_source[source] = 0 # out-of-date else: # If depends is a list of files, then do a different # simplistic check. Assume that each object depends on # its source and all files in the depends list. skip_source = {} # L contains all the depends plus a spot at the end for a # particular source file L = depends[:] + [None] for i in range(len(objects)): source = sources[i] L[-1] = source if newer_group(L, objects[i]): skip_source[source] = 0 else: skip_source[source] = 1 return objects, skip_source # _prep_compile () def _fix_object_args (self, objects, output_dir): """Typecheck and fix up some arguments supplied to various methods. Specifically: ensure that 'objects' is a list; if output_dir is None, replace with self.output_dir. Return fixed versions of 'objects' and 'output_dir'. """ if type (objects) not in (ListType, TupleType): raise TypeError, \ "'objects' must be a list or tuple of strings" objects = list (objects) if output_dir is None: output_dir = self.output_dir elif type (output_dir) is not StringType: raise TypeError, "'output_dir' must be a string or None" return (objects, output_dir) def _fix_lib_args (self, libraries, library_dirs, runtime_library_dirs): """Typecheck and fix up some of the arguments supplied to the 'link_*' methods. Specifically: ensure that all arguments are lists, and augment them with their permanent versions (eg. 'self.libraries' augments 'libraries'). Return a tuple with fixed versions of all arguments. 
""" if libraries is None: libraries = self.libraries elif type (libraries) in (ListType, TupleType): libraries = list (libraries) + (self.libraries or []) else: raise TypeError, \ "'libraries' (if supplied) must be a list of strings" if library_dirs is None: library_dirs = self.library_dirs elif type (library_dirs) in (ListType, TupleType): library_dirs = list (library_dirs) + (self.library_dirs or []) else: raise TypeError, \ "'library_dirs' (if supplied) must be a list of strings" if runtime_library_dirs is None: runtime_library_dirs = self.runtime_library_dirs elif type (runtime_library_dirs) in (ListType, TupleType): runtime_library_dirs = (list (runtime_library_dirs) + (self.runtime_library_dirs or [])) else: raise TypeError, \ "'runtime_library_dirs' (if supplied) " + \ "must be a list of strings" return (libraries, library_dirs, runtime_library_dirs) # _fix_lib_args () def _need_link (self, objects, output_file): """Return true if we need to relink the files listed in 'objects' to recreate 'output_file'. """ if self.force: return 1 else: if self.dry_run: newer = newer_group (objects, output_file, missing='newer') else: newer = newer_group (objects, output_file) return newer # _need_link () def detect_language (self, sources): """Detect the language of a given file, or list of files. Uses language_map, and language_order to do the job. 
""" if type(sources) is not ListType: sources = [sources] lang = None index = len(self.language_order) for source in sources: base, ext = os.path.splitext(source) extlang = self.language_map.get(ext) try: extindex = self.language_order.index(extlang) if extindex < index: lang = extlang index = extindex except ValueError: pass return lang # detect_language () # -- Worker methods ------------------------------------------------ # (must be implemented by subclasses) def preprocess (self, source, output_file=None, macros=None, include_dirs=None, extra_preargs=None, extra_postargs=None): """Preprocess a single C/C++ source file, named in 'source'. Output will be written to file named 'output_file', or stdout if 'output_file' not supplied. 'macros' is a list of macro definitions as for 'compile()', which will augment the macros set with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a list of directory names that will be added to the default list. Raises PreprocessError on failure. """ pass def compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): """Compile one or more source files. 'sources' must be a list of filenames, most likely C/C++ files, but in reality anything that can be handled by a particular compiler and compiler class (eg. MSVCCompiler can handle resource files in 'sources'). Return a list of object filenames, one per source filename in 'sources'. Depending on the implementation, not all source files will necessarily be compiled, but all corresponding object filenames will be returned. If 'output_dir' is given, object files will be put under it, while retaining their original path component. That is, "foo/bar.c" normally compiles to "foo/bar.o" (for a Unix implementation); if 'output_dir' is "build", then it would compile to "build/foo/bar.o". 'macros', if given, must be a list of macro definitions. 
A macro definition is either a (name, value) 2-tuple or a (name,) 1-tuple. The former defines a macro; if the value is None, the macro is defined without an explicit value. The 1-tuple case undefines a macro. Later definitions/redefinitions/ undefinitions take precedence. 'include_dirs', if given, must be a list of strings, the directories to add to the default include file search path for this compilation only. 'debug' is a boolean; if true, the compiler will be instructed to output debug symbols in (or alongside) the object file(s). 'extra_preargs' and 'extra_postargs' are implementation- dependent. On platforms that have the notion of a command-line (e.g. Unix, DOS/Windows), they are most likely lists of strings: extra command-line arguments to prepand/append to the compiler command line. On other platforms, consult the implementation class documentation. In any event, they are intended as an escape hatch for those occasions when the abstract compiler framework doesn't cut the mustard. 'depends', if given, is a list of filenames that all targets depend on. If a source file is older than any file in depends, then the source file will be recompiled. This supports dependency tracking, but only at a coarse granularity. Raises CompileError on failure. """ # A concrete compiler class can either override this method # entirely or implement _compile(). macros, objects, extra_postargs, pp_opts, build = \ self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) for obj in objects: try: src, ext = build[obj] except KeyError: continue self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) # Return *all* object filenames, not just the ones we just built. return objects def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts): """Compile 'src' to product 'obj'.""" # A concrete compiler class that does not override compile() # should implement _compile(). 
pass def create_static_lib (self, objects, output_libname, output_dir=None, debug=0, target_lang=None): """Link a bunch of stuff together to create a static library file. The "bunch of stuff" consists of the list of object files supplied as 'objects', the extra object files supplied to 'add_link_object()' and/or 'set_link_objects()', the libraries supplied to 'add_library()' and/or 'set_libraries()', and the libraries supplied as 'libraries' (if any). 'output_libname' should be a library name, not a filename; the filename will be inferred from the library name. 'output_dir' is the directory where the library file will be put. 'debug' is a boolean; if true, debugging information will be included in the library (note that on most platforms, it is the compile step where this matters: the 'debug' flag is included here just for consistency). 'target_lang' is the target language for which the given objects are being compiled. This allows specific linkage time treatment of certain languages. Raises LibError on failure. """ pass # values for target_desc parameter in link() SHARED_OBJECT = "shared_object" SHARED_LIBRARY = "shared_library" EXECUTABLE = "executable" def link (self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): """Link a bunch of stuff together to create an executable or shared library file. The "bunch of stuff" consists of the list of object files supplied as 'objects'. 'output_filename' should be a filename. If 'output_dir' is supplied, 'output_filename' is relative to it (i.e. 'output_filename' can provide directory components if needed). 'libraries' is a list of libraries to link against. These are library names, not filenames, since they're translated into filenames in a platform-specific way (eg. "foo" becomes "libfoo.a" on Unix and "foo.lib" on DOS/Windows). 
However, they can include a directory component, which means the linker will look in that specific directory rather than searching all the normal locations. 'library_dirs', if supplied, should be a list of directories to search for libraries that were specified as bare library names (ie. no directory component). These are on top of the system default and those supplied to 'add_library_dir()' and/or 'set_library_dirs()'. 'runtime_library_dirs' is a list of directories that will be embedded into the shared library and used to search for other shared libraries that *it* depends on at run-time. (This may only be relevant on Unix.) 'export_symbols' is a list of symbols that the shared library will export. (This appears to be relevant only on Windows.) 'debug' is as for 'compile()' and 'create_static_lib()', with the slight distinction that it actually matters on most platforms (as opposed to 'create_static_lib()', which includes a 'debug' flag mostly for form's sake). 'extra_preargs' and 'extra_postargs' are as for 'compile()' (except of course that they supply command-line arguments for the particular linker being used). 'target_lang' is the target language for which the given objects are being compiled. This allows specific linkage time treatment of certain languages. Raises LinkError on failure. """ raise NotImplementedError # Old 'link_*()' methods, rewritten to use the new 'link()' method. 
def link_shared_lib (self, objects, output_libname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): self.link(CCompiler.SHARED_LIBRARY, objects, self.library_filename(output_libname, lib_type='shared'), output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols, debug, extra_preargs, extra_postargs, build_temp, target_lang) def link_shared_object (self, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): self.link(CCompiler.SHARED_OBJECT, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols, debug, extra_preargs, extra_postargs, build_temp, target_lang) def link_executable (self, objects, output_progname, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, target_lang=None): self.link(CCompiler.EXECUTABLE, objects, self.executable_filename(output_progname), output_dir, libraries, library_dirs, runtime_library_dirs, None, debug, extra_preargs, extra_postargs, None, target_lang) # -- Miscellaneous methods ----------------------------------------- # These are all used by the 'gen_lib_options() function; there is # no appropriate default implementation so subclasses should # implement all of these. def library_dir_option (self, dir): """Return the compiler option to add 'dir' to the list of directories searched for libraries. """ raise NotImplementedError def runtime_library_dir_option (self, dir): """Return the compiler option to add 'dir' to the list of directories searched for runtime libraries. 
""" raise NotImplementedError def library_option (self, lib): """Return the compiler option to add 'dir' to the list of libraries linked into the shared library or executable. """ raise NotImplementedError def has_function(self, funcname, includes=None, include_dirs=None, libraries=None, library_dirs=None): """Return a boolean indicating whether funcname is supported on the current platform. The optional arguments can be used to augment the compilation environment. """ # this can't be included at module scope because it tries to # import math which might not be available at that point - maybe # the necessary logic should just be inlined? import tempfile if includes is None: includes = [] if include_dirs is None: include_dirs = [] if libraries is None: libraries = [] if library_dirs is None: library_dirs = [] fd, fname = tempfile.mkstemp(".c", funcname, text=True) f = os.fdopen(fd, "w") for incl in includes: f.write("""#include "%s"\n""" % incl) f.write("""\ main (int argc, char **argv) { %s(); } """ % funcname) f.close() try: objects = self.compile([fname], include_dirs=include_dirs) except CompileError: return False try: self.link_executable(objects, "a.out", libraries=libraries, library_dirs=library_dirs) except (LinkError, TypeError): return False return True def find_library_file (self, dirs, lib, debug=0): """Search the specified list of directories for a static or shared library file 'lib' and return the full path to that file. If 'debug' true, look for a debugging version (if that makes sense on the current platform). Return None if 'lib' wasn't found in any of the specified directories. """ raise NotImplementedError # -- Filename generation methods ----------------------------------- # The default implementation of the filename generating methods are # prejudiced towards the Unix/DOS/Windows view of the world: # * object files are named by replacing the source file extension # (eg. 
.c/.cpp -> .o/.obj) # * library files (shared or static) are named by plugging the # library name and extension into a format string, eg. # "lib%s.%s" % (lib_name, ".a") for Unix static libraries # * executables are named by appending an extension (possibly # empty) to the program name: eg. progname + ".exe" for # Windows # # To reduce redundant code, these methods expect to find # several attributes in the current object (presumably defined # as class attributes): # * src_extensions - # list of C/C++ source file extensions, eg. ['.c', '.cpp'] # * obj_extension - # object file extension, eg. '.o' or '.obj' # * static_lib_extension - # extension for static library files, eg. '.a' or '.lib' # * shared_lib_extension - # extension for shared library/object files, eg. '.so', '.dll' # * static_lib_format - # format string for generating static library filenames, # eg. 'lib%s.%s' or '%s.%s' # * shared_lib_format # format string for generating shared library filenames # (probably same as static_lib_format, since the extension # is one of the intended parameters to the format string) # * exe_extension - # extension for executable files, eg. 
'' or '.exe' def object_filenames(self, source_filenames, strip_dir=0, output_dir=''): if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: base, ext = os.path.splitext(src_name) base = os.path.splitdrive(base)[1] # Chop off the drive base = base[os.path.isabs(base):] # If abs, chop off leading / if ext not in self.src_extensions: raise UnknownFileError, \ "unknown file type '%s' (from '%s')" % (ext, src_name) if strip_dir: base = os.path.basename(base) obj_names.append(os.path.join(output_dir, base + self.obj_extension)) return obj_names def shared_object_filename(self, basename, strip_dir=0, output_dir=''): assert output_dir is not None if strip_dir: basename = os.path.basename (basename) return os.path.join(output_dir, basename + self.shared_lib_extension) def executable_filename(self, basename, strip_dir=0, output_dir=''): assert output_dir is not None if strip_dir: basename = os.path.basename (basename) return os.path.join(output_dir, basename + (self.exe_extension or '')) def library_filename(self, libname, lib_type='static', # or 'shared' strip_dir=0, output_dir=''): assert output_dir is not None if lib_type not in ("static", "shared", "dylib"): raise ValueError, "'lib_type' must be \"static\", \"shared\" or \"dylib\"" fmt = getattr(self, lib_type + "_lib_format") ext = getattr(self, lib_type + "_lib_extension") dir, base = os.path.split (libname) filename = fmt % (base, ext) if strip_dir: dir = '' return os.path.join(output_dir, dir, filename) # -- Utility methods ----------------------------------------------- def announce (self, msg, level=1): log.debug(msg) def debug_print (self, msg): from distutils.debug import DEBUG if DEBUG: print msg def warn (self, msg): sys.stderr.write ("warning: %s\n" % msg) def execute (self, func, args, msg=None, level=1): execute(func, args, msg, self.dry_run) def spawn (self, cmd): spawn (cmd, dry_run=self.dry_run) def move_file (self, src, dst): return move_file (src, dst, 
dry_run=self.dry_run) def mkpath (self, name, mode=0777): mkpath (name, mode, self.dry_run) # class CCompiler # Map a sys.platform/os.name ('posix', 'nt') to the default compiler # type for that platform. Keys are interpreted as re match # patterns. Order is important; platform mappings are preferred over # OS names. _default_compilers = ( # Platform string mappings # on a cygwin built python we can use gcc like an ordinary UNIXish # compiler ('cygwin.*', 'unix'), ('os2emx', 'emx'), ('java.*', 'jython'), # OS name mappings ('posix', 'unix'), ('nt', 'msvc'), ('mac', 'mwerks'), ) def get_default_compiler(osname=None, platform=None): """ Determine the default compiler to use for the given platform. osname should be one of the standard Python OS names (i.e. the ones returned by os.name) and platform the common value returned by sys.platform for the platform in question. The default values are os.name and sys.platform in case the parameters are not given. """ if osname is None: osname = os.name if platform is None: platform = sys.platform for pattern, compiler in _default_compilers: if re.match(pattern, platform) is not None or \ re.match(pattern, osname) is not None: return compiler # Default to Unix compiler return 'unix' # Map compiler types to (module_name, class_name) pairs -- ie. where to # find the code that implements an interface to this compiler. (The module # is assumed to be in the 'distutils' package.) 
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler', "standard UNIX-style compiler"), 'msvc': ('msvccompiler', 'MSVCCompiler', "Microsoft Visual C++"), 'cygwin': ('cygwinccompiler', 'CygwinCCompiler', "Cygwin port of GNU C Compiler for Win32"), 'mingw32': ('cygwinccompiler', 'Mingw32CCompiler', "Mingw32 port of GNU C Compiler for Win32"), 'bcpp': ('bcppcompiler', 'BCPPCompiler', "Borland C++ Compiler"), 'mwerks': ('mwerkscompiler', 'MWerksCompiler', "MetroWerks CodeWarrior"), 'emx': ('emxccompiler', 'EMXCCompiler', "EMX port of GNU C Compiler for OS/2"), 'jython': ('jythoncompiler', 'JythonCompiler', "Compiling is not supported on Jython"), } def show_compilers(): """Print list of available compilers (used by the "--help-compiler" options to "build", "build_ext", "build_clib"). """ # XXX this "knows" that the compiler option it's describing is # "--compiler", which just happens to be the case for the three # commands that use it. from distutils.fancy_getopt import FancyGetopt compilers = [] for compiler in compiler_class.keys(): compilers.append(("compiler="+compiler, None, compiler_class[compiler][2])) compilers.sort() pretty_printer = FancyGetopt(compilers) pretty_printer.print_help("List of available compilers:") def new_compiler (plat=None, compiler=None, verbose=0, dry_run=0, force=0): """Generate an instance of some CCompiler subclass for the supplied platform/compiler combination. 'plat' defaults to 'os.name' (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler for that platform. Currently only 'posix' and 'nt' are supported, and the default compilers are "traditional Unix interface" (UnixCCompiler class) and Visual C++ (MSVCCompiler class). Note that it's perfectly possible to ask for a Unix compiler object under Windows, and a Microsoft compiler object under Unix -- if you supply a value for 'compiler', 'plat' is ignored. 
""" if plat is None: plat = os.name try: if compiler is None: compiler = get_default_compiler(plat) (module_name, class_name, long_description) = compiler_class[compiler] except KeyError: msg = "don't know how to compile C/C++ code on platform '%s'" % plat if compiler is not None: msg = msg + " with '%s' compiler" % compiler raise DistutilsPlatformError, msg try: module_name = "distutils." + module_name __import__ (module_name) module = sys.modules[module_name] klass = vars(module)[class_name] except ImportError: raise DistutilsModuleError, \ "can't compile C/C++ code: unable to load module '%s'" % \ module_name except KeyError: raise DistutilsModuleError, \ ("can't compile C/C++ code: unable to find class '%s' " + "in module '%s'") % (class_name, module_name) # XXX The None is necessary to preserve backwards compatibility # with classes that expect verbose to be the first positional # argument. return klass (None, dry_run, force) def gen_preprocess_options (macros, include_dirs): """Generate C pre-processor options (-D, -U, -I) as used by at least two types of compilers: the typical Unix compiler and Visual C++. 'macros' is the usual thing, a list of 1- or 2-tuples, where (name,) means undefine (-U) macro 'name', and (name,value) means define (-D) macro 'name' to 'value'. 'include_dirs' is just a list of directory names to be added to the header file search path (-I). Returns a list of command-line options suitable for either Unix compilers or Visual C++. """ # XXX it would be nice (mainly aesthetic, and so we don't generate # stupid-looking command lines) to go over 'macros' and eliminate # redundant definitions/undefinitions (ie. ensure that only the # latest mention of a particular macro winds up on the command # line). I don't think it's essential, though, since most (all?) # Unix C compilers only pay attention to the latest -D or -U # mention of a macro on their command line. Similar situation for # 'include_dirs'. I'm punting on both for now. 
Anyways, weeding out # redundancies like this should probably be the province of # CCompiler, since the data structures used are inherited from it # and therefore common to all CCompiler classes. pp_opts = [] for macro in macros: if not (type (macro) is TupleType and 1 <= len (macro) <= 2): raise TypeError, \ ("bad macro definition '%s': " + "each element of 'macros' list must be a 1- or 2-tuple") % \ macro if len (macro) == 1: # undefine this macro pp_opts.append ("-U%s" % macro[0]) elif len (macro) == 2: if macro[1] is None: # define with no explicit value pp_opts.append ("-D%s" % macro[0]) else: # XXX *don't* need to be clever about quoting the # macro value here, because we're going to avoid the # shell at all costs when we spawn the command! pp_opts.append ("-D%s=%s" % macro) for dir in include_dirs: pp_opts.append ("-I%s" % dir) return pp_opts # gen_preprocess_options () def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries): """Generate linker options for searching library directories and linking with specific libraries. 'libraries' and 'library_dirs' are, respectively, lists of library names (not filenames!) and search directories. Returns a list of command-line options suitable for use with some compiler (depending on the two format strings passed in). """ lib_opts = [] for dir in library_dirs: lib_opts.append (compiler.library_dir_option (dir)) for dir in runtime_library_dirs: opt = compiler.runtime_library_dir_option (dir) if type(opt) is ListType: lib_opts = lib_opts + opt else: lib_opts.append (opt) # XXX it's important that we *not* remove redundant library mentions! # sometimes you really do have to say "-lfoo -lbar -lfoo" in order to # resolve all symbols. I just hope we never have to say "-lfoo obj.o # -lbar" to get things to work -- that's certainly a possibility, but a # pretty nasty way to arrange your C code. 
for lib in libraries: (lib_dir, lib_name) = os.path.split (lib) if lib_dir: lib_file = compiler.find_library_file ([lib_dir], lib_name) if lib_file: lib_opts.append (lib_file) else: compiler.warn ("no library file corresponding to " "'%s' found (skipping)" % lib) else: lib_opts.append (compiler.library_option (lib)) return lib_opts # gen_lib_options ()
unknown
codeparrot/codeparrot-clean
package reference type notFoundError string func (e notFoundError) Error() string { return string(e) } func (notFoundError) NotFound() {} type invalidTagError string func (e invalidTagError) Error() string { return string(e) } func (invalidTagError) InvalidParameter() {} type conflictingTagError string func (e conflictingTagError) Error() string { return string(e) } func (conflictingTagError) Conflict() {}
go
github
https://github.com/moby/moby
daemon/internal/refstore/errors.go
#!/usr/bin/env python # -*- coding: utf-8; py-indent-offset:4 -*- ############################################################################### # # Copyright (C) 2015-2020 Daniel Rodriguez # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### from __future__ import (absolute_import, division, print_function, unicode_literals) import argparse import datetime import random import backtrader as bt class CloseSMA(bt.Strategy): params = (('period', 15),) def __init__(self): sma = bt.indicators.SMA(self.data, period=self.p.period) self.crossover = bt.indicators.CrossOver(self.data, sma) def next(self): if self.crossover > 0: self.buy() elif self.crossover < 0: self.sell() class LongOnly(bt.Sizer): params = (('stake', 1),) def _getsizing(self, comminfo, cash, data, isbuy): if isbuy: return self.p.stake # Sell situation position = self.broker.getposition(data) if not position.size: return 0 # do not sell if nothing is open return self.p.stake class FixedReverser(bt.Sizer): params = (('stake', 1),) def _getsizing(self, comminfo, cash, data, isbuy): position = self.strategy.getposition(data) size = self.p.stake * (1 + (position.size != 0)) return size def runstrat(args=None): args = parse_args(args) cerebro = bt.Cerebro() cerebro.broker.set_cash(args.cash) dkwargs = dict() if args.fromdate: fromdate = 
datetime.datetime.strptime(args.fromdate, '%Y-%m-%d') dkwargs['fromdate'] = fromdate if args.todate: todate = datetime.datetime.strptime(args.todate, '%Y-%m-%d') dkwargs['todate'] = todate data0 = bt.feeds.YahooFinanceCSVData(dataname=args.data0, **dkwargs) cerebro.adddata(data0, name='Data0') cerebro.addstrategy(CloseSMA, period=args.period) if args.longonly: cerebro.addsizer(LongOnly, stake=args.stake) else: cerebro.addsizer(bt.sizers.FixedReverser, stake=args.stake) cerebro.run() if args.plot: pkwargs = dict() if args.plot is not True: # evals to True but is not True pkwargs = eval('dict(' + args.plot + ')') # args were passed cerebro.plot(**pkwargs) def parse_args(pargs=None): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='Sample for sizer') parser.add_argument('--data0', required=False, default='../../datas/yhoo-1996-2015.txt', help='Data to be read in') parser.add_argument('--fromdate', required=False, default='2005-01-01', help='Starting date in YYYY-MM-DD format') parser.add_argument('--todate', required=False, default='2006-12-31', help='Ending date in YYYY-MM-DD format') parser.add_argument('--cash', required=False, action='store', type=float, default=50000, help=('Cash to start with')) parser.add_argument('--longonly', required=False, action='store_true', help=('Use the LongOnly sizer')) parser.add_argument('--stake', required=False, action='store', type=int, default=1, help=('Stake to pass to the sizers')) parser.add_argument('--period', required=False, action='store', type=int, default=15, help=('Period for the Simple Moving Average')) # Plot options parser.add_argument('--plot', '-p', nargs='?', required=False, metavar='kwargs', const=True, help=('Plot the read data applying any kwargs passed\n' '\n' 'For example:\n' '\n' ' --plot style="candle" (to plot candles)\n')) if pargs is not None: return parser.parse_args(pargs) return parser.parse_args() if __name__ == '__main__': runstrat()
unknown
codeparrot/codeparrot-clean
""" GraphViz Tag --------- This implements a Liquid-style graphviz tag for Pelican. You can use different Graphviz programs like dot, neato, twopi etc. [1] [1] http://www.graphviz.org/ Syntax ------ {% graphviz <program> { <DOT code> } %} Examples -------- {% graphviz dot { digraph graphname { a -> b -> c; b -> d; } } %} {% graphviz twopi { <code goes here> } %} {% graphviz neato { <code goes here> } %} ... Output ------ <div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,_BASE64_IMAGE DATA_/></div> """ import base64 import re from .mdx_liquid_tags import LiquidTags SYNTAX = '{% dot graphviz [program] [dot code] %}' DOT_BLOCK_RE = re.compile(r'^\s*(?P<program>\w+)\s*\{\s*(?P<code>.*\})\s*\}$', re.MULTILINE | re.DOTALL) def run_graphviz(program, code, options=[], format='png'): """ Runs graphviz programs and returns image data Copied from https://github.com/tkf/ipython-hierarchymagic/blob/master/hierarchymagic.py """ import os from subprocess import Popen, PIPE dot_args = [program] + options + ['-T', format] if os.name == 'nt': # Avoid opening shell window. 
# * https://github.com/tkf/ipython-hierarchymagic/issues/1 # * http://stackoverflow.com/a/2935727/727827 p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, creationflags=0x08000000) else: p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE) wentwrong = False try: # Graphviz may close standard input when an error occurs, # resulting in a broken pipe on communicate() stdout, stderr = p.communicate(code.encode('utf-8')) except (OSError, IOError) as err: if err.errno != EPIPE: raise wentwrong = True except IOError as err: if err.errno != EINVAL: raise wentwrong = True if wentwrong: # in this case, read the standard output and standard error streams # directly, to get the error message(s) stdout, stderr = p.stdout.read(), p.stderr.read() p.wait() if p.returncode != 0: raise RuntimeError('dot exited with error:\n[stderr]\n{0}'.format(stderr.decode('utf-8'))) return stdout @LiquidTags.register('graphviz') def graphviz_parser(preprocessor, tag, markup): """ Simple Graphviz parser """ # Parse the markup string m = DOT_BLOCK_RE.search(markup) if m: # Get program and DOT code code = m.group('code') program = m.group('program').strip() # Run specified program with our markup output = run_graphviz(program, code) # Return Base64 encoded image return '<div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,%s"></div>' % base64.b64encode(output) else: raise ValueError('Error processing input. ' 'Expected syntax: {0}'.format(SYNTAX)) #---------------------------------------------------------------------- # This import allows image tag to be a Pelican plugin from .liquid_tags import register
unknown
codeparrot/codeparrot-clean
from __future__ import unicode_literals from django import forms from django.forms.formsets import BaseFormSet, DELETION_FIELD_NAME from django.forms.utils import ErrorDict, ErrorList from django.forms.models import modelform_factory, inlineformset_factory, modelformset_factory, BaseModelFormSet from django.test import TestCase from django.utils import six from .models import ( User, UserSite, UserProfile, ProfileNetwork, Restaurant, Manager, Network, Host, ) class InlineFormsetTests(TestCase): def test_formset_over_to_field(self): "A formset over a ForeignKey with a to_field can be saved. Regression for #10243" Form = modelform_factory(User, fields="__all__") FormSet = inlineformset_factory(User, UserSite, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a form with no data form = Form() form_set = FormSet(instance=User()) # Now create a new User and UserSite instance data = { 'serial': '1', 'username': 'apollo13', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '0', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-data': '10', 'usersite_set-0-user': 'apollo13' } user = User() form = Form(data) if form.is_valid(): user = form.save() else: self.fail('Errors found on form:%s' % form_set) form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values() self.assertEqual(usersite[0]['data'], 10) self.assertEqual(usersite[0]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now update the UserSite instance data = { 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-id': six.text_type(usersite[0]['id']), 'usersite_set-0-data': '11', 'usersite_set-0-user': 'apollo13' } form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values() self.assertEqual(usersite[0]['data'], 11) 
self.assertEqual(usersite[0]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now add a new UserSite instance data = { 'usersite_set-TOTAL_FORMS': '2', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '0', 'usersite_set-0-id': six.text_type(usersite[0]['id']), 'usersite_set-0-data': '11', 'usersite_set-0-user': 'apollo13', 'usersite_set-1-data': '42', 'usersite_set-1-user': 'apollo13' } form_set = FormSet(data, instance=user) if form_set.is_valid(): form_set.save() usersite = UserSite.objects.all().values().order_by('data') self.assertEqual(usersite[0]['data'], 11) self.assertEqual(usersite[0]['user_id'], 'apollo13') self.assertEqual(usersite[1]['data'], 42) self.assertEqual(usersite[1]['user_id'], 'apollo13') else: self.fail('Errors found on formset:%s' % form_set.errors) def test_formset_over_inherited_model(self): "A formset over a ForeignKey with a to_field can be saved. Regression for #11120" Form = modelform_factory(Restaurant, fields="__all__") FormSet = inlineformset_factory(Restaurant, Manager, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a form with no data form = Form() form_set = FormSet(instance=Restaurant()) # Now create a new Restaurant and Manager instance data = { 'name': "Guido's House of Pasta", 'manager_set-TOTAL_FORMS': '1', 'manager_set-INITIAL_FORMS': '0', 'manager_set-MAX_NUM_FORMS': '0', 'manager_set-0-name': 'Guido Van Rossum' } restaurant = User() form = Form(data) if form.is_valid(): restaurant = form.save() else: self.fail('Errors found on form:%s' % form_set) form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values() self.assertEqual(manager[0]['name'], 'Guido Van Rossum') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now update the Manager instance data = { 'manager_set-TOTAL_FORMS': '1', 'manager_set-INITIAL_FORMS': '1', 'manager_set-MAX_NUM_FORMS': '0', 
'manager_set-0-id': six.text_type(manager[0]['id']), 'manager_set-0-name': 'Terry Gilliam' } form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values() self.assertEqual(manager[0]['name'], 'Terry Gilliam') else: self.fail('Errors found on formset:%s' % form_set.errors) # Now add a new Manager instance data = { 'manager_set-TOTAL_FORMS': '2', 'manager_set-INITIAL_FORMS': '1', 'manager_set-MAX_NUM_FORMS': '0', 'manager_set-0-id': six.text_type(manager[0]['id']), 'manager_set-0-name': 'Terry Gilliam', 'manager_set-1-name': 'John Cleese' } form_set = FormSet(data, instance=restaurant) if form_set.is_valid(): form_set.save() manager = Manager.objects.all().values().order_by('name') self.assertEqual(manager[0]['name'], 'John Cleese') self.assertEqual(manager[1]['name'], 'Terry Gilliam') else: self.fail('Errors found on formset:%s' % form_set.errors) def test_inline_model_with_to_field(self): """ #13794 --- An inline model with a to_field of a formset with instance has working relations. """ FormSet = inlineformset_factory(User, UserSite, exclude=('is_superuser',)) user = User.objects.create(username="guido", serial=1337) UserSite.objects.create(user=user, data=10) formset = FormSet(instance=user) # Testing the inline model's relation self.assertEqual(formset[0].instance.user_id, "guido") def test_inline_model_with_to_field_to_rel(self): """ #13794 --- An inline model with a to_field to a related field of a formset with instance has working relations. 
""" FormSet = inlineformset_factory(UserProfile, ProfileNetwork, exclude=[]) user = User.objects.create(username="guido", serial=1337, pk=1) self.assertEqual(user.pk, 1) profile = UserProfile.objects.create(user=user, about="about", pk=2) self.assertEqual(profile.pk, 2) ProfileNetwork.objects.create(profile=profile, network=10, identifier=10) formset = FormSet(instance=profile) # Testing the inline model's relation self.assertEqual(formset[0].instance.profile_id, 1) def test_formset_with_none_instance(self): "A formset with instance=None can be created. Regression for #11872" Form = modelform_factory(User, fields="__all__") FormSet = inlineformset_factory(User, UserSite, fields="__all__") # Instantiate the Form and FormSet to prove # you can create a formset with an instance of None Form(instance=None) FormSet(instance=None) def test_empty_fields_on_modelformset(self): "No fields passed to modelformset_factory should result in no fields on returned forms except for the id. See #14119." UserFormSet = modelformset_factory(User, fields=()) formset = UserFormSet() for form in formset.forms: self.assertIn('id', form.fields) self.assertEqual(len(form.fields), 1) def test_save_as_new_with_new_inlines(self): """ Existing and new inlines are saved with save_as_new. Regression for #14938. 
""" efnet = Network.objects.create(name="EFNet") host1 = Host.objects.create(hostname="irc.he.net", network=efnet) HostFormSet = inlineformset_factory(Network, Host, fields="__all__") # Add a new host, modify previous host, and save-as-new data = { 'host_set-TOTAL_FORMS': '2', 'host_set-INITIAL_FORMS': '1', 'host_set-MAX_NUM_FORMS': '0', 'host_set-0-id': six.text_type(host1.id), 'host_set-0-hostname': 'tranquility.hub.dal.net', 'host_set-1-hostname': 'matrix.de.eu.dal.net' } # To save a formset as new, it needs a new hub instance dalnet = Network.objects.create(name="DALnet") formset = HostFormSet(data, instance=dalnet, save_as_new=True) self.assertTrue(formset.is_valid()) formset.save() self.assertQuerysetEqual( dalnet.host_set.order_by("hostname"), ["<Host: matrix.de.eu.dal.net>", "<Host: tranquility.hub.dal.net>"] ) def test_initial_data(self): user = User.objects.create(username="bibi", serial=1) UserSite.objects.create(user=user, data=7) FormSet = inlineformset_factory(User, UserSite, extra=2, fields="__all__") formset = FormSet(instance=user, initial=[{'data': 41}, {'data': 42}]) self.assertEqual(formset.forms[0].initial['data'], 7) self.assertEqual(formset.extra_forms[0].initial['data'], 41) self.assertIn('value="42"', formset.extra_forms[1].as_p()) class FormsetTests(TestCase): def test_error_class(self): ''' Test the type of Formset and Form error attributes ''' Formset = modelformset_factory(User, fields="__all__") data = { 'form-TOTAL_FORMS': '2', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '0', 'form-0-id': '', 'form-0-username': 'apollo13', 'form-0-serial': '1', 'form-1-id': '', 'form-1-username': 'apollo13', 'form-1-serial': '2', } formset = Formset(data) # check if the returned error classes are correct # note: formset.errors returns a list as documented self.assertIsInstance(formset.errors, list) self.assertIsInstance(formset.non_form_errors(), ErrorList) for form in formset.forms: self.assertIsInstance(form.errors, ErrorDict) 
self.assertIsInstance(form.non_field_errors(), ErrorList) def test_initial_data(self): User.objects.create(username="bibi", serial=1) Formset = modelformset_factory(User, fields="__all__", extra=2) formset = Formset(initial=[{'username': 'apollo11'}, {'username': 'apollo12'}]) self.assertEqual(formset.forms[0].initial['username'], "bibi") self.assertEqual(formset.extra_forms[0].initial['username'], "apollo11") self.assertIn('value="apollo12"', formset.extra_forms[1].as_p()) def test_extraneous_query_is_not_run(self): Formset = modelformset_factory(Network, fields="__all__") data = {'test-TOTAL_FORMS': '1', 'test-INITIAL_FORMS': '0', 'test-MAX_NUM_FORMS': '', 'test-0-name': 'Random Place', } with self.assertNumQueries(1): formset = Formset(data, prefix="test") formset.save() class CustomWidget(forms.widgets.TextInput): pass class UserSiteForm(forms.ModelForm): class Meta: model = UserSite fields = "__all__" widgets = { 'id': CustomWidget, 'data': CustomWidget, } localized_fields = ('data',) class Callback(object): def __init__(self): self.log = [] def __call__(self, db_field, **kwargs): self.log.append((db_field, kwargs)) return db_field.formfield(**kwargs) class FormfieldCallbackTests(TestCase): """ Regression for #13095 and #17683: Using base forms with widgets defined in Meta should not raise errors and BaseModelForm should respect the specified pk widget. 
""" def test_inlineformset_factory_default(self): Formset = inlineformset_factory(User, UserSite, form=UserSiteForm, fields="__all__") form = Formset().forms[0] self.assertIsInstance(form['id'].field.widget, CustomWidget) self.assertIsInstance(form['data'].field.widget, CustomWidget) self.assertFalse(form.fields['id'].localize) self.assertTrue(form.fields['data'].localize) def test_modelformset_factory_default(self): Formset = modelformset_factory(UserSite, form=UserSiteForm) form = Formset().forms[0] self.assertIsInstance(form['id'].field.widget, CustomWidget) self.assertIsInstance(form['data'].field.widget, CustomWidget) self.assertFalse(form.fields['id'].localize) self.assertTrue(form.fields['data'].localize) def assertCallbackCalled(self, callback): id_field, user_field, data_field = UserSite._meta.fields expected_log = [ (id_field, {'widget': CustomWidget}), (user_field, {}), (data_field, {'widget': CustomWidget, 'localize': True}), ] self.assertEqual(callback.log, expected_log) def test_inlineformset_custom_callback(self): callback = Callback() inlineformset_factory(User, UserSite, form=UserSiteForm, formfield_callback=callback, fields="__all__") self.assertCallbackCalled(callback) def test_modelformset_custom_callback(self): callback = Callback() modelformset_factory(UserSite, form=UserSiteForm, formfield_callback=callback) self.assertCallbackCalled(callback) class BaseCustomDeleteFormSet(BaseFormSet): """ A formset mix-in that lets a form decide if it's to be deleted. Works for BaseFormSets. Also works for ModelFormSets with #14099 fixed. form.should_delete() is called. The formset delete field is also suppressed. 
""" def add_fields(self, form, index): super(BaseCustomDeleteFormSet, self).add_fields(form, index) self.can_delete = True if DELETION_FIELD_NAME in form.fields: del form.fields[DELETION_FIELD_NAME] def _should_delete_form(self, form): return hasattr(form, 'should_delete') and form.should_delete() class FormfieldShouldDeleteFormTests(TestCase): """ Regression for #14099: BaseModelFormSet should use ModelFormSet method _should_delete_form """ class BaseCustomDeleteModelFormSet(BaseModelFormSet, BaseCustomDeleteFormSet): """ Model FormSet with CustomDelete MixIn """ class CustomDeleteUserForm(forms.ModelForm): """ A model form with a 'should_delete' method """ class Meta: model = User fields = "__all__" def should_delete(self): """ delete form if odd PK """ return self.instance.pk % 2 != 0 NormalFormset = modelformset_factory(User, form=CustomDeleteUserForm, can_delete=True) DeleteFormset = modelformset_factory(User, form=CustomDeleteUserForm, formset=BaseCustomDeleteModelFormSet) data = { 'form-TOTAL_FORMS': '4', 'form-INITIAL_FORMS': '0', 'form-MAX_NUM_FORMS': '4', 'form-0-username': 'John', 'form-0-serial': '1', 'form-1-username': 'Paul', 'form-1-serial': '2', 'form-2-username': 'George', 'form-2-serial': '3', 'form-3-username': 'Ringo', 'form-3-serial': '5', } delete_all_ids = { 'form-0-DELETE': '1', 'form-1-DELETE': '1', 'form-2-DELETE': '1', 'form-3-DELETE': '1', } def test_init_database(self): """ Add test data to database via formset """ formset = self.NormalFormset(self.data) self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 4) def test_no_delete(self): """ Verify base formset doesn't modify database """ # reload database self.test_init_database() # pass standard data dict & see none updated data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update(dict( ('form-%d-id' % i, user.pk) for i, user in enumerate(User.objects.all()) )) formset = self.NormalFormset(data, queryset=User.objects.all()) self.assertTrue(formset.is_valid()) 
self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 4) def test_all_delete(self): """ Verify base formset honors DELETE field """ # reload database self.test_init_database() # create data dict with all fields marked for deletion data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update(dict( ('form-%d-id' % i, user.pk) for i, user in enumerate(User.objects.all()) )) data.update(self.delete_all_ids) formset = self.NormalFormset(data, queryset=User.objects.all()) self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 0) def test_custom_delete(self): """ Verify DeleteFormset ignores DELETE field and uses form method """ # reload database self.test_init_database() # Create formset with custom Delete function # create data dict with all fields marked for deletion data = dict(self.data) data['form-INITIAL_FORMS'] = 4 data.update(dict( ('form-%d-id' % i, user.pk) for i, user in enumerate(User.objects.all()) )) data.update(self.delete_all_ids) formset = self.DeleteFormset(data, queryset=User.objects.all()) # verify two were deleted self.assertTrue(formset.is_valid()) self.assertEqual(len(formset.save()), 0) self.assertEqual(len(User.objects.all()), 2) # verify no "odd" PKs left odd_ids = [user.pk for user in User.objects.all() if user.pk % 2] self.assertEqual(len(odd_ids), 0) class RedeleteTests(TestCase): def test_resubmit(self): u = User.objects.create(username='foo', serial=1) us = UserSite.objects.create(user=u, data=7) formset_cls = inlineformset_factory(User, UserSite, fields="__all__") data = { 'serial': '1', 'username': 'foo', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '1', 'usersite_set-0-id': six.text_type(us.pk), 'usersite_set-0-data': '7', 'usersite_set-0-user': 'foo', 'usersite_set-0-DELETE': '1' } formset = formset_cls(data, instance=u) self.assertTrue(formset.is_valid()) formset.save() 
self.assertEqual(UserSite.objects.count(), 0) formset = formset_cls(data, instance=u) # Even if the "us" object isn't in the DB any more, the form # validates. self.assertTrue(formset.is_valid()) formset.save() self.assertEqual(UserSite.objects.count(), 0) def test_delete_already_deleted(self): u = User.objects.create(username='foo', serial=1) us = UserSite.objects.create(user=u, data=7) formset_cls = inlineformset_factory(User, UserSite, fields="__all__") data = { 'serial': '1', 'username': 'foo', 'usersite_set-TOTAL_FORMS': '1', 'usersite_set-INITIAL_FORMS': '1', 'usersite_set-MAX_NUM_FORMS': '1', 'usersite_set-0-id': six.text_type(us.pk), 'usersite_set-0-data': '7', 'usersite_set-0-user': 'foo', 'usersite_set-0-DELETE': '1' } formset = formset_cls(data, instance=u) us.delete() self.assertTrue(formset.is_valid()) formset.save() self.assertEqual(UserSite.objects.count(), 0)
unknown
codeparrot/codeparrot-clean
import io import numpy as np import pytest from pandas._config import using_string_dtype from pandas.compat import HAS_PYARROW from pandas.compat.pyarrow import pa_version_under14p0 from pandas import ( DataFrame, date_range, read_csv, read_excel, read_feather, read_json, read_parquet, read_pickle, read_stata, read_table, ) import pandas._testing as tm from pandas.util import _test_decorators as td pytestmark = pytest.mark.filterwarnings( "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" ) @pytest.fixture def fsspectest(): pytest.importorskip("fsspec") from fsspec import register_implementation from fsspec.implementations.memory import MemoryFileSystem from fsspec.registry import _registry as registry class TestMemoryFS(MemoryFileSystem): protocol = "testmem" test = [None] def __init__(self, **kwargs) -> None: self.test[0] = kwargs.pop("test", None) super().__init__(**kwargs) register_implementation("testmem", TestMemoryFS, clobber=True) yield TestMemoryFS() registry.pop("testmem", None) TestMemoryFS.test[0] = None TestMemoryFS.store.clear() @pytest.fixture def df1(): return DataFrame( { "int": [1, 3], "float": [2.0, np.nan], "str": ["t", "s"], "dt": date_range("2018-06-18", periods=2), } ) @pytest.fixture def cleared_fs(): fsspec = pytest.importorskip("fsspec") memfs = fsspec.filesystem("memory") yield memfs memfs.store.clear() def test_read_csv(cleared_fs, df1): text = str(df1.to_csv(index=False)).encode() with cleared_fs.open("test/test.csv", "wb") as w: w.write(text) df2 = read_csv("memory://test/test.csv", parse_dates=["dt"]) expected = df1.copy() expected["dt"] = expected["dt"].astype("M8[us]") tm.assert_frame_equal(df2, expected) def test_reasonable_error(monkeypatch, cleared_fs): from fsspec.registry import known_implementations with pytest.raises(ValueError, match="nosuchprotocol"): read_csv("nosuchprotocol://test/test.csv") err_msg = "test error message" monkeypatch.setitem( known_implementations, "couldexist", {"class": 
"unimportable.CouldExist", "err": err_msg}, ) with pytest.raises(ImportError, match=err_msg): read_csv("couldexist://test/test.csv") def test_to_csv(cleared_fs, df1): df1.to_csv("memory://test/test.csv", index=True) df2 = read_csv("memory://test/test.csv", parse_dates=["dt"], index_col=0) expected = df1.copy() expected["dt"] = expected["dt"].astype("M8[us]") tm.assert_frame_equal(df2, expected) def test_to_excel(cleared_fs, df1): pytest.importorskip("openpyxl") ext = "xlsx" path = f"memory://test/test.{ext}" df1.to_excel(path, index=True) df2 = read_excel(path, parse_dates=["dt"], index_col=0) expected = df1.copy() expected["dt"] = expected["dt"].astype("M8[us]") tm.assert_frame_equal(df2, expected) @pytest.mark.parametrize("binary_mode", [False, True]) def test_to_csv_fsspec_object(cleared_fs, binary_mode, df1): fsspec = pytest.importorskip("fsspec") path = "memory://test/test.csv" mode = "wb" if binary_mode else "w" with fsspec.open(path, mode=mode).open() as fsspec_object: df1.to_csv(fsspec_object, index=True) assert not fsspec_object.closed mode = mode.replace("w", "r") with fsspec.open(path, mode=mode) as fsspec_object: df2 = read_csv( fsspec_object, parse_dates=["dt"], index_col=0, ) assert not fsspec_object.closed expected = df1.copy() expected["dt"] = expected["dt"].astype("M8[us]") tm.assert_frame_equal(df2, expected) def test_csv_options(fsspectest): df = DataFrame({"a": [0]}) df.to_csv( "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False ) assert fsspectest.test[0] == "csv_write" read_csv("testmem://test/test.csv", storage_options={"test": "csv_read"}) assert fsspectest.test[0] == "csv_read" def test_read_table_options(fsspectest): # GH #39167 df = DataFrame({"a": [0]}) df.to_csv( "testmem://test/test.csv", storage_options={"test": "csv_write"}, index=False ) assert fsspectest.test[0] == "csv_write" read_table("testmem://test/test.csv", storage_options={"test": "csv_read"}) assert fsspectest.test[0] == "csv_read" def 
test_excel_options(fsspectest): pytest.importorskip("openpyxl") extension = "xlsx" df = DataFrame({"a": [0]}) path = f"testmem://test/test.{extension}" df.to_excel(path, storage_options={"test": "write"}, index=False) assert fsspectest.test[0] == "write" read_excel(path, storage_options={"test": "read"}) assert fsspectest.test[0] == "read" @pytest.mark.xfail( using_string_dtype() and HAS_PYARROW and not pa_version_under14p0, reason="TODO(infer_string) fastparquet", ) def test_to_parquet_new_file(cleared_fs, df1): """Regression test for writing to a not-yet-existent GCS Parquet file.""" pytest.importorskip("fastparquet") df1.to_parquet( "memory://test/test.csv", index=True, engine="fastparquet", compression=None ) def test_arrowparquet_options(fsspectest): """Regression test for writing to a not-yet-existent GCS Parquet file.""" pytest.importorskip("pyarrow") df = DataFrame({"a": [0]}) df.to_parquet( "testmem://test/test.csv", engine="pyarrow", compression=None, storage_options={"test": "parquet_write"}, ) assert fsspectest.test[0] == "parquet_write" read_parquet( "testmem://test/test.csv", engine="pyarrow", storage_options={"test": "parquet_read"}, ) assert fsspectest.test[0] == "parquet_read" def test_fastparquet_options(fsspectest): """Regression test for writing to a not-yet-existent GCS Parquet file.""" pytest.importorskip("fastparquet") df = DataFrame({"a": [0]}) df.to_parquet( "testmem://test/test.csv", engine="fastparquet", compression=None, storage_options={"test": "parquet_write"}, ) assert fsspectest.test[0] == "parquet_write" read_parquet( "testmem://test/test.csv", engine="fastparquet", storage_options={"test": "parquet_read"}, ) assert fsspectest.test[0] == "parquet_read" @pytest.mark.single_cpu @pytest.mark.parametrize("compression_suffix", ["", ".gz", ".bz2"]) def test_from_s3_csv(s3_bucket_public_with_data, s3so, tips_file, compression_suffix): pytest.importorskip("s3fs") df_from_s3 = read_csv( 
f"s3://{s3_bucket_public_with_data.name}/tips.csv{compression_suffix}", storage_options=s3so, ) df_from_local = read_csv(tips_file) tm.assert_equal(df_from_s3, df_from_local) @pytest.mark.single_cpu @pytest.mark.parametrize("protocol", ["s3", "s3a", "s3n"]) def test_s3_protocols(s3_bucket_public_with_data, s3so, tips_file, protocol): pytest.importorskip("s3fs") df_from_s3 = read_csv( f"{protocol}://{s3_bucket_public_with_data.name}/tips.csv", storage_options=s3so, ) df_from_local = read_csv(tips_file) tm.assert_equal(df_from_s3, df_from_local) @pytest.mark.xfail(using_string_dtype(), reason="TODO(infer_string) fastparquet") @pytest.mark.single_cpu def test_s3_parquet(s3_bucket_public, s3so, df1): pytest.importorskip("fastparquet") pytest.importorskip("s3fs") fn = f"s3://{s3_bucket_public.name}/test.parquet" df1.to_parquet( fn, index=False, engine="fastparquet", compression=None, storage_options=s3so ) df2 = read_parquet(fn, engine="fastparquet", storage_options=s3so) tm.assert_equal(df1, df2) @td.skip_if_installed("fsspec") def test_not_present_exception(): msg = "`Import fsspec` failed. Use pip or conda to install the fsspec package." 
with pytest.raises(ImportError, match=msg): read_csv("memory://test/test.csv") def test_feather_options(fsspectest): pytest.importorskip("pyarrow") df = DataFrame({"a": [0]}) df.to_feather("testmem://mockfile", storage_options={"test": "feather_write"}) assert fsspectest.test[0] == "feather_write" out = read_feather("testmem://mockfile", storage_options={"test": "feather_read"}) assert fsspectest.test[0] == "feather_read" tm.assert_frame_equal(df, out) def test_pickle_options(fsspectest): df = DataFrame({"a": [0]}) df.to_pickle("testmem://mockfile", storage_options={"test": "pickle_write"}) assert fsspectest.test[0] == "pickle_write" out = read_pickle("testmem://mockfile", storage_options={"test": "pickle_read"}) assert fsspectest.test[0] == "pickle_read" tm.assert_frame_equal(df, out) def test_json_options(fsspectest, compression): df = DataFrame({"a": [0]}) df.to_json( "testmem://mockfile", compression=compression, storage_options={"test": "json_write"}, ) assert fsspectest.test[0] == "json_write" out = read_json( "testmem://mockfile", compression=compression, storage_options={"test": "json_read"}, ) assert fsspectest.test[0] == "json_read" tm.assert_frame_equal(df, out) def test_stata_options(fsspectest): df = DataFrame({"a": [0]}) df.to_stata( "testmem://mockfile", storage_options={"test": "stata_write"}, write_index=False ) assert fsspectest.test[0] == "stata_write" out = read_stata("testmem://mockfile", storage_options={"test": "stata_read"}) assert fsspectest.test[0] == "stata_read" tm.assert_frame_equal(df, out.astype("int64")) def test_markdown_options(fsspectest): pytest.importorskip("tabulate") df = DataFrame({"a": [0]}) df.to_markdown("testmem://mockfile", storage_options={"test": "md_write"}) assert fsspectest.test[0] == "md_write" assert fsspectest.cat("testmem://mockfile") def test_non_fsspec_options(): pytest.importorskip("pyarrow") with pytest.raises(ValueError, match="storage_options"): read_csv("localfile", storage_options={"a": True}) with 
pytest.raises(ValueError, match="storage_options"): # separate test for parquet, which has a different code path read_parquet("localfile", storage_options={"a": True}) by = io.BytesIO() with pytest.raises(ValueError, match="storage_options"): read_csv(by, storage_options={"a": True}) df = DataFrame({"a": [0]}) with pytest.raises(ValueError, match="storage_options"): df.to_parquet("nonfsspecpath", storage_options={"a": True})
python
github
https://github.com/pandas-dev/pandas
pandas/tests/io/test_fsspec.py
from ethereum.tools import tester from ethereum import opcodes from ethereum.utils import int_to_big_endian, encode_int32, big_endian_to_int from ethereum.tools import new_statetest_utils import json import py_pairing from ethereum.opcodes import GPAIRINGBASE as GPB from ethereum.opcodes import GPAIRINGPERPOINT as GPP c = tester.Chain(env='metropolis') c.head_state.gas_limit = 10**8 kode = """ h: bytes32 def foo(x: bytes <= 1920) -> bytes <= 32: o = raw_call(0x0000000000000000000000000000000000000008, x, gas=99999999, outsize=32) self.h = sha3(o) return o """ x1 = c.contract(kode, language='viper') # Generate a point on the G2 curve, but not in the correct subgroup fake_point = None FQ2_one = py_pairing.FQ2.one() big_order = py_pairing.curve_order * (py_pairing.field_modulus * 2 - py_pairing.curve_order) G1_zero = (py_pairing.FQ.one(), py_pairing.FQ.one(), py_pairing.FQ.zero()) G2_zero = (FQ2_one, FQ2_one, py_pairing.FQ2.zero()) for i in range(200): x = py_pairing.FQ2([8, i]) ysquared = x ** 3 + py_pairing.b2 y = ysquared ** ((py_pairing.field_modulus ** 2 + 15) // 32) if y ** 2 == ysquared: assert py_pairing.multiply((x, y, FQ2_one), big_order) == G2_zero assert py_pairing.multiply((x, y, FQ2_one), py_pairing.curve_order) != G2_zero fake_point = (x, y, FQ2_one) break def mk_ecpairing_data(pts): o = b'' for p, q in pts: np, nq = py_pairing.normalize(p), py_pairing.normalize(q) o += encode_int32(np[0].n) + encode_int32(np[1].n) + \ encode_int32(nq[0].coeffs[1]) + encode_int32(nq[0].coeffs[0]) + \ encode_int32(nq[1].coeffs[1]) + encode_int32(nq[1].coeffs[0]) return o def perturb(inp, pos, by): return inp[:pos] + encode_int32(big_endian_to_int(inp[pos: pos + 32]) + by) + inp[pos + 32:] def intrinsic_gas_of_data(d): return opcodes.GTXDATAZERO * d.count(0) + opcodes.GTXDATANONZERO * (len(d) - d.count(0)) def mk_test(encoded, execgas, expect): pre = tester.mk_state_test_prefill(c) try: o = x1.foo(encoded, startgas=21000 + intrinsic_gas_of_data(x1.translator.encode('foo', 
[encoded])) + execgas) except tester.TransactionFailed: o = False if o is False: if expect != 'error': raise Exception('OOG') elif o == encode_int32(1): if expect != 'yes': raise Exception('False positive') elif o == encode_int32(0): if expect != 'no': raise Exception('False negative') else: raise Exception("wtf: %r" % o) o = tester.mk_state_test_postfill(c, pre) o2 = tester.mk_state_test_postfill(c, pre, filler_mode=True) assert new_statetest_utils.verify_state_test(o) return o, o2 tests = [] G1, G2 = py_pairing.G1, py_pairing.G2 m = py_pairing.multiply co = py_pairing.curve_order fm = py_pairing.field_modulus tests.append((b'', GPB - 1, 'error', 'empty_data_insufficient_gas')) tests.append((b'', GPB + 30000, 'yes', 'empty_data')) tests.append((mk_ecpairing_data([(G1, G2)]), GPB + 30000, 'error', 'one_point_insufficient_gas')) tests.append((mk_ecpairing_data([(G1, G2)]), GPB + GPP + 30000, 'no', 'one_point_fail')) tests.append((mk_ecpairing_data([(G1_zero, G2)]), GPB + GPP + 30000, 'yes', 'one_point_with_g1_zero')) tests.append((mk_ecpairing_data([(G1, G2_zero)]), GPB + GPP + 30000, 'yes', 'one_point_with_g2_zero')) tests.append((mk_ecpairing_data([(G1, G2)])[:191], GPB + GPP + 30000, 'error', 'bad_length_191')) tests.append((mk_ecpairing_data([(G1, G2)]) + b'\x00', GPB + GPP + 30000, 'error', 'bad_length_193')) tests.append((mk_ecpairing_data([(G1, G2), (G1, G2)]), GPB + GPP * 2 + 30000, 'no', 'two_point_fail_1')) tests.append((mk_ecpairing_data([(G1, G2_zero), (G1, G2)]), GPB + GPP * 2 + 30000, 'no', 'two_points_with_one_g2_zero')) tests.append((mk_ecpairing_data([(G1, G2), (m(G1, co - 1), G2)]), GPB + GPP * 2 + 30000, 'yes', 'two_point_match_1')) tests.append((mk_ecpairing_data([(G1, G2), (m(G1, co - 1), G2)]), GPB + GPP + 30000, 'error', 'two_point_oog')) tests.append((mk_ecpairing_data([(G1, G2), (G1, m(G2, co - 1))]), GPB + GPP * 2 + 30000, 'yes', 'two_point_match_2')) tests.append((mk_ecpairing_data([(G1, m(G2, 2)), (m(G1, co - 2), G2)]), GPB + GPP * 2 + 
30000, 'yes', 'two_point_match_3')) tests.append((mk_ecpairing_data([(m(G1, 27), m(G2, 37)), (G1, m(G2, co - 999))]), GPB + GPP * 2 + 30000, 'yes', 'two_point_match_4')) tests.append((mk_ecpairing_data([(m(G1, 27), m(G2, 37)), (G1, m(G2, 998))]), GPB + GPP * 2 + 30000, 'no', 'two_point_fail_2')) tests.append((mk_ecpairing_data([(G1, G2_zero), (G1_zero, G2)]), GPB + GPP * 2 + 30000, 'yes', 'two_point_match_5')) tests.append((mk_ecpairing_data([(m(G1, 27), m(G2, 37)), (G1, m(G2, co - 999)), (G1, G2_zero)]), GPB + GPP * 3 + 30000, 'yes', 'three_point_match_1')) tests.append((mk_ecpairing_data([(m(G1, 27), m(G2, 37)), (G1, m(G2, 999)), (G1, G2)]), GPB + GPP * 3 + 30000, 'no','three_point_fail_1')) tests.append((mk_ecpairing_data([(G1_zero, fake_point)]), GPB + GPP + 30000, 'error', 'one_point_not_in_subgroup')) tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 0, 1), GPB + GPP + 30000, 'error', 'perturb_zeropoint_by_one')) tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 0, co), GPB + GPP + 30000, 'error', 'perturb_zeropoint_by_curve_order')) tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 0, fm), GPB + GPP + 30000, 'error', 'perturb_zeropoint_by_field_modulus')) tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 64, 1), GPB + GPP + 30000, 'error', 'perturb_g2_by_one')) tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 96, co), GPB + GPP + 30000, 'error', 'perturb_g2_by_curve_order')) tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 128, fm), GPB + GPP + 30000, 'error', 'perturb_g2_by_field_modulus')) tests.append((perturb(mk_ecpairing_data([(G1_zero, G2)]), 160, fm), GPB + GPP + 30000, 'error', 'perturb_g2_by_field_modulus_again')) testout = {} testout_filler = {} for encoded, execgas, expect, desc in tests: print('testing', encoded, execgas, expect, desc) o1, o2 = mk_test(encoded, execgas, expect) testout["ecpairing_" + desc] = o1 o2["explanation"] = "Puts the given data into the ECPAIRING precompile" 
testout_filler["ecpairing_" + desc] = o2 open('ecpairing_tests.json', 'w').write(json.dumps(testout, indent=4)) open('ecpairing_tests_filler.json', 'w').write(json.dumps(testout_filler, indent=4))
unknown
codeparrot/codeparrot-clean
# -*- coding: utf-8 -*- ''' Module for sending messages to Slack .. versionadded:: 2015.5.0 :configuration: This module can be used by either passing an api key and version directly or by specifying both in a configuration profile in the salt master/minion config. For example: .. code-block:: yaml slack: api_key: peWcBiMOS9HrZG15peWcBiMOS9HrZG15 ''' # Import Python libs from __future__ import absolute_import import logging # Import 3rd-party libs # pylint: disable=import-error,no-name-in-module,redefined-builtin from salt.ext.six.moves.urllib.parse import urlencode as _urlencode from salt.ext.six.moves import range import salt.ext.six.moves.http_client import salt.utils.slack # pylint: enable=import-error,no-name-in-module from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) __virtualname__ = 'slack' def __virtual__(): ''' Return virtual name of the module. :return: The virtual name of the module. ''' return __virtualname__ def _get_api_key(): api_key = __salt__['config.get']('slack.api_key') or \ __salt__['config.get']('slack:api_key') if not api_key: raise SaltInvocationError('No Slack API key found.') return api_key def list_rooms(api_key=None): ''' List all Slack rooms. :param api_key: The Slack admin api key. :return: The room list. CLI Example: .. code-block:: bash salt '*' slack.list_rooms salt '*' slack.list_rooms api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 ''' if not api_key: api_key = _get_api_key() return salt.utils.slack.query(function='rooms', api_key=api_key, opts=__opts__) def list_users(api_key=None): ''' List all Slack users. :param api_key: The Slack admin api key. :return: The user list. CLI Example: .. code-block:: bash salt '*' slack.list_users salt '*' slack.list_users api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 ''' if not api_key: api_key = _get_api_key() return salt.utils.slack.query(function='users', api_key=api_key, opts=__opts__) def find_room(name, api_key=None): ''' Find a room by name and return it. 
:param name: The room name. :param api_key: The Slack admin api key. :return: The room object. CLI Example: .. code-block:: bash salt '*' slack.find_room name="random" salt '*' slack.find_room name="random" api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 ''' if not api_key: api_key = _get_api_key() # search results don't include the name of the # channel with a hash, if the passed channel name # has a hash we remove it. if name.startswith('#'): name = name[1:] ret = list_rooms(api_key) if ret['res']: rooms = ret['message'] if rooms: for room in range(0, len(rooms)): if rooms[room]['name'] == name: return rooms[room] return False def find_user(name, api_key=None): ''' Find a user by name and return it. :param name: The user name. :param api_key: The Slack admin api key. :return: The user object. CLI Example: .. code-block:: bash salt '*' slack.find_user name="ThomasHatch" salt '*' slack.find_user name="ThomasHatch" api_key=peWcBiMOS9HrZG15peWcBiMOS9HrZG15 ''' if not api_key: api_key = _get_api_key() ret = list_users(api_key) if ret['res']: users = ret['message'] if users: for user in range(0, len(users)): if users[user]['name'] == name: return users[user] return False def post_message(channel, message, from_name, api_key=None, icon=None): ''' Send a message to a Slack channel. :param channel: The channel name, either will work. :param message: The message to send to the Slack channel. :param from_name: Specify who the message is from. :param api_key: The Slack api key, if not specified in the configuration. :param icon: URL to an image to use as the icon for this message :return: Boolean if message was sent successfully. CLI Example: .. 
code-block:: bash salt '*' slack.post_message channel="Development Room" message="Build is done" from_name="Build Server" ''' if not api_key: api_key = _get_api_key() if not channel: log.error('channel is a required option.') # channel must start with a hash if not channel.startswith('#'): channel = '#{0}'.format(channel) if not from_name: log.error('from_name is a required option.') if not message: log.error('message is a required option.') if not from_name: log.error('from_name is a required option.') parameters = { 'channel': channel, 'username': from_name, 'text': message } if icon is not None: parameters['icon_url'] = icon # Slack wants the body on POST to be urlencoded. result = salt.utils.slack.query(function='message', api_key=api_key, method='POST', header_dict={'Content-Type': 'application/x-www-form-urlencoded'}, data=_urlencode(parameters), opts=__opts__) if result['res']: return True else: return result
unknown
codeparrot/codeparrot-clean
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions called by the generated code to execute an eager-mode op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six from google.protobuf import text_format from tensorflow.core.framework import tensor_pb2 from tensorflow.python import pywrap_tfe from tensorflow.python.eager import core from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.util import compat def quick_execute(op_name, num_outputs, inputs, attrs, ctx, name=None): """Execute a TensorFlow operation. Args: op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to execute. num_outputs: The number of outputs of the operation to fetch. (Explicitly provided instead of being inferred for performance reasons). inputs: A list of inputs to the operation. Each entry should be a Tensor, or a value which can be passed to the Tensor constructor to create one. attrs: A tuple with alternating string attr names and attr values for this operation. ctx: The value of context.context(). name: Customized name for the operation. Returns: List of output Tensor objects. The list is empty if there are no outputs Raises: An exception on error. 
""" device_name = ctx.device_name # pylint: disable=protected-access try: ctx.ensure_initialized() tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name, inputs, attrs, num_outputs) except core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message six.raise_from(core._status_to_exception(e.code, message), None) except TypeError as e: keras_symbolic_tensors = [ x for x in inputs if ops._is_keras_symbolic_tensor(x) ] if keras_symbolic_tensors: raise core._SymbolicException( "Inputs to eager execution function cannot be Keras symbolic " "tensors, but found {}".format(keras_symbolic_tensors)) raise e # pylint: enable=protected-access return tensors def execute_with_cancellation(op_name, num_outputs, inputs, attrs, ctx, cancellation_manager, name=None): """Execute a TensorFlow operation. Args: op_name: Name of the TensorFlow operation (see REGISTER_OP in C++ code) to execute. num_outputs: The number of outputs of the operation to fetch. (Explicitly provided instead of being inferred for performance reasons). inputs: A list of inputs to the operation. Each entry should be a Tensor, or a value which can be passed to the Tensor constructor to create one. attrs: A tuple with alternating string attr names and attr values for this operation. ctx: The value of context.context(). cancellation_manager: a `CancellationManager` object that can be used to cancel the operation. name: Customized name for the operation. Returns: List of output Tensor objects. The list is empty if there are no outputs Raises: An exception on error. 
""" device_name = ctx.device_name # pylint: disable=protected-access try: ctx.ensure_initialized() tensors = pywrap_tfe.TFE_Py_ExecuteCancelable(ctx._handle, device_name, op_name, inputs, attrs, cancellation_manager._impl, num_outputs) except core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message six.raise_from(core._status_to_exception(e.code, message), None) except TypeError as e: keras_symbolic_tensors = [ x for x in inputs if ops._is_keras_symbolic_tensor(x) ] if keras_symbolic_tensors: raise core._SymbolicException( "Inputs to eager execution function cannot be Keras symbolic " "tensors, but found {}".format(keras_symbolic_tensors)) raise e # pylint: enable=protected-access return tensors def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name=None): """Monkey-patch to execute to enable execution callbacks.""" tensors = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name) for callback in ctx.op_callbacks: callback(op_name, tuple(inputs), attrs, tensors, name) return tensors execute = quick_execute def must_record_gradient(): """Import backprop if you want gradients recorded.""" return False def record_gradient(unused_op_name, unused_inputs, unused_attrs, unused_outputs): """Import backprop if you want gradients recorded.""" pass def make_float(v, arg_name): if not isinstance(v, compat.real_types): raise TypeError("Expected float for argument '%s' not %s." % (arg_name, repr(v))) return float(v) def make_int(v, arg_name): if isinstance(v, six.string_types): raise TypeError("Expected int for argument '%s' not %s." % (arg_name, repr(v))) try: return int(v) except (ValueError, TypeError): raise TypeError("Expected int for argument '%s' not %s." % (arg_name, repr(v))) def make_str(v, arg_name): if not isinstance(v, compat.bytes_or_text_types): raise TypeError("Expected string for argument '%s' not %s." 
% (arg_name, repr(v))) return compat.as_bytes(v) # Convert unicode strings to bytes. def make_bool(v, arg_name): if not isinstance(v, bool): raise TypeError("Expected bool for argument '%s' not %s." % (arg_name, repr(v))) return v def make_type(v, arg_name): try: v = dtypes.as_dtype(v).base_dtype except TypeError: raise TypeError("Expected DataType for argument '%s' not %s." % (arg_name, repr(v))) i = v.as_datatype_enum return i def make_shape(v, arg_name): """Convert v into a list.""" # Args: # v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape. # arg_name: String, for error messages. # Returns: # None if the rank is unknown, otherwise a list of ints (or Nones in the # position where the dimension is unknown). try: shape = tensor_shape.as_shape(v) except TypeError as e: raise TypeError("Error converting %s to a TensorShape: %s." % (arg_name, e)) except ValueError as e: raise ValueError("Error converting %s to a TensorShape: %s." % (arg_name, e)) if shape.ndims is None: return None else: return shape.as_list() def make_tensor(v, arg_name): """Ensure v is a TensorProto.""" if isinstance(v, tensor_pb2.TensorProto): return v elif isinstance(v, six.string_types): pb = tensor_pb2.TensorProto() text_format.Merge(v, pb) return pb raise TypeError( "Don't know how to convert %s to a TensorProto for argument '%s'." % (repr(v), arg_name)) def args_to_matching_eager(l, ctx, allowed_dtypes, default_dtype=None): """Convert sequence `l` to eager same-type Tensors.""" if (not l) and (default_dtype is not None): return default_dtype, [] # List is empty; assume default dtype. EagerTensor = ops.EagerTensor # pylint: disable=invalid-name for x in l: if not isinstance(x, EagerTensor): break else: # note: intentional for-else return l[0]._datatype_enum(), l # pylint: disable=protected-access # Is some input already a Tensor with a dtype? 
dtype = None for t in l: if isinstance(t, EagerTensor): dtype = t.dtype break if dtype is None: # Infer a dtype based on the first value, and use that dtype for the # remaining values. ret = [] for t in l: tensor = None # First see if we can get a valid dtype with the default conversion # and see if it matches an allowed dtypes. Some ops like ConcatV2 may # not list allowed dtypes, in which case we should skip this. if dtype is None and allowed_dtypes: tensor = ops.convert_to_tensor(t, ctx=ctx) # If we did not match an allowed dtype, try again with the default # dtype. This could be because we have an empty tensor and thus we # picked the wrong type. if tensor.dtype not in allowed_dtypes: tensor = None if tensor is None: tensor = ops.convert_to_tensor( t, dtype, preferred_dtype=default_dtype, ctx=ctx) ret.append(tensor) if dtype is None: dtype = tensor.dtype else: ret = [ops.convert_to_tensor(t, dtype, ctx=ctx) for t in l] # TODO(slebedev): consider removing this as it leaks a Keras concept. # pylint: disable=protected-access keras_symbolic_tensors = [x for x in ret if ops._is_keras_symbolic_tensor(x)] if keras_symbolic_tensors: raise core._SymbolicException( "Using symbolic output of a Keras layer during eager execution " "{}".format(keras_symbolic_tensors)) # pylint: enable=protected-access return dtype.as_datatype_enum, ret def convert_to_mixed_eager_tensors(values, ctx): v = [ops.convert_to_tensor(t, ctx=ctx) for t in values] types = [t._datatype_enum() for t in v] # pylint: disable=protected-access return types, v def args_to_mixed_eager_tensors(lists, ctx): """Converts a list of same-length lists of values to eager tensors.""" assert len(lists) > 1 # Generate an error if len(lists[i]) is not the same for all i. lists_ret = [] for l in lists[1:]: if len(l) != len(lists[0]): raise ValueError( "Expected list arguments to be the same length: %d != %d (%r vs. %r)." 
% (len(lists[0]), len(l), lists[0], l)) lists_ret.append([]) # Convert the first element of each list first, then the second element, etc. types = [] for i in range(len(lists[0])): dtype = None # If any list has a Tensor, use that dtype for l in lists: if isinstance(l[i], ops.EagerTensor): dtype = l[i].dtype break if dtype is None: # Convert the first one and use its dtype. lists_ret[0].append(ops.convert_to_tensor(lists[0][i], ctx=ctx)) dtype = lists_ret[0][i].dtype for j in range(1, len(lists)): lists_ret[j].append( ops.convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx)) else: # Convert everything to the found dtype. for j in range(len(lists)): lists_ret[j].append( ops.convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx)) types.append(dtype.as_datatype_enum) return types, lists_ret
unknown
codeparrot/codeparrot-clean
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nxos_igmp_interface extends_documentation_fragment: nxos version_added: "2.2" short_description: Manages IGMP interface configuration. description: - Manages IGMP interface configuration settings. author: - Jason Edelman (@jedelman8) - Gabriele Gerbino (@GGabriele) notes: - When C(state=default), supported params will be reset to a default state. These include C(version), C(startup_query_interval), C(startup_query_count), C(robustness), C(querier_timeout), C(query_mrt), C(query_interval), C(last_member_qrt), C(last_member_query_count), C(group_timeout), C(report_llg), and C(immediate_leave). - When C(state=absent), all configs for C(oif_prefix), C(oif_source), and C(oif_routemap) will be removed. - PIM must be enabled to use this module. - This module is for Layer 3 interfaces. - Route-map check not performed (same as CLI) check when configuring route-map with 'static-oif' - If restart is set to true with other params set, the restart will happen last, i.e. after the configuration takes place. options: interface: description: - The full interface name for IGMP configuration. e.g. I(Ethernet1/2). required: true version: description: - IGMP version. It can be 2 or 3. 
required: false default: null choices: ['2', '3'] startup_query_interval: description: - Query interval used when the IGMP process starts up. The range is from 1 to 18000. The default is 31. required: false default: null startup_query_count: description: - Query count used when the IGMP process starts up. The range is from 1 to 10. The default is 2. required: false default: null robustness: description: - Sets the robustness variable. Values can range from 1 to 7. The default is 2. required: false default: null querier_timeout: description: - Sets the querier timeout that the software uses when deciding to take over as the querier. Values can range from 1 to 65535 seconds. The default is 255 seconds. required: false default: null query_mrt: description: - Sets the response time advertised in IGMP queries. Values can range from 1 to 25 seconds. The default is 10 seconds. required: false default: null query_interval: description: - Sets the frequency at which the software sends IGMP host query messages. Values can range from 1 to 18000 seconds. he default is 125 seconds. required: false default: null last_member_qrt: description: - Sets the query interval waited after sending membership reports before the software deletes the group state. Values can range from 1 to 25 seconds. The default is 1 second. required: false default: null last_member_query_count: description: - Sets the number of times that the software sends an IGMP query in response to a host leave message. Values can range from 1 to 5. The default is 2. required: false default: null group_timeout: description: - Sets the group membership timeout for IGMPv2. Values can range from 3 to 65,535 seconds. The default is 260 seconds. required: false default: null report_llg: description: - Configures report-link-local-groups. Enables sending reports for groups in 224.0.0.0/24. Reports are always sent for nonlink local groups. By default, reports are not sent for link local groups. 
required: false choices: ['true', 'false'] default: false immediate_leave: description: - Enables the device to remove the group entry from the multicast routing table immediately upon receiving a leave message for the group. Use this command to minimize the leave latency of IGMPv2 group memberships on a given IGMP interface because the device does not send group-specific queries. The default is disabled. required: false choices: ['true', 'false'] default: false oif_routemap: description: - Configure a routemap for static outgoing interface (OIF). required: false default: null oif_prefix: description: - Configure a prefix for static outgoing interface (OIF). required: false default: null oif_source: description: - Configure a source for static outgoing interface (OIF). required: false default: null restart: description: - Restart IGMP. required: false choices: ['true', 'false'] default: null state: description: - Manages desired state of the resource. required: false default: present choices: ['present', 'default'] ''' EXAMPLES = ''' - nxos_igmp_interface: interface: ethernet1/32 startup_query_interval: 30 state: present username: "{{ un }}" password: "{{ pwd }}" host: "{{ inventory_hostname }}" ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: always type: dict sample: {"asn": "65535", "router_id": "1.1.1.1", "vrf": "test"} existing: description: k/v pairs of existing BGP configuration returned: always type: dict sample: {"asn": "65535", "bestpath_always_compare_med": false, "bestpath_aspath_multipath_relax": false, "bestpath_compare_neighborid": false, "bestpath_compare_routerid": false, "bestpath_cost_community_ignore": false, "bestpath_med_confed": false, "bestpath_med_missing_as_worst": false, "bestpath_med_non_deterministic": false, "cluster_id": "", "confederation_id": "", "confederation_peers": "", "graceful_restart": true, "graceful_restart_helper": false, "graceful_restart_timers_restart": "120", 
"graceful_restart_timers_stalepath_time": "300", "local_as": "", "log_neighbor_changes": false, "maxas_limit": "", "neighbor_down_fib_accelerate": false, "reconnect_interval": "60", "router_id": "11.11.11.11", "suppress_fib_pending": false, "timer_bestpath_limit": "", "timer_bgp_hold": "180", "timer_bgp_keepalive": "60", "vrf": "test"} end_state: description: k/v pairs of BGP configuration after module execution returned: always type: dict sample: {"asn": "65535", "bestpath_always_compare_med": false, "bestpath_aspath_multipath_relax": false, "bestpath_compare_neighborid": false, "bestpath_compare_routerid": false, "bestpath_cost_community_ignore": false, "bestpath_med_confed": false, "bestpath_med_missing_as_worst": false, "bestpath_med_non_deterministic": false, "cluster_id": "", "confederation_id": "", "confederation_peers": "", "graceful_restart": true, "graceful_restart_helper": false, "graceful_restart_timers_restart": "120", "graceful_restart_timers_stalepath_time": "300", "local_as": "", "log_neighbor_changes": false, "maxas_limit": "", "neighbor_down_fib_accelerate": false, "reconnect_interval": "60", "router_id": "1.1.1.1", "suppress_fib_pending": false, "timer_bestpath_limit": "", "timer_bgp_hold": "180", "timer_bgp_keepalive": "60", "vrf": "test"} updates: description: commands sent to the device returned: always type: list sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"] changed: description: check to see if a change was made on the device returned: always type: boolean sample: true ''' from ansible.module_utils.nxos import get_config, load_config, run_commands from ansible.module_utils.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule import re def execute_show_command(command, module, command_type='cli_show'): if module.params['transport'] == 'cli': command += ' | json' cmds = [command] body = run_commands(module, cmds) elif module.params['transport'] == 'nxapi': cmds = [command] body = 
run_commands(module, cmds) return body def get_interface_mode(interface, intf_type, module): command = 'show interface {0}'.format(interface) interface = {} mode = 'unknown' if intf_type in ['ethernet', 'portchannel']: body = execute_show_command(command, module)[0] interface_table = body['TABLE_interface']['ROW_interface'] mode = str(interface_table.get('eth_mode', 'layer3')) if mode == 'access' or mode == 'trunk': mode = 'layer2' elif intf_type == 'loopback' or intf_type == 'svi': mode = 'layer3' return mode def get_interface_type(interface): if interface.upper().startswith('ET'): return 'ethernet' elif interface.upper().startswith('VL'): return 'svi' elif interface.upper().startswith('LO'): return 'loopback' elif interface.upper().startswith('MG'): return 'management' elif interface.upper().startswith('MA'): return 'management' elif interface.upper().startswith('PO'): return 'portchannel' else: return 'unknown' def apply_key_map(key_map, table): new_dict = {} for key, value in table.items(): new_key = key_map.get(key) if new_key: value = table.get(key) if value: new_dict[new_key] = value else: new_dict[new_key] = value return new_dict def flatten_list(command_lists): flat_command_list = [] for command in command_lists: if isinstance(command, list): flat_command_list.extend(command) else: flat_command_list.append(command) return flat_command_list def get_igmp_interface(module, interface): command = 'show ip igmp interface {0}'.format(interface) igmp = {} key_map = { 'IGMPVersion': 'version', 'ConfiguredStartupQueryInterval': 'startup_query_interval', 'StartupQueryCount': 'startup_query_count', 'RobustnessVariable': 'robustness', 'QuerierTimeout': 'querier_timeout', 'ConfiguredMaxResponseTime': 'query_mrt', 'ConfiguredQueryInterval': 'query_interval', 'LastMemberMTR': 'last_member_qrt', 'LastMemberQueryCount': 'last_member_query_count', 'ConfiguredGroupTimeout': 'group_timeout' } body = execute_show_command(command, module)[0] if body: resource = 
body['TABLE_vrf']['ROW_vrf']['TABLE_if']['ROW_if'] igmp = apply_key_map(key_map, resource) report_llg = str(resource['ReportingForLinkLocal']) if report_llg == 'true': igmp['report_llg'] = True elif report_llg == 'false': igmp['report_llg'] = False immediate_leave = str(resource['ImmediateLeave']) # returns en or dis if immediate_leave == 'en': igmp['immediate_leave'] = True elif immediate_leave == 'dis': igmp['immediate_leave'] = False # the next block of code is used to retrieve anything with: # ip igmp static-oif *** i.e.. could be route-map ROUTEMAP # or PREFIX source <ip>, etc. command = 'show run interface {0} | inc oif'.format(interface) body = execute_show_command( command, module, command_type='cli_show_ascii')[0] staticoif = [] if body: split_body = body.split('\n') route_map_regex = ('.*ip igmp static-oif route-map\s+' '(?P<route_map>\S+).*') prefix_source_regex = ('.*ip igmp static-oif\s+(?P<prefix>' '((\d+.){3}\d+))(\ssource\s' '(?P<source>\S+))?.*') for line in split_body: temp = {} try: match_route_map = re.match(route_map_regex, line, re.DOTALL) route_map = match_route_map.groupdict()['route_map'] except AttributeError: route_map = '' try: match_prefix_source = re.match( prefix_source_regex, line, re.DOTALL) prefix_source_group = match_prefix_source.groupdict() prefix = prefix_source_group['prefix'] source = prefix_source_group['source'] except AttributeError: prefix = '' source = '' if route_map: temp['route_map'] = route_map if prefix: temp['prefix'] = prefix if source: temp['source'] = source if temp: staticoif.append(temp) igmp['oif_routemap'] = None igmp['oif_prefix_source'] = [] if staticoif: if len(staticoif) == 1 and staticoif[0].get('route_map'): igmp['oif_routemap'] = staticoif[0]['route_map'] else: igmp['oif_prefix_source'] = staticoif return igmp def config_igmp_interface(delta, found_both, found_prefix): CMDS = { 'version': 'ip igmp version {0}', 'startup_query_interval': 'ip igmp startup-query-interval {0}', 'startup_query_count': 'ip 
igmp startup-query-count {0}', 'robustness': 'ip igmp robustness-variable {0}', 'querier_timeout': 'ip igmp querier-timeout {0}', 'query_mrt': 'ip igmp query-max-response-time {0}', 'query_interval': 'ip igmp query-interval {0}', 'last_member_qrt': 'ip igmp last-member-query-response-time {0}', 'last_member_query_count': 'ip igmp last-member-query-count {0}', 'group_timeout': 'ip igmp group-timeout {0}', 'report_llg': 'ip igmp report-link-local-groups', 'immediate_leave': 'ip igmp immediate-leave', 'oif_prefix_source': 'ip igmp static-oif {0} source {1} ', 'oif_routemap': 'ip igmp static-oif route-map {0}', 'oif_prefix': 'ip igmp static-oif {0}', } commands = [] command = None for key, value in delta.items(): if key == 'oif_source' or found_both or found_prefix: pass elif key == 'oif_prefix': if delta.get('oif_source'): command = CMDS.get('oif_prefix_source').format( delta.get('oif_prefix'), delta.get('oif_source')) else: command = CMDS.get('oif_prefix').format( delta.get('oif_prefix')) elif value: command = CMDS.get(key).format(value) elif not value: command = 'no {0}'.format(CMDS.get(key).format(value)) if command: if command not in commands: commands.append(command) command = None return commands def get_igmp_interface_defaults(): version = '2' startup_query_interval = '31' startup_query_count = '2' robustness = '2' querier_timeout = '255' query_mrt = '10' query_interval = '125' last_member_qrt = '1' last_member_query_count = '2' group_timeout = '260' report_llg = False immediate_leave = False args = dict(version=version, startup_query_interval=startup_query_interval, startup_query_count=startup_query_count, robustness=robustness, querier_timeout=querier_timeout, query_mrt=query_mrt, query_interval=query_interval, last_member_qrt=last_member_qrt, last_member_query_count=last_member_query_count, group_timeout=group_timeout, report_llg=report_llg, immediate_leave=immediate_leave) default = dict((param, value) for (param, value) in args.items() if value is not 
None) return default def config_default_igmp_interface(existing, delta, found_both, found_prefix): commands = [] proposed = get_igmp_interface_defaults() delta = dict(set(proposed.items()).difference(existing.items())) if delta: command = config_igmp_interface(delta, found_both, found_prefix) if command: for each in command: commands.append(each) return commands def config_remove_oif(existing, existing_oif_prefix_source): commands = [] command = None if existing.get('routemap'): command = 'no ip igmp static-oif route-map {0}'.format( existing.get('routemap')) if existing_oif_prefix_source: for each in existing_oif_prefix_source: if each.get('prefix') and each.get('source'): command = 'no ip igmp static-oif {0} source {1} '.format( each.get('prefix'), each.get('source') ) elif each.get('prefix'): command = 'no ip igmp static-oif {0}'.format( each.get('prefix') ) if command: commands.append(command) command = None return commands def main(): argument_spec = dict( interface=dict(required=True, type='str'), version=dict(required=False, type='str'), startup_query_interval=dict(required=False, type='str'), startup_query_count=dict(required=False, type='str'), robustness=dict(required=False, type='str'), querier_timeout=dict(required=False, type='str'), query_mrt=dict(required=False, type='str'), query_interval=dict(required=False, type='str'), last_member_qrt=dict(required=False, type='str'), last_member_query_count=dict(required=False, type='str'), group_timeout=dict(required=False, type='str'), report_llg=dict(type='bool'), immediate_leave=dict(type='bool'), oif_routemap=dict(required=False, type='str'), oif_prefix=dict(required=False, type='str'), oif_source=dict(required=False, type='str'), restart=dict(type='bool', default=False), state=dict(choices=['present', 'absent', 'default'], default='present'), include_defaults=dict(default=True), config=dict(), save=dict(type='bool', default=False) ) argument_spec.update(nxos_argument_spec) module = 
AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) state = module.params['state'] interface = module.params['interface'] oif_prefix = module.params['oif_prefix'] oif_source = module.params['oif_source'] oif_routemap = module.params['oif_routemap'] if oif_source: if not oif_prefix: module.fail_json(msg='oif_prefix required when setting oif_source') intf_type = get_interface_type(interface) if get_interface_mode(interface, intf_type, module) == 'layer2': module.fail_json(msg='this module only works on Layer 3 interfaces') if oif_prefix and oif_routemap: module.fail_json(msg='cannot use oif_prefix AND oif_routemap.' ' select one.') existing = get_igmp_interface(module, interface) existing_copy = existing.copy() end_state = existing_copy if not existing.get('version'): module.fail_json(msg='pim needs to be enabled on the interface') existing_oif_prefix_source = existing.get('oif_prefix_source') # not json serializable existing.pop('oif_prefix_source') if oif_routemap and existing_oif_prefix_source: module.fail_json(msg='Delete static-oif configurations on this ' 'interface if you want to use a routemap') if oif_prefix and existing.get('oif_routemap'): module.fail_json(msg='Delete static-oif route-map configuration ' 'on this interface if you want to config ' 'static entries') args = [ 'version', 'startup_query_interval', 'startup_query_count', 'robustness', 'querier_timeout', 'query_mrt', 'query_interval', 'last_member_qrt', 'last_member_query_count', 'group_timeout', 'report_llg', 'immediate_leave', 'oif_routemap', 'oif_prefix', 'oif_source' ] changed = False commands = [] proposed = dict((k, v) for k, v in module.params.items() if v is not None and k in args) CANNOT_ABSENT = ['version', 'startup_query_interval', 'startup_query_count', 'robustness', 'querier_timeout', 'query_mrt', 'query_interval', 'last_member_qrt', 'last_member_query_count', 'group_timeout', 'report_llg', 'immediate_leave'] if state == 
'absent': for each in CANNOT_ABSENT: if each in proposed: module.fail_json(msg='only params: oif_prefix, oif_source, ' 'oif_routemap can be used when ' 'state=absent') # delta check for all params except oif_prefix and oif_source delta = dict(set(proposed.items()).difference(existing.items())) # now check to see there is a delta for prefix and source command option found_both = False found_prefix = False if existing_oif_prefix_source: if oif_prefix and oif_source: for each in existing_oif_prefix_source: if (oif_prefix == each.get('prefix') and oif_source == each.get('source')): found_both = True if not found_both: delta['prefix'] = oif_prefix delta['source'] = oif_source elif oif_prefix: for each in existing_oif_prefix_source: if oif_prefix == each.get('prefix') and not each.get('source'): found_prefix = True if not found_prefix: delta['prefix'] = oif_prefix if state == 'present': if delta: command = config_igmp_interface(delta, found_both, found_prefix) if command: commands.append(command) elif state == 'default': command = config_default_igmp_interface(existing, delta, found_both, found_prefix) if command: commands.append(command) elif state == 'absent': command = None if existing.get('oif_routemap') or existing_oif_prefix_source: command = config_remove_oif(existing, existing_oif_prefix_source) if command: commands.append(command) command = config_default_igmp_interface(existing, delta, found_both, found_prefix) if command: commands.append(command) if module.params['restart']: commands.append('restart igmp') cmds = [] results = {} if commands: commands.insert(0, ['interface {0}'.format(interface)]) cmds = flatten_list(commands) if module.check_mode: module.exit_json(changed=True, commands=cmds) else: load_config(module, cmds) changed = True end_state = get_igmp_interface(module, interface) if 'configure' in cmds: cmds.pop(0) results['proposed'] = proposed results['existing'] = existing_copy results['updates'] = cmds results['changed'] = changed 
results['warnings'] = warnings results['end_state'] = end_state module.exit_json(**results) if __name__ == '__main__': main()
unknown
codeparrot/codeparrot-clean
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc from ..modeldb import CommonService_pb2 as modeldb_dot_CommonService__pb2 from ..modeldb import ExperimentService_pb2 as modeldb_dot_ExperimentService__pb2 class ExperimentServiceStub(object): # missing associated documentation comment in .proto file pass def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.createExperiment = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/createExperiment', request_serializer=modeldb_dot_ExperimentService__pb2.CreateExperiment.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.CreateExperiment.Response.FromString, ) self.updateExperimentNameOrDescription = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/updateExperimentNameOrDescription', request_serializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentNameOrDescription.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentNameOrDescription.Response.FromString, ) self.updateExperimentName = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/updateExperimentName', request_serializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentName.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentName.Response.FromString, ) self.updateExperimentDescription = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/updateExperimentDescription', request_serializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentDescription.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentDescription.Response.FromString, ) self.addExperimentTags = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/addExperimentTags', request_serializer=modeldb_dot_ExperimentService__pb2.AddExperimentTags.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.AddExperimentTags.Response.FromString, ) 
self.getExperimentTags = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/getExperimentTags', request_serializer=modeldb_dot_CommonService__pb2.GetTags.SerializeToString, response_deserializer=modeldb_dot_CommonService__pb2.GetTags.Response.FromString, ) self.deleteExperimentTags = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/deleteExperimentTags', request_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentTags.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentTags.Response.FromString, ) self.addExperimentTag = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/addExperimentTag', request_serializer=modeldb_dot_ExperimentService__pb2.AddExperimentTag.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.AddExperimentTag.Response.FromString, ) self.deleteExperimentTag = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/deleteExperimentTag', request_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentTag.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentTag.Response.FromString, ) self.addAttribute = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/addAttribute', request_serializer=modeldb_dot_CommonService__pb2.AddAttributes.SerializeToString, response_deserializer=modeldb_dot_CommonService__pb2.AddAttributes.Response.FromString, ) self.addExperimentAttributes = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/addExperimentAttributes', request_serializer=modeldb_dot_ExperimentService__pb2.AddExperimentAttributes.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.AddExperimentAttributes.Response.FromString, ) self.getExperimentAttributes = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/getExperimentAttributes', request_serializer=modeldb_dot_CommonService__pb2.GetAttributes.SerializeToString, 
response_deserializer=modeldb_dot_CommonService__pb2.GetAttributes.Response.FromString, ) self.deleteExperimentAttributes = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/deleteExperimentAttributes', request_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentAttributes.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentAttributes.Response.FromString, ) self.logExperimentCodeVersion = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/logExperimentCodeVersion', request_serializer=modeldb_dot_ExperimentService__pb2.LogExperimentCodeVersion.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.LogExperimentCodeVersion.Response.FromString, ) self.getExperimentCodeVersion = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/getExperimentCodeVersion', request_serializer=modeldb_dot_ExperimentService__pb2.GetExperimentCodeVersion.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.GetExperimentCodeVersion.Response.FromString, ) self.getExperimentsInProject = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/getExperimentsInProject', request_serializer=modeldb_dot_ExperimentService__pb2.GetExperimentsInProject.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.GetExperimentsInProject.Response.FromString, ) self.getExperimentById = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/getExperimentById', request_serializer=modeldb_dot_ExperimentService__pb2.GetExperimentById.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.GetExperimentById.Response.FromString, ) self.getExperimentByName = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/getExperimentByName', request_serializer=modeldb_dot_ExperimentService__pb2.GetExperimentByName.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.GetExperimentByName.Response.FromString, ) self.deleteExperiment = 
channel.unary_unary( '/ai.verta.modeldb.ExperimentService/deleteExperiment', request_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperiment.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperiment.Response.FromString, ) self.getUrlForArtifact = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/getUrlForArtifact', request_serializer=modeldb_dot_CommonService__pb2.GetUrlForArtifact.SerializeToString, response_deserializer=modeldb_dot_CommonService__pb2.GetUrlForArtifact.Response.FromString, ) self.findExperiments = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/findExperiments', request_serializer=modeldb_dot_ExperimentService__pb2.FindExperiments.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.FindExperiments.Response.FromString, ) self.logArtifacts = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/logArtifacts', request_serializer=modeldb_dot_ExperimentService__pb2.LogExperimentArtifacts.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.LogExperimentArtifacts.Response.FromString, ) self.getArtifacts = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/getArtifacts', request_serializer=modeldb_dot_CommonService__pb2.GetArtifacts.SerializeToString, response_deserializer=modeldb_dot_CommonService__pb2.GetArtifacts.Response.FromString, ) self.deleteArtifact = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/deleteArtifact', request_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentArtifact.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentArtifact.Response.FromString, ) self.deleteExperiments = channel.unary_unary( '/ai.verta.modeldb.ExperimentService/deleteExperiments', request_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperiments.SerializeToString, response_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperiments.Response.FromString, ) class 
ExperimentServiceServicer(object): # missing associated documentation comment in .proto file pass def createExperiment(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def updateExperimentNameOrDescription(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def updateExperimentName(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def updateExperimentDescription(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def addExperimentTags(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def getExperimentTags(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def deleteExperimentTags(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def addExperimentTag(self, request, context): # missing 
associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def deleteExperimentTag(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def addAttribute(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def addExperimentAttributes(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def getExperimentAttributes(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def deleteExperimentAttributes(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def logExperimentCodeVersion(self, request, context): """code version """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def getExperimentCodeVersion(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def 
getExperimentsInProject(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def getExperimentById(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def getExperimentByName(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def deleteExperiment(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def getUrlForArtifact(self, request, context): """artifacts """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def findExperiments(self, request, context): """queries """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def logArtifacts(self, request, context): """artifacts """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def getArtifacts(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def deleteArtifact(self, request, context): # missing associated documentation 
comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def deleteExperiments(self, request, context): # missing associated documentation comment in .proto file pass context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ExperimentServiceServicer_to_server(servicer, server): rpc_method_handlers = { 'createExperiment': grpc.unary_unary_rpc_method_handler( servicer.createExperiment, request_deserializer=modeldb_dot_ExperimentService__pb2.CreateExperiment.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.CreateExperiment.Response.SerializeToString, ), 'updateExperimentNameOrDescription': grpc.unary_unary_rpc_method_handler( servicer.updateExperimentNameOrDescription, request_deserializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentNameOrDescription.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentNameOrDescription.Response.SerializeToString, ), 'updateExperimentName': grpc.unary_unary_rpc_method_handler( servicer.updateExperimentName, request_deserializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentName.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentName.Response.SerializeToString, ), 'updateExperimentDescription': grpc.unary_unary_rpc_method_handler( servicer.updateExperimentDescription, request_deserializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentDescription.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.UpdateExperimentDescription.Response.SerializeToString, ), 'addExperimentTags': grpc.unary_unary_rpc_method_handler( servicer.addExperimentTags, request_deserializer=modeldb_dot_ExperimentService__pb2.AddExperimentTags.FromString, 
response_serializer=modeldb_dot_ExperimentService__pb2.AddExperimentTags.Response.SerializeToString, ), 'getExperimentTags': grpc.unary_unary_rpc_method_handler( servicer.getExperimentTags, request_deserializer=modeldb_dot_CommonService__pb2.GetTags.FromString, response_serializer=modeldb_dot_CommonService__pb2.GetTags.Response.SerializeToString, ), 'deleteExperimentTags': grpc.unary_unary_rpc_method_handler( servicer.deleteExperimentTags, request_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentTags.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentTags.Response.SerializeToString, ), 'addExperimentTag': grpc.unary_unary_rpc_method_handler( servicer.addExperimentTag, request_deserializer=modeldb_dot_ExperimentService__pb2.AddExperimentTag.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.AddExperimentTag.Response.SerializeToString, ), 'deleteExperimentTag': grpc.unary_unary_rpc_method_handler( servicer.deleteExperimentTag, request_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentTag.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentTag.Response.SerializeToString, ), 'addAttribute': grpc.unary_unary_rpc_method_handler( servicer.addAttribute, request_deserializer=modeldb_dot_CommonService__pb2.AddAttributes.FromString, response_serializer=modeldb_dot_CommonService__pb2.AddAttributes.Response.SerializeToString, ), 'addExperimentAttributes': grpc.unary_unary_rpc_method_handler( servicer.addExperimentAttributes, request_deserializer=modeldb_dot_ExperimentService__pb2.AddExperimentAttributes.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.AddExperimentAttributes.Response.SerializeToString, ), 'getExperimentAttributes': grpc.unary_unary_rpc_method_handler( servicer.getExperimentAttributes, request_deserializer=modeldb_dot_CommonService__pb2.GetAttributes.FromString, 
response_serializer=modeldb_dot_CommonService__pb2.GetAttributes.Response.SerializeToString, ), 'deleteExperimentAttributes': grpc.unary_unary_rpc_method_handler( servicer.deleteExperimentAttributes, request_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentAttributes.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentAttributes.Response.SerializeToString, ), 'logExperimentCodeVersion': grpc.unary_unary_rpc_method_handler( servicer.logExperimentCodeVersion, request_deserializer=modeldb_dot_ExperimentService__pb2.LogExperimentCodeVersion.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.LogExperimentCodeVersion.Response.SerializeToString, ), 'getExperimentCodeVersion': grpc.unary_unary_rpc_method_handler( servicer.getExperimentCodeVersion, request_deserializer=modeldb_dot_ExperimentService__pb2.GetExperimentCodeVersion.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.GetExperimentCodeVersion.Response.SerializeToString, ), 'getExperimentsInProject': grpc.unary_unary_rpc_method_handler( servicer.getExperimentsInProject, request_deserializer=modeldb_dot_ExperimentService__pb2.GetExperimentsInProject.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.GetExperimentsInProject.Response.SerializeToString, ), 'getExperimentById': grpc.unary_unary_rpc_method_handler( servicer.getExperimentById, request_deserializer=modeldb_dot_ExperimentService__pb2.GetExperimentById.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.GetExperimentById.Response.SerializeToString, ), 'getExperimentByName': grpc.unary_unary_rpc_method_handler( servicer.getExperimentByName, request_deserializer=modeldb_dot_ExperimentService__pb2.GetExperimentByName.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.GetExperimentByName.Response.SerializeToString, ), 'deleteExperiment': grpc.unary_unary_rpc_method_handler( servicer.deleteExperiment, 
request_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperiment.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperiment.Response.SerializeToString, ), 'getUrlForArtifact': grpc.unary_unary_rpc_method_handler( servicer.getUrlForArtifact, request_deserializer=modeldb_dot_CommonService__pb2.GetUrlForArtifact.FromString, response_serializer=modeldb_dot_CommonService__pb2.GetUrlForArtifact.Response.SerializeToString, ), 'findExperiments': grpc.unary_unary_rpc_method_handler( servicer.findExperiments, request_deserializer=modeldb_dot_ExperimentService__pb2.FindExperiments.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.FindExperiments.Response.SerializeToString, ), 'logArtifacts': grpc.unary_unary_rpc_method_handler( servicer.logArtifacts, request_deserializer=modeldb_dot_ExperimentService__pb2.LogExperimentArtifacts.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.LogExperimentArtifacts.Response.SerializeToString, ), 'getArtifacts': grpc.unary_unary_rpc_method_handler( servicer.getArtifacts, request_deserializer=modeldb_dot_CommonService__pb2.GetArtifacts.FromString, response_serializer=modeldb_dot_CommonService__pb2.GetArtifacts.Response.SerializeToString, ), 'deleteArtifact': grpc.unary_unary_rpc_method_handler( servicer.deleteArtifact, request_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentArtifact.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperimentArtifact.Response.SerializeToString, ), 'deleteExperiments': grpc.unary_unary_rpc_method_handler( servicer.deleteExperiments, request_deserializer=modeldb_dot_ExperimentService__pb2.DeleteExperiments.FromString, response_serializer=modeldb_dot_ExperimentService__pb2.DeleteExperiments.Response.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'ai.verta.modeldb.ExperimentService', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,))
unknown
codeparrot/codeparrot-clean
import DefaultTheme from "vitepress/theme-without-fonts"; import * as d3 from "d3"; import {useData} from "vitepress"; import CustomLayout from "./CustomLayout.vue"; import "./custom.css"; export default { extends: DefaultTheme, Layout: CustomLayout, enhanceApp({app, router}) { globalThis.d3 = d3; // for console testing! Object.defineProperty(app.config.globalProperties, "$dark", {get: () => useData().isDark.value}); } };
typescript
github
https://github.com/d3/d3
docs/.vitepress/theme/index.ts
""" Classes for interpolating values. """ from __future__ import division, print_function, absolute_import __all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp', 'ppform', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly', 'RegularGridInterpolator', 'interpn'] import itertools import warnings import functools import operator import numpy as np from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d, dot, ravel, poly1d, asarray, intp) import scipy.linalg import scipy.special as spec from scipy.special import comb from scipy._lib.six import xrange, integer_types, string_types from . import fitpack from . import dfitpack from . import _fitpack from .polyint import _Interpolator1D from . import _ppoly from .fitpack2 import RectBivariateSpline from .interpnd import _ndim_coords_from_arrays from ._bsplines import make_interp_spline, BSpline def prod(x): """Product of a list of numbers; ~40x faster vs np.prod for Python tuples""" if len(x) == 0: return 1 return functools.reduce(operator.mul, x) def lagrange(x, w): """ Return a Lagrange interpolating polynomial. Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating polynomial through the points ``(x, w)``. Warning: This implementation is numerically unstable. Do not expect to be able to use more than about 20 points even if they are chosen optimally. Parameters ---------- x : array_like `x` represents the x-coordinates of a set of datapoints. w : array_like `w` represents the y-coordinates of a set of datapoints, i.e. f(`x`). Returns ------- lagrange : numpy.poly1d instance The Lagrange interpolating polynomial. """ M = len(x) p = poly1d(0.0) for j in xrange(M): pt = poly1d(w[j]) for k in xrange(M): if k == j: continue fac = x[j] - x[k] pt *= poly1d([1.0, -x[k]]) / fac p += pt return p # !! Need to find argument for keeping initialize. If it isn't # !! found, get rid of it! 
class interp2d(object): """ interp2d(x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=nan) Interpolate over a 2-D grid. `x`, `y` and `z` are arrays of values used to approximate some function f: ``z = f(x, y)``. This class returns a function whose call method uses spline interpolation to find the value of new points. If `x` and `y` represent a regular grid, consider using RectBivariateSpline. Note that calling `interp2d` with NaNs present in input values results in undefined behaviour. Methods ------- __call__ Parameters ---------- x, y : array_like Arrays defining the data point coordinates. If the points lie on a regular grid, `x` can specify the column coordinates and `y` the row coordinates, for example:: >>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]] Otherwise, `x` and `y` must specify the full coordinates for each point, for example:: >>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6] If `x` and `y` are multi-dimensional, they are flattened before use. z : array_like The values of the function to interpolate at the data points. If `z` is a multi-dimensional array, it is flattened before use. The length of a flattened `z` array is either len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates for each point. kind : {'linear', 'cubic', 'quintic'}, optional The kind of spline interpolation to use. Default is 'linear'. copy : bool, optional If True, the class makes internal copies of x, y and z. If False, references may be used. The default is to copy. bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data (x,y), a ValueError is raised. If False, then `fill_value` is used. fill_value : number, optional If provided, the value to use for points outside of the interpolation domain. If omitted (None), values outside the domain are extrapolated. 
See Also -------- RectBivariateSpline : Much faster 2D interpolation if your input data is on a grid bisplrep, bisplev : Spline interpolation based on FITPACK BivariateSpline : a more recent wrapper of the FITPACK routines interp1d : one dimension version of this function Notes ----- The minimum number of data points required along the interpolation axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for quintic interpolation. The interpolator is constructed by `bisplrep`, with a smoothing factor of 0. If more control over smoothing is needed, `bisplrep` should be used directly. Examples -------- Construct a 2-D grid and interpolate on it: >>> from scipy import interpolate >>> x = np.arange(-5.01, 5.01, 0.25) >>> y = np.arange(-5.01, 5.01, 0.25) >>> xx, yy = np.meshgrid(x, y) >>> z = np.sin(xx**2+yy**2) >>> f = interpolate.interp2d(x, y, z, kind='cubic') Now use the obtained interpolation function and plot the result: >>> import matplotlib.pyplot as plt >>> xnew = np.arange(-5.01, 5.01, 1e-2) >>> ynew = np.arange(-5.01, 5.01, 1e-2) >>> znew = f(xnew, ynew) >>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-') >>> plt.show() """ def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=None): x = ravel(x) y = ravel(y) z = asarray(z) rectangular_grid = (z.size == len(x) * len(y)) if rectangular_grid: if z.ndim == 2: if z.shape != (len(y), len(x)): raise ValueError("When on a regular grid with x.size = m " "and y.size = n, if z.ndim == 2, then z " "must have shape (n, m)") if not np.all(x[1:] >= x[:-1]): j = np.argsort(x) x = x[j] z = z[:, j] if not np.all(y[1:] >= y[:-1]): j = np.argsort(y) y = y[j] z = z[j, :] z = ravel(z.T) else: z = ravel(z) if len(x) != len(y): raise ValueError( "x and y must have equal lengths for non rectangular grid") if len(z) != len(x): raise ValueError( "Invalid length for input z for non rectangular grid") try: kx = ky = {'linear': 1, 'cubic': 3, 'quintic': 5}[kind] except KeyError: raise 
ValueError("Unsupported interpolation type.") if not rectangular_grid: # TODO: surfit is really not meant for interpolation! self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0) else: nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth( x, y, z, None, None, None, None, kx=kx, ky=ky, s=0.0) self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)], kx, ky) self.bounds_error = bounds_error self.fill_value = fill_value self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)] self.x_min, self.x_max = np.amin(x), np.amax(x) self.y_min, self.y_max = np.amin(y), np.amax(y) def __call__(self, x, y, dx=0, dy=0, assume_sorted=False): """Interpolate the function. Parameters ---------- x : 1D array x-coordinates of the mesh on which to interpolate. y : 1D array y-coordinates of the mesh on which to interpolate. dx : int >= 0, < kx Order of partial derivatives in x. dy : int >= 0, < ky Order of partial derivatives in y. assume_sorted : bool, optional If False, values of `x` and `y` can be in any order and they are sorted first. If True, `x` and `y` have to be arrays of monotonically increasing values. Returns ------- z : 2D array with shape (len(y), len(x)) The interpolated values. 
""" x = atleast_1d(x) y = atleast_1d(y) if x.ndim != 1 or y.ndim != 1: raise ValueError("x and y should both be 1-D arrays") if not assume_sorted: x = np.sort(x) y = np.sort(y) if self.bounds_error or self.fill_value is not None: out_of_bounds_x = (x < self.x_min) | (x > self.x_max) out_of_bounds_y = (y < self.y_min) | (y > self.y_max) any_out_of_bounds_x = np.any(out_of_bounds_x) any_out_of_bounds_y = np.any(out_of_bounds_y) if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y): raise ValueError("Values out of range; x must be in %r, y in %r" % ((self.x_min, self.x_max), (self.y_min, self.y_max))) z = fitpack.bisplev(x, y, self.tck, dx, dy) z = atleast_2d(z) z = transpose(z) if self.fill_value is not None: if any_out_of_bounds_x: z[:, out_of_bounds_x] = self.fill_value if any_out_of_bounds_y: z[out_of_bounds_y, :] = self.fill_value if len(z) == 1: z = z[0] return array(z) def _check_broadcast_up_to(arr_from, shape_to, name): """Helper to check that arr_from broadcasts up to shape_to""" shape_from = arr_from.shape if len(shape_to) >= len(shape_from): for t, f in zip(shape_to[::-1], shape_from[::-1]): if f != 1 and f != t: break else: # all checks pass, do the upcasting that we need later if arr_from.size != 1 and arr_from.shape != shape_to: arr_from = np.ones(shape_to, arr_from.dtype) * arr_from return arr_from.ravel() # at least one check failed raise ValueError('%s argument must be able to broadcast up ' 'to shape %s but had shape %s' % (name, shape_to, shape_from)) def _do_extrapolate(fill_value): """Helper to check if fill_value == "extrapolate" without warnings""" return (isinstance(fill_value, string_types) and fill_value == 'extrapolate') class interp1d(_Interpolator1D): """ Interpolate a 1-D function. `x` and `y` are arrays of values used to approximate some function f: ``y = f(x)``. This class returns a function whose call method uses interpolation to find the value of new points. 
Note that calling `interp1d` with NaNs present in input values results in undefined behaviour. Parameters ---------- x : (N,) array_like A 1-D array of real values. y : (...,N,...) array_like A N-D array of real values. The length of `y` along the interpolation axis must be equal to the length of `x`. kind : str or int, optional Specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic' where 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of first, second or third order) or as an integer specifying the order of the spline interpolator to use. Default is 'linear'. axis : int, optional Specifies the axis of `y` along which to interpolate. Interpolation defaults to the last axis of `y`. copy : bool, optional If True, the class makes internal copies of x and y. If False, references to `x` and `y` are used. The default is to copy. bounds_error : bool, optional If True, a ValueError is raised any time interpolation is attempted on a value outside of the range of x (where extrapolation is necessary). If False, out of bounds values are assigned `fill_value`. By default, an error is raised unless `fill_value="extrapolate"`. fill_value : array-like or (array-like, array_like) or "extrapolate", optional - if a ndarray (or float), this value will be used to fill in for requested points outside of the data range. If not provided, then the default is NaN. The array-like must broadcast properly to the dimensions of the non-interpolation axes. - If a two-element tuple, then the first element is used as a fill value for ``x_new < x[0]`` and the second element is used for ``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g., list or ndarray, regardless of shape) is taken to be a single array-like argument meant to be used for both bounds as ``below, above = fill_value, fill_value``. .. versionadded:: 0.17.0 - If "extrapolate", then points outside the data range will be extrapolated. .. 
versionadded:: 0.17.0 assume_sorted : bool, optional If False, values of `x` can be in any order and they are sorted first. If True, `x` has to be an array of monotonically increasing values. Methods ------- __call__ See Also -------- splrep, splev Spline interpolation/smoothing based on FITPACK. UnivariateSpline : An object-oriented wrapper of the FITPACK routines. interp2d : 2-D interpolation Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy import interpolate >>> x = np.arange(0, 10) >>> y = np.exp(-x/3.0) >>> f = interpolate.interp1d(x, y) >>> xnew = np.arange(0, 9, 0.1) >>> ynew = f(xnew) # use interpolation function returned by `interp1d` >>> plt.plot(x, y, 'o', xnew, ynew, '-') >>> plt.show() """ def __init__(self, x, y, kind='linear', axis=-1, copy=True, bounds_error=None, fill_value=np.nan, assume_sorted=False): """ Initialize a 1D linear interpolation class.""" _Interpolator1D.__init__(self, x, y, axis=axis) self.bounds_error = bounds_error # used by fill_value setter self.copy = copy if kind in ['zero', 'slinear', 'quadratic', 'cubic']: order = {'nearest': 0, 'zero': 0, 'slinear': 1, 'quadratic': 2, 'cubic': 3}[kind] kind = 'spline' elif isinstance(kind, int): order = kind kind = 'spline' elif kind not in ('linear', 'nearest'): raise NotImplementedError("%s is unsupported: Use fitpack " "routines for other types." 
% kind) x = array(x, copy=self.copy) y = array(y, copy=self.copy) if not assume_sorted: ind = np.argsort(x) x = x[ind] y = np.take(y, ind, axis=axis) if x.ndim != 1: raise ValueError("the x array must have exactly one dimension.") if y.ndim == 0: raise ValueError("the y array must have at least one dimension.") # Force-cast y to a floating-point type, if it's not yet one if not issubclass(y.dtype.type, np.inexact): y = y.astype(np.float_) # Backward compatibility self.axis = axis % y.ndim # Interpolation goes internally along the first axis self.y = y self._y = self._reshape_yi(self.y) self.x = x del y, x # clean up namespace to prevent misuse; use attributes self._kind = kind self.fill_value = fill_value # calls the setter, can modify bounds_err # Adjust to interpolation kind; store reference to *unbound* # interpolation methods, in order to avoid circular references to self # stored in the bound instance methods, and therefore delayed garbage # collection. See: http://docs.python.org/2/reference/datamodel.html if kind in ('linear', 'nearest'): # Make a "view" of the y array that is rotated to the interpolation # axis. minval = 2 if kind == 'nearest': # Do division before addition to prevent possible integer # overflow self.x_bds = self.x / 2.0 self.x_bds = self.x_bds[1:] + self.x_bds[:-1] self._call = self.__class__._call_nearest else: # Check if we can delegate to numpy.interp (2x-10x faster). cond = self.x.dtype == np.float_ and self.y.dtype == np.float_ cond = cond and self.y.ndim == 1 cond = cond and not _do_extrapolate(fill_value) if cond: self._call = self.__class__._call_linear_np else: self._call = self.__class__._call_linear else: minval = order + 1 rewrite_nan = False xx, yy = self.x, self._y if order > 1: # Quadratic or cubic spline. If input contains even a single # nan, then the output is all nans. We cannot just feed data # with nans to make_interp_spline because it calls LAPACK. 
# So, we make up a bogus x and y with no nans and use it # to get the correct shape of the output, which we then fill # with nans. # For slinear or zero order spline, we just pass nans through. if np.isnan(self.x).any(): xx = np.linspace(min(self.x), max(self.x), len(self.x)) rewrite_nan = True if np.isnan(self._y).any(): yy = np.ones_like(self._y) rewrite_nan = True self._spline = make_interp_spline(xx, yy, k=order, check_finite=False) if rewrite_nan: self._call = self.__class__._call_nan_spline else: self._call = self.__class__._call_spline if len(self.x) < minval: raise ValueError("x and y arrays must have at " "least %d entries" % minval) @property def fill_value(self): # backwards compat: mimic a public attribute return self._fill_value_orig @fill_value.setter def fill_value(self, fill_value): # extrapolation only works for nearest neighbor and linear methods if _do_extrapolate(fill_value): if self.bounds_error: raise ValueError("Cannot extrapolate and raise " "at the same time.") self.bounds_error = False self._extrapolate = True else: broadcast_shape = (self.y.shape[:self.axis] + self.y.shape[self.axis + 1:]) if len(broadcast_shape) == 0: broadcast_shape = (1,) # it's either a pair (_below_range, _above_range) or a single value # for both above and below range if isinstance(fill_value, tuple) and len(fill_value) == 2: below_above = [np.asarray(fill_value[0]), np.asarray(fill_value[1])] names = ('fill_value (below)', 'fill_value (above)') for ii in range(2): below_above[ii] = _check_broadcast_up_to( below_above[ii], broadcast_shape, names[ii]) else: fill_value = np.asarray(fill_value) below_above = [_check_broadcast_up_to( fill_value, broadcast_shape, 'fill_value')] * 2 self._fill_value_below, self._fill_value_above = below_above self._extrapolate = False if self.bounds_error is None: self.bounds_error = True # backwards compat: fill_value was a public attr; make it writeable self._fill_value_orig = fill_value def _call_linear_np(self, x_new): # Note that 
out-of-bounds values are taken care of in self._evaluate return np.interp(x_new, self.x, self.y) def _call_linear(self, x_new): # 2. Find where in the orignal data, the values to interpolate # would be inserted. # Note: If x_new[n] == x[m], then m is returned by searchsorted. x_new_indices = searchsorted(self.x, x_new) # 3. Clip x_new_indices so that they are within the range of # self.x indices and at least 1. Removes mis-interpolation # of x_new[n] = x[0] x_new_indices = x_new_indices.clip(1, len(self.x) - 1).astype(int) # 4. Calculate the slope of regions that each x_new value falls in. lo = x_new_indices - 1 hi = x_new_indices x_lo = self.x[lo] x_hi = self.x[hi] y_lo = self._y[lo] y_hi = self._y[hi] # Note that the following two expressions rely on the specifics of the # broadcasting semantics. slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None] # 5. Calculate the actual value for each entry in x_new. y_new = slope * (x_new - x_lo)[:, None] + y_lo return y_new def _call_nearest(self, x_new): """ Find nearest neighbour interpolated y_new = f(x_new).""" # 2. Find where in the averaged data the values to interpolate # would be inserted. # Note: use side='left' (right) to searchsorted() to define the # halfway point to be nearest to the left (right) neighbour x_new_indices = searchsorted(self.x_bds, x_new, side='left') # 3. Clip x_new_indices so that they are within the range of x indices. x_new_indices = x_new_indices.clip(0, len(self.x) - 1).astype(intp) # 4. Calculate the actual value for each entry in x_new. y_new = self._y[x_new_indices] return y_new def _call_spline(self, x_new): return self._spline(x_new) def _call_nan_spline(self, x_new): out = self._spline(x_new) out[...] = np.nan return out def _evaluate(self, x_new): # 1. Handle values in x_new that are outside of x. Throw error, # or return a list of mask array indicating the outofbounds values. # The behavior is set by the bounds_error variable. 
x_new = asarray(x_new) y_new = self._call(self, x_new) if not self._extrapolate: below_bounds, above_bounds = self._check_bounds(x_new) if len(y_new) > 0: # Note fill_value must be broadcast up to the proper size # and flattened to work here y_new[below_bounds] = self._fill_value_below y_new[above_bounds] = self._fill_value_above return y_new def _check_bounds(self, x_new): """Check the inputs for being in the bounds of the interpolated data. Parameters ---------- x_new : array Returns ------- out_of_bounds : bool array The mask on x_new of values that are out of the bounds. """ # If self.bounds_error is True, we raise an error if any x_new values # fall outside the range of x. Otherwise, we return an array indicating # which values are outside the boundary region. below_bounds = x_new < self.x[0] above_bounds = x_new > self.x[-1] # !! Could provide more information about which values are out of bounds if self.bounds_error and below_bounds.any(): raise ValueError("A value in x_new is below the interpolation " "range.") if self.bounds_error and above_bounds.any(): raise ValueError("A value in x_new is above the interpolation " "range.") # !! Should we emit a warning if some values are out of bounds? # !! matlab does not. return below_bounds, above_bounds class _PPolyBase(object): """Base class for piecewise polynomials.""" __slots__ = ('c', 'x', 'extrapolate', 'axis') def __init__(self, c, x, extrapolate=None, axis=0): self.c = np.asarray(c) self.x = np.ascontiguousarray(x, dtype=np.float64) if extrapolate is None: extrapolate = True elif extrapolate != 'periodic': extrapolate = bool(extrapolate) self.extrapolate = extrapolate if not (0 <= axis < self.c.ndim - 1): raise ValueError("%s must be between 0 and %s" % (axis, c.ndim - 1)) self.axis = axis if axis != 0: # roll the interpolation axis to be the first one in self.c # More specifically, the target shape for self.c is (k, m, ...), # and axis !=0 means that we have c.shape (..., k, m, ...) 
# ^ # axis # So we roll two of them. self.c = np.rollaxis(self.c, axis + 1) self.c = np.rollaxis(self.c, axis + 1) if self.x.ndim != 1: raise ValueError("x must be 1-dimensional") if self.x.size < 2: raise ValueError("at least 2 breakpoints are needed") if self.c.ndim < 2: raise ValueError("c must have at least 2 dimensions") if self.c.shape[0] == 0: raise ValueError("polynomial must be at least of order 0") if self.c.shape[1] != self.x.size - 1: raise ValueError("number of coefficients != len(x)-1") dx = np.diff(self.x) if not (np.all(dx >= 0) or np.all(dx <= 0)): raise ValueError("`x` must be strictly increasing or decreasing.") dtype = self._get_dtype(self.c.dtype) self.c = np.ascontiguousarray(self.c, dtype=dtype) def _get_dtype(self, dtype): if np.issubdtype(dtype, np.complexfloating) \ or np.issubdtype(self.c.dtype, np.complexfloating): return np.complex_ else: return np.float_ @classmethod def construct_fast(cls, c, x, extrapolate=None, axis=0): """ Construct the piecewise polynomial without making checks. Takes the same parameters as the constructor. Input arguments `c` and `x` must be arrays of the correct shape and type. The `c` array can only be of dtypes float and complex, and `x` array must have dtype float. """ self = object.__new__(cls) self.c = c self.x = x self.axis = axis if extrapolate is None: extrapolate = True self.extrapolate = extrapolate return self def _ensure_c_contiguous(self): """ c and x may be modified by the user. The Cython code expects that they are C contiguous. """ if not self.x.flags.c_contiguous: self.x = self.x.copy() if not self.c.flags.c_contiguous: self.c = self.c.copy() def extend(self, c, x, right=None): """ Add additional breakpoints and coefficients to the polynomial. Parameters ---------- c : ndarray, size (k, m, ...) Additional coefficients for polynomials in intervals. Note that the first additional interval will be formed using one of the `self.x` end points. x : ndarray, size (m,) Additional breakpoints. 
Must be sorted in the same order as `self.x` and either to the right or to the left of the current breakpoints. right Deprecated argument. Has no effect. .. deprecated:: 0.19 """ if right is not None: warnings.warn("`right` is deprecated and will be removed.") c = np.asarray(c) x = np.asarray(x) if c.ndim < 2: raise ValueError("invalid dimensions for c") if x.ndim != 1: raise ValueError("invalid dimensions for x") if x.shape[0] != c.shape[1]: raise ValueError("x and c have incompatible sizes") if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim: raise ValueError("c and self.c have incompatible shapes") if c.size == 0: return dx = np.diff(x) if not (np.all(dx >= 0) or np.all(dx <= 0)): raise ValueError("`x` is not sorted.") if self.x[-1] >= self.x[0]: if not x[-1] >= x[0]: raise ValueError("`x` is in the different order " "than `self.x`.") if x[0] >= self.x[-1]: action = 'append' elif x[-1] <= self.x[0]: action = 'prepend' else: raise ValueError("`x` is neither on the left or on the right " "from `self.x`.") else: if not x[-1] <= x[0]: raise ValueError("`x` is in the different order " "than `self.x`.") if x[0] <= self.x[-1]: action = 'append' elif x[-1] >= self.x[0]: action = 'prepend' else: raise ValueError("`x` is neither on the left or on the right " "from `self.x`.") dtype = self._get_dtype(c.dtype) k2 = max(c.shape[0], self.c.shape[0]) c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:], dtype=dtype) if action == 'append': c2[k2 - self.c.shape[0]:, :self.c.shape[1]] = self.c c2[k2 - c.shape[0]:, self.c.shape[1]:] = c self.x = np.r_[self.x, x] elif action == 'prepend': c2[k2 - self.c.shape[0]:, :c.shape[1]] = c c2[k2 - c.shape[0]:, c.shape[1]:] = self.c self.x = np.r_[x, self.x] self.c = c2 def __call__(self, x, nu=0, extrapolate=None): """ Evaluate the piecewise polynomial or its derivative. Parameters ---------- x : array_like Points to evaluate the interpolant at. nu : int, optional Order of derivative to evaluate. 
Must be non-negative. extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use `self.extrapolate`. Returns ------- y : array_like Interpolated values. Shape is determined by replacing the interpolation axis in the original array with the shape of x. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. """ if extrapolate is None: extrapolate = self.extrapolate x = np.asarray(x) x_shape, x_ndim = x.shape, x.ndim x = np.ascontiguousarray(x.ravel(), dtype=np.float_) # With periodic extrapolation we map x to the segment # [self.x[0], self.x[-1]]. if extrapolate == 'periodic': x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0]) extrapolate = False out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype) self._ensure_c_contiguous() self._evaluate(x, nu, extrapolate, out) out = out.reshape(x_shape + self.c.shape[2:]) if self.axis != 0: # transpose to move the calculated values to the interpolation axis l = list(range(out.ndim)) l = l[x_ndim:x_ndim + self.axis] + l[:x_ndim] + l[x_ndim + self.axis:] out = out.transpose(l) return out class PPoly(_PPolyBase): """ Piecewise polynomial in terms of coefficients and breakpoints The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the local power basis:: S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1)) where ``k`` is the degree of the polynomial. Parameters ---------- c : ndarray, shape (k, m, ...) Polynomial coefficients, order `k` and `m` intervals x : ndarray, shape (m+1,) Polynomial breakpoints. Must be sorted in either increasing or decreasing order. 
extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. axis : int, optional Interpolation axis. Default is zero. Attributes ---------- x : ndarray Breakpoints. c : ndarray Coefficients of the polynomials. They are reshaped to a 3-dimensional array with the last dimension representing the trailing dimensions of the original coefficient array. axis : int Interpolation axis. Methods ------- __call__ derivative antiderivative integrate solve roots extend from_spline from_bernstein_basis construct_fast See also -------- BPoly : piecewise polynomials in the Bernstein basis Notes ----- High-order polynomials in the power basis can be numerically unstable. Precision problems can start to appear for orders larger than 20-30. """ def _evaluate(self, x, nu, extrapolate, out): _ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, x, nu, bool(extrapolate), out) def derivative(self, nu=1): """ Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : int, optional Order of derivative to evaluate. Default is 1, i.e. compute the first derivative. If negative, the antiderivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k - n representing the derivative of this polynomial. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. 
""" if nu < 0: return self.antiderivative(-nu) # reduce order if nu == 0: c2 = self.c.copy() else: c2 = self.c[:-nu, :].copy() if c2.shape[0] == 0: # derivative of order 0 is zero c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype) # multiply by the correct rising factorials factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu) c2 *= factor[(slice(None),) + (None,) * (c2.ndim - 1)] # construct a compatible polynomial return self.construct_fast(c2, self.x, self.extrapolate, self.axis) def antiderivative(self, nu=1): """ Construct a new piecewise polynomial representing the antiderivative. Antiderivative is also the indefinite integral of the function, and derivative is its inverse operation. Parameters ---------- nu : int, optional Order of antiderivative to evaluate. Default is 1, i.e. compute the first integral. If negative, the derivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k + n representing the antiderivative of this polynomial. Notes ----- The antiderivative returned by this function is continuous and continuously differentiable to order n-1, up to floating point rounding error. If antiderivative is computed and ``self.extrapolate='periodic'``, it will be set to False for the returned instance. This is done because the antiderivative is no longer periodic and its correct evaluation outside of the initially given x interval is difficult. 
""" if nu <= 0: return self.derivative(-nu) c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:], dtype=self.c.dtype) c[:-nu] = self.c # divide by the correct rising factorials factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu) c[:-nu] /= factor[(slice(None),) + (None,) * (c.ndim - 1)] # fix continuity of added degrees of freedom self._ensure_c_contiguous() _ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1), self.x, nu - 1) if self.extrapolate == 'periodic': extrapolate = False else: extrapolate = self.extrapolate # construct a compatible polynomial return self.construct_fast(c, self.x, extrapolate, self.axis) def integrate(self, a, b, extrapolate=None): """ Compute a definite integral over a piecewise polynomial. Parameters ---------- a : float Lower integration bound b : float Upper integration bound extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use `self.extrapolate`. Returns ------- ig : array_like Definite integral of the piecewise polynomial over [a, b] """ if extrapolate is None: extrapolate = self.extrapolate # Swap integration bounds if needed sign = 1 if b < a: a, b = b, a sign = -1 range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype) self._ensure_c_contiguous() # Compute the integral. if extrapolate == 'periodic': # Split the integral into the part over period (can be several # of them) and the remaining part. xs, xe = self.x[0], self.x[-1] period = xe - xs interval = b - a n_periods, left = divmod(interval, period) if n_periods > 0: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, xs, xe, False, out=range_int) range_int *= n_periods else: range_int.fill(0) # Map a to [xs, xe], b is always a + left. 
a = xs + (a - xs) % period b = a + left # If b <= xe then we need to integrate over [a, b], otherwise # over [a, xe] and from xs to what is remained. remainder_int = np.empty_like(range_int) if b <= xe: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, a, b, False, out=remainder_int) range_int += remainder_int else: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, a, xe, False, out=remainder_int) range_int += remainder_int _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, xs, xs + left + a - xe, False, out=remainder_int) range_int += remainder_int else: _ppoly.integrate( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, a, b, bool(extrapolate), out=range_int) # Return range_int *= sign return range_int.reshape(self.c.shape[2:]) def solve(self, y=0., discontinuity=True, extrapolate=None): """ Find real solutions of the the equation ``pp(x) == y``. Parameters ---------- y : float, optional Right-hand side. Default is zero. discontinuity : bool, optional Whether to report sign changes across discontinuities at breakpoints as roots. extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to return roots from the polynomial extrapolated based on first and last intervals, 'periodic' works the same as False. If None (default), use `self.extrapolate`. Returns ------- roots : ndarray Roots of the polynomial(s). If the PPoly object describes multiple polynomials, the return value is an object array whose each element is an ndarray containing the roots. Notes ----- This routine works only on real-valued polynomials. If the piecewise polynomial contains sections that are identically zero, the root list will contain the start point of the corresponding interval, followed by a ``nan`` value. If the polynomial is discontinuous across a breakpoint, and there is a sign change across the breakpoint, this is reported if the `discont` parameter is True. 
Examples -------- Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals ``[-2, 1], [1, 2]``: >>> from scipy.interpolate import PPoly >>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2]) >>> pp.roots() array([-1., 1.]) """ if extrapolate is None: extrapolate = self.extrapolate self._ensure_c_contiguous() if np.issubdtype(self.c.dtype, np.complexfloating): raise ValueError("Root finding is only for " "real-valued polynomials") y = float(y) r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, y, bool(discontinuity), bool(extrapolate)) if self.c.ndim == 2: return r[0] else: r2 = np.empty(prod(self.c.shape[2:]), dtype=object) # this for-loop is equivalent to ``r2[...] = r``, but that's broken # in numpy 1.6.0 for ii, root in enumerate(r): r2[ii] = root return r2.reshape(self.c.shape[2:]) def roots(self, discontinuity=True, extrapolate=None): """ Find real roots of the the piecewise polynomial. Parameters ---------- discontinuity : bool, optional Whether to report sign changes across discontinuities at breakpoints as roots. extrapolate : {bool, 'periodic', None}, optional If bool, determines whether to return roots from the polynomial extrapolated based on first and last intervals, 'periodic' works the same as False. If None (default), use `self.extrapolate`. Returns ------- roots : ndarray Roots of the polynomial(s). If the PPoly object describes multiple polynomials, the return value is an object array whose each element is an ndarray containing the roots. See Also -------- PPoly.solve """ return self.solve(0, discontinuity, extrapolate) @classmethod def from_spline(cls, tck, extrapolate=None): """ Construct a piecewise polynomial from a spline Parameters ---------- tck A spline, as returned by `splrep` or a BSpline object. extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. 
If 'periodic', periodic extrapolation is used. Default is True. """ if isinstance(tck, BSpline): t, c, k = tck.tck if extrapolate is None: extrapolate = tck.extrapolate else: t, c, k = tck cvals = np.empty((k + 1, len(t) - 1), dtype=c.dtype) for m in xrange(k, -1, -1): y = fitpack.splev(t[:-1], tck, der=m) cvals[k - m, :] = y / spec.gamma(m + 1) return cls.construct_fast(cvals, t, extrapolate) @classmethod def from_bernstein_basis(cls, bp, extrapolate=None): """ Construct a piecewise polynomial in the power basis from a polynomial in Bernstein basis. Parameters ---------- bp : BPoly A Bernstein basis polynomial, as created by BPoly extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. """ dx = np.diff(bp.x) k = bp.c.shape[0] - 1 # polynomial order rest = (None,) * (bp.c.ndim - 2) c = np.zeros_like(bp.c) for a in range(k + 1): factor = (-1) ** a * comb(k, a) * bp.c[a] for s in range(a, k + 1): val = comb(k - a, s - a) * (-1) ** s c[k - s] += factor * val / dx[(slice(None),) + rest] ** s if extrapolate is None: extrapolate = bp.extrapolate return cls.construct_fast(c, bp.x, extrapolate, bp.axis) class BPoly(_PPolyBase): """Piecewise polynomial in terms of coefficients and breakpoints. The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the Bernstein polynomial basis:: S = sum(c[a, i] * b(a, k; x) for a in range(k+1)), where ``k`` is the degree of the polynomial, and:: b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a), with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial coefficient. Parameters ---------- c : ndarray, shape (k, m, ...) Polynomial coefficients, order `k` and `m` intervals x : ndarray, shape (m+1,) Polynomial breakpoints. Must be sorted in either increasing or decreasing order. 
extrapolate : bool, optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. axis : int, optional Interpolation axis. Default is zero. Attributes ---------- x : ndarray Breakpoints. c : ndarray Coefficients of the polynomials. They are reshaped to a 3-dimensional array with the last dimension representing the trailing dimensions of the original coefficient array. axis : int Interpolation axis. Methods ------- __call__ extend derivative antiderivative integrate construct_fast from_power_basis from_derivatives See also -------- PPoly : piecewise polynomials in the power basis Notes ----- Properties of Bernstein polynomials are well documented in the literature. Here's a non-exhaustive list: .. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial .. [2] Kenneth I. Joy, Bernstein polynomials, http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems, vol 2011, article ID 829546, :doi:`10.1155/2011/829543`. Examples -------- >>> from scipy.interpolate import BPoly >>> x = [0, 1] >>> c = [[1], [2], [3]] >>> bp = BPoly(c, x) This creates a 2nd order polynomial .. math:: B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\ = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2 """ def _evaluate(self, x, nu, extrapolate, out): _ppoly.evaluate_bernstein( self.c.reshape(self.c.shape[0], self.c.shape[1], -1), self.x, x, nu, bool(extrapolate), out) def derivative(self, nu=1): """ Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : int, optional Order of derivative to evaluate. Default is 1, i.e. compute the first derivative. If negative, the antiderivative is returned. Returns ------- bp : BPoly Piecewise polynomial of order k - nu representing the derivative of this polynomial. 
""" if nu < 0: return self.antiderivative(-nu) if nu > 1: bp = self for k in range(nu): bp = bp.derivative() return bp # reduce order if nu == 0: c2 = self.c.copy() else: # For a polynomial # B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x), # we use the fact that # b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ), # which leads to # B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1} # # finally, for an interval [y, y + dy] with dy != 1, # we need to correct for an extra power of dy rest = (None,) * (self.c.ndim - 2) k = self.c.shape[0] - 1 dx = np.diff(self.x)[(None, slice(None)) + rest] c2 = k * np.diff(self.c, axis=0) / dx if c2.shape[0] == 0: # derivative of order 0 is zero c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype) # construct a compatible polynomial return self.construct_fast(c2, self.x, self.extrapolate, self.axis) def antiderivative(self, nu=1): """ Construct a new piecewise polynomial representing the antiderivative. Parameters ---------- nu : int, optional Order of antiderivative to evaluate. Default is 1, i.e. compute the first integral. If negative, the derivative is returned. Returns ------- bp : BPoly Piecewise polynomial of order k + nu representing the antiderivative of this polynomial. Notes ----- If antiderivative is computed and ``self.extrapolate='periodic'``, it will be set to False for the returned instance. This is done because the antiderivative is no longer periodic and its correct evaluation outside of the initially given x interval is difficult. """ if nu <= 0: return self.derivative(-nu) if nu > 1: bp = self for k in range(nu): bp = bp.antiderivative() return bp # Construct the indefinite integrals on individual intervals c, x = self.c, self.x k = c.shape[0] c2 = np.zeros((k + 1,) + c.shape[1:], dtype=c.dtype) c2[1:, ...] 
= np.cumsum(c, axis=0) / k delta = x[1:] - x[:-1] c2 *= delta[(None, slice(None)) + (None,) * (c.ndim - 2)] # Now fix continuity: on the very first interval, take the integration # constant to be zero; on an interval [x_j, x_{j+1}) with j>0, # the integration constant is then equal to the jump of the `bp` at x_j. # The latter is given by the coefficient of B_{n+1, n+1} # *on the previous interval* (other B. polynomials are zero at the # breakpoint). Finally, use the fact that BPs form a partition of unity. c2[:, 1:] += np.cumsum(c2[k, :], axis=0)[:-1] if self.extrapolate == 'periodic': extrapolate = False else: extrapolate = self.extrapolate return self.construct_fast(c2, x, extrapolate, axis=self.axis) def integrate(self, a, b, extrapolate=None): """ Compute a definite integral over a piecewise polynomial. Parameters ---------- a : float Lower integration bound b : float Upper integration bound extrapolate : {bool, 'periodic', None}, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use `self.extrapolate`. Returns ------- array_like Definite integral of the piecewise polynomial over [a, b] """ # XXX: can probably use instead the fact that # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1) ib = self.antiderivative() if extrapolate is None: extrapolate = self.extrapolate # ib.extrapolate shouldn't be 'periodic', it is converted to # False for 'periodic. in antiderivative() call. if extrapolate != 'periodic': ib.extrapolate = extrapolate if extrapolate == 'periodic': # Split the integral into the part over period (can be several # of them) and the remaining part. # For simplicity and clarity convert to a <= b case. if a <= b: sign = 1 else: a, b = b, a sign = -1 xs, xe = self.x[0], self.x[-1] period = xe - xs interval = b - a n_periods, left = divmod(interval, period) res = n_periods * (ib(xe) - ib(xs)) # Map a and b to [xs, xe]. 
a = xs + (a - xs) % period b = a + left # If b <= xe then we need to integrate over [a, b], otherwise # over [a, xe] and from xs to what is remained. if b <= xe: res += ib(b) - ib(a) else: res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs) return sign * res else: return ib(b) - ib(a) def extend(self, c, x, right=None): k = max(self.c.shape[0], c.shape[0]) self.c = self._raise_degree(self.c, k - self.c.shape[0]) c = self._raise_degree(c, k - c.shape[0]) return _PPolyBase.extend(self, c, x, right) extend.__doc__ = _PPolyBase.extend.__doc__ @classmethod def from_power_basis(cls, pp, extrapolate=None): """ Construct a piecewise polynomial in Bernstein basis from a power basis polynomial. Parameters ---------- pp : PPoly A piecewise polynomial in the power basis extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. """ dx = np.diff(pp.x) k = pp.c.shape[0] - 1 # polynomial order rest = (None,) * (pp.c.ndim - 2) c = np.zeros_like(pp.c) for a in range(k + 1): factor = pp.c[a] / comb(k, k - a) * dx[(slice(None),) + rest] ** (k - a) for j in range(k - a, k + 1): c[j] += factor * comb(j, k - a) if extrapolate is None: extrapolate = pp.extrapolate return cls.construct_fast(c, pp.x, extrapolate, pp.axis) @classmethod def from_derivatives(cls, xi, yi, orders=None, extrapolate=None): """Construct a piecewise polynomial in the Bernstein basis, compatible with the specified values and derivatives at breakpoints. Parameters ---------- xi : array_like sorted 1D array of x-coordinates yi : array_like or list of array_likes ``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]`` orders : None or int or array_like of ints. Default: None. Specifies the degree of local polynomials. If not None, some derivatives are ignored. 
extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True. Notes ----- If ``k`` derivatives are specified at a breakpoint ``x``, the constructed polynomial is exactly ``k`` times continuously differentiable at ``x``, unless the ``order`` is provided explicitly. In the latter case, the smoothness of the polynomial at the breakpoint is controlled by the ``order``. Deduces the number of derivatives to match at each end from ``order`` and the number of derivatives available. If possible it uses the same number of derivatives from each end; if the number is odd it tries to take the extra one from y2. In any case if not enough derivatives are available at one end or another it draws enough to make up the total from the other end. If the order is too high and not enough derivatives are available, an exception is raised. Examples -------- >>> from scipy.interpolate import BPoly >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]]) Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]` such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4` >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]]) Creates a piecewise polynomial `f(x)`, such that `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`. Based on the number of derivatives provided, the order of the local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`. Notice that no restriction is imposed on the derivatives at `x = 1` and `x = 2`. 
Indeed, the explicit form of the polynomial is:: f(x) = | x * (1 - x), 0 <= x < 1 | 2 * (x - 1), 1 <= x <= 2 So that f'(1-0) = -1 and f'(1+0) = 2 """ xi = np.asarray(xi) if len(xi) != len(yi): raise ValueError("xi and yi need to have the same length") if np.any(xi[1:] - xi[:1] <= 0): raise ValueError("x coordinates are not in increasing order") # number of intervals m = len(xi) - 1 # global poly order is k-1, local orders are <=k and can vary try: k = max(len(yi[i]) + len(yi[i + 1]) for i in range(m)) except TypeError: raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).") if orders is None: orders = [None] * m else: if isinstance(orders, (integer_types, np.integer)): orders = [orders] * m k = max(k, max(orders)) if any(o <= 0 for o in orders): raise ValueError("Orders must be positive.") c = [] for i in range(m): y1, y2 = yi[i], yi[i + 1] if orders[i] is None: n1, n2 = len(y1), len(y2) else: n = orders[i] + 1 n1 = min(n // 2, len(y1)) n2 = min(n - n1, len(y2)) n1 = min(n - n2, len(y2)) if n1 + n2 != n: mesg = ("Point %g has %d derivatives, point %g" " has %d derivatives, but order %d requested" % ( xi[i], len(y1), xi[i + 1], len(y2), orders[i])) raise ValueError(mesg) if not (n1 <= len(y1) and n2 <= len(y2)): raise ValueError("`order` input incompatible with" " length y1 or y2.") b = BPoly._construct_from_derivatives(xi[i], xi[i + 1], y1[:n1], y2[:n2]) if len(b) < k: b = BPoly._raise_degree(b, k - len(b)) c.append(b) c = np.asarray(c) return cls(c.swapaxes(0, 1), xi, extrapolate) @staticmethod def _construct_from_derivatives(xa, xb, ya, yb): """Compute the coefficients of a polynomial in the Bernstein basis given the values and derivatives at the edges. Return the coefficients of a polynomial in the Bernstein basis defined on `[xa, xb]` and having the values and derivatives at the endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``. 
The polynomial constructed is of the minimal possible degree, i.e., if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree of the polynomial is ``na + nb - 1``. Parameters ---------- xa : float Left-hand end point of the interval xb : float Right-hand end point of the interval ya : array_like Derivatives at ``xa``. ``ya[0]`` is the value of the function, and ``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative. yb : array_like Derivatives at ``xb``. Returns ------- array coefficient array of a polynomial having specified derivatives Notes ----- This uses several facts from life of Bernstein basis functions. First of all, .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1}) If B(x) is a linear combination of the form .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n}, then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}. Iterating the latter one, one finds for the q-th derivative .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q}, with .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a} This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and `c_q` are found one by one by iterating `q = 0, ..., na`. At `x = xb` it's the same with `a = n - q`. 
""" ya, yb = np.asarray(ya), np.asarray(yb) if ya.shape[1:] != yb.shape[1:]: raise ValueError('ya and yb have incompatible dimensions.') dta, dtb = ya.dtype, yb.dtype if (np.issubdtype(dta, np.complexfloating) or np.issubdtype(dtb, np.complexfloating)): dt = np.complex_ else: dt = np.float_ na, nb = len(ya), len(yb) n = na + nb c = np.empty((na + nb,) + ya.shape[1:], dtype=dt) # compute coefficients of a polynomial degree na+nb-1 # walk left-to-right for q in range(0, na): c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa) ** q for j in range(0, q): c[q] -= (-1) ** (j + q) * comb(q, j) * c[j] # now walk right-to-left for q in range(0, nb): c[-q - 1] = yb[q] / spec.poch(n - q, q) * (-1) ** q * (xb - xa) ** q for j in range(0, q): c[-q - 1] -= (-1) ** (j + 1) * comb(q, j + 1) * c[-q + j] return c @staticmethod def _raise_degree(c, d): """Raise a degree of a polynomial in the Bernstein basis. Given the coefficients of a polynomial degree `k`, return (the coefficients of) the equivalent polynomial of degree `k+d`. Parameters ---------- c : array_like coefficient array, 1D d : integer Returns ------- array coefficient array, 1D array of length `c.shape[0] + d` Notes ----- This uses the fact that a Bernstein polynomial `b_{a, k}` can be identically represented as a linear combination of polynomials of a higher degree `k+d`: .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \ comb(d, j) / comb(k+d, a+j) """ if d == 0: return c k = c.shape[0] - 1 out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype) for a in range(c.shape[0]): f = c[a] * comb(k, a) for j in range(d + 1): out[a + j] += f * comb(d, j) / comb(k + d, a + j) return out class NdPPoly(object): """ Piecewise tensor product polynomial The value at point `xp = (x', y', z', ...)` is evaluated by first computing the interval indices `i` such that:: x[0][i[0]] <= x' < x[0][i[0]+1] x[1][i[1]] <= y' < x[1][i[1]+1] ... 
and then computing:: S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]] * (xp[0] - x[0][i[0]])**m0 * ... * (xp[n] - x[n][i[n]])**mn for m0 in range(k[0]+1) ... for mn in range(k[n]+1)) where ``k[j]`` is the degree of the polynomial in dimension j. This representation is the piecewise multivariate power basis. Parameters ---------- c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...) Polynomial coefficients, with polynomial order `kj` and `mj+1` intervals for each dimension `j`. x : ndim-tuple of ndarrays, shapes (mj+1,) Polynomial breakpoints for each dimension. These must be sorted in increasing order. extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Default: True. Attributes ---------- x : tuple of ndarrays Breakpoints. c : ndarray Coefficients of the polynomials. Methods ------- __call__ construct_fast See also -------- PPoly : piecewise polynomials in 1D Notes ----- High-order polynomials in the power basis can be numerically unstable. 
""" def __init__(self, c, x, extrapolate=None): self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x) self.c = np.asarray(c) if extrapolate is None: extrapolate = True self.extrapolate = bool(extrapolate) ndim = len(self.x) if any(v.ndim != 1 for v in self.x): raise ValueError("x arrays must all be 1-dimensional") if any(v.size < 2 for v in self.x): raise ValueError("x arrays must all contain at least 2 points") if c.ndim < 2 * ndim: raise ValueError("c must have at least 2*len(x) dimensions") if any(np.any(v[1:] - v[:-1] < 0) for v in self.x): raise ValueError("x-coordinates are not in increasing order") if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2 * ndim], self.x)): raise ValueError("x and c do not agree on the number of intervals") dtype = self._get_dtype(self.c.dtype) self.c = np.ascontiguousarray(self.c, dtype=dtype) @classmethod def construct_fast(cls, c, x, extrapolate=None): """ Construct the piecewise polynomial without making checks. Takes the same parameters as the constructor. Input arguments `c` and `x` must be arrays of the correct shape and type. The `c` array can only be of dtypes float and complex, and `x` array must have dtype float. """ self = object.__new__(cls) self.c = c self.x = x if extrapolate is None: extrapolate = True self.extrapolate = extrapolate return self def _get_dtype(self, dtype): if np.issubdtype(dtype, np.complexfloating) \ or np.issubdtype(self.c.dtype, np.complexfloating): return np.complex_ else: return np.float_ def _ensure_c_contiguous(self): if not self.c.flags.c_contiguous: self.c = self.c.copy() if not isinstance(self.x, tuple): self.x = tuple(self.x) def __call__(self, x, nu=None, extrapolate=None): """ Evaluate the piecewise polynomial or its derivative Parameters ---------- x : array-like Points to evaluate the interpolant at. nu : tuple, optional Orders of derivatives to evaluate. Each must be non-negative. 
extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Returns ------- y : array-like Interpolated values. Shape is determined by replacing the interpolation axis in the original array with the shape of x. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. The polynomial intervals are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. """ if extrapolate is None: extrapolate = self.extrapolate else: extrapolate = bool(extrapolate) ndim = len(self.x) x = _ndim_coords_from_arrays(x) x_shape = x.shape x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_) if nu is None: nu = np.zeros((ndim,), dtype=np.intc) else: nu = np.asarray(nu, dtype=np.intc) if nu.ndim != 1 or nu.shape[0] != ndim: raise ValueError("invalid number of derivative orders nu") dim1 = prod(self.c.shape[:ndim]) dim2 = prod(self.c.shape[ndim:2 * ndim]) dim3 = prod(self.c.shape[2 * ndim:]) ks = np.array(self.c.shape[:ndim], dtype=np.intc) out = np.empty((x.shape[0], dim3), dtype=self.c.dtype) self._ensure_c_contiguous() _ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3), self.x, ks, x, nu, bool(extrapolate), out) return out.reshape(x_shape[:-1] + self.c.shape[2 * ndim:]) def _derivative_inplace(self, nu, axis): """ Compute 1D derivative along a selected dimension in-place May result to non-contiguous c array. 
""" if nu < 0: return self._antiderivative_inplace(-nu, axis) ndim = len(self.x) axis = axis % ndim # reduce order if nu == 0: # noop return else: sl = [slice(None)] * ndim sl[axis] = slice(None, -nu, None) c2 = self.c[sl] if c2.shape[axis] == 0: # derivative of order 0 is zero shp = list(c2.shape) shp[axis] = 1 c2 = np.zeros(shp, dtype=c2.dtype) # multiply by the correct rising factorials factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu) sl = [None] * c2.ndim sl[axis] = slice(None) c2 *= factor[sl] self.c = c2 def _antiderivative_inplace(self, nu, axis): """ Compute 1D antiderivative along a selected dimension May result to non-contiguous c array. """ if nu <= 0: return self._derivative_inplace(-nu, axis) ndim = len(self.x) axis = axis % ndim perm = list(range(ndim)) perm[0], perm[axis] = perm[axis], perm[0] perm = perm + list(range(ndim, self.c.ndim)) c = self.c.transpose(perm) c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:], dtype=c.dtype) c2[:-nu] = c # divide by the correct rising factorials factor = spec.poch(np.arange(c.shape[0], 0, -1), nu) c2[:-nu] /= factor[(slice(None),) + (None,) * (c.ndim - 1)] # fix continuity of added degrees of freedom perm2 = list(range(c2.ndim)) perm2[1], perm2[ndim + axis] = perm2[ndim + axis], perm2[1] c2 = c2.transpose(perm2) c2 = c2.copy() _ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1), self.x[axis], nu - 1) c2 = c2.transpose(perm2) c2 = c2.transpose(perm) # Done self.c = c2 def derivative(self, nu): """ Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : ndim-tuple of int Order of derivatives to evaluate for each dimension. If negative, the antiderivative is returned. Returns ------- pp : NdPPoly Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n]) representing the derivative of this polynomial. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. 
The polynomial intervals in each dimension are considered half-open, ``[a, b)``, except for the last interval which is closed ``[a, b]``. """ p = self.construct_fast(self.c.copy(), self.x, self.extrapolate) for axis, n in enumerate(nu): p._derivative_inplace(n, axis) p._ensure_c_contiguous() return p def antiderivative(self, nu): """ Construct a new piecewise polynomial representing the antiderivative. Antiderivative is also the indefinite integral of the function, and derivative is its inverse operation. Parameters ---------- nu : ndim-tuple of int Order of derivatives to evaluate for each dimension. If negative, the derivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k + n representing the antiderivative of this polynomial. Notes ----- The antiderivative returned by this function is continuous and continuously differentiable to order n-1, up to floating point rounding error. """ p = self.construct_fast(self.c.copy(), self.x, self.extrapolate) for axis, n in enumerate(nu): p._antiderivative_inplace(n, axis) p._ensure_c_contiguous() return p def integrate_1d(self, a, b, axis, extrapolate=None): r""" Compute NdPPoly representation for one dimensional definite integral The result is a piecewise polynomial representing the integral: .. math:: p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...) where the dimension integrated over is specified with the `axis` parameter. Parameters ---------- a, b : float Lower and upper bound for integration. axis : int Dimension over which to compute the 1D integrals extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Returns ------- ig : NdPPoly or array-like Definite integral of the piecewise polynomial over [a, b]. If the polynomial was 1-dimensional, an array is returned, otherwise, an NdPPoly object. 
""" if extrapolate is None: extrapolate = self.extrapolate else: extrapolate = bool(extrapolate) ndim = len(self.x) axis = int(axis) % ndim # reuse 1D integration routines c = self.c swap = list(range(c.ndim)) swap.insert(0, swap[axis]) del swap[axis + 1] swap.insert(1, swap[ndim + axis]) del swap[ndim + axis + 1] c = c.transpose(swap) p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1), self.x[axis], extrapolate=extrapolate) out = p.integrate(a, b, extrapolate=extrapolate) # Construct result if ndim == 1: return out.reshape(c.shape[2:]) else: c = out.reshape(c.shape[2:]) x = self.x[:axis] + self.x[axis + 1:] return self.construct_fast(c, x, extrapolate=extrapolate) def integrate(self, ranges, extrapolate=None): """ Compute a definite integral over a piecewise polynomial. Parameters ---------- ranges : ndim-tuple of 2-tuples float Sequence of lower and upper bounds for each dimension, ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]`` extrapolate : bool, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. Returns ------- ig : array_like Definite integral of the piecewise polynomial over [a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]] """ ndim = len(self.x) if extrapolate is None: extrapolate = self.extrapolate else: extrapolate = bool(extrapolate) if not hasattr(ranges, '__len__') or len(ranges) != ndim: raise ValueError("Range not a sequence of correct length") self._ensure_c_contiguous() # Reuse 1D integration routine c = self.c for n, (a, b) in enumerate(ranges): swap = list(range(c.ndim)) swap.insert(1, swap[ndim - n]) del swap[ndim - n + 1] c = c.transpose(swap) p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate) out = p.integrate(a, b, extrapolate=extrapolate) c = out.reshape(c.shape[2:]) return c class RegularGridInterpolator(object): """ Interpolation on a regular grid in arbitrary dimensions The data must be defined on a regular grid; the grid spacing however may be uneven. 
Linear and nearest-neighbour interpolation are supported. After setting up the interpolator object, the interpolation method (*linear* or *nearest*) may be chosen at each evaluation. Parameters ---------- points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ) The points defining the regular grid in n dimensions. values : array_like, shape (m1, ..., mn, ...) The data on the regular grid in n dimensions. method : str, optional The method of interpolation to perform. Supported are "linear" and "nearest". This parameter will become the default for the object's ``__call__`` method. Default is "linear". bounds_error : bool, optional If True, when interpolated values are requested outside of the domain of the input data, a ValueError is raised. If False, then `fill_value` is used. fill_value : number, optional If provided, the value to use for points outside of the interpolation domain. If None, values outside the domain are extrapolated. Methods ------- __call__ Notes ----- Contrary to LinearNDInterpolator and NearestNDInterpolator, this class avoids expensive triangulation of the input data by taking advantage of the regular grid structure. .. versionadded:: 0.14 Examples -------- Evaluate a simple example function on the points of a 3D grid: >>> from scipy.interpolate import RegularGridInterpolator >>> def f(x, y, z): ... return 2 * x**3 + 3 * y**2 - z >>> x = np.linspace(1, 4, 11) >>> y = np.linspace(4, 7, 22) >>> z = np.linspace(7, 9, 33) >>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True)) ``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``. 
Next, define an interpolating function from this data: >>> my_interpolating_function = RegularGridInterpolator((x, y, z), data) Evaluate the interpolating function at the two points ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``: >>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]]) >>> my_interpolating_function(pts) array([ 125.80469388, 146.30069388]) which is indeed a close approximation to ``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``. See also -------- NearestNDInterpolator : Nearest neighbour interpolation on unstructured data in N dimensions LinearNDInterpolator : Piecewise linear interpolant on unstructured data in N dimensions References ---------- .. [1] Python package *regulargrid* by Johannes Buchner, see https://pypi.python.org/pypi/regulargrid/ .. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free Encyclopedia. Retrieved 27 Feb 2013 01:28. http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871 .. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear and multilinear table interpolation in many dimensions." MATH. COMPUT. 50.181 (1988): 189-196. 
    http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
    """
    # this class is based on code originally programmed by Johannes Buchner,
    # see https://github.com/JohannesBuchner/regulargrid

    def __init__(self, points, values, method="linear", bounds_error=True,
                 fill_value=np.nan):
        # Validate the method eagerly so a bad kwarg fails at construction
        # time rather than at call time.
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)
        self.method = method
        self.bounds_error = bounds_error

        if not hasattr(values, 'ndim'):
            # allow reasonable duck-typed values
            values = np.asarray(values)

        # values may have MORE dims than there are grid axes (trailing dims
        # are interpolated pointwise), but never fewer.
        if len(points) > values.ndim:
            raise ValueError("There are %d point arrays, but values has %d "
                             "dimensions" % (len(points), values.ndim))

        if hasattr(values, 'dtype') and hasattr(values, 'astype'):
            # integer data would truncate during linear interpolation;
            # promote anything non-inexact to float.
            if not np.issubdtype(values.dtype, np.inexact):
                values = values.astype(float)

        self.fill_value = fill_value
        if fill_value is not None:
            # Reject fill values that cannot be safely written into the
            # output array (e.g. complex fill into real data).
            fill_value_dtype = np.asarray(fill_value).dtype
            if (hasattr(values, 'dtype') and not
                    np.can_cast(fill_value_dtype, values.dtype,
                                casting='same_kind')):
                raise ValueError("fill_value must be either 'None' or "
                                 "of a type compatible with values")

        # Per-axis sanity checks: strictly ascending, 1-D, and consistent
        # with the corresponding values dimension.
        for i, p in enumerate(points):
            if not np.all(np.diff(p) > 0.):
                raise ValueError("The points in dimension %d must be strictly "
                                 "ascending" % i)
            if not np.asarray(p).ndim == 1:
                raise ValueError("The points in dimension %d must be "
                                 "1-dimensional" % i)
            if not values.shape[i] == len(p):
                raise ValueError("There are %d points and %d values in "
                                 "dimension %d" % (len(p), values.shape[i], i))
        self.grid = tuple([np.asarray(p) for p in points])
        self.values = values

    def __call__(self, xi, method=None):
        """
        Interpolation at coordinates

        Parameters
        ----------
        xi : ndarray of shape (..., ndim)
            The coordinates to sample the gridded data at

        method : str
            The method of interpolation to perform. Supported are "linear" and
            "nearest".

        """
        # Per-call method override; falls back to the constructor's choice.
        method = self.method if method is None else method
        if method not in ["linear", "nearest"]:
            raise ValueError("Method '%s' is not defined" % method)

        ndim = len(self.grid)
        xi = _ndim_coords_from_arrays(xi, ndim=ndim)
        if xi.shape[-1] != len(self.grid):
            raise ValueError("The requested sample points xi have dimension "
                             "%d, but this RegularGridInterpolator has "
                             "dimension %d" % (xi.shape[1], ndim))

        # Flatten to (npoints, ndim); the original shape is restored on return.
        xi_shape = xi.shape
        xi = xi.reshape(-1, xi_shape[-1])

        if self.bounds_error:
            for i, p in enumerate(xi.T):
                if not np.logical_and(np.all(self.grid[i][0] <= p),
                                      np.all(p <= self.grid[i][-1])):
                    raise ValueError("One of the requested xi is out of bounds "
                                     "in dimension %d" % i)

        indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
        if method == "linear":
            result = self._evaluate_linear(indices,
                                           norm_distances,
                                           out_of_bounds)
        elif method == "nearest":
            result = self._evaluate_nearest(indices,
                                            norm_distances,
                                            out_of_bounds)
        # With bounds_error=False, out-of-domain points are overwritten with
        # fill_value (when fill_value is None they keep the extrapolated value).
        if not self.bounds_error and self.fill_value is not None:
            result[out_of_bounds] = self.fill_value

        return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])

    def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
        # slice for broadcasting over trailing dimensions in self.values
        vslice = (slice(None),) + (None,) * (self.values.ndim - len(indices))

        # find relevant values
        # each i and i+1 represents an edge; iterate over all 2**ndim corners
        # of the enclosing hypercube
        edges = itertools.product(*[[i, i + 1] for i in indices])
        values = 0.
        for edge_indices in edges:
            # Multilinear weight: (1 - yi) toward the lower edge, yi toward
            # the upper edge, accumulated over all dimensions.
            weight = 1.
            for ei, i, yi in zip(edge_indices, indices, norm_distances):
                weight *= np.where(ei == i, 1 - yi, yi)
            values += np.asarray(self.values[edge_indices]) * weight[vslice]
        return values

    def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
        # Snap to the closer grid edge per dimension; ties (yi == 0.5)
        # resolve to the lower index i.
        idx_res = []
        for i, yi in zip(indices, norm_distances):
            idx_res.append(np.where(yi <= .5, i, i + 1))
        # NOTE(review): indexing with a *list* of index arrays relies on
        # legacy fancy-indexing semantics; newer numpy expects a tuple here —
        # verify against the numpy version this is pinned to.
        return self.values[idx_res]

    def _find_indices(self, xi):
        # xi is (ndim, npoints). Returns, per dimension, the lower-cell index,
        # the normalized distance into that cell, and a shared out-of-bounds
        # mask over points.
        # find relevant edges between which xi are situated
        indices = []
        # compute distance to lower edge in unity units
        norm_distances = []
        # check for out of bounds xi
        out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
        # iterate through dimensions
        for x, grid in zip(xi, self.grid):
            i = np.searchsorted(grid, x) - 1
            # Clamp to valid cell range so out-of-domain points index the
            # edge cell (their result is later replaced via fill_value, or
            # linearly extrapolated when fill_value is None).
            i[i < 0] = 0
            i[i > grid.size - 2] = grid.size - 2
            indices.append(i)
            norm_distances.append((x - grid[i]) /
                                  (grid[i + 1] - grid[i]))
            if not self.bounds_error:
                out_of_bounds += x < grid[0]
                out_of_bounds += x > grid[-1]
        return indices, norm_distances, out_of_bounds


def interpn(points, values, xi, method="linear", bounds_error=True,
            fill_value=np.nan):
    """
    Multidimensional interpolation on regular grids.

    Parameters
    ----------
    points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
        The points defining the regular grid in n dimensions.

    values : array_like, shape (m1, ..., mn, ...)
        The data on the regular grid in n dimensions.

    xi : ndarray of shape (..., ndim)
        The coordinates to sample the gridded data at

    method : str, optional
        The method of interpolation to perform. Supported are "linear" and
        "nearest", and "splinef2d". "splinef2d" is only supported for
        2-dimensional data.

    bounds_error : bool, optional
        If True, when interpolated values are requested outside of the
        domain of the input data, a ValueError is raised.
        If False, then `fill_value` is used.

    fill_value : number, optional
        If provided, the value to use for points outside of the
        interpolation domain. If None, values outside
        the domain are extrapolated.  Extrapolation is not supported by method
        "splinef2d".

    Returns
    -------
    values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
        Interpolated values at input coordinates.

    Notes
    -----

    .. versionadded:: 0.14

    See also
    --------
    NearestNDInterpolator : Nearest neighbour interpolation on unstructured
                            data in N dimensions

    LinearNDInterpolator : Piecewise linear interpolant on unstructured data
                           in N dimensions

    RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
                              regular grid in arbitrary dimensions

    RectBivariateSpline : Bivariate spline approximation over a rectangular
                          mesh

    """
    # sanity check 'method' kwarg
    if method not in ["linear", "nearest", "splinef2d"]:
        raise ValueError("interpn only understands the methods 'linear', "
                         "'nearest', and 'splinef2d'. You provided %s." %
                         method)

    if not hasattr(values, 'ndim'):
        values = np.asarray(values)

    ndim = values.ndim
    if ndim > 2 and method == "splinef2d":
        raise ValueError("The method spline2fd can only be used for "
                         "2-dimensional input data")
    if not bounds_error and fill_value is None and method == "splinef2d":
        raise ValueError("The method spline2fd does not support extrapolation.")

    # sanity check consistency of input dimensions
    if len(points) > ndim:
        raise ValueError("There are %d point arrays, but values has %d "
                         "dimensions" % (len(points), ndim))
    if len(points) != ndim and method == 'splinef2d':
        raise ValueError("The method spline2fd can only be used for "
                         "scalar data with one point per coordinate")

    # sanity check input grid
    for i, p in enumerate(points):
        if not np.all(np.diff(p) > 0.):
            raise ValueError("The points in dimension %d must be strictly "
                             "ascending" % i)
        if not np.asarray(p).ndim == 1:
            raise ValueError("The points in dimension %d must be "
                             "1-dimensional" % i)
        if not values.shape[i] == len(p):
            raise ValueError("There are %d points and %d values in "
                             "dimension %d" % (len(p), values.shape[i], i))
    grid = tuple([np.asarray(p) for p in points])

    # sanity check requested xi
    xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
    if xi.shape[-1] != len(grid):
        raise ValueError("The requested sample points xi have dimension "
                         "%d, but this RegularGridInterpolator has "
                         "dimension %d" % (xi.shape[1], len(grid)))

    for i, p in enumerate(xi.T):
        if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
                                               np.all(p <= grid[i][-1])):
            raise ValueError("One of the requested xi is out of bounds "
                             "in dimension %d" % i)

    # perform interpolation
    if method == "linear":
        interp = RegularGridInterpolator(points, values, method="linear",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "nearest":
        interp = RegularGridInterpolator(points, values, method="nearest",
                                         bounds_error=bounds_error,
                                         fill_value=fill_value)
        return interp(xi)
    elif method == "splinef2d":
        xi_shape = xi.shape
        xi = xi.reshape(-1, xi.shape[-1])

        # RectBivariateSpline doesn't support fill_value; we need to wrap here
        idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
                            grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
                           axis=0)
        result = np.empty_like(xi[:, 0])

        # make a copy of values for RectBivariateSpline
        interp = RectBivariateSpline(points[0], points[1], values[:])
        result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
        result[np.logical_not(idx_valid)] = fill_value

        return result.reshape(xi_shape[:-1])


# backward compatibility wrapper
class ppform(PPoly):
    """
    Deprecated piecewise polynomial class.

    New code should use the `PPoly` class instead.

    """

    def __init__(self, coeffs, breaks, fill=0.0, sort=False):
        warnings.warn("ppform is deprecated -- use PPoly instead",
                      category=DeprecationWarning)

        if sort:
            breaks = np.sort(breaks)
        else:
            breaks = np.asarray(breaks)

        PPoly.__init__(self, coeffs, breaks)

        self.coeffs = self.c
        self.breaks = self.x
        self.K = self.coeffs.shape[0]
        self.fill = fill
        self.a = self.breaks[0]
        self.b = self.breaks[-1]

    def __call__(self, x):
        # extrapolate=False; out-of-range handling is done in _evaluate below.
        return PPoly.__call__(self, x, 0, False)

    def _evaluate(self, x, nu, extrapolate, out):
        PPoly._evaluate(self, x, nu, extrapolate, out)
        # Outside [a, b] the value is forced to the constant `fill`.
        out[~((x >= self.a) & (x <= self.b))] = self.fill
        return out

    @classmethod
    def fromspline(cls, xk, cvals, order, fill=0.0):
        # Note: this spline representation is incompatible with FITPACK
        N = len(xk) - 1
        sivals = np.empty((order + 1, N), dtype=float)
        for m in xrange(order, -1, -1):
            # NOTE(review): `xrange` implies this file targets Python 2 (or a
            # compat shim is imported elsewhere in the file) — confirm.
            fact = spec.gamma(m + 1)
            res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
            res /= fact
            sivals[order - m, :] = res
        return cls(sivals, xk, fill=fill)


# The 3 private functions below can be called by splmake().
def _dot0(a, b):
    """Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
    if b.ndim <= 2:
        return dot(a, b)
    else:
        # Move b's first axis next to its last so plain dot() contracts
        # a's last axis against b's (original) first axis.
        axes = list(range(b.ndim))
        axes.insert(-1, 0)
        axes.pop(0)
        return dot(a, b.transpose(axes))


def _find_smoothest(xk, yk, order, conds=None, B=None):
    # construct Bmatrix, and Jmatrix
    # e = J*c
    # minimize norm(e,2) given B*c=yk
    # if desired B can be given
    # conds is ignored
    N = len(xk) - 1
    K = order
    if B is None:
        B = _fitpack._bsplmat(order, xk)
    J = _fitpack._bspldismat(order, xk)
    u, s, vh = scipy.linalg.svd(B)
    ind = K - 1
    V2 = vh[-ind:, :].T
    V1 = vh[:-ind, :].T
    A = dot(J.T, J)
    tmp = dot(V2.T, A)
    Q = dot(tmp, V2)
    p = scipy.linalg.solve(Q, tmp)
    tmp = dot(V2, p)
    tmp = np.eye(N + K) - tmp
    tmp = dot(tmp, V1)
    tmp = dot(tmp, np.diag(1.0 / s))
    tmp = dot(tmp, u.T)
    return _dot0(tmp, yk)


# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
    lh = conds[0]
    rh = conds[1]
    # Stack the user equations under the spline matrix and solve the
    # augmented system; fall back to the smoothest solution when it is
    # still under-determined.
    B = np.concatenate((B, lh), axis=0)
    w = np.concatenate((yk, rh), axis=0)
    M, N = B.shape
    if (M > N):
        raise ValueError("over-specification of conditions")
    elif (M < N):
        return _find_smoothest(xk, yk, order, None, B)
    else:
        return scipy.linalg.solve(B, w)


# Remove the 3 private functions above as well when removing splmake
@np.deprecate(message="splmake is deprecated in scipy 0.19.0, "
                      "use make_interp_spline instead.")
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
    """
    Return a representation of a spline given data-points at internal knots

    Parameters
    ----------
    xk : array_like
        The input array of x values of rank 1
    yk : array_like
        The input array of y values of rank N. `yk` can be an N-d array to
        represent more than one curve, through the same `xk` points. The first
        dimension is assumed to be the interpolating dimension and is the same
        length of `xk`.
    order : int, optional
        Order of the spline
    kind : str, optional
        Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
        'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
    conds : optional
        Conds

    Returns
    -------
    splmake : tuple
        Return a (`xk`, `cvals`, `k`) representation of a spline given
        data-points where the (internal) knots are at the data-points.

    Raises
    ------
    ValueError
        If `order` is negative.
    NotImplementedError
        If no solver named ``_find_<kind>`` exists in this module.

    """
    yk = np.asanyarray(yk)

    order = int(order)
    if order < 0:
        raise ValueError("order must not be negative")
    if order == 0:
        return xk, yk[:-1], order
    elif order == 1:
        return xk, yk, order

    # Dispatch to the module-level solver for this kind by name.  This
    # replaces the previous eval()-based lookup guarded by a bare except:
    # only a missing solver maps to NotImplementedError, so real errors
    # (including KeyboardInterrupt/SystemExit) are no longer swallowed.
    func = globals().get('_find_%s' % kind)
    if func is None:
        raise NotImplementedError

    # the constraint matrix
    B = _fitpack._bsplmat(order, xk)
    coefs = func(xk, yk, order, conds, B)
    return xk, coefs, order


@np.deprecate(message="spleval is deprecated in scipy 0.19.0, "
                      "use BSpline instead.")
def spleval(xck, xnew, deriv=0):
    """
    Evaluate a fixed spline represented by the given tuple at the new x-values

    The `xj` values are the interior knot points.  The approximation
    region is `xj[0]` to `xj[-1]`.  If N+1 is the length of `xj`, then `cvals`
    should have length N+k where `k` is the order of the spline.

    Parameters
    ----------
    (xj, cvals, k) : tuple
        Parameters that define the fixed spline
    xj : array_like
        Interior knot points
    cvals : array_like
        Curvature
    k : int
        Order of the spline
    xnew : array_like
        Locations to calculate spline
    deriv : int
        Deriv

    Returns
    -------
    spleval : ndarray
        If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
        `xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
        providing the interpolation of multiple curves.

    Notes
    -----
    Internally, an additional `k`-1 knot points are added on either side of
    the spline.

    """
    (xj, cvals, k) = xck
    oldshape = np.shape(xnew)
    xx = np.ravel(xnew)
    sh = cvals.shape[1:]
    res = np.empty(xx.shape + sh, dtype=cvals.dtype)
    for index in np.ndindex(*sh):
        sl = (slice(None),) + index
        if issubclass(cvals.dtype.type, np.complexfloating):
            # _bspleval works on real arrays; evaluate real and imaginary
            # parts separately.
            res[sl].real = _fitpack._bspleval(xx, xj, cvals.real[sl], k, deriv)
            res[sl].imag = _fitpack._bspleval(xx, xj, cvals.imag[sl], k, deriv)
        else:
            res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv)
    res.shape = oldshape + sh
    return res


@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, "
                      "use PPoly.from_spline instead.")
def spltopp(xk, cvals, k):
    """Return a piece-wise polynomial object from a fixed-spline tuple."""
    return ppform.fromspline(xk, cvals, k)


@np.deprecate(message="spline is deprecated in scipy 0.19.0, "
                      "use BSpline class instead.")
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
    """
    Interpolate a curve at new points using a spline fit

    Parameters
    ----------
    xk, yk : array_like
        The x and y values that define the curve.
    xnew : array_like
        The x values where spline should estimate the y values.
    order : int
        Default is 3.
    kind : string
        One of {'smoothest'}
    conds : Don't know
        Don't know

    Returns
    -------
    spline : ndarray
        An array of y values; the spline evaluated at the positions `xnew`.

    """
    return spleval(splmake(xk, yk, order=order, kind=kind, conds=conds), xnew)
unknown
codeparrot/codeparrot-clean
The following example shows how to export a simple model.
{doc}`Learn more about the torch.export-based ONNX Exporter <onnx_export>` ## Frequently Asked Questions Q: I have exported my LLM model, but its input size seems to be fixed? The tracer records the shapes of the example inputs. If the model should accept inputs of dynamic shapes, set ``dynamic_shapes`` when calling {func}`torch.onnx.export`. Q: How to export models containing loops? See {ref}`torch.cond <cond>`. ## Contributing / Developing The ONNX exporter is a community project and we welcome contributions. We follow the [PyTorch guidelines for contributions](https://github.com/pytorch/pytorch/blob/main/CONTRIBUTING.md), but you might also be interested in reading our [development wiki](https://github.com/pytorch/pytorch/wiki/PyTorch-ONNX-exporter). ## torch.onnx APIs ```{eval-rst} .. automodule:: torch.onnx ``` ### Functions ```{eval-rst} .. autofunction:: export :noindex: .. autofunction:: is_in_onnx_export :noindex: ``` ### Classes ```{eval-rst} .. autoclass:: ONNXProgram :noindex: .. autoclass:: OnnxExporterError :noindex: ``` ```{eval-rst} .. toctree:: :hidden: onnx_export onnx_ops onnx_verification onnx_testing ``` ### Deprecated APIs ```{eval-rst} .. deprecated:: 2.6 These functions are deprecated and will be removed in a future version. .. autofunction:: register_custom_op_symbolic .. autofunction:: unregister_custom_op_symbolic .. autofunction:: select_model_mode_for_export ``` ```{eval-rst} .. py:module:: torch.onnx.errors .. py:module:: torch.onnx.operators .. py:module:: torch.onnx.symbolic_helper .. py:module:: torch.onnx.symbolic_opset10 .. py:module:: torch.onnx.symbolic_opset11 .. py:module:: torch.onnx.symbolic_opset12 .. py:module:: torch.onnx.symbolic_opset13 .. py:module:: torch.onnx.symbolic_opset14 .. py:module:: torch.onnx.symbolic_opset15 .. py:module:: torch.onnx.symbolic_opset16 .. py:module:: torch.onnx.symbolic_opset17 .. py:module:: torch.onnx.symbolic_opset18 .. py:module:: torch.onnx.symbolic_opset19 .. 
py:module:: torch.onnx.symbolic_opset20 .. py:module:: torch.onnx.symbolic_opset7 .. py:module:: torch.onnx.symbolic_opset8 .. py:module:: torch.onnx.symbolic_opset9 .. py:module:: torch.onnx.utils ```
unknown
github
https://github.com/pytorch/pytorch
docs/source/onnx.md
# Benchmark System **VM HOST:** Travis **Machine:** Ubuntu 16.04.6 LTS x64 **Date:** May 04th, 2020 **Version:** Gin v1.6.3 **Go Version:** 1.14.2 linux/amd64 **Source:** [Go HTTP Router Benchmark](https://github.com/gin-gonic/go-http-routing-benchmark) **Result:** [See the gist](https://gist.github.com/appleboy/b5f2ecfaf50824ae9c64dcfb9165ae5e) or [Travis result](https://travis-ci.org/github/gin-gonic/go-http-routing-benchmark/jobs/682947061) ## Static Routes: 157 ```sh Gin: 34936 Bytes HttpServeMux: 14512 Bytes Ace: 30680 Bytes Aero: 34536 Bytes Bear: 30456 Bytes Beego: 98456 Bytes Bone: 40224 Bytes Chi: 83608 Bytes Denco: 10216 Bytes Echo: 80328 Bytes GocraftWeb: 55288 Bytes Goji: 29744 Bytes Gojiv2: 105840 Bytes GoJsonRest: 137496 Bytes GoRestful: 816936 Bytes GorillaMux: 585632 Bytes GowwwRouter: 24968 Bytes HttpRouter: 21712 Bytes HttpTreeMux: 73448 Bytes Kocha: 115472 Bytes LARS: 30640 Bytes Macaron: 38592 Bytes Martini: 310864 Bytes Pat: 19696 Bytes Possum: 89920 Bytes R2router: 23712 Bytes Rivet: 24608 Bytes Tango: 28264 Bytes TigerTonic: 78768 Bytes Traffic: 538976 Bytes Vulcan: 369960 Bytes ``` ## GithubAPI Routes: 203 ```sh Gin: 58512 Bytes Ace: 48688 Bytes Aero: 318568 Bytes Bear: 84248 Bytes Beego: 150936 Bytes Bone: 100976 Bytes Chi: 95112 Bytes Denco: 36736 Bytes Echo: 100296 Bytes GocraftWeb: 95432 Bytes Goji: 49680 Bytes Gojiv2: 104704 Bytes GoJsonRest: 141976 Bytes GoRestful: 1241656 Bytes GorillaMux: 1322784 Bytes GowwwRouter: 80008 Bytes HttpRouter: 37144 Bytes HttpTreeMux: 78800 Bytes Kocha: 785120 Bytes LARS: 48600 Bytes Macaron: 92784 Bytes Martini: 485264 Bytes Pat: 21200 Bytes Possum: 85312 Bytes R2router: 47104 Bytes Rivet: 42840 Bytes Tango: 54840 Bytes TigerTonic: 95264 Bytes Traffic: 921744 Bytes Vulcan: 425992 Bytes ``` ## GPlusAPI Routes: 13 ```sh Gin: 4384 Bytes Ace: 3712 Bytes Aero: 26056 Bytes Bear: 7112 Bytes Beego: 10272 Bytes Bone: 6688 Bytes Chi: 8024 Bytes Denco: 3264 Bytes Echo: 9688 Bytes GocraftWeb: 7496 Bytes Goji: 3152 
Bytes Gojiv2: 7376 Bytes GoJsonRest: 11400 Bytes GoRestful: 74328 Bytes GorillaMux: 66208 Bytes GowwwRouter: 5744 Bytes HttpRouter: 2808 Bytes HttpTreeMux: 7440 Bytes Kocha: 128880 Bytes LARS: 3656 Bytes Macaron: 8656 Bytes Martini: 23920 Bytes Pat: 1856 Bytes Possum: 7248 Bytes R2router: 3928 Bytes Rivet: 3064 Bytes Tango: 5168 Bytes TigerTonic: 9408 Bytes Traffic: 46400 Bytes Vulcan: 25544 Bytes ``` ## ParseAPI Routes: 26 ```sh Gin: 7776 Bytes Ace: 6704 Bytes Aero: 28488 Bytes Bear: 12320 Bytes Beego: 19280 Bytes Bone: 11440 Bytes Chi: 9744 Bytes Denco: 4192 Bytes Echo: 11664 Bytes GocraftWeb: 12800 Bytes Goji: 5680 Bytes Gojiv2: 14464 Bytes GoJsonRest: 14072 Bytes GoRestful: 116264 Bytes GorillaMux: 105880 Bytes GowwwRouter: 9344 Bytes HttpRouter: 5072 Bytes HttpTreeMux: 7848 Bytes Kocha: 181712 Bytes LARS: 6632 Bytes Macaron: 13648 Bytes Martini: 45888 Bytes Pat: 2560 Bytes Possum: 9200 Bytes R2router: 7056 Bytes Rivet: 5680 Bytes Tango: 8920 Bytes TigerTonic: 9840 Bytes Traffic: 79096 Bytes Vulcan: 44504 Bytes ``` ## Static Routes ```sh BenchmarkGin_StaticAll 62169 19319 ns/op 0 B/op 0 allocs/op BenchmarkAce_StaticAll 65428 18313 ns/op 0 B/op 0 allocs/op BenchmarkAero_StaticAll 121132 9632 ns/op 0 B/op 0 allocs/op BenchmarkHttpServeMux_StaticAll 52626 22758 ns/op 0 B/op 0 allocs/op BenchmarkBeego_StaticAll 9962 179058 ns/op 55264 B/op 471 allocs/op BenchmarkBear_StaticAll 14894 80966 ns/op 20272 B/op 469 allocs/op BenchmarkBone_StaticAll 18718 64065 ns/op 0 B/op 0 allocs/op BenchmarkChi_StaticAll 10000 149827 ns/op 67824 B/op 471 allocs/op BenchmarkDenco_StaticAll 211393 5680 ns/op 0 B/op 0 allocs/op BenchmarkEcho_StaticAll 49341 24343 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_StaticAll 10000 126209 ns/op 46312 B/op 785 allocs/op BenchmarkGoji_StaticAll 27956 43174 ns/op 0 B/op 0 allocs/op BenchmarkGojiv2_StaticAll 3430 370718 ns/op 205984 B/op 1570 allocs/op BenchmarkGoJsonRest_StaticAll 9134 188888 ns/op 51653 B/op 1727 allocs/op 
BenchmarkGoRestful_StaticAll 706 1703330 ns/op 613280 B/op 2053 allocs/op BenchmarkGorillaMux_StaticAll 1268 924083 ns/op 153233 B/op 1413 allocs/op BenchmarkGowwwRouter_StaticAll 63374 18935 ns/op 0 B/op 0 allocs/op BenchmarkHttpRouter_StaticAll 109938 10902 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_StaticAll 109166 10861 ns/op 0 B/op 0 allocs/op BenchmarkKocha_StaticAll 92258 12992 ns/op 0 B/op 0 allocs/op BenchmarkLARS_StaticAll 65200 18387 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_StaticAll 5671 291501 ns/op 115553 B/op 1256 allocs/op BenchmarkMartini_StaticAll 807 1460498 ns/op 125444 B/op 1717 allocs/op BenchmarkPat_StaticAll 513 2342396 ns/op 602832 B/op 12559 allocs/op BenchmarkPossum_StaticAll 10000 128270 ns/op 65312 B/op 471 allocs/op BenchmarkR2router_StaticAll 16726 71760 ns/op 22608 B/op 628 allocs/op BenchmarkRivet_StaticAll 41722 28723 ns/op 0 B/op 0 allocs/op BenchmarkTango_StaticAll 7606 205082 ns/op 39209 B/op 1256 allocs/op BenchmarkTigerTonic_StaticAll 26247 45806 ns/op 7376 B/op 157 allocs/op BenchmarkTraffic_StaticAll 550 2284518 ns/op 754864 B/op 14601 allocs/op BenchmarkVulcan_StaticAll 10000 131343 ns/op 15386 B/op 471 allocs/op ``` ## Micro Benchmarks ```sh BenchmarkGin_Param 18785022 63.9 ns/op 0 B/op 0 allocs/op BenchmarkAce_Param 14689765 81.5 ns/op 0 B/op 0 allocs/op BenchmarkAero_Param 23094770 51.2 ns/op 0 B/op 0 allocs/op BenchmarkBear_Param 1417045 845 ns/op 456 B/op 5 allocs/op BenchmarkBeego_Param 1000000 1080 ns/op 352 B/op 3 allocs/op BenchmarkBone_Param 1000000 1463 ns/op 816 B/op 6 allocs/op BenchmarkChi_Param 1378756 885 ns/op 432 B/op 3 allocs/op BenchmarkDenco_Param 8557899 143 ns/op 32 B/op 1 allocs/op BenchmarkEcho_Param 16433347 75.5 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_Param 1000000 1218 ns/op 648 B/op 8 allocs/op BenchmarkGoji_Param 1921248 617 ns/op 336 B/op 2 allocs/op BenchmarkGojiv2_Param 561848 2156 ns/op 1328 B/op 11 allocs/op BenchmarkGoJsonRest_Param 1000000 1358 ns/op 649 B/op 13 allocs/op 
BenchmarkGoRestful_Param 224857 5307 ns/op 4192 B/op 14 allocs/op BenchmarkGorillaMux_Param 498313 2459 ns/op 1280 B/op 10 allocs/op BenchmarkGowwwRouter_Param 1864354 654 ns/op 432 B/op 3 allocs/op BenchmarkHttpRouter_Param 26269074 47.7 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_Param 2109829 557 ns/op 352 B/op 3 allocs/op BenchmarkKocha_Param 5050216 243 ns/op 56 B/op 3 allocs/op BenchmarkLARS_Param 19811712 59.9 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_Param 662746 2329 ns/op 1072 B/op 10 allocs/op BenchmarkMartini_Param 279902 4260 ns/op 1072 B/op 10 allocs/op BenchmarkPat_Param 1000000 1382 ns/op 536 B/op 11 allocs/op BenchmarkPossum_Param 1000000 1014 ns/op 496 B/op 5 allocs/op BenchmarkR2router_Param 1712559 707 ns/op 432 B/op 5 allocs/op BenchmarkRivet_Param 6648086 182 ns/op 48 B/op 1 allocs/op BenchmarkTango_Param 1221504 994 ns/op 248 B/op 8 allocs/op BenchmarkTigerTonic_Param 891661 2261 ns/op 776 B/op 16 allocs/op BenchmarkTraffic_Param 350059 3598 ns/op 1856 B/op 21 allocs/op BenchmarkVulcan_Param 2517823 472 ns/op 98 B/op 3 allocs/op BenchmarkAce_Param5 9214365 130 ns/op 0 B/op 0 allocs/op BenchmarkAero_Param5 15369013 77.9 ns/op 0 B/op 0 allocs/op BenchmarkBear_Param5 1000000 1113 ns/op 501 B/op 5 allocs/op BenchmarkBeego_Param5 1000000 1269 ns/op 352 B/op 3 allocs/op BenchmarkBone_Param5 986820 1873 ns/op 864 B/op 6 allocs/op BenchmarkChi_Param5 1000000 1156 ns/op 432 B/op 3 allocs/op BenchmarkDenco_Param5 3036331 400 ns/op 160 B/op 1 allocs/op BenchmarkEcho_Param5 6447133 186 ns/op 0 B/op 0 allocs/op BenchmarkGin_Param5 10786068 110 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_Param5 844820 1944 ns/op 920 B/op 11 allocs/op BenchmarkGoji_Param5 1474965 827 ns/op 336 B/op 2 allocs/op BenchmarkGojiv2_Param5 442820 2516 ns/op 1392 B/op 11 allocs/op BenchmarkGoJsonRest_Param5 507555 2711 ns/op 1097 B/op 16 allocs/op BenchmarkGoRestful_Param5 216481 6093 ns/op 4288 B/op 14 allocs/op BenchmarkGorillaMux_Param5 314402 3628 ns/op 1344 B/op 10 
allocs/op BenchmarkGowwwRouter_Param5 1624660 733 ns/op 432 B/op 3 allocs/op BenchmarkHttpRouter_Param5 13167324 92.0 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_Param5 1000000 1295 ns/op 576 B/op 6 allocs/op BenchmarkKocha_Param5 1000000 1138 ns/op 440 B/op 10 allocs/op BenchmarkLARS_Param5 11580613 105 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_Param5 473596 2755 ns/op 1072 B/op 10 allocs/op BenchmarkMartini_Param5 230756 5111 ns/op 1232 B/op 11 allocs/op BenchmarkPat_Param5 469190 3370 ns/op 888 B/op 29 allocs/op BenchmarkPossum_Param5 1000000 1002 ns/op 496 B/op 5 allocs/op BenchmarkR2router_Param5 1422129 844 ns/op 432 B/op 5 allocs/op BenchmarkRivet_Param5 2263789 539 ns/op 240 B/op 1 allocs/op BenchmarkTango_Param5 1000000 1256 ns/op 360 B/op 8 allocs/op BenchmarkTigerTonic_Param5 175500 7492 ns/op 2279 B/op 39 allocs/op BenchmarkTraffic_Param5 233631 5816 ns/op 2208 B/op 27 allocs/op BenchmarkVulcan_Param5 1923416 629 ns/op 98 B/op 3 allocs/op BenchmarkAce_Param20 4321266 281 ns/op 0 B/op 0 allocs/op BenchmarkAero_Param20 31501641 35.2 ns/op 0 B/op 0 allocs/op BenchmarkBear_Param20 335204 3489 ns/op 1665 B/op 5 allocs/op BenchmarkBeego_Param20 503674 2860 ns/op 352 B/op 3 allocs/op BenchmarkBone_Param20 298922 4741 ns/op 2031 B/op 6 allocs/op BenchmarkChi_Param20 878181 1957 ns/op 432 B/op 3 allocs/op BenchmarkDenco_Param20 1000000 1360 ns/op 640 B/op 1 allocs/op BenchmarkEcho_Param20 2104946 580 ns/op 0 B/op 0 allocs/op BenchmarkGin_Param20 4167204 290 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_Param20 173064 7514 ns/op 3796 B/op 15 allocs/op BenchmarkGoji_Param20 458778 2651 ns/op 1247 B/op 2 allocs/op BenchmarkGojiv2_Param20 364862 3178 ns/op 1632 B/op 11 allocs/op BenchmarkGoJsonRest_Param20 125514 9760 ns/op 4485 B/op 20 allocs/op BenchmarkGoRestful_Param20 101217 11964 ns/op 6715 B/op 18 allocs/op BenchmarkGorillaMux_Param20 147654 8132 ns/op 3452 B/op 12 allocs/op BenchmarkGowwwRouter_Param20 1000000 1225 ns/op 432 B/op 3 allocs/op 
BenchmarkHttpRouter_Param20 4920895 247 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_Param20 173202 6605 ns/op 3196 B/op 10 allocs/op BenchmarkKocha_Param20 345988 3620 ns/op 1808 B/op 27 allocs/op BenchmarkLARS_Param20 4592326 262 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_Param20 166492 7286 ns/op 2924 B/op 12 allocs/op BenchmarkMartini_Param20 122162 10653 ns/op 3595 B/op 13 allocs/op BenchmarkPat_Param20 78630 15239 ns/op 4424 B/op 93 allocs/op BenchmarkPossum_Param20 1000000 1008 ns/op 496 B/op 5 allocs/op BenchmarkR2router_Param20 294981 4587 ns/op 2284 B/op 7 allocs/op BenchmarkRivet_Param20 691798 2090 ns/op 1024 B/op 1 allocs/op BenchmarkTango_Param20 842440 2505 ns/op 856 B/op 8 allocs/op BenchmarkTigerTonic_Param20 38614 31509 ns/op 9870 B/op 119 allocs/op BenchmarkTraffic_Param20 57633 21107 ns/op 7853 B/op 47 allocs/op BenchmarkVulcan_Param20 1000000 1178 ns/op 98 B/op 3 allocs/op BenchmarkAce_ParamWrite 7330743 180 ns/op 8 B/op 1 allocs/op BenchmarkAero_ParamWrite 13833598 86.7 ns/op 0 B/op 0 allocs/op BenchmarkBear_ParamWrite 1363321 867 ns/op 456 B/op 5 allocs/op BenchmarkBeego_ParamWrite 1000000 1104 ns/op 360 B/op 4 allocs/op BenchmarkBone_ParamWrite 1000000 1475 ns/op 816 B/op 6 allocs/op BenchmarkChi_ParamWrite 1320590 892 ns/op 432 B/op 3 allocs/op BenchmarkDenco_ParamWrite 7093605 172 ns/op 32 B/op 1 allocs/op BenchmarkEcho_ParamWrite 8434424 161 ns/op 8 B/op 1 allocs/op BenchmarkGin_ParamWrite 10377034 118 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_ParamWrite 1000000 1266 ns/op 656 B/op 9 allocs/op BenchmarkGoji_ParamWrite 1874168 654 ns/op 336 B/op 2 allocs/op BenchmarkGojiv2_ParamWrite 459032 2352 ns/op 1360 B/op 13 allocs/op BenchmarkGoJsonRest_ParamWrite 499434 2145 ns/op 1128 B/op 18 allocs/op BenchmarkGoRestful_ParamWrite 241087 5470 ns/op 4200 B/op 15 allocs/op BenchmarkGorillaMux_ParamWrite 425686 2522 ns/op 1280 B/op 10 allocs/op BenchmarkGowwwRouter_ParamWrite 922172 1778 ns/op 976 B/op 8 allocs/op BenchmarkHttpRouter_ParamWrite 
15392049 77.7 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_ParamWrite 1973385 597 ns/op 352 B/op 3 allocs/op BenchmarkKocha_ParamWrite 4262500 281 ns/op 56 B/op 3 allocs/op BenchmarkLARS_ParamWrite 10764410 113 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_ParamWrite 486769 2726 ns/op 1176 B/op 14 allocs/op BenchmarkMartini_ParamWrite 264804 4842 ns/op 1176 B/op 14 allocs/op BenchmarkPat_ParamWrite 735116 2047 ns/op 960 B/op 15 allocs/op BenchmarkPossum_ParamWrite 1000000 1004 ns/op 496 B/op 5 allocs/op BenchmarkR2router_ParamWrite 1592136 768 ns/op 432 B/op 5 allocs/op BenchmarkRivet_ParamWrite 3582051 339 ns/op 112 B/op 2 allocs/op BenchmarkTango_ParamWrite 2237337 534 ns/op 136 B/op 4 allocs/op BenchmarkTigerTonic_ParamWrite 439608 3136 ns/op 1216 B/op 21 allocs/op BenchmarkTraffic_ParamWrite 306979 4328 ns/op 2280 B/op 25 allocs/op BenchmarkVulcan_ParamWrite 2529973 472 ns/op 98 B/op 3 allocs/op ``` ## GitHub ```sh BenchmarkGin_GithubStatic 15629472 76.7 ns/op 0 B/op 0 allocs/op BenchmarkAce_GithubStatic 15542612 75.9 ns/op 0 B/op 0 allocs/op BenchmarkAero_GithubStatic 24777151 48.5 ns/op 0 B/op 0 allocs/op BenchmarkBear_GithubStatic 2788894 435 ns/op 120 B/op 3 allocs/op BenchmarkBeego_GithubStatic 1000000 1064 ns/op 352 B/op 3 allocs/op BenchmarkBone_GithubStatic 93507 12838 ns/op 2880 B/op 60 allocs/op BenchmarkChi_GithubStatic 1387743 860 ns/op 432 B/op 3 allocs/op BenchmarkDenco_GithubStatic 39384996 30.4 ns/op 0 B/op 0 allocs/op BenchmarkEcho_GithubStatic 12076382 99.1 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_GithubStatic 1596495 756 ns/op 296 B/op 5 allocs/op BenchmarkGoji_GithubStatic 6364876 189 ns/op 0 B/op 0 allocs/op BenchmarkGojiv2_GithubStatic 550202 2098 ns/op 1312 B/op 10 allocs/op BenchmarkGoRestful_GithubStatic 102183 12552 ns/op 4256 B/op 13 allocs/op BenchmarkGoJsonRest_GithubStatic 1000000 1029 ns/op 329 B/op 11 allocs/op BenchmarkGorillaMux_GithubStatic 255552 5190 ns/op 976 B/op 9 allocs/op BenchmarkGowwwRouter_GithubStatic 15531916 77.1 
ns/op 0 B/op 0 allocs/op BenchmarkHttpRouter_GithubStatic 27920724 43.1 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_GithubStatic 21448953 55.8 ns/op 0 B/op 0 allocs/op BenchmarkKocha_GithubStatic 21405310 56.0 ns/op 0 B/op 0 allocs/op BenchmarkLARS_GithubStatic 13625156 89.0 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_GithubStatic 1000000 1747 ns/op 736 B/op 8 allocs/op BenchmarkMartini_GithubStatic 187186 7326 ns/op 768 B/op 9 allocs/op BenchmarkPat_GithubStatic 109143 11563 ns/op 3648 B/op 76 allocs/op BenchmarkPossum_GithubStatic 1575898 770 ns/op 416 B/op 3 allocs/op BenchmarkR2router_GithubStatic 3046231 404 ns/op 144 B/op 4 allocs/op BenchmarkRivet_GithubStatic 11484826 105 ns/op 0 B/op 0 allocs/op BenchmarkTango_GithubStatic 1000000 1153 ns/op 248 B/op 8 allocs/op BenchmarkTigerTonic_GithubStatic 4929780 249 ns/op 48 B/op 1 allocs/op BenchmarkTraffic_GithubStatic 106351 11819 ns/op 4664 B/op 90 allocs/op BenchmarkVulcan_GithubStatic 1613271 722 ns/op 98 B/op 3 allocs/op BenchmarkAce_GithubParam 8386032 143 ns/op 0 B/op 0 allocs/op BenchmarkAero_GithubParam 11816200 102 ns/op 0 B/op 0 allocs/op BenchmarkBear_GithubParam 1000000 1012 ns/op 496 B/op 5 allocs/op BenchmarkBeego_GithubParam 1000000 1157 ns/op 352 B/op 3 allocs/op BenchmarkBone_GithubParam 184653 6912 ns/op 1888 B/op 19 allocs/op BenchmarkChi_GithubParam 1000000 1102 ns/op 432 B/op 3 allocs/op BenchmarkDenco_GithubParam 3484798 352 ns/op 128 B/op 1 allocs/op BenchmarkEcho_GithubParam 6337380 189 ns/op 0 B/op 0 allocs/op BenchmarkGin_GithubParam 9132032 131 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_GithubParam 1000000 1446 ns/op 712 B/op 9 allocs/op BenchmarkGoji_GithubParam 1248640 977 ns/op 336 B/op 2 allocs/op BenchmarkGojiv2_GithubParam 383233 2784 ns/op 1408 B/op 13 allocs/op BenchmarkGoJsonRest_GithubParam 1000000 1991 ns/op 713 B/op 14 allocs/op BenchmarkGoRestful_GithubParam 76414 16015 ns/op 4352 B/op 16 allocs/op BenchmarkGorillaMux_GithubParam 150026 7663 ns/op 1296 B/op 10 allocs/op 
BenchmarkGowwwRouter_GithubParam 1592044 751 ns/op 432 B/op 3 allocs/op BenchmarkHttpRouter_GithubParam 10420628 115 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_GithubParam 1403755 835 ns/op 384 B/op 4 allocs/op BenchmarkKocha_GithubParam 2286170 533 ns/op 128 B/op 5 allocs/op BenchmarkLARS_GithubParam 9540374 129 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_GithubParam 533154 2742 ns/op 1072 B/op 10 allocs/op BenchmarkMartini_GithubParam 119397 9638 ns/op 1152 B/op 11 allocs/op BenchmarkPat_GithubParam 150675 8858 ns/op 2408 B/op 48 allocs/op BenchmarkPossum_GithubParam 1000000 1001 ns/op 496 B/op 5 allocs/op BenchmarkR2router_GithubParam 1602886 761 ns/op 432 B/op 5 allocs/op BenchmarkRivet_GithubParam 2986579 409 ns/op 96 B/op 1 allocs/op BenchmarkTango_GithubParam 1000000 1356 ns/op 344 B/op 8 allocs/op BenchmarkTigerTonic_GithubParam 388899 3429 ns/op 1176 B/op 22 allocs/op BenchmarkTraffic_GithubParam 123160 9734 ns/op 2816 B/op 40 allocs/op BenchmarkVulcan_GithubParam 1000000 1138 ns/op 98 B/op 3 allocs/op BenchmarkAce_GithubAll 40543 29670 ns/op 0 B/op 0 allocs/op BenchmarkAero_GithubAll 57632 20648 ns/op 0 B/op 0 allocs/op BenchmarkBear_GithubAll 9234 216179 ns/op 86448 B/op 943 allocs/op BenchmarkBeego_GithubAll 7407 243496 ns/op 71456 B/op 609 allocs/op BenchmarkBone_GithubAll 420 2922835 ns/op 720160 B/op 8620 allocs/op BenchmarkChi_GithubAll 7620 238331 ns/op 87696 B/op 609 allocs/op BenchmarkDenco_GithubAll 18355 64494 ns/op 20224 B/op 167 allocs/op BenchmarkEcho_GithubAll 31251 38479 ns/op 0 B/op 0 allocs/op BenchmarkGin_GithubAll 43550 27364 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_GithubAll 4117 300062 ns/op 131656 B/op 1686 allocs/op BenchmarkGoji_GithubAll 3274 416158 ns/op 56112 B/op 334 allocs/op BenchmarkGojiv2_GithubAll 1402 870518 ns/op 352720 B/op 4321 allocs/op BenchmarkGoJsonRest_GithubAll 2976 401507 ns/op 134371 B/op 2737 allocs/op BenchmarkGoRestful_GithubAll 410 2913158 ns/op 910144 B/op 2938 allocs/op BenchmarkGorillaMux_GithubAll 
346 3384987 ns/op 251650 B/op 1994 allocs/op BenchmarkGowwwRouter_GithubAll 10000 143025 ns/op 72144 B/op 501 allocs/op BenchmarkHttpRouter_GithubAll 55938 21360 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_GithubAll 10000 153944 ns/op 65856 B/op 671 allocs/op BenchmarkKocha_GithubAll 10000 106315 ns/op 23304 B/op 843 allocs/op BenchmarkLARS_GithubAll 47779 25084 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_GithubAll 3266 371907 ns/op 149409 B/op 1624 allocs/op BenchmarkMartini_GithubAll 331 3444706 ns/op 226551 B/op 2325 allocs/op BenchmarkPat_GithubAll 273 4381818 ns/op 1483152 B/op 26963 allocs/op BenchmarkPossum_GithubAll 10000 164367 ns/op 84448 B/op 609 allocs/op BenchmarkR2router_GithubAll 10000 160220 ns/op 77328 B/op 979 allocs/op BenchmarkRivet_GithubAll 14625 82453 ns/op 16272 B/op 167 allocs/op BenchmarkTango_GithubAll 6255 279611 ns/op 63826 B/op 1618 allocs/op BenchmarkTigerTonic_GithubAll 2008 687874 ns/op 193856 B/op 4474 allocs/op BenchmarkTraffic_GithubAll 355 3478508 ns/op 820744 B/op 14114 allocs/op BenchmarkVulcan_GithubAll 6885 193333 ns/op 19894 B/op 609 allocs/op ``` ## Google+ ```sh BenchmarkGin_GPlusStatic 19247326 62.2 ns/op 0 B/op 0 allocs/op BenchmarkAce_GPlusStatic 20235060 59.2 ns/op 0 B/op 0 allocs/op BenchmarkAero_GPlusStatic 31978935 37.6 ns/op 0 B/op 0 allocs/op BenchmarkBear_GPlusStatic 3516523 341 ns/op 104 B/op 3 allocs/op BenchmarkBeego_GPlusStatic 1212036 991 ns/op 352 B/op 3 allocs/op BenchmarkBone_GPlusStatic 6736242 183 ns/op 32 B/op 1 allocs/op BenchmarkChi_GPlusStatic 1490640 814 ns/op 432 B/op 3 allocs/op BenchmarkDenco_GPlusStatic 55006856 21.8 ns/op 0 B/op 0 allocs/op BenchmarkEcho_GPlusStatic 17688258 67.9 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_GPlusStatic 1829181 666 ns/op 280 B/op 5 allocs/op BenchmarkGoji_GPlusStatic 9147451 130 ns/op 0 B/op 0 allocs/op BenchmarkGojiv2_GPlusStatic 594015 2063 ns/op 1312 B/op 10 allocs/op BenchmarkGoJsonRest_GPlusStatic 1264906 950 ns/op 329 B/op 11 allocs/op 
BenchmarkGoRestful_GPlusStatic 231558 5341 ns/op 3872 B/op 13 allocs/op BenchmarkGorillaMux_GPlusStatic 908418 1809 ns/op 976 B/op 9 allocs/op BenchmarkGowwwRouter_GPlusStatic 40684604 29.5 ns/op 0 B/op 0 allocs/op BenchmarkHttpRouter_GPlusStatic 46742804 25.7 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_GPlusStatic 32567161 36.9 ns/op 0 B/op 0 allocs/op BenchmarkKocha_GPlusStatic 33800060 35.3 ns/op 0 B/op 0 allocs/op BenchmarkLARS_GPlusStatic 20431858 60.0 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_GPlusStatic 1000000 1745 ns/op 736 B/op 8 allocs/op BenchmarkMartini_GPlusStatic 442248 3619 ns/op 768 B/op 9 allocs/op BenchmarkPat_GPlusStatic 4328004 292 ns/op 96 B/op 2 allocs/op BenchmarkPossum_GPlusStatic 1570753 763 ns/op 416 B/op 3 allocs/op BenchmarkR2router_GPlusStatic 3339474 355 ns/op 144 B/op 4 allocs/op BenchmarkRivet_GPlusStatic 18570961 64.7 ns/op 0 B/op 0 allocs/op BenchmarkTango_GPlusStatic 1388702 860 ns/op 200 B/op 8 allocs/op BenchmarkTigerTonic_GPlusStatic 7803543 159 ns/op 32 B/op 1 allocs/op BenchmarkTraffic_GPlusStatic 878605 2171 ns/op 1112 B/op 16 allocs/op BenchmarkVulcan_GPlusStatic 2742446 437 ns/op 98 B/op 3 allocs/op BenchmarkAce_GPlusParam 11626975 105 ns/op 0 B/op 0 allocs/op BenchmarkAero_GPlusParam 16914322 71.6 ns/op 0 B/op 0 allocs/op BenchmarkBear_GPlusParam 1405173 832 ns/op 480 B/op 5 allocs/op BenchmarkBeego_GPlusParam 1000000 1075 ns/op 352 B/op 3 allocs/op BenchmarkBone_GPlusParam 1000000 1557 ns/op 816 B/op 6 allocs/op BenchmarkChi_GPlusParam 1347926 894 ns/op 432 B/op 3 allocs/op BenchmarkDenco_GPlusParam 5513000 212 ns/op 64 B/op 1 allocs/op BenchmarkEcho_GPlusParam 11884383 101 ns/op 0 B/op 0 allocs/op BenchmarkGin_GPlusParam 12898952 93.1 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_GPlusParam 1000000 1194 ns/op 648 B/op 8 allocs/op BenchmarkGoji_GPlusParam 1857229 645 ns/op 336 B/op 2 allocs/op BenchmarkGojiv2_GPlusParam 520939 2322 ns/op 1328 B/op 11 allocs/op BenchmarkGoJsonRest_GPlusParam 1000000 1536 ns/op 649 B/op 
13 allocs/op BenchmarkGoRestful_GPlusParam 205449 5800 ns/op 4192 B/op 14 allocs/op BenchmarkGorillaMux_GPlusParam 395310 3188 ns/op 1280 B/op 10 allocs/op BenchmarkGowwwRouter_GPlusParam 1851798 667 ns/op 432 B/op 3 allocs/op BenchmarkHttpRouter_GPlusParam 18420789 65.2 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_GPlusParam 1878463 629 ns/op 352 B/op 3 allocs/op BenchmarkKocha_GPlusParam 4495610 273 ns/op 56 B/op 3 allocs/op BenchmarkLARS_GPlusParam 14615976 83.2 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_GPlusParam 584145 2549 ns/op 1072 B/op 10 allocs/op BenchmarkMartini_GPlusParam 250501 4583 ns/op 1072 B/op 10 allocs/op BenchmarkPat_GPlusParam 1000000 1645 ns/op 576 B/op 11 allocs/op BenchmarkPossum_GPlusParam 1000000 1008 ns/op 496 B/op 5 allocs/op BenchmarkR2router_GPlusParam 1708191 688 ns/op 432 B/op 5 allocs/op BenchmarkRivet_GPlusParam 5795014 211 ns/op 48 B/op 1 allocs/op BenchmarkTango_GPlusParam 1000000 1091 ns/op 264 B/op 8 allocs/op BenchmarkTigerTonic_GPlusParam 760221 2489 ns/op 856 B/op 16 allocs/op BenchmarkTraffic_GPlusParam 309774 4039 ns/op 1872 B/op 21 allocs/op BenchmarkVulcan_GPlusParam 1935730 623 ns/op 98 B/op 3 allocs/op BenchmarkAce_GPlus2Params 9158314 134 ns/op 0 B/op 0 allocs/op BenchmarkAero_GPlus2Params 11300517 107 ns/op 0 B/op 0 allocs/op BenchmarkBear_GPlus2Params 1239238 961 ns/op 496 B/op 5 allocs/op BenchmarkBeego_GPlus2Params 1000000 1202 ns/op 352 B/op 3 allocs/op BenchmarkBone_GPlus2Params 335576 3725 ns/op 1168 B/op 10 allocs/op BenchmarkChi_GPlus2Params 1000000 1014 ns/op 432 B/op 3 allocs/op BenchmarkDenco_GPlus2Params 4394598 280 ns/op 64 B/op 1 allocs/op BenchmarkEcho_GPlus2Params 7851861 154 ns/op 0 B/op 0 allocs/op BenchmarkGin_GPlus2Params 9958588 120 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_GPlus2Params 1000000 1433 ns/op 712 B/op 9 allocs/op BenchmarkGoji_GPlus2Params 1325134 909 ns/op 336 B/op 2 allocs/op BenchmarkGojiv2_GPlus2Params 405955 2870 ns/op 1408 B/op 14 allocs/op BenchmarkGoJsonRest_GPlus2Params 
977038 1987 ns/op 713 B/op 14 allocs/op BenchmarkGoRestful_GPlus2Params 205018 6142 ns/op 4384 B/op 16 allocs/op BenchmarkGorillaMux_GPlus2Params 205641 6015 ns/op 1296 B/op 10 allocs/op BenchmarkGowwwRouter_GPlus2Params 1748542 684 ns/op 432 B/op 3 allocs/op BenchmarkHttpRouter_GPlus2Params 14047102 87.7 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_GPlus2Params 1418673 828 ns/op 384 B/op 4 allocs/op BenchmarkKocha_GPlus2Params 2334562 520 ns/op 128 B/op 5 allocs/op BenchmarkLARS_GPlus2Params 11954094 101 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_GPlus2Params 491552 2890 ns/op 1072 B/op 10 allocs/op BenchmarkMartini_GPlus2Params 120532 9545 ns/op 1200 B/op 13 allocs/op BenchmarkPat_GPlus2Params 194739 6766 ns/op 2168 B/op 33 allocs/op BenchmarkPossum_GPlus2Params 1201224 1009 ns/op 496 B/op 5 allocs/op BenchmarkR2router_GPlus2Params 1575535 756 ns/op 432 B/op 5 allocs/op BenchmarkRivet_GPlus2Params 3698930 325 ns/op 96 B/op 1 allocs/op BenchmarkTango_GPlus2Params 1000000 1212 ns/op 344 B/op 8 allocs/op BenchmarkTigerTonic_GPlus2Params 349350 3660 ns/op 1200 B/op 22 allocs/op BenchmarkTraffic_GPlus2Params 169714 7862 ns/op 2248 B/op 28 allocs/op BenchmarkVulcan_GPlus2Params 1222288 974 ns/op 98 B/op 3 allocs/op BenchmarkAce_GPlusAll 845606 1398 ns/op 0 B/op 0 allocs/op BenchmarkAero_GPlusAll 1000000 1009 ns/op 0 B/op 0 allocs/op BenchmarkBear_GPlusAll 103830 11386 ns/op 5488 B/op 61 allocs/op BenchmarkBeego_GPlusAll 82653 14784 ns/op 4576 B/op 39 allocs/op BenchmarkBone_GPlusAll 36601 33123 ns/op 11744 B/op 109 allocs/op BenchmarkChi_GPlusAll 95264 12831 ns/op 5616 B/op 39 allocs/op BenchmarkDenco_GPlusAll 567681 2950 ns/op 672 B/op 11 allocs/op BenchmarkEcho_GPlusAll 720366 1665 ns/op 0 B/op 0 allocs/op BenchmarkGin_GPlusAll 1000000 1185 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_GPlusAll 71575 16365 ns/op 8040 B/op 103 allocs/op BenchmarkGoji_GPlusAll 136352 9191 ns/op 3696 B/op 22 allocs/op BenchmarkGojiv2_GPlusAll 38006 31802 ns/op 17616 B/op 154 allocs/op 
BenchmarkGoJsonRest_GPlusAll 57238 21561 ns/op 8117 B/op 170 allocs/op BenchmarkGoRestful_GPlusAll 15147 79276 ns/op 55520 B/op 192 allocs/op BenchmarkGorillaMux_GPlusAll 24446 48410 ns/op 16112 B/op 128 allocs/op BenchmarkGowwwRouter_GPlusAll 150112 7770 ns/op 4752 B/op 33 allocs/op BenchmarkHttpRouter_GPlusAll 1367820 878 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_GPlusAll 166628 8004 ns/op 4032 B/op 38 allocs/op BenchmarkKocha_GPlusAll 265694 4570 ns/op 976 B/op 43 allocs/op BenchmarkLARS_GPlusAll 1000000 1068 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_GPlusAll 54564 23305 ns/op 9568 B/op 104 allocs/op BenchmarkMartini_GPlusAll 16274 73845 ns/op 14016 B/op 145 allocs/op BenchmarkPat_GPlusAll 27181 44478 ns/op 15264 B/op 271 allocs/op BenchmarkPossum_GPlusAll 122587 10277 ns/op 5408 B/op 39 allocs/op BenchmarkR2router_GPlusAll 130137 9297 ns/op 5040 B/op 63 allocs/op BenchmarkRivet_GPlusAll 532438 3323 ns/op 768 B/op 11 allocs/op BenchmarkTango_GPlusAll 86054 14531 ns/op 3656 B/op 104 allocs/op BenchmarkTigerTonic_GPlusAll 33936 35356 ns/op 11600 B/op 242 allocs/op BenchmarkTraffic_GPlusAll 17833 68181 ns/op 26248 B/op 341 allocs/op BenchmarkVulcan_GPlusAll 120109 9861 ns/op 1274 B/op 39 allocs/op ``` ## Parse.com ```sh BenchmarkGin_ParseStatic 18877833 63.5 ns/op 0 B/op 0 allocs/op BenchmarkAce_ParseStatic 19663731 60.8 ns/op 0 B/op 0 allocs/op BenchmarkAero_ParseStatic 28967341 41.5 ns/op 0 B/op 0 allocs/op BenchmarkBear_ParseStatic 3006984 402 ns/op 120 B/op 3 allocs/op BenchmarkBeego_ParseStatic 1000000 1031 ns/op 352 B/op 3 allocs/op BenchmarkBone_ParseStatic 1782482 675 ns/op 144 B/op 3 allocs/op BenchmarkChi_ParseStatic 1453261 819 ns/op 432 B/op 3 allocs/op BenchmarkDenco_ParseStatic 45023595 26.5 ns/op 0 B/op 0 allocs/op BenchmarkEcho_ParseStatic 17330470 69.3 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_ParseStatic 1644006 731 ns/op 296 B/op 5 allocs/op BenchmarkGoji_ParseStatic 7026930 170 ns/op 0 B/op 0 allocs/op BenchmarkGojiv2_ParseStatic 517618 
2037 ns/op 1312 B/op 10 allocs/op BenchmarkGoJsonRest_ParseStatic 1227080 975 ns/op 329 B/op 11 allocs/op BenchmarkGoRestful_ParseStatic 192458 6659 ns/op 4256 B/op 13 allocs/op BenchmarkGorillaMux_ParseStatic 744062 2109 ns/op 976 B/op 9 allocs/op BenchmarkGowwwRouter_ParseStatic 37781062 31.8 ns/op 0 B/op 0 allocs/op BenchmarkHttpRouter_ParseStatic 45311223 26.5 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_ParseStatic 21383475 56.1 ns/op 0 B/op 0 allocs/op BenchmarkKocha_ParseStatic 29953290 40.1 ns/op 0 B/op 0 allocs/op BenchmarkLARS_ParseStatic 20036196 62.7 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_ParseStatic 1000000 1740 ns/op 736 B/op 8 allocs/op BenchmarkMartini_ParseStatic 404156 3801 ns/op 768 B/op 9 allocs/op BenchmarkPat_ParseStatic 1547180 772 ns/op 240 B/op 5 allocs/op BenchmarkPossum_ParseStatic 1608991 757 ns/op 416 B/op 3 allocs/op BenchmarkR2router_ParseStatic 3177936 385 ns/op 144 B/op 4 allocs/op BenchmarkRivet_ParseStatic 17783205 67.4 ns/op 0 B/op 0 allocs/op BenchmarkTango_ParseStatic 1210777 990 ns/op 248 B/op 8 allocs/op BenchmarkTigerTonic_ParseStatic 5316440 231 ns/op 48 B/op 1 allocs/op BenchmarkTraffic_ParseStatic 496050 2539 ns/op 1256 B/op 19 allocs/op BenchmarkVulcan_ParseStatic 2462798 488 ns/op 98 B/op 3 allocs/op BenchmarkAce_ParseParam 13393669 89.6 ns/op 0 B/op 0 allocs/op BenchmarkAero_ParseParam 19836619 60.4 ns/op 0 B/op 0 allocs/op BenchmarkBear_ParseParam 1405954 864 ns/op 467 B/op 5 allocs/op BenchmarkBeego_ParseParam 1000000 1065 ns/op 352 B/op 3 allocs/op BenchmarkBone_ParseParam 1000000 1698 ns/op 896 B/op 7 allocs/op BenchmarkChi_ParseParam 1356037 873 ns/op 432 B/op 3 allocs/op BenchmarkDenco_ParseParam 6241392 204 ns/op 64 B/op 1 allocs/op BenchmarkEcho_ParseParam 14088100 85.1 ns/op 0 B/op 0 allocs/op BenchmarkGin_ParseParam 17426064 68.9 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_ParseParam 1000000 1254 ns/op 664 B/op 8 allocs/op BenchmarkGoji_ParseParam 1682574 713 ns/op 336 B/op 2 allocs/op 
BenchmarkGojiv2_ParseParam 502224 2333 ns/op 1360 B/op 12 allocs/op BenchmarkGoJsonRest_ParseParam 1000000 1401 ns/op 649 B/op 13 allocs/op BenchmarkGoRestful_ParseParam 182623 7097 ns/op 4576 B/op 14 allocs/op BenchmarkGorillaMux_ParseParam 482332 2477 ns/op 1280 B/op 10 allocs/op BenchmarkGowwwRouter_ParseParam 1834873 657 ns/op 432 B/op 3 allocs/op BenchmarkHttpRouter_ParseParam 23593393 51.0 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_ParseParam 2100160 574 ns/op 352 B/op 3 allocs/op BenchmarkKocha_ParseParam 4837220 252 ns/op 56 B/op 3 allocs/op BenchmarkLARS_ParseParam 18411192 66.2 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_ParseParam 571870 2398 ns/op 1072 B/op 10 allocs/op BenchmarkMartini_ParseParam 286262 4268 ns/op 1072 B/op 10 allocs/op BenchmarkPat_ParseParam 692906 2157 ns/op 992 B/op 15 allocs/op BenchmarkPossum_ParseParam 1000000 1011 ns/op 496 B/op 5 allocs/op BenchmarkR2router_ParseParam 1722735 697 ns/op 432 B/op 5 allocs/op BenchmarkRivet_ParseParam 6058054 203 ns/op 48 B/op 1 allocs/op BenchmarkTango_ParseParam 1000000 1061 ns/op 280 B/op 8 allocs/op BenchmarkTigerTonic_ParseParam 890275 2277 ns/op 784 B/op 15 allocs/op BenchmarkTraffic_ParseParam 351322 3543 ns/op 1896 B/op 21 allocs/op BenchmarkVulcan_ParseParam 2076544 572 ns/op 98 B/op 3 allocs/op BenchmarkAce_Parse2Params 11718074 101 ns/op 0 B/op 0 allocs/op BenchmarkAero_Parse2Params 16264988 73.4 ns/op 0 B/op 0 allocs/op BenchmarkBear_Parse2Params 1238322 973 ns/op 496 B/op 5 allocs/op BenchmarkBeego_Parse2Params 1000000 1120 ns/op 352 B/op 3 allocs/op BenchmarkBone_Parse2Params 1000000 1632 ns/op 848 B/op 6 allocs/op BenchmarkChi_Parse2Params 1239477 955 ns/op 432 B/op 3 allocs/op BenchmarkDenco_Parse2Params 4944133 245 ns/op 64 B/op 1 allocs/op BenchmarkEcho_Parse2Params 10518286 114 ns/op 0 B/op 0 allocs/op BenchmarkGin_Parse2Params 14505195 82.7 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_Parse2Params 1000000 1437 ns/op 712 B/op 9 allocs/op BenchmarkGoji_Parse2Params 1689883 707 
ns/op 336 B/op 2 allocs/op BenchmarkGojiv2_Parse2Params 502334 2308 ns/op 1344 B/op 11 allocs/op BenchmarkGoJsonRest_Parse2Params 1000000 1771 ns/op 713 B/op 14 allocs/op BenchmarkGoRestful_Parse2Params 159092 7583 ns/op 4928 B/op 14 allocs/op BenchmarkGorillaMux_Parse2Params 417548 2980 ns/op 1296 B/op 10 allocs/op BenchmarkGowwwRouter_Parse2Params 1751737 686 ns/op 432 B/op 3 allocs/op BenchmarkHttpRouter_Parse2Params 18089204 66.3 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_Parse2Params 1556986 777 ns/op 384 B/op 4 allocs/op BenchmarkKocha_Parse2Params 2493082 485 ns/op 128 B/op 5 allocs/op BenchmarkLARS_Parse2Params 15350108 78.5 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_Parse2Params 530974 2605 ns/op 1072 B/op 10 allocs/op BenchmarkMartini_Parse2Params 247069 4673 ns/op 1152 B/op 11 allocs/op BenchmarkPat_Parse2Params 816295 2126 ns/op 752 B/op 16 allocs/op BenchmarkPossum_Parse2Params 1000000 1002 ns/op 496 B/op 5 allocs/op BenchmarkR2router_Parse2Params 1569771 733 ns/op 432 B/op 5 allocs/op BenchmarkRivet_Parse2Params 4080546 295 ns/op 96 B/op 1 allocs/op BenchmarkTango_Parse2Params 1000000 1121 ns/op 312 B/op 8 allocs/op BenchmarkTigerTonic_Parse2Params 399556 3470 ns/op 1168 B/op 22 allocs/op BenchmarkTraffic_Parse2Params 314194 4159 ns/op 1944 B/op 22 allocs/op BenchmarkVulcan_Parse2Params 1827559 664 ns/op 98 B/op 3 allocs/op BenchmarkAce_ParseAll 478395 2503 ns/op 0 B/op 0 allocs/op BenchmarkAero_ParseAll 715392 1658 ns/op 0 B/op 0 allocs/op BenchmarkBear_ParseAll 59191 20124 ns/op 8928 B/op 110 allocs/op BenchmarkBeego_ParseAll 45507 27266 ns/op 9152 B/op 78 allocs/op BenchmarkBone_ParseAll 29328 41459 ns/op 16208 B/op 147 allocs/op BenchmarkChi_ParseAll 48531 25053 ns/op 11232 B/op 78 allocs/op BenchmarkDenco_ParseAll 325532 4284 ns/op 928 B/op 16 allocs/op BenchmarkEcho_ParseAll 433771 2759 ns/op 0 B/op 0 allocs/op BenchmarkGin_ParseAll 576316 2082 ns/op 0 B/op 0 allocs/op BenchmarkGocraftWeb_ParseAll 41500 29692 ns/op 13728 B/op 181 allocs/op 
BenchmarkGoji_ParseAll 80833 15563 ns/op 5376 B/op 32 allocs/op BenchmarkGojiv2_ParseAll 19836 60335 ns/op 34448 B/op 277 allocs/op BenchmarkGoJsonRest_ParseAll 32210 38027 ns/op 13866 B/op 321 allocs/op BenchmarkGoRestful_ParseAll 6644 190842 ns/op 117600 B/op 354 allocs/op BenchmarkGorillaMux_ParseAll 12634 95894 ns/op 30288 B/op 250 allocs/op BenchmarkGowwwRouter_ParseAll 98152 12159 ns/op 6912 B/op 48 allocs/op BenchmarkHttpRouter_ParseAll 933208 1273 ns/op 0 B/op 0 allocs/op BenchmarkHttpTreeMux_ParseAll 107191 11554 ns/op 5728 B/op 51 allocs/op BenchmarkKocha_ParseAll 184862 6225 ns/op 1112 B/op 54 allocs/op BenchmarkLARS_ParseAll 644546 1858 ns/op 0 B/op 0 allocs/op BenchmarkMacaron_ParseAll 26145 46484 ns/op 19136 B/op 208 allocs/op BenchmarkMartini_ParseAll 10000 121838 ns/op 25072 B/op 253 allocs/op BenchmarkPat_ParseAll 25417 47196 ns/op 15216 B/op 308 allocs/op BenchmarkPossum_ParseAll 58550 20735 ns/op 10816 B/op 78 allocs/op BenchmarkR2router_ParseAll 72732 16584 ns/op 8352 B/op 120 allocs/op BenchmarkRivet_ParseAll 281365 4968 ns/op 912 B/op 16 allocs/op BenchmarkTango_ParseAll 42831 28668 ns/op 7168 B/op 208 allocs/op BenchmarkTigerTonic_ParseAll 23774 49972 ns/op 16048 B/op 332 allocs/op BenchmarkTraffic_ParseAll 10000 104679 ns/op 45520 B/op 605 allocs/op BenchmarkVulcan_ParseAll 64810 18108 ns/op 2548 B/op 78 allocs/op ```
unknown
github
https://github.com/gin-gonic/gin
BENCHMARKS.md
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for learn.dataframe.transforms.reader_source."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.transforms.reader_source as rs


class ReaderSourceTestCase(tf.test.TestCase):
  """Test class for ReaderSource."""

  def setUp(self):
    super(ReaderSourceTestCase, self).setUp()
    # 1000 string work units ("0" .. "999") fed to the reader under test.
    self.work_units = [str(x) for x in range(1000)]

  def testNoShuffle(self):
    # With shuffle=False and a single thread, units must come back in
    # exactly the order they were supplied.
    id_source = rs.ReaderSource(reader_cls=tf.IdentityReader,
                                work_units=self.work_units,
                                batch_size=1,
                                shuffle=False,
                                num_threads=1)
    index_column, value_column = id_source()
    # NOTE(review): build() takes no shared cache here, while testYesShuffle
    # passes one to both columns — confirm whether the un-shuffled case
    # should also share a cache between the two build() calls.
    index_tensor = index_column.build()
    value_tensor = value_column.build()
    # batch_size=1, so each fetched tensor is a length-1 vector.
    self.assertEqual([1], index_tensor.get_shape().as_list())
    self.assertEqual([1], value_tensor.get_shape().as_list())
    with self.test_session() as sess:
      tf.global_variables_initializer().run()
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
      # IdentityReader echoes its work unit, so both index and value are
      # expected to equal the loop counter i.
      for i in range(50):
        index, value = sess.run([index_tensor, value_tensor])
        self.assertEqual(i, int(index[0]))
        self.assertEqual(i, int(value[0]))
      coord.request_stop()
      coord.join(threads)

  def testYesShuffle(self):
    # With shuffle=True and multiple threads (fixed seed for determinism),
    # values must still match their index and no unit may repeat.
    id_source = rs.ReaderSource(reader_cls=tf.IdentityReader,
                                work_units=self.work_units,
                                batch_size=1,
                                shuffle=True,
                                num_threads=10,
                                seed=1234)
    index_column, value_column = id_source()
    # Shared cache so both columns are built on the same underlying ops.
    cache = {}
    index_tensor = index_column.build(cache)
    value_tensor = value_column.build(cache)
    self.assertEqual([1], index_tensor.get_shape().as_list())
    self.assertEqual([1], value_tensor.get_shape().as_list())
    seen = set([])
    with self.test_session() as sess:
      tf.global_variables_initializer().run()
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
      for _ in range(500):
        index, value = sess.run([index_tensor, value_tensor])
        # IdentityReader: value equals its index even after shuffling.
        self.assertEqual(index, value)
        # Each work unit must be delivered at most once.
        self.assertNotIn(int(value[0]), seen)
        seen.add(int(value[0]))
      coord.request_stop()
      coord.join(threads)


if __name__ == "__main__":
  tf.test.main()
unknown
codeparrot/codeparrot-clean
#include "macro_helper_test.h"

// Test fixture for clang-move: DEFINE(name) expands to a namespace 'ns'
// holding an internal constant t1 plus two booleans t2_<name> / t3_<name>
// initialized from it, then pulls t2_<name> into the enclosing scope with
// a using-declaration.
#define DEFINE(name) \
  namespace ns { \
  static const bool t1 = false; \
  bool t2_##name = t1; \
  bool t3_##name = t1; \
  } \
  using ns::t2_##name;

// Instantiates ns::t2_test / ns::t3_test and the 'using ns::t2_test;'.
DEFINE(test)

// Free function defined at file scope; intentionally empty.
void f1() {}
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/test/clang-move/Inputs/macro_helper_test.cpp
// RUN: %check_clang_tidy -std=c++14 %s bugprone-unhandled-exception-at-new %t -- -- -fexceptions
// FIXME: Fix the checker to work in C++17 or later mode.

// Minimal stand-ins for the <new>/<exception> declarations the check
// inspects; the real headers are not available in the lit environment.
namespace std {
typedef __typeof__(sizeof(0)) size_t;
enum class align_val_t : std::size_t {};
class exception {};
class bad_alloc : public exception {};
class bad_array_new_length : public bad_alloc {};
struct nothrow_t {};
extern const nothrow_t nothrow;
} // namespace std

// Non-throwing global operator new overloads (nothrow and placement forms).
void *operator new(std::size_t, const std::nothrow_t &) noexcept;
void *operator new(std::size_t, std::align_val_t, const std::nothrow_t &) noexcept;
void *operator new(std::size_t, void *) noexcept;

class A {};
// Aliases of std::bad_alloc used to verify the check sees through typedefs,
// using-aliases, and reference aliases in catch handlers.
typedef std::bad_alloc badalloc1;
using badalloc2 = std::bad_alloc;
using badalloc3 = std::bad_alloc &;

// User-defined placement forms: one potentially-throwing, one noexcept.
void *operator new(std::size_t, int, int);
void *operator new(std::size_t, int, int, int) noexcept;

// Class-specific operator new overloads with mixed exception specifications.
struct ClassSpecificNew {
  void *operator new(std::size_t);
  void *operator new(std::size_t, std::align_val_t);
  void *operator new(std::size_t, int, int) noexcept;
  void *operator new(std::size_t, int, int, int);
};

// 'new' inside a noexcept function: only handlers that can catch
// std::bad_alloc (by value, reference, or alias) silence the warning.
void f1() noexcept {
  int *I1 = new int;
  // CHECK-MESSAGES: :[[@LINE-1]]:13: warning: missing exception handler for allocation failure at 'new'
  try {
    int *I2 = new int;
    try {
      int *I3 = new int;
    } catch (A) {
    }
  } catch (std::bad_alloc) {
  }
  try {
    int *I = new int;
  } catch (std::bad_alloc &) {
  }
  try {
    int *I = new int;
  } catch (const std::bad_alloc &) {
  }
  try {
    int *I = new int;
  } catch (badalloc1) {
  }
  try {
    int *I = new int;
  } catch (badalloc1 &) {
  }
  try {
    int *I = new int;
  } catch (const badalloc1 &) {
  }
  try {
    int *I = new int;
  } catch (badalloc2) {
  }
  try {
    int *I = new int;
  } catch (badalloc3) {
  }
  try {
    int *I = new int;
    // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: missing exception handler for allocation failure at 'new'
  } catch (std::bad_alloc *) {
  }
  try {
    int *I = new int;
    // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: missing exception handler for allocation failure at 'new'
  } catch (A) {
  }
}

// Handlers for base classes (catch-all, std::exception) count; a handler
// for a *derived* type (bad_array_new_length) does not cover bad_alloc.
void f2() noexcept {
  try {
    int *I = new int;
  } catch (A) {
  } catch (std::bad_alloc) {
  }
  try {
    int *I = new int;
  } catch (...) {
  }
  try {
    int *I = new int;
  } catch (const std::exception &) {
  }
  try {
    int *I = new int;
    // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: missing exception handler for allocation failure at 'new'
  } catch (const std::bad_array_new_length &) {
  }
}

// Nothrow and placement forms of new never throw, so no handler is needed.
void f_new_nothrow() noexcept {
  int *I1 = new (std::nothrow) int;
  int *I2 = new (static_cast<std::align_val_t>(1), std::nothrow) int;
}

void f_new_placement() noexcept {
  char buf[100];
  int *I = new (buf) int;
}

// User-defined placement new: warn only for the potentially-throwing form.
void f_new_user_defined() noexcept {
  int *I1 = new (1, 2) int;
  // CHECK-MESSAGES: :[[@LINE-1]]:13: warning: missing exception handler for allocation failure at 'new'
  int *I2 = new (1, 2, 3) int;
}

// Class-specific operator new: warn unless the selected overload is noexcept.
void f_class_specific() noexcept {
  ClassSpecificNew *N1 = new ClassSpecificNew;
  // CHECK-MESSAGES: :[[@LINE-1]]:26: warning: missing exception handler for allocation failure at 'new'
  ClassSpecificNew *N2 = new (static_cast<std::align_val_t>(1)) ClassSpecificNew;
  // CHECK-MESSAGES: :[[@LINE-1]]:26: warning: missing exception handler for allocation failure at 'new'
  ClassSpecificNew *N3 = new (1, 2) ClassSpecificNew;
  ClassSpecificNew *N4 = new (1, 2, 3) ClassSpecificNew;
  // CHECK-MESSAGES: :[[@LINE-1]]:26: warning: missing exception handler for allocation failure at 'new'
}

// Exception-specification variants: only functions that cannot propagate
// exceptions (noexcept(true), throw()) require a local handler.
void f_est_none() {
  int *I = new int;
}

void f_est_noexcept_false() noexcept(false) {
  int *I = new int;
}

void f_est_noexcept_true() noexcept(true) {
  int *I = new int;
  // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: missing exception handler for allocation failure at 'new'
}

void f_est_dynamic_none() throw() {
  int *I = new int;
  // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: missing exception handler for allocation failure at 'new'
}

void f_est_dynamic_1() throw(std::bad_alloc) {
  int *I = new int;
}

void f_est_dynamic_2() throw(A) {
  // the exception specification list is not checked
  int *I = new int;
}

// Function-try-blocks: the handler on the function body counts.
void f_try() noexcept try {
  int *I = new int;
} catch (const std::bad_alloc &) {
}

void f_try_bad() noexcept try {
  int *I = new int;
  // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: missing exception handler for allocation failure at 'new'
} catch (const A &) {
}

// A matching handler in an *outer* try also silences the warning.
void f_embedded_try() noexcept {
  try {
    try {
      int *I = new int;
    } catch (const A &) {
    }
  } catch (const std::bad_alloc &) {
  }
}

// Dependent noexcept: only warn for instantiations where it evaluates true.
template <bool P>
void f_est_noexcept_dependent_unused() noexcept(P) {
  int *I = new int;
}

template <bool P>
void f_est_noexcept_dependent_used() noexcept(P) {
  int *I = new int;
  // CHECK-MESSAGES: :[[@LINE-1]]:12: warning: missing exception handler for allocation failure at 'new'
}

template <class T>
void f_dependent_new() {
  T *X = new T;
}

// Instantiation that triggers the dependent-noexcept diagnostic above.
void f_1() {
  f_est_noexcept_dependent_used<true>();
}
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/test/clang-tidy/checkers/bugprone/unhandled-exception-at-new.cpp
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import unittest2 as unittest from webkitpy.common.host_mock import MockHost from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.layout_tests.models.test_configuration import * from webkitpy.layout_tests.models.test_expectations import * from webkitpy.layout_tests.models.test_configuration import * try: from collections import OrderedDict except ImportError: # Needed for Python < 2.7 from webkitpy.thirdparty.ordered_dict import OrderedDict class Base(unittest.TestCase): # Note that all of these tests are written assuming the configuration # being tested is Windows XP, Release build. def __init__(self, testFunc): host = MockHost() self._port = host.port_factory.get('test-win-xp', None) self._exp = None unittest.TestCase.__init__(self, testFunc) def get_test(self, test_name): # FIXME: Remove this routine and just reference test names directly. return test_name def get_basic_tests(self): return [self.get_test('failures/expected/text.html'), self.get_test('failures/expected/image_checksum.html'), self.get_test('failures/expected/crash.html'), self.get_test('failures/expected/missing_text.html'), self.get_test('failures/expected/image.html'), self.get_test('passes/text.html')] def get_basic_expectations(self): return """ Bug(test) failures/expected/text.html [ Failure ] Bug(test) failures/expected/crash.html [ WontFix ] Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ] Bug(test) failures/expected/image_checksum.html [ WontFix ] Bug(test) failures/expected/image.html [ WontFix Mac ] """ def parse_exp(self, expectations, overrides=None, is_lint_mode=False): expectations_dict = OrderedDict() expectations_dict['expectations'] = expectations if overrides: expectations_dict['overrides'] = overrides self._port.expectations_dict = lambda: expectations_dict expectations_to_lint = expectations_dict if is_lint_mode else None self._exp = TestExpectations(self._port, self.get_basic_tests(), expectations_to_lint=expectations_to_lint) def 
assert_exp(self, test, result): self.assertEqual(self._exp.get_expectations(self.get_test(test)), set([result])) def assert_bad_expectations(self, expectations, overrides=None): self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides) class BasicTests(Base): def test_basic(self): self.parse_exp(self.get_basic_expectations()) self.assert_exp('failures/expected/text.html', FAIL) self.assert_exp('failures/expected/image_checksum.html', PASS) self.assert_exp('passes/text.html', PASS) self.assert_exp('failures/expected/image.html', PASS) class MiscTests(Base): def test_multiple_results(self): self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]') self.assertEqual(self._exp.get_expectations( self.get_test('failures/expected/text.html')), set([FAIL, CRASH])) def test_result_was_expected(self): # test basics self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), True) self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False) # test handling of SKIPped tests and results self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=True), True) self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=False), False) # test handling of MISSING results and the REBASELINE modifier self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True, test_is_skipped=False), True) self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False) def test_remove_pixel_failures(self): self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL])) self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS])) 
self.assertEqual(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS])) self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL])) self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH])) def test_suffixes_for_expectations(self): self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav'])) self.assertEqual(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png'])) self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav'])) self.assertEqual(TestExpectations.suffixes_for_expectations(set()), set()) def test_category_expectations(self): # This test checks unknown tests are not present in the # expectations and that known test part of a test category is # present in the expectations. exp_str = 'Bug(x) failures/expected [ WontFix ]' self.parse_exp(exp_str) test_name = 'failures/expected/unknown-test.html' unknown_test = self.get_test(test_name) self.assertRaises(KeyError, self._exp.get_expectations, unknown_test) self.assert_exp('failures/expected/crash.html', PASS) def test_get_modifiers(self): self.parse_exp(self.get_basic_expectations()) self.assertEqual(self._exp.get_modifiers( self.get_test('passes/text.html')), []) def test_get_expectations_string(self): self.parse_exp(self.get_basic_expectations()) self.assertEqual(self._exp.get_expectations_string( self.get_test('failures/expected/text.html')), 'FAIL') def test_expectation_to_string(self): # Normal cases are handled by other tests. self.parse_exp(self.get_basic_expectations()) self.assertRaises(ValueError, self._exp.expectation_to_string, -1) def test_get_test_set(self): # Handle some corner cases for this routine not covered by other tests. 
self.parse_exp(self.get_basic_expectations()) s = self._exp.get_test_set(WONTFIX) self.assertEqual(s, set([self.get_test('failures/expected/crash.html'), self.get_test('failures/expected/image_checksum.html')])) def test_parse_warning(self): try: filesystem = self._port.host.filesystem filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content') self.get_test('disabled-test.html-disabled'), self.parse_exp("[ FOO ] failures/expected/text.html [ Failure ]\n" "Bug(rniwa) non-existent-test.html [ Failure ]\n" "Bug(rniwa) disabled-test.html-disabled [ ImageOnlyFailure ]", is_lint_mode=True) self.assertFalse(True, "ParseError wasn't raised") except ParseError, e: warnings = ("expectations:1 Unrecognized modifier 'foo' failures/expected/text.html\n" "expectations:2 Path does not exist. non-existent-test.html") self.assertEqual(str(e), warnings) def test_parse_warnings_are_logged_if_not_in_lint_mode(self): oc = OutputCapture() try: oc.capture_output() self.parse_exp('-- this should be a syntax error', is_lint_mode=False) finally: _, _, logs = oc.restore_output() self.assertNotEquals(logs, '') def test_error_on_different_platform(self): # parse_exp uses a Windows port. Assert errors on Mac show up in lint mode. self.assertRaises(ParseError, self.parse_exp, 'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\nBug(test) [ Mac ] failures/expected/text.html [ Failure ]', is_lint_mode=True) def test_error_on_different_build_type(self): # parse_exp uses a Release port. Assert errors on DEBUG show up in lint mode. 
self.assertRaises(ParseError, self.parse_exp, 'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\nBug(test) [ Debug ] failures/expected/text.html [ Failure ]', is_lint_mode=True) def test_overrides(self): self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]", "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]") self.assert_exp('failures/expected/text.html', IMAGE) def test_overrides__directory(self): self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]", "Bug(override) failures/expected [ Crash ]") self.assert_exp('failures/expected/text.html', CRASH) self.assert_exp('failures/expected/image.html', CRASH) def test_overrides__duplicate(self): self.assert_bad_expectations("Bug(exp) failures/expected/text.html [ Failure ]", "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n" "Bug(override) failures/expected/text.html [ Crash ]\n") def test_pixel_tests_flag(self): def match(test, result, pixel_tests_enabled): return self._exp.matches_an_expected_result( self.get_test(test), result, pixel_tests_enabled) self.parse_exp(self.get_basic_expectations()) self.assertTrue(match('failures/expected/text.html', FAIL, True)) self.assertTrue(match('failures/expected/text.html', FAIL, False)) self.assertFalse(match('failures/expected/text.html', CRASH, True)) self.assertFalse(match('failures/expected/text.html', CRASH, False)) self.assertTrue(match('failures/expected/image_checksum.html', PASS, True)) self.assertTrue(match('failures/expected/image_checksum.html', PASS, False)) self.assertTrue(match('failures/expected/crash.html', PASS, False)) self.assertTrue(match('passes/text.html', PASS, False)) def test_more_specific_override_resets_skip(self): self.parse_exp("Bug(x) failures/expected [ Skip ]\n" "Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n") self.assert_exp('failures/expected/text.html', IMAGE) self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(), 'failures/expected/text.html') in 
self._exp.get_tests_with_result_type(SKIP)) class SkippedTests(Base): def check(self, expectations, overrides, skips, lint=False): port = MockHost().port_factory.get('qt') port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo') expectations_dict = OrderedDict() expectations_dict['expectations'] = expectations if overrides: expectations_dict['overrides'] = overrides port.expectations_dict = lambda: expectations_dict port.skipped_layout_tests = lambda tests: set(skips) expectations_to_lint = expectations_dict if lint else None exp = TestExpectations(port, ['failures/expected/text.html'], expectations_to_lint=expectations_to_lint) # Check that the expectation is for BUG_DUMMY SKIP : ... [ Pass ] self.assertEqual(exp.get_modifiers('failures/expected/text.html'), [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER]) self.assertEqual(exp.get_expectations('failures/expected/text.html'), set([PASS])) def test_skipped_tests_work(self): self.check(expectations='', overrides=None, skips=['failures/expected/text.html']) def test_duplicate_skipped_test_fails_lint(self): self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None, skips=['failures/expected/text.html'], lint=True) def test_skipped_file_overrides_expectations(self): self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None, skips=['failures/expected/text.html']) def test_skipped_dir_overrides_expectations(self): self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None, skips=['failures/expected']) def test_skipped_file_overrides_overrides(self): self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n', skips=['failures/expected/text.html']) def test_skipped_dir_overrides_overrides(self): self.check(expectations='', 
overrides='Bug(x) failures/expected/text.html [ Failure ]\n', skips=['failures/expected']) def test_skipped_entry_dont_exist(self): port = MockHost().port_factory.get('qt') expectations_dict = OrderedDict() expectations_dict['expectations'] = '' port.expectations_dict = lambda: expectations_dict port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html']) capture = OutputCapture() capture.capture_output() exp = TestExpectations(port) _, _, logs = capture.restore_output() self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs) class ExpectationSyntaxTests(Base): def test_unrecognized_expectation(self): self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]') def test_macro(self): exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]' self.parse_exp(exp_str) self.assert_exp('failures/expected/text.html', FAIL) def assert_tokenize_exp(self, line, bugs=None, modifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'): bugs = bugs or [] modifiers = modifiers or [] expectations = expectations or [] warnings = warnings or [] filename = 'TestExpectations' line_number = 1 expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number) self.assertEqual(expectation_line.warnings, warnings) self.assertEqual(expectation_line.name, name) self.assertEqual(expectation_line.filename, filename) self.assertEqual(expectation_line.line_number, line_number) if not warnings: self.assertEqual(expectation_line.modifiers, modifiers) self.assertEqual(expectation_line.expectations, expectations) def test_bare_name(self): self.assert_tokenize_exp('foo.html', modifiers=['SKIP'], expectations=['PASS']) def test_bare_name_and_bugs(self): self.assert_tokenize_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS']) self.assert_tokenize_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS']) 
self.assert_tokenize_exp('webkit.org/b/12345 webkit.org/b/34567 foo.html', modifiers=['BUGWK12345', 'BUGWK34567', 'SKIP'], expectations=['PASS']) def test_comments(self): self.assert_tokenize_exp("# comment", name=None, comment="# comment") self.assert_tokenize_exp("foo.html # comment", comment="# comment", expectations=['PASS'], modifiers=['SKIP']) def test_config_modifiers(self): self.assert_tokenize_exp('[ Mac ] foo.html', modifiers=['MAC', 'SKIP'], expectations=['PASS']) self.assert_tokenize_exp('[ Mac Vista ] foo.html', modifiers=['MAC', 'VISTA', 'SKIP'], expectations=['PASS']) self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', modifiers=['MAC'], expectations=['FAIL']) def test_unknown_config(self): self.assert_tokenize_exp('[ Foo ] foo.html ', modifiers=['Foo', 'SKIP'], expectations=['PASS']) def test_unknown_expectation(self): self.assert_tokenize_exp('foo.html [ Audio ]', warnings=['Unrecognized expectation "Audio"']) def test_skip(self): self.assert_tokenize_exp('foo.html [ Skip ]', modifiers=['SKIP'], expectations=['PASS']) def test_slow(self): self.assert_tokenize_exp('foo.html [ Slow ]', modifiers=['SLOW'], expectations=['PASS']) def test_wontfix(self): self.assert_tokenize_exp('foo.html [ WontFix ]', modifiers=['WONTFIX', 'SKIP'], expectations=['PASS']) self.assert_tokenize_exp('foo.html [ WontFix ImageOnlyFailure ]', modifiers=['WONTFIX'], expectations=['IMAGE']) self.assert_tokenize_exp('foo.html [ WontFix Pass Failure ]', modifiers=['WONTFIX'], expectations=['PASS', 'FAIL']) def test_blank_line(self): self.assert_tokenize_exp('', name=None) def test_warnings(self): self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.'], name=None) self.assert_tokenize_exp('[ [', warnings=['unexpected "["'], name=None) self.assert_tokenize_exp('webkit.org/b/12345 ]', warnings=['unexpected "]"'], name=None) self.assert_tokenize_exp('foo.html webkit.org/b/12345 ]', warnings=['"webkit.org/b/12345" is not at the start of the line.']) class 
SemanticTests(Base): def test_bug_format(self): self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True) def test_bad_bugid(self): try: self.parse_exp('BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True) self.fail('should have raised an error about a bad bug identifier') except ParseError, exp: self.assertEqual(len(exp.warnings), 1) def test_missing_bugid(self): self.parse_exp('failures/expected/text.html [ Failure ]') self.assertFalse(self._exp.has_warnings()) self._port.warn_if_bug_missing_in_test_expectations = lambda: True self.parse_exp('failures/expected/text.html [ Failure ]') line = self._exp._model.get_expectation_line('failures/expected/text.html') self.assertFalse(line.is_invalid()) self.assertEqual(line.warnings, ['Test lacks BUG modifier.']) def test_skip_and_wontfix(self): # Skip is not allowed to have other expectations as well, because those # expectations won't be exercised and may become stale . self.parse_exp('failures/expected/text.html [ Failure Skip ]') self.assertTrue(self._exp.has_warnings()) self.parse_exp('failures/expected/text.html [ Crash WontFix ]') self.assertFalse(self._exp.has_warnings()) self.parse_exp('failures/expected/text.html [ Pass WontFix ]') self.assertFalse(self._exp.has_warnings()) def test_slow_and_timeout(self): # A test cannot be SLOW and expected to TIMEOUT. self.assertRaises(ParseError, self.parse_exp, 'Bug(test) failures/expected/timeout.html [ Slow Timeout ]', is_lint_mode=True) def test_rebaseline(self): # Can't lint a file w/ 'REBASELINE' in it. 
self.assertRaises(ParseError, self.parse_exp, 'Bug(test) failures/expected/text.html [ Failure Rebaseline ]', is_lint_mode=True) def test_duplicates(self): self.assertRaises(ParseError, self.parse_exp, """ Bug(exp) failures/expected/text.html [ Failure ] Bug(exp) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True) self.assertRaises(ParseError, self.parse_exp, self.get_basic_expectations(), overrides=""" Bug(override) failures/expected/text.html [ Failure ] Bug(override) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True) def test_missing_file(self): self.parse_exp('Bug(test) missing_file.html [ Failure ]') self.assertTrue(self._exp.has_warnings(), 1) class PrecedenceTests(Base): def test_file_over_directory(self): # This tests handling precedence of specific lines over directories # and tests expectations covering entire directories. exp_str = """ Bug(x) failures/expected/text.html [ Failure ] Bug(y) failures/expected [ WontFix ] """ self.parse_exp(exp_str) self.assert_exp('failures/expected/text.html', FAIL) self.assert_exp('failures/expected/crash.html', PASS) exp_str = """ Bug(x) failures/expected [ WontFix ] Bug(y) failures/expected/text.html [ Failure ] """ self.parse_exp(exp_str) self.assert_exp('failures/expected/text.html', FAIL) self.assert_exp('failures/expected/crash.html', PASS) def test_ambiguous(self): self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n" "Bug(test) [ Win ] passes/text.html [ Failure ]\n") def test_more_modifiers(self): self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n" "Bug(test) [ Win Release ] passes/text.html [ Failure ]\n") def test_order_in_file(self): self.assert_bad_expectations("Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n" "Bug(test) [ Release ] : passes/text.html [ Pass ]\n") def test_macro_overrides(self): self.assert_bad_expectations("Bug(test) [ Win ] passes/text.html [ Pass ]\n" "Bug(test) [ XP ] 
passes/text.html [ Failure ]\n") class RemoveConfigurationsTest(Base): def test_remove(self): host = MockHost() test_port = host.port_factory.get('test-win-xp', None) test_port.test_exists = lambda test: True test_port.test_isfile = lambda test: True test_config = test_port.test_configuration() test_port.expectations_dict = lambda: {"expectations": """Bug(x) [ Linux Win Release ] failures/expected/foo.html [ Failure ] Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ] """} expectations = TestExpectations(test_port, self.get_basic_tests()) actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config) self.assertEqual("""Bug(x) [ Linux Vista Win7 Release ] failures/expected/foo.html [ Failure ] Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ] """, actual_expectations) def test_remove_line(self): host = MockHost() test_port = host.port_factory.get('test-win-xp', None) test_port.test_exists = lambda test: True test_port.test_isfile = lambda test: True test_config = test_port.test_configuration() test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ] Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ] """} expectations = TestExpectations(test_port) actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config) actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-vista', None).test_configuration()) actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration()) self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ] """, actual_expectations) class RebaseliningTest(Base): """Test rebaselining-specific functionality.""" def assertRemove(self, input_expectations, input_overrides, tests, 
expected_expectations, expected_overrides): self.parse_exp(input_expectations, is_lint_mode=False, overrides=input_overrides) actual_expectations = self._exp.remove_rebaselined_tests(tests, 'expectations') self.assertEqual(expected_expectations, actual_expectations) actual_overrides = self._exp.remove_rebaselined_tests(tests, 'overrides') self.assertEqual(expected_overrides, actual_overrides) def test_remove(self): self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n' 'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n' 'Bug(z) failures/expected/crash.html [ Crash ]\n', 'Bug(x0) failures/expected/image.html [ Crash ]\n', ['failures/expected/text.html'], 'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n' 'Bug(z) failures/expected/crash.html [ Crash ]\n', 'Bug(x0) failures/expected/image.html [ Crash ]\n') # Ensure that we don't modify unrelated lines, even if we could rewrite them. # i.e., the second line doesn't get rewritten to "Bug(y) failures/expected/skip.html" self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n' 'Bug(Y) failures/expected/image.html [ Skip ]\n' 'Bug(z) failures/expected/crash.html\n', '', ['failures/expected/text.html'], 'Bug(Y) failures/expected/image.html [ Skip ]\n' 'Bug(z) failures/expected/crash.html\n', '') def test_get_rebaselining_failures(self): # Make sure we find a test as needing a rebaseline even if it is not marked as a failure. 
self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n') self.assertEqual(len(self._exp.get_rebaselining_failures()), 1) self.parse_exp(self.get_basic_expectations()) self.assertEqual(len(self._exp.get_rebaselining_failures()), 0) class TestExpectationSerializationTests(unittest.TestCase): def __init__(self, testFunc): host = MockHost() test_port = host.port_factory.get('test-win-xp', None) self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros()) unittest.TestCase.__init__(self, testFunc) def _tokenize(self, line): return TestExpectationParser._tokenize_line('path', line, 0) def assert_round_trip(self, in_string, expected_string=None): expectation = self._tokenize(in_string) if expected_string is None: expected_string = in_string self.assertEqual(expected_string, expectation.to_string(self._converter)) def assert_list_round_trip(self, in_string, expected_string=None): host = MockHost() parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], allow_rebaseline_modifier=False) expectations = parser.parse('path', in_string) if expected_string is None: expected_string = in_string self.assertEqual(expected_string, TestExpectations.list_to_string(expectations, self._converter)) def test_unparsed_to_string(self): expectation = TestExpectationLine() self.assertEqual(expectation.to_string(self._converter), '') expectation.comment = ' Qux.' self.assertEqual(expectation.to_string(self._converter), '# Qux.') expectation.name = 'bar' self.assertEqual(expectation.to_string(self._converter), 'bar # Qux.') expectation.modifiers = ['foo'] # FIXME: case should be preserved here but we can't until we drop the old syntax. 
self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar # Qux.') expectation.expectations = ['bAz'] self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ ] # Qux.') expectation.expectations = ['bAz1', 'baZ2'] self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ1 BAZ2 ] # Qux.') expectation.modifiers = ['foo1', 'foO2'] self.assertEqual(expectation.to_string(self._converter), '[ FOO1 FOO2 ] bar [ BAZ1 BAZ2 ] # Qux.') expectation.warnings.append('Oh the horror.') self.assertEqual(expectation.to_string(self._converter), '') expectation.original_string = 'Yes it is!' self.assertEqual(expectation.to_string(self._converter), 'Yes it is!') def test_unparsed_list_to_string(self): expectation = TestExpectationLine() expectation.comment = 'Qux.' expectation.name = 'bar' expectation.modifiers = ['foo'] expectation.expectations = ['bAz1', 'baZ2'] # FIXME: case should be preserved here but we can't until we drop the old syntax. self.assertEqual(TestExpectations.list_to_string([expectation]), '[ FOO ] bar [ BAZ1 BAZ2 ] #Qux.') def test_parsed_to_string(self): expectation_line = TestExpectationLine() expectation_line.parsed_bug_modifiers = ['BUGX'] expectation_line.name = 'test/name/for/realz.html' expectation_line.parsed_expectations = set([IMAGE]) self.assertEqual(expectation_line.to_string(self._converter), None) expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release')]) self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP Release ] test/name/for/realz.html [ ImageOnlyFailure ]') expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]) self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP ] test/name/for/realz.html [ ImageOnlyFailure ]') def test_serialize_parsed_expectations(self): expectation_line = TestExpectationLine() expectation_line.parsed_expectations = set([]) 
parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()]) self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), '') expectation_line.parsed_expectations = set([FAIL]) self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'fail') expectation_line.parsed_expectations = set([PASS, IMAGE]) self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass image') expectation_line.parsed_expectations = set([FAIL, PASS]) self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass fail') def test_serialize_parsed_modifier_string(self): expectation_line = TestExpectationLine() expectation_line.parsed_bug_modifiers = ['garden-o-matic'] expectation_line.parsed_modifiers = ['for', 'the'] self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), 'garden-o-matic for the') self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic for the win') expectation_line.parsed_bug_modifiers = [] expectation_line.parsed_modifiers = [] self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), '') self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'win') expectation_line.parsed_bug_modifiers = ['garden-o-matic', 'total', 'is'] self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win') expectation_line.parsed_bug_modifiers = [] expectation_line.parsed_modifiers = ['garden-o-matic', 'total', 'is'] self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win') def test_format_line(self): self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], 
'comment'), '[ MODIFIERS ] name [ EXPECTATIONS ] #comment') self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], None), '[ MODIFIERS ] name [ EXPECTATIONS ]') def test_string_roundtrip(self): self.assert_round_trip('') self.assert_round_trip('FOO') self.assert_round_trip('[') self.assert_round_trip('FOO [') self.assert_round_trip('FOO ] bar') self.assert_round_trip(' FOO [') self.assert_round_trip(' [ FOO ] ') self.assert_round_trip('[ FOO ] bar [ BAZ ]') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ') self.assert_round_trip('[ FOO ] ] ] bar BAZ') self.assert_round_trip('[ FOO ] ] ] bar [ BAZ ]') self.assert_round_trip('FOO ] ] bar ==== BAZ') self.assert_round_trip('=') self.assert_round_trip('#') self.assert_round_trip('# ') self.assert_round_trip('# Foo') self.assert_round_trip('# Foo') self.assert_round_trip('# Foo :') self.assert_round_trip('# Foo : =') def test_list_roundtrip(self): self.assert_list_round_trip('') self.assert_list_round_trip('\n') self.assert_list_round_trip('\n\n') self.assert_list_round_trip('bar') self.assert_list_round_trip('bar\n# Qux.') self.assert_list_round_trip('bar\n# Qux.\n') def test_reconstitute_only_these(self): lines = [] reconstitute_only_these = [] def add_line(matching_configurations, reconstitute): expectation_line = TestExpectationLine() expectation_line.original_string = "Nay" expectation_line.parsed_bug_modifiers = ['BUGX'] expectation_line.name = 'Yay' expectation_line.parsed_expectations = set([IMAGE]) expectation_line.matching_configurations = matching_configurations lines.append(expectation_line) if reconstitute: reconstitute_only_these.append(expectation_line) add_line(set([TestConfiguration('xp', 'x86', 'release')]), True) add_line(set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 
'debug')]), False) serialized = TestExpectations.list_to_string(lines, self._converter) self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nBug(x) [ XP ] Yay [ ImageOnlyFailure ]") serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these) self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nNay") def disabled_test_string_whitespace_stripping(self): # FIXME: Re-enable this test once we rework the code to no longer support the old syntax. self.assert_round_trip('\n', '') self.assert_round_trip(' [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]') self.assert_round_trip('[ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.') self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
unknown
codeparrot/codeparrot-clean
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.clients.admin; import java.util.Collection; /** * Options for the {@link Admin#deleteAcls(Collection)} call. */ public class DeleteAclsOptions extends AbstractOptions<DeleteAclsOptions> { /** * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the * AdminClient should be used. * */ // This method is retained to keep binary compatibility with 0.11 public DeleteAclsOptions timeoutMs(Integer timeoutMs) { this.timeoutMs = timeoutMs; return this; } }
java
github
https://github.com/apache/kafka
clients/src/main/java/org/apache/kafka/clients/admin/DeleteAclsOptions.java
package containerd import ( "context" "fmt" "regexp" "strconv" "strings" c8dimages "github.com/containerd/containerd/v2/core/images" cerrdefs "github.com/containerd/errdefs" "github.com/containerd/log" "github.com/containerd/platforms" "github.com/distribution/reference" dockerspec "github.com/moby/docker-image-spec/specs-go/v1" "github.com/moby/moby/v2/daemon/images" "github.com/moby/moby/v2/daemon/internal/image" "github.com/moby/moby/v2/daemon/server/imagebackend" "github.com/moby/moby/v2/errdefs" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) var errInconsistentData error = errors.New("consistency error: data changed during operation, retry") type errPlatformNotFound struct { wanted ocispec.Platform imageRef string } func (e *errPlatformNotFound) NotFound() {} func (e *errPlatformNotFound) Error() string { msg := "image with reference " + e.imageRef + " was found but does not provide " if e.wanted.OS != "" { msg += "the specified platform (" + platforms.FormatAll(e.wanted) + ")" } else { msg += "any platform" } return msg } // GetImage returns an image corresponding to the image referred to by refOrID. 
func (i *ImageService) GetImage(ctx context.Context, refOrID string, options imagebackend.GetImageOpts) (*image.Image, error) { img, err := i.resolveImage(ctx, refOrID) if err != nil { return nil, err } pm := i.matchRequestedOrDefault(platforms.OnlyStrict, options.Platform) im, err := i.getBestPresentImageManifest(ctx, img, pm) if err != nil { return nil, err } var ociImage dockerspec.DockerOCIImage err = im.ReadConfig(ctx, &ociImage) if err != nil { return nil, err } imgV1 := dockerOciImageToDockerImagePartial(image.ID(img.Target.Digest), ociImage) parent, err := i.getImageLabelByDigest(ctx, img.Target.Digest, imageLabelClassicBuilderParent) if err != nil { log.G(ctx).WithError(err).Warn("failed to determine Parent property") } else { imgV1.Parent = image.ID(parent) } target := im.Target() imgV1.Details = &image.Details{ ManifestDescriptor: &target, } return imgV1, nil } // getBestPresentImageManifest returns a platform-specific image manifest that best matches the provided platform matcher. // Only locally available platform images are considered. // If no image manifest matches the platform, an error is returned. func (i *ImageService) getBestPresentImageManifest(ctx context.Context, img c8dimages.Image, pm platforms.MatchComparer) (*ImageManifest, error) { var best *ImageManifest var bestPlatform ocispec.Platform err := i.walkImageManifests(ctx, img, func(im *ImageManifest) error { imPlatform, err := im.ImagePlatform(ctx) if err != nil { return err } if pm.Match(imPlatform) { if best == nil || pm.Less(imPlatform, bestPlatform) { best = im bestPlatform = imPlatform } } return nil }) if err != nil { return nil, err } if best == nil { err := &errPlatformNotFound{imageRef: imageFamiliarName(img)} if p, ok := pm.(platformMatcherWithRequestedPlatform); ok && p.Requested != nil { err.wanted = *p.Requested } return nil, err } return best, nil } // resolveDescriptor searches for a descriptor based on the given // reference or identifier. 
Returns the descriptor of // the image, which could be a manifest list, manifest, or config. func (i *ImageService) resolveDescriptor(ctx context.Context, refOrID string) (ocispec.Descriptor, error) { img, err := i.resolveImage(ctx, refOrID) if err != nil { return ocispec.Descriptor{}, err } return img.Target, nil } // ResolveImage looks up an image by reference or identifier in the image store. func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (c8dimages.Image, error) { return i.resolveImage(ctx, refOrID) } func (i *ImageService) resolveImage(ctx context.Context, refOrID string) (c8dimages.Image, error) { parsed, err := reference.ParseAnyReference(refOrID) if err != nil { return c8dimages.Image{}, errdefs.InvalidParameter(err) } digested, ok := parsed.(reference.Digested) if ok { imgs, err := i.images.List(ctx, "target.digest=="+digested.Digest().String()) if err != nil { return c8dimages.Image{}, errors.Wrap(err, "failed to lookup digest") } if len(imgs) == 0 { return c8dimages.Image{}, images.ErrImageDoesNotExist{Ref: parsed} } // If reference is both Named and Digested, make sure we don't match // images with a different repository even if digest matches. // For example, busybox@sha256:abcdef..., shouldn't match asdf@sha256:abcdef... if parsedNamed, ok := parsed.(reference.Named); ok { for _, img := range imgs { imgNamed, err := reference.ParseNormalizedNamed(img.Name) if err != nil { log.G(ctx).WithError(err).WithField("image", img.Name).Warn("image with invalid name encountered") continue } if parsedNamed.Name() == imgNamed.Name() { return img, nil } } return c8dimages.Image{}, images.ErrImageDoesNotExist{Ref: parsed} } return imgs[0], nil } // Try resolve by name:tag ref := reference.TagNameOnly(parsed.(reference.Named)).String() if img, err := i.images.Get(ctx, ref); err == nil { return img, nil } else if !cerrdefs.IsNotFound(err) { return c8dimages.Image{}, err } // If the identifier could be a short ID, attempt to match. 
if idWithoutAlgo := checkTruncatedID(refOrID); idWithoutAlgo != "" { // Valid ID. filters := []string{ fmt.Sprintf("name==%q", ref), // Or it could just look like one. "target.digest~=" + strconv.Quote(fmt.Sprintf(`^sha256:%s[0-9a-fA-F]{%d}$`, regexp.QuoteMeta(idWithoutAlgo), 64-len(idWithoutAlgo))), } imgs, err := i.images.List(ctx, filters...) if err != nil { return c8dimages.Image{}, err } if len(imgs) == 0 { return c8dimages.Image{}, images.ErrImageDoesNotExist{Ref: parsed} } if len(imgs) > 1 { digests := map[digest.Digest]struct{}{} for _, img := range imgs { if img.Name == ref { return img, nil } digests[img.Target.Digest] = struct{}{} } if len(digests) > 1 { return c8dimages.Image{}, errdefs.NotFound(errors.New("ambiguous reference")) } } return imgs[0], nil } return c8dimages.Image{}, images.ErrImageDoesNotExist{Ref: parsed} } // getAllImagesWithRepository returns a slice of images which name is a reference // pointing to the same repository as the given reference. func (i *ImageService) getAllImagesWithRepository(ctx context.Context, ref reference.Named) ([]c8dimages.Image, error) { nameFilter := "^" + regexp.QuoteMeta(ref.Name()) + ":" + reference.TagRegexp.String() + "$" return i.images.List(ctx, "name~="+strconv.Quote(nameFilter)) } func imageFamiliarName(img c8dimages.Image) string { if isDanglingImage(img) { return img.Target.Digest.String() } if ref, err := reference.ParseNamed(img.Name); err == nil { return reference.FamiliarString(ref) } return img.Name } // getImageLabelByDigest will return the value of the label for images // targeting the specified digest. // If images have different values, an errdefs.Conflict error will be returned. 
func (i *ImageService) getImageLabelByDigest(ctx context.Context, target digest.Digest, labelKey string) (string, error) { imgs, err := i.images.List(ctx, "target.digest=="+target.String()+",labels."+labelKey) if err != nil { return "", errdefs.System(err) } var value string for _, img := range imgs { if v, ok := img.Labels[labelKey]; ok { if value != "" && value != v { return value, errdefs.Conflict(fmt.Errorf("conflicting label value %q and %q", value, v)) } value = v } } return value, nil } // resolveAllReferences resolves the reference name or ID to an image and returns all the images with // the same target. // // Returns: // // 1: *(github.com/containerd/containerd/images).Image // // An image match from the image store with the provided refOrID // // 2: [](github.com/containerd/containerd/images).Image // // List of all images with the same target that matches the refOrID. If the first argument is // non-nil, the image list will all have the same target as the matched image. If the first // argument is nil but the list is non-empty, this value is a list of all the images with a // target that matches the digest provided in the refOrID, but none are an image name match // to refOrID. // // 3: error // // An error looking up refOrID or no images found with matching name or target. Note that the first // argument may be nil with a nil error if the second argument is non-empty. func (i *ImageService) resolveAllReferences(ctx context.Context, refOrID string) (*c8dimages.Image, []c8dimages.Image, error) { parsed, err := reference.ParseAnyReference(refOrID) if err != nil { return nil, nil, errdefs.InvalidParameter(err) } var dgst digest.Digest var img *c8dimages.Image if idWithoutAlgo := checkTruncatedID(refOrID); idWithoutAlgo != "" { // Valid ID. 
if d, ok := parsed.(reference.Digested); ok { if cimg, err := i.images.Get(ctx, d.String()); err == nil { img = &cimg dgst = d.Digest() if cimg.Target.Digest != dgst { // Ambiguous image reference, use reference name log.G(ctx).WithField("image", refOrID).WithField("target", cimg.Target.Digest).Warn("digest reference points to image with a different digest") dgst = cimg.Target.Digest } } else if !cerrdefs.IsNotFound(err) { return nil, nil, err } else { dgst = d.Digest() } } else { name := reference.TagNameOnly(parsed.(reference.Named)).String() filters := []string{ fmt.Sprintf("name==%q", name), // Or it could just look like one. "target.digest~=" + strconv.Quote(fmt.Sprintf(`^sha256:%s[0-9a-fA-F]{%d}$`, regexp.QuoteMeta(idWithoutAlgo), 64-len(idWithoutAlgo))), } imgs, err := i.images.List(ctx, filters...) if err != nil { return nil, nil, err } if len(imgs) == 0 { return nil, nil, images.ErrImageDoesNotExist{Ref: parsed} } for _, limg := range imgs { if limg.Name == name { copyImg := limg img = &copyImg } if dgst != "" { if limg.Target.Digest != dgst { return nil, nil, errdefs.NotFound(errors.New("ambiguous reference")) } } else { dgst = limg.Target.Digest } } // Return immediately if target digest matches already included if img == nil || len(imgs) > 1 { return img, imgs, nil } } } else { named, ok := parsed.(reference.Named) if !ok { return nil, nil, errdefs.InvalidParameter(errors.New("invalid name reference")) } digested, ok := parsed.(reference.Digested) if ok { dgst = digested.Digest() } name := reference.TagNameOnly(named).String() cimg, err := i.images.Get(ctx, name) if err != nil { if !cerrdefs.IsNotFound(err) { return nil, nil, err } // If digest is given, continue looking up for matching targets. // There will be no exact match found but the caller may attempt // to match across images with the matching target. 
if dgst == "" { return nil, nil, images.ErrImageDoesNotExist{Ref: parsed} } } else { img = &cimg if dgst != "" && img.Target.Digest != dgst { // Ambiguous image reference, use reference name log.G(ctx).WithField("image", name).WithField("target", cimg.Target.Digest).Warn("digest reference points to image with a different digest") } dgst = img.Target.Digest } } // Lookup up all associated images and check for consistency with first reference // Ideally operations dependent on multiple values will rely on the garbage collector, // this logic will just check for consistency and throw an error imgs, err := i.images.List(ctx, "target.digest=="+dgst.String()) if err != nil { return nil, nil, errors.Wrap(err, "failed to lookup digest") } if len(imgs) == 0 { if img == nil { return nil, nil, images.ErrImageDoesNotExist{Ref: parsed} } err = errInconsistentData } else if img != nil { // Check to ensure the original img is in the list still err = errInconsistentData for _, rimg := range imgs { if rimg.Name == img.Name { err = nil break } } } if errors.Is(err, errInconsistentData) { if retries, ok := ctx.Value(errInconsistentData).(int); !ok || retries < 3 { log.G(ctx).WithFields(log.Fields{"retry": retries, "ref": refOrID}).Info("image changed during lookup, retrying") return i.resolveAllReferences(context.WithValue(ctx, errInconsistentData, retries+1), refOrID) } return nil, nil, err } return img, imgs, nil } // checkTruncatedID checks id for validity. If id is invalid, an empty string // is returned; otherwise, the ID without the optional "sha256:" prefix is // returned. The validity check is equivalent to // regexp.MustCompile(`^(sha256:)?([a-f0-9]{4,64})$`).MatchString(id). func checkTruncatedID(id string) string { id = strings.TrimPrefix(id, "sha256:") if l := len(id); l < 4 || l > 64 { return "" } for _, c := range id { if (c < '0' || c > '9') && (c < 'a' || c > 'f') { return "" } } return id }
go
github
https://github.com/moby/moby
daemon/containerd/image.go
#ifndef Py_INTERNAL_PYMEM_H #define Py_INTERNAL_PYMEM_H #include "pycore_llist.h" // struct llist_node #ifdef __cplusplus extern "C" { #endif #ifndef Py_BUILD_CORE # error "this header requires Py_BUILD_CORE define" #endif // Try to get the allocators name set by _PyMem_SetupAllocators(). // Return NULL if unknown. // Export for '_testinternalcapi' shared extension. PyAPI_FUNC(const char*) _PyMem_GetCurrentAllocatorName(void); // strdup() using PyMem_RawMalloc() extern char* _PyMem_RawStrdup(const char *str); // strdup() using PyMem_Malloc(). // Export for '_pickle ' shared extension. PyAPI_FUNC(char*) _PyMem_Strdup(const char *str); // wcsdup() using PyMem_RawMalloc() extern wchar_t* _PyMem_RawWcsdup(const wchar_t *str); /* Special bytes broadcast into debug memory blocks at appropriate times. Strings of these are unlikely to be valid addresses, floats, ints or 7-bit ASCII. - PYMEM_CLEANBYTE: clean (newly allocated) memory - PYMEM_DEADBYTE dead (newly freed) memory - PYMEM_FORBIDDENBYTE: untouchable bytes at each end of a block Byte patterns 0xCB, 0xDB and 0xFB have been replaced with 0xCD, 0xDD and 0xFD to use the same values as Windows CRT debug malloc() and free(). If modified, _PyMem_IsPtrFreed() should be updated as well. */ #define PYMEM_CLEANBYTE 0xCD #define PYMEM_DEADBYTE 0xDD #define PYMEM_FORBIDDENBYTE 0xFD /* Heuristic checking if a pointer value is newly allocated (uninitialized), newly freed or NULL (is equal to zero). The pointer is not dereferenced, only the pointer value is checked. The heuristic relies on the debug hooks on Python memory allocators which fills newly allocated memory with CLEANBYTE (0xCD) and newly freed memory with DEADBYTE (0xDD). Detect also "untouchable bytes" marked with FORBIDDENBYTE (0xFD). 
*/ static inline int _PyMem_IsPtrFreed(const void *ptr) { uintptr_t value = (uintptr_t)ptr; #if SIZEOF_VOID_P == 8 return (value <= 0xff // NULL, 0x1, 0x2, ..., 0xff || value == (uintptr_t)0xCDCDCDCDCDCDCDCD || value == (uintptr_t)0xDDDDDDDDDDDDDDDD || value == (uintptr_t)0xFDFDFDFDFDFDFDFD || value >= (uintptr_t)0xFFFFFFFFFFFFFF00); // -0xff, ..., -2, -1 #elif SIZEOF_VOID_P == 4 return (value <= 0xff || value == (uintptr_t)0xCDCDCDCD || value == (uintptr_t)0xDDDDDDDD || value == (uintptr_t)0xFDFDFDFD || value >= (uintptr_t)0xFFFFFF00); #else # error "unknown pointer size" #endif } // Similar to _PyMem_IsPtrFreed() but expects an 'unsigned long' instead of a // pointer. static inline int _PyMem_IsULongFreed(unsigned long value) { #if SIZEOF_LONG == 8 return (value == 0 || value == (unsigned long)0xCDCDCDCDCDCDCDCD || value == (unsigned long)0xDDDDDDDDDDDDDDDD || value == (unsigned long)0xFDFDFDFDFDFDFDFD || value == (unsigned long)0xFFFFFFFFFFFFFFFF); #elif SIZEOF_LONG == 4 return (value == 0 || value == (unsigned long)0xCDCDCDCD || value == (unsigned long)0xDDDDDDDD || value == (unsigned long)0xFDFDFDFD || value == (unsigned long)0xFFFFFFFF); #else # error "unknown long size" #endif } extern int _PyMem_GetAllocatorName( const char *name, PyMemAllocatorName *allocator); /* Configure the Python memory allocators. Pass PYMEM_ALLOCATOR_DEFAULT to use default allocators. PYMEM_ALLOCATOR_NOT_SET does nothing. */ extern int _PyMem_SetupAllocators(PyMemAllocatorName allocator); // Default raw memory allocator that is not affected by PyMem_SetAllocator() extern void *_PyMem_DefaultRawMalloc(size_t); extern void *_PyMem_DefaultRawCalloc(size_t, size_t); extern void *_PyMem_DefaultRawRealloc(void *, size_t); extern void _PyMem_DefaultRawFree(void *); extern wchar_t *_PyMem_DefaultRawWcsdup(const wchar_t *str); /* Is the debug allocator enabled? */ extern int _PyMem_DebugEnabled(void); // Enqueue a pointer to be freed possibly after some delay. 
extern void _PyMem_FreeDelayed(void *ptr, size_t size); // Periodically process delayed free requests. extern void _PyMem_ProcessDelayed(PyThreadState *tstate); // Periodically process delayed free requests when the world is stopped. // Notify of any objects whic should be freeed. typedef void (*delayed_dealloc_cb)(PyObject *, void *); extern void _PyMem_ProcessDelayedNoDealloc(PyThreadState *tstate, delayed_dealloc_cb cb, void *state); // Abandon all thread-local delayed free requests and push them to the // interpreter's queue. extern void _PyMem_AbandonDelayed(PyThreadState *tstate); // On interpreter shutdown, frees all delayed free requests. extern void _PyMem_FiniDelayed(PyInterpreterState *interp); #ifdef __cplusplus } #endif #endif // !Py_INTERNAL_PYMEM_H
c
github
https://github.com/python/cpython
Include/internal/pycore_pymem.h
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest from cStringIO import StringIO from zipfile import ZipFile from compiled_file_system import CompiledFileSystem from directory_zipper import DirectoryZipper from file_system import FileNotFoundError from test_file_system import TestFileSystem from object_store_creator import ObjectStoreCreator _TEST_DATA = { 'top': { 'one.txt': 'one.txt contents', 'two': { 'three.txt': 'three.txt contents', 'four.txt': 'four.txt contents', }, } } class DirectoryZipperTest(unittest.TestCase): def setUp(self): self._directory_zipper = DirectoryZipper( CompiledFileSystem.Factory(ObjectStoreCreator.ForTest()), TestFileSystem(_TEST_DATA)) def testTopZip(self): top_zip = ZipFile(StringIO(self._directory_zipper.Zip('top/').Get())) self.assertEqual(['top/one.txt', 'top/two/four.txt', 'top/two/three.txt'], sorted(top_zip.namelist())) self.assertEqual('one.txt contents', top_zip.read('top/one.txt')) self.assertEqual('three.txt contents', top_zip.read('top/two/three.txt')) self.assertEqual('four.txt contents', top_zip.read('top/two/four.txt')) def testTwoZip(self): two_zip = ZipFile(StringIO(self._directory_zipper.Zip('top/two/').Get())) self.assertEqual(['two/four.txt', 'two/three.txt'], sorted(two_zip.namelist())) self.assertEqual('three.txt contents', two_zip.read('two/three.txt')) self.assertEqual('four.txt contents', two_zip.read('two/four.txt')) def testNotFound(self): self.assertRaises(FileNotFoundError, self._directory_zipper.Zip('notfound/').Get) if __name__ == '__main__': unittest.main()
unknown
codeparrot/codeparrot-clean
// runoutput ./rotate.go // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Generate test of bit rotations. // The output is compiled and run. package main const mode = 1
go
github
https://github.com/golang/go
test/rotate1.go
from django.contrib.admin import ( HORIZONTAL, VERTICAL, AdminSite, ModelAdmin, StackedInline, TabularInline, action, autodiscover, display, register, site, ) from django.contrib.gis.admin.options import GISModelAdmin __all__ = [ "HORIZONTAL", "VERTICAL", "AdminSite", "ModelAdmin", "StackedInline", "TabularInline", "action", "autodiscover", "display", "register", "site", "GISModelAdmin", ]
python
github
https://github.com/django/django
django/contrib/gis/admin/__init__.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from six.moves import http_client from keystone.tests.unit import test_v3 class BaseTestCase(test_v3.RestfulTestCase): EXTENSION_TO_ADD = 'simple_cert_extension' CA_PATH = '/v3/OS-SIMPLE-CERT/ca' CERT_PATH = '/v3/OS-SIMPLE-CERT/certificates' class TestSimpleCert(BaseTestCase): def request_cert(self, path): content_type = 'application/x-pem-file' response = self.request(app=self.public_app, method='GET', path=path, headers={'Accept': content_type}, expected_status=http_client.OK) self.assertEqual(content_type, response.content_type.lower()) self.assertIn('---BEGIN', response.body) return response def test_ca_cert(self): self.request_cert(self.CA_PATH) def test_signing_cert(self): self.request_cert(self.CERT_PATH) def test_missing_file(self): # these files do not exist self.config_fixture.config(group='signing', ca_certs=uuid.uuid4().hex, certfile=uuid.uuid4().hex) for path in [self.CA_PATH, self.CERT_PATH]: self.request(app=self.public_app, method='GET', path=path, expected_status=http_client.INTERNAL_SERVER_ERROR)
unknown
codeparrot/codeparrot-clean
#### Update roles for users in CSV #### Example: #### updateUserRoles.py -u myuser -p mypassword -portal https://esri.maps.arcgis.com -file c:\temp\agolinput.csv import csv import argparse import sys from agoTools.admin import Admin from agoTools.admin import UsersAttributes from agoTools.admin import UserAttributes def _raw_input(prompt=None, stream=None, input=None): # A raw_input() replacement that doesn't save the string in the # GNU readline history. if not stream: stream = sys.stderr if not input: input = sys.stdin prompt = str(prompt) if prompt: stream.write(prompt) stream.flush() # NOTE: The Python C API calls flockfile() (and unlock) during readline. line = input.readline() if not line: raise EOFError if line[-1] == '\n': line = line[:-1] return line parser = argparse.ArgumentParser() parser.add_argument('-u', '--user') parser.add_argument('-p', '--password') parser.add_argument('-file', '--file') parser.add_argument('-portal', '--portal') args = parser.parse_args() inputFile = '' if args.file == None: args.file = _raw_input("CSV path: ") if args.user == None: args.user = _raw_input("Username:") if args.portal == None: args.portal = _raw_input("Portal: ") args.portal = str(args.portal).replace("http://","https://") agoAdmin = Admin(args.user,args.portal,args.password) if args.file != None: inputFile=args.file with open(inputFile) as input: dataReader = csv.DictReader(input) users=UsersAttributes(dataReader) agoAdmin.updateUserRoles(users)
unknown
codeparrot/codeparrot-clean
# As usual, a bit of setup import time, os, json import numpy as np from scipy.misc import imread, imresize import matplotlib.pyplot as plt from cs231n.classifiers.pretrained_cnn import PretrainedCNN from cs231n.data_utils import load_tiny_imagenet from cs231n.image_utils import blur_image, deprocess_image, preprocess_image #%matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython #%load_ext autoreload #%autoreload 2 data = load_tiny_imagenet('cs231n/datasets/tiny-imagenet-100-A', subtract_mean=True) model = PretrainedCNN(h5_file='cs231n/datasets/pretrained_model.h5') def create_class_visualization(target_y, model, **kwargs): """ Perform optimization over the image to generate class visualizations. Inputs: - target_y: Integer in the range [0, 100) giving the target class - model: A PretrainedCNN that will be used for generation Keyword arguments: - learning_rate: Floating point number giving the learning rate - blur_every: An integer; how often to blur the image as a regularizer - l2_reg: Floating point number giving L2 regularization strength on the image; this is lambda in the equation above. 
- max_jitter: How much random jitter to add to the image as regularization - num_iterations: How many iterations to run for - show_every: How often to show the image """ learning_rate = kwargs.pop('learning_rate', 10000) blur_every = kwargs.pop('blur_every', 1) l2_reg = kwargs.pop('l2_reg', 1e-6) max_jitter = kwargs.pop('max_jitter', 4) num_iterations = kwargs.pop('num_iterations', 100) show_every = kwargs.pop('show_every', 25) X = np.random.randn(1, 3, 64, 64) mode = 'test' for t in xrange(num_iterations): # As a regularizer, add random jitter to the image ox, oy = np.random.randint(-max_jitter, max_jitter+1, 2) X = np.roll(np.roll(X, ox, -1), oy, -2) scores, cache = model.forward(X, mode=mode) class_mask = np.zeros(scores.shape) class_mask[0,target_y] = 1 scores = scores * class_mask dX, grads = model.backward(scores, cache) dX = dX - l2_reg * X X = X + learning_rate * dX # Undo the jitter X = np.roll(np.roll(X, -ox, -1), -oy, -2) # As a regularizer, clip the image X = np.clip(X, -data['mean_image'], 255.0 - data['mean_image']) # As a regularizer, periodically blur the image if t % blur_every == 0: X = blur_image(X) # Periodically show the image if t % show_every == 0: plt.imshow(deprocess_image(X, data['mean_image'])) plt.gcf().set_size_inches(3, 3) plt.axis('off') img_path = 'images/class_%d_%d.jpg' % (target_y, t) plt.savefig(img_path) return X ''' # visualize class target_y = 50 print data['class_names'][target_y] X = create_class_visualization(target_y, model, l2_reg=2e-5,num_iterations=300, show_every=100) ''' # Deep Dream def deepdream(X, layer, model, **kwargs): """ Generate a DeepDream image. 
Inputs: - X: Starting image, of shape (1, 3, H, W) - layer: Index of layer at which to dream - model: A PretrainedCNN object Keyword arguments: - learning_rate: How much to update the image at each iteration - max_jitter: Maximum number of pixels for jitter regularization - num_iterations: How many iterations to run for - show_every: How often to show the generated image """ X = X.copy() learning_rate = kwargs.pop('learning_rate', 5.0) max_jitter = kwargs.pop('max_jitter', 16) num_iterations = kwargs.pop('num_iterations', 100) show_every = kwargs.pop('show_every', 25) for t in xrange(num_iterations): # As a regularizer, add random jitter to the image ox, oy = np.random.randint(-max_jitter, max_jitter+1, 2) X = np.roll(np.roll(X, ox, -1), oy, -2) activation, cache = model.forward(X, mode='test', start=0, end=layer) dX, grads = model.backward(activation, cache) X = X + learning_rate * dX # Undo the jitter X = np.roll(np.roll(X, -ox, -1), -oy, -2) # As a regularizer, clip the image mean_pixel = data['mean_image'].mean(axis=(1, 2), keepdims=True) X = np.clip(X, -mean_pixel, 255.0 - mean_pixel) # Periodically show the image if t == 0 or (t + 1) % show_every == 0: img = deprocess_image(X, data['mean_image'], mean='pixel') plt.imshow(img) plt.title('t = %d' % (t + 1)) plt.gcf().set_size_inches(8, 8) plt.axis('off') filename = 'images/deepdream_%d.jpg' % (t+1) plt.savefig(filename) return X def read_image(filename, max_size): """ Read an image from disk and resize it so its larger side is max_size """ img = imread(filename) H, W, _ = img.shape if H >= W: img = imresize(img, (max_size, int(W * float(max_size) / H))) elif H < W: img = imresize(img, (int(H * float(max_size) / W), max_size)) return img filename = 'kitten.jpg' max_size = 256 img = read_image(filename, max_size) plt.imshow(img) plt.axis('off') # Preprocess the image by converting to float, transposing, # and performing mean subtraction. 
img_pre = preprocess_image(img, data['mean_image'], mean='pixel') out = deepdream(img_pre, 7, model, learning_rate=2000)
unknown
codeparrot/codeparrot-clean
//===-- FunctionTests.cpp -------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "support/Function.h" #include "gmock/gmock.h" #include "gtest/gtest.h" namespace clang { namespace clangd { namespace { TEST(EventTest, Subscriptions) { Event<int> E; int N = 0; { Event<int>::Subscription SubA; // No subscriptions are active. E.broadcast(42); EXPECT_EQ(0, N); Event<int>::Subscription SubB = E.observe([&](int) { ++N; }); // Now one is active. E.broadcast(42); EXPECT_EQ(1, N); SubA = E.observe([&](int) { ++N; }); // Both are active. EXPECT_EQ(1, N); E.broadcast(42); EXPECT_EQ(3, N); SubA = std::move(SubB); // One is active. EXPECT_EQ(3, N); E.broadcast(42); EXPECT_EQ(4, N); } // None are active. EXPECT_EQ(4, N); E.broadcast(42); EXPECT_EQ(4, N); } } // namespace } // namespace clangd } // namespace clang
cpp
github
https://github.com/llvm/llvm-project
clang-tools-extra/clangd/unittests/support/FunctionTests.cpp
from sympy import Symbol, exp, log from sympy.calculus.singularities import (singularities, is_increasing, is_strictly_increasing, is_decreasing, is_strictly_decreasing, is_monotonic) from sympy.sets import Interval, FiniteSet, EmptySet from sympy import oo, S, I, sqrt from sympy.utilities.pytest import XFAIL from sympy.abc import x, y a = Symbol('a', negative=True) b = Symbol('b', positive=True) def test_singularities(): x = Symbol('x') y = Symbol('y') assert singularities(x**2, x) == S.EmptySet assert singularities(x/(x**2 + 3*x + 2), x) == FiniteSet(-2, -1) assert singularities(1/(x**2 + 1), x) == FiniteSet(I, -I) assert singularities(x/(x**3 + 1), x) == FiniteSet(-1, (1 - sqrt(3)*I)/2, (1 + sqrt(3)*I)/2) assert singularities(1/(y**2 + 2*I*y + 1), y) == FiniteSet(-I + sqrt(2)*I, -I - sqrt(2)*I) @XFAIL def test_singularities_non_rational(): x = Symbol('x', real=True) assert singularities(exp(1/x), x) == (0) assert singularities(log((x - 2)**2), x) == (2) def test_is_increasing(): assert is_increasing(x**3 - 3*x**2 + 4*x, S.Reals) assert is_increasing(-x**2, Interval(-oo, 0)) assert is_increasing(-x**2, Interval(0, oo)) is False assert is_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval(-2, 3)) is False assert is_increasing(x**2 + y, Interval(1, oo), x) is True assert is_increasing(-x**2*a, Interval(1, oo), x) is True assert is_increasing(1) is True def test_is_strictly_increasing(): assert is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Ropen(-oo, -2)) assert is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Lopen(3, oo)) assert is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.open(-2, 3)) is False assert is_strictly_increasing(-x**2, Interval(0, oo)) is False assert is_strictly_decreasing(1) is False def test_is_decreasing(): assert is_decreasing(1/(x**2 - 3*x), Interval.open(1.5, 3)) assert is_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo)) assert is_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2)) is False assert 
is_decreasing(-x**2, Interval(-oo, 0)) is False assert is_decreasing(-x**2*b, Interval(-oo, 0), x) is False def test_is_strictly_decreasing(): assert is_strictly_decreasing(1/(x**2 - 3*x), Interval.open(1.5, 3)) assert is_strictly_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo)) assert is_strictly_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2)) is False assert is_strictly_decreasing(-x**2, Interval(-oo, 0)) is False assert is_strictly_decreasing(1) is False def test_is_monotonic(): assert is_monotonic(1/(x**2 - 3*x), Interval.open(1.5, 3)) assert is_monotonic(1/(x**2 - 3*x), Interval.Lopen(3, oo)) assert is_monotonic(x**3 - 3*x**2 + 4*x, S.Reals) assert is_monotonic(-x**2, S.Reals) is False assert is_monotonic(x**2 + y + 1, Interval(1, 2), x) is True
unknown
codeparrot/codeparrot-clean
""" Module for formatting output data into CSV files. """ from __future__ import annotations from collections.abc import ( Hashable, Iterable, Iterator, Sequence, ) import csv as csvlib import os from typing import ( TYPE_CHECKING, Any, cast, ) import numpy as np from pandas._libs import writers as libwriters from pandas._typing import SequenceNotStr from pandas.util._decorators import cache_readonly from pandas.core.dtypes.generic import ( ABCDatetimeIndex, ABCIndex, ABCMultiIndex, ABCPeriodIndex, ) from pandas.core.dtypes.missing import notna from pandas.core.indexes.api import Index from pandas.io.common import get_handle if TYPE_CHECKING: from pandas._typing import ( CompressionOptions, FilePath, FloatFormatType, IndexLabel, StorageOptions, WriteBuffer, npt, ) from pandas.io.formats.format import DataFrameFormatter _DEFAULT_CHUNKSIZE_CELLS = 100_000 class CSVFormatter: cols: npt.NDArray[np.object_] def __init__( self, formatter: DataFrameFormatter, path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] = "", sep: str = ",", cols: Sequence[Hashable] | None = None, index_label: IndexLabel | None = None, mode: str = "w", encoding: str | None = None, errors: str = "strict", compression: CompressionOptions = "infer", quoting: int | None = None, lineterminator: str | None = "\n", chunksize: int | None = None, quotechar: str | None = '"', date_format: str | None = None, doublequote: bool = True, escapechar: str | None = None, storage_options: StorageOptions | None = None, ) -> None: self.fmt = formatter self.obj = self.fmt.frame self.filepath_or_buffer = path_or_buf self.encoding = encoding self.compression: CompressionOptions = compression self.mode = mode self.storage_options = storage_options self.sep = sep self.index_label = self._initialize_index_label(index_label) self.errors = errors self.quoting = quoting or csvlib.QUOTE_MINIMAL self.doublequote = doublequote self.escapechar = escapechar self.quotechar = self._initialize_quotechar(quotechar) 
self.lineterminator = lineterminator or os.linesep self.date_format = date_format self.cols = self._initialize_columns(cols) self.chunksize = self._initialize_chunksize(chunksize) @property def na_rep(self) -> str: return self.fmt.na_rep @property def float_format(self) -> FloatFormatType | None: return self.fmt.float_format @property def decimal(self) -> str: return self.fmt.decimal @property def header(self) -> bool | SequenceNotStr[str]: return self.fmt.header @property def index(self) -> bool: return self.fmt.index def _initialize_index_label(self, index_label: IndexLabel | None) -> IndexLabel: if index_label is not False: if index_label is None: return self._get_index_label_from_obj() elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndex)): # given a string for a DF with Index return [index_label] return index_label def _get_index_label_from_obj(self) -> Sequence[Hashable]: if isinstance(self.obj.index, ABCMultiIndex): return self._get_index_label_multiindex() else: return self._get_index_label_flat() def _get_index_label_multiindex(self) -> Sequence[Hashable]: return [name or "" for name in self.obj.index.names] def _get_index_label_flat(self) -> Sequence[Hashable]: index_label = self.obj.index.name return [""] if index_label is None else [index_label] def _initialize_quotechar(self, quotechar: str | None) -> str | None: if self.quoting != csvlib.QUOTE_NONE or self.escapechar is not None: # prevents crash in _csv return quotechar return None @property def has_mi_columns(self) -> bool: return bool(isinstance(self.obj.columns, ABCMultiIndex)) def _initialize_columns( self, cols: Iterable[Hashable] | None ) -> npt.NDArray[np.object_]: # validate mi options if self.has_mi_columns: if cols is not None: msg = "cannot specify cols with a MultiIndex on the columns" raise TypeError(msg) if cols is not None: if isinstance(cols, ABCIndex): cols = cols._get_values_for_csv(**self._number_format) else: cols = list(cols) self.obj = self.obj.loc[:, cols] # 
update columns to include possible multiplicity of dupes # and make sure cols is just a list of labels new_cols = self.obj.columns return new_cols._get_values_for_csv(**self._number_format) def _initialize_chunksize(self, chunksize: int | None) -> int: if chunksize is None: return (_DEFAULT_CHUNKSIZE_CELLS // (len(self.cols) or 1)) or 1 return int(chunksize) @property def _number_format(self) -> dict[str, Any]: """Dictionary used for storing number formatting settings.""" return { "na_rep": self.na_rep, "float_format": self.float_format, "date_format": self.date_format, "quoting": self.quoting, "decimal": self.decimal, } @cache_readonly def data_index(self) -> Index: data_index = self.obj.index if ( isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex)) and self.date_format is not None ): data_index = Index( [x.strftime(self.date_format) if notna(x) else "" for x in data_index] ) elif isinstance(data_index, ABCMultiIndex): data_index = data_index.remove_unused_levels() return data_index @property def nlevels(self) -> int: if self.index: return getattr(self.data_index, "nlevels", 1) else: return 0 @property def _has_aliases(self) -> bool: return isinstance(self.header, (tuple, list, np.ndarray, ABCIndex)) @property def _need_to_save_header(self) -> bool: return bool(self._has_aliases or self.header) @property def write_cols(self) -> SequenceNotStr[Hashable]: if self._has_aliases: assert not isinstance(self.header, bool) if len(self.header) != len(self.cols): raise ValueError( f"Writing {len(self.cols)} cols but got {len(self.header)} aliases" ) return self.header else: # self.cols is an ndarray derived from Index._get_values_for_csv, # so its entries are strings, i.e. 
hashable return cast(SequenceNotStr[Hashable], self.cols) @property def encoded_labels(self) -> list[Hashable]: encoded_labels: list[Hashable] = [] if self.index and self.index_label: assert isinstance(self.index_label, Sequence) encoded_labels = list(self.index_label) if not self.has_mi_columns or self._has_aliases: encoded_labels += list(self.write_cols) return encoded_labels def save(self) -> None: """ Create the writer & save. """ # apply compression and byte/text conversion with get_handle( self.filepath_or_buffer, self.mode, encoding=self.encoding, errors=self.errors, compression=self.compression, storage_options=self.storage_options, ) as handles: # Note: self.encoding is irrelevant here # error: Argument "quoting" to "writer" has incompatible type "int"; # expected "Literal[0, 1, 2, 3]" self.writer = csvlib.writer( handles.handle, lineterminator=self.lineterminator, delimiter=self.sep, quoting=self.quoting, # type: ignore[arg-type] doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar, ) self._save() def _save(self) -> None: if self._need_to_save_header: self._save_header() self._save_body() def _save_header(self) -> None: if not self.has_mi_columns or self._has_aliases: self.writer.writerow(self.encoded_labels) else: for row in self._generate_multiindex_header_rows(): self.writer.writerow(row) def _generate_multiindex_header_rows(self) -> Iterator[list[Hashable]]: columns = self.obj.columns for i in range(columns.nlevels): # we need at least 1 index column to write our col names col_line = [] if self.index: # name is the first column col_line.append(columns.names[i]) if isinstance(self.index_label, list) and len(self.index_label) > 1: col_line.extend([""] * (len(self.index_label) - 1)) col_line.extend(columns._get_level_values(i)) yield col_line # Write out the index line if it's not empty. # Otherwise, we will print out an extraneous # blank line between the mi and the data rows. 
if self.encoded_labels and set(self.encoded_labels) != {""}: yield self.encoded_labels + [""] * len(columns) def _save_body(self) -> None: nrows = len(self.data_index) chunks = (nrows // self.chunksize) + 1 for i in range(chunks): start_i = i * self.chunksize end_i = min(start_i + self.chunksize, nrows) if start_i >= end_i: break self._save_chunk(start_i, end_i) def _save_chunk(self, start_i: int, end_i: int) -> None: # create the data for a chunk slicer = slice(start_i, end_i) df = self.obj.iloc[slicer] res = df._get_values_for_csv(**self._number_format) data = list(res._iter_column_arrays()) ix = ( self.data_index[slicer]._get_values_for_csv(**self._number_format) if self.nlevels != 0 else np.empty(end_i - start_i) ) libwriters.write_csv_rows( data, ix, self.nlevels, self.cols, self.writer, )
python
github
https://github.com/pandas-dev/pandas
pandas/io/formats/csvs.py
# Configuring the builder. ## Install prerequisites. ``` $ sudo dnf install podman podman-docker jq ``` ## Add services. ``` $ sudo cp self-hosted-builder/*.service /etc/systemd/system/ $ sudo systemctl daemon-reload ``` ## Download qemu-user-static image ``` # sudo docker pull docker.io/iiilinuxibmcom/qemu-user-static:6.1.0-1 ``` ## Autostart the x86_64 emulation support. ``` $ sudo systemctl enable --now qemu-user-static ``` ## Rebuild the image First build s390x builder image `docker.io/pytorch/manylinuxs390x-builder`, using following commands: ``` $ cd ~ $ git clone https://github.com/pytorch/pytorch $ cd pytorch $ git submodule update --init --recursive $ GPU_ARCH_TYPE=cpu-s390x "$(pwd)/.ci/docker/manywheel/build.sh" manylinuxs390x-builder $ docker image tag localhost/pytorch/manylinuxs390x-builder docker.io/pytorch/manylinuxs390x-builder:cpu-s390x $ docker image save -o ~/manywheel-s390x.tar docker.io/pytorch/manylinuxs390x-builder:cpu-s390x ``` Next step is to build `actions-runner` image using: ``` $ cd self-hosted-builder $ sudo docker build \ --pull \ -f actions-runner.Dockerfile \ -t iiilinuxibmcom/actions-runner.<name> \ . ``` If there are failures, ensure that selinux doesn't prevent it from working. In worst case, selinux can be disabled with `setenforce 0`. 
Now prepare all necessary files for runner registration: ``` $ sudo mkdir -p /etc/actions-runner/<name> $ sudo chmod 700 /etc/actions-runner/<name> $ sudo /bin/cp <github_app_private_key_file> /etc/actions-runner/<name>/key_private.pem $ sudo echo <github_app_id> | sudo tee /etc/actions-runner/<name>/appid.env $ sudo echo <github_app_install_id> | sudo tee /etc/actions-runner/<name>/installid.env $ sudo echo NAME=<worker_name> | sudo tee /etc/actions-runner/<name>/env $ sudo echo ORG=<github_org> | sudo tee -a /etc/actions-runner/<name>/env $ cd self-hosted-builder $ sudo /bin/cp helpers/*.sh /usr/local/bin/ $ sudo chmod 755 /usr/local/bin/app_token.sh /usr/local/bin/gh_token_generator.sh ``` ## Autostart the runner. ``` $ sudo systemctl enable --now actions-runner@$NAME ```
unknown
github
https://github.com/pytorch/pytorch
.github/scripts/s390x-ci/README.md
# -*- coding: utf-8 -*- # This file is part of the pymfony package. # # (c) Alexandre Quercia <alquerci@email.com> # # For the full copyright and license information, please view the LICENSE # file that was distributed with this source code. from __future__ import absolute_import; from pymfony.component.system import Object; from pymfony.component.dependency.exception import OutOfBoundsException; from pymfony.component.dependency.exception import InvalidArgumentException; from pymfony.component.dependency.interface import ContainerInterface; """ """ class Alias(Object): def __init__(self, identifier, public=True): self.__id = str(identifier).lower(); self.__public = bool(public); def isPublic(self): return self.__public; def setPublic(self, boolean): self.__public = bool(boolean); def __str__(self): return self.__id; class Reference(Object): """Reference represents a service reference. @author: Fabien Potencier <fabien@symfony.com> @api """ def __init__(self, identifier, invalidBehavior = ContainerInterface.EXCEPTION_ON_INVALID_REFERENCE, strict = True): """Constructor. @param: string id The service identifier: @param int invalidBehavior The behavior when the service does not exist @param Boolean strict Sets how this reference is validated @see Container """ self.__id = None; self.__invalidBehavior = None; self.__strict = None; self.__id = str(identifier).lower(); self.__invalidBehavior = invalidBehavior; self.__strict = strict; def __str__(self): """__str__. @return: string The service identifier: """ return str(self.__id); def getInvalidBehavior(self): """Returns the behavior to be used when the service does not exist. @return: int """ return self.__invalidBehavior; def isStrict(self): """Returns True when this Reference is strict @return: Boolean """ return self.__strict; class Definition(Object): """Definition represents a service definition. 
@author: Fabien Potencier <fabien@symfony.com> @api """ def __init__(self, className=None, arguments=None): """Constructor. @param: string class The service class): @param array arguments An array of arguments to pass to the service constructor @api """ if arguments is None: arguments = list(); assert isinstance(arguments, list); self._arguments = arguments; self.__class = className; self.__file = None; self.__factoryClass = None; self.__factoryMethod = None; self.__factoryService = None; self.__configurator = None; self.__properties = dict(); self.__calls = list(); self.__tags = dict(); self.__public = True; self.__synthetic = False; self.__abstract = False; self.__scope = ContainerInterface.SCOPE_CONTAINER; def setFactoryClass(self, factoryClass): """Sets the name of the class that(, acts as a factory using the factory method,): which will be invoked statically. @param: string factoryClass The factory class name(Object): @return Definition The current instance @api """ self.__factoryClass = factoryClass; return self; def getFactoryClass(self): """Gets the factory class. @return: string The factory class name(Object): @api """ return self.__factoryClass; def setFactoryMethod(self, factoryMethod): """Sets the factory method able to create an instance of this class. @param: string factoryMethod The factory method name @return Definition The current instance @api """ self.__factoryMethod = factoryMethod; return self; def getFactoryMethod(self): """Gets the factory method. @return: string The factory method name @api """ return self.__factoryMethod; def setFactoryService(self, factoryService): """Sets the name of the service that acts as a factory using the factory method. @param: string factoryService The factory service id @return Definition The current instance @api """ self.__factoryService = factoryService; return self; def getFactoryService(self): """Gets the factory service id. 
@return: string The factory service id @api """ return self.__factoryService; def setClass(self, className): """Sets the service class. @param: string class The(, service class): @return Definition The current instance @api """ self.__class = className; return self; def getClass(self): """Gets the service class. @return: string The service class @api """ return self.__class; def setArguments(self, arguments): """Sets the arguments to pass to the service constructor/factory method. @param: list arguments An array of arguments @return Definition The current instance @api """ assert isinstance(arguments, list); self._arguments = arguments; return self; def setProperties(self, properties): """ @api: @param: dict properties An array of properties """ assert isinstance(properties, dict) self.__properties = properties; return self; def getProperties(self): """ @api: @return dict """ return self.__properties; def setProperty(self, key, value): """ @api: """ self.__properties[key] = value; return self; def addArgument(self, argument): """Adds an argument to pass to the service constructor/factory method. @param: mixed argument An argument @return Definition The current instance @api """ self._arguments.append(argument); return self; def replaceArgument(self, index, argument): """Sets a specific argument: @param: integer index @param mixed argument @return Definition The current instance @raise OutOfBoundsException When the replaced argument does not exist @api """ if index < 0 or index > len(self._arguments) - 1: raise OutOfBoundsException( 'The index "{!d}" is not in the range [0, {!d}].' ''.format(index, len(self._arguments) - 1) ); self._arguments[index] = argument; return self; def getArguments(self): """Gets the arguments to pass to the service constructor/factory method. @return: array The array of arguments @api """ return self._arguments; def getArgument(self, index): """Gets an argument to pass to the service constructor/factory method. 
@param: integer index @return mixed The argument value @raise OutOfBoundsException When the argument does not exist @api """ if index < 0 or index > len(self._arguments) - 1: raise OutOfBoundsException( 'The index "{!d}" is not in the range [0, {!d}].' ''.format(index, len(self._arguments) - 1) ); return self._arguments[index]; def setMethodCalls(self, calls): """Sets the methods to call after service initialization. @param: list calls An array of method calls list of [methodName, [arg1, ...]] @return Definition The current instance @api """ assert isinstance(calls, list); self.__calls = list(); for call in calls: assert isinstance(call, list); self.addMethodCall(call[0], call[1]); return self; def addMethodCall(self, method, arguments = None): """Adds a method to call after service initialization. @param: string method The method name to call @param array arguments An array of arguments to pass to the method call @return Definition The current instance @raise InvalidArgumentException on empty method param @api """ if arguments is None: arguments = list(); arguments = list(arguments); method = str(method); if not method: raise InvalidArgumentException('Method name cannot be empty.'); self.__calls.append([method, arguments]); return self; def removeMethodCall(self, method): """Removes a method to call after service initialization. @param: string method The method name to remove @return Definition The current instance @api """ i = -1; for call in self.__calls: i += 1; if call[0] == method: del self.__calls[i]; break; return self; def hasMethodCall(self, method): """Check if the current definition has a given method to call after service initialization.: @param: string method The method name to search for @return Boolean @api """ for call in self.__calls: if call[0] == method: return True; return False; def getMethodCalls(self): """Gets the methods to call after service initialization. 
@return: list An array of method calls @api """ return self.__calls; def setTags(self, tags): """Sets tags for this definition @param: dict tags @return Definition the current instance @api """ assert isinstance(tags, dict); self.__tags = tags; return self; def getTags(self): """Returns all tags. @return: dict An array of tags @api """ return self.__tags; def getTag(self, name): """Gets a tag by name. @param: string name The tag name @return list An array of attributes @api """ if name in self.__tags: return self.__tags[name]; else: return list(); def addTag(self, name, attributes = None): """Adds a tag for this definition. @param: string name The tag name @param dict attributes An array of attributes @return Definition The current instance @api """ if attributes is None: attributes = dict(); assert isinstance(attributes, dict); if name not in self.__tags: self.__tags[name] = list(); self.__tags[name].append(attributes); return self; def hasTag(self, name): """Whether this definition has a tag with the given name @param: string name @return Boolean @api """ return name in self.__tags; def clearTag(self, name): """Clears all tags for a given name. @param: string name The tag name @return Definition """ if self.hasTag(name): del self.__tags[name]; return self; def clearTags(self): """Clears the tags for this definition. @return: Definition The current instance @api """ self.__tags = dict(); return self; def setFile(self, filename): """Sets a file to require before creating the service. @param: string file A full pathname to include @return Definition The current instance @api """ self.__file = filename; return self; def getFile(self): """Gets the file to require before creating the service. 
@return: string The full pathname to include @api """ return self.__file; def setScope(self, scope): """Sets the scope of the service @param: string scope Whether the service must be shared or not @return Definition The current instance @api """ self.__scope = scope; return self; def getScope(self): """Returns the scope of the service @return: string @api """ return self.__scope; def setPublic(self, boolean): """Sets the visibility of this service. @param: Boolean boolean @return Definition The current instance @api """ self.__public = bool(boolean); return self; def isPublic(self): """Whether this service is public facing @return: Boolean @api """ return self.__public; def setSynthetic(self, boolean): """Sets whether this definition is synthetic, that is not constructed by the container, but dynamically injected. @param: Boolean boolean @return Definition the current instance @api """ self.__synthetic = bool(boolean); return self; def isSynthetic(self): """Whether this definition is synthetic, that is not constructed by the container, but dynamically injected. @return: Boolean @api """ return self.__synthetic; def setAbstract(self, boolean): """Whether this definition is abstract, that means it merely serves as a template for other definitions. @param: Boolean boolean @return Definition the current instance @api """ self.__abstract = bool(boolean); return self; def isAbstract(self): """Whether this definition is abstract, that means it merely serves as a template for other definitions. @return: Boolean @api """ return self.__abstract; def setConfigurator(self, closure): """Sets a configurator to call after the service is fully initialized. @param: callable callable A PYTHON callable @return Definition The current instance @api """ self.__configurator = closure; return self; def getConfigurator(self): """Gets the configurator to call after the service is fully initialized. 
@return: callable The PHP callable to call @api """ return self.__configurator; class DefinitionDecorator(Definition): """This definition decorates another definition. @author Johannes M. Schmitt <schmittjoh@gmail.com> @api """ def __init__(self, parent): """Constructor. @param string parent The id of Definition instance to decorate. @api """ Definition.__init__(self); self.__parent = None; self.__changes = None; self.__overwriteArguments = None; self.__parent = parent; self.__changes = dict(); self.__overwriteArguments = dict(); def getParent(self): """Returns the Definition being decorated. @return string @api """ return self.__parent; def getChanges(self): """Returns all changes tracked for the Definition object. @return array An array of changes for this Definition @api """ return self.__changes; def getOverwriteArguments(self): """Returns all overwrite arguments for the Definition object. @return dict A dict of overwrite arguments for the Definition object. @api """ return self.__overwriteArguments; def setClass(self, className): """ @api """ self.__changes['class'] = True; return Definition.setClass(self, className); def setFactoryClass(self, className): """ @api """ self.__changes['factory_class'] = True; return Definition.setFactoryClass(self, className); def setFactoryMethod(self, method): """ @api """ self.__changes['factory_method'] = True; return Definition.setFactoryMethod(self, method); def setFactoryService(self, service): """ @api """ self.__changes['factory_service'] = True; return Definition.setFactoryService(self, service); def setConfigurator(self, closure): """ @api """ self.__changes['configurator'] = True; return Definition.setConfigurator(self, closure); def setFile(self, filename): """ @api """ self.__changes['file'] = True; return Definition.setFile(self, filename); def setPublic(self, boolean): """ @api """ self.__changes['public'] = True; return Definition.setPublic(self, boolean); def getArgument(self, index): """Gets an argument to 
pass to the service constructor/factory method. If replaceArgument() has been used to replace an argument, this method will return the replacement value. @param integer index @return mixed The argument value @raise OutOfBoundsException When the argument does not exist @api """ index = int(index); # UPDATED try: return self.__overwriteArguments[index]; except IndexError: pass; lastIndex = len(self._arguments) - 1; if (index < 0 or index > lastIndex) : raise OutOfBoundsException( 'The index "{0!d}" is not in the range [0, {1!d}].'.format( index, len(self._arguments) - 1 )); return self._arguments[index]; def replaceArgument(self, index, value): """You should always use this method when overwriting existing arguments of the parent definition. If you directly call setArguments() keep in mind that you must follow certain conventions when you want to overwrite the arguments of the parent definition, otherwise your arguments will only be appended. @param integer index @param mixed value @return DefinitionDecorator the current instance @raise InvalidArgumentException when index isn't an integer @api """ if not isinstance(index, int) or isinstance(index, bool): raise InvalidArgumentException('index must be an integer.'); # UPDATED self.__overwriteArguments[index] = value; return self;
unknown
codeparrot/codeparrot-clean