code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def body(self):
    """Return the request body, reading and caching it on first access.

    With no custom body reader configured, reads ``content_length``
    bytes (or 0 when unset) from ``input``; otherwise delegates the
    read to ``_body_reader``. Subsequent calls return the cached value.
    """
    if self._body is not None:
        return self._body
    reader = self._body_reader
    if reader is None:
        self._body = self.input.read(self.content_length or 0)
    else:
        self._body = reader(self.input)
    return self._body
Reads and returns the entire request body. On first access, reads `content_length` bytes from `input` and stores the result on the request object. On subsequent access, returns the cached value.
def rm(path, service_names=None):
    """Remove stopped containers via docker-compose.

    path
        Path where the docker-compose file is stored on the server
    service_names
        Optional list; when given, only those stopped services are removed.
    """
    compose_project = __load_project(path)
    if isinstance(compose_project, dict):
        # __load_project signals failure by returning an error dict.
        return compose_project
    try:
        compose_project.remove_stopped(service_names)
    except Exception as inst:
        return __handle_except(inst)
    return __standardize_result(
        True, 'Removing stopped containers via docker-compose', None, None)
Remove stopped containers in the docker-compose file, service_names is a python list, if omitted remove all stopped containers path Path where the docker-compose file is stored on the server service_names If specified will remove only the specified stopped services CLI Example: .. code-block:: bash salt myminion dockercompose.rm /path/where/docker-compose/stored salt myminion dockercompose.rm /path/where/docker-compose/stored '[janus]'
def wrap_inference_results(inference_result_proto):
    """Package a classification or regression response into an InferenceResult.

    Args:
        inference_result_proto: A ClassificationResponse or RegressionResponse.

    Returns:
        An InferenceResult proto carrying the response's result field
        (empty when the input is neither supported response type).
    """
    wrapped = inference_pb2.InferenceResult()
    if isinstance(inference_result_proto,
                  classification_pb2.ClassificationResponse):
        wrapped.classification_result.CopyFrom(inference_result_proto.result)
    elif isinstance(inference_result_proto,
                    regression_pb2.RegressionResponse):
        wrapped.regression_result.CopyFrom(inference_result_proto.result)
    return wrapped
Returns packaged inference results from the provided proto. Args: inference_result_proto: The classification or regression response proto. Returns: An InferenceResult proto with the result from the response.
def store_work_results(self, results, collection, md5):
    """Store a worker's output results in the given collection.

    Args:
        results: dict of worker output; mutated in place with bookkeeping keys.
        collection: the database collection to store the results in.
        md5: the md5 of the sample data to be updated.
    """
    # Stamp identity and storage-time bookkeeping onto the document.
    results['md5'] = md5
    results['__time_stamp'] = datetime.datetime.utcnow()
    # Preserve an existing mod_time; otherwise default it to the storage time.
    if 'mod_time' not in results:
        results['mod_time'] = results['__time_stamp']
    # Upsert (third arg True) keyed on md5, after cleaning for storage.
    try:
        self.database[collection].update({'md5':md5}, self.clean_for_storage(results), True)
    except pymongo.errors.OperationFailure:
        # Capped collections refuse updates that grow a document; this is
        # best-effort storage, so just report and move on.
        print 'Could not update exising object in capped collection, punting...'
        print 'collection: %s md5:%s' % (collection, md5)
Store the output results of the worker. Args: results: a dictionary. collection: the database collection to store the results in. md5: the md5 of sample data to be updated.
def create_logstash(self, **kwargs):
    """Create an instance of the Logging Service and add it to this app's
    manifest.

    Keyword arguments are forwarded to the Logging service constructor.
    Returns the created service object.
    """
    service = predix.admin.logstash.Logging(**kwargs)
    service.create()
    service.add_to_manifest(self)
    # Kibana-Me-Logs is a companion UI that must be installed manually.
    logging.info('Install Kibana-Me-Logs application by following GitHub instructions')
    logging.info('git clone https://github.com/cloudfoundry-community/kibana-me-logs.git')
    return service
Creates an instance of the Logging Service.
def add_reorganize_data(self, name, input_name, output_name, mode='SPACE_TO_DEPTH', block_size=2):
    """Add a data reorganization layer of type "SPACE_TO_DEPTH" or
    "DEPTH_TO_SPACE".

    Parameters
    ----------
    name: str
        The name of this layer.
    input_name: str
        The input blob name of this layer.
    output_name: str
        The output blob name of this layer.
    mode: str
        - 'SPACE_TO_DEPTH': spatial data moves to the channel dimension;
          output CHW is [C * block_size^2, H/block_size, W/block_size].
        - 'DEPTH_TO_SPACE': the reverse operation; output CHW is
          [C/(block_size^2), H * block_size, W * block_size].
    block_size: int
        Must be greater than 1.

    Raises
    ------
    ValueError
        If block_size < 2.
    NotImplementedError
        If mode is not one of the two supported strings.
    """
    # Fix: the original bound `spec = self.spec` but never used it; the
    # dead local has been removed.
    spec_layer = self.nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)
    spec_layer_params = spec_layer.reorganizeData
    if block_size < 2:
        raise ValueError("Invalid block_size value %d. Must be greater than 1." % block_size)
    spec_layer_params.blockSize = block_size
    reorg_type = _NeuralNetwork_pb2.ReorganizeDataLayerParams.ReorganizationType
    if mode == 'SPACE_TO_DEPTH':
        spec_layer_params.mode = reorg_type.Value('SPACE_TO_DEPTH')
    elif mode == 'DEPTH_TO_SPACE':
        spec_layer_params.mode = reorg_type.Value('DEPTH_TO_SPACE')
    else:
        raise NotImplementedError(
            'Unknown reorganization mode %s ' % mode)
Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE". Parameters ---------- name: str The name of this layer. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. mode: str - If mode == 'SPACE_TO_DEPTH': data is moved from the spatial to the channel dimension. Input is spatially divided into non-overlapping blocks of size block_size X block_size and data from each block is moved to the channel dimension. Output CHW dimensions are: [C * block_size * block_size, H/block_size, W/block_size]. - If mode == 'DEPTH_TO_SPACE': data is moved from the channel to the spatial dimension. Reverse of the operation 'SPACE_TO_DEPTH'. Output CHW dimensions are: [C/(block_size * block_size), H * block_size, W * block_size]. block_size: int Must be greater than 1. Must divide H and W, when mode is 'SPACE_TO_DEPTH'. (block_size * block_size) must divide C when mode is 'DEPTH_TO_SPACE'. See Also -------- add_flatten, add_reshape
def _add_view_menu(self):
    """Create a default View menu that shows 'Enter Full Screen'
    (Ctrl+Cmd+F), attached to the application's main menu."""
    mainMenu = self.app.mainMenu()
    viewMenu = AppKit.NSMenu.alloc().init()
    viewMenu.setTitle_(localization["cocoa.menu.view"])
    viewMenuItem = AppKit.NSMenuItem.alloc().init()
    viewMenuItem.setSubmenu_(viewMenu)
    mainMenu.addItem_(viewMenuItem)
    # "toggleFullScreen:" is a standard Cocoa action resolved through the
    # responder chain, so no explicit target is set.
    fullScreenMenuItem = viewMenu.addItemWithTitle_action_keyEquivalent_(localization["cocoa.menu.fullscreen"], "toggleFullScreen:", "f")
    # Ctrl+Cmd+F mirrors the system default full-screen shortcut.
    fullScreenMenuItem.setKeyEquivalentModifierMask_(AppKit.NSControlKeyMask | AppKit.NSCommandKeyMask)
Create a default View menu that shows 'Enter Full Screen'.
def close(self):
    """Close the listening sockets and all accepted connections, then
    wait until everything has fully shut down."""
    # Close any listening handles that are still open, then drop them.
    for listener in self._handles:
        if not listener.closed:
            listener.close()
    del self._handles[:]
    # Close the transport side of every accepted connection.
    for conn_transport, _ in self.connections:
        conn_transport.close()
    # Block until all connections report closed.
    self._all_closed.wait()
Close the listening sockets and all accepted connections.
def _re_raise_as(NewExc, *args, **kw):
    """Raise ``NewExc(*args, **kw)`` while preserving the traceback of the
    exception currently being handled.

    NOTE: uses the Python 2 three-expression ``raise`` form; this is not
    valid Python 3 syntax.
    """
    etype, val, tb = sys.exc_info()
    raise NewExc(*args, **kw), None, tb
Raise a new exception using the preserved traceback of the last one.
def start(self):
    """Start the fixed-config watcher: parse the statically configured
    route spec once and publish it on the route-spec queue."""
    logging.info("Fixedconf watcher plugin: Started")
    route_spec = {self.conf['fixed_cidr']: self.conf['fixed_hosts'].split(":")}
    try:
        # Validate before publishing; an invalid spec is logged and dropped.
        common.parse_route_spec_config(route_spec)
        self.q_route_spec.put(route_spec)
    except Exception as e:
        logging.warning("Fixedconf watcher plugin: "
                        "Invalid route spec: %s" % str(e))
Start the config watch thread or process.
def get_queues(*queue_names, **kwargs):
    """Return queue instances for the given queue names.

    All queues must share the same class and the same redis connection
    parameters; a ValueError is raised otherwise. With zero or one name,
    a single-element list is returned without any cross-checks.
    """
    from .settings import QUEUES
    if len(queue_names) <= 1:
        # Single (or default) queue: no consistency checks needed. Note the
        # job_class kwarg is passed through untranslated on this path.
        return [get_queue(*queue_names, **kwargs)]
    # Resolve job_class once so every queue below gets the same class.
    kwargs['job_class'] = get_job_class(kwargs.pop('job_class', None))
    queue_params = QUEUES[queue_names[0]]
    connection_params = filter_connection_params(queue_params)
    queues = [get_queue(queue_names[0], **kwargs)]
    # Every remaining queue must match the first one's class and connection.
    for name in queue_names[1:]:
        queue = get_queue(name, **kwargs)
        if type(queue) is not type(queues[0]):
            raise ValueError(
                'Queues must have the same class.'
                '"{0}" and "{1}" have '
                'different classes'.format(name, queue_names[0]))
        if connection_params != filter_connection_params(QUEUES[name]):
            raise ValueError(
                'Queues must have the same redis connection.'
                '"{0}" and "{1}" have '
                'different connections'.format(name, queue_names[0]))
        queues.append(queue)
    return queues
Return queue instances from specified queue names. All instances must use the same Redis connection.
def purge_metadata_by_name(self, name):
    """Purge a process's metadata directory.

    :raises ProcessMetadataManager.MetadataError: when removal fails
        with an OSError.
    """
    target_dir = self._get_metadata_dir_by_name(name, self._metadata_base_dir)
    logger.debug('purging metadata directory: {}'.format(target_dir))
    try:
        rm_rf(target_dir)
    except OSError as e:
        raise ProcessMetadataManager.MetadataError(
            'failed to purge metadata directory {}: {!r}'.format(target_dir, e))
Purge a process's metadata directory. :raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
def _validate_file_roots(file_roots):
    """Normalize the file_roots option.

    When file_roots is not a dict, log a warning and fall back to the
    default base file roots; otherwise return the normalized roots.
    """
    if isinstance(file_roots, dict):
        return _normalize_roots(file_roots)
    log.warning('The file_roots parameter is not properly formatted,'
                ' using defaults')
    return {'base': _expand_glob_path([salt.syspaths.BASE_FILE_ROOTS_DIR])}
If the file_roots option is not a dictionary, log a warning and fall back to the default base file roots; otherwise return the normalized file_roots.
def validate(tool_class, model_class):
    """Perform basic ObjectTool option validation.

    Requires tool_class to define 'name' and 'label' attributes and a
    'view' method; model_class is currently unused.
    """
    def _has(attr):
        return hasattr(tool_class, attr)

    if not _has('name'):
        raise ImproperlyConfigured("No 'name' attribute found for tool %s." % (
            tool_class.__name__
        ))
    if not _has('label'):
        raise ImproperlyConfigured("No 'label' attribute found for tool %s." % (
            tool_class.__name__
        ))
    if not _has('view'):
        raise NotImplementedError("No 'view' method found for tool %s." % (
            tool_class.__name__
        ))
Does basic ObjectTool option validation.
def get_structure(atoms, cls=None):
    """Build a pymatgen structure from an ASE Atoms object.

    Args:
        atoms: ASE Atoms object
        cls: Structure class to instantiate (defaults to pymatgen Structure)

    Returns:
        Equivalent pymatgen.core.structure.Structure
    """
    structure_cls = cls if cls is not None else Structure
    return structure_cls(
        atoms.get_cell(),
        atoms.get_chemical_symbols(),
        atoms.get_positions(),
        coords_are_cartesian=True,
    )
Returns pymatgen structure from ASE Atoms. Args: atoms: ASE Atoms object cls: The Structure class to instantiate (defaults to pymatgen structure) Returns: Equivalent pymatgen.core.structure.Structure
def cases(self, env, data):
    """Call each nested handler until one returns a non-None result.

    env and data state is pushed before and popped after every handler
    call, so each handler sees isolated state. A None result means
    "request does not match; try the next handler". Returns the first
    non-None result, or None when no handler matches.
    """
    for handler in self.handlers:
        env._push()
        data._push()
        try:
            result = handler(env, data)
        finally:
            # Always restore state, even when the handler raises.
            env._pop()
            data._pop()
        if result is not None:
            return result
Calls each nested handler until one of them returns nonzero result. If any handler returns `None`, it is interpreted as "request does not match, the handler has nothing to do with it and `web.cases` should try to call the next handler".
def show_command(endpoint_id, rule_id):
    """Executor for `globus endpoint permission show`: fetch one ACL rule
    and print it as a text record."""
    rule = get_client().get_endpoint_acl_rule(endpoint_id, rule_id)
    display_fields = (
        ("Rule ID", "id"),
        ("Permissions", "permissions"),
        ("Shared With", _shared_with_keyfunc),
        ("Path", "path"),
    )
    formatted_print(rule, text_format=FORMAT_TEXT_RECORD, fields=display_fields)
Executor for `globus endpoint permission show`
def PostUnregistration(method):
    """Decorator marking a method as the service post-unregistration
    callback, called after a service of the component has been
    unregistered from the framework.

    The decorated method must accept the ServiceReference of the
    unregistered service as its single argument.

    :param method: The decorated method
    :raise TypeError: The decorated element is not a valid function
    """
    if not isinstance(method, types.FunctionType):
        raise TypeError("@PostUnregistration can only be applied on functions")
    # Ensure the callback accepts exactly the service_reference argument.
    validate_method_arity(method, "service_reference")
    _append_object_entry(
        method,
        constants.IPOPO_METHOD_CALLBACKS,
        constants.IPOPO_CALLBACK_POST_UNREGISTRATION,
    )
    return method
The service post-unregistration callback decorator is called after a service of the component has been unregistered from the framework. The decorated method must accept the :class:`~pelix.framework.ServiceReference` of the registered service as argument:: @PostUnregistration def callback_method(self, service_reference): ''' service_reference: The ServiceReference of the provided service ''' # ... :param method: The decorated method :raise TypeError: The decorated element is not a valid function
def meth_list(args):
    """List workflows in the methods repository, one
    'namespace<TAB>name<TAB>snapshotId' line each, sorted
    case-insensitively."""
    response = fapi.list_repository_methods(namespace=args.namespace,
                                            name=args.method,
                                            snapshotId=args.snapshot_id)
    fapi._check_response_code(response, 200)
    lines = [
        '{0}\t{1}\t{2}'.format(m['namespace'], m['name'], m['snapshotId'])
        for m in response.json()
    ]
    return sorted(lines, key=lambda s: s.lower())
List workflows in the methods repository
def std_blocksum(data, block_sizes, mask=None):
    """Calculate the standard deviation of block-summed data values for
    each size in ``block_sizes``.

    Parameters
    ----------
    data : array-like
        The 2D array to block sum.
    block_sizes : int, array-like of int
        Integer (square) block sizes.
    mask : array-like (bool), optional
        Boolean mask with the same shape as ``data``; True marks masked
        elements.

    Returns
    -------
    result : `~numpy.ndarray`
        Standard deviations of the block-summed array, one per block size.
    """
    data = np.ma.asanyarray(data)
    if mask is not None and mask is not np.ma.nomask:
        mask = np.asanyarray(mask)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape.')
        # NOTE(review): |= mutates data.mask in place; if the caller passed a
        # masked array, its mask is modified as a side effect — confirm intended.
        data.mask |= mask
    stds = []
    block_sizes = np.atleast_1d(block_sizes)
    for block_size in block_sizes:
        # _mesh_values is defined elsewhere in this module; presumably it
        # extracts values of complete, unmasked blocks — TODO confirm.
        mesh_values = _mesh_values(data, block_size)
        block_sums = np.sum(mesh_values, axis=1)
        stds.append(np.std(block_sums))
    return np.array(stds)
Calculate the standard deviation of block-summed data values at sizes of ``block_sizes``. Values from incomplete blocks, either because of the image edges or masked pixels, are not included. Parameters ---------- data : array-like The 2D array to block sum. block_sizes : int, array-like of int An array of integer (square) block sizes. mask : array-like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Blocks that contain *any* masked data are excluded from calculations. Returns ------- result : `~numpy.ndarray` An array of the standard deviations of the block-summed array for the input ``block_sizes``.
def get_info(brain_or_object, endpoint=None, complete=False):
    """Extract the data from the catalog brain or object.

    :param brain_or_object: A single catalog brain or content object
    :param endpoint: The named URL endpoint for the root of the items
    :param complete: Flag to wake up the object and fetch all data
    :returns: Data mapping for the object/catalog brain
    :rtype: dict
    """
    if not is_brain(brain_or_object):
        brain_or_object = get_brain(brain_or_object)
        if brain_or_object is None:
            # NOTE(review): brain_or_object was already rebound to None here,
            # so the message interpolates None rather than the original value.
            logger.warn("Couldn't find/fetch brain of {}".format(brain_or_object))
            return {}
        # A full object was passed in: always return the complete data set.
        complete = True
    if is_relationship_object(brain_or_object):
        logger.warn("Skipping relationship object {}".format(repr(brain_or_object)))
        return {}
    # Basic metadata from the brain/object, plus URL and parent info.
    info = IInfo(brain_or_object).to_dict()
    url_info = get_url_info(brain_or_object, endpoint)
    info.update(url_info)
    parent = get_parent_info(brain_or_object)
    info.update(parent)
    if complete:
        # Wake up the object and merge its full adapter data.
        obj = api.get_object(brain_or_object)
        adapter = IInfo(obj)
        info.update(adapter.to_dict())
        # req is presumably this package's request helper module — TODO confirm.
        if req.get_workflow(False):
            info.update(get_workflow_info(obj))
    return info
Extract the data from the catalog brain or object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :param endpoint: The named URL endpoint for the root of the items :type endpoint: str/unicode :param complete: Flag to wake up the object and fetch all data :type complete: bool :returns: Data mapping for the object/catalog brain :rtype: dict
def in_cache(self, zenpy_object):
    """Return True when this object is present in the cache."""
    obj_type = get_object_type(zenpy_object)
    cache_key = getattr(zenpy_object, self._cache_key_attribute(obj_type))
    return self.get(obj_type, cache_key) is not None
Determine whether or not this object is in the cache
def charset_to_int(s, charset):
    """Decode string *s* as a non-negative integer in the positional
    number system whose digits are *charset* (most significant first).

    The empty string decodes to 0. Raises ValueError (from str.index)
    when *s* contains a character not in *charset*.
    """
    base = len(charset)
    value = 0
    for symbol in s:
        value = value * base + charset.index(symbol)
    return value
Turn a string into a non-negative integer. >>> charset_to_int('0', B40_CHARS) 0 >>> charset_to_int('10', B40_CHARS) 40 >>> charset_to_int('abcd', B40_CHARS) 658093 >>> charset_to_int('', B40_CHARS) 0 >>> charset_to_int('muneeb.id', B40_CHARS) 149190078205533 >>> charset_to_int('A', B40_CHARS) Traceback (most recent call last): ... ValueError: substring not found
def index_delete(index, hosts=None, profile=None):
    """Delete an index.

    Returns the server's 'acknowledged' flag; a missing index counts as
    success (True). Transport errors become CommandExecutionError.

    CLI example::

        salt myminion elasticsearch.index_delete testindex
    """
    es = _get_instance(hosts, profile)
    try:
        return es.indices.delete(index=index).get('acknowledged', False)
    except elasticsearch.exceptions.NotFoundError:
        # Already absent: deleting a missing index is treated as success.
        return True
    except elasticsearch.TransportError as e:
        raise CommandExecutionError("Cannot delete index {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error))
Delete an index index Index name CLI example:: salt myminion elasticsearch.index_delete testindex
def set_perspective(self, fov, aspect, near, far):
    """Set this object's matrix to a perspective projection.

    Parameters
    ----------
    fov : float
        Field of view.
    aspect : float
        Aspect ratio.
    near : float
        Near location.
    far : float
        Far location.
    """
    projection = transforms.perspective(fov, aspect, near, far)
    self.matrix = projection
Set the perspective Parameters ---------- fov : float Field of view. aspect : float Aspect ratio. near : float Near location. far : float Far location.
def cummean(expr, sort=None, ascending=True, unique=False, preceding=None, following=None):
    """Calculate cumulative mean of a sequence expression.

    :param expr: expression for calculation
    :param sort: name of the sort column
    :param ascending: whether to sort in ascending order
    :param unique: whether to eliminate duplicate entries
    :param preceding: the start point of a window
    :param following: the end point of a window
    :return: calculated column
    """
    return _cumulative_op(
        expr, CumMean,
        sort=sort, ascending=ascending, unique=unique,
        preceding=preceding, following=following,
        data_type=_stats_type(expr),
    )
Calculate cumulative mean of a sequence expression. :param expr: expression for calculation :param sort: name of the sort column :param ascending: whether to sort in ascending order :param unique: whether to eliminate duplicate entries :param preceding: the start point of a window :param following: the end point of a window :return: calculated column
def save_context(context):
    """Serialize *context* to HTTP Prompt format and write it (UTF-8)
    to the user data directory."""
    target_path = _get_context_filepath()
    serialized = format_to_http_prompt(context, excluded_options=EXCLUDED_OPTIONS)
    with io.open(target_path, 'w', encoding='utf-8') as handle:
        handle.write(serialized)
Save a Context object to user data directory.
def within(self, x, ctrs, kdtree=None):
    """Return the indices of the balls (centers ``ctrs``, radius
    ``self.radius``) that contain point ``x``.

    Uses the K-D tree when one is provided; otherwise falls back to a
    brute-force distance scan.
    """
    if kdtree is not None:
        return kdtree.query_ball_point(x, self.radius, p=2.0, eps=0)
    return np.where(lalg.norm(ctrs - x, axis=1) <= self.radius)[0]
Check which balls `x` falls within. Uses a K-D Tree to perform the search if provided.
def trcdep():
    """Return the number of modules in the CSPICE traceback representation.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/trcdep_c.html

    :return: The number of modules in the traceback.
    :rtype: int
    """
    module_count = ctypes.c_int()
    libspice.trcdep_c(ctypes.byref(module_count))
    return module_count.value
Return the number of modules in the traceback representation. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/trcdep_c.html :return: The number of modules in the traceback. :rtype: int
async def client_event_handler(self, client_id, event_tuple, user_data):
    """Default event forwarder: log the event and drop it.

    Subclasses override this coroutine to actually deliver device events
    to the client identified by client_id. event_tuple is
    (connection_string, event_name, event_object); user_data is whatever
    was registered via setup_client.
    """
    conn_string, event_name, _event = event_tuple
    self._logger.debug(
        "Ignoring event %s from device %s forwarded for client %s",
        event_name, conn_string, client_id)
    return None
Method called to actually send an event to a client. Users of this class should override this method to actually forward device events to their clients. It is called with the client_id passed to (or returned from) :meth:`setup_client` as well as the user_data object that was included there. The event tuple is a 3-tuple of: - connection string - event name - event object If you override this to be a coroutine, it will be awaited. The default implementation just logs the event. Args: client_id (str): The client_id that this event should be forwarded to. event_tuple (tuple): The connection_string, event_name and event_object that should be forwarded. user_data (object): Any user data that was passed to setup_client.
def passthrough_repl(self, inputstring, **kwargs):
    """Add back passthroughs.

    Scans inputstring for backslash-prefixed numeric references and
    replaces each with the stored "passthrough" ref; all other
    characters pass through unchanged. A trailing None sentinel flushes
    any reference still being accumulated at end of input.
    """
    out = []
    # index accumulates the digits of a passthrough reference, or is None
    # when not currently inside a reference.
    index = None
    for c in append_it(inputstring, None):
        try:
            if index is not None:
                if c is not None and c in nums:
                    # Still reading the reference number.
                    index += c
                elif c == unwrapper and index:
                    # Complete reference: substitute the stored passthrough.
                    ref = self.get_ref("passthrough", index)
                    out.append(ref)
                    index = None
                elif c != "\\" or index:
                    # Not a reference after all: emit the literal backslash
                    # plus whatever was consumed.
                    out.append("\\" + index)
                    if c is not None:
                        out.append(c)
                    index = None
            elif c is not None:
                if c == "\\":
                    # Possible start of a passthrough reference.
                    index = ""
                else:
                    out.append(c)
        except CoconutInternalException as err:
            # On internal errors, complain and emit the raw text unchanged.
            complain(err)
            if index is not None:
                out.append(index)
                index = None
            out.append(c)
    return "".join(out)
Add back passthroughs.
def get_shutit_pexpect_session_environment(self, environment_id):
    """Return the first registered environment object whose
    environment_id matches, or None when there is no match.

    Fails (via self.fail) when environment_id is not a str.
    """
    if not isinstance(environment_id, str):
        self.fail('Wrong argument type in get_shutit_pexpect_session_environment')
    matches = (
        env
        for env in shutit_global.shutit_global_object.shutit_pexpect_session_environments
        if env.environment_id == environment_id
    )
    return next(matches, None)
Returns the first shutit_pexpect_session environment object with the given environment-id, or None if no such environment is registered
def get_time():
    """Get time from a locally running NTP server and return it as a
    ctime() string.

    NOTE(review): Python 2 style — the request packet is built as a str.
    Field 10 of the unpacked reply is presumably the transmit-timestamp
    seconds, and EPOCH_START the NTP→Unix epoch offset — TODO confirm.
    """
    # 0x1b = NTP client request header; pad to the 48-byte packet size.
    time_request = '\x1b' + 47 * '\0'
    now = struct.unpack("!12I", ntp_service.request(time_request, timeout=5.0).data.read())[10]
    return time.ctime(now - EPOCH_START)
Get time from a locally running NTP server
def gen_div(src1, src2, dst):
    """Return a REIL DIV instruction (dst := src1 / src2).

    Both source operands must have the same size.
    """
    assert src1.size == src2.size
    return ReilBuilder.build(ReilMnemonic.DIV, src1, src2, dst)
Return a DIV instruction.
def _clean_dirty(self, obj=None):
    """Recursively clear dirty-tracking state on obj (default: self) and
    on every child object found among its attributes."""
    target = obj or self
    target.__dict__['_dirty_attributes'].clear()
    target._dirty = False
    # Recurse into children: BaseObject instances via this method, other
    # objects via their own _clean_dirty when they expose a callable one.
    for attr_value in vars(target).values():
        if isinstance(attr_value, BaseObject):
            self._clean_dirty(attr_value)
        else:
            cleaner = getattr(attr_value, '_clean_dirty', None)
            if callable(cleaner):
                cleaner()
Recursively clean self and all child objects.
def _check_markers(task_ids, offset=10):
    """Check whether completion markers exist for all task_ids.

    Returns a (all_found, has_errors) tuple: (False, None) as soon as a
    batch is missing a marker, otherwise (True, has_errors).

    NOTE(review): has_errors is recomputed (not accumulated) per batch,
    so the returned value reflects only the final batch — confirm
    whether that is intended.
    """
    # Shuffle so concurrent checkers probe batches in different orders.
    shuffle(task_ids)
    has_errors = False
    # Fetch markers in batches of `offset` ids.
    for index in xrange(0, len(task_ids), offset):
        keys = [ndb.Key(FuriousAsyncMarker, id)
                for id in task_ids[index:index + offset]]
        markers = ndb.get_multi(keys)
        if not all(markers):
            logging.debug("Not all Async's complete")
            return False, None
        has_errors = not all((marker.success for marker in markers))
    return True, has_errors
Returns a (found, has_errors) tuple describing whether markers exist for the task_ids. If all task ids have markers, (True, has_errors) is returned. Otherwise (False, None) is returned as soon as a missing marker is hit.
def write_meta(self, role):
    """Read the meta file from self.paths and record its contents in the
    generation report for *role*."""
    meta_contents = utils.file_to_string(self.paths["meta"])
    self.update_gen_report(role, "meta", meta_contents)
Write out a new meta file.
def prune_creds_json(creds: dict, cred_ids: set) -> str:
    """Strip from the creds structure every credential whose referent is
    not in cred_ids, dropping attribute/predicate entries left empty.

    :param creds: indy-sdk creds structure
    :param cred_ids: the set of credential identifiers of interest
    :return: the reduced creds structure as json
    """
    pruned = deepcopy(creds)
    for section in ('attrs', 'predicates'):
        # Iterate over a snapshot of keys so empties can be deleted in place.
        for attr_uuid in list(pruned[section]):
            kept = [
                cred for cred in pruned[section][attr_uuid]
                if cred['cred_info']['referent'] in cred_ids
            ]
            if kept:
                pruned[section][attr_uuid] = kept
            else:
                del pruned[section][attr_uuid]
    return json.dumps(pruned)
Strip all creds out of the input json structure that do not match any of the input credential identifiers. :param creds: indy-sdk creds structure :param cred_ids: the set of credential identifiers of interest :return: the reduced creds json
def get_page_children_dict(self, page_qs=None):
    """Group pages by parent path.

    Returns a dict mapping a parent 'path' value to the list of child
    pages for that parent (taken from page_qs, or
    self.pages_for_display when page_qs is not given).
    """
    grouped = defaultdict(list)
    for page in page_qs or self.pages_for_display:
        parent_path = page.path[:-page.steplen]
        grouped[parent_path].append(page)
    return grouped
Returns a dictionary of lists, where the keys are 'path' values for pages, and the value is a list of children pages for that page.
def get_queryset(self):
    """Return a VersionedQuerySet capable of handling version time
    restrictions, propagating this manager's instance query time when
    one is present.

    :return: VersionedQuerySet
    """
    queryset = VersionedQuerySet(self.model, using=self._db)
    instance = getattr(self, 'instance', None)
    if instance is not None and hasattr(instance, '_querytime'):
        queryset.querytime = instance._querytime
    return queryset
Returns a VersionedQuerySet capable of handling version time restrictions. :return: VersionedQuerySet
def in_session(self):
    """Provide a session scope around a series of operations.

    Yields a session and commits on success. IntegrityError is mapped
    to DuplicateError; connection failures close this manager and raise
    DatabaseConnectionError; any other exception is rolled back and
    re-raised. The session is always closed.
    """
    session = self.get_session()
    try:
        yield session
        session.commit()
    except IntegrityError:
        session.rollback()
        raise DuplicateError("Duplicate unique value detected!")
    except (OperationalError, DisconnectionError):
        session.rollback()
        self.close()
        logger.warn("Database Connection Lost!")
        raise DatabaseConnectionError()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
Provide a session scope around a series of operations.
def value_series(self, key, start=None, end=None, interval=None, namespace=None, cache=None):
    """Get a time series of gauge values.

    All arguments are forwarded to make_context(); the series is
    produced by the resulting context's value_series().
    """
    return self.make_context(key=key, start=start, end=end, interval=interval, namespace=namespace, cache=cache).value_series()
Get a time series of gauge values
def home_page(self, tld_type: Optional[TLDType] = None) -> str:
    """Generate a random home page URL.

    :param tld_type: TLD type.
    :return: Random home page.

    :Example:
        http://www.fontir.info
    """
    site_name = self.random.choice(USERNAMES)
    tld = self.top_level_domain(tld_type=tld_type)
    return 'http://www.{}{}'.format(site_name, tld)
Generate a random home page. :param tld_type: TLD type. :return: Random home page. :Example: http://www.fontir.info
def imprint(self, path=None):
    """Write the determined version to *path* (default:
    self.version_file), followed by a newline.

    :raises ValueError: when no version has been determined.
    :return: self, for chaining.
    """
    if self.version is None:
        raise ValueError('Can not write null version to file.')
    with open(path or self.version_file, 'w') as handle:
        handle.write(self.version + '\n')
    return self
Write the determined version, if any, to ``self.version_file`` or the path passed as an argument.
def progressive(image_field, alt_text=''):
    """Jinja2 filter returning a safe HTML chunk for a progressive image.

    Usage (in the HTML template): {{ obj.image|progressive }}

    :param django.db.models.fields.files.ImageFieldFile image_field: image
    :param str alt_text: alternate text for the image
    :return: rendered Markup, or '' when no suitable Jinja2 engine exists
    :raises ValueError: when image_field is not an ImageFieldFile
    """
    if not isinstance(image_field, ImageFieldFile):
        raise ValueError('"image_field" argument must be an ImageField.')
    # Find the first configured engine exposing a real Jinja2 Environment.
    for engine in engines.all():
        if isinstance(engine, BaseEngine) and hasattr(engine, 'env'):
            env = engine.env
            if isinstance(env, Environment):
                context = render_progressive_field(image_field, alt_text)
                template = env.get_template(
                    'progressiveimagefield/render_field.html'
                )
                rendered = template.render(**context)
                return Markup(rendered)
    return ''
Used as a Jinja2 filter, this function returns a safe HTML chunk. Usage (in the HTML template): {{ obj.image|progressive }} :param django.db.models.fields.files.ImageFieldFile image_field: image :param str alt_text: str :return: a safe HTML template ready to be rendered
def add_vcenter(self, **kwargs):
    """Register a vCenter on the switch.

    Expects keyword arguments 'id', 'url', 'username' and 'password'.
    Builds the brocade-vswitch config XML and passes it to the
    configured callback.

    Returns:
        True when the callback succeeds, False (after logging) otherwise.
    """
    config = ET.Element("config")
    vcenter = ET.SubElement(config, "vcenter",
                            xmlns="urn:brocade.com:mgmt:brocade-vswitch")
    ET.SubElement(vcenter, "id").text = kwargs.pop('id')
    credentials = ET.SubElement(vcenter, "credentials")
    ET.SubElement(credentials, "url").text = kwargs.pop('url')
    ET.SubElement(credentials, "username").text = kwargs.pop('username')
    ET.SubElement(credentials, "password").text = kwargs.pop('password')
    try:
        self._callback(config)
        return True
    except Exception as error:
        logging.error(error)
        return False
Add vCenter on the switch Args: id(str) : Name of an established vCenter url (bool) : vCenter URL username (str): Username of the vCenter password (str): Password of the vCenter callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
def _store_information(self):
    """Store information about Workbench and its commands.

    Registers every public method as a 'command' record (name,
    signature, docstring) and seeds the built-in 'help' records.
    """
    print '<<< Generating Information Storage >>>'
    # Record every public method (no leading underscore) as a command.
    for name, meth in inspect.getmembers(self, predicate=inspect.isroutine):
        if not name.startswith('_'):
            info = {'command': name, 'sig': str(funcsigs.signature(meth)), 'docstring': meth.__doc__}
            self.store_info(info, name, type_tag='command')
    # Built-in help topics.
    self.store_info({'help': '<<< Workbench Server Version %s >>>' % self.version}, 'version', type_tag='help')
    self.store_info({'help': self._help_workbench()}, 'workbench', type_tag='help')
    self.store_info({'help': self._help_basic()}, 'basic', type_tag='help')
    self.store_info({'help': self._help_commands()}, 'commands', type_tag='help')
    self.store_info({'help': self._help_workers()}, 'workers', type_tag='help')
Store information about Workbench and its commands
def load_schema(schema_path):
    """Prepare the api specification for request and response validation.

    :returns: a mapping from RequestMatcher to ValidatorMap for every
        operation in the api specification.
    :rtype: dict
    """
    with open(schema_path, 'r') as schema_file:
        spec = simplejson.load(schema_file)
    # Resolve $ref entries against the spec's own models section.
    model_resolver = RefResolver('', '', spec.get('models', {}))
    return build_request_to_validator_map(spec, model_resolver)
Prepare the api specification for request and response validation. :returns: a mapping from :class:`RequestMatcher` to :class:`ValidatorMap` for every operation in the api specification. :rtype: dict
def is_int(tg_type, inc_array=False):
    """Tell whether the given tango type is an integer type.

    :param tg_type: tango type
    :type tg_type: :class:`tango.CmdArgType`
    :param inc_array: also accept integer array types when True
    :type inc_array: :py:obj:`bool`
    :return: True if tg_type is integer (or, with inc_array, an integer
        array type), False otherwise
    :rtype: :py:obj:`bool`
    """
    # Reads of the module-level type sets need no `global` declaration.
    if tg_type in _scalar_int_types:
        return True
    return bool(inc_array) and tg_type in _array_int_types
Tells if the given tango type is integer :param tg_type: tango type :type tg_type: :class:`tango.CmdArgType` :param inc_array: (optional, default is False) determines if include array in the list of checked types :type inc_array: :py:obj:`bool` :return: True if the given tango type is integer or False otherwise :rtype: :py:obj:`bool`
def get_description(cls) -> str:
    """Return the command class' docstring, stripped of surrounding
    whitespace.

    :raises ValueError: when the class has no docstring.
    """
    doc = cls.__doc__
    if doc is None:
        raise ValueError('No docstring found for {}'.format(cls.__name__))
    return doc.strip()
The description is expected to be the command class' docstring.
def load(self, filename=None):
    """Read a non-XML bcp FORMAT file and parse it into the fields list
    used for creating a bulk data file.

    The first line is the SQL version, the second the field count, and
    each remaining line describes one field.
    """
    with open(filename, 'r') as handle:
        lines = handle.read().strip().split('\n')
    self._sql_version = lines.pop(0)
    self._num_fields = int(lines.pop(0))
    parsed = []
    for line in lines:
        # Collapse runs of spaces so the split yields clean columns.
        normalized = re.sub(' +', ' ', line.strip())
        parsed.append(BCPFormatRow(normalized.split(' ')))
    self.fields = parsed
    self.filename = filename
Reads a non-XML bcp FORMAT file and parses it into fields list used for creating bulk data file
def stoptimes(self, start_date, end_date):
    """Return all stop times in the date range.

    :param start_date: The starting date for the query.
    :param end_date: The end date for the query.
    """
    params = {
        'start': self.format_date(start_date),
        'end': self.format_date(end_date),
    }
    return self._request(ENDPOINTS['STOPTIMES'], params)
Return all stop times in the date range :param start_date: The starting date for the query. :param end_date: The end date for the query. >>> import datetime >>> today = datetime.date.today() >>> trans.stoptimes(today - datetime.timedelta(days=1), today)
def bind(self, typevar, its_type):
    """Bind typevar to its_type.

    Binding occurs on the instance when the typevar belongs to the
    instance's generic type, on call level otherwise.
    """
    assert type(typevar) == tg.TypeVar
    if not self.is_generic_in(typevar):
        self._ns[typevar] = its_type
    else:
        self.bind_to_instance(typevar, its_type)
Binds typevar to the type its_type. Binding occurs on the instance if the typevar is a TypeVar of the generic type of the instance, on call level otherwise.
def prepare_input_data(config):
    """Return fastq inputs for fusion calling.

    Without disambiguation, pass through the configured input sequence
    files. With disambiguation, convert the disambiguated work_bam into
    two fastq files, since EricScript accepts fastqs as input.
    """
    if not dd.get_disambiguate(config):
        return dd.get_input_sequence_files(config)
    work_bam = dd.get_work_bam(config)
    logger.info("Converting disambiguated reads to fastq...")
    return convert_bam_to_fastq(
        work_bam, dd.get_work_dir(config), None, None, config)
In case of disambiguation, we want to run fusion calling on the disambiguated reads, which are in the work_bam file. As EricScript accepts 2 fastq files as input, we need to convert the .bam to 2 .fq files.
def do_eni(self,args):
    """Go to the specified eni. eni -h for detailed help."""
    parser = CommandArgumentParser("eni")
    parser.add_argument(dest='eni',help='eni index or name');
    args = vars(parser.parse_args(args))
    print "loading eni {}".format(args['eni'])
    # Accept either a numeric index or a logical name for the interface.
    try:
        index = int(args['eni'])
        eniSummary = self.wrappedStack['resourcesByTypeIndex']['AWS::EC2::NetworkInterface'][index]
    except ValueError:
        eniSummary = self.wrappedStack['resourcesByTypeName']['AWS::EC2::NetworkInterface'][args['eni']]
    pprint(eniSummary)
    self.stackResource(eniSummary.stack_name,eniSummary.logical_id)
Go to the specified eni. eni -h for detailed help.
def add_tlink(self,my_tlink):
    """Add a tlink to the temporalRelations layer, creating the layer
    (and attaching its node to the root) on first use.

    @type my_tlink: L{Ctlink}
    @param my_tlink: tlink object
    """
    if self.temporalRelations_layer is None:
        # Lazily create the layer and attach it to the document root.
        self.temporalRelations_layer = CtemporalRelations()
        self.root.append(self.temporalRelations_layer.get_node())
    self.temporalRelations_layer.add_tlink(my_tlink)
Adds a tlink to the temporalRelations layer @type my_tlink: L{Ctlink} @param my_tlink: tlink object
def worker_recover(name, workers=None, profile='default'):
    """Recover all the given workers in the modjk load balancer.

    Example:

    .. code-block:: yaml

        loadbalancer:
          modjk.worker_recover:
            - workers:
              - app1
              - app2
    """
    if workers is None:
        workers = []
    return _bulk_state('modjk.bulk_recover', name, workers, profile)
Recover all the workers in the modjk load balancer Example: .. code-block:: yaml loadbalancer: modjk.worker_recover: - workers: - app1 - app2
def exists(name, tags=None, region=None, key=None, keyid=None, profile=None):
    """Check to see if an RDS instance exists.

    CLI example::

        salt myminion boto_rds.exists myrds region=us-east-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        described = conn.describe_db_instances(DBInstanceIdentifier=name)
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
    return {'exists': bool(described)}
Check to see if an RDS exists. CLI example:: salt myminion boto_rds.exists myrds region=us-east-1
def add_arguments(self, parser):
    """Register the positional app_label plus every entry in
    self.arguments.

    self.arguments maps a space-separated flag string (e.g. '--foo -f')
    to the kwargs for parser.add_argument.
    """
    parser.add_argument('app_label', nargs='*')
    for flags, options in self.arguments.items():
        parser.add_argument(*flags.split(' '), **options)
Unpack self.arguments for parser.add_arguments.
def time_stamp():
    """Set up time helpers.

    :returns: tuple of (timestamp format string, datetime class,
        timedelta class, current UTC datetime)
    """
    timestamp_format = '%Y-%m-%dT%H:%M:%S.%f'
    current_utc = datetime.datetime.utcnow()
    return timestamp_format, datetime.datetime, datetime.timedelta, current_utc
Setup time functions :returns: ``tuple``
def transplant(new_net, net, suffix=''):
    """Transfer weights from net to new_net by copying matching
    parameters, coercing parameters of incompatible shape, and dropping
    unmatched parameters.

    Coercion assigns via .flat, which converts fully connected layers
    (O x I) to equivalent convolution layers (O x I x H x W) since the
    weights are the same and only the shapes differ. Both arguments
    must be instantiated caffe.Nets.
    """
    for p in net.params:
        p_new = p + suffix
        if p_new not in new_net.params:
            # No counterpart in the target net: skip this parameter.
            print 'dropping', p
            continue
        for i in range(len(net.params[p])):
            if i > (len(new_net.params[p_new]) - 1):
                # Target has fewer blobs for this parameter: stop here.
                print 'dropping', p, i
                break
            if net.params[p][i].data.shape != new_net.params[p_new][i].data.shape:
                print 'coercing', p, i, 'from', net.params[p][i].data.shape, 'to', new_net.params[p_new][i].data.shape
            else:
                print 'copying', p, ' -> ', p_new, i
            # .flat copies element-wise regardless of shape mismatch.
            new_net.params[p_new][i].data.flat = net.params[p][i].data.flat
Transfer weights by copying matching parameters, coercing parameters of incompatible shape, and dropping unmatched parameters. The coercion is useful to convert fully connected layers to their equivalent convolutional layers, since the weights are the same and only the shapes are different. In particular, equivalent fully connected and convolution layers have shapes O x I and O x I x H x W respectively for O outputs channels, I input channels, H kernel height, and W kernel width. Both `net` to `new_net` arguments must be instantiated `caffe.Net`s.
def get_filetypes_info(editor_quote="`", flag_leaf=True):
    """Report available data/file types.

    Args:
        editor_quote: character to enclose each editor script name in.
        flag_leaf: passed through to f311.classes_file(); see
            tabulate_filetypes_rest()

    Returns:
        list: dicts sorted by description, one per file class, with keys
        description, default_filename, classname, editors, class, txtbin.
    """
    NONE_REPL = ""
    import f311
    data = []
    for attr in f311.classes_file(flag_leaf):
        # First line of the class docstring serves as the description.
        description = a99.get_obj_doc0(attr)
        def_ = NONE_REPL if attr.default_filename is None else attr.default_filename
        ee = attr.editors
        if ee is None:
            ee = NONE_REPL
        else:
            # Quote each editor script name, e.g. `myscript.py`.
            ee = ", ".join(["{0}{1}{0}".format(editor_quote, x, editor_quote) for x in ee])
        data.append({"description": description,
                     "default_filename": def_,
                     "classname": attr.__name__,
                     "editors": ee,
                     "class": attr,
                     "txtbin": "text" if attr.flag_txt else "binary"})
    data.sort(key=lambda x: x["description"])
    return data
Reports available data types Args: editor_quote: character to enclose the name of the editor script between. flag_leaf: see tabulate_filetypes_rest() Returns: list: list of FileTypeInfo
def _initialize(self, **resource_attributes):
    """Initialize the collection, replacing each raw resource dict in
    self.data with an instance of the expected API resource class.

    :param resource_attributes: API resource parameters
    """
    super(APIResourceCollection, self)._initialize(**resource_attributes)
    raw_resources = self.data
    self.data = []
    for raw in raw_resources:
        self.data.append(self._expected_api_resource(**raw))
Initialize the collection. :param resource_attributes: API resource parameters
def enclosure_groups(self):
    """Return the EnclosureGroups API client, creating and caching it on
    first access.

    Returns:
        EnclosureGroups:
    """
    if not self.__enclosure_groups:
        self.__enclosure_groups = EnclosureGroups(self.__connection)
    return self.__enclosure_groups
Gets the EnclosureGroups API client. Returns: EnclosureGroups:
def pix2sky(self, pixel):
    """Get the sky coordinates for a given image pixel.

    Parameters
    ----------
    pixel : (float, float)
        Image coordinates.

    Returns
    -------
    ra, dec : float
        Sky coordinates (degrees).
    """
    # all_pix2world wants an array of points; duplicate the single pixel
    pair = numpy.array([pixel, pixel])
    world = self.wcs.all_pix2world(pair, 1)
    ra, dec = world[0]
    return [float(ra), float(dec)]
Get the sky coordinates for a given image pixel. Parameters ---------- pixel : (float, float) Image coordinates. Returns ------- ra,dec : float Sky coordinates (degrees)
def _format_keyword(self, keyword): import re result = '' if keyword: result = re.sub(r"\W", "", keyword) result = re.sub(r"_", "", result) return result
Remove special characters from a keyword. Analysis Service keywords must not contain them. E.g. if the assay name from a GeneXpert instrument is 'Ebola RUO', an AS must be created on Bika with the keyword 'EbolaRUO'
def from_flags(flags, ednsflags):
    """Return the rcode value encoded by flags and ednsflags.

    @param flags: the DNS flags
    @type flags: int
    @param ednsflags: the EDNS flags
    @type ednsflags: int
    @raises ValueError: rcode is < 0 or > 4095
    @rtype: int
    """
    low = flags & 0x000F              # low 4 bits from the DNS header
    high = (ednsflags >> 20) & 0xFF0  # extended bits from the EDNS OPT record
    rcode = low | high
    if not 0 <= rcode <= 4095:
        raise ValueError('rcode must be >= 0 and <= 4095')
    return rcode
Return the rcode value encoded by flags and ednsflags. @param flags: the DNS flags @type flags: int @param ednsflags: the EDNS flags @type ednsflags: int @raises ValueError: rcode is < 0 or > 4095 @rtype: int
def science_object_update(self, pid_old, path, pid_new, format_id=None):
    """Obsolete a Science Object on a Member Node with a different one.

    Delegates to the queued (asynchronous) update; `pid_old` is replaced by
    the object at `path` under `pid_new`, with an optional `format_id`.
    """
    self._queue_science_object_update(pid_old, path, pid_new, format_id)
Obsolete a Science Object on a Member Node with a different one.
def validate_timeout_or_zero(option, value):
    """Validate a timeout given in milliseconds, returning floating point
    seconds, for the case where None is an error and 0 is valid.

    Setting the timeout to nothing in the URI string is a config error.
    """
    if value is None:
        raise ConfigurationError("%s cannot be None" % (option, ))
    if value in (0, "0"):
        return 0
    return validate_positive_float(option, value) / 1000.0
Validates a timeout specified in milliseconds returning a value in floating point seconds for the case where None is an error and 0 is valid. Setting the timeout to nothing in the URI string is a config error.
def resample_multipitch(times, frequencies, target_times):
    """Resample a multipitch time series onto a new timescale using
    nearest-neighbour interpolation.

    Target times outside the range of ``times`` yield an empty estimate.

    Parameters
    ----------
    times : np.ndarray
        Array of time stamps
    frequencies : list of np.ndarray
        Frequency values per time stamp
    target_times : np.ndarray
        Array of target time stamps

    Returns
    -------
    frequencies_resampled : list of np.ndarray
        Frequency lists resampled to the new timebase
    """
    if target_times.size == 0:
        return []
    if times.size == 0:
        return [np.array([])] * len(target_times)

    n_frames = len(frequencies)
    # interpolate over frame indices; out-of-range targets map to n_frames
    interpolator = scipy.interpolate.interp1d(
        times, np.arange(0, n_frames), kind='nearest',
        bounds_error=False, assume_sorted=True, fill_value=n_frames)
    nearest = interpolator(target_times).astype(int)

    # index n_frames (the fill value) selects the sentinel empty array
    padded = frequencies + [np.array([])]
    return [padded[i] for i in nearest]
Resamples multipitch time series to a new timescale. Values in ``target_times`` outside the range of ``times`` return no pitch estimate. Parameters ---------- times : np.ndarray Array of time stamps frequencies : list of np.ndarray List of np.ndarrays of frequency values target_times : np.ndarray Array of target time stamps Returns ------- frequencies_resampled : list of numpy arrays Frequency list of lists resampled to new timebase
def run_nupack(kwargs):
    """Run a picklable NUPACK command.

    :param kwargs: keyword arguments to pass to NUPACK as well as 'cmd'.
    :returns: Variable - whatever `cmd` returns.
    """
    nupack = NUPACK(kwargs['seq'])
    command = getattr(nupack, kwargs['cmd'])
    return command(**kwargs['arguments'])
Run picklable Nupack command. :param kwargs: keyword arguments to pass to Nupack as well as 'cmd'. :returns: Variable - whatever `cmd` returns.
def allState(self, *args, **kwargs):
    """List out the entire internal state.

    This method is only for debugging the ec2-manager.

    This method is ``experimental``.
    """
    return self._makeApiCall(self.funcinfo["allState"], *args, **kwargs)
List out the entire internal state This method is only for debugging the ec2-manager This method is ``experimental``
def source_extraction(in1, tolerance, mode="cpu", store_on_gpu=False, neg_comp=False):
    """Dispatch source extraction to the CPU or GPU implementation.

    INPUTS:
        in1: array containing the wavelet decomposition.
        tolerance: percentage of the maximum coefficient at which objects
            are deemed significant.
        mode: "cpu" or "gpu".

    OUTPUTS:
        Array containing the significant wavelet coefficients of the
        extracted sources.
    """
    if mode == "gpu":
        return gpu_source_extraction(in1, tolerance, store_on_gpu, neg_comp)
    elif mode == "cpu":
        return cpu_source_extraction(in1, tolerance, neg_comp)
Convenience function for allocating work to cpu or gpu, depending on the selected mode. INPUTS: in1 (no default): Array containing the wavelet decomposition. tolerance (no default): Percentage of maximum coefficient at which objects are deemed significant. mode (default="cpu"):Mode of operation - either "gpu" or "cpu". OUTPUTS: Array containing the significant wavelet coefficients of extracted sources.
def relativize_classpath(classpath, root_dir, followlinks=True):
    """Convert classpath entries into paths relative to ``root_dir``,
    suitable for a jar manifest Class-Path attribute.

    :param list classpath: Classpath to be relativized.
    :param string root_dir: Directory to relativize against; need not exist.
    :param bool followlinks: Resolve symlinks before computing relative paths.
    :returns: Converted classpath of the same size as the input.
    :rtype: list of strings
    """
    base = os.path.realpath(root_dir) if followlinks else root_dir

    def _relativize(url):
        resolved = os.path.realpath(url) if followlinks else url
        rel = os.path.relpath(resolved, base)
        # directory entries need a trailing slash in Class-Path
        if os.path.isdir(resolved):
            rel += '/'
        return rel

    return [_relativize(url) for url in classpath]
Convert into classpath relative to a directory. This is eventually used by a jar file located in this directory as its manifest attribute Class-Path. See https://docs.oracle.com/javase/7/docs/technotes/guides/extensions/spec.html#bundled :param list classpath: Classpath to be relativized. :param string root_dir: directory to relativize urls in the classpath, does not have to exist yet. :param bool followlinks: whether to follow symlinks to calculate relative path. :returns: Converted classpath of the same size as input classpath. :rtype: list of strings
def condition_details_has_owner(condition_details, owner):
    """Recursively check whether ``owner``'s public key appears in the
    condition details as an Ed25519Fulfillment.public_key.

    Args:
        condition_details (dict): dict with condition details
        owner (str): base58 public key of owner

    Returns:
        bool: True if the public key is found in the condition details,
        False otherwise
    """
    if 'subconditions' in condition_details:
        if condition_details_has_owner(condition_details['subconditions'], owner):
            return True
    elif isinstance(condition_details, list):
        return any(condition_details_has_owner(sub, owner)
                   for sub in condition_details)
    elif 'public_key' in condition_details \
            and owner == condition_details['public_key']:
        return True
    return False
Check if the public_key of owner is in the condition details as an Ed25519Fulfillment.public_key Args: condition_details (dict): dict with condition details owner (str): base58 public key of owner Returns: bool: True if the public key is found in the condition details, False otherwise
def request(community_id, record_id, accept):
    """Request a record acceptance to a community."""
    community = Community.get(community_id)
    assert community is not None
    record = Record.get_record(record_id)
    if accept:
        community.add_record(record)
        record.commit()
    else:
        # pending request; the community owner decides later
        InclusionRequest.create(community=community, record=record,
                                notify=False)
    db.session.commit()
    RecordIndexer().index_by_id(record.id)
Request a record acceptance to a community.
def _init_relationships(self, relationships_arg): if relationships_arg: relationships_all = self._get_all_relationships() if relationships_arg is True: return relationships_all else: return relationships_all.intersection(relationships_arg) return set()
Return a set of relationships found in all subset GO Terms.
def load_each(*loaders):
    """Loader factory that combines a series of loaders.

    The returned loader applies every given loader to the metadata and
    merges their results.
    """
    def _load_each(metadata):
        results = (loader(metadata) for loader in loaders)
        return merge(results)
    return _load_each
Loader factory that combines a series of loaders.
def is_ancestor(self, commit1, commit2, patch=False):
    """Return True if commit1 is a direct ancestor of commit2, False
    otherwise.

    A commit is considered a direct ancestor of itself.
    """
    revset = "first(%s::%s)" % (commit1, commit2)
    output = self.hg("log", "-r", revset, "--template", "exists", patch=patch)
    return "exists" in output
Returns True if commit1 is a direct ancestor of commit2, or False otherwise. This method considers a commit to be a direct ancestor of itself
def upload(self, path, engine, description=None):
    """Create a new config resource in the slicing service and upload the
    contents of ``path`` to it.

    If no description is given, the file (or directory) name is used.
    """
    if description is None:
        head, tail = ntpath.split(path)
        description = tail or ntpath.basename(head)
    url = "http://quickslice.{}/config/raw/".format(self.config.host)
    with open(path) as config_file:
        content = config_file.read()
    payload = {"engine": engine,
               "description": description,
               "content": content}
    post_resp = requests.post(url, json=payload,
                              cookies={"session": self.session})
    if not post_resp.ok:
        raise errors.ResourceError("config upload to slicing service failed")
    self.description = description
    self.location = post_resp.headers["Location"]
Create a new config resource in the slicing service and upload the path contents to it
def scan(self, filetypes=None):
    """Iterate over the content of the disk, querying VirusTotal to
    determine whether each file is malicious.

    ``filetypes`` is an optional list of regular expression patterns; only
    files whose type matches one of them are queried. Yields one report
    per unknown/positive file via ``parse_response``.
    """
    self.logger.debug("Scanning FS content.")
    checksums = self.filetype_filter(self._filesystem.checksums('/'),
                                     filetypes=filetypes)
    self.logger.debug("Querying %d objects to VTotal.", len(checksums))
    for batch in chunks(checksums, size=self.batchsize):
        # invert (path, hash) pairs into a hash -> path mapping
        by_hash = dict(reversed(pair) for pair in batch)
        response = vtquery(self._apikey, by_hash.keys())
        yield from self.parse_response(by_hash, response)
Iterates over the content of the disk and queries VirusTotal to determine whether it's malicious or not. filetypes is a list containing regular expression patterns. If given, only the files whose type matches one or more of the given patterns will be queried against VirusTotal. For each file which is unknown by VT or positive to any of its engines, the method yields a VTReport namedtuple with fields: path (e.g. C:\\Windows\\System32\\infected.dll), hash (e.g. ab231...), and detections (a dictionary mapping engine -> detection). Files unknown by VirusTotal will contain the string 'unknown' in the detection field.
def json_description_metadata(description):
    """Return metadata from a JSON formatted image description as a dict.

    Supports both '{...}' JSON objects and the legacy 'shape=(...)' form.
    Raise ValueError if description is of unknown format.

    >>> json_description_metadata('shape=(256, 256, 3)')
    {'shape': (256, 256, 3)}
    """
    if description[:6] == 'shape=':
        # legacy form: parse the comma-separated ints between the parens
        dims = description[7:-1].split(',')
        return dict(shape=tuple(int(d) for d in dims))
    if description[:1] == '{' and description[-1:] == '}':
        return json.loads(description)
    raise ValueError('invalid JSON image description', description)
Return metatata from JSON formated image description as dict. Raise ValuError if description is of unknown format. >>> description = '{"shape": [256, 256, 3], "axes": "YXS"}' >>> json_description_metadata(description) # doctest: +SKIP {'shape': [256, 256, 3], 'axes': 'YXS'} >>> json_description_metadata('shape=(256, 256, 3)') {'shape': (256, 256, 3)}
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of bridges on the system.

    An interface is a bridge iff it has a 'bridge' subdirectory in sysfs.
    """
    pattern = "%s/*/bridge" % vnic_dir
    bridges = []
    for entry in glob.glob(pattern):
        # the path component directly under vnic_dir is the interface name
        bridges.append(entry.replace(vnic_dir, '').split('/')[1])
    return bridges
Return a list of bridges on the system.
def _parse_args(cls): cls.parser = argparse.ArgumentParser() cls.parser.add_argument( "symbol", help="Symbol for horizontal line", nargs="*") cls.parser.add_argument( "--color", "-c", help="Color of the line", default=None, nargs=1) cls.parser.add_argument( "--version", "-v", action="version", version="0.13") return cls.parser
Method to parse command line arguments
def scene_command(self, command):
    """Send a posted scene command to the hub and return the response."""
    self.logger.info("scene_command: Group %s Command %s",
                     self.group_id, command)
    url = self.hub.hub_url + '/0?' + command + self.group_id + "=I=0"
    return self.hub.post_direct_command(url)
Wrapper to send posted scene command and get response
def addResource(self, key, filePath, text):
    """Add a file resource (for example, the organization's logo or a
    custom banner) usable by any member of the organization. File
    resources use storage space from the quota and are scanned for
    viruses.

    Inputs:
        key - The name the resource should be stored under.
        filePath - path of file to upload.
        text - Some text (for example, JSON or JavaScript) to be written
            directly to the resource from a web client.
    """
    url = self.root + "/addresource"
    params = {
        "f": "json",
        "token": self._securityHandler.token,
        "key": key,
        "text": text,
    }
    files = {"file": filePath}
    return self._post(url=url,
                      param_dict=params,
                      files=files,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
The add resource operation allows the administrator to add a file resource, for example, the organization's logo or custom banner. The resource can be used by any member of the organization. File resources use storage space from your quota and are scanned for viruses. Inputs: key - The name the resource should be stored under. filePath - path of file to upload text - Some text to be written (for example, JSON or JavaScript) directly to the resource from a web client.
def populateFromDirectory(self, vcfDirectory):
    """Populate this VariantSet by examining all the VCF files in the
    specified directory. Mainly a convenience for testing purposes.

    Assumes each "*.vcf.gz" file has a ".tbi" index next to it.
    """
    dataFiles = glob.glob(os.path.join(vcfDirectory, "*.vcf.gz"))
    indexFiles = [dataFile + ".tbi" for dataFile in dataFiles]
    self.populateFromFile(dataFiles, indexFiles)
Populates this VariantSet by examing all the VCF files in the specified directory. This is mainly used for as a convenience for testing purposes.
def end_profiling(profiler, filename, sorting=None):
    """Stop the profiling process and write the profiled data, sorted by
    ``sorting``, into ``filename``.

    :param profiler: An already started profiler (probably by start_profiling).
    :type profiler: cProfile.Profile
    :param filename: The name of the output file to save the profile.
    :type filename: basestring
    :param sorting: The sorting passed to pstats sort_stats.
    :type sorting: basestring
    :return: None
    """
    profiler.disable()
    report = six.StringIO()
    stats = pstats.Stats(profiler, stream=report).sort_stats(sorting)
    stats.print_stats()
    with open(filename, "w+") as outfile:
        _logger.info("[calculate_ts_features] Finished profiling of time series feature extraction")
        outfile.write(report.getvalue())
Helper function to stop the profiling process and write out the profiled data into the given filename. Before this, sort the stats by the passed sorting. :param profiler: An already started profiler (probably by start_profiling). :type profiler: cProfile.Profile :param filename: The name of the output file to save the profile. :type filename: basestring :param sorting: The sorting of the statistics passed to the sort_stats function. :type sorting: basestring :return: None :rtype: None Start and stop the profiler with: >>> profiler = start_profiling() >>> # Do something you want to profile >>> end_profiling(profiler, "out.txt", "cumulative")
def init_types_collection(filter_filename=default_filter_filename):
    """Setup profiler hooks to enable type collection.

    Call this one time from the main thread.

    :param filter_filename: maps a filename (from code.co_filename) to
        either a normalized filename or None; see default_filter_filename().
    """
    global _filter_filename
    _filter_filename = filter_filename
    # hook the current thread and all threads started afterwards
    sys.setprofile(_trace_dispatch)
    threading.setprofile(_trace_dispatch)
Setup profiler hooks to enable type collection. Call this one time from the main thread. The optional argument is a filter that maps a filename (from code.co_filename) to either a normalized filename or None. For the default filter see default_filter_filename().
def _cleanup_api(self):
    """Delete all non-root resources and all models of the REST API.

    Used when a change in the swagger file for a stage is detected.
    Returns {'deleted': True} on success, or the first failing delete
    result otherwise.
    """
    resources = __salt__['boto_apigateway.describe_api_resources'](
        restApiId=self.restApiId, **self._common_aws_args)
    # skip the root resource and delete children deepest-first
    children = (resources.get('resources') or [])[1:]
    for resource in reversed(children):
        delres = __salt__['boto_apigateway.delete_api_resources'](
            restApiId=self.restApiId,
            path=resource.get('path'),
            **self._common_aws_args)
        if not delres.get('deleted'):
            return delres
    models = __salt__['boto_apigateway.describe_api_models'](
        restApiId=self.restApiId, **self._common_aws_args)
    for model in (models.get('models') or []):
        delres = __salt__['boto_apigateway.delete_api_model'](
            restApiId=self.restApiId,
            modelName=model.get('name'),
            **self._common_aws_args)
        if not delres.get('deleted'):
            return delres
    return {'deleted': True}
Helper method to clean up resources and models if we detected a change in the swagger file for a stage
def add_contact_to_group(self, contact, group):
    """Add contact to group.

    :param contact: name or contact object
    :param group: name or group object
    :type contact: ``str``, ``unicode``, ``dict``
    :type group: ``str``, ``unicode``, ``dict``
    :rtype: ``bool``
    """
    # names are resolved to their full objects first
    if isinstance(contact, basestring):
        contact = self.get_contact(contact)
    if isinstance(group, basestring):
        group = self.get_group(group)
    method, url = get_URL('contacts_add_to_group')
    payload = {
        'apikey': self.config.get('apikey'),
        'logintoken': self.session.cookies.get('logintoken'),
        'contactid': contact['contactid'],
        'contactgroupid': group['contactgroupid'],
    }
    response = getattr(self.session, method)(url, params=payload)
    if response.status_code == 200:
        return True
    hellraiser(response)
Add contact to group :param contact: name or contact object :param group: name or group object :type contact: ``str``, ``unicode``, ``dict`` :type group: ``str``, ``unicode``, ``dict`` :rtype: ``bool``
def is_expired(self, time_offset_seconds=0):
    """Check whether the Session Token is expired.

    By default the check is for "right now"; supplying
    ``time_offset_seconds`` tests expiration that many seconds in the
    future (e.g. 5 returns True if the token will be expired 5 seconds
    from now).

    :type time_offset_seconds: int
    :param time_offset_seconds: seconds into the future to test the
        Session Token for expiration.
    """
    reference = datetime.datetime.utcnow()
    if time_offset_seconds:
        reference += datetime.timedelta(seconds=time_offset_seconds)
    expires_at = boto.utils.parse_ts(self.expiration)
    return (expires_at - reference).total_seconds() <= 0
Checks to see if the Session Token is expired or not. By default it will check to see if the Session Token is expired as of the moment the method is called. However, you can supply an optional parameter which is the number of seconds of offset into the future for the check. For example, if you supply a value of 5, this method will return a True if the Session Token will be expired 5 seconds from this moment. :type time_offset_seconds: int :param time_offset_seconds: The number of seconds into the future to test the Session Token for expiration.
def update_not_existing_kwargs(to_update, update_from):
    """Copy entries of ``update_from`` into ``to_update``, but only for
    keys not already set in ``to_update``.

    Used for filling kwargs from the default dicts. Returns the (possibly
    newly created) ``to_update`` dict.
    """
    if to_update is None:
        to_update = {}
    missing = {key: value for key, value in update_from.items()
               if key not in to_update}
    to_update.update(missing)
    return to_update
This function updates the keyword aguments from update_from in to_update, only if the keys are not set in to_update. This is used for updated kwargs from the default dicts.
def from_interbase_coordinates(contig, start, end=None):
    """Given coordinates in 0-based interbase coordinates, return a Locus
    instance.

    ``end`` defaults to ``start + 1``; the contig name is normalized.
    """
    typechecks.require_string(contig)
    typechecks.require_integer(start)
    end = start + 1 if end is None else end
    typechecks.require_integer(end)
    normalized = pyensembl.locus.normalize_chromosome(contig)
    return Locus(normalized, start, end)
Given coordinates in 0-based interbase coordinates, return a Locus instance.
def _update_port_locations(self, initial_coordinates):
    """Adjust port locations after particles have moved.

    Each anchored Port is shifted by however far its anchor Particle has
    moved relative to ``initial_coordinates``, preserving the Port's
    position relative to its anchor (but not its orientation relative to
    the molecule as a whole).

    Parameters
    ----------
    initial_coordinates : np.ndarray, shape=(n, 3), dtype=float
        Reference coordinates for the anchor Particles.
    """
    particles = list(self.particles())
    for port in self.all_ports():
        if not port.anchor:
            continue
        index = particles.index(port.anchor)
        displacement = particles[index].pos - initial_coordinates[index]
        port.translate(displacement)
Adjust port locations after particles have moved Compares the locations of Particles between 'self' and an array of reference coordinates. Shifts Ports in accordance with how far anchors have been moved. This conserves the location of Ports with respect to their anchor Particles, but does not conserve the orientation of Ports with respect to the molecule as a whole. Parameters ---------- initial_coordinates : np.ndarray, shape=(n, 3), dtype=float Reference coordinates to use for comparing how far anchor Particles have shifted.
def log(self, level, *args, **kwargs):
    """Delegate a log call to the underlying logger.

    Positional and keyword arguments are forwarded as-is (packed as a
    tuple and a dict) along with the level.
    """
    return self._log_kw(level, args, kwargs)
Delegate a log call to the underlying logger.
def list_icmp_block(zone, permanent=True):
    """List ICMP blocks on a zone.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' firewalld.list_icmp_block zone
    """
    cmd = '--zone={0} --list-icmp-blocks'.format(zone)
    if permanent:
        cmd = cmd + ' --permanent'
    return __firewall_cmd(cmd).split()
List ICMP blocks on a zone .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.list_icmp_block zone
def position(parser, token):
    """Render a given position for category.

    If some position is not defined for the first category, the position
    from its parent category is used unless nofallback is specified.

    Syntax::

        {% position POSITION_NAME for CATEGORY [nofallback] %}{% endposition %}
        {% position POSITION_NAME for CATEGORY using BOX_TYPE [nofallback] %}{% endposition %}

    Example usage::

        {% position top_left for category %}{% endposition %}
    """
    bits = token.split_contents()
    end_tag = 'end' + bits[0]
    nodelist = parser.parse((end_tag,))
    parser.delete_first_token()
    return _parse_position_tag(bits, nodelist)
Render a given position for category. If some position is not defined for first category, position from its parent category is used unless nofallback is specified. Syntax:: {% position POSITION_NAME for CATEGORY [nofallback] %}{% endposition %} {% position POSITION_NAME for CATEGORY using BOX_TYPE [nofallback] %}{% endposition %} Example usage:: {% position top_left for category %}{% endposition %}
def timestamps(self):
    """Return the sorted union of all timestamps from all series in the
    group."""
    merged = set()
    for series in self.groups.itervalues():
        merged.update(series.timestamps)
    return sorted(merged)
Get all timestamps from all series in the group.
def initialize_switch_endpoints(self):
    """Initialize endpoints for switch communication.

    Builds one EAPI client per entry in the ml2_arista switch_info config
    (each formatted as "ip:user:password").
    """
    self._switches = {}
    self._port_group_info = {}
    self._validate_config()
    for entry in cfg.CONF.ml2_arista.switch_info:
        switch_ip, switch_user, switch_pass = entry.split(":")
        # an explicitly quoted empty password means "no password"
        if switch_pass == "''":
            switch_pass = ''
        self._switches[switch_ip] = api.EAPIClient(
            switch_ip,
            switch_user,
            switch_pass,
            verify=False,
            timeout=cfg.CONF.ml2_arista.conn_timeout)
    self._check_dynamic_acl_support()
Initialize endpoints for switch communication