code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_as_nullable_type(self, key, value_type):
    """Convert a map element into a value defined by the specified type code.

    :param key: key of the element to convert.
    :param value_type: the TypeCode that defines the type of the result.
    :return: the converted element value, or None if conversion is not
        supported.
    """
    raw_value = self.get(key)
    return TypeConverter.to_nullable_type(value_type, raw_value)
Converts a map element into a value defined by the specified typecode. If conversion is not possible it returns None. :param key: the key of the element to get. :param value_type: the TypeCode that defines the type of the result :return: element value defined by the typecode, or None if conversion is not supported.
def get_feature_names(self):
    """Get feature names from all transformers.

    Returns
    -------
    feature_names : list of strings
        Names of the features produced by transform.
    """
    names = []
    for name, trans, weight in self._iter():
        if not hasattr(trans, 'get_feature_names'):
            raise AttributeError(
                "Transformer %s (type %s) does not "
                "provide get_feature_names." % (str(name), type(trans).__name__))
        names.extend(name + "__" + f for f in trans.get_feature_names())
    return names
Get feature names from all transformers. Returns ------- feature_names : list of strings Names of the features produced by transform.
def invert(m):
    """Generate the inverse of a 3x3 matrix.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/invert_c.html

    :param m: Matrix to be inverted.
    :type m: 3x3-Element Array of floats
    :return: Inverted matrix (m1)^-1
    :rtype: 3x3-Element Array of floats
    """
    matrix_in = stypes.toDoubleMatrix(m)
    matrix_out = stypes.emptyDoubleMatrix()
    libspice.invert_c(matrix_in, matrix_out)
    return stypes.cMatrixToNumpy(matrix_out)
Generate the inverse of a 3x3 matrix. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/invert_c.html :param m: Matrix to be inverted. :type m: 3x3-Element Array of floats :return: Inverted matrix (m1)^-1 :rtype: 3x3-Element Array of floats
async def get_codec(self):
    """Get codec settings."""
    action = self.service.action("X_GetCodec")
    return await action.async_call()
Get codec settings.
def variable(
        self, name=None, function=None, decl_type=None, header_dir=None,
        header_file=None, recursive=None):
    """Return a reference to the variable declaration matching the criteria."""
    matcher = self._impl_matchers[scopedef_t.variable]
    return self._find_single(
        matcher,
        name=name,
        function=function,
        decl_type=decl_type,
        header_dir=header_dir,
        header_file=header_file,
        recursive=recursive)
returns a reference to the variable declaration that matches the defined criteria
def get_encoder(self, content_type):
    """Get the encoding function for the provided content type for this bucket.

    Falls back to the client-level encoder registry when this bucket has no
    encoder registered for the content type.

    :param content_type: the requested media type
    :type content_type: str
    :return: the encoder registered for the content type
    """
    try:
        # EAFP: a single dict lookup instead of a membership test + lookup.
        return self._encoders[content_type]
    except KeyError:
        return self._client.get_encoder(content_type)
Get the encoding function for the provided content type for this bucket. :param content_type: the requested media type :type content_type: str :return: the encoder registered for the content type
def create(cls, video, language_code, file_format, content, provider):
    """Create a Video Transcript.

    Arguments:
        video(Video): Video data model object
        language_code(unicode): A language code.
        file_format(unicode): Transcript file format.
        content(InMemoryUploadedFile): Transcript content.
        provider(unicode): Transcript provider.

    Returns:
        The saved VideoTranscript instance.

    Raises:
        Exception: re-raised after logging when saving the transcript
            content to storage fails.
    """
    video_transcript = cls(
        video=video, language_code=language_code,
        file_format=file_format, provider=provider)
    # `closing` guarantees the uploaded content stream is closed even if
    # the storage save below raises.
    with closing(content) as transcript_content:
        try:
            # Random UUID-based file name avoids collisions in shared storage.
            file_name = '{uuid}.{ext}'.format(
                uuid=uuid4().hex, ext=video_transcript.file_format)
            video_transcript.transcript.save(file_name, transcript_content)
            video_transcript.save()
        except Exception:
            logger.exception(
                '[VAL] Transcript save failed to storage for video_id "%s" language code "%s"',
                video.edx_video_id,
                language_code
            )
            raise
    return video_transcript
Create a Video Transcript. Arguments: video(Video): Video data model object language_code(unicode): A language code. file_format(unicode): Transcript file format. content(InMemoryUploadedFile): Transcript content. provider(unicode): Transcript provider.
def fetchAllUsersFromThreads(self, threads):
    """Get all users involved in threads.

    :param threads: models.Thread: List of threads to check for users
    :return: :class:`models.User` objects
    :rtype: list
    :raises: FBchatException if request failed
    """
    users = []
    user_ids = set()       # uids of USER threads already appended to `users`
    users_to_fetch = []
    queued_ids = set()     # participant ids already queued for fetching
    for thread in threads:
        if thread.type == ThreadType.USER:
            # Set membership replaces the original O(n) rebuild of
            # [user.uid for user in users] on every iteration.
            if thread.uid not in user_ids:
                users.append(thread)
                user_ids.add(thread.uid)
        elif thread.type == ThreadType.GROUP:
            for user_id in thread.participants:
                if user_id not in user_ids and user_id not in queued_ids:
                    users_to_fetch.append(user_id)
                    queued_ids.add(user_id)
    # Fetch all remaining users in a single batched request.
    for user_id, user in self.fetchUserInfo(*users_to_fetch).items():
        users.append(user)
    return users
Get all users involved in threads. :param threads: models.Thread: List of threads to check for users :return: :class:`models.User` objects :rtype: list :raises: FBchatException if request failed
def _finalize(self, chain=-1): chain = range(self.chains)[chain] for name in self.trace_names[chain]: self._traces[name]._finalize(chain) self.commit()
Finalize the chain for all tallyable objects.
def pymongo_class_wrapper(f, pymongo_class):
    """Executes the coroutine f and wraps its result in a Motor class.

    See WrapAsync.

    :param f: the coroutine whose result may need wrapping.
    :param pymongo_class: the PyMongo class whose instances should be
        wrapped via ``self.wrap``.
    """
    @functools.wraps(f)
    @coroutine
    def _wrapper(self, *args, **kwargs):
        result = yield f(self, *args, **kwargs)

        # Tornado generator-coroutines return values by raising gen.Return.
        if result.__class__ == pymongo_class:
            # Wrap PyMongo results into the corresponding Motor class.
            raise gen.Return(self.wrap(result))
        else:
            raise gen.Return(result)

    return _wrapper
Executes the coroutine f and wraps its result in a Motor class. See WrapAsync.
def add_tileset(self, tileset):
    """Add a tileset to the map

    :param tileset: TiledTileset
    """
    assert isinstance(tileset, TiledTileset)
    self.tilesets.append(tileset)
Add a tileset to the map :param tileset: TiledTileset
def get(self):
    """Gets the details for the object."""
    self.loaded = True
    manager_get = getattr(self.manager, "get", None)
    if manager_get is None:
        return
    if not self.get_details:
        return
    refreshed = manager_get(self)
    if refreshed:
        self._add_details(refreshed._info)
Gets the details for the object.
def export_vtkjs(self, filename, compress_arrays=False):
    """Export the current rendering scene as a VTKjs scene for rendering
    in a web browser.
    """
    if not hasattr(self, 'ren_win'):
        raise RuntimeError('Export must be called before showing/closing the scene.')
    out_path = filename
    # Relative paths are resolved against the configured figure directory.
    if isinstance(vtki.FIGURE_PATH, str) and not os.path.isabs(out_path):
        out_path = os.path.join(vtki.FIGURE_PATH, out_path)
    return export_plotter_vtkjs(self, out_path, compress_arrays=compress_arrays)
Export the current rendering scene as a VTKjs scene for rendering in a web browser
def _register_with_pkg_resources(cls): try: import pkg_resources pkg_resources.__name__ except ImportError: return pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
Ensure package resources can be loaded from this loader. May be called multiple times, as the operation is idempotent.
def get_validated_options(options, warn=True):
    """Validate each entry in options and raise a warning if it is not valid.

    Returns a copy of options with invalid entries removed.

    :param options: mapping of option name to value.
    :param warn: if True, emit a warning for an invalid entry instead of
        re-raising the underlying validation error.
    :return: dict of validated option values keyed by lower-cased name.
    """
    validated_options = {}
    for opt, value in iteritems(options):
        lower = opt.lower()
        try:
            # Unknown option names fall through to raise_config_error.
            validator = URI_VALIDATORS.get(lower, raise_config_error)
            value = validator(opt, value)
        except (ValueError, ConfigurationError) as exc:
            if warn:
                warnings.warn(str(exc))
            else:
                raise
        else:
            # Only entries that validated cleanly are kept.
            validated_options[lower] = value
    return validated_options
Validate each entry in options and raise a warning if it is not valid. Returns a copy of options with invalid entries removed
def im_open(self, *, user: str, **kwargs) -> SlackResponse:
    """Opens a direct message channel.

    Args:
        user (str): The user id to open a DM with. e.g. 'W1234567890'
    """
    kwargs["user"] = user
    return self.api_call("im.open", json=kwargs)
Opens a direct message channel. Args: user (str): The user id to open a DM with. e.g. 'W1234567890'
def _l2ycbcr(self, mode): self._check_modes(("L", "LA")) luma = self.channels[0] zeros = np.ma.zeros(luma.shape) zeros.mask = luma.mask self.channels = [luma, zeros, zeros] + self.channels[1:] if self.fill_value is not None: self.fill_value = [self.fill_value[0], 0, 0] + self.fill_value[1:] self.mode = mode
Convert from L to YCbCr.
def plot_total(self, colorbar=True, cb_orientation='vertical',
               cb_label='$|B|$, nT', ax=None, show=True, fname=None,
               **kwargs):
    """Plot the total magnetic intensity.

    Parameters
    ----------
    colorbar : bool, optional, default = True
        If True, plot a colorbar.
    cb_orientation : str, optional, default = 'vertical'
        Orientation of the colorbar: either 'vertical' or 'horizontal'.
    cb_label : str, optional, default = '$|B|$, nT'
        Text label for the colorbar.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    show : bool, optional, default = True
        If True, plot the image to the screen.
    fname : str, optional, default = None
        If present, and if axes is not specified, save the image to the
        specified file.
    kwargs : optional
        Keyword arguments that will be sent to the SHGrid.plot() and
        plt.imshow() methods.
    """
    if ax is None:
        # No axes supplied: let the grid's plot method create the figure;
        # figure and axes are returned only in this branch.
        fig, axes = self.total.plot(
            colorbar=colorbar, cb_orientation=cb_orientation,
            cb_label=cb_label, show=False, **kwargs)
        if show:
            fig.show()
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
    else:
        # Caller-supplied axes: draw into them; nothing is returned.
        self.total.plot(
            colorbar=colorbar, cb_orientation=cb_orientation,
            cb_label=cb_label, ax=ax, **kwargs)
Plot the total magnetic intensity. Usage ----- x.plot_total([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname, **kwargs]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$|B|$, nT' Text label for the colorbar. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguments that will be sent to the SHGrid.plot() and plt.imshow() methods.
def get_start_state(self, set_final_outcome=False):
    """Get the start state of the container state.

    :param set_final_outcome: if the final_outcome of the state should be
        set if the income directly connects to an outcome
    :return: the start state, or None if there is none
    """
    # The execution engine may dictate an explicit start state via
    # start_state_paths (e.g. "run from selected state").
    if self.get_path() in state_machine_execution_engine.start_state_paths:
        for state_id, state in self.states.items():
            if state.get_path() in state_machine_execution_engine.start_state_paths:
                # Consume this container's path so it is only used once.
                state_machine_execution_engine.start_state_paths.remove(self.get_path())
                self._start_state_modified = True
                return state
    if self.start_state_id is None:
        return None
    # The income of the container state is directly connected to an outcome.
    if self.start_state_id == self.state_id:
        if set_final_outcome:
            for transition_id in self.transitions:
                # Transitions with no from_state originate at the income.
                if self.transitions[transition_id].from_state is None:
                    to_outcome_id = self.transitions[transition_id].to_outcome
                    self.final_outcome = self.outcomes[to_outcome_id]
                    break
        return self
    return self.states[self.start_state_id]
Get the start state of the container state :param set_final_outcome: if the final_outcome of the state should be set if the income directly connects to an outcome :return: the start state
def dict_print(self, output_file="dict.csv"):
    """Print mapping from tokens to numeric indices.

    :param output_file: path of the CSV file to write; one
        ``token,index`` pair per line.
    """
    with codecs.open(output_file, "w", encoding='utf-8') as out:
        for token, index in self.token_key.items():
            out.write("%s,%d\n" % (token, index))
Print mapping from tokens to numeric indices.
def parent_widget(self):
    """Reimplemented to only return GraphicsItems"""
    candidate = self.parent()
    # isinstance(None, ...) is False, so no separate None check is needed;
    # non-GraphicsItem parents yield an implicit None.
    if isinstance(candidate, QtGraphicsItem):
        return candidate.widget
Reimplemented to only return GraphicsItems
def BitmathType(bmstring):
    """An 'argument type' for integrations with the argparse module.

    ``type=`` can take any callable that takes a single string argument and
    returns the converted value, so this function can be passed directly:

    >>> parser.add_argument("--file-size", type=bitmath.BitmathType)

    Any input that bitmath.parse_string rejects is rejected here as an
    argparse type error.  Note that unquoted arguments containing spaces
    are rejected.
    """
    try:
        return bitmath.parse_string(bmstring)
    except ValueError:
        raise argparse.ArgumentTypeError("'%s' can not be parsed into a valid bitmath object" % bmstring)
An 'argument type' for integrations with the argparse module. For more information, see https://docs.python.org/2/library/argparse.html#type Of particular interest to us is this bit: ``type=`` can take any callable that takes a single string argument and returns the converted value I.e., ``type`` can be a function (such as this function) or a class which implements the ``__call__`` method. Example usage of the bitmath.BitmathType argparser type: >>> import bitmath >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> parser.parse_args("--file-size 1337MiB".split()) Namespace(file_size=MiB(1337.0)) Invalid usage includes any input that the bitmath.parse_string function already rejects. Additionally, **UNQUOTED** arguments with spaces in them are rejected (shlex.split used in the following examples to conserve single quotes in the parse_args call): >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> import shlex >>> # The following is ACCEPTABLE USAGE: ... >>> parser.parse_args(shlex.split("--file-size '1337 MiB'")) Namespace(file_size=MiB(1337.0)) >>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted! ... >>> parser.parse_args(shlex.split("--file-size 1337 MiB")) error: argument --file-size: 1337 can not be parsed into a valid bitmath object
def config(self, function):
    """Decorator to add a function to the configuration of the Experiment.

    The decorated function is turned into a
    :class:`~sacred.config_scope.ConfigScope` and added to the
    Ingredient/Experiment.  When the experiment is run, this function will
    also be executed and all json-serializable local variables inside it
    will end up as entries in the configuration of the experiment.
    """
    scope = ConfigScope(function)
    self.configurations.append(scope)
    return scope
Decorator to add a function to the configuration of the Experiment. The decorated function is turned into a :class:`~sacred.config_scope.ConfigScope` and added to the Ingredient/Experiment. When the experiment is run, this function will also be executed and all json-serializable local variables inside it will end up as entries in the configuration of the experiment.
def concatenate(self, catalogue):
    """Attach one catalogue to the current one.

    :param catalogue:
        An instance of :class:`hmtk.seismicity.catalogue.Catalogue`
    :raises ValueError: if the catalogues have different processing
        histories, or if an unknown attribute is encountered.
    """
    atts = getattr(self, 'data')
    attn = getattr(catalogue, 'data')
    data = _merge_data(atts, attn)
    if data is not None:
        setattr(self, 'data', data)
    for attrib in vars(self):
        atts = getattr(self, attrib)
        attn = getattr(catalogue, attrib)
        # BUG FIX: string comparison must use ==, not the identity operator
        # `is`, which only worked by accident of CPython string interning
        # (and is a SyntaxWarning on modern Python).
        if attrib == 'end_year':
            setattr(self, attrib, max(atts, attn))
        elif attrib == 'start_year':
            setattr(self, attrib, min(atts, attn))
        elif attrib == 'data':
            pass  # already merged above
        elif attrib == 'number_earthquakes':
            setattr(self, attrib, atts + attn)
        elif attrib == 'processes':
            if atts != attn:
                raise ValueError('The catalogues cannot be merged'
                                 ' since they have'
                                 ' a different processing history')
        else:
            raise ValueError('unknown attribute: %s' % attrib)
    self.sort_catalogue_chronologically()
This method attaches one catalogue to the current one :parameter catalogue: An instance of :class:`hmtk.seismicity.catalogue.Catalogue`
def is_standalone(self):
    """Return True if Glances is running in standalone mode."""
    args = self.args
    # Standalone means no other run mode was requested (De Morgan of the
    # original chain of negations).
    return not (args.client or args.browser or args.server or args.webserver)
Return True if Glances is running in standalone mode.
def chemical_formula(self):
    """the chemical formula of the molecule"""
    counts = {}
    for number in self.numbers:
        counts[number] = counts.get(number, 0) + 1
    parts = []
    # Elements are listed in descending atomic number; a count of one is
    # written without an explicit multiplier.
    for number, count in sorted(counts.items(), reverse=True):
        symbol = periodic[number].symbol
        if count == 1:
            parts.append(symbol)
        else:
            parts.append("%s%i" % (symbol, count))
    return "".join(parts)
the chemical formula of the molecule
def detect(checksum_revisions, radius=defaults.RADIUS):
    """Detects reverts that occur in a sequence of revisions.

    Convenience wrapper around calls to :class:`mwreverts.Detector`'s
    :func:`~mwreverts.Detector.process` method; revision meta data is
    returned as-is in any detected revert.

    :param checksum_revisions: iterable of (checksum, revision) tuples
    :param radius: positive int, the maximum revision distance a revert
        can span
    :return: iterator over :class:`mwreverts.Revert`
    """
    detector = Detector(radius)
    for checksum, revision in checksum_revisions:
        revert = detector.process(checksum, revision)
        if revert is not None:
            yield revert
Detects reverts that occur in a sequence of revisions. Note that, `revision` data meta will simply be returned in the case of a revert. This function serves as a convenience wrapper around calls to :class:`mwreverts.Detector`'s :func:`~mwreverts.Detector.process` method. :Parameters: checksum_revisions : `iterable` ( (checksum, revision) ) an iterable over tuples of checksum and revision meta data radius : int a positive integer indicating the maximum revision distance that a revert can span. :Return: a iterator over :class:`mwreverts.Revert` :Example: >>> import mwreverts >>> >>> checksum_revisions = [ ... ("aaa", {'rev_id': 1}), ... ("bbb", {'rev_id': 2}), ... ("aaa", {'rev_id': 3}), ... ("ccc", {'rev_id': 4}) ... ] >>> >>> list(mwreverts.detect(checksum_revisions)) [Revert(reverting={'rev_id': 3}, reverteds=[{'rev_id': 2}], reverted_to={'rev_id': 1})]
def interactive_merge_conflict_handler(self, exception):
    """Give the operator a chance to interactively resolve merge conflicts.

    :param exception: An :exc:`~executor.ExternalCommandFailed` object.
    :returns: :data:`True` if the operator has interactively resolved any
              merge conflicts (so the merge error doesn't need to be
              propagated), :data:`False` otherwise.
    """
    # Interaction is only possible when stdin is attached to a terminal.
    if connected_to_terminal(sys.stdin):
        # NOTE(review): compact() is called without any message text here;
        # the informational string appears to have been stripped from this
        # copy of the source -- confirm against upstream.
        logger.info(compact( ))
        while True:
            if prompt_for_confirmation("Ignore merge error because you've resolved all conflicts?"):
                if self.merge_conflicts:
                    # Operator claimed success but conflicts remain:
                    # warn and prompt again.
                    logger.warning("I'm still seeing merge conflicts, please double check! (%s)",
                                   concatenate(self.merge_conflicts))
                else:
                    return True
            else:
                break
    return False
Give the operator a chance to interactively resolve merge conflicts. :param exception: An :exc:`~executor.ExternalCommandFailed` object. :returns: :data:`True` if the operator has interactively resolved any merge conflicts (and as such the merge error doesn't need to be propagated), :data:`False` otherwise. This method checks whether :data:`sys.stdin` is connected to a terminal to decide whether interaction with an operator is possible. If it is then an interactive terminal prompt is used to ask the operator to resolve the merge conflict(s). If the operator confirms the prompt, the merge error is swallowed instead of propagated. When :data:`sys.stdin` is not connected to a terminal or the operator denies the prompt the merge error is propagated.
def delete_intent(project_id, intent_id):
    """Delete intent with the given intent type and intent value."""
    import dialogflow_v2 as dialogflow

    client = dialogflow.IntentsClient()
    path = client.intent_path(project_id, intent_id)
    client.delete_intent(path)
Delete intent with the given intent type and intent value.
def check_alive_instances(self):
    """Check alive instances. If not, log error and try to restart it

    :return: None
    """
    for instance in self.instances:
        # Already scheduled for restart: nothing more to do.
        if instance in self.to_restart:
            continue
        # External module whose worker process died unexpectedly.
        if instance.is_external and instance.process and not instance.process.is_alive():
            logger.error("The external module %s died unexpectedly!", instance.name)
            logger.info("Setting the module %s to restart", instance.name)
            instance.clear_queues(self.daemon.sync_manager)
            self.set_to_restart(instance)
            continue
        # A max_queue_size of 0 disables the queue-size watchdog.
        if self.daemon.max_queue_size == 0:
            continue
        queue_size = 0
        try:
            queue_size = instance.to_q.qsize()
        except Exception:
            # qsize() may be unavailable or unreliable on some platforms;
            # best effort: treat as 0.
            pass
        # A module whose input queue grows past the limit is considered
        # stuck and gets restarted.
        if queue_size > self.daemon.max_queue_size:
            logger.error("The module %s has a too important queue size (%s > %s max)!",
                         instance.name, queue_size, self.daemon.max_queue_size)
            logger.info("Setting the module %s to restart", instance.name)
            instance.clear_queues(self.daemon.sync_manager)
            self.set_to_restart(instance)
Check alive instances. If not, log error and try to restart it :return: None
def page(self, order=values.unset, from_=values.unset, bounds=values.unset,
         page_token=values.unset, page_number=values.unset,
         page_size=values.unset):
    """Retrieve a single page of SyncListItemInstance records from the API.

    Request is executed immediately.

    :param order: The order
    :param from_: The from
    :param bounds: The bounds
    :param page_token: PageToken provided by the API
    :param page_number: Page Number, this value is simply for client state
    :param page_size: Number of records to return, defaults to 50
    :returns: Page of SyncListItemInstance
    """
    params = values.of({
        'Order': order,
        'From': from_,
        'Bounds': bounds,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page('GET', self._uri, params=params)
    return SyncListItemPage(self._version, response, self._solution)
Retrieve a single page of SyncListItemInstance records from the API. Request is executed immediately :param SyncListItemInstance.QueryResultOrder order: The order :param unicode from_: The from :param SyncListItemInstance.QueryFromBoundType bounds: The bounds :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of SyncListItemInstance :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemPage
def geom_iter(self, g_nums):
    """Iterate over a subset of geometries.

    Each geometry is yielded as a length-3N vector of atomic coordinates
    (x/y/z grouped per atom), in the order indicated by `g_nums`.

    :param g_nums: length-R iterable of int -- indices of the desired
        geometries
    :raises IndexError: if an item in `g_nums` is out of range
    """
    from .utils import pack_tups
    for tup in pack_tups(g_nums):
        yield self.geom_single(tup[0])
Iterator over a subset of geometries. The indices of the geometries to be returned are indicated by an iterable of |int|\\ s passed as `g_nums`. As with :meth:`geom_single`, each geometry is returned as a length-3N |npfloat_| with each atom's x/y/z coordinates grouped together:: [A1x, A1y, A1z, A2x, A2y, A2z, ...] In order to use NumPy `slicing or advanced indexing <http://docs.scipy.org/doc/numpy-1.10.0/reference/ arrays.indexing.html>`__, :data:`geoms` must first be explicitly converted to |nparray|, e.g.:: >>> x = opan.xyz.OpanXYZ(path='...') >>> np.array(x.geoms)[[2,6,9]] Parameters ---------- g_nums length-R iterable of |int| -- Indices of the desired geometries Yields ------ geom length-3N |npfloat_| -- Vectors of the atomic coordinates for each geometry indicated in `g_nums` Raises ------ ~exceptions.IndexError If an item in `g_nums` is invalid (out of range)
def add_package(package_name, package_path='templates', encoding='utf-8'):
    """Adds the given package to the template search routine"""
    if not _has_jinja:
        raise RuntimeError(_except_text)
    loader = PackageLoader(package_name, package_path, encoding)
    _jload.add_loader(loader)
Adds the given package to the template search routine
def _return_response_and_status_code(response, json_results=True):
    """Output the requests response content or content as json and status code.

    :rtype : dict
    :param response: requests response object
    :param json_results: Should return JSON or raw content
    :return: dict containing the response content and/or the status code
        with error string.
    """
    status = response.status_code
    if status == requests.codes.ok:
        return dict(
            results=response.json() if json_results else response.content,
            response_code=status)
    # Known error statuses map to fixed explanatory messages.
    errors = {
        400: 'package sent is either malformed or not within the past 24 hours.',
        204: 'You exceeded the public API request rate limit (4 requests of any nature per minute)',
        403: 'You tried to perform calls to functions for which you require a Private API key.',
        404: 'File not found.',
    }
    if status in errors:
        return dict(error=errors[status], response_code=status)
    return dict(response_code=status)
Output the requests response content or content as json and status code :rtype : dict :param response: requests response object :param json_results: Should return JSON or raw content :return: dict containing the response content and/or the status code with error string.
def delete(self):
    """Removes a node and all its descendants."""
    # Deleting through a queryset triggers the tree-aware bulk delete.
    type(self).objects.filter(pk=self.pk).delete()
Removes a node and all its descendants.
def program_rtr_default_gw(self, tenant_id, rout_id, gw):
    """Program the default gateway of a router.

    :return: True on success; False (after logging) when programming the
        route failed.
    """
    cmd_args = ['route', 'add', 'default', 'gw', gw]
    if not self.program_rtr(cmd_args, rout_id):
        LOG.error("Program router returned error for %s", rout_id)
        return False
    return True
Program the default gateway of a router.
def parent(self):
    """Get this object's parent."""
    if self._parent:
        return self._parent
    elif getattr(self, '__parent_type__', None):
        # Folder-like URLs (path ending in '/') reach the parent at '..';
        # item-like URLs resolve it at '.'.
        return self._get_subfolder(
            '..' if self._url[2].endswith('/') else '.',
            self.__parent_type__)
    else:
        raise AttributeError("%r has no parent attribute" % type(self))
Get this object's parent
def execute(self, eopatch):
    """Execute method takes EOPatch and changes the specified feature"""
    ftype, fname = next(self.feature(eopatch))
    eopatch[ftype][fname] = self.process(eopatch[ftype][fname])
    return eopatch
Execute method takes EOPatch and changes the specified feature
def factory(token, alg=''):
    """Instantiate a JWS instance if the token is a signed JWT.

    :param token: The token that might be a signed JWT
    :param alg: The expected signature algorithm
    :return: A JWS instance if the token was a signed JWT, otherwise None
    """
    candidate = JWS(alg=alg)
    return candidate if candidate.is_jws(token) else None
Instantiate an JWS instance if the token is a signed JWT. :param token: The token that might be a signed JWT :param alg: The expected signature algorithm :return: A JWS instance if the token was a signed JWT, otherwise None
def items(self) -> Tuple[Tuple[str, "Package"], ...]:
    """Return a tuple of (package name, `Package` instance) pairs that
    are available.
    """
    deps = self.build_dependencies
    return tuple((name, deps.get(name)) for name in deps)
Return an iterable containing package name and corresponding `Package` instance that are available.
def _update_images(self): wd_images = self.data['claims'].get('P18') if wd_images: if not isinstance(wd_images, list): wd_images = [wd_images] if 'image' not in self.data: self.data['image'] = [] for img_file in wd_images: self.data['image'].append({'file': img_file, 'kind': 'wikidata-image'})
add images from Wikidata
def plot_vzz(self, colorbar=True, cb_orientation='vertical',
             cb_label=None, ax=None, show=True, fname=None, **kwargs):
    """Plot the Vzz component of the tensor.

    Parameters
    ----------
    colorbar : bool, optional, default = True
        If True, plot a colorbar.
    cb_orientation : str, optional, default = 'vertical'
        Orientation of the colorbar: either 'vertical' or 'horizontal'.
    cb_label : str, optional, default = None
        Text label for the colorbar; defaults to ``self._vzz_label``.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    show : bool, optional, default = True
        If True, plot the image to the screen.
    fname : str, optional, default = None
        If present, and if axes is not specified, save the image to the
        specified file.
    kwargs : optional
        Keyword arguments that will be sent to the SHGrid.plot() and
        plt.imshow() methods.
    """
    if cb_label is None:
        # Use the class-provided default label for this component.
        cb_label = self._vzz_label
    if ax is None:
        # No axes supplied: create the figure here; figure and axes are
        # returned only in this branch.
        fig, axes = self.vzz.plot(colorbar=colorbar,
                                  cb_orientation=cb_orientation,
                                  cb_label=cb_label, show=False, **kwargs)
        if show:
            fig.show()
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
    else:
        # Caller-supplied axes: draw into them; nothing is returned.
        self.vzz.plot(colorbar=colorbar,
                      cb_orientation=cb_orientation,
                      cb_label=cb_label, ax=ax, **kwargs)
Plot the Vzz component of the tensor. Usage ----- x.plot_vzz([tick_interval, xlabel, ylabel, ax, colorbar, cb_orientation, cb_label, show, fname]) Parameters ---------- tick_interval : list or tuple, optional, default = [30, 30] Intervals to use when plotting the x and y ticks. If set to None, ticks will not be plotted. xlabel : str, optional, default = 'longitude' Label for the longitude axis. ylabel : str, optional, default = 'latitude' Label for the latitude axis. ax : matplotlib axes object, optional, default = None A single matplotlib axes object where the plot will appear. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = '$V_{zz}$' Text label for the colorbar. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguments that will be sent to the SHGrid.plot() and plt.imshow() methods.
def run(self, node):
    """Apply transformation and dependencies and fix new node location."""
    result = super(Transformation, self).run(node)
    if self.update:
        # Newly created nodes need location info; the pass manager's
        # cached results are stale once the tree changed.
        ast.fix_missing_locations(result)
        self.passmanager._cache.clear()
    return result
Apply transformation and dependencies and fix new node location.
def _remove_boundaries(self, interval): begin = interval.begin end = interval.end if self.boundary_table[begin] == 1: del self.boundary_table[begin] else: self.boundary_table[begin] -= 1 if self.boundary_table[end] == 1: del self.boundary_table[end] else: self.boundary_table[end] -= 1
Removes the boundaries of the interval from the boundary table.
def query(self, model_cls):
    """SQLAlchemy query like method"""
    # Reset all query state before starting a new query.
    self._filters_cmd = []
    self.query_filters = []
    self._order_by_cmd = None
    self._offset = 0
    self._limit = 0
    self.query_class = model_cls._name
    return self
SQLAlchemy query like method
def _equivalent_node_iterator_helper(self, node: BaseEntity, visited: Set[BaseEntity]) -> BaseEntity:
    """Iterate over nodes and their data that are equal to the given node,
    starting with the original.

    :param node: node whose equivalence component is being explored.
    :param visited: accumulator of nodes already yielded; shared across
        the recursion to prevent cycles.
    """
    for v in self[node]:
        if v in visited:
            continue
        if self._has_no_equivalent_edge(node, v):
            continue
        visited.add(v)
        yield v
        # Depth-first recursion so the entire equivalence component
        # reachable through v is yielded as well.
        yield from self._equivalent_node_iterator_helper(v, visited)
Iterate over nodes and their data that are equal to the given node, starting with the original.
def get_vbox_version(config_kmk):
    """Return the vbox config major, minor, build.

    Parses the ``VBOX_VERSION_MAJOR`` / ``VBOX_VERSION_MINOR`` /
    ``VBOX_VERSION_BUILD`` assignments out of the given Config.kmk file.
    Any component that cannot be found falls back to the previously
    hard-coded 6.0.4 default.

    :param config_kmk: path to VirtualBox's Config.kmk file.
    :return: dotted version as bytes, e.g. ``b"6.0.4"``.
    """
    import re

    with open(config_kmk, 'rb') as f:
        config = f.read()

    def component(name, default):
        # kmk assignment lines look like: "VBOX_VERSION_MAJOR = 6"
        # (kBuild also allows ":=" and "?=" assignment operators).
        match = re.search(br'^\s*' + name + br'\s*[:?]?=\s*(\d+)', config, re.M)
        return match.group(1) if match else default

    major = component(b'VBOX_VERSION_MAJOR', b"6")
    minor = component(b'VBOX_VERSION_MINOR', b"0")
    build = component(b'VBOX_VERSION_BUILD', b"4")
    return b".".join([major, minor, build])
Return the vbox config major, minor, build
def slice_on_length(self, data_len, *addSlices):
    """Returns a slice representing the dimension range restrictions
    applied to a list of length data_len.

    If addSlices contains additional slice requirements, they are
    processed in the order they are given.

    :param data_len: length of the data the result will be applied to.
    :param addSlices: optional extra slice requirements to combine with
        this dimension range.
    """
    if len(self.ordered_ranges) + len(addSlices) == 0:
        # No restrictions at all: the identity slice.
        return slice(None, None, None)
    ranges = self.ordered_ranges
    if len(addSlices) > 0:
        # Append the extra requirements, preserving the given order.
        ranges = ranges + DimensionRange(*addSlices).ordered_ranges
    return self._combine_lists_of_ranges_on_length(data_len, *ranges)
Returns a slice representing the dimension range restrictions applied to a list of length data_len. If addSlices contains additional slice requirements, they are processed in the order they are given.
def filename(self, value):
    """Deprecated, use `source` instead."""
    message = (
        "The 'filename' attribute will be removed in future versions. "
        "Use 'source' instead."
    )
    # stacklevel=2 points the warning at the caller, not this setter.
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    self.source = value
Deprecated, use `source` instead.
def _on_context_disconnect(self, context):
    """Respond to Context disconnect event by deleting any record of the
    no longer reachable context.

    This method runs in the Broker thread and must not block.
    """
    # `with` guarantees the lock is released even if forgetting raises,
    # replacing the manual acquire/try/finally dance.
    with self._lock:
        LOG.info('%r: Forgetting %r due to stream disconnect', self, context)
        self._forget_context_unlocked(context)
Respond to Context disconnect event by deleting any record of the no longer reachable context. This method runs in the Broker thread and must not to block.
def get_etags_and_matchers(self, request):
    """Get the etags from the header and perform a validation against
    the required preconditions.

    :param request: the incoming request whose headers are inspected.
    :return: the etags and matchers from the parent implementation.
    """
    # Validate preconditions first so violations surface before any
    # etag processing happens.
    self.evaluate_preconditions(request)
    return super(APIETAGProcessor, self).get_etags_and_matchers(request)
Get the etags from the header and perform a validation against the required preconditions.
def catalog(self):
    """Primary registered catalog for the wrapped portal type"""
    cached = self._catalog
    if cached is not None:
        return cached
    # Lazily resolve and memoize the catalog on first access.
    logger.debug("SuperModel::catalog: *Fetch catalog*")
    self._catalog = self.get_catalog_for(self.brain)
    return self._catalog
Primary registered catalog for the wrapped portal type
def connect_controller(self, vid, pid, serial):
    """Connect a controller.

    :param vid: vendor id passed through to the telldus library.
    :param pid: product id passed through to the telldus library.
    :param serial: serial number of the controller.
    """
    self.lib.tdConnectTellStickController(vid, pid, serial)
Connect a controller.
def get_collections(db, collection=None, prefix=None, suffix=None):
    """Returns a sorted list of collection names found in ``db``.

    Arguments:
        db (Database): A pymongo Database object.
        collection (str): If given, short-circuit and return it as a
            single-element list (quick existence-style check).
        prefix (str): If supplied, only collections that begin with
            ``prefix`` are returned.
        suffix (str): If supplied, only collections that end with
            ``suffix`` are returned.

    Returns:
        list: A sorted list of collection names.
    """
    if collection is not None:
        return [collection]
    names = db.collection_names(include_system_collections=False)
    if prefix is not None:
        names = [n for n in names if n.startswith(prefix)]
    if suffix is not None:
        names = [n for n in names if n.endswith(suffix)]
    return sorted(names)
Returns a sorted list of collection names found in ``db``. Arguments: db (Database): A pymongo Database object. Can be obtained with ``get_db``. collection (str): Name of a collection. If the collection is present in the MongoDB database, a single-element list will be returned with the collecion name. If not, an empty list will be returned. This option is primarly included to allow for quick checking to see if a collection name is present. Default is None, which results in this option being ignored. prefix (str): If supplied, only collections that begin with ``prefix`` will be returned. suffix (str): If supplied, only collections that end with ``suffix`` will be returned. Returns: list: A sorted list of collection names.
def _check_changetype(self, dn, changetype, attr_value):
    """Check changetype attribute for issues.

    :param dn: the distinguished name seen so far (None if not yet read).
    :param changetype: any previously seen changetype for this record.
    :param attr_value: the changetype value being checked.
    """
    # A changetype: line is only valid after the record's dn: line.
    if dn is None:
        self._error('Read changetype: before getting valid dn: line.')
    # Only one changetype: line is allowed per record.
    if changetype is not None:
        self._error('Two lines starting with changetype: in one record.')
    if attr_value not in CHANGE_TYPES:
        self._error('changetype value %s is invalid.' % attr_value)
Check changetype attribute for issues.
def get_copies_count(self):
    """Return the copies_count number request parameter

    :returns: the number of copies for each sticker as stated in the request
    :rtype: int
    """
    default_num = api.get_setup().getDefaultNumberOfCopies()
    requested = self.request.form.get("copies_count")
    # Fall back to the setup default when the parameter is missing/invalid.
    return to_int(requested, default_num)
Return the copies_count number request parameter :returns: the number of copies for each sticker as stated in the request :rtype: int
def _offset_for(self, param):
    """Return the offset of the param inside this parameterized object.

    This does not need to account for shaped parameters, as it basically
    just sums up the parameter sizes which come before param.
    """
    if param.has_parent():
        p = param._parent_._get_original(param)
        if p in self.parameters:
            # Direct child: sum the sizes of all siblings preceding it.
            return reduce(lambda a, b: a + b.size, self.parameters[:p._parent_index_], 0)
        # Nested deeper: offset of the parent within self, plus param's
        # offset within its parent.
        return self._offset_for(param._parent_) + param._parent_._offset_for(param)
    return 0
Return the offset of the param inside this parameterized object. This does not need to account for shaped parameters, as it basically just sums up the parameter sizes which come before param.
def StartTiming(self, profile_name):
    """Starts timing CPU time.

    Args:
      profile_name (str): name of the profile to sample.
    """
    try:
        measurement = self._profile_measurements[profile_name]
    except KeyError:
        # First sample for this profile: create its measurement lazily.
        measurement = self._profile_measurements[profile_name] = CPUTimeMeasurement()
    measurement.SampleStart()
Starts timing CPU time. Args: profile_name (str): name of the profile to sample.
def wrap_exception(func: Callable) -> Callable:
    """Decorator to wrap pygatt exceptions into BluetoothBackendException."""
    try:
        from pygatt.backends.bgapi.exceptions import BGAPIError
        from pygatt.exceptions import NotConnectedError
    except ImportError:
        # pygatt is optional; without it there is nothing to translate.
        return func

    def _wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except (BGAPIError, NotConnectedError) as exception:
            raise BluetoothBackendException() from exception

    return _wrapped
Decorator to wrap pygatt exceptions into BluetoothBackendException.
def hashable_identity(obj):
    """Generate a hashable ID that is stable for methods etc.

    Approach borrowed from blinker.  Why it matters: see e.g.
    http://stackoverflow.com/questions/13348031/python-bound-and-unbound-method-object
    """
    if hasattr(obj, '__func__'):
        # Bound method: identity of the underlying function plus instance.
        return (id(obj.__func__), id(obj.__self__))
    elif hasattr(obj, 'im_func'):
        # Python 2 bound-method attributes.
        return (id(obj.im_func), id(obj.im_self))
    elif isinstance(obj, (str, type(u''))):
        # BUG FIX: the original tested (basestring, unicode), which raises
        # NameError on Python 3.  (str, type(u'')) covers str+unicode on
        # Python 2 and plain str on Python 3.  Strings hash by value, so
        # they are returned as-is.
        return obj
    else:
        return id(obj)
Generate a hashable ID that is stable for methods etc Approach borrowed from blinker. Why it matters: see e.g. http://stackoverflow.com/questions/13348031/python-bound-and-unbound-method-object
def clean_up_datetime(obj_map):
    """Convert datetime objects (recursively) to plain dictionaries for storage.

    :param obj_map: mapping whose values may be datetimes, nested dicts,
        or lists containing dicts/other values.
    :return: a new dict where every datetime value is replaced by a dict
        of its components; dicts and lists are copied recursively, other
        values are passed through unchanged.
    """
    def _clean_value(value):
        if isinstance(value, datetime.datetime):
            return {
                'year': value.year,
                'month': value.month,
                'day': value.day,
                'hour': value.hour,
                'minute': value.minute,
                'second': value.second,
                'microsecond': value.microsecond,
                'tzinfo': value.tzinfo,
            }
        if isinstance(value, dict):
            return clean_up_datetime(value)
        if isinstance(value, list):
            # Recurse only into dict elements (matching the original
            # behavior); everything else is copied as-is.  Returning a new
            # list also fixes the original's aliasing of empty lists.
            return [clean_up_datetime(v) if isinstance(v, dict) else v
                    for v in value]
        return value

    return {key: _clean_value(value) for key, value in obj_map.items()}
convert datetime objects to dictionaries for storage
def get_quantmap(features, acc_col, quantfields):
    """Map protein accessions to their quant information.

    Runs through proteins that are in a quanted protein table and extracts
    the fields named in ``quantfields``.

    NOTE: each feature dict is mutated -- its accession entry is popped.

    :param features: iterable of feature dicts from the protein table.
    :param acc_col: key of the protein accession column.
    :param quantfields: list of quant field names to extract.
    :return: dict keyed by protein accession, each value a dict of the
        requested quant fields.
    """
    quantmap = {}
    for feature in features:
        accession = feature.pop(acc_col)
        quantmap[accession] = {field: feature[field] for field in quantfields}
    return quantmap
Runs through proteins that are in a quanted protein table, extracts and maps their information based on the quantfields list input. Map is a dict with protein_accessions as keys.
def replace(self, src):
    """Given some source html substitute and annotated as applicable.

    :param src: source html fragment to look up.
    :return: the annotated substitution when ``src`` is a known key,
        otherwise ``src`` unchanged.
    """
    # Direct dict lookup replaces the original O(n) equality scan over
    # every key; dict membership uses the same equality semantics.
    sub = self.substitutions.get(src)
    if sub is not None:
        annotation = self.annotation % sub[1]
        return annotation + sub[0]
    return src
Given some source html substitute and annotated as applicable
def main():
    """Run server.

    Environment variables (ANYBADGE_PORT, ANYBADGE_LISTEN_ADDRESS,
    ANYBADGE_LOG_LEVEL) override the module-level defaults before
    command-line arguments are parsed.
    """
    global DEFAULT_SERVER_PORT, DEFAULT_SERVER_LISTEN_ADDRESS, DEFAULT_LOGGING_LEVEL

    # Environment overrides for the module-level defaults.
    if 'ANYBADGE_PORT' in environ:
        DEFAULT_SERVER_PORT = environ['ANYBADGE_PORT']
    if 'ANYBADGE_LISTEN_ADDRESS' in environ:
        DEFAULT_SERVER_LISTEN_ADDRESS = environ['ANYBADGE_LISTEN_ADDRESS']
    if 'ANYBADGE_LOG_LEVEL' in environ:
        DEFAULT_LOGGING_LEVEL = logging.getLevelName(environ['ANYBADGE_LOG_LEVEL'])

    args = parse_args()

    # --debug on the command line wins over any configured level.
    logging_level = logging.DEBUG if args.debug else DEFAULT_LOGGING_LEVEL

    logging.basicConfig(
        format='%(asctime)-15s %(levelname)s:%(filename)s(%(lineno)d):%(funcName)s: %(message)s',
        level=logging_level)

    logger.info('Starting up anybadge server.')

    run(listen_address=args.listen_address, port=args.port)
Run server.
def run(addr, *commands, **kwargs):
    """Non-threaded batch command runner returning output results.

    Each command may be a tuple of (command_name, arg, ...) tuples, in
    which case every inner command is dispatched (dots in names are
    translated to underscores to find the handler method), or a plain
    command-name string.

    NOTE(review): for a plain string command, the *remaining* positional
    commands are passed as its arguments and the loop breaks, so only the
    first string command is ever executed -- confirm this is intended.

    :param addr: address passed to VarnishHandler.
    :param commands: commands to execute (see above).
    :param kwargs: extra keyword arguments for VarnishHandler.
    :return: list of results from the dispatched handler calls.
    """
    results = []
    handler = VarnishHandler(addr, **kwargs)
    for cmd in commands:
        if isinstance(cmd, tuple) and len(cmd)>1:
            results.extend([getattr(handler, c[0].replace('.','_'))(*c[1:]) for c in cmd])
        else:
            results.append(getattr(handler, cmd.replace('.','_'))(*commands[1:]))
            break
    handler.close()
    return results
Non-threaded batch command runner returning output results
def short_repr(item, max_length=15):
    """Short representation of item if it is too long.

    :param item: any object; its repr() is what gets shortened.
    :param max_length: reprs longer than this are truncated.
    :return: the (possibly truncated) repr string.
    """
    text = repr(item)
    if len(text) <= max_length:
        return text
    # Keep the head plus the final character (typically the closing
    # quote/bracket), with an ellipsis marking the elision.
    return '{}...{}'.format(text[:max_length - 3], text[-1])
Short representation of item if it is too long
def train_by_stream(self, stream: StreamWrapper) -> None:
    """Train the model with the given stream.

    Thin wrapper: runs a single training epoch over *stream*.

    :param stream: stream to train with
    """
    self._run_epoch(stream=stream, train=True)
Train the model with the given stream. :param stream: stream to train with
def dict_to_vtk(data, path='./dictvtk', voxel_size=1, origin=(0, 0, 0)):
    r"""Accepts multiple images as a dictionary and compiles them into a vtk file.

    Parameters
    ----------
    data : dict
        Maps scalar-property names to the voxel arrays that hold them.
        NOTE: mutated in place (bool arrays cast to int8, non-contiguous
        arrays replaced by contiguous copies).
    path : string
        Path to the output file.
    voxel_size : int
        Side length of the (cubic) voxels.
    origin : tuple of float
        Data origin (according to selected voxel size).
    """
    vs = voxel_size
    for entry in data:
        if data[entry].dtype == bool:
            # VTK writers have no bool type; store as 8-bit ints.
            data[entry] = data[entry].astype(np.int8)
        # BUGFIX: the original tested `if C_CONTIGUOUS`, which re-copied
        # already-contiguous arrays (a no-op) and left non-contiguous
        # arrays untouched; the writer needs contiguous memory.
        if not data[entry].flags['C_CONTIGUOUS']:
            data[entry] = np.ascontiguousarray(data[entry])
    imageToVTK(path, cellData=data, spacing=(vs, vs, vs), origin=origin)
r""" Accepts multiple images as a dictionary and compiles them into a vtk file Parameters ---------- data : dict A dictionary of *key: value* pairs, where the *key* is the name of the scalar property stored in each voxel of the array stored in the corresponding *value*. path : string Path to output file voxel_size : int The side length of the voxels (voxels are cubic) origin : float data origin (according to selected voxel size) Notes ----- Outputs a vtk, vtp or vti file that can opened in ParaView
def get_boot_device(self):
    """Get the current boot device for the node.

    :raises: UcsOperationError wrapping any UcsException raised by the
        service-profile manager.
    :returns: whatever ``sp_manager.get_boot_device()`` returns (a
        dictionary describing the boot device).
    """
    operation = 'get_boot_device'
    try:
        return self.sp_manager.get_boot_device()
    except UcsException as ex:
        # BUGFIX: print() does not interpolate %-style mappings the way
        # logging calls do; the original printed the dict as a second
        # argument instead of substituting it into the message.
        print(_("Cisco client exception: %(msg)s.") % {'msg': ex})
        raise exception.UcsOperationError(operation=operation, error=ex)
Get the current boot device for the node. Provides the current boot device of the node. Be aware that not all drivers support this. :raises: InvalidParameterValue if any connection parameters are incorrect. :raises: MissingParameterValue if a required parameter is missing :returns: a dictionary containing: :boot_device: the boot device, one of :mod:`ironic.common.boot_devices` or None if it is unknown. :persistent: Whether the boot device will persist to all future boots or not, None if it is unknown.
def doLog(self, level, where, format, *args, **kwargs):
    """Log a message at the given level, with the possibility of going
    higher up in the stack.

    :param level: log level (int)
    :param where: how many frames to go back from the last log frame,
        or a function (to log for a future call)
    :param format: printf-style message format
    :param kwargs: a dict of pre-calculated values from a previous
        doLog call
    :returns: a dict of calculated variables, to be reused in a call to
        doLog that should show the same location; empty when logging
        was shortcut.
    """
    # Cheap early-out: skip argument formatting entirely when this
    # category/level combination is filtered out.
    if _canShortcutLogging(self.logCategory, level):
        return {}
    # logFunction may transform the raw args before formatting.
    args = self.logFunction(*args)
    return doLog(level, self.logObjectName(), self.logCategory,
        format, args, where=where, **kwargs)
Log a message at the given level, with the possibility of going higher up in the stack. @param level: log level @type level: int @param where: how many frames to go back from the last log frame; or a function (to log for a future call) @type where: int (negative), or function @param kwargs: a dict of pre-calculated values from a previous doLog call @return: a dict of calculated variables, to be reused in a call to doLog that should show the same location @rtype: dict
def sort_func(self, key):
    """Logic for sorting keys in a `Spectrum` relative to one another.

    Time sorts first, data and source sort last; all other keys keep
    their natural (alphabetical) position.
    """
    # Sentinel sort values pin the special keys to fixed positions.
    ordering = {
        self._KEYS.TIME: 'aaa',
        self._KEYS.DATA: 'zzy',
        self._KEYS.SOURCE: 'zzz',
    }
    return ordering.get(key, key)
Logic for sorting keys in a `Spectrum` relative to one another.
def find_all(self, kw: YangIdentifier,
             pref: YangIdentifier = None) -> List["Statement"]:
    """Return the list of all substatements with the given keyword and prefix.

    Args:
        kw: Statement keyword (local part for extensions).
        pref: Keyword prefix (``None`` for built-in statements).
    """
    matching = []
    for sub in self.substatements:
        if sub.keyword == kw and sub.prefix == pref:
            matching.append(sub)
    return matching
Return the list of all substatements with the given keyword and prefix.

Args:
    kw: Statement keyword (local part for extensions).
    pref: Keyword prefix (``None`` for built-in statements).
def _load(self, filename=None):
    """Load the Himawari AHI RSR data for the band requested.

    Reads the wavelength and response columns from every band sheet of
    the Excel workbook and stores them in ``self.rsr`` keyed by
    canonical band name.
    """
    if not filename:
        filename = self.filename
    wb_ = open_workbook(filename)

    self.rsr = {}
    sheet_names = []
    for sheet in wb_.sheets():
        # Skip non-band sheets.
        if sheet.name in ['Title', ]:
            continue
        # Map vendor sheet names to canonical band names where known;
        # fall back to the (stripped) sheet name itself.
        ch_name = AHI_BAND_NAMES.get(
            sheet.name.strip(), sheet.name.strip())
        sheet_names.append(sheet.name.strip())
        self.rsr[ch_name] = {'wavelength': None, 'response': None}

        # Data rows 5..5452: column 0 holds wavelength, column 2 the
        # spectral response. NOTE(review): row bounds are hard-coded to
        # the vendor spreadsheet layout -- confirm if the file changes.
        wvl = np.array(
            sheet.col_values(0, start_rowx=5, end_rowx=5453))
        resp = np.array(
            sheet.col_values(2, start_rowx=5, end_rowx=5453))

        self.rsr[ch_name]['wavelength'] = wvl
        self.rsr[ch_name]['response'] = resp
Load the Himawari AHI RSR data for the band requested
def query_list_pager(con, idx, kind='2'):
    """Get the records for the given page.

    :param con: query condition passed through to MPost.
    :param idx: 1-based page index.
    :param kind: record kind filter.
    :return: the slice of records belonging to page *idx*.
    """
    records = MPost.query_under_condition(con, kind=kind)
    per_page = CMS_CFG['list_num']
    start = (idx - 1) * per_page
    return records[start:start + per_page]
Get the records belonging to the given page.
def keys(self, prefix=None, limit=None, offset=None, namespace=None):
    """Get gauge keys.

    Thin wrapper: builds a query context with the given filters and
    delegates to its ``keys()``.
    """
    return self.make_context(prefix=prefix, limit=limit, offset=offset,
                             namespace=namespace).keys()
Get gauge keys
def count(self, low, high=None):
    """Return the number of items between the given bounds.

    When *high* is omitted, counts only items scoring exactly *low*.
    """
    upper = low if high is None else high
    return self.database.zcount(self.key, low, upper)
Return the number of items between the given bounds.
def write_record(self, warc_record):
    """Adds a warc record to this WARC file."""
    warc_record.write_to(self.fileobj)
    # NOTE(review): when writing through gzip2, close_member() presumably
    # ends the current gzip member so each record gets its own member --
    # confirm against gzip2's API.
    if isinstance(self.fileobj, gzip2.GzipFile):
        self.fileobj.close_member()
Adds a warc record to this WARC file.
def add_compliance_header(self):
    """Add IIIF Compliance level header to response.

    No-op when the manipulator declares no compliance URI.
    """
    uri = self.manipulator.compliance_uri
    if uri is not None:
        self.headers['Link'] = '<{0}>;rel="profile"'.format(uri)
Add IIIF Compliance level header to response.
def normalize_likes(sql):
    """Normalize and wrap LIKE statements.

    :type sql str
    :rtype: str
    """
    # Strip wildcard characters, then collapse every LIKE pattern into a
    # placeholder so structurally identical queries normalize the same.
    sql = sql.replace('%', '')
    sql = re.sub(r"LIKE '[^\']+'", 'LIKE X', sql)

    # Collapse repeated "<or|and> <column> LIKE X" clauses into a single
    # occurrence followed by an ellipsis.
    found = [m.group(0) for m in
             re.finditer(r'(or|and) [^\s]+ LIKE X', sql, flags=re.IGNORECASE)]
    for clause in set(found):
        sql = re.sub(r'(\s?' + re.escape(clause) + ')+',
                     ' ' + clause + ' ...', sql)

    return sql
Normalize and wrap LIKE statements :type sql str :rtype: str
def dict2bibtex(data):
    """Convert a single BibTeX entry dict to a BibTeX string.

    :param data: A dict representing a BibTeX entry, as the ones from
        ``bibtexparser.BibDatabase.entries`` output.
    :return: A formatted BibTeX string.
    """
    # Header line, then the fields in alphabetical order, then the footer.
    parts = ['@' + data['ENTRYTYPE'] + '{' + data['ID'] + ",\n"]
    for field in sorted(k for k in data if k not in ['ENTRYTYPE', 'ID']):
        parts.append("\t" + field + "={" + data[field] + "},\n")
    parts.append("}\n\n")
    return ''.join(parts)
Convert a single BibTeX entry dict to a BibTeX string. :param data: A dict representing BibTeX entry, as the ones from \ ``bibtexparser.BibDatabase.entries`` output. :return: A formatted BibTeX string.
def path_for_import(name):
    """Returns the directory path for the given package or module."""
    module = import_module(name)
    return os.path.dirname(os.path.abspath(module.__file__))
Returns the directory path for the given package or module.
def getkey(stype, site_id=None, key=None):
    """Returns the cache key depending on its type.

    :param stype: one of the T_HOST/T_ITEM/T_META/T_INTERVAL key types.
    :param site_id: site identifier, used by item and meta keys.
    :param key: raw key material, hashed into item and interval keys.
    """
    base = '{0}.feedjack'.format(settings.CACHE_MIDDLEWARE_KEY_PREFIX)
    # Guard-style dispatch: first matching type wins; unknown types
    # fall through (returning None), matching the original behavior.
    if stype == T_HOST:
        return '{0}.hostcache'.format(base)
    if stype == T_ITEM:
        return '{0}.{1}.item.{2}'.format(base, site_id, str2md5(key))
    if stype == T_META:
        return '{0}.{1}.meta'.format(base, site_id)
    if stype == T_INTERVAL:
        return '{0}.interval.{1}'.format(base, str2md5(key))
Returns the cache key depending on its type.
def linkify_s_by_sg(self, servicegroups):
    """Link services with servicegroups.

    Replaces each service's ``servicegroups`` list of names with the
    list of matching servicegroup uuids; unknown names are recorded as
    errors on the service.

    :param servicegroups: Servicegroups to resolve names against
    :type servicegroups: alignak.objects.servicegroup.Servicegroups
    :return: None
    """
    for serv in self:
        new_servicegroups = []
        if hasattr(serv, 'servicegroups') and serv.servicegroups != '':
            for sg_name in serv.servicegroups:
                sg_name = sg_name.strip()
                servicegroup = servicegroups.find_by_name(sg_name)
                if servicegroup is not None:
                    new_servicegroups.append(servicegroup.uuid)
                else:
                    # Unknown group: keep processing but flag the error.
                    err = "Error: the servicegroup '%s' of the service '%s' is unknown" %\
                          (sg_name, serv.get_dbg_name())
                    serv.add_error(err)
        serv.servicegroups = new_servicegroups
Link services with servicegroups :param servicegroups: Servicegroups :type servicegroups: alignak.objects.servicegroup.Servicegroups :return: None
def partial(cls, prefix, source):
    """Strip a prefix from the keys of another dictionary, returning a
    Bunch containing only valid key, value pairs.

    :raises ValueError: when no key in *source* starts with the prefix.
    """
    marker = prefix + "."
    cut = len(marker)
    stripped = cls([(key[cut:], value)
                    for key, value in source.items()
                    if key.startswith(marker)])
    if not stripped:
        raise ValueError()
    return stripped
Strip a prefix from the keys of another dictionary, returning a Bunch containing only valid key, value pairs.
def remove_security_group(self, name):
    """Remove a security group from container.

    :param name: isc_name of the group(s) to delete.
    """
    # Iterate over a snapshot: delete() may remove the group from
    # self.security_groups while we are looping, which would skip
    # elements if we iterated the live list.
    for group in list(self.security_groups):
        if group.isc_name == name:
            group.delete()
Remove a security group from container
def contrast(self, color, step):
    """If the color is dark, return a lighter one; otherwise a darker one."""
    hue, lightness, saturation = colorsys.rgb_to_hls(*self.rgb(color))
    # Move lightness away from the current tone: darken light colors,
    # lighten dark ones.
    delta = -step if self.is_light(color) else step
    return colorsys.hls_to_rgb(hue, lightness + delta, saturation)
If the color is dark, return a lighter one; otherwise return a darker one.
def close(self):
    """Close the current connection and terminate the agent.

    Should be called manually.
    """
    if hasattr(self, "thread"):
        # Signal the worker thread to exit and wait (bounded) for it.
        self.thread._exit = True
        self.thread.join(1000)
    if self._conn is not None:
        self._conn.close()
Close the current connection and terminate the agent Should be called manually
def _validate_condition_keys(self, field, value, error):
    """Validates that ``value`` is a well-formed condition.

    A condition either pairs a 'field' key with exactly one operator key
    (drawn from the scalar and nonscalar condition lists), or nests
    further conditions under 'and' / 'or'.

    NOTE(review): only the 'field' branch and the failure paths return a
    bool; the 'and'/'or' branches recurse but fall through returning
    None -- confirm callers only rely on the errors recorded via
    ``error``.
    """
    if 'field' in value:
        operators = self.nonscalar_conditions + self.scalar_conditions
        # Exactly one operator key must accompany 'field'.
        matches = sum(1 for k in operators if k in value)
        if matches == 0:
            error(field, 'Must contain one of {}'.format(operators))
            return False
        elif matches > 1:
            error(
                field, 'Must contain no more than one of {}'.format(operators)
            )
            return False
        return True
    elif 'and' in value:
        for condition in value['and']:
            self._validate_condition_keys(field, condition, error)
    elif 'or' in value:
        for condition in value['or']:
            self._validate_condition_keys(field, condition, error)
    else:
        error(field, "Must contain field + operator keys, 'and', or 'or'.")
        return False
Validates that all of the keys in one of the sets of keys are defined as keys of ``value``.
def html(self, text=TEXT):
    """Generate the HTML report body from the collected pieces.

    :param text: only affects the debug log message ("text only"); the
        pieces themselves are rendered the same way either way.
    :return: HTML string joining every rendered piece with blank lines.
    """
    self.logger.debug("Generating the HTML report{}..."
                      .format(["", " (text only)"][text]))
    html = []
    for piece in self._pieces:
        if isinstance(piece, string_types):
            # Markdown pieces are converted to HTML; table support enabled.
            html.append(markdown2.markdown(piece, extras=["tables"]))
        elif isinstance(piece, Element):
            html.append(piece.html())
    return "\n\n".join(html)
Generate an HTML file from the report data.
def unregister_peer(self, connection_id):
    """Removes a connection_id from the registry.

    Args:
        connection_id (str): A unique identifier which identifies an
            connection on the network server socket.
    """
    public_key = self.peer_to_public_key(connection_id)
    if public_key:
        self._consensus_notifier.notify_peer_disconnected(public_key)

    with self._lock:
        if connection_id in self._peers:
            del self._peers[connection_id]
            LOGGER.debug("Removed connection_id %s, "
                         "connected identities are now %s",
                         connection_id, self._peers)
            # Downgrade the connection to temporary instead of dropping it.
            self._topology.set_connection_status(connection_id,
                                                 PeerStatus.TEMP)
        else:
            LOGGER.warning("Connection unregister failed as connection "
                           "was not registered: %s",
                           connection_id)
Removes a connection_id from the registry. Args: connection_id (str): A unique identifier which identifies an connection on the network server socket.
def add_site(self, site_name, location_name=None, er_data=None,
             pmag_data=None):
    """Create a Site object and add it to self.sites.

    If a location name is provided, add the site to location.sites as
    well (creating the location if it does not yet exist).

    :return: the newly created Site.
    """
    location = None
    if location_name:
        location = self.find_by_name(location_name, self.locations)
        if not location:
            location = self.add_location(location_name)
    new_site = Site(site_name, location, self.data_model, er_data, pmag_data)
    self.sites.append(new_site)
    if location:
        location.sites.append(new_site)
    return new_site
Create a Site object and add it to self.sites. If a location name is provided, add the site to location.sites as well.
def add_header(self, entry):
    """Parse the VCF header line and register the sample columns.

    :param entry: tab-separated VCF header line.
    :return: True when at least one sample column is present.
    """
    columns = entry.split('\t')
    # The first 9 columns are fixed VCF fields; everything after is a sample.
    self.n_individuals = len(columns) - 9
    for index, sample in enumerate(columns[9:]):
        self.individuals[sample] = index
    return self.n_individuals > 0
Parses the VCF header line, registers the sample columns, and returns True if the file contains at least one sample.
def update_cursor_position(self, line, index):
    """Update cursor position."""
    # Editor positions are 0-based; display them 1-based for humans.
    self.set_value('Line {}, Col {}'.format(line + 1, index + 1))
Update cursor position.
def reciprocal(self):
    """If there's another Portal connecting the same origin and
    destination that I do, but going the opposite way, return it.

    :raises KeyError: if no such reciprocal portal exists.
    """
    try:
        # Look up dest -> orig, the reverse of this portal's direction.
        return self.character.portal[self.dest][self.orig]
    except KeyError:
        raise KeyError("This portal has no reciprocal")
If there's another Portal connecting the same origin and destination that I do, but going the opposite way, return it. Else raise KeyError.
def load_zipfile(self, path):
    """Import contents of a zipfile.

    Directory entries (names ending in '/') become mkdir() calls; file
    entries are read and written via touch().

    :param path: path (or file-like object) of the zip archive.
    """
    # CLEANUP: the original never closed the archive; the context
    # manager guarantees deterministic release of the file handle.
    with zipfile.ZipFile(path) as zin:
        for zinfo in zin.infolist():
            name = zinfo.filename
            if name.endswith("/"):
                self.mkdir(name)
            else:
                content = zin.read(name)
                self.touch(name, content)
import contents of a zipfile
def stop(self, accountID, **kwargs):
    """Shortcut to create a Stop Order in an Account.

    Args:
        accountID : The ID of the Account
        kwargs : The arguments to create a StopOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    return self.create(
        accountID,
        order=StopOrderRequest(**kwargs)
    )
Shortcut to create a Stop Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a StopOrderRequest Returns: v20.response.Response containing the results from submitting the request
def _count_classified_pixels(self):
    """Count the pixels belonging to each classified class.

    For every (truth-mask type, prediction/mask pair, class value)
    combination, counts how many predicted pixels inside the nonzero
    mask region equal that class value, then appends the rearranged
    count array to ``self.pixel_classification_counts``.

    NOTE(review): the final axis order depends on the two moveaxis
    calls -- confirm against downstream consumers before changing.
    """
    class_values = self.class_dictionary.values()

    # Innermost: count predicted pixels equal to class_val inside the
    # (nonzero) mask region, per prediction/mask pair and mask type.
    classification_count = np.array([[[np.count_nonzero(prediction[np.nonzero(mask)] == class_val)
                                       for prediction, mask in zip(self.classification_masks, masktype)]
                                      for masktype in self.truth_masks]
                                     for class_val in class_values])

    # Move the class axis to the end, then the mask-type axis second last.
    classification_count = np.moveaxis(classification_count, 0, -1)
    classification_count = np.moveaxis(classification_count, 0, -2)

    if self.pixel_classification_counts is None:
        # First batch: start the accumulator with an owned copy.
        self.pixel_classification_counts = np.copy(classification_count)
    else:
        self.pixel_classification_counts = np.concatenate((self.pixel_classification_counts,
                                                           classification_count))
Count the pixels belonging to each classified class.
def set(self, fmt, offset, value):
    """Set the value of a given bitfield.

    :param fmt: format-string for the bitfield being written, e.g. u8
        for an unsigned 8-bit integer.
    :param int offset: offset (in number of bits).
    :param int value: value to set at the given position.
    :returns: a BitFieldOperation instance.
    """
    # Delegate to a fresh BitFieldOperation bound to this key.
    bfo = BitFieldOperation(self.database, self.key)
    return bfo.set(fmt, offset, value)
Set the value of a given bitfield. :param fmt: format-string for the bitfield being read, e.g. u8 for an unsigned 8-bit integer. :param int offset: offset (in number of bits). :param int value: value to set at the given position. :returns: a :py:class:`BitFieldOperation` instance.
def _query(self, sql, *params):
    """Execute *sql* with *params* on the shared cursor and return it.

    We are always using the same cursor, therefore this method is not
    thread-safe!!! You can call it from different threads only if you
    are holding the explicit `AsyncExecutor` lock.

    :raises RetryFailedError: when the cluster is being restarted.
    :raises PostgresConnectionException: on other connection problems.
    """
    cursor = None
    try:
        cursor = self._cursor()
        cursor.execute(sql, params)
        return cursor
    except psycopg2.Error as e:
        if cursor and cursor.connection.closed == 0:
            # Connection still open: an OperationalError means the
            # backend went away, so drop the connection; any other
            # error is a genuine SQL failure and is re-raised.
            if isinstance(e, psycopg2.OperationalError):
                self.close_connection()
            else:
                raise e
        if self.state == 'restarting':
            raise RetryFailedError('cluster is being restarted')
        raise PostgresConnectionException('connection problems')
We are always using the same cursor, therefore this method is not thread-safe!!! You can call it from different threads only if you are holding explicit `AsyncExecutor` lock, because the main thread is always holding this lock when running HA cycle.
def get_model_alias(self):
    """Get model alias.

    An explicitly set alias wins; otherwise derive
    "<app_label>.<model_name>".
    """
    alias = self.model_alias
    if alias:
        return alias
    return '{}.{}'.format(self.get_app_label(), self.get_model_name())
Get model alias