code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def order_by(self, *order_bys):
    """Add a list of ingredients to order the query by.

    :param order_bys: Keys on the ``shelf`` or Dimension/Metric objects.
        A key prefixed with "-" orders descending.
    :type order_bys: list
    """
    self._order_bys = []
    for ingredient in order_bys:
        resolved = self._shelf.find(ingredient, (Dimension, Metric))
        self._order_bys.append(resolved)
    self.dirty = True
    return self
Add a list of ingredients to order by to the query. These can either be Dimension or Metric objects or strings representing order_bys on the shelf. The Order_by expression will be added to the query's order_by statement :param order_bys: Order_bys to add to the recipe. Order_bys can either be keys on the ``shelf`` or Dimension or Metric objects. If the key is prefixed by "-" the ordering will be descending. :type order_bys: list
def filter_by(self, types=(), units=()):
    """Return value labels filtered by either or both type and unit.

    An empty sequence for either argument matches everything, as long as
    the other argument matches some values.
    """
    if not (isinstance(types, Sequence) and isinstance(units, Sequence)):
        raise TypeError('types/units must be a sequence')
    empty = frozenset()
    type_matches = None
    if types:
        type_matches = set().union(*(self.by_type.get(t, empty) for t in types))
    unit_matches = None
    if units:
        unit_matches = set().union(*(self.by_unit.get(u, empty) for u in units))
    if type_matches is not None and unit_matches is not None:
        return type_matches & unit_matches
    if type_matches is not None:
        return type_matches
    if unit_matches is not None:
        return unit_matches
    return empty
Return list of value labels, filtered by either or both type and unit. An empty sequence for either argument will match as long as the other argument matches any values.
def move_mouse_relative_to_window(self, window, x, y):
    """Move the mouse to a location relative to a window's top-left corner.

    :param window: X11 window identifier (passed to libxdo as a C ulong).
    :param x: target X offset in pixels from the window's left edge.
    :param y: target Y offset in pixels from the window's top edge.
    """
    # Delegate directly to the libxdo C library via ctypes.
    _libxdo.xdo_move_mouse_relative_to_window(
        self._xdo, ctypes.c_ulong(window), x, y)
Move the mouse to a specific location relative to the top-left corner of a window. :param x: the target X coordinate on the screen in pixels. :param y: the target Y coordinate on the screen in pixels.
def get_doc_comments(text):
    r"""Return (comment, following_line) pairs for each /** ... */ comment.

    The second element of each pair is the line after the comment, which
    may be needed to guess a function name and its arguments.
    """
    pairs = []
    for match in re.finditer(r'/\*\*(.*?)\*/', text, re.DOTALL):
        comment = match.group()
        try:
            end = text.find('\n', match.end(0)) + 1
            if '@class' in comment:
                next_line = text[end:text.find('\n', end)]
            else:
                next_line = next(split_delimited('()', '\n', text[end:]))
        except StopIteration:
            next_line = ''
        pairs.append((comment, next_line))
    return pairs
r""" Return a list of all documentation comments in the file text. Each comment is a pair, with the first element being the comment text and the second element being the line after it, which may be needed to guess function & arguments. >>> get_doc_comments(read_file('examples/module.js'))[0][0][:40] '/**\n * This is the module documentation.' >>> get_doc_comments(read_file('examples/module.js'))[1][0][7:50] 'This is documentation for the first method.' >>> get_doc_comments(read_file('examples/module.js'))[1][1] 'function the_first_function(arg1, arg2) ' >>> get_doc_comments(read_file('examples/module.js'))[2][0] '/** This is the documentation for the second function. */'
def parse_media_type(media_type):
    """Split an HTTP media type into a (type, subtype, parameter) tuple.

    Missing pieces come back as None. Applicable to the 'Accept' or
    'Content-Type' HTTP header fields.
    """
    remainder, _, parameter = str(media_type).partition(';')
    main_type, _, subtype = remainder.partition('/')
    parts = (main_type, subtype, parameter)
    return tuple(part.strip() or None for part in parts)
Returns type, subtype, parameter tuple from an http media_type. Can be applied to the 'Accept' or 'Content-Type' http header fields.
def __get_keys(self):
    """Return the keys from the root down to this node.

    Walks parent links upward, then reverses to root-first order; stops at
    the first ancestor whose key is None.
    """
    keys = []
    node = self
    while node is not None and node.key is not None:
        keys.append(node.key)
        node = node.parent
    keys.reverse()
    return keys
Return the keys associated with this node by adding its key and then adding parent keys recursively.
def _bind_socket(self, bindaddr):
    """Create, configure and bind a non-blocking listening server socket.

    Logs and re-raises any bind failure.
    """
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_sock.setblocking(0)
    try:
        server_sock.bind(bindaddr)
    except Exception:
        self._logger.exception("Unable to bind to %s" % str(bindaddr))
        raise
    server_sock.listen(self.BACKLOG)
    return server_sock
Create a listening server socket.
def experiments_predictions_download(self, experiment_id, run_id):
    """Download the results of a prediction for a given experiment.

    Parameters
    ----------
    experiment_id : string
        Unique experiment identifier
    run_id : string
        Unique model run identifier

    Returns
    -------
    FileInfo
        Prediction result file info, or None if the run is unknown,
        unsuccessful, or has no output object.
    """
    model_run = self.experiments_predictions_get(experiment_id, run_id)
    if model_run is None or not model_run.state.is_success:
        return None
    funcdata = self.funcdata.get_object(model_run.state.model_output)
    if funcdata is None:
        return None
    return FileInfo(
        funcdata.upload_file,
        funcdata.properties[datastore.PROPERTY_MIMETYPE],
        funcdata.properties[datastore.PROPERTY_FILENAME],
    )
Download the results of a prediction for a given experiment. Parameters ---------- experiment_id : string Unique experiment identifier run_id : string Unique model run identifier Returns ------- FileInfo Information about prediction result file on disk or None if prediction is unknown or has no result
def branchpoints(image, mask=None):
    """Remove all pixels from an image except for branchpoints.

    image - a skeletonized image
    mask - a mask of pixels excluded from consideration
    """
    global branchpoints_table
    if mask is None:
        masked_image = image
    else:
        masked_image = image.astype(bool).copy()
        masked_image[~mask] = False
    result = table_lookup(masked_image, branchpoints_table, False, 1)
    # Restore original pixel values outside the mask (idiom fix:
    # was ``not mask is None``).
    if mask is not None:
        result[~mask] = image[~mask]
    return result
Remove all pixels from an image except for branchpoints image - a skeletonized image mask - a mask of pixels excluded from consideration 1 0 1 ? 0 ? 0 1 0 -> 0 1 0 0 1 0 0 ? 0
def get_write_fields(self):
    """Get the fields used to write the header, split into record and
    signal specification fields.

    Returns
    -------
    rec_write_fields : list
        Record specification fields to be written; includes 'comments'
        when comments are present.
    sig_write_fields : dict or None
        Signal specification fields to be written, or None when there
        are no signals.
    """
    rec_write_fields = self.get_write_subset('record')
    # Idiom fix: identity comparison with None (was ``!= None``).
    if self.comments is not None:
        rec_write_fields.append('comments')
    self.check_field('n_sig')
    if self.n_sig > 0:
        sig_write_fields = self.get_write_subset('signal')
    else:
        sig_write_fields = None
    return rec_write_fields, sig_write_fields
Get the list of fields used to write the header, separating record and signal specification fields. Returns the default required fields, the user defined fields, and their dependencies. Does NOT include `d_signal` or `e_d_signal`. Returns ------- rec_write_fields : list Record specification fields to be written. Includes 'comment' if present. sig_write_fields : dict Dictionary of signal specification fields to be written, with values equal to the channels that need to be present for each field.
def _step_begin(self, label, log=True):
    """Record and log the beginning of a processing step."""
    if not log:
        return
    self.step_label = label
    self.step_begin_time = self.log(
        u"STEP %d BEGIN (%s)" % (self.step_index, label))
Log the beginning of a step, recording its label and start time.
def integer_ceil(a, b):
    """Return the ceiling of a / b as an integer."""
    # Negate-divide-negate: floor division of the negation yields the ceiling.
    return -(-a // b)
Return the ceil integer of a div b.
def quantile(expr, prob=None, **kw):
    """Percentile value.

    :param expr: expression to reduce
    :param prob: probability or list of probabilities, in [0, 1]
    :return: quantile reduction expression
    """
    prob = kw.get('_prob', prob)
    output_type = _stats_type(expr)
    # A collection of probabilities yields a list result, except under GroupBy.
    wants_list = isinstance(prob, (list, set)) and not isinstance(expr, GroupBy)
    if wants_list:
        output_type = types.List(output_type)
    return _reduction(expr, Quantile, output_type, _prob=prob)
Percentile value. :param expr: :param prob: probability or list of probabilities, in [0, 1] :return:
def update_highlights(self, old_highlight_set, new_highlight_set):
    """Toggle highlighting so only ``new_highlight_set`` entries are lit.

    Both arguments are sets of keys; no-op when the GUI is not up.
    """
    if not self.gui_up:
        return
    # Turn off entries that left the set, then turn on the newcomers.
    for key in old_highlight_set - new_highlight_set:
        self._highlight_path(key, False)
    for key in new_highlight_set - old_highlight_set:
        self._highlight_path(key, True)
Unhighlight the entries represented by ``old_highlight_set`` and highlight the ones represented by ``new_highlight_set``. Both are sets of keys.
def list_ipsec_site_connections(self, retrieve_all=True, **_params):
    """Fetch all configured IPsec site connections for a project."""
    return self.list(
        'ipsec_site_connections',
        self.ipsec_site_connections_path,
        retrieve_all,
        **_params
    )
Fetches all configured IPsecSiteConnections for a project.
def _tree_to_string(cls, root_element, xml_declaration=True, pretty_print=True):
    """Serialize an ``lxml`` tree and return it as a Unicode string."""
    from lxml import etree
    serialized = etree.tostring(
        root_element,
        encoding="UTF-8",
        method="xml",
        xml_declaration=xml_declaration,
        pretty_print=pretty_print,
    )
    return gf.safe_unicode(serialized)
Return an ``lxml`` tree as a Unicode string.
def inflate_bbox(self):
    """Realign the left/right bounding box edges to 4-pixel boundaries.

    Left is rounded down and right is rounded up to a multiple of 4.
    Mainly accommodates devices whose GDDRAM stores pixels in 4-bit
    nibbles. Returns the new bounding box.
    """
    left, top, right, bottom = self.bounding_box
    new_left = left & 0xFFFC
    if right % 4 == 0:
        new_right = right
    else:
        new_right = (right & 0xFFFC) + 0x04
    self.bounding_box = (new_left, top, new_right, bottom)
    return self.bounding_box
Realign the left and right edges of the bounding box such that they are inflated to align modulo 4. This method is optional, and used mainly to accommodate devices with COM/SEG GDDRAM structures that store pixels in 4-bit nibbles.
def handle_length(schema, field, validator, parent_schema):
    """Add ``marshmallow.validate.Length`` constraints to a JSON schema.

    Sets minLength/maxLength for String fields and minItems/maxItems for
    List/Nested fields.

    Args:
        schema (dict): The original JSON schema to post-process.
        field (fields.Field): The field that generated the original schema.
        validator (marshmallow.validate.Length): The attached validator.
        parent_schema (marshmallow.Schema): The field's owning Schema.

    Returns:
        dict: The post-processed JSON schema.

    Raises:
        ValueError: If ``field`` is not a List, Nested or String field.
    """
    if isinstance(field, fields.String):
        min_key, max_key = 'minLength', 'maxLength'
    elif isinstance(field, (fields.List, fields.Nested)):
        min_key, max_key = 'minItems', 'maxItems'
    else:
        # Message fixed to mention Nested, which this branch accepts.
        raise ValueError("In order to set the Length validator for JSON "
                         "schema, the field must be either a List, Nested "
                         "or a String")
    # Compare against None (not truthiness) so an explicit bound of 0 is
    # still emitted; the old ``if validator.min:`` silently dropped it.
    if validator.min is not None:
        schema[min_key] = validator.min
    if validator.max is not None:
        schema[max_key] = validator.max
    if validator.equal is not None:
        # ``equal`` pins both bounds, overriding min/max.
        schema[min_key] = validator.equal
        schema[max_key] = validator.equal
    return schema
Adds validation logic for ``marshmallow.validate.Length``, setting the values appropriately for ``fields.List``, ``fields.Nested``, and ``fields.String``. Args: schema (dict): The original JSON schema we generated. This is what we want to post-process. field (fields.Field): The field that generated the original schema and who this post-processor belongs to. validator (marshmallow.validate.Length): The validator attached to the passed in field. parent_schema (marshmallow.Schema): The Schema instance that the field belongs to. Returns: dict: A, possibly, new JSON Schema that has been post processed and altered. Raises: ValueError: Raised if the `field` is something other than `fields.List`, `fields.Nested`, or `fields.String`
def update_port_monitor(self, resource, timeout=-1):
    """Update the port monitor configuration of a logical interconnect.

    Args:
        resource: Port monitor configuration.
        timeout: Operation timeout (-1 waits for task completion).

    Returns:
        dict: Port monitor configuration.
    """
    data = resource.copy()
    # Default the resource type when the caller did not supply one.
    data.setdefault('type', 'port-monitor')
    uri = "{}{}".format(self.data["uri"], self.PORT_MONITOR_PATH)
    return self._helper.update(data, uri=uri, timeout=timeout)
Updates the port monitor configuration of a logical interconnect. Args: resource: Port monitor configuration. Returns: dict: Port monitor configuration.
def get(token: Union[str, int] = None) -> 'Role':
    """Return the Role enum member corresponding to the input token.

    :param token: token identifying role to indy-sdk: 'STEWARD',
        'TRUSTEE', 'TRUST_ANCHOR', '' or None
    :return: matching Role member, or None when no role matches
    """
    if token is None:
        return Role.USER
    for role in Role:
        # The removal pseudo-role never matches a lookup.
        if role == Role.ROLE_REMOVE:
            continue
        if isinstance(token, int) and token in role.value:
            return role
        if str(token).upper() == role.name:
            return role
        if token in (str(v) for v in role.value):
            return role
    return None
Return enum instance corresponding to input token. :param token: token identifying role to indy-sdk: 'STEWARD', 'TRUSTEE', 'TRUST_ANCHOR', '' or None :return: enum instance corresponding to input token
def _parse_snapshot_restore_command(cls, args, action):
    """Parse command-line arguments for the cluster snapshot / restore_point
    commands; ``action`` selects which extra arguments are accepted.
    """
    argparser = ArgumentParser(prog="cluster %s" % action)
    # Exactly one of --id / --label must identify the target cluster.
    group = argparser.add_mutually_exclusive_group(required=True)
    group.add_argument("--id", dest="cluster_id",
                       help="execute on cluster with this id")
    group.add_argument("--label", dest="label",
                       help="execute on cluster with this label")
    argparser.add_argument("--s3_location",
                           help="s3_location where backup is stored", required=True)
    if action == "snapshot":
        argparser.add_argument("--backup_type",
                               help="backup_type: full/incremental, default is full")
    elif action == "restore_point":
        argparser.add_argument("--backup_id",
                               help="back_id from which restoration will be done", required=True)
        argparser.add_argument("--table_names",
                               help="table(s) which are to be restored", required=True)
        # NOTE(review): with action="store_false" these flags populate
        # attributes named "no_overwrite"/"no_automatic" that default to True
        # and flip to False when passed -- confirm callers read them that way.
        argparser.add_argument("--no-overwrite", action="store_false",
                               help="With this option, restore overwrites to the existing table if theres any in restore target")
        argparser.add_argument("--no-automatic", action="store_false",
                               help="With this option, all the dependencies are automatically restored together with this backup image following the correct order")
    arguments = argparser.parse_args(args)
    return arguments
Parse command line arguments for the cluster snapshot and restore_point commands.
def mangleNec(code, freq=40):
    """Convert an NEC IR code to shorthand notation.

    :param code: hex string of NEC code octets (embedded spaces allowed)
    :param freq: carrier frequency identifier (default 40)
    """
    # Pulse/space timing pair for a logical one vs. a logical zero.
    def burst(bit):
        return "0226 06AD" if bit else "0226 0258"

    timings = []
    # bytearray yields ints on both Python 2 and 3; the original iterated
    # bytes directly and called ord(), which raises TypeError on Python 3
    # where iteration already yields ints.
    for octet in bytearray(binascii.unhexlify(code.replace(" ", ""))):
        # LSB-first bit order, zero-padded to 8 bits.
        for bit in reversed(format(octet, '08b')):
            timings.append(burst(int(bit)))
    return mangleIR("K %0X22 214d 10b3 " % freq + " ".join(timings) + " 0226 2000")
Convert NEC code to shorthand notation
def config_flag(option, value, default=False, section=cli.name):
    """Guess whether a CLI flag should default to on or off from configuration.

    Returns a lazily-evaluated truthy object: it reads the configuration each
    time it is coerced to bool and compares the stored option value with
    ``value``; falls back to ``default`` when the option/section is missing.
    """
    class x(object):
        # Default arguments freeze option/value/default/section at definition
        # time so the flag object is self-contained.
        def __bool__(self, option=option, value=value, default=default, section=section):
            config = read_config()
            type = builtins.type(value)
            get_option = option_getter(type)
            try:
                return get_option(config, section, option) == value
            except (NoOptionError, NoSectionError):
                return default
        # Python 2 truthiness hook aliases the Python 3 implementation.
        __nonzero__ = __bool__
    return x()
Guesses whether a CLI flag should be turned on or off from the configuration. If the configuration option value is same with the given value, it returns ``True``. :: @click.option('--ko-kr', 'locale', is_flag=True, default=config_flag('locale', 'ko_KR'))
def accel_increase_height(self, *args):
    """Accelerator callback: grow the window height by 2, capped at 100."""
    current = self.settings.general.get_int('window-height')
    self.settings.general.set_int('window-height', min(current + 2, 100))
    return True
Callback to increase height.
def parse(self, filename):
    """Read and parse a pseudopotential file; main entry point for client code.

    Returns:
        A pseudopotential object, or None if the header descriptor cannot
        be identified.
    """
    path = os.path.abspath(filename)
    # XML files are PAW setups and are handled by a dedicated class.
    if filename.endswith(".xml"):
        return PawXmlSetup(path)
    ppdesc = self.read_ppdesc(path)
    if ppdesc is None:
        logger.critical("Cannot find ppdesc in %s" % path)
        return None
    psp_type = ppdesc.psp_type
    # Dispatch table: pseudopotential format name -> header parser.
    parsers = {
        "FHI": NcAbinitHeader.fhi_header,
        "GTH": NcAbinitHeader.gth_header,
        "TM": NcAbinitHeader.tm_header,
        "Teter": NcAbinitHeader.tm_header,
        "HGH": NcAbinitHeader.hgh_header,
        "HGHK": NcAbinitHeader.hgh_header,
        "ONCVPSP": NcAbinitHeader.oncvpsp_header,
        "PAW_abinit_text": PawAbinitHeader.paw_header,
    }
    try:
        header = parsers[ppdesc.name](path, ppdesc)
    except Exception:
        # Wrap any parser failure in the class Error with the offending path.
        raise self.Error(path + ":\n" + straceback())
    if psp_type == "NC":
        pseudo = NcAbinitPseudo(path, header)
    elif psp_type == "PAW":
        pseudo = PawAbinitPseudo(path, header)
    else:
        raise NotImplementedError("psp_type not in [NC, PAW]")
    return pseudo
Read and parse a pseudopotential file. Main entry point for client code. Returns: pseudopotential object or None if filename is not a valid pseudopotential file.
def root(self):
    """Provide access to the root directory entry of the CFB file."""
    start_sector = self.header.directory_sector_start
    # Sector numbering starts after the header, hence the +1 before shifting.
    offset = (start_sector + 1) << self.header.sector_shift
    return RootEntry(self, offset)
Property provides access to root object in CFB.
def resampled(
        chunksize_bytes=DEFAULT_CHUNK_SIZE,
        resample_to=SR44100(),
        store_resampled=False):
    """Create a processing pipeline model that resamples incoming audio to a
    normalized sampling rate and stores a compressed copy for playback.

    :param chunksize_bytes: bytes of the raw stream processed at once
    :param resample_to: the new, normalized sampling rate
        (NOTE(review): ``SR44100()`` is evaluated once at import time and
        shared across calls -- confirm this default object is immutable)
    :param store_resampled: whether to persist the resampled feature
    :return: the ``Resampled`` model class
    """
    class Resampled(BaseModel):
        # Parsed audio metadata, persisted as JSON.
        meta = JSONFeature(
            MetaData,
            store=True,
            encoder=AudioMetaDataEncoder)
        # Chunked raw byte stream; not stored.
        raw = ByteStreamFeature(
            ByteStream,
            chunksize=chunksize_bytes,
            needs=meta,
            store=False)
        # Compressed copy kept for convenient playback.
        ogg = OggVorbisFeature(
            OggVorbis,
            needs=raw,
            store=True)
        # Decoded PCM samples, derived on demand.
        pcm = AudioSamplesFeature(
            AudioStream,
            needs=raw,
            store=False)
        # Samples at the normalized rate for downstream processing.
        resampled = AudioSamplesFeature(
            Resampler,
            needs=pcm,
            samplerate=resample_to,
            store=store_resampled)
    return Resampled
Create a basic processing pipeline that can resample all incoming audio to a normalized sampling rate for downstream processing, and store a convenient, compressed version for playback :param chunksize_bytes: The number of bytes from the raw stream to process at once :param resample_to: The new, normalized sampling rate :return: A simple processing pipeline
def update_domain_name(self, domain_name, certificate_name=None, certificate_body=None,
                       certificate_private_key=None, certificate_chain=None,
                       certificate_arn=None, lambda_name=None, stage=None,
                       route53=True, base_path=None):
    """Update certificate information for an existing API Gateway domain.

    Mirrors boto's ``update_domain_name`` arguments and returns the new
    domain information, including the new certificate's ARN when one is
    imported during this call.
    """
    print("Updating domain name!")
    # Suffix with a timestamp so API Gateway sees a unique certificate name.
    # Guard against the default of None, which made the concatenation raise
    # TypeError before any work was done.
    certificate_name = (certificate_name or '') + str(time.time())
    # Fetched to verify that the domain exists before patching it.
    api_gateway_domain = self.apigateway_client.get_domain_name(domainName=domain_name)
    # Import a new ACM certificate only when no ARN was supplied but full
    # certificate material was.
    if not certificate_arn \
            and certificate_body and certificate_private_key and certificate_chain:
        acm_certificate = self.acm_client.import_certificate(
            Certificate=certificate_body,
            PrivateKey=certificate_private_key,
            CertificateChain=certificate_chain)
        certificate_arn = acm_certificate['CertificateArn']
    self.update_domain_base_path_mapping(domain_name, lambda_name, stage, base_path)
    return self.apigateway_client.update_domain_name(
        domainName=domain_name,
        patchOperations=[
            {"op": "replace", "path": "/certificateName", "value": certificate_name},
            {"op": "replace", "path": "/certificateArn", "value": certificate_arn},
        ])
This updates your certificate information for an existing domain, with similar arguments to boto's update_domain_name API Gateway api. It returns the resulting new domain information including the new certificate's ARN if created during this process. Previously, this method involved downtime that could take up to 40 minutes because the API Gateway api only allowed this by deleting, and then creating it. Related issues: https://github.com/Miserlou/Zappa/issues/590 https://github.com/Miserlou/Zappa/issues/588 https://github.com/Miserlou/Zappa/pull/458 https://github.com/Miserlou/Zappa/issues/882 https://github.com/Miserlou/Zappa/pull/883
def is_path_like(obj, attr=('name', 'is_file', 'is_dir', 'iterdir')):
    """Duck-type test: does *obj* expose a pathlib.Path-like interface?"""
    return all(hasattr(obj, a) for a in attr)
test if object is pathlib.Path like
def wait_for_any_log(nodes, pattern, timeout, filename='system.log', marks=None): if marks is None: marks = {} for _ in range(timeout): for node in nodes: found = node.grep_log(pattern, filename=filename, from_mark=marks.get(node, None)) if found: return node time.sleep(1) raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) + " Unable to find: " + repr(pattern) + " in any node log within " + str(timeout) + "s")
Look for a pattern in the system.log of any in a given list of nodes. @param nodes The list of nodes whose logs to scan @param pattern The target pattern @param timeout How long to wait for the pattern. Note that strictly speaking, timeout is not really a timeout, but a maximum number of attempts. This implies that the all the grepping takes no time at all, so it is somewhat inaccurate, but probably close enough. @param marks A dict of nodes to marks in the file. Keys must match the first param list. @return The first node in whose log the pattern was found
def _parse_phone(self, val):
    """Parse a vcard phone entry into ``self.vars['phone']``.

    Args:
        val (:obj:`list`): The value to parse; index 1 holds the attribute
            dict (optional 'type') and index 3 the number string.
    """
    ret = {
        'type': None,
        'value': None
    }
    try:
        ret['type'] = val[1]['type']
    except (IndexError, KeyError, ValueError, TypeError):
        pass  # the type attribute is optional
    ret['value'] = val[3].strip()
    try:
        self.vars['phone'].append(ret)
    except (KeyError, AttributeError):
        # 'phone' key missing entirely (KeyError) or initialised to a
        # non-list such as None (AttributeError): start a fresh list.
        # The original caught only AttributeError and crashed on a plain
        # dict without the key.
        self.vars['phone'] = [ret]
The function for parsing the vcard phone numbers. Args: val (:obj:`list`): The value to parse.
def dump_database_as_insert_sql(engine: Engine,
                                fileobj: TextIO = sys.stdout,
                                include_ddl: bool = False,
                                multirow: bool = False) -> None:
    """Write SQL replicating every table of the database to *fileobj*.

    Args:
        engine: SQLAlchemy :class:`Engine`
        fileobj: file-like object to write to
        include_ddl: if ``True``, also include the DDL to create each table
        multirow: write multi-row ``INSERT`` statements
    """
    for table_name in get_table_names(engine):
        dump_table_as_insert_sql(
            engine=engine,
            table_name=table_name,
            fileobj=fileobj,
            include_ddl=include_ddl,
            multirow=multirow,
        )
Reads an entire database and writes SQL to replicate it to the output file-like object. Args: engine: SQLAlchemy :class:`Engine` fileobj: file-like object to write to include_ddl: if ``True``, include the DDL to create the table as well multirow: write multi-row ``INSERT`` statements
def filter_req_paths(paths, func):
    """Return deduplicated requirement lines from *paths* accepted by *func*."""
    if not isinstance(paths, list):
        raise ValueError("Paths must be a list of paths.")
    collected = set()
    for path in paths:
        with path.open(mode='r') as reqs:
            collected.update(line for line in reqs if func(line))
    # Bare newlines are noise, never requirements.
    return list(collected - {'\n'})
Return list of filtered libs.
def _register_service(self):
    """Register the provided service, if possible.

    Registration happens only when no registration exists yet, the component
    declares specifications, dependencies are validated and the service
    controller is on.
    """
    if (
        self._registration is None
        and self.specifications
        and self.__validated
        and self.__controller_on
    ):
        # Snapshot the component properties at registration time.
        properties = self._ipopo_instance.context.properties.copy()
        bundle_context = self._ipopo_instance.bundle_context
        self._registration = bundle_context.register_service(
            self.specifications,
            self._ipopo_instance.instance,
            properties,
            factory=self.__is_factory,
            prototype=self.__is_prototype,
        )
        self._svc_reference = self._registration.get_reference()
        # Notify the component that registration completed.
        self._ipopo_instance.safe_callback(
            ipopo_constants.IPOPO_CALLBACK_POST_REGISTRATION,
            self._svc_reference,
        )
Registers the provided service, if possible
def plot_probability_alive_matrix(
    model,
    max_frequency=None,
    max_recency=None,
    title="Probability Customer is Alive,\nby Frequency and Recency of a Customer",
    xlabel="Customer's Historical Frequency",
    ylabel="Customer's Recency",
    **kwargs
):
    """Plot the probability-alive matrix of a fitted lifetimes model as a
    heatmap.

    Parameters
    ----------
    model: fitted lifetimes model
    max_frequency: int, optional
        Maximum frequency to plot; defaults to the max observed frequency.
    max_recency: int, optional
        Maximum recency to plot; defaults to the max observed age.
    title, xlabel, ylabel: str, optional
        Figure labels.
    kwargs
        Passed through to ``matplotlib.imshow``.

    Returns
    -------
    axes: matplotlib.AxesSubplot
    """
    from matplotlib import pyplot as plt

    matrix = model.conditional_probability_alive_matrix(max_frequency, max_recency)
    interpolation = kwargs.pop("interpolation", "none")
    ax = plt.subplot(111)
    image = ax.imshow(matrix, interpolation=interpolation, **kwargs)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title(title)
    forceAspect(ax)
    plt.colorbar(image, ax=ax)
    return ax
Plot probability alive matrix as heatmap. Plot a figure of the probability a customer is alive based on their frequency and recency. Parameters ---------- model: lifetimes model A fitted lifetimes model. max_frequency: int, optional The maximum frequency to plot. Default is max observed frequency. max_recency: int, optional The maximum recency to plot. This also determines the age of the customer. Default to max observed age. title: str, optional Figure title xlabel: str, optional Figure xlabel ylabel: str, optional Figure ylabel kwargs Passed into the matplotlib.imshow command. Returns ------- axes: matplotlib.AxesSubplot
def __reset_unique_identities(self):
    """Clear identities relationships and enrollments data."""
    self.log("Reseting unique identities...")
    self.log("Clearing identities relationships")
    nids = 0
    uidentities = api.unique_identities(self.db)
    for uidentity in uidentities:
        for identity in uidentity.identities:
            # Moving an identity onto its own id detaches it into its own
            # unique identity, clearing any merge relationship.
            api.move_identity(self.db, identity.id, identity.id)
            nids += 1
    self.log("Relationships cleared for %s identities" % nids)
    self.log("Clearing enrollments")
    with self.db.connect() as session:
        enrollments = session.query(Enrollment).all()
        for enr in enrollments:
            session.delete(enr)
    self.log("Enrollments cleared")
Clear identities relationships and enrollments data
def load(self):
    """Load the text files via the underlying Java loader and return the
    dataset as an Instances object (only the structure when incremental).

    :return: the full dataset or the header (if incremental)
    :rtype: Instances
    """
    # Reset the Java-side loader before pulling the dataset.
    javabridge.call(self.jobject, "reset", "()V")
    dataset = javabridge.call(
        self.jobject, "getDataSet", "()Lweka/core/Instances;")
    return Instances(dataset)
Loads the text files from the specified directory and returns the Instances object. In case of incremental loading, only the structure. :return: the full dataset or the header (if incremental) :rtype: Instances
def in_app() -> bool:
    """Judge whether the current working directory is inside a Django
    application.

    Returns:
        bool: True if ``apps`` is importable from here and ``apps.py``
        exists in the cwd; False otherwise.
    """
    try:
        MirageEnvironment.set_import_root()
        import apps  # noqa: F401 -- importability itself is the check
        return os.path.isfile("apps.py")
    except ImportError:
        return False
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # still propagate instead of being silently reported as "not in app".
        return False
Judge where current working directory is in Django application or not. returns: - (Bool) cwd is in app dir returns True
def delete_tmp_dir(self):
    """Delete the entire tmp dir, ignoring removal errors."""
    logger.debug("Deleting: " + self.tmp_dir)
    # Second positional argument is ignore_errors: best-effort cleanup.
    shutil.rmtree(self.tmp_dir, True)
Delete the entire tmp dir
def __ordering_deprecated(self):
    """Issue a DeprecationWarning for ordering comparisons on pywbem CIM
    objects."""
    msg = _format("Ordering comparisons involving {0} objects are "
                  "deprecated.", self.__class__.__name__)
    if DEBUG_WARNING_ORIGIN:
        # Optionally attach the call stack so the warning origin can be traced.
        msg += "\nTraceback:\n" + ''.join(traceback.format_stack())
    # stacklevel=3 points the warning at the user's comparison site.
    warnings.warn(msg, DeprecationWarning, stacklevel=3)
Deprecated warning for pywbem CIM Objects
def authenticate(self, reauth=False):
    """Authenticate with the API and store the token on ``self.token``.

    :param reauth: True when refreshing an expired token (changes logging
        wording only).
    """
    auth_url = BASE_URL + "/rest/user"
    payload = {'email': self.email, 'password': self.password}
    arequest = requests.get(auth_url, params=payload)
    status = arequest.status_code
    if status != 200:
        # ``status`` is an int: the original concatenated it onto a string,
        # which raised TypeError instead of logging. Use lazy %-args.
        if reauth:
            _LOGGER.error("Reauthentication request failed. %s", status)
        else:
            _LOGGER.error(
                "Authentication request failed, please check credentials. %s",
                status)
    self.token = arequest.json().get('usertoken')
    if reauth:
        _LOGGER.info("Reauthentication was successful, token updated.")
    else:
        _LOGGER.info("Authentication was successful, token set.")
Authenticate with the API and store the authentication token on the instance.
def rotate(self, angle, axis, point=None, radians=False):
    """Rotate this Atom by *angle* about *axis*.

    Parameters
    ----------
    angle : float
        Angle that the Atom will be rotated.
    axis : 3D Vector (tuple, list, numpy.array)
        Axis about which the Atom will be rotated.
    point : 3D Vector (tuple, list, numpy.array), optional
        Point that the axis lies upon; origin when None.
    radians : bool, optional
        True if *angle* is defined in radians, False for degrees.
    """
    rotation = Quaternion.angle_and_axis(angle=angle, axis=axis, radians=radians)
    self._vector = rotation.rotate_vector(v=self._vector, point=point)
Rotates `Atom` by `angle`. Parameters ---------- angle : float Angle that `Atom` will be rotated. axis : 3D Vector (tuple, list, numpy.array) Axis about which the `Atom` will be rotated. point : 3D Vector (tuple, list, numpy.array), optional Point that the `axis` lies upon. If `None` then the origin is used. radians : bool, optional True is `angle` is define in radians, False is degrees.
def model_setup(self):
    """Call the ``setup`` function of every loaded device model that has
    elements.

    To be called after parsing all data files during system set up.

    Returns
    -------
    None
    """
    for device in self.devman.devices:
        model = self.__dict__[device]
        if model.n:
            # Let exceptions propagate naturally; the previous
            # ``except Exception as e: raise e`` added nothing and
            # rewrote the traceback.
            model.setup()
Call the ``setup`` function of the loaded models. This function is to be called after parsing all the data files during the system set up. Returns ------- None
def status(self):
    """Development status: map acronym -> full line for every response line
    starting with the status prefix."""
    lines = self.resp_text.split('\n')
    return {
        self._acronym_status(line): line
        for line in lines
        if line.startswith(self.prefix_status)
    }
Development status.
def get_volumes(self):
    """Return a list of all Volumes in this Storage Pool."""
    return [self.find_volume(name) for name in self.virsp.listVolumes()]
Return a list of all Volumes in this Storage Pool
def add_static_path(self, prefix: str, path: str) -> None:
    """Serve static files from *path* under the URL *prefix*.

    ``prefix = '/_static'`` is reserved for the server; do not use it for
    your app.
    """
    pattern = prefix if prefix.startswith('/') else '/' + prefix
    if not pattern.endswith('/(.*)'):
        pattern += '/(.*)'
    self.add_handlers(
        r'.*',
        [(pattern, StaticFileHandler, dict(path=path))],
    )
Add path to serve static files. ``prefix`` is used for url prefix to serve static files and ``path`` is a path to the static file directory. ``prefix = '/_static'`` is reserved for the server, so do not use it for your app.
def clear(self):
    """Erase the contents of the object, restoring every field to its
    default value."""
    for attr in ('country_code', 'national_number', 'extension',
                 'italian_leading_zero', 'number_of_leading_zeros',
                 'raw_input', 'preferred_domestic_carrier_code'):
        setattr(self, attr, None)
    self.country_code_source = CountryCodeSource.UNSPECIFIED
Erase the contents of the object
def switch(self):
    """Start a new Block for the option created by a ``|`` split and return
    it to the caller."""
    # New blocks inherit this chain's base block (or start one at self).
    base = self.base_block if self.base_block else self
    self.next_block = Block(
        self.parent, base_block=base, py3_wrapper=self.py3_wrapper
    )
    return self.next_block
block has been split via | so we need to start a new block for that option and return it to the user.
def get_point(cls, idx, size):
    """Get curve point coordinates by index.

    Parameters
    ----------
    idx : `int`
        Point index.
    size : `int`
        Curve size.

    Returns
    -------
    (`int`, `int`)
        Point coordinates.
    """
    # Base-4 digits of idx walk the curve from the innermost 2x2 block out.
    x, y = cls.POSITION[idx % 4]
    idx //= 4
    block_size = 2
    while block_size < size:
        x, y = cls.get_point_in_block(x, y, idx % 4, block_size)
        idx //= 4
        block_size *= 2
    return x, y
Get curve point coordinates by index. Parameters ---------- idx : `int` Point index. size : `int` Curve size. Returns ------- (`int`, `int`) Point coordinates.
def create_nouns(max=2):
    """Return a string of up to *max* random nouns joined by spaces."""
    return " ".join(random.choice(noun_list) for _ in range(max))
Return a string of random nouns up to max number
def _update_events(self):
    """Update our cached mapping of the latest activity event per event
    type."""
    events = self._skybell.dev_cache(self, CONST.EVENT) or {}
    for activity in self._activities:
        event = activity.get(CONST.EVENT)
        created_at = activity.get(CONST.CREATED_AT)
        # Keep only the most recent activity for each event type.
        old_event = events.get(event)
        if old_event and created_at < old_event.get(CONST.CREATED_AT):
            continue
        else:
            events[event] = activity
    self._skybell.update_dev_cache(
        self,
        {
            CONST.EVENT: events
        })
Update our cached list of latest activity events.
def import_family(self, rfa_file):
    """Append an import-family entry to the journal, instructing Revit to
    import a family into the opened model.

    Args:
        rfa_file (str): full path of the family file
    """
    entry = templates.IMPORT_FAMILY.format(family_file=rfa_file)
    self._add_entry(entry)
Append a import family entry to the journal. This instructs Revit to import a family into the opened model. Args: rfa_file (str): full path of the family file
def remove_labels(self, test):
    """Remove labels for which ``test(label)`` evaluates to True.

    Parameters
    ----------
    test : callable
        Called with each label; truthy return removes that label.

    Returns
    -------
    out : ``Cell``
        This cell.
    """
    # Slice-assign so external references to the labels list stay valid.
    self.labels[:] = [label for label in self.labels if not test(label)]
    return self
Remove labels from this cell. The function or callable ``test`` is called for each label in the cell. If its return value evaluates to ``True``, the corresponding label is removed from the cell. Parameters ---------- test : callable Test function to query whether a label should be removed. The function is called with the label as the only argument. Returns ------- out : ``Cell`` This cell. Examples -------- Remove labels in layer 1: >>> cell.remove_labels(lambda lbl: lbl.layer == 1)
def timeout(duration):
    """Decorator enforcing a SIGALRM-based time limit on a function call.

    :param int duration: the timeout duration in seconds
    :raises TypeError: if duration is not an integer
    :raises ValueError: if duration is not positive
    :raises TimeoutError: (at call time) if execution exceeds ``duration``
    """
    import functools

    if not isinstance(duration, int):
        raise TypeError("timeout duration should be a positive integer")
    if duration <= 0:
        raise ValueError("timeoutDuration should be a positive integer")

    def decorator(func):
        @functools.wraps(func)
        def wrapped_func(*args, **kwargs):
            def alarm_handler(signum, stack):
                raise TimeoutError()

            old_handler = signal.signal(signal.SIGALRM, alarm_handler)
            signal.alarm(duration)
            try:
                return func(*args, **kwargs)
            finally:
                # Cancel the pending alarm and restore the previous handler;
                # the original never called alarm(0), so a successful call
                # could still be killed by a stale SIGALRM later.
                signal.alarm(0)
                signal.signal(signal.SIGALRM, old_handler)
        return wrapped_func
    return decorator
A decorator to force a time limit on the execution of an external function. :param int duration: the timeout duration :raises: TypeError, if duration is anything other than integer :raises: ValueError, if duration is a negative integer :raises TimeoutError, if the external function execution crosses 'duration' time
def encode_offset_fetch_request(cls, group, payloads, from_kafka=False):
    """Encode an OffsetFetchRequest struct.

    Version 0 (from_kafka False) requests Zookeeper offsets; version 1
    requests Kafka-committed offsets.

    Arguments:
        group: string, the consumer group you are fetching offsets for
        payloads: list of OffsetFetchRequestPayload
        from_kafka: bool, default False, set True for Kafka-committed offsets
    """
    version = 1 if from_kafka else 0
    grouped = group_by_topic_and_partition(payloads)
    topics = [(topic, list(partitions.keys()))
              for topic, partitions in six.iteritems(grouped)]
    return kafka.protocol.commit.OffsetFetchRequest[version](
        consumer_group=group, topics=topics)
Encode an OffsetFetchRequest struct. The request is encoded using version 0 if from_kafka is false, indicating a request for Zookeeper offsets. It is encoded using version 1 otherwise, indicating a request for Kafka offsets. Arguments: group: string, the consumer group you are fetching offsets for payloads: list of OffsetFetchRequestPayload from_kafka: bool, default False, set True for Kafka-committed offsets
def write_job(self,fh):
    """Write the DAG entry for this node's job to the DAG file descriptor.

    @param fh: descriptor of open DAG file.
    """
    # NOTE: the double-underscore attributes are name-mangled against the
    # enclosing class, so this method only works as a member of that class.
    if isinstance(self.job(),CondorDAGManJob):
        # Sub-DAGs are declared with SUBDAG EXTERNAL rather than JOB.
        fh.write( ' '.join(
            ['SUBDAG EXTERNAL', self.__name, self.__job.get_sub_file()]) )
        if self.job().get_dag_directory():
            fh.write( ' DIR ' + self.job().get_dag_directory() )
    else:
        fh.write( 'JOB ' + self.__name + ' ' + self.__job.get_sub_file() )
    fh.write( '\n')
    fh.write( 'RETRY ' + self.__name + ' ' + str(self.__retry) + '\n' )
Write the DAG entry for this node's job to the DAG file descriptor. @param fh: descriptor of open DAG file.
def is_available(self) -> bool:
    """Indicate whether this Monitor's capture daemon is up and capturing
    frames."""
    status_response = self._client.get_state(
        'api/monitors/daemonStatus/id:{}/daemon:zmc.json'.format(
            self._monitor_id
        )
    )
    if not status_response:
        _LOGGER.warning('Could not get availability for monitor {}'.format(
            self._monitor_id
        ))
        return False
    monitor_status = self._raw_result.get('Monitor_Status', None)
    capture_fps = monitor_status and monitor_status['CaptureFPS']
    daemon_up = status_response.get('status', False)
    # Daemon reported up AND frames are actually being captured.
    return daemon_up and capture_fps != "0.00"
Indicate if this Monitor is currently available.
def import_variables(self, container, varnames=None):
    """Copy tk variables onto *container* as attributes, avoiding a
    ``get_variable`` call per variable.

    When *varnames* is given, only those names (if present) are copied.
    """
    if varnames is None:
        names = self.tkvariables
    else:
        names = [name for name in varnames if name in self.tkvariables]
    for name in names:
        setattr(container, name, self.tkvariables[name])
Helper method to avoid call get_variable for every variable.
def _coulomb(n1, n2, k, r):
    """Apply Coulomb-style repulsion between two nodes within radius *r*,
    updating each node's 'force' in place."""
    delta = [b - a for a, b in zip(n1['velocity'], n2['velocity'])]
    distance = sqrt(sum(d * d for d in delta))
    if distance < 0.1:
        # Near-coincident nodes: jitter to avoid a degenerate direction.
        delta = [uniform(0.1, 0.2) for _ in repeat(None, 3)]
        distance = sqrt(sum(d * d for d in delta))
    if distance < r:
        force = (k / distance) ** 2
        n1['force'] = [f - force * d for f, d in zip(n1['force'], delta)]
        n2['force'] = [f + force * d for f, d in zip(n2['force'], delta)]
Calculates Coulomb forces and updates node data.
def partition(self, dimension):
    """Partition the subspace into the desired dimension.

    :type dimension: int
    :param dimension: Maximum dimension to use.
    """
    for idx, channel in enumerate(self.u):
        available = self.v[idx].shape[1]
        if available < dimension:
            raise IndexError('Channel is max dimension %s' % available)
        self.data[idx] = channel[:, 0:dimension]
    self.dimension = dimension
    return self
Partition subspace into desired dimension. :type dimension: int :param dimension: Maximum dimension to use.
def load(self, spec):
    """Find and return the template for a TemplateSpec instance as a
    unicode string.

    Arguments:
      spec: a TemplateSpec instance.
    """
    if spec.template is None:
        # No inline template: locate the file and read it.
        path = self._find(spec)
        return self.loader.read(path, spec.template_encoding)
    return self.loader.unicode(spec.template, spec.template_encoding)
Find and return the template associated to a TemplateSpec instance. Returns the template as a unicode string. Arguments: spec: a TemplateSpec instance.
def randomize(self, device=None, percent=100, silent=False):
    """Write random data at the start of 4MB blocks across a block device.

    Useful when performance-testing the backup process. With default
    arguments, touches 100 percent of the device's 4MB blocks.

    :param device: device to write to (resolved via ``get_volume``)
    :param percent: percentage of 4MB blocks to write to
    :param silent: suppress progress output when True
    """
    volume = self.get_volume(device)
    blocks = int(volume['size'] / BLOCK_SIZE)
    num_writes = int(blocks * percent * 0.01)
    offsets = sorted(random.sample(range(blocks), num_writes))
    total = 0
    if not silent:
        print('Writing urandom to %s bytes in %s'
              % (volume['size'], volume['path']))
    # Open in binary mode: os.urandom() returns bytes, which the original
    # text-mode open('w') rejects with TypeError on Python 3. The context
    # manager also guarantees the device handle is closed.
    with open(volume['path'], 'wb') as device_file:
        for offset in offsets:
            if not silent:
                self.dot()
            device_file.seek(offset * BLOCK_SIZE)
            data = os.urandom(32768) * 128
            total += len(data)
            device_file.write(data)
    print("\nWrote: %s" % total)
Writes random data to the beginning of each 4MB block on a block device this is useful when performance testing the backup process (Without any optional arguments will randomize the first 32k of each 4MB block on 100 percent of the device)
def parse_nem_file(nem_file) -> NEMFile:
    """Parse NEM file and return meter readings named tuple."""
    csv_rows = csv.reader(nem_file, delimiter=',')
    return parse_nem_rows(csv_rows, file_name=nem_file)
Parse NEM file and return meter readings named tuple
def scan(self):
    """Scan this node's dependents for implicit dependencies.

    Populates ``self.implicit`` / ``self.implicit_set`` (and those of all
    executor targets) either from stored implicit dependencies or by
    running the source and target scanners.
    """
    # Already scanned during this run: nothing to do.
    if self.implicit is not None:
        return
    self.implicit = []
    self.implicit_set = set()
    self._children_reset()
    # Source-only nodes have no builder, hence no implicit dependencies.
    if not self.has_builder():
        return
    build_env = self.get_build_env()
    executor = self.get_executor()
    # NOTE(review): `implicit_cache`, `implicit_deps_changed` and
    # `implicit_deps_unchanged` look like module-level option flags
    # controlling reuse of stored dependencies -- confirm in module scope.
    if implicit_cache and not implicit_deps_changed:
        # Try to reuse implicit dependencies recorded by a previous build.
        implicit = self.get_stored_implicit()
        if implicit is not None:
            for tgt in executor.get_all_targets():
                tgt.add_to_implicit(implicit)
            if implicit_deps_unchanged or self.is_up_to_date():
                return
            # Stored implicit deps are stale: reset and fall through to
            # a fresh scan below.
            for tgt in executor.get_all_targets():
                tgt.implicit = []
                tgt.implicit_set = set()
    executor.scan_sources(self.builder.source_scanner)
    # Scan the targets themselves for their own implicit dependencies.
    scanner = self.get_target_scanner()
    if scanner:
        executor.scan_targets(scanner)
Scan this node's dependents for implicit dependencies.
def try_friends(self, others):
    """Look for random agents around me and try to befriend them."""
    made_friend = False
    # Openness (0..1) scales how many candidates we approach (0..10).
    attempts = int(10*self['openness'])
    shuffle(others)
    for candidate in islice(others, attempts):
        if candidate == self:
            continue
        if not candidate.befriend(self):
            self.debug('{} does not want to be friends'.format(candidate.id))
            continue
        # Mutual befriending: force our side once they accept.
        self.befriend(candidate, force=True)
        self.debug('Hooray! new friend: {}'.format(candidate.id))
        made_friend = True
    return made_friend
Look for random agents around me and try to befriend them
def canceled_plan_summary_for(self, year, month):
    """Return Subscriptions canceled within a time range with plan counts annotated."""
    canceled = self.canceled_during(year, month)
    # Group by plan -- order_by() clears any default ordering so the
    # GROUP BY produced by values()/annotate() stays clean.
    grouped = canceled.values("plan").order_by()
    return grouped.annotate(count=models.Count("plan"))
Return Subscriptions canceled within a time range with plan counts annotated.
def callback(self):
    """The function that will be executed by the scheduler.

    Any exception raised by the wrapped callback is captured: its
    traceback is recorded in ``self.exceptions`` and the scheduler is
    shut down instead of letting the error propagate.
    """
    try:
        return self._callback()
    # Use `except Exception` rather than a bare `except:` so that
    # KeyboardInterrupt/SystemExit still propagate to the scheduler.
    except Exception:
        s = straceback()
        self.exceptions.append(s)
        self.shutdown(msg="Exception raised in callback!\n" + s)
The function that will be executed by the scheduler.
def find_trigger_value(psd_var, idx, start, sample_rate):
    """Find the PSD variation value at a particular time.

    Parameters
    ----------
    psd_var : TimeSeries
        Time series of the variability in the PSD estimation.
    idx : numpy.ndarray
        Time indices of the triggers.
    start : float
        GPS start time.
    sample_rate : float
        Sample rate defined in ini file.

    Returns
    -------
    vals : Array
        PSD variation value at a particular time.
    """
    # Convert trigger indices into GPS times, then locate the PSD
    # variation sample each time falls into (digitize is 1-based).
    trigger_times = start + idx / sample_rate
    bins = numpy.digitize(trigger_times, psd_var.sample_times) - 1
    return psd_var[bins]
Find the PSD variation value at a particular time Parameters ---------- psd_var : TimeSeries Time series of the variability in the PSD estimation idx : numpy.ndarray Time indices of the triggers start : float GPS start time sample_rate : float Sample rate defined in ini file Returns ------- vals : Array PSD variation value at a particular time
def submit_if_ready(args, submit_args, config):
    """Submits the input XML file if it's already in the expected format.

    Returns None when the file is not ready-to-submit XML, 0 on success
    (or when nothing needs doing), and 2 when submission fails.
    """
    __, ext = os.path.splitext(args.input_file)
    if ext.lower() != ".xml":
        return None
    # Peek at the first 1KB to check for the expected root elements.
    with io.open(args.input_file, encoding="utf-8") as input_file:
        head = input_file.read(1024)
        markers = ("<testsuites", "<testcases", "<requirements")
        if not any(marker in head for marker in markers):
            return None
    if args.no_submit:
        logger.info("Nothing to do")
        return 0
    response = dump2polarion.submit_and_verify(
        xml_file=args.input_file, config=config, **submit_args
    )
    return 0 if response else 2
Submits the input XML file if it's already in the expected format.
def make_string(seq):
    """Build a printable string from a sequence of character codes.

    Don't throw an exception when given an out of range character:
    out-of-range or non-integer items are skipped, and if nothing
    printable remains the str() of the whole sequence is returned.
    """
    chars = []
    for code in seq:
        try:
            if 32 <= code < 256:
                chars.append(chr(code))
        except TypeError:
            # Non-numeric entry (e.g. a str element): ignore it.
            pass
    return ''.join(chars) or str(seq)
Don't throw an exception when given an out of range character.
def has_obsgroup_id(self, group_id):
    """Check for the presence of the given group_id.

    :param string group_id: The group ID
    :return: True if an ObservationGroup with this Id exists, False otherwise
    """
    self.con.execute('SELECT 1 FROM archive_obs_groups WHERE publicId = %s', (group_id,))
    rows = self.con.fetchall()
    return len(rows) > 0
Check for the presence of the given group_id :param string group_id: The group ID :return: True if we have a :class:`meteorpi_model.ObservationGroup` with this Id, False otherwise
def _process_blacklist(self, blacklist):
    """Process blacklist into set of excluded versions.

    Each entry carries a ``version`` string and an ``operator`` key
    (resolved via the module-level ``OPERATORS`` mapping).  Match sets
    are memoised in ``self._cache['blacklist']`` keyed on
    ``(version, operator)`` so unchanged entries skip re-evaluation.
    Returns the union of all excluded version keys.
    """
    blacklist_cache = {}
    blacklist_cache_old = self._cache.get('blacklist', {})
    for entry in blacklist:
        blackkey = (entry.version, entry.operator)
        if blackkey in blacklist_cache:
            # Duplicate entry within this blacklist: already handled.
            continue
        elif blackkey in blacklist_cache_old:
            # Seen on a previous run: reuse the cached match set.
            blacklist_cache[blackkey] = blacklist_cache_old[blackkey]
        else:
            # New entry: test every known version key against it.
            entry_cache = blacklist_cache[blackkey] = set()
            blackversion = parse_version(entry.version or '0')
            blackop = OPERATORS[entry.operator]
            for key in self:
                if blackop(parse_version(key), blackversion):
                    entry_cache.add(key)
    # Replace (not update) the cache so removed entries are dropped.
    self._cache['blacklist'] = blacklist_cache
    return set().union(*blacklist_cache.values())
Process blacklist into set of excluded versions
def csep_periodic(ra, rb, L):
    """Return separation vectors between each pair of the two sets of points.

    Parameters
    ----------
    ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions.
        Two sets of points.
    L: float array, shape (d,)
        System lengths.

    Returns
    -------
    csep: float array-like, shape (n, m, d)
        csep[i, j] is the separation vector from point j to point i,
        wrapped by the minimum-image convention.
    """
    # All pairwise separation vectors, shape (n, m, d).
    seps = ra[:, np.newaxis, :] - rb[np.newaxis, :, :]
    # Apply the minimum-image convention independently per axis.
    for axis in range(ra.shape[1]):
        component = seps[:, :, axis]
        half = L[axis] / 2.0
        component[component > half] -= L[axis]
        component[component < -half] += L[axis]
    return seps
Return separation vectors between each pair of the two sets of points. Parameters ---------- ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions. Two sets of points. L: float array, shape (d,) System lengths. Returns ------- csep: float array-like, shape (n, m, d) csep[i, j] is the separation vector from point j to point i. Note the un-intuitive vector direction.
def min_sequence_length(self, dataset_split):
    """Determine the minimum sequence length given a dataset_split.

    Args:
      dataset_split: A problem.DatasetSplit.

    Returns:
      The minimum length that a sequence can be for this dataset_split.
    """
    minimum_lengths = {
        problem.DatasetSplit.TRAIN: 8,
        problem.DatasetSplit.EVAL: 65,
        problem.DatasetSplit.TEST: 65,
    }
    return minimum_lengths[dataset_split]
Determine the minimum sequence length given a dataset_split. Args: dataset_split: A problem.DatasetSplit. Returns: The minimum length that a sequence can be for this dataset_split.
def get_base_input(test=False):
    """Return DateTimeBaseInput class from django.forms.widgets module.

    Return _compatibility.DateTimeBaseInput class for older django versions.
    """
    from django.forms.widgets import DateTimeBaseInput
    # Modern django exposes get_context() on widgets; older releases
    # (or an explicit test request) need the compatibility shim instead.
    if 'get_context' not in dir(DateTimeBaseInput) or test:
        from bootstrap_datepicker_plus._compatibility import (
            CompatibleDateTimeBaseInput
        )
        return CompatibleDateTimeBaseInput
    return DateTimeBaseInput
Return DateTimeBaseInput class from django.forms.widgets module Return _compatibility.DateTimeBaseInput class for older django versions.
def addNode(self, node):
    """Update the shared map with my in-construction node.

    Registers the node both in this builder's local map and in the
    shared map, where it is paired with our completion event.
    """
    buid = node.buid
    self.mybldgbuids[buid] = node
    self.allbldgbuids[buid] = (node, self.doneevent)
Update the shared map with my in-construction node
def _encode_image(self, np_image):
    """Returns np_image encoded as jpeg or png.

    Raises:
      ValueError: if the image dtype is not uint8.
    """
    if np_image.dtype != np.uint8:
        raise ValueError('Image should be uint8. Detected: %s.' % np_image.dtype)
    utils.assert_shape_match(np_image.shape, self._shape)
    # Pick the encoder matching the configured format and run it.
    encoder = ENCODE_FN[self._encoding_format]
    return self._runner.run(encoder, np_image)
Returns np_image encoded as jpeg or png.
def create_bokeh_server(io_loop, files, argvs, host, port):
    """Start bokeh server with application paths."""
    from bokeh.server.server import Server
    from bokeh.command.util import build_single_handler_applications
    apps = build_single_handler_applications(files, argvs)
    server_options = dict(
        io_loop=io_loop,
        generate_session_ids=True,
        redirect_root=True,
        use_x_headers=False,
        secret_key=None,
        num_procs=1,
        host=host,
        sign_sessions=False,
        develop=False,
        port=port,
        use_index=True,
    )
    return Server(apps, **server_options)
Start bokeh server with application paths
def secure(func_or_obj, check_permissions_for_obj=None):
    """This method secures a method or class depending on invocation.

    To decorate a method use one argument:
        @secure(<check_permissions_method>)
    To secure a class, invoke with two arguments:
        secure(<obj instance>, <check_permissions_method>)
    """
    # Single-argument form: we were handed the check_permissions method.
    if _allowed_check_permissions_types(func_or_obj):
        return _secure_method(func_or_obj)
    # Two-argument form: securing an object instance.
    if not _allowed_check_permissions_types(check_permissions_for_obj):
        msg = ("When securing an object, secure() requires the "
               "second argument to be method")
        raise TypeError(msg)
    return _SecuredAttribute(func_or_obj, check_permissions_for_obj)
This method secures a method or class depending on invocation. To decorate a method use one argument: @secure(<check_permissions_method>) To secure a class, invoke with two arguments: secure(<obj instance>, <check_permissions_method>)
def search(self, index_name, query):
    """Search the given index_name with the given ELS query.

    Args:
        index_name: Name of the Index
        query: The string to be searched.

    Returns:
        List of results.

    Raises:
        RuntimeError: When the search query fails.
    """
    try:
        results = self.els_search.search(index=index_name, body=query)
        return results
    # NOTE: legacy Python 2 except/print syntax -- this module predates
    # Python 3.
    except Exception, error:
        error_str = 'Query failed: %s\n' % str(error)
        error_str += '\nIs there a dynamic script in the query?, see www.elasticsearch.org'
        print error_str
        raise RuntimeError(error_str)
Search the given index_name with the given ELS query. Args: index_name: Name of the Index query: The string to be searched. Returns: List of results. Raises: RuntimeError: When the search query fails.
def _get_openstack_release(self):
    """Get openstack release.

    Return an integer representing the enum value of the openstack
    release.  As a side effect, one attribute per entry in
    OPENSTACK_RELEASES_PAIRS (e.g. ``self.trusty_icehouse``) is set to
    that entry's position, so the integers order releases chronologically.
    """
    # Enum values come from list position, so OPENSTACK_RELEASES_PAIRS
    # must stay ordered earliest -> latest release.
    for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
        setattr(self, os_pair, i)
    # Map (ubuntu series, cloud-archive pocket) -> release enum value.
    releases = {
        ('trusty', None): self.trusty_icehouse,
        ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
        ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
        ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
        ('xenial', None): self.xenial_mitaka,
        ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
        ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
        ('xenial', 'cloud:xenial-pike'): self.xenial_pike,
        ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
        ('yakkety', None): self.yakkety_newton,
        ('zesty', None): self.zesty_ocata,
        ('artful', None): self.artful_pike,
        ('bionic', None): self.bionic_queens,
        ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
        ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
        ('cosmic', None): self.cosmic_rocky,
        ('disco', None): self.disco_stein,
    }
    # KeyError here means an unknown series/pocket combination.
    return releases[(self.series, self.openstack)]
Get openstack release. Return an integer representing the enum value of the openstack release.
def header(self):
    """Return the bytes for the header of track.

    The header embeds the byte length of track_data plus the
    end-of-track marker, so call this only after all data has been
    added.
    """
    body_length = len(self.track_data) + len(self.end_of_track())
    # Encode the length as a 4-byte big-endian value via its hex form.
    chunk_size = a2b_hex('%08x' % body_length)
    return TRACK_HEADER + chunk_size
Return the bytes for the header of track. The header contains the length of the track_data, so you'll have to call this function when you're done adding data (when you're not using get_midi_data).
def render_app_name(context, app, template="/admin_app_name.html"):
    """Render the application name using the default template name.

    Falls back to the application's plain name if the template cannot
    be found or rendering fails for any reason.
    """
    try:
        # Per-app template path, e.g. "<app_label>/admin_app_name.html".
        return render_to_string(app['app_label'] + template, context)
    # Bare except kept deliberately: any rendering failure falls back.
    except:
        return app['name']
Render the application name using the default template name. If it cannot find a template matching the given path, fallback to the application name.
def on_step_end(self, step, logs=None):
    """Save weights at interval steps during training.

    Args:
        step: Step index supplied by the training loop.
        logs: Optional dict of metrics, interpolated into the filepath
            template alongside ``step``.
    """
    # Use None instead of a mutable `{}` default (shared across calls).
    logs = logs or {}
    self.total_steps += 1
    if self.total_steps % self.interval != 0:
        # Nothing to do.
        return
    filepath = self.filepath.format(step=self.total_steps, **logs)
    if self.verbose > 0:
        print('Step {}: saving model to {}'.format(self.total_steps, filepath))
    self.model.save_weights(filepath, overwrite=True)
Save weights at interval steps during training
def fill_window(self, seqNum):
    """Send all of the packets necessary to fill out the segmentation window.

    Transmission stops early -- and all segments are marked sent -- when
    the final segment (apduMor false) goes out.
    """
    if _debug: SSM._debug("fill_window %r", seqNum)
    if _debug: SSM._debug(" - actualWindowSize: %r", self.actualWindowSize)
    for offset in range(self.actualWindowSize):
        segment = self.get_segment(seqNum + offset)
        self.ssmSAP.request(segment)
        if not segment.apduMor:
            # Last segment carries no "more follows" flag.
            self.sentAllSegments = True
            break
This function sends all of the packets necessary to fill out the segmentation window.
def vacuum(self, threshold=0.3):
    """Force garbage collection.

    :param float threshold (optional): garbage threshold for this run;
        the server's default threshold is not changed.
    :rtype: boolean
    """
    url = ("http://{master_addr}:{master_port}/"
           "vol/vacuum?garbageThreshold={threshold}").format(
        master_addr=self.master_addr,
        master_port=self.master_port,
        threshold=threshold)
    # Any non-None response means the vacuum request succeeded.
    return self.conn.get_data(url) is not None
Force garbage collection :param float threshold (optional): The threshold is optional, and will not change the default threshold. :rtype: boolean
def handleHeader(self, key, value):
    """Handle header values.

    Captures CIM error details delivered via the CIMError and
    PGErrorDetail response headers, URL-decoding their values.
    """
    if key == 'CIMError':
        self.CIMError = urllib.parse.unquote(value)
    elif key == 'PGErrorDetail':
        self.PGErrorDetail = urllib.parse.unquote(value)
Handle header values.
def preLoad(self):
    """Store audio data in cache for fast playback."""
    logging.getLogger().debug("Preloading segment '%s'" % (self))
    cache_url = self.buildUrl(cache_friendly=True)
    # Fetch from the real URL, but key the cache on the friendly one.
    audio_data = self.download(self.buildUrl())
    assert(audio_data)
    __class__.cache[cache_url] = audio_data
Store audio data in cache for fast playback.
def _to_number(cls, string): try: if float(string) - int(string) == 0: return int(string) return float(string) except ValueError: try: return float(string) except ValueError: return string
Convert string to int or float.
def make_symlink(source, link_path):
    """Create a symlink at `link_path` referring to `source`.

    On platforms without symlink support, reports a dbt system error
    instead of attempting the link.
    """
    if supports_symlinks():
        return os.symlink(source, link_path)
    dbt.exceptions.system_error('create a symbolic link')
Create a symlink at `link_path` referring to `source`.
def plot_color_map_bars(values, vmin=None, vmax=None, color_map=None, axis=None, **kwargs):
    """Plot bar for each value in `values`, colored based on values mapped
    onto the specified color map.

    Args
    ----
    values (pandas.Series) : Numeric values to plot one bar per value.
    axis : A matplotlib axis. If `None`, an axis is created.
    vmin : Minimum value to clip values at.
    vmax : Maximum value to clip values at.
    color_map : A matplotlib color map (see `matplotlib.cm`).
    **kwargs : Extra keyword arguments to pass to `values.plot`.

    Returns
    -------
    (axis) : Bar plot axis.
    """
    if axis is None:
        fig, axis = plt.subplots()
    # Explicit None checks: `vmin or min(values)` would silently discard
    # a legitimate vmin/vmax of 0.
    if vmin is None:
        vmin = min(values)
    if vmax is None:
        vmax = max(values)
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax, clip=True)
    if color_map is None:
        # NOTE(review): rcParams['image.cmap'] is a cmap *name* string;
        # this presumes it is callable here -- confirm the mpl version
        # used makes this valid, otherwise resolve via mpl.cm.get_cmap.
        color_map = mpl.rcParams['image.cmap']
    colors = color_map(norm(values.values).filled())
    values.plot(kind='bar', ax=axis, color=colors, **kwargs)
    return axis
Plot bar for each value in `values`, colored based on values mapped onto the specified color map. Args ---- values (pandas.Series) : Numeric values to plot one bar per value. axis : A matplotlib axis. If `None`, an axis is created. vmin : Minimum value to clip values at. vmax : Maximum value to clip values at. color_map : A matplotlib color map (see `matplotlib.cm`). **kwargs : Extra keyword arguments to pass to `values.plot`. Returns ------- (axis) : Bar plot axis.
def schoice(self, seq: str, end: int = 10) -> str:
    """Choice function which returns string created from sequence.

    :param seq: Sequence of letters or digits.
    :param end: Number of random picks to concatenate.
    :return: Single string.
    """
    picks = []
    for _ in range(end):
        picks.append(self.choice(list(seq)))
    return ''.join(picks)
Choice function which returns string created from sequence. :param seq: Sequence of letters or digits. :type seq: str :param end: Number of characters to pick. :return: Single string.
def serialize_me(self, arn, event_time, tech, item=None):
    """Dumps the proper JSON for the schema.

    If the event is too big (no item supplied), the item is omitted and
    the payload is flagged with ``event_too_big`` instead.

    :param arn:
    :param event_time:
    :param tech:
    :param item:
    :return:
    """
    payload = dict(arn=arn, event_time=event_time, tech=tech)
    if item:
        payload['item'] = item
    else:
        payload['event_too_big'] = True
    serialized = self.dumps(payload).data
    return serialized.replace('<empty>', '')
Dumps the proper JSON for the schema. If the event is too big, then don't include the item. :param arn: :param event_time: :param tech: :param item: :return:
def run(quiet, args):
    """Run a local command.

    Examples:

        $ django run manage.py runserver ...
    """
    if not args:
        raise ClickException('pass a command to run')
    application = get_current_application()
    # Default the settings module from the application name when unset.
    settings = os.environ.get(
        'DJANGO_SETTINGS_MODULE', '%s.settings' % application.name
    )
    return application.run(
        ' '.join(args),
        verbose=not quiet,
        abort=False,
        capture=True,
        env={'DJANGO_SETTINGS_MODULE': settings},
    )
Run a local command. Examples: $ django run manage.py runserver ...
def append(self, data):
    """Append data to the end of the stream.

    The pointer will not move if this operation is successful.

    @param data: The data to append to the stream.
    @type data: C{str} or C{unicode}
    @raise TypeError: data is not C{str} or C{unicode}
    """
    saved_position = self.tell()
    self.seek(0, 2)
    # StringIO-like objects expose getvalue(); unwrap them first.
    payload = data.getvalue() if hasattr(data, 'getvalue') else data
    self.write_utf8_string(payload)
    self.seek(saved_position)
Append data to the end of the stream. The pointer will not move if this operation is successful. @param data: The data to append to the stream. @type data: C{str} or C{unicode} @raise TypeError: data is not C{str} or C{unicode}
def cancel(self, invoice_id, **kwargs):
    """Cancel an unpaid Invoice with given ID via API.

    It can only be called on an invoice that is not in the paid state.

    Args:
        invoice_id : Id of the invoice to cancel
    Returns:
        The invoice entity, with status 'cancelled'.
    """
    endpoint = '/'.join([self.base_url, invoice_id, 'cancel'])
    return self.post_url(endpoint, {}, **kwargs)
Cancel an unpaid Invoice with given ID via API It can only be called on an invoice that is not in the paid state. Args: invoice_id : Id of the invoice to cancel Returns: The response for the API will be the invoice entity, similar to create/update API response, with status attribute's value as cancelled
def merge(self, other, inplace=None, overwrite_vars=frozenset(), compat='no_conflicts', join='outer'):
    """Merge the arrays of two datasets into a single dataset.

    Variables with the same name are checked for conflicts according to
    ``compat``; ``join`` controls index alignment.  Raises MergeError on
    conflicting variables.
    """
    # Normalise/validate the (deprecated) `inplace` flag.
    inplace = _check_inplace(inplace)
    # Delegate alignment and conflict checking to the shared merge
    # routine, then rebuild this Dataset's internal state from it.
    variables, coord_names, dims = dataset_merge_method(
        self, other, overwrite_vars=overwrite_vars, compat=compat,
        join=join)
    return self._replace_vars_and_dims(variables, coord_names, dims,
                                       inplace=inplace)
Merge the arrays of two datasets into a single dataset. This method generally does not allow for overriding data, with the exception of attributes, which are ignored on the second dataset. Variables with the same name are checked for conflicts via the equals or identical methods. Parameters ---------- other : Dataset or castable to Dataset Dataset or variables to merge with this dataset. inplace : bool, optional If True, merge the other dataset into this dataset in-place. Otherwise, return a new dataset object. overwrite_vars : str or sequence, optional If provided, update variables of these name(s) without checking for conflicts in this dataset. compat : {'broadcast_equals', 'equals', 'identical', 'no_conflicts'}, optional String indicating how to compare variables of the same name for potential conflicts: - 'broadcast_equals': all values must be equal when variables are broadcast against each other to ensure common dimensions. - 'equals': all values and dimensions must be the same. - 'identical': all values, dimensions and attributes must be the same. - 'no_conflicts': only values which are not null in both datasets must be equal. The returned dataset then contains the combination of all non-null values. join : {'outer', 'inner', 'left', 'right', 'exact'}, optional Method for joining ``self`` and ``other`` along shared dimensions: - 'outer': use the union of the indexes - 'inner': use the intersection of the indexes - 'left': use indexes from ``self`` - 'right': use indexes from ``other`` - 'exact': error instead of aligning non-equal indexes Returns ------- merged : Dataset Merged dataset. Raises ------ MergeError If any variables conflict (see ``compat``).
def ReplaceAttachment(self, attachment_link, attachment, options=None):
    """Replaces an attachment and returns it.

    :param str attachment_link: The link to the attachment.
    :param dict attachment:
    :param dict options: The request options for the request.
    :return: The replaced Attachment
    :rtype: dict
    """
    if options is None:
        options = {}
    CosmosClient.__ValidateResource(attachment)
    attachment_path = base.GetPathFromLink(attachment_link)
    attachment_id = base.GetResourceIdOrFullNameFromLink(attachment_link)
    return self.Replace(attachment,
                        attachment_path,
                        'attachments',
                        attachment_id,
                        None,
                        options)
Replaces an attachment and returns it. :param str attachment_link: The link to the attachment. :param dict attachment: :param dict options: The request options for the request. :return: The replaced Attachment :rtype: dict
def add_nodes(self, lb, nodes):
    """Adds the list of nodes to the specified load balancer.

    A single node may be passed; it is wrapped in a list automatically.
    """
    if not isinstance(nodes, (list, tuple)):
        nodes = [nodes]
    payload = {"nodes": [node.to_dict() for node in nodes]}
    resp, body = self.api.method_post("/loadbalancers/%s/nodes" % lb.id,
                                      body=payload)
    return resp, body
Adds the list of nodes to the specified load balancer.
def flat_unity(length, delta_f, low_freq_cutoff):
    """Returns a FrequencySeries of ones above the low_frequency_cutoff.

    Parameters
    ----------
    length : int
        Length of output Frequencyseries.
    delta_f : float
        Frequency step for output FrequencySeries.
    low_freq_cutoff : int
        Low-frequency cutoff for output FrequencySeries.

    Returns
    -------
    FrequencySeries
        Returns a FrequencySeries containing the unity PSD model.
    """
    unity = FrequencySeries(numpy.ones(length), delta_f=delta_f)
    # Zero out every bin below the cutoff frequency.
    kmin = int(low_freq_cutoff / unity.delta_f)
    unity.data[:kmin] = 0
    return unity
Returns a FrequencySeries of ones above the low_frequency_cutoff. Parameters ---------- length : int Length of output Frequencyseries. delta_f : float Frequency step for output FrequencySeries. low_freq_cutoff : int Low-frequency cutoff for output FrequencySeries. Returns ------- FrequencySeries Returns a FrequencySeries containing the unity PSD model.