code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    """Wait for a Linode job to complete.

    linode_id
        The ID of the Linode the job runs on. Required.
    job_id
        The ID of the job to wait for.
    timeout
        Seconds to wait before giving up.
    quiet
        Log progress at DEBUG when True, otherwise at INFO.

    Returns True if the job reported success within the timeout,
    False otherwise.
    """
    interval = 5  # seconds between polls
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        jobs_result = _query('linode', 'job.list',
                             args={'LinodeID': linode_id})['DATA']
        # Only the most recent job is inspected; HOST_SUCCESS == 1 means done.
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.',
            job_id,
            linode_id
        )
    return False
Wait for a Job to return. linode_id The ID of the Linode to wait on. Required. job_id The ID of the job to wait for. timeout The amount of time to wait for a status to update. quiet Log status updates to debug logs when True. Otherwise, logs to info.
def join(self):
    """Wait for the transfer to exit, raising errors as necessary.

    Drains the wait_change queue until every expected completion has been
    seen.  If any worker reported an exception, all greenlets are joined
    then killed and the exception is re-raised.
    """
    self.closed = True
    while self.expect > 0:
        val = self.wait_change.get()
        self.expect -= 1
        if val is not None:
            # A worker pushed an exception: tear everything down, re-raise.
            gevent.joinall(list(self.greenlets), timeout=30)
            gevent.killall(list(self.greenlets), block=True, timeout=30)
            raise val
Wait for transfer to exit, raising errors as necessary.
def openDatFile(datpath):
    """Open a file-like object using a package-relative path.

    Example:
        fd = openDatFile('foopkg.barpkg/wootwoot.bin')

    Returns a binary file object, or None when the package module was not
    loaded from a plain file on disk.
    """
    pkgname, filename = datpath.split('/', 1)
    pkgmod = s_dyndeps.getDynMod(pkgname)
    pkgfile = os.path.abspath(pkgmod.__file__)
    if os.path.isfile(pkgfile):
        dirname = os.path.dirname(pkgfile)
        datname = os.path.join(dirname, filename)
        return open(datname, 'rb')
Open a file-like object using a pkg relative path. Example: fd = openDatFile('foopkg.barpkg/wootwoot.bin')
def create_custom_field(self, field_name, data_type, options=None,
                        visible_in_preference_center=True):
    """Create a new custom field for this list.

    Args:
        field_name: Display name of the new field.
        data_type: Custom field data type.
        options: Optional list of options (for multi-option field types).
        visible_in_preference_center: Whether subscribers can see the field.

    Returns:
        The parsed API response.
    """
    # Bug fix: `options=[]` was a mutable default argument shared across
    # calls; use None and substitute a fresh empty list per call.
    body = {
        "FieldName": field_name,
        "DataType": data_type,
        "Options": options if options is not None else [],
        "VisibleInPreferenceCenter": visible_in_preference_center}
    response = self._post(self.uri_for("customfields"), json.dumps(body))
    return json_to_py(response)
Creates a new custom field for this list.
def reset_current_row(self, *args, **kwargs):
    """Reset the currently selected row's value to its default value.

    :returns: None
    :rtype: None
    :raises: None
    """
    i = self.configobj_treev.currentIndex()
    m = self.configobj_treev.model()
    m.restore_default(i)
Reset the selected rows value to its default value :returns: None :rtype: None :raises: None
def parse_timedelta(deltastr):
    """Parse a string describing a period of time into total seconds.

    Returns None when the string does not match TIMEDELTA_REGEX.
    """
    matches = TIMEDELTA_REGEX.match(deltastr)
    if not matches:
        return None
    components = {}
    for name, value in matches.groupdict().items():
        if value:
            components[name] = int(value)
    # timedelta() has no 'years' argument; fold days/years into hours first.
    for period, hours in (('days', 24), ('years', 8766)):
        if period in components:
            components['hours'] = components.get('hours', 0) + \
                components[period] * hours
            del components[period]
    return int(timedelta(**components).total_seconds())
Parse a string describing a period of time.
def _get_vm_by_id(vmid, allDetails=False):
    """Retrieve a VM's details dict based on its numeric ID.

    Returns False (and logs at INFO) when no VM with that ID exists.
    """
    for vm_name, vm_details in six.iteritems(get_resources_vms(includeConfig=allDetails)):
        # Compare as text so int and str IDs both match.
        if six.text_type(vm_details['vmid']) == six.text_type(vmid):
            return vm_details
    log.info('VM with ID "%s" could not be found.', vmid)
    return False
Retrieve a VM based on the ID.
def _default_warning_handler(library_msg, _):
    """Default warning handler callback.

    Decodes the raw library message bytes and re-emits the text as a
    Python ``UserWarning``.
    """
    text = library_msg.decode('utf-8').rstrip()
    warnings.warn("OpenJPEG library warning: {0}".format(text), UserWarning)
Default warning handler callback.
def truncate_table(self, tablename):
    """Use 'TRUNCATE TABLE' to truncate the given table, then commit.

    NOTE(review): tablename is interpolated directly into the SQL
    statement — only pass trusted identifiers.
    """
    self.cursor.execute('TRUNCATE TABLE %s' %tablename)
    self.db.commit()
Use 'TRUNCATE TABLE' to truncate the given table
def _disk(self):
    """Record disk usage (percent) for self.device as a metric and gauge.

    Raises CommandError when the device maps to anything other than
    exactly one mountpoint.
    """
    mountpoints = [
        p.mountpoint for p in psutil.disk_partitions()
        if p.device.endswith(self.device)
    ]
    if len(mountpoints) != 1:
        raise CommandError("Unknown device: {0}".format(self.device))
    value = int(psutil.disk_usage(mountpoints[0]).percent)
    set_metric("disk-{0}".format(self.device), value, category=self.category)
    gauge("disk-{0}".format(self.device), value)
Record Disk usage.
def plot_neuron(ax, nrn,
                neurite_type=NeuriteType.all,
                plane='xy',
                soma_outline=True,
                diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
                color=None, alpha=_ALPHA):
    """Plot a 2D figure of the neuron: the soma plus all its neurites.

    Args:
        ax(matplotlib axes): axes to plot on
        nrn(neuron): neuron to be plotted
        neurite_type(NeuriteType): optional filter on the neurite type
        plane(str): any pair of 'xyz'
        soma_outline(bool): whether the soma is drawn as an outline
        diameter_scale(float): scale factor multiplied with segment
            diameters before plotting
        linewidth(float): width used for all segments, but only when
            diameter_scale is None
        color(str or None): color of plotted values; None = default choice
        alpha(float): transparency of plotted values
    """
    plot_soma(ax, nrn.soma, plane=plane, soma_outline=soma_outline,
              linewidth=linewidth, color=color, alpha=alpha)
    for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)):
        plot_tree(ax, neurite, plane=plane,
                  diameter_scale=diameter_scale, linewidth=linewidth,
                  color=color, alpha=alpha)
    ax.set_title(nrn.name)
    ax.set_xlabel(plane[0])
    ax.set_ylabel(plane[1])
Plots a 2D figure of the neuron, that contains a soma and the neurites Args: ax(matplotlib axes): on what to plot neurite_type(NeuriteType): an optional filter on the neurite type nrn(neuron): neuron to be plotted soma_outline(bool): should the soma be drawn as an outline plane(str): Any pair of 'xyz' diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
async def _receive(self, stream_id, pp_id, data):
    """Forward received stream data to the upper-layer protocol (ULP)."""
    await self._data_channel_receive(stream_id, pp_id, data)
Receive data stream -> ULP.
def archive_sciobj(pid):
    """Set the status of a science object to archived.

    Preconditions:
    - The object with the pid is verified to exist.
    - The object is not a replica.
    - The object is not already archived.
    """
    sciobj_model = d1_gmn.app.model_util.get_sci_model(pid)
    sciobj_model.is_archived = True
    sciobj_model.save()
    _update_modified_timestamp(sciobj_model)
Set the status of an object to archived. Preconditions: - The object with the pid is verified to exist. - The object is not a replica. - The object is not archived.
def count_annotation_entries(self) -> int:
    """Count the number of annotation entries in the database."""
    return self.session.query(NamespaceEntry).filter(NamespaceEntry.is_annotation).count()
Count the number of annotation entries in the database.
def write(self, file_path, template, context=None, preserve=False, force=False):
    """Render *template* into *file_path* using *context*.

    Args:
        file_path: Target file path.
        template: Template file name.
        context: Rendering context; falls back to self.context when falsy.
        preserve: Passed through to the underlying writer.
        force: Passed through to the underlying writer.

    Template errors are reported via click; in strict mode the process
    exits with status 1.
    """
    if not file_path or not template:
        click.secho('source or target missing for document')
        return
    # Bug fix: `context={}` was a mutable default argument.  A falsy
    # context (None or {}) falls back to the generator's own context,
    # exactly as before.
    if not context:
        context = self.context
    error = False
    try:
        self._write(file_path, template, context, preserve, force)
    except TemplateSyntaxError as exc:
        message = '{0}:{1}: error: {2}'.format(exc.filename, exc.lineno,
                                               exc.message)
        click.secho(message, fg='red', err=True)
        error = True
    except TemplateNotFound as exc:
        message = '{0}: error: Template not found'.format(exc.name)
        click.secho(message, fg='red', err=True)
        error = True
    except TemplateError as exc:
        error = True
    if error and Generator.strict:
        sys.exit(1)
Using a template file name it renders a template into a file given a context
def crop_coords(img, padding):
    """Find coordinates describing the padded extent of the non-zero
    portion of *img*.

    Returns (beg_coords, end_coords): per-axis start and end indices,
    clipped to the image bounds.  For an all-zero image the full extent
    is returned.
    """
    nonzero = np.nonzero(img)
    if any(len(axis) == 0 for axis in nonzero):
        # Nothing non-zero anywhere: fall back to the whole image.
        begin = np.zeros((1, img.ndim)).astype(int).flatten()
        end = img.shape
    else:
        lows = np.array([axis.min() for axis in nonzero])
        highs = np.array([axis.max() for axis in nonzero])
        begin = np.fmax(0, lows - padding)
        end = np.fmin(img.shape, highs + padding)
    return begin, end
Find coordinates describing extent of non-zero portion of image, padded
def get_domain(self):
    """Get the Streams domain for the instance that owns this view.

    Returns:
        Domain: Streams domain for the instance owning this view, or
        None when no domain reference is present on this object.
    """
    if hasattr(self, 'domain'):
        return Domain(self.rest_client.make_request(self.domain), self.rest_client)
Get the Streams domain for the instance that owns this view. Returns: Domain: Streams domain for the instance owning this view.
def create_router(self, context, router):
    """Create a new router entry in the DB, and create it on Arista HW.

    On hardware failure the DB record is deleted again and the original
    exception is re-raised.
    """
    new_router = super(AristaL3ServicePlugin, self).create_router(
        context, router)
    try:
        self.driver.create_router(context, new_router)
        return new_router
    except Exception:
        # Roll back the DB entry, then re-raise the original exception.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error creating router on Arista HW router=%s "),
                      new_router)
            super(AristaL3ServicePlugin, self).delete_router(
                context, new_router['id']
            )
Create a new router entry in DB, and create it Arista HW.
def _assert_explicit_vr(dicom_input):
    """Assert that explicit VR is used.

    Raises ConversionError for implicit VR little endian
    (transfer syntax UID 1.2.840.10008.1.2) when multiframe validation
    is enabled in settings.
    """
    if settings.validate_multiframe_implicit:
        header = dicom_input[0]
        # (0002,0010) is the Transfer Syntax UID element.
        if header.file_meta[0x0002, 0x0010].value == '1.2.840.10008.1.2':
            raise ConversionError('IMPLICIT_VR_ENHANCED_DICOM')
Assert that explicit vr is used
def _compute_dlt(self):
    """Apply the Lead Time Adjustment Factor (LTAF), if any, to the DLT."""
    res = super()._compute_dlt()
    for rec in self:
        ltaf_to_apply = self.env['ddmrp.adjustment'].search(
            rec._ltaf_to_apply_domain())
        if ltaf_to_apply:
            # Multiply all applicable adjustment factors together.
            ltaf = 1
            values = ltaf_to_apply.mapped('value')
            for val in values:
                ltaf *= val
            prev = rec.dlt
            rec.dlt *= ltaf
            _logger.debug(
                "LTAF=%s applied to %s. DLT: %s -> %s" % (
                    ltaf, rec.name, prev, rec.dlt))
    return res
Apply Lead Time Adj Factor if existing
def get_wsgi_filter(self, name=None, defaults=None):
    """Read the configuration source and load the named WSGI filter.

    :param name: The named WSGI filter to find, load and return.
        Defaults to ``None`` which becomes ``main`` inside
        :func:`paste.deploy.loadfilter`.
    :param defaults: The ``global_conf`` used during filter instantiation.
    :return: A callable that can filter a WSGI application.
    """
    name = self._maybe_get_default_name(name)
    defaults = self._get_defaults(defaults)
    return loadfilter(
        self.pastedeploy_spec,
        name=name,
        relative_to=self.relative_to,
        global_conf=defaults,
    )
Reads the configuration soruce and finds and loads a WSGI filter defined by the filter entry with the name ``name`` per the PasteDeploy configuration format and loading mechanism. :param name: The named WSGI filter to find, load and return. Defaults to ``None`` which becomes ``main`` inside :func:`paste.deploy.loadfilter`. :param defaults: The ``global_conf`` that will be used during filter instantiation. :return: A callable that can filter a WSGI application.
def init(self): "Initialize the message-digest and set all fields to zero." self.length = 0L self.input = [] self.A = 0x67452301L self.B = 0xefcdab89L self.C = 0x98badcfeL self.D = 0x10325476L
Initialize the message-digest and set all fields to zero.
def check_auth(self, all_credentials):
    """Update this socket's authentication.

    Log in or out to bring this socket's credentials up to date with
    those provided.  Can raise ConnectionFailure or OperationFailure.

    :Parameters:
      - `all_credentials`: dict, maps auth source to MongoCredential.
    """
    if all_credentials or self.authset:
        cached = set(itervalues(all_credentials))
        authset = self.authset.copy()
        # Log out of sources that are no longer wanted.
        for credentials in authset - cached:
            auth.logout(credentials.source, self)
            self.authset.discard(credentials)
        # Log in to newly requested sources.
        for credentials in cached - authset:
            auth.authenticate(credentials, self)
            self.authset.add(credentials)
Update this socket's authentication. Log in or out to bring this socket's credentials up to date with those provided. Can raise ConnectionFailure or OperationFailure. :Parameters: - `all_credentials`: dict, maps auth source to MongoCredential.
def permute(x: SYM, perm: List[int]) -> SYM:
    """Permute a vector.

    Returns a new vector whose i-th entry is ``x[perm[i]]``.
    """
    entries = [x[index] for index in perm]
    return ca.vertcat(*entries)
Permute a vector.
def delete_role(self, name):
    """Delete a role by name.

    @param name: Role name
    @return: The deleted ApiRole object
    """
    return roles.delete_role(self._get_resource_root(), self.name, name,
                             self._get_cluster_name())
Delete a role by name. @param name: Role name @return: The deleted ApiRole object
def load_lang_model(self):
    """Load the spaCy language model for ``self.lang``, downloading it
    first when available but not yet installed.

    Full models come from ``self.languages``; alpha languages are built
    from their ``spacy.lang`` module via ``self.alpha_languages``.
    The loaded model is stored on ``self.model``.
    """
    if self.lang in self.languages:
        if not Spacy.model_installed(self.lang):
            # Fetch the model on first use.
            download(self.lang)
        model = spacy.load(self.lang)
    elif self.lang in self.alpha_languages:
        language_module = importlib.import_module(f"spacy.lang.{self.lang}")
        language_method = getattr(language_module, self.alpha_languages[self.lang])
        model = language_method()
    else:
        # Bug fix: an unsupported language previously fell through and
        # raised an opaque UnboundLocalError on `model`; fail clearly.
        raise ValueError(f"Unsupported language: {self.lang!r}")
    self.model = model
Load spaCy language model or download if model is available and not installed. Currently supported spaCy languages en English (50MB) de German (645MB) fr French (1.33GB) es Spanish (377MB) :return:
def are_genes_in_api(my_clue_api_client, gene_symbols):
    """Determine which of the given gene symbols are present in the API.

    Args:
        my_clue_api_client: CLUE API client.
        gene_symbols: collection of gene symbols to query the API with.

    Returns:
        set of the found gene symbols (empty set, with a warning, when
        the input collection is empty).
    """
    if len(gene_symbols) > 0:
        query_gene_symbols = gene_symbols if type(gene_symbols) is list else list(gene_symbols)
        query_result = my_clue_api_client.run_filter_query(
            resource_name,
            {"where": {"gene_symbol": {"inq": query_gene_symbols}},
             "fields": {"gene_symbol": True}})
        logger.debug("query_result: {}".format(query_result))
        r = set([x["gene_symbol"] for x in query_result])
        return r
    else:
        logger.warning("provided gene_symbols was empty, cannot run query")
        return set()
determine if genes are present in the API Args: my_clue_api_client: gene_symbols: collection of gene symbols to query the API with Returns: set of the found gene symbols
def attribute(func):
    """Wrap a function as an abstract attribute.

    Marks *func* abstract, tags it with ``__iattribute__`` and exposes
    it through the property wrapper.
    """
    wrapped = abc.abstractmethod(func)
    wrapped.__iattribute__ = True
    return _property(wrapped)
Wrap a function as an attribute.
def post(self, request, *args, **kwargs):
    """Handle a dialog POST request.

    Loads the target object, checks permissions, and renders either an
    error dialog or the handled dialog response.
    """
    try:
        kwargs = self.load_object(kwargs)
    except Exception as e:
        # Object lookup failed: surface the error as the dialog title.
        return self.render_te_response({
            'title': str(e),
        })
    if not self.has_permission(request):
        return self.render_te_response({
            'title': 'No access',
        })
    return self.render_te_response(self.handle_dialog(*args, **kwargs))
Handle post request
def block_partition(block, i):
    """Split *block* after its i-th instruction.

    Returns (block, new_block): the original block truncated after
    instruction i, and a new block holding the remainder.  All flow links
    (goes_to / comes_from / next / prev / labels) are rewired.
    """
    i += 1
    # The new block takes the tail of the instruction stream.
    new_block = BasicBlock(block.asm[i:])
    block.mem = block.mem[:i]
    block.asm = block.asm[:i]
    block.update_labels()
    new_block.update_labels()
    # Outgoing edges and label targets move to the new block.
    new_block.goes_to = block.goes_to
    block.goes_to = IdentitySet()
    new_block.label_goes = block.label_goes
    block.label_goes = []
    # Chain: block -> new_block -> old successor.
    new_block.next = new_block.original_next = block.original_next
    new_block.prev = block
    new_block.add_comes_from(block)
    if new_block.next is not None:
        new_block.next.prev = new_block
        new_block.next.add_comes_from(new_block)
        new_block.next.delete_from(block)
    block.next = block.original_next = new_block
    block.update_next_block()
    block.add_goes_to(new_block)
    return block, new_block
Returns two blocks, as a result of partitioning the given one at i-th instruction.
def get(self, attri):
    """Return the data associated with the named attribute.

    Looks the name up first among the data columns, then among the
    header attributes; prints a notice and returns None when absent.

    Parameters
    ----------
    attri : string
        The attribute we are looking for.
    """
    isCol = False
    isHead = False
    if attri in self.dcols:
        isCol = True
    elif attri in self.hattrs:
        isHead = True
    else:
        print("That attribute does not exist in this File")
        print('Returning None')
    if isCol:
        return self.getColData(attri)
    elif isHead:
        # Bug fix: previously returned the bare name `hattrs`, which is
        # a NameError at runtime.
        # NOTE(review): assumes self.hattrs maps names to values — confirm.
        return self.hattrs[attri]
Method that dynamically determines the type of attribute that is passed into this method. Also it then returns that attribute's associated data. Parameters ---------- attri : string The attribute we are looking for.
def execute_cell_input(self, cell_input, allow_stdin=None):
    """Execute a string of python code as cell input on the kernel.

    stdin requests are not expected, matching notebook behaviour.

    Returns the unique message id of the reply from the kernel.
    """
    if cell_input:
        # Log only the first 40 chars of the first line for brevity.
        logger.debug('Executing cell: "%s"...', cell_input.splitlines()[0][:40])
    else:
        logger.debug('Executing empty cell')
    return self.kc.execute(cell_input, allow_stdin=allow_stdin,
                           stop_on_error=False)
Executes a string of python code in cell input. We do not allow the kernel to make requests to the stdin this is the norm for notebooks Function returns a unique message id of the reply from the kernel.
def create_media_assetfile(access_token, parent_asset_id, name, is_primary="false",
                           is_encrypted="false", encryption_scheme="None",
                           encryptionkey_id="None"):
    """Create a Media Service Asset File.

    Args:
        access_token (str): A valid Azure authentication token.
        parent_asset_id (str): Media Service Parent Asset ID.
        name (str): Media Service Asset Name.
        is_primary (str): Media Service Primary Flag.
        is_encrypted (str): Media Service Encryption Flag.
        encryption_scheme (str): Media Service Encryption Scheme.
        encryptionkey_id (str): Media Service Encryption Key ID.

    Returns:
        HTTP response. JSON body.
    """
    # Local import so this fix is self-contained.
    import json

    path = '/Files'
    endpoint = ''.join([ams_rest_endpoint, path])
    # Build the request body with json.dumps instead of hand-concatenated
    # strings, so names containing quotes cannot corrupt the payload.
    payload = {
        "IsPrimary": is_primary,
        "MimeType": "video/mp4",
        "Name": name,
        "ParentAssetId": parent_asset_id,
    }
    if encryption_scheme == "StorageEncryption":
        payload.update({
            "IsEncrypted": is_encrypted,
            "EncryptionScheme": encryption_scheme,
            "EncryptionVersion": "1.0",
            "EncryptionKeyId": encryptionkey_id,
        })
    return do_ams_post(endpoint, path, json.dumps(payload), access_token)
Create Media Service Asset File. Args: access_token (str): A valid Azure authentication token. parent_asset_id (str): Media Service Parent Asset ID. name (str): Media Service Asset Name. is_primary (str): Media Service Primary Flag. is_encrypted (str): Media Service Encryption Flag. encryption_scheme (str): Media Service Encryption Scheme. encryptionkey_id (str): Media Service Encryption Key ID. Returns: HTTP response. JSON body.
def list():
    """List virtual folders that belong to the current user (CLI command)."""
    # NOTE: intentionally named `list` (CLI command name); shadows the builtin.
    fields = [
        ('Name', 'name'),
        ('ID', 'id'),
        ('Owner', 'is_owner'),
        ('Permission', 'permission'),
    ]
    with Session() as session:
        try:
            resp = session.VFolder.list()
            if not resp:
                print('There is no virtual folders created yet.')
                return
            rows = (tuple(vf[key] for _, key in fields) for vf in resp)
            hdrs = (display_name for display_name, _ in fields)
            print(tabulate(rows, hdrs))
        except Exception as e:
            print_error(e)
            sys.exit(1)
List virtual folders that belongs to the current user.
def _custom_diag_normal_kl(lhs, rhs, name=None):
    """Empirical KL divergence of two normals with diagonal covariance.

    Args:
        lhs: Diagonal Normal distribution.
        rhs: Diagonal Normal distribution.
        name: Name scope for the op.

    Returns:
        KL divergence from lhs to rhs.
    """
    with tf.name_scope(name or 'kl_divergence'):
        mean0 = lhs.mean()
        mean1 = rhs.mean()
        logstd0 = tf.log(lhs.stddev())
        logstd1 = tf.log(rhs.stddev())
        logstd0_2, logstd1_2 = 2 * logstd0, 2 * logstd1
        # Closed-form KL for diagonal Gaussians, summed over the last axis.
        return 0.5 * (
            tf.reduce_sum(tf.exp(logstd0_2 - logstd1_2), -1) +
            tf.reduce_sum((mean1 - mean0) ** 2 / tf.exp(logstd1_2), -1) +
            tf.reduce_sum(logstd1_2, -1) -
            tf.reduce_sum(logstd0_2, -1) -
            mean0.shape[-1].value)
Empirical KL divergence of two normals with diagonal covariance. Args: lhs: Diagonal Normal distribution. rhs: Diagonal Normal distribution. name: Name scope for the op. Returns: KL divergence from lhs to rhs.
def signed_cell_areas(self):
    """Signed area of each triangle cell (2D meshes only); cached."""
    assert (
        self.node_coords.shape[1] == 2
    ), "Signed areas only make sense for triangles in 2D."
    if self._signed_cell_areas is None:
        # Shoelace formula over the three corner points of each cell.
        p = self.node_coords[self.cells["nodes"]].T
        self._signed_cell_areas = (
            +p[0][2] * (p[1][0] - p[1][1])
            + p[0][0] * (p[1][1] - p[1][2])
            + p[0][1] * (p[1][2] - p[1][0])
        ) / 2
    return self._signed_cell_areas
Signed area of a triangle in 2D.
def _update_expander_status(self, message):
    """Use the provided message to update the expander relay states.

    :param message: message to use to update
    :type message: :py:class:`~alarmdecoder.messages.ExpanderMessage`
    :returns: the new relay value for RELAY messages; None for any
        other message type
    """
    if message.type == ExpanderMessage.RELAY:
        self._relay_status[(message.address, message.channel)] = message.value
        # Notify listeners about the state change.
        self.on_relay_changed(message=message)
        return self._relay_status[(message.address, message.channel)]
Uses the provided message to update the expander states. :param message: message to use to update :type message: :py:class:`~alarmdecoder.messages.ExpanderMessage` :returns: boolean indicating the new status
def validateInt(value, blank=False, strip=None, allowlistRegexes=None,
                blocklistRegexes=None, min=None, max=None, lessThan=None,
                greaterThan=None, excMsg=None):
    """Raises ValidationException if value is not an int.

    Returns the value, so it can be used inline in an expression:

        print(2 + validateInt(your_number))

    All parameters are forwarded to validateNum() with _numType='int';
    see that function for their meaning.
    """
    # Bug fix: allowlistRegexes and excMsg were silently dropped
    # (hard-coded to None / omitted) instead of being forwarded.
    return validateNum(value=value, blank=blank, strip=strip,
                       allowlistRegexes=allowlistRegexes,
                       blocklistRegexes=blocklistRegexes, _numType='int',
                       min=min, max=max, lessThan=lessThan,
                       greaterThan=greaterThan, excMsg=excMsg)
Raises ValidationException if value is not a int. Returns value, so it can be used inline in an expression: print(2 + validateInt(your_number)) Note that since int() and ignore leading or trailing whitespace when converting a string to a number, so does this validateNum(). * value (str): The value being validated as an int or float. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * _numType (str): One of 'num', 'int', or 'float' for the kind of number to validate against, where 'num' means int or float. * min (int, float): The (inclusive) minimum value for the value to pass validation. * max (int, float): The (inclusive) maximum value for the value to pass validation. * lessThan (int, float): The (exclusive) minimum value for the value to pass validation. * greaterThan (int, float): The (exclusive) maximum value for the value to pass validation. * excMsg (str): A custom message to use in the raised ValidationException. If you specify min or max, you cannot also respectively specify lessThan or greaterThan. Doing so will raise PySimpleValidateException. >>> import pysimplevalidate as pysv >>> pysv.validateInt('42') 42 >>> pysv.validateInt('forty two') Traceback (most recent call last): ... pysimplevalidate.ValidationException: 'forty two' is not an integer.
def currencies(self):
    """Fetch the service's current currency code -> name mapping.

    Raises OpenExchangeRatesClientException on any request failure.

    :Example Data:
        { AED: "United Arab Emirates Dirham", AFN: "Afghan Afghani", ... }
    """
    try:
        resp = self.client.get(self.ENDPOINT_CURRENCIES)
    except requests.exceptions.RequestException as e:
        raise OpenExchangeRatesClientException(e)
    return resp.json()
Fetches current currency data of the service :Example Data: { AED: "United Arab Emirates Dirham", AFN: "Afghan Afghani", ALL: "Albanian Lek", AMD: "Armenian Dram", ANG: "Netherlands Antillean Guilder", AOA: "Angolan Kwanza", ARS: "Argentine Peso", AUD: "Australian Dollar", AWG: "Aruban Florin", AZN: "Azerbaijani Manat" ... }
def id_request(self):
    """Request a device ID from the device.

    The caller's name is logged for debugging before the command is
    sent via the PLM.
    """
    import inspect
    curframe = inspect.currentframe()
    calframe = inspect.getouterframes(curframe, 2)
    _LOGGER.debug('caller name: %s', calframe[1][3])
    msg = StandardSend(self.address, COMMAND_ID_REQUEST_0X10_0X00)
    self._plm.send_msg(msg)
Request a device ID from a device.
def determine(value):
    """Analyse the value and return a tuple containing the parts it's
    made of.

    The tuple respectively consists of the base note value, the number
    of dots, and the ratio (see tuplet).  Recognizes all base values,
    triplets, quintuplets, septuplets and up to four dots; values are
    matched on range.

    Examples:
    >>> determine(8)
    (8, 0, 1, 1)
    >>> determine(12)
    (8, 0, 3, 2)
    >>> determine(14)
    (8, 0, 7, 4)
    """
    i = -2
    # Walk the base values to bracket `value`; after the loop, `v` holds
    # the last base value examined and `i` indexes the one below it.
    for v in base_values:
        if value == v:
            return (value, 0, 1, 1)
        if value < v:
            break
        i += 1
    scaled = float(value) / 2 ** i
    if scaled >= 0.9375:
        return (base_values[i], 0, 1, 1)
    elif scaled >= 0.8125:
        return (base_values[i + 1], 0, 7, 4)
    elif scaled >= 17 / 24.0:
        return (base_values[i + 1], 0, 3, 2)
    elif scaled >= 31 / 48.0:
        return (v, 1, 1, 1)
    elif scaled >= 67 / 112.0:
        return (base_values[i + 1], 0, 5, 4)
    # Dotted values: match scaled against 2**x / (2**(x+1) - 1).
    d = 3
    for x in range(2, 5):
        d += 2 ** x
        if scaled == 2.0 ** x / d:
            return (v, x, 1, 1)
    return (base_values[i + 1], 0, 1, 1)
Analyse the value and return a tuple containing the parts it's made of. The tuple respectively consists of the base note value, the number of dots, and the ratio (see tuplet). Examples: >>> determine(8) (8, 0, 1, 1) >>> determine(12) (8, 0, 3, 2) >>> determine(14) (8, 0, 7, 4) This function recognizes all the base values, triplets, quintuplets, septuplets and up to four dots. The values are matched on range.
def check_apm_out(self):
    """Check if we should send new HIL state data to the APM.

    Sends are rate-limited to one per 20ms (50Hz).
    """
    now = time.time()
    if now - self.last_apm_send_time < 0.02:
        return
    self.last_apm_send_time = now
    if self.hil_state_msg is not None:
        self.master.mav.send(self.hil_state_msg)
check if we should send new data to the APM
def _modulo(self, decimal_argument):
    """Compute ``decimal_argument mod 100`` with decimal arithmetic.

    The float ``%`` operator is prone to rounding errors, so the
    context's divmod is used and negative remainders are shifted into
    the [0, 100) range.
    """
    remainder = self._context.divmod(decimal_argument, 100)[1]
    if remainder < 0:
        remainder += 100
    return remainder
The mod operator is prone to floating point errors, so use decimal. 101.1 % 100 >>> 1.0999999999999943 decimal_context.divmod(Decimal('100.1'), 100) >>> (Decimal('1'), Decimal('0.1'))
def _stop_scan(self):
    """Stop scanning for BLE devices.

    Returns a (success, retval) tuple where retval carries a failure
    reason dict (or None on clean success).
    """
    try:
        response = self._send_command(6, 4, [])
        if response.payload[0] != 0:
            # Error code 129 means we were not scanning; treat as success.
            if response.payload[0] != 129:
                self._logger.error('Error stopping scan for devices, error=%d',
                                   response.payload[0])
                return False, {'reason': "Could not stop scan for ble devices"}
    except InternalTimeoutError:
        return False, {'reason': "Timeout waiting for response"}
    except DeviceNotConfiguredError:
        return True, {'reason': "Device not connected (did you disconnect the dongle?"}
    return True, None
Stop scanning for BLE devices
def write_message(
    self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
    """Send a message to the WebSocket server.

    If the stream is closed, raises `WebSocketClosedError`.  Returns a
    `.Future` which can be used for flow control.

    .. versionchanged:: 5.0
       Exception raised on a closed stream changed from
       `.StreamClosedError` to `WebSocketClosedError`.
    """
    return self.protocol.write_message(message, binary=binary)
Sends a message to the WebSocket server. If the stream is closed, raises `WebSocketClosedError`. Returns a `.Future` which can be used for flow control. .. versionchanged:: 5.0 Exception raised on a closed stream changed from `.StreamClosedError` to `WebSocketClosedError`.
def get_scanner_param_default(self, param):
    """Return the default value of a scanner parameter, or None when the
    parameter (or its default) is unknown."""
    assert isinstance(param, str)
    entry = self.scanner_params.get(param)
    return entry.get('default') if entry else None
Returns default value of a scanner parameter.
def is_all_initialized(self):
    """Return whether all the registered classes have instances.

    Returns:
        bool
    """
    registered = frozenset(self._class_map.keys())
    created = frozenset(self._instance_map.keys())
    return registered == created
Return whether all the instances have been initialized. Returns: bool
def create_ip_arp_request(srchw, srcip, targetip):
    """Create and return a packet containing an Ethernet header and an
    ARP request header."""
    ether = Ethernet()
    ether.src = srchw
    ether.dst = SpecialEthAddr.ETHER_BROADCAST.value
    ether.ethertype = EtherType.ARP
    arp = Arp()
    arp.operation = ArpOperation.Request
    arp.senderhwaddr = srchw
    arp.senderprotoaddr = srcip
    # Target hardware address is unknown; broadcast per ARP convention.
    arp.targethwaddr = SpecialEthAddr.ETHER_BROADCAST.value
    arp.targetprotoaddr = targetip
    return ether + arp
Create and return a packet containing an Ethernet header and ARP header.
def symlink(source, destination, adapter=None, must_exist=True, fatal=True, logger=LOG.debug):
    """Symlink source <- destination.

    :param str|None source: Source file or folder
    :param str|None destination: Destination file or folder
    :param callable adapter: Optional function to call on 'source' before copy
    :param bool must_exist: If True, verify that source does indeed exist
    :param bool|None fatal: Abort execution on failure if True
    :param callable|None logger: Logger to use
    :return int: 1 if effectively done, 0 if no-op, -1 on failure
    """
    return _file_op(source, destination, _symlink, adapter, fatal, logger,
                    must_exist=must_exist)
Symlink source <- destination :param str|None source: Source file or folder :param str|None destination: Destination file or folder :param callable adapter: Optional function to call on 'source' before copy :param bool must_exist: If True, verify that source does indeed exist :param bool|None fatal: Abort execution on failure if True :param callable|None logger: Logger to use :return int: 1 if effectively done, 0 if no-op, -1 on failure
def labeller(rows=None, cols=None, multi_line=True,
             default=label_value, **kwargs):
    """Return a labeller function.

    Parameters
    ----------
    rows : str | function | None
        How to label the rows.
    cols : str | function | None
        How to label the columns.
    multi_line : bool
        Whether to place each variable on a separate line.
    default : function | str
        Fallback labelling function, or the name of one provided
        by plotnine.
    kwargs : dict
        {variable name : function | string} pairs for renaming variables.

    Returns
    -------
    out : function
        Function to do the labelling.
    """
    rows_labeller = as_labeller(rows, default, multi_line)
    cols_labeller = as_labeller(cols, default, multi_line)

    def _labeller(label_info):
        # Pick the margin-specific labeller, then let per-variable
        # overrides from kwargs take precedence.
        if label_info._meta['dimension'] == 'rows':
            margin_labeller = rows_labeller
        else:
            margin_labeller = cols_labeller
        label_info = label_info.astype(str)
        for name, value in label_info.iteritems():
            func = as_labeller(kwargs.get(name), margin_labeller)
            new_info = func(label_info[[name]])
            label_info[name] = new_info[name]
        if not multi_line:
            label_info = collapse_label_lines(label_info)
        return label_info

    return _labeller
Return a labeller function Parameters ---------- rows : str | function | None How to label the rows cols : str | function | None How to label the columns multi_line : bool Whether to place each variable on a separate line default : function | str Fallback labelling function. If it is a string, it should be the name of one the labelling functions provided by plotnine. kwargs : dict {variable name : function | string} pairs for renaming variables. A function to rename the variable or a string name. Returns ------- out : function Function to do the labelling
def count_weights(scope=None, exclude=None, graph=None):
    """Count learnable parameters.

    Args:
        scope: Restrict the count to a variable scope.
        exclude: Regex to match variable names to exclude.
        graph: Operate on a graph other than the current default graph.

    Returns:
        Number of learnable parameters as integer.
    """
    if scope:
        # Normalize so 'foo' and 'foo/' behave the same.
        scope = scope if scope.endswith('/') else scope + '/'
    graph = graph or tf.get_default_graph()
    vars_ = graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    if scope:
        vars_ = [var for var in vars_ if var.name.startswith(scope)]
    if exclude:
        exclude = re.compile(exclude)
        vars_ = [var for var in vars_ if not exclude.match(var.name)]
    shapes = [var.get_shape().as_list() for var in vars_]
    return int(sum(np.prod(shape) for shape in shapes))
Count learnable parameters. Args: scope: Restrict the count to a variable scope. exclude: Regex to match variable names to exclude. graph: Operate on a graph other than the current default graph. Returns: Number of learnable parameters as integer.
def make_registry(directory, output, recursive=True):
    """Make a registry of files and hashes for the given directory.

    Parameters
    ----------
    directory : str
        Directory of the test data to put in the registry.  All file
        names in the registry will be relative to this directory.
    output : str
        Name of the output registry file.
    recursive : bool
        If True, also include files in subdirectories of *directory*.
    """
    directory = Path(directory)
    if recursive:
        pattern = "**/*"
    else:
        pattern = "*"
    files = sorted(
        [
            str(path.relative_to(directory))
            for path in directory.glob(pattern)
            if path.is_file()
        ]
    )
    hashes = [file_hash(str(directory / fname)) for fname in files]
    with open(output, "w") as outfile:
        for fname, fhash in zip(files, hashes):
            # Normalize Windows path separators so registries are portable.
            outfile.write("{} {}\n".format(fname.replace("\\", "/"), fhash))
Make a registry of files and hashes for the given directory. This is helpful if you have many files in your test dataset as it keeps you from needing to manually update the registry. Parameters ---------- directory : str Directory of the test data to put in the registry. All file names in the registry will be relative to this directory. output : str Name of the output registry file. recursive : bool If True, will recursively look for files in subdirectories of *directory*.
def view_totlosses(token, dstore):
    """Debugging view: total losses summed over all assets.

    Sanity check that the sum of the average losses over all assets
    equals the aggregate losses.
    """
    oq = dstore['oqparam']
    tot_losses = dstore['losses_by_asset']['mean'].sum(axis=0)
    return rst_table(tot_losses.view(oq.loss_dt()), fmt='%.6E')
This is a debugging view. You can use it to check that the total losses, i.e. the losses obtained by summing the average losses on all assets are indeed equal to the aggregate losses. This is a sanity check for the correctness of the implementation.
def __send_exc_clear(self, log_if_exc_set=None):
    """Clear the stored send exception and its timestamp.

    If an exception was previously set, optionally log
    *log_if_exc_set* at INFO level.
    """
    if log_if_exc_set is not None and self.__send_exc is not None:
        logger.info(log_if_exc_set)
    self.__send_exc_time = None
    self.__send_exc = None
Clear send exception and time. If exception was previously was set, optionally log log_if_exc_set at INFO level.
def save_matches(self, matches):
    """Save matches of a failed execution to the log file.

    :param matches: a list of match dicts (JSON-serializable)
    """
    # Make sure the log directory exists before writing.
    if not os.path.exists(os.path.dirname(self.location())):
        os.makedirs(os.path.dirname(self.location()))
    with open(self.location(), "w+") as f:
        # Only matches not yet processed are persisted.
        matches = [m for m in matches if not m['processed']]
        for m in matches:
            match_obj = json.dumps(m)
            f.write(match_obj + "\n")
Save matches of a failed execution to the log. :param matches: a list of matches in JSON format
def output(self, chunk):
    """Dispatch the given chunk onto all registered output queues.

    A ``None`` chunk is silently ignored.
    """
    if chunk is None:
        return
    for sink in self.output_queues:
        sink.put(chunk)
Dispatch the given Chunk onto all the registered output queues. If the chunk is None, it is silently ignored.
async def start_transaction(connection_name: Optional[str] = None) -> BaseTransactionWrapper:
    """Manually start a transaction and return its wrapper.

    The returned object exposes ``.rollback()`` and ``.commit()``; all
    db calls in the same coroutine context run inside the transaction
    until one of those methods ends it.

    :param connection_name: name of the connection to run with; optional
        when only one db connection exists.
    """
    connection = _get_connection(connection_name)
    transaction = connection._in_transaction()
    await transaction.start()
    return transaction
Function to manually control your transaction. Returns transaction object with ``.rollback()`` and ``.commit()`` methods. All db calls in same coroutine context will run into transaction before ending transaction with above methods. :param connection_name: name of connection to run with, optional if you have only one db connection
def build_pic_map(self):
    """Search the docx template for picture parts and store them in the
    pic_map dict."""
    if self.pic_to_replace:
        part = self.docx.part
        self.pic_map.update(self._img_filename_to_part(part))
        # Headers and footers carry their own relationships to images.
        for relid, rel in six.iteritems(self.docx.part.rels):
            if rel.reltype in (REL_TYPE.HEADER, REL_TYPE.FOOTER):
                self.pic_map.update(self._img_filename_to_part(rel.target_part))
Searches in docx template all the xml pictures tag and store them in pic_map dict
def compare_profiles(profile1, profile2):
    """Return the fraction of positions at which two profile strings
    agree (i.e. one minus the normalized Hamming distance).

    Args:
        profile1/2 (str): profile strings of equal length.

    Returns:
        similarity_ratio (float): ratio of similarity (0-1).
    """
    total = len(profile1)
    left = np.array(list(profile1))
    right = np.array(list(profile2))
    hits = np.sum(left == right)
    return hits / total
Given two profiles, determine the ratio of similarity, i.e. the hamming distance between the strings. Args: profile1/2 (str): profile string Returns: similarity_ratio (float): the ratio of similiarity (0-1)
def dispatch_queue(self):
    """Dispatch any queued requests.

    Called by the debugger when it stops.
    """
    # Snapshot and clear the queue under the lock.
    self.queue_lock.acquire()
    q = list(self.queue)
    self.queue = []
    self.queue_lock.release()
    log.debug("Dispatching requests: {}".format(q))
    # Handle every request first, then wake all the waiters.
    for req in q:
        req.response = self.dispatch_request(req)
    for req in q:
        req.signal()
Dispatch any queued requests. Called by the debugger when it stops.
def query(self, query):
    """Return an iterable of objects matching criteria expressed in
    `query`.

    Naively applies the query operations on the objects stored under
    ``query.key``; unknown keys produce an empty result cursor.

    Args:
        query: Query object describing the objects to return.

    Returns:
        iterable cursor with all objects matching criteria.
    """
    if str(query.key) in self._items:
        return query(self._items[str(query.key)].values())
    else:
        return query([])
Returns an iterable of objects matching criteria expressed in `query` Naively applies the query operations on the objects within the namespaced collection corresponding to ``query.key.path``. Args: query: Query object describing the objects to return. Raturns: iterable cursor with all objects matching criteria
def parabolic(f, x):
    """Parabolic interpolation around index *x* of sequence *f*.

    Fits a parabola through the three samples centred on ``x`` and
    returns the (position, value) of its vertex.
    """
    left, mid, right = f[x - 1], f[x], f[x + 1]
    vertex_x = x + 0.5 * (left - right) / (left - 2 * mid + right)
    vertex_y = mid - 0.25 * (left - right) * (vertex_x - x)
    return (vertex_x, vertex_y)
Interpolation. From ageobot, from somewhere else.
def rank_member_if(
        self, rank_conditional, member, score, member_data=None):
    """Rank a member in the leaderboard based on execution of the
    +rank_conditional+.

    @param rank_conditional [function] Function which must return +True+
        or +False+ that controls whether or not the member is ranked.
    @param member [String] Member name.
    @param score [float] Member score.
    @param member_data [String] Optional member_data.
    """
    self.rank_member_if_in(
        self.leaderboard_name, rank_conditional, member, score, member_data)
Rank a member in the leaderboard based on execution of the +rank_conditional+. The +rank_conditional+ is passed the following parameters: member: Member name. current_score: Current score for the member in the leaderboard. score: Member score. member_data: Optional member data. leaderboard_options: Leaderboard options, e.g. 'reverse': Value of reverse option @param rank_conditional [function] Function which must return +True+ or +False+ that controls whether or not the member is ranked in the leaderboard. @param member [String] Member name. @param score [float] Member score. @param member_data [String] Optional member_data.
def visualise(seq, sort=lambda x: x[0]):
    """Visualise a seq or dictionary of (label, count) items as an ASCII
    bar chart.

    Each entry prints as ``label  count  ***`` with the star count
    scaled onto the observed value range.

    Args:
        seq: mapping or iterable of (label, numeric value) pairs.
        sort: key function for ordering entries; pass a falsy value to
            keep the input order.
    """
    frmt = "{:6} {:8,d} {}"
    if isinstance(seq, dict):
        seq = seq.items()
    # Materialize so generators can be iterated twice (min/max + print).
    seq = sorted(seq, key=sort) if sort else list(seq)
    values = [item[1] for item in seq]
    # Bug fixes: the span local previously shadowed the builtin `range`,
    # and the division crashed with ZeroDivisionError when all values
    # were equal.
    span = (max(values) - min(values)) or 1
    for label, value in seq:
        bar = "*" * int((value * 100) / span)
        print(frmt.format(label, value, bar))
Visualises a seq or dictionary as an ASCII bar chart.
def all_arguments(cls, function, arguments):
    """Helper for creating `FunctionCall`s with `Arguments`.

    Args:
        function: The value to store for the action function.
        arguments: The values to store for the arguments of the action.
            Can be an `Arguments` object, a `dict` (unpacked as
            keywords), or an iterable (unpacked positionally).

    Returns:
        A new `FunctionCall` instance.
    """
    if isinstance(arguments, dict):
        arguments = Arguments(**arguments)
    elif not isinstance(arguments, Arguments):
        arguments = Arguments(*arguments)
    return cls(function, arguments)
Helper function for creating `FunctionCall`s with `Arguments`. Args: function: The value to store for the action function. arguments: The values to store for the arguments of the action. Can either be an `Arguments` object, a `dict`, or an iterable. If a `dict` or an iterable is provided, the values will be unpacked into an `Arguments` object. Returns: A new `FunctionCall` instance.
def chain(args):
    # %prog chain blastfile
    #
    # Chain adjacent HSPs together to form larger HSPs.
    parser = OptionParser(chain.__doc__)
    parser.add_option("--dist", dest="dist", default=100, type="int",
                      help="extent of flanking regions to search [default: %default]")
    opts, args = parser.parse_args(args)

    if len(args) != 1:
        sys.exit(not parser.print_help())

    blastfile, = args
    max_dist = opts.dist
    assert max_dist > 0

    blast = BlastSlow(blastfile)
    logging.debug("A total of {} records imported".format(len(blast)))
    chained_hsps = chain_HSPs(blast, xdist=max_dist, ydist=max_dist)
    logging.debug("A total of {} records after chaining".format(len(chained_hsps)))

    for hsp in chained_hsps:
        print(hsp)
%prog chain blastfile Chain adjacent HSPs together to form larger HSP.
def class_in_progress(stack=None):
    """True if currently inside a class definition, else False.

    Args:
        stack: Optional pre-captured stack (as from ``inspect.stack()``);
            the current call stack is inspected when omitted.
    """
    frames = inspect.stack() if stack is None else stack
    # frame[4] is the code context: the source line(s) of that frame.
    return any(
        frame[4] is not None and frame[4][0].strip().startswith('class ')
        for frame in frames
    )
True if currently inside a class definition, else False.
def write_all_sequences_file(self, outname, outdir=None):
    """Write all the stored sequences to a single FASTA file.

    Args:
        outname (str): Output FASTA file name, without the extension.
        outdir (str): Output directory; defaults to the sequences
            directory when not given.

    Returns:
        str: Path to the written FASTA file.

    Raises:
        ValueError: If no output directory is given and none is
            configured on this object.
    """
    outdir = outdir or self.sequence_dir
    if not outdir:
        raise ValueError('Output directory must be specified')

    outfile = op.join(outdir, outname + '.faa')
    SeqIO.write(self.sequences, outfile, "fasta")

    log.info('{}: wrote all protein sequences to file'.format(outfile))
    return outfile
Write all the stored sequences as a single FASTA file. By default, sets IDs to model gene IDs. Args: outname (str): Name of the output FASTA file without the extension outdir (str): Path to output directory for the file, default is the sequences directory
def random(cls):
    """Return a random rotation.

    Draws a random rotation axis, a uniform angle in ``[0, 2*pi)`` and a
    random inversion flag, and builds the rotation from those properties.

    Returns:
        A new rotation instance of type ``cls``.
    """
    axis = random_unit()
    angle = np.random.uniform(0, 2 * np.pi)
    invert = bool(np.random.randint(0, 2))
    # Use ``cls`` rather than hard-coding ``Rotation`` so subclasses get
    # instances of their own type from this alternate constructor.
    return cls.from_properties(angle, axis, invert)
Return a random rotation
def delete(self):
    """Delete the queried record and return the response content.

    :raise:
        :NoResults: if the query matched no record
        :MultipleResults: if the query matched more than one record
    :return: Delete response content (generally ``{'Success': True}``)
    """
    try:
        record = self.get_one()
        if 'sys_id' not in record:
            raise NoResults()
    except MultipleResults:
        # Refuse to guess which record the caller meant.
        raise MultipleResults("Deletion of multiple records is not supported")
    except NoResults as e:
        e.args = ('Cannot delete a non-existing record',)
        raise

    response = self.session.delete(
        self._get_table_url(sys_id=record['sys_id']))
    return self._get_content(response)
Deletes the queried record and returns response content after response validation :raise: :NoResults: if query returned no results :NotImplementedError: if query returned more than one result (currently not supported) :return: - Delete response content (Generally always {'Success': True})
def subtract_by_key(dict_a, dict_b):
    """Return the key-wise difference ``dict_a - dict_b``.

    The result contains every entry of ``dict_a`` whose key does not
    appear in ``dict_b``, e.g.::

        {1: None, 2: 3, 3: "yellow", 4: True} - {2: 4, 1: "green"}
            == {3: "yellow", 4: True}
    """
    return {key: value for key, value in dict_a.items()
            if key not in dict_b}
given two dicts, a and b, this function returns c = a - b, where a - b is defined as the key difference between a and b. e.g., {1:None, 2:3, 3:"yellow", 4:True} - {2:4, 1:"green"} = {3:"yellow", 4:True}
def get_datacenter_id():
    """Return the datacenter ID from the provider configuration.

    Validates the configured ID against the provider and re-raises (after
    logging) when the datacenter does not exist.
    """
    datacenter_id = config.get_cloud_config_value(
        'datacenter_id',
        get_configured_provider(),
        __opts__,
        search_global=False,
    )

    conn = get_conn()
    try:
        conn.get_datacenter(datacenter_id=datacenter_id)
    except PBNotFoundError:
        log.error('Failed to get datacenter: %s', datacenter_id)
        raise

    return datacenter_id
Return datacenter ID from provider configuration
def get(self):
    """Flush the call queue and return the wrapped data.

    Returns:
        A copy of the stored object when no calls are pending; otherwise
        the data after the queued calls are applied via an identity
        ``apply``.
    """
    if not self.call_queue:
        return self.data.copy()
    return self.apply(lambda df: df).data
Flushes the call_queue and returns the data. Note: Since this object is a simple wrapper, just return the data. Returns: The object that was `put`.
def list_by_ids(self, ids):
    """Retrieve only the messages with the given IDs from this queue.

    This avoids pulling down all the messages in a queue and filtering
    on the client side when the message IDs are already known.

    :param ids: a single message ID or a list of message IDs.
    :return: the matching messages.
    """
    ids = utils.coerce_to_list(ids)
    uri = "/%s?ids=%s" % (self.uri_base, ",".join(ids))
    # This listing comes back without the usual plural response key, so
    # blank it for this one call. Restore it in a finally block so a
    # failing request no longer corrupts subsequent listings.
    curr_prkey = self.plural_response_key
    self.plural_response_key = ""
    try:
        return self._list(uri)
    finally:
        self.plural_response_key = curr_prkey
If you wish to retrieve a list of messages from this queue and know the IDs of those messages, you can pass in a list of those IDs, and only the matching messages will be returned. This avoids pulling down all the messages in a queue and filtering on the client side.
def init(cls, *args, **kwargs):
    """Initialize the config like you would a regular dict.

    Positional and keyword arguments are forwarded to ``dict`` and
    merged into the new instance's values.
    """
    config = cls()
    config._values.update(dict(*args, **kwargs))
    return config
Initialize the config like as you would a regular dict.
def center_middle(r, window_size):
    """Center a region on its midpoint and expand it to ``window_size`` bases.

    The input region is not modified; a shallow copy is adjusted and
    returned.

    :param r: region object with ``start``/``end`` attributes and a length.
    :param window_size: desired width of the resulting region, in bases.
    :return: the new region.
    """
    res = copy.copy(r)
    # Use floor division so coordinates stay integral on Python 3
    # (plain ``/`` silently turned them into floats).
    mid = res.start + (len(res) // 2)
    res.start = mid - (window_size // 2)
    res.end = res.start + window_size
    return res
Center a region on its middle and expand it to window_size bases. :return: the new region.
def from_specification(cls, specification, model, classical_gap, ground_energy):
    """Construct a PenaltyModel from a Specification.

    Args:
        specification (:class:`.Specification`): The specification the
            model was generated from; its graph, decision variables,
            feasible configurations, vartype and ising ranges are
            carried over.
        model (:class:`dimod.BinaryQuadraticModel`): Binary quadratic
            model whose ground states match the feasible configurations.
        classical_gap (numeric): Energy difference between the ground
            state and the first excited state.
        ground_energy (numeric): Minimum energy over all configurations.

    Returns:
        :class:`.PenaltyModel`
    """
    spec = specification
    return cls(spec.graph,
               spec.decision_variables,
               spec.feasible_configurations,
               spec.vartype,
               model,
               classical_gap,
               ground_energy,
               ising_linear_ranges=spec.ising_linear_ranges,
               ising_quadratic_ranges=spec.ising_quadratic_ranges)
Construct a PenaltyModel from a Specification. Args: specification (:class:`.Specification`): A specification that was used to generate the model. model (:class:`dimod.BinaryQuadraticModel`): A binary quadratic model that has ground states that match the feasible_configurations. classical_gap (numeric): The difference in classical energy between the ground state and the first excited state. Must be positive. ground_energy (numeric): The minimum energy of all possible configurations. Returns: :class:`.PenaltyModel`
def get(self):
    """Gets the next item from the queue.

    Returns a Future that resolves to the next item once it is
    available.
    """
    io_loop = IOLoop.current()
    new_get = Future()

    # Atomically swap in a fresh "get" future so concurrent getters form
    # a chain: each caller waits on the previous caller's future.
    with self._lock:
        get, self._get = self._get, new_get

    answer = Future()

    def _on_node(future):
        # The hole future resolved to a linked-list node: detach the
        # node, deliver its value to our caller, and hand the next hole
        # to the next getter in the chain.
        if future.exception():
            return answer.set_exc_info(future.exc_info())
        node = future.result()
        value = node.value
        new_hole, node.next = node.next, None
        new_get.set_result(new_hole)
        answer.set_result(value)

    def _on_get(future):
        # The previous getter finished; its result is the hole future
        # that will eventually contain our node.
        if future.exception():
            return answer.set_exc_info(future.exc_info())
        hole = future.result()
        io_loop.add_future(hole, _on_node)

    io_loop.add_future(get, _on_get)
    return answer
Gets the next item from the queue. Returns a Future that resolves to the next item once it is available.
def save_figure_tofile(fig, fmt, fname):
    """Save ``fig`` to ``fname`` in the format given by ``fmt``.

    SVG figures being written to a ``.png`` file are rasterised first;
    everything else is written out as raw bytes.
    """
    root, ext = osp.splitext(fname)
    if ext == '.png' and fmt == 'image/svg+xml':
        # Rasterise the SVG payload before saving it as PNG.
        qimg = svg_to_image(fig)
        qimg.save(fname)
        return
    if fmt == 'image/svg+xml' and is_unicode(fig):
        fig = fig.encode('utf-8')
    with open(fname, 'wb') as f:
        f.write(fig)
Save fig to fname in the format specified by fmt.
def params(self):
    """Return the parameters when the definition is callable.

    Gives a list of `ValueElement` objects for functions and
    subroutines; other element types yield ``None``. Attribute access on
    the evaluator may raise ``AttributeError`` upstream — confirm against
    the evaluator implementation.
    """
    if self.context.el_type not in (Function, Subroutine):
        return None
    return self.evaluator.element.parameters
Raises an ``AttributeError`` if the definition is not callable. Otherwise returns a list of `ValueElement` that represents the params.
def hostname_text(self):
    """Return hostname text, collecting it from the driver on first access."""
    if self._hostname_text is not None:
        return self._hostname_text

    log = self.chain.connection.log
    log("Collecting hostname information")
    self._hostname_text = self.driver.get_hostname_text()
    if self._hostname_text:
        log("Hostname info collected")
    else:
        log("Hostname info not collected")
    return self._hostname_text
Return hostname text and collect if not collected.
def by(self, technology):
    """Get the plugins registered in PedalPi by technology.

    :param PluginTechnology technology: technology identifier — the enum
        member itself or its string value (case-insensitive)
    """
    is_lv2 = (
        technology == PluginTechnology.LV2
        or str(technology).upper() == PluginTechnology.LV2.value.upper()
    )
    return self.lv2_builder.all if is_lv2 else []
Get the plugins registered in PedalPi by technology :param PluginTechnology technology: PluginTechnology identifier
def configure_syslog(request=None, logger=None, exceptions=False):
    """Configure a syslog logging channel.

    Enabled by setting ``syslog_host`` in the config file; the port
    defaults to 514 and can be overridden with ``syslog_port``.

    :param request: tornado.httputil.HTTPServerRequest instance whose
        details are attached to emitted records
    :param logger: logger to attach the handler to (root logger if None)
    :param exceptions: boolean - whether errors encountered inside the
        logging system itself should be raised
    """
    syslog_host = getattr(options, 'syslog_host', None)
    if not syslog_host:
        # Syslog logging is opt-in; nothing to do without a host.
        return

    sys.modules["logging"].raiseExceptions = exceptions

    handler = SysLogHandler(address=(syslog_host, options.syslog_port))
    handler.setFormatter(log_formatter(request))
    if request:
        handler.addFilter(RequestFilter(request))

    (logger or logging.getLogger()).addHandler(handler)
Configure syslog logging channel. It is turned on by setting `syslog_host` in the config file. The port default to 514 can be overridden by setting `syslog_port`. :param request: tornado.httputil.HTTPServerRequest instance :param exceptions: boolean - This indicates if we should raise exceptions encountered in the logging system.
def get_parser(self):
    """Reset the factory state and return it, ready to parse again.

    :return: this ParserFactory
    """
    self.exhausted = False
    self.anony_defs = []
    self.path = None
    return self
Returns a ParserFactory with the state reset so it can be used to parse again. :return: ParserFactory
def _prep_components(component_list: Sequence[str]) -> List[Tuple[str, Tuple[str]]]:
    """Transform component description strings into (path, args) tuples.

    Each description looks like ``"path.to.Component(arg1, arg2)"``: the
    trailing parenthesis is stripped and the arguments are cleaned with
    ``_clean_args``.

    Parameters
    ----------
    component_list :
        The component descriptions to transform.

    Returns
    -------
    List of component/argument tuples.
    """
    prepped = []
    for description in component_list:
        path, raw_args = description.split('(')
        args = _clean_args(raw_args[:-1].split(','), path)
        prepped.append((path, args))
    return prepped
Transform component description strings into tuples of component paths and required arguments. Parameters ---------- component_list : The component descriptions to transform. Returns ------- List of component/argument tuples.
def drawstarslist(self, dictlist, r=10, colour=None):
    """Draw a labelled circle for each star in ``dictlist``.

    Provide a list of dictionaries, where each dictionary contains
    "name", "x", and "y".

    :param dictlist: list of dicts with "name", "x" and "y" keys
    :param r: circle radius
    :param colour: drawing colour; the default colour is used when None
    """
    self.checkforpilimage()
    colour = self.defaultcolour(colour)
    self.changecolourmode(colour)
    self.makedraw()

    for star in dictlist:
        self.drawcircle(star["x"], star["y"], r=r, colour=colour,
                        label=star["name"])

    if self.verbose:
        # Parenthesised so this prints identically under both Python 2
        # and 3 (it was a Python-2-only bare ``print`` statement).
        print("I've drawn %i stars." % len(dictlist))
Draws a labelled circle (via drawcircle) for each star in a list. Provide a list of dictionaries, where each dictionary contains "name", "x", and "y".
def rmdir(remote_folder, missing_okay):
    """Forcefully remove a folder and ALL of its children from the board.

    Deletes the directory recursively from the board's filesystem — use
    with caution. For example, to delete everything under
    /adafruit_library from the root of a board run:

        ampy --port /board/serial/port rmdir adafruit_library

    :param remote_folder: path of the folder to delete
    :param missing_okay: do not fail when the folder does not exist
    """
    files.Files(_board).rmdir(remote_folder, missing_okay=missing_okay)
Forcefully remove a folder and all its children from the board. Remove the specified folder from the board's filesystem. Must specify one argument which is the path to the folder to delete. This will delete the directory and ALL of its children recursively, use with caution! For example to delete everything under /adafruit_library from the root of a board run: ampy --port /board/serial/port rmdir adafruit_library
def reset(self):
    """Reset simulated and real environments in lockstep.

    Seeds the simulated environment's initial frame stack with frames
    from the real environment so both start from an identical
    observation, then returns the packed initial observation.
    """
    self._frame_counter = 0
    ob_real = self.real_env.reset()
    # Initialize the simulated env's frame stack from real frames: the
    # reset frame plus three NOOP-step frames.
    self.sim_env.add_to_initial_stack(ob_real)
    for _ in range(3):
        ob_real, _, _, _ = self.real_env.step(self.name_to_action_num["NOOP"])
        self.sim_env.add_to_initial_stack(ob_real)
    ob_sim = self.sim_env.reset()
    # Both environments must start from the exact same observation.
    assert np.all(ob_real == ob_sim)
    self._last_step_tuples = self._pack_step_tuples((ob_real, 0, False, {}),
                                                    (ob_sim, 0, False, {}))
    self.set_zero_cumulative_rewards()
    ob, _, _, _ = self._player_step_tuple(self._last_step_tuples)
    return ob
Reset simulated and real environments.
def addHendrix(self):
    """Instantiate a HendrixService with this object's threadpool.

    The service is stored on ``self.hendrix`` and added as a service
    later; an HTTP server is spawned unless the deployment is configured
    as HTTPS-only.
    """
    options = self.options
    self.hendrix = HendrixService(
        self.application,
        threadpool=self.getThreadPool(),
        resources=self.resources,
        services=self.services,
        loud=options['loud'],
    )
    if options["https_only"] is not True:
        self.hendrix.spawn_new_server(options['http_port'], HendrixTCPService)
Instantiates a HendrixService with this object's threadpool. It will be added as a service later.
def _initial_broks(self, broker_name):
    """Make the scheduler prepare its initial broks for a broker.

    This does not send any broks; the broker must then use the *_broks*
    API to fetch them.

    :param broker_name: broker name, used to filter broks
    :type broker_name: str
    :return: the result of the scheduler's ``fill_initial_broks`` call
    """
    app = self.app
    with app.conf_lock:
        logger.info("A new broker just connected : %s", broker_name)
        return app.sched.fill_initial_broks(broker_name)
Get initial_broks from the scheduler. This is used by the brokers to prepare the initial status broks. This does not send broks; it only performs the scheduler's internal processing. The broker must then use the *_broks* API to fetch them. :param broker_name: broker name, used to filter broks :type broker_name: str :return: the result of the scheduler's ``fill_initial_broks`` call
def has_attribute_type(self, attribute: str, typ: Type) -> bool:
    """Whether the given attribute exists and has a compatible type.

    Scalar types are matched through ``scalar_type_to_tag``; ``list``
    matches YAML sequence nodes and ``dict`` matches mapping nodes.

    Args:
        attribute: The name of the attribute to check.
        typ: The type to check against.

    Returns:
        True iff the attribute exists and matches the type.

    Raises:
        ValueError: If ``typ`` is not one of the supported types.
    """
    if not self.has_attribute(attribute):
        return False

    attr_node = self.get_attribute(attribute).yaml_node
    if typ in scalar_type_to_tag:
        return attr_node.tag == scalar_type_to_tag[typ]
    if typ == list:
        return isinstance(attr_node, yaml.SequenceNode)
    if typ == dict:
        return isinstance(attr_node, yaml.MappingNode)
    raise ValueError('Invalid argument for typ attribute')
Whether the given attribute exists and has a compatible type. Returns true iff the attribute exists and is an instance of \ the given type. Matching between types passed as typ and \ yaml node types is as follows: +---------+-------------------------------------------+ | typ | yaml | +=========+===========================================+ | str | ScalarNode containing string | +---------+-------------------------------------------+ | int | ScalarNode containing int | +---------+-------------------------------------------+ | float | ScalarNode containing float | +---------+-------------------------------------------+ | bool | ScalarNode containing bool | +---------+-------------------------------------------+ | None | ScalarNode containing null | +---------+-------------------------------------------+ | list | SequenceNode | +---------+-------------------------------------------+ | dict | MappingNode | +---------+-------------------------------------------+ Args: attribute: The name of the attribute to check. typ: The type to check against. Returns: True iff the attribute exists and matches the type.
def quality_and_fitness_parsed(mime_type, parsed_ranges):
    """Find the best match for a mime-type amongst parsed media-ranges.

    ``parsed_ranges`` must be a list of media ranges already parsed by
    parse_media_range(). Returns a ``(quality, fitness)`` tuple for the
    best match, or ``(0, -1)`` when nothing matched.

    :rtype: (float, int)
    """
    best_fitness = -1
    best_fit_q = 0
    target_type, target_subtype, target_params = parse_media_range(mime_type)

    # ``range_type`` etc. replace the original locals that shadowed the
    # ``type`` builtin.
    for range_type, range_subtype, range_params in parsed_ranges:
        type_match = (range_type in (target_type, '*')
                      or target_type == '*')
        subtype_match = (range_subtype in (target_subtype, '*')
                         or target_subtype == '*')
        if not (type_match and subtype_match):
            continue

        # Exact type is worth 100, exact subtype 10, each matching
        # parameter 1; the range's own 'q' breaks remaining ties.
        fitness = 100 if range_type == target_type else 0
        fitness += 10 if range_subtype == target_subtype else 0
        fitness += sum(
            1 for key, value in target_params.items()
            if key != 'q' and key in range_params and value == range_params[key]
        )
        fitness += float(target_params.get('q', 1))
        if fitness > best_fitness:
            best_fitness = fitness
            best_fit_q = range_params['q']

    return float(best_fit_q), best_fitness
Find the best match for a mime-type amongst parsed media-ranges. Find the best match for a given mime-type against a list of media_ranges that have already been parsed by parse_media_range(). Returns a tuple of the value of the 'q' quality parameter of the best match and the fitness value, or (0, -1) if no match was found. Just as for quality_parsed(), 'parsed_ranges' must be a list of parsed media ranges. :rtype: (float,int)
def hook(*hook_patterns):
    """Register the decorated function to run when the current hook
    matches any of the ``hook_patterns``.

    Generally deprecated; use only when absolutely necessary. Patterns
    can use the ``{interface:...}`` and ``{A,B,...}`` syntax supported
    by :func:`~charms.reactive.bus.any_hook`. Hook decorators cannot be
    combined with :func:`when` or :func:`when_not` decorators.
    """
    def _register(action):
        # Lazily yields the relation endpoint (when the current hook is
        # a relation hook) as the handler's call argument.
        def arg_gen():
            rel = endpoint_from_name(hookenv.relation_type())
            if rel:
                yield rel
        handler = Handler.get(action)
        handler.add_predicate(partial(_hook, hook_patterns))
        handler.add_args(arg_gen())
        return action
    return _register
Register the decorated function to run when the current hook matches any of the ``hook_patterns``. This decorator is generally deprecated and should only be used when absolutely necessary. The hook patterns can use the ``{interface:...}`` and ``{A,B,...}`` syntax supported by :func:`~charms.reactive.bus.any_hook`. Note that hook decorators **cannot** be combined with :func:`when` or :func:`when_not` decorators.
def create(container, portal_type, *args, **kwargs):
    """Creates an object in Bika LIMS

    This code uses most of the parts from the TypesTool
    see: `Products.CMFCore.TypesTool._constructInstance`

    :param container: container
    :type container: ATContentType/DexterityContentType/CatalogBrain
    :param portal_type: The portal type to create, e.g. "Client"
    :type portal_type: string
    :param title: The title for the new content object
    :type title: string
    :returns: The new created object
    """
    from bika.lims.utils import tmpID
    if kwargs.get("title") is None:
        kwargs["title"] = "New {}".format(portal_type)

    # Generate a temporary id; the final id is assigned later
    # (e.g. by processForm's rename-after-creation machinery).
    tmp_id = tmpID()

    types_tool = get_tool("portal_types")
    fti = types_tool.getTypeInfo(portal_type)
    if fti.product:
        # Product-based (Archetypes) content is constructed through the
        # product factory.
        obj = _createObjectByType(portal_type, container, tmp_id)
    else:
        # Dexterity content is constructed through the registered
        # IFactory utility and wired into the container manually.
        factory = getUtility(IFactory, fti.factory)
        obj = factory(tmp_id, *args, **kwargs)
        if hasattr(obj, '_setPortalTypeName'):
            obj._setPortalTypeName(fti.getId())
        notify(ObjectCreatedEvent(obj))
        container._setObject(tmp_id, obj)
        # Re-fetch the acquisition-wrapped object from the container.
        obj = container._getOb(obj.getId())

    if is_at_content(obj):
        # Finalize AT content creation (triggers renaming and events).
        obj.processForm()

    # Apply the given values and mark the object as modified.
    obj.edit(**kwargs)
    modified(obj)
    return obj
Creates an object in Bika LIMS This code uses most of the parts from the TypesTool see: `Products.CMFCore.TypesTool._constructInstance` :param container: container :type container: ATContentType/DexterityContentType/CatalogBrain :param portal_type: The portal type to create, e.g. "Client" :type portal_type: string :param title: The title for the new content object :type title: string :returns: The new created object
def read(self) -> None:
    """Call method |NetCDFFile.read| of all handled |NetCDFFile| objects."""
    for folder in self.folders.values():
        for ncfile in folder.values():
            ncfile.read()
Call method |NetCDFFile.read| of all handled |NetCDFFile| objects.
def is_local(self):
    """Return True if the package is in the local package repository."""
    local_repo = package_repository_manager.get_repository(
        self.config.local_packages_path)
    return self.resource._repository.uid == local_repo.uid
Returns True if the package is in the local package repository
def by_summoner(self, region, encrypted_summoner_id):
    """Return the third-party code for the given summoner.

    FOR KR SUMMONERS, A 404 WILL ALWAYS BE RETURNED. Valid codes must be
    no longer than 256 characters and only use valid characters:
    0-9, a-z, A-Z, and -

    :param string region: the region to execute this request on
    :param string encrypted_summoner_id: Summoner ID
    :returns: string
    """
    endpoint_name = self.by_summoner.__name__
    url, query = ThirdPartyCodeApiV4Urls.by_summoner(
        region=region, encrypted_summoner_id=encrypted_summoner_id
    )
    return self._raw_request(endpoint_name, region, url, query)
FOR KR SUMMONERS, A 404 WILL ALWAYS BE RETURNED. Valid codes must be no longer than 256 characters and only use valid characters: 0-9, a-z, A-Z, and - :param string region: the region to execute this request on :param string encrypted_summoner_id: Summoner ID :returns: string
def ccube(self, **kwargs):
    """Return the name of a counts cube file.

    Formats ``NameFactory.ccube_format`` with the base dict overlaid by
    the given keyword arguments; pass ``fullpath=True`` to get the full
    path instead of the local one.
    """
    fmt_args = self.base_dict.copy()
    fmt_args.update(**kwargs)
    fmt_args['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    fmt_args['component'] = kwargs.get('component', self.component(**kwargs))
    localpath = NameFactory.ccube_format.format(**fmt_args)
    if kwargs.get('fullpath', False):
        return self.fullpath(localpath=localpath)
    return localpath
return the name of a counts cube file
def writeWorkerDebug(debugStats, queueLength, path_suffix=""):
    """Serialize the execution stats and queue lengths with pickle and
    write them into the debug directory."""
    createDirectory(path_suffix)
    origin_prefix = "origin-" if scoop.IS_ORIGIN else ""
    base_dir = os.path.join(getDebugDirectory(), path_suffix)
    identifier = getDebugIdentifier()
    statsFilename = os.path.join(
        base_dir, "{1}worker-{0}-STATS".format(identifier, origin_prefix))
    lengthFilename = os.path.join(
        base_dir, "{1}worker-{0}-QUEUE".format(identifier, origin_prefix))
    for filename, payload in ((statsFilename, debugStats),
                              (lengthFilename, queueLength)):
        with open(filename, 'wb') as f:
            pickle.dump(payload, f)
Serialize the execution data using pickle and writes it into the debug directory.
def sample_surface_even(mesh, count):
    """Sample the surface of a mesh, rejecting points that fall too close
    to one another so the result is approximately evenly spaced.

    Parameters
    ---------
    mesh: Trimesh object
    count: number of points to return

    Returns
    ---------
    samples: (up to count, 3) points in space on the surface of mesh
    face_index: indices of faces for each sampled point
    """
    from .points import remove_close

    # Oversample by 5x, then thin out anything closer than this radius.
    radius = np.sqrt(mesh.area / (2 * count))
    candidates, face_ids = sample_surface(mesh, count * 5)
    kept, keep_mask = remove_close(candidates, radius)
    return kept, face_ids[keep_mask]
Sample the surface of a mesh, returning samples which are approximately evenly spaced. Parameters --------- mesh: Trimesh object count: number of points to return Returns --------- samples: (count,3) points in space on the surface of mesh face_index: (count,) indices of faces for each sampled point