code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def collection_names(self, callback):
    """List all collection names in the selected database (asynchronously).

    Queries ``system.namespaces`` on the master; the raw result is
    post-processed by ``_collection_names_result`` before *callback*
    receives it.
    """
    wrapped = partial(self._collection_names_result, callback)
    self["system.namespaces"].find(_must_use_master=True, callback=wrapped)
Get a list of all the collection names in selected database
def setup(self):
    """Initialize the driver: reset counters, configure GPIO interrupts,
    and start the periodic statistics timer. Returns self for chaining.
    """
    # Reset all counting and timing state.
    self.radiation_count = 0
    self.noise_count = 0
    self.count = 0
    self.count_history = [0] * HISTORY_LENGTH
    self.history_index = 0
    self.previous_time = millis()
    self.previous_history_time = millis()
    self.duration = 0
    # Inputs are pulled up; falling edges signal events
    # (presumably active-low sensor outputs — confirm with hardware docs).
    GPIO.setup(self.radiation_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(self.noise_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.add_event_detect(
        self.radiation_pin, GPIO.FALLING, callback=self._on_radiation
    )
    GPIO.add_event_detect(self.noise_pin, GPIO.FALLING, callback=self._on_noise)
    self._enable_timer()
    return self
Initialize the driver by setting up GPIO interrupts and periodic statistics processing.
def load_pyfile(self, path):
    """Load a Python file as configuration by executing it with ``self``
    as the globals namespace.

    Args:
        path (string): path to the python file

    Raises:
        MalformedConfig: if executing the file raises any exception.
    """
    with open(path) as config_file:
        source = config_file.read()
    try:
        exec(compile(source, path, 'exec'), self)
    except Exception as e:
        # Surface any failure as a config error carrying the file path.
        raise MalformedConfig(path, six.text_type(e))
Load python file as config. Args: path (string): path to the python file
def apply_patch(self, patch):
    """Applies given patch unless its uid is already in the history file.

    :param patch: Patch.
    :type patch: Patch
    :return: Method success.
    :rtype: bool
    """
    history_file = File(self.__history_file)
    applied_uids = (
        [line.strip() for line in history_file.content]
        if history_file.cache() else []
    )
    if patch.uid in applied_uids:
        LOGGER.debug("> '{0}' patch is already applied!".format(patch.name))
        return True
    LOGGER.debug("> Applying '{0}' patch!".format(patch.name))
    if not patch.apply():
        raise umbra.exceptions.PatchApplyError("{0} | '{1}' patch failed to apply!".format(
            self.__class__.__name__, patch.path))
    # Record the uid so the patch is not applied twice.
    history_file.content = ["{0}\n".format(patch.uid)]
    history_file.append()
    return True
Applies given patch. :param patch: Patch. :type patch: Patch :return: Method success. :rtype: bool
def debug(self, msg):
    """Append *msg* to the debug log file, if debugging is enabled.

    Lazily opens the file named by ``settings.AD_DEBUG_FILE`` on the
    first call; permanently disables itself (``__debug = False``) when
    that setting is absent.
    """
    if self.__debug is not False:
        if self.__debug is None:
            # First call: resolve the target file from settings.
            debug_filename = getattr(settings, "AD_DEBUG_FILE", None)
            if debug_filename:
                # Fix: reuse the value already fetched via getattr instead
                # of re-reading settings.AD_DEBUG_FILE directly.
                self.__debug = open(debug_filename, 'a')
            else:
                self.__debug = False
        if self.__debug:
            self.__debug.write("{}\n".format(msg))
            self.__debug.flush()
Handle the debugging to a file
def _tuplefy_namespace(self, namespace): namespace_split = namespace.split('.', 1) if len(namespace_split) is 1: namespace_tuple = ('*', namespace_split[0]) elif len(namespace_split) is 2: namespace_tuple = (namespace_split[0],namespace_split[1]) else: return None return namespace_tuple
Converts a mongodb namespace to a db, collection tuple
def stop(self):
    """Stop the connection: block until the receiver thread has exited
    or the connection has dropped (clean shutdown)."""
    cond = self.__receiver_thread_exit_condition
    with cond:
        # Loop guards against spurious wakeups.
        while not self.__receiver_thread_exited and self.is_connected():
            cond.wait()
Stop the connection. Performs a clean shutdown by waiting for the receiver thread to exit.
def get_splits_query(self):
    """Return the SQLAlchemy query selecting this transaction's splits."""
    session = self.book.session
    return session.query(Split).filter(
        Split.transaction_guid == self.transaction.guid
    )
Returns the query for related splits
def used_args(self):
    """Yield the command.Arg objects already consumed on the command line.

    Walks the words after the command name, collecting values that are
    not options (and not values of value-taking options), then matches
    them against the command's declared args.

    :rtype: command.Arg generator
    """
    values = []
    for idx, word in enumerate(self.words[1:]):
        if word.startswith('-'):
            continue
        option_str = self.words[1:][idx - 1]
        option = self.get_option(option_str)
        if option is None or not option.need_value:
            # (value, is_the_word_currently_being_typed)
            values.append(
                (word, word == self.document.get_word_before_cursor(WORD=True)))
    logger.debug("Found args values %s" % values)
    for arg in self.cmd.args.values():
        if not values:
            # Fix (PEP 479): 'raise StopIteration' inside a generator is a
            # RuntimeError on Python 3.7+; a plain return ends the generator.
            return
        if arg.is_multiple:
            values = []
            yield arg
        elif type(arg.nargs) is int:
            for _ in range(arg.nargs):
                value = values.pop(0)
                if value[1] is False:
                    yield arg
                if not values:
                    return
Return args already used in the command line rtype: command.Arg generator
def ensure_newline(self):
    """Move the terminal cursor to a fresh line before custom printing.

    Use before any custom printing while a progress iterator is active so
    output starts on a new line instead of at the end of a progress line.
    Emits the DECTCEM "show cursor" escape plus a newline.
    """
    if self._cursor_at_newline:
        return
    show_cursor = '\033[?25h'  # DECTCEM: make the cursor visible again
    self.write(show_cursor + '\n')
    self._cursor_at_newline = True
use before any custom printing when using the progress iter to ensure your print statement starts on a new line instead of at the end of a progress line
def listLastFires(self, *args, **kwargs):
    """Get information about the most recent fires of this hook,
    including whether each fire succeeded.

    Output schema: ``v1/list-lastFires-response.json#``.
    This API method is experimental.
    """
    return self._makeApiCall(self.funcinfo["listLastFires"], *args, **kwargs)
Get information about recent hook fires This endpoint will return information about the last few times this hook has been fired, including whether the hook was fired successfully or not This method gives output: ``v1/list-lastFires-response.json#`` This method is ``experimental``
def _is_container_excluded(self, container):
    """Return True when *container* matches the exclusion filter rules.

    Requires ``_filter_containers`` to have populated
    ``self._filtered_containers`` first.
    """
    name = DockerUtil.container_name_extractor(container)[0]
    return name in self._filtered_containers
Check if a container is excluded according to the filter rules. Requires _filter_containers to run first.
def get_site_t2g_eg_resolved_dos(self, site):
    """Get the t2g and e_g projected DOS for a particular site.

    Args:
        site: Site in the Structure associated with this CompleteDos.

    Returns:
        dict {"t2g": Dos, "e_g": Dos} with the summed projections.
    """
    t2g_contributions = []
    eg_contributions = []
    for candidate, atom_dos in self.pdos.items():
        if candidate != site:
            continue
        for orbital, projection in atom_dos.items():
            # d orbitals split into the t2g triplet and the e_g doublet.
            if orbital in (Orbital.dxy, Orbital.dxz, Orbital.dyz):
                t2g_contributions.append(projection)
            elif orbital in (Orbital.dx2, Orbital.dz2):
                eg_contributions.append(projection)
    # NOTE(review): reduce() on an empty list raises TypeError when the
    # site has no matching orbitals — same as the original behaviour.
    return {"t2g": Dos(self.efermi, self.energies,
                       functools.reduce(add_densities, t2g_contributions)),
            "e_g": Dos(self.efermi, self.energies,
                       functools.reduce(add_densities, eg_contributions))}
Get the t2g, eg projected DOS for a particular site. Args: site: Site in Structure associated with CompleteDos. Returns: A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS for the site.
def _type_digest(self, config: bool) -> Dict[str, Any]: res = {"base": self.yang_type()} if self.name is not None: res["derived"] = self.name return res
Return receiver's type digest. Args: config: Specifies whether the type is on a configuration node.
def get_defs(self, position=None):
    """Return the defs recorded at *position* (default: current position)."""
    idx = self.position if position is None else position
    return self.checkdefs[idx][1]
Gets the defs at the position.
def unit_of_work(metadata=None, timeout=None):
    """Decorator for transaction functions giving extra control over how
    the transaction is carried out.

    For example, a timeout (in seconds) may be applied::

        @unit_of_work(timeout=25.0)
        def count_people(tx):
            return tx.run("MATCH (a:Person) RETURN count(a)").single().value()

    :param metadata: arbitrary metadata attached to the transaction
    :param timeout: transaction timeout in seconds
    """
    from functools import wraps  # local import: no module-level deps change

    def wrapper(f):
        @wraps(f)  # fix: preserve f's __name__/__doc__ on the wrapper
        def wrapped(*args, **kwargs):
            return f(*args, **kwargs)
        wrapped.metadata = metadata
        wrapped.timeout = timeout
        return wrapped
    return wrapper
This function is a decorator for transaction functions that allows extra control over how the transaction is carried out. For example, a timeout (in seconds) may be applied:: @unit_of_work(timeout=25.0) def count_people(tx): return tx.run("MATCH (a:Person) RETURN count(a)").single().value()
def scheduled_sample_count(ground_truth_x, generated_x, batch_size,
                           scheduled_sample_var):
    """Sample a batch with a specified mix of ground-truth and generated
    data points.

    Args:
      ground_truth_x: tensor of ground-truth data points.
      generated_x: tensor of generated data points.
      batch_size: batch size.
      scheduled_sample_var: number of ground-truth examples in the batch.

    Returns:
      New batch with `scheduled_sample_var` rows sampled from
      ground_truth_x and the rest from generated_x, in shuffled positions.
    """
    num_ground_truth = scheduled_sample_var
    # Randomly decide which batch slots come from ground truth.
    shuffled = tf.random_shuffle(tf.range(batch_size))
    gt_slots = tf.gather(shuffled, tf.range(num_ground_truth))
    gen_slots = tf.gather(shuffled, tf.range(num_ground_truth, batch_size))
    gt_rows = tf.gather(ground_truth_x, gt_slots)
    gen_rows = tf.gather(generated_x, gen_slots)
    output = tf.dynamic_stitch([gt_slots, gen_slots], [gt_rows, gen_rows])
    if isinstance(batch_size, int):
        # Restore the static batch dimension lost by dynamic_stitch.
        output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
    return output
Sample batch with specified mix of groundtruth and generated data points. Args: ground_truth_x: tensor of ground-truth data points. generated_x: tensor of generated data points. batch_size: batch size scheduled_sample_var: number of ground-truth examples to include in batch. Returns: New batch with num_ground_truth sampled from ground_truth_x and the rest from generated_x.
def get_permission_usage(self, permission, apilevel=None):
    """Yield MethodClassAnalysis objects for API methods that use
    *permission* inside the Analysis.

    :param permission: android permission name ('android.permission.XXX')
    :param apilevel: requested API level, or None for the default
    :raises ValueError: no mapping available, or permission unused
    """
    permmap = load_api_specific_resource_module(
        'api_permission_mappings', apilevel)
    if not permmap:
        raise ValueError("No permission mapping found! Is one available? "
                         "The requested API level was '{}'".format(apilevel))
    # All API methods whose mapping mentions the permission.
    apis = {api for api, perms in permmap.items() if permission in perms}
    if not apis:
        raise ValueError("No API methods could be found which use the permission. "
                         "Does the permission exists? You requested: '{}'".format(permission))
    for cls in self.get_external_classes():
        for meth_analysis in cls.get_methods():
            if meth_analysis.get_method().permission_api_name in apis:
                yield meth_analysis
Find the usage of a permission inside the Analysis. example:: from androguard.misc import AnalyzeAPK a, d, dx = AnalyzeAPK("somefile.apk") for meth in dx.get_permission_usage('android.permission.SEND_SMS', a.get_effective_target_sdk_version()): print("Using API method {}".format(meth)) print("used in:") for _, m, _ in meth.get_xref_from(): print(m.full_name) .. note:: The permission mappings might be incomplete! See also :meth:`get_permissions`. :param permission: the name of the android permission (usually 'android.permission.XXX') :param apilevel: the requested API level or None for default :return: yields :class:`MethodClassAnalysis` objects for all API methods that use the permission
def match(tgt, opts=None):
    """Determine whether this host's minion id appears in the
    comma-separated list *tgt*."""
    if not opts:
        opts = __opts__
    try:
        my_id = opts['id']
        if (',' + my_id + ',') in tgt \
                or tgt.startswith(my_id + ',') \
                or tgt.endswith(',' + my_id):
            return True
        return my_id == tgt
    except (AttributeError, TypeError):
        # tgt is not a string; fall back to a containment test.
        try:
            return opts['id'] in tgt
        except Exception:
            return False
    # Defensive: every path above returns, so this should never run.
    log.warning(
        'List matcher unexpectedly did not return, for target %s, '
        'this is probably a bug.', tgt
    )
    return False
Determines if this host is on the list
def __list_fields(cls):
    """Yield (name, field, from_index) for every field defined on the class.

    Document-mapping fields come first with from_index=False; extra
    fields found only on the Index mapping follow with from_index=True
    (they are considered optional).
    """
    doc_mapping = cls._doc_type.mapping
    for field_name in doc_mapping:
        yield field_name, doc_mapping[field_name], False
    if not hasattr(cls.__class__, '_index'):
        return
    if not cls._index._mapping:
        return
    for field_name in cls._index._mapping:
        # Skip fields already covered by the document mapping.
        if field_name in doc_mapping:
            continue
        yield field_name, cls._index._mapping[field_name], True
Get all the fields defined for our class, if we have an Index, try looking at the index mappings as well, mark the fields from Index as optional.
def dsort(self, order):
    r"""Sort rows in place.

    :param order: sort order; a column identifier or ``{column: "A"|"D"}``
        dict, or a list of either (most-significant key first)
    """
    specs = order if isinstance(order, list) else [order]
    # Bare identifiers default to ascending ("A").
    normalized = [item if isinstance(item, dict) else {item: "A"}
                  for item in specs]
    self._in_header([list(item.keys())[0] for item in normalized])
    # Resolve each spec to a (column index, descending?) pair.
    resolved = []
    for spec in normalized:
        for key, direction in spec.items():
            index = (key if isinstance(key, int)
                     else self._header_upper.index(key.upper()))
            resolved.append((index, direction.upper() == "D"))
    # Apply sorts least-significant first; list.sort is stable, so the
    # final ordering honours the requested key precedence.
    for cindex, descending in reversed(resolved):
        self._data.sort(key=operator.itemgetter(cindex), reverse=descending)
r""" Sort rows. :param order: Sort order :type order: :ref:`CsvColFilter` .. [[[cog cog.out(exobj.get_sphinx_autodoc()) ]]] .. Auto-generated exceptions documentation for .. pcsv.csv_file.CsvFile.dsort :raises: * RuntimeError (Argument \`order\` is not valid) * RuntimeError (Invalid column specification) * ValueError (Column *[column_identifier]* not found) .. [[[end]]]
def med_filt(x, k=201):
    """Apply a length-``k`` median filter to a 1-D array ``x``.

    The left boundary is padded with the first sample; the right boundary
    is padded with the global median (NOTE(review): asymmetric — the
    docstring says "repeating endpoints"; confirm which is intended).
    """
    if x.ndim > 1:
        x = np.squeeze(x)
    assert k % 2 == 1, "Median filter length must be odd."
    assert x.ndim == 1, "Input must be one-dimensional."
    med = np.median(x)
    half = (k - 1) // 2
    # windows[i] holds the k samples centred on x[i].
    windows = np.zeros((len(x), k), dtype=x.dtype)
    windows[:, half] = x
    for i in range(half):
        shift = half - i
        windows[shift:, i] = x[:-shift]
        windows[:shift, i] = x[0]
        windows[:-shift, -(i + 1)] = x[shift:]
        windows[-shift:, -(i + 1)] = med
    return np.median(windows, axis=1)
Apply a length-k median filter to a 1D array x. Boundaries are extended by repeating endpoints.
def lons(self, degrees=True): if degrees is False: return _np.radians(self._lons()) else: return self._lons()
Return the longitudes of each column of the gridded data. Usage ----- lons = x.lons([degrees]) Returns ------- lons : ndarray, shape (nlon) 1-D numpy array of size nlon containing the longitude of each column of the gridded data. Parameters ------- degrees : bool, optional, default = True If True, the output will be in degrees. If False, the output will be in radians.
def merge_segments(segments, exif=b""):
    """Merge *exif* into JPEG *segments*, handling APP0/APP1 layout.

    Non-empty ``exif`` replaces the existing Exif segment (or is
    inserted after SOI); ``None`` removes it; empty bytes leave it alone
    (APP0 is still dropped when followed by an Exif APP1). The list is
    mutated in place and the joined bytes returned.
    """
    head = segments[1][0:2]
    if head == b"\xff\xe0" and segments[2][0:2] == b"\xff\xe1" \
            and segments[2][4:10] == b"Exif\x00\x00":
        # APP0 followed by an Exif APP1: APP0 is redundant.
        if exif:
            segments[2] = exif
            segments.pop(1)
        elif exif is None:
            segments.pop(2)
        else:
            segments.pop(1)
    elif head == b"\xff\xe0":
        # Bare APP0: overwrite with the Exif payload when one is given.
        if exif:
            segments[1] = exif
    elif head == b"\xff\xe1" and segments[1][4:10] == b"Exif\x00\x00":
        # Existing Exif APP1: replace or remove.
        if exif:
            segments[1] = exif
        elif exif is None:
            segments.pop(1)
    else:
        # No APP0/Exif marker present: insert the Exif segment after SOI.
        if exif:
            segments.insert(1, exif)
    return b"".join(segments)
Merges Exif with APP0 and APP1 manipulations.
def _proc_accept_header(self, request, result):
    """Apply the Accept-header rules to determine API version and
    content type, storing them on *result*.

    Does nothing when *result* is already populated or when no usable
    Accept header exists.

    :param request: The Request object provided by WebOb.
    :param result: The Result object to store the results in.
    """
    if result:
        return
    try:
        accept = request.headers['accept']
    except KeyError:
        # No Accept header: nothing to negotiate.
        return
    ctype, params = best_match(accept, self.types.keys())
    if ctype not in self.types:
        return
    mapped_ctype, mapped_version = self.types[ctype](params)
    if mapped_ctype:
        result.set_ctype(mapped_ctype, ctype)
    if mapped_version:
        result.set_version(mapped_version)
Process the Accept header rules for the request. Both the desired API version and content type can be determined from those rules. :param request: The Request object provided by WebOb. :param result: The Result object to store the results in.
def send_stun(self, message, addr):
    """Send a STUN *message* to the TURN server.

    *addr* is used for logging only; the payload always goes over the
    established transport via ``_send``.
    """
    logger.debug('%s > %s %s', self, addr, message)
    payload = bytes(message)
    self._send(payload)
Send a STUN message to the TURN server.
def cdna_codon_sequence_after_insertion_frameshift(
        sequence_from_start_codon,
        cds_offset_before_insertion,
        inserted_nucleotides):
    """Return (index of first mutated codon, nucleotide sequence starting
    at that codon) for a frameshift insertion.

    The insertion occurs after 0-based position
    ``cds_offset_before_insertion`` relative to the start codon.
    """
    tail = sequence_from_start_codon[cds_offset_before_insertion + 1:]
    phase = cds_offset_before_insertion % 3
    if phase == 2:
        # Insertion lands on a codon boundary: the next codon is mutated.
        mutated_codon_index = cds_offset_before_insertion // 3 + 1
        prefix = ""
    elif phase == 1:
        # Two reference nucleotides precede the insertion in its codon.
        mutated_codon_index = cds_offset_before_insertion // 3
        prefix = sequence_from_start_codon[
            cds_offset_before_insertion - 1:cds_offset_before_insertion + 1]
    else:  # phase == 0
        # One reference nucleotide precedes the insertion in its codon.
        mutated_codon_index = cds_offset_before_insertion // 3
        prefix = sequence_from_start_codon[cds_offset_before_insertion]
    return mutated_codon_index, prefix + inserted_nucleotides + tail
Returns index of mutated codon and nucleotide sequence starting at the first mutated codon.
def uuid(self, **params):
    """Generate a UUID with a human-readable representation.

    Keyword arguments are passed straight through to :meth:`humanize`.

    :return: tuple ``(human_repr, full_digest)``
    """
    digest = str(uuidlib.uuid4()).replace('-', '')
    human = self.humanize(digest, **params)
    return human, digest
Generate a UUID with a human-readable representation. Returns `(human_repr, full_digest)`. Accepts the same keyword arguments as :meth:`humanize` (they'll be passed straight through).
def add_done_callback(self, fn):
    """Attach a callable invoked with this future when it finishes.

    Args:
        fn: called with the future as its only argument when it completes
            or is cancelled; if it already has, *fn* runs immediately in
            the calling thread. Callbacks run in the order added.
    """
    with self._condition:
        if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
            self._done_callbacks.append(fn)
            return
    # Already finished/cancelled: invoke right away, outside the lock.
    fn(self)
Attaches a callable that will be called when the future finishes. Args: fn: A callable that will be called with this future as its only argument when the future completes or is cancelled. The callable will always be called by a thread in the same process in which it was added. If the future has already completed or been cancelled then the callable will be called immediately. These callables are called in the order that they were added.
def model_vis(self, context):
    """Model visibility data sink: write context data into the MS column.

    Reshapes using the column descriptor's 'shape' when available,
    otherwise guesses (nchan, 4) and logs a warning.
    """
    column = self._vis_column
    msshape = None
    try:
        coldesc = self._manager.column_descriptors[column]
    except KeyError:
        coldesc = None
    if coldesc is not None:
        try:
            msshape = [-1] + coldesc['shape'].tolist()
        except KeyError:
            msshape = None
    if msshape is None:
        guessed_shape = [self._manager._nchan, 4]
        montblanc.log.warn("Could not obtain 'shape' from the '{c}' "
            "column descriptor. Guessing it is '{gs}'.".format(
                c=column, gs=guessed_shape))
        msshape = [-1] + guessed_shape
    lrow, urow = MS.row_extents(context)
    self._manager.ordered_main_table.putcol(
        column, context.data.reshape(msshape),
        startrow=lrow, nrow=urow - lrow)
model visibility data sink
def __create_profile(self, profile, uuid, verbose):
    """Write profile information from a profile object to the registry.

    NOTE(review): despite the name, this calls ``api.edit_profile`` —
    it updates an existing unique identity rather than creating one.
    """
    kw = profile.to_dict()
    kw['country_code'] = profile.country_code
    # uuid is passed separately; country is not accepted by edit_profile.
    kw.pop('uuid')
    kw.pop('country')
    api.edit_profile(self.db, uuid, **kw)
    self.log("-- profile %s updated" % uuid, verbose)
Create profile information from a profile object
def save_save_state(self, data_dict: Dict[str, LinkItem]):
    """Save metadata about the downloaded items and this plugin to file.

    :param data_dict: mapping of link -> LinkItem to persist
    """
    save_state = self._create_save_state(data_dict)
    serialized = json_format.MessageToJson(save_state.to_protobuf())
    with self._save_state_file.open(mode='w', encoding="utf8") as writer:
        writer.write(serialized)
Save meta data about the downloaded things and the plugin to file. :param data_dict: data :type data_dict: Dict[link, ~unidown.plugin.link_item.LinkItem]
def verifyZeroInteractions(*objs):
    """Verify that no methods have been called on the given objects.

    Of limited use for partial mocks ('monkeypatched' objects/modules),
    which only record stubbed invocations.
    """
    for obj in objs:
        mock = _get_mock_or_raise(obj)
        if len(mock.invocations) > 0:
            # Report the first unexpected call.
            raise VerificationError(
                "\nUnwanted interaction: %s" % mock.invocations[0])
Verify that no methods have been called on given objs. Note that strict mocks usually throw early on unexpected, unstubbed invocations. Partial mocks ('monkeypatched' objects or modules) do not support this functionality at all, bc only for the stubbed invocations the actual usage gets recorded. So this function is of limited use, nowadays.
def scard(incard, cell):
    """Set the cardinality of a SPICE cell of any data type.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scard_c.html

    :param incard: number of elements in the cell
    :param cell: the SpiceCell to update
    :return: the updated cell
    """
    assert isinstance(cell, stypes.SpiceCell)
    card = ctypes.c_int(incard)
    libspice.scard_c(card, ctypes.byref(cell))
    return cell
Set the cardinality of a SPICE cell of any data type. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/scard_c.html :param incard: Cardinality of (number of elements in) the cell. :type incard: int :param cell: The cell. :type cell: spiceypy.utils.support_types.SpiceCell :return: The updated Cell. :rtype: spiceypy.utils.support_types.SpiceCell
def delete_namespace(parsed_xml):
    """Strip the root node's namespace from an XML document.

    :param parsed_xml: lxml ElementTree object
    :return: the same document with the root namespace removed
    """
    root_tag = parsed_xml.getroot().tag
    if root_tag.startswith('{'):
        # Tag has the form '{namespace}name'; extract the namespace.
        close_brace = root_tag.find('}')
        remove_namespace(parsed_xml, root_tag[1:close_brace])
    return parsed_xml
Identifies the namespace associated with the root node of an XML document and removes that name from the document. :param parsed_xml: lxml.Etree object. :return: Returns the source document with the namespace removed.
def show_firewall(self, firewall, **_params):
    """Fetch information about a certain firewall."""
    path = self.firewall_path % (firewall,)
    return self.get(path, params=_params)
Fetches information of a certain firewall.
def print_tally(self):
    """Print the final tally to stdout (test runs only).

    Side effect: sets ``self.update_count`` to uploads minus creates.
    """
    self.update_count = self.upload_count - self.create_count
    if not self.test_run:
        return
    print("Test run complete with the following results:")
    print("Skipped {0}. Created {1}. Updated {2}. Deleted {3}.".format(
        self.skip_count, self.create_count, self.update_count,
        self.delete_count))
Prints the final tally to stdout.
def getOverlaySortOrder(self, ulOverlayHandle):
    """Get the sort order of the overlay (see SetOverlaySortOrder).

    :return: tuple (api result code, sort order value)
    """
    fn = self.function_table.getOverlaySortOrder
    sort_order = c_uint32()
    error = fn(ulOverlayHandle, byref(sort_order))
    return error, sort_order.value
Gets the sort order of the overlay. See SetOverlaySortOrder for how this works.
def distance_color_labels(labels):
    """Recolor a labels matrix so that adjacent labels get distant numbers.

    Uses the sibling ``color_labels`` to assign adjacency-aware colors,
    then renumbers labels by their (color, label) rank.
    """
    colors = color_labels(labels, True)
    flat_labels = labels.ravel()
    order = np.lexsort((flat_labels, colors.ravel()))
    # Mark where the sorted label value changes; the leading element is
    # "different" only when it is a foreground (non-zero) label.
    different = np.hstack([[flat_labels[order[0]] > 0],
                           flat_labels[order[1:]] != flat_labels[order[:-1]]])
    recolored = colors.ravel()
    recolored[order] = np.cumsum(different).astype(colors.dtype)
    return recolored.reshape(colors.shape).astype(labels.dtype)
Recolor a labels matrix so that adjacent labels have distant numbers
def get_cytoband_map(name):
    """Fetch one cytoband map by name from the packaged gzipped JSON.

    >>> map = get_cytoband_map("ucsc-hg38")
    >>> map["1"]["p32.2"]
    [55600000, 58500000, 'gpos50']
    """
    path = pkg_resources.resource_filename(
        __name__, _data_path_fmt.format(name=name))
    # Fix: close the gzip handle deterministically (was leaked before).
    with gzip.open(path, mode="rt", encoding="utf-8") as fh:
        return json.load(fh)
Fetch one cytoband map by name >>> map = get_cytoband_map("ucsc-hg38") >>> map["1"]["p32.2"] [55600000, 58500000, 'gpos50']
def email_embed_image(email, img_content_id, img_data):
    """Attach *img_data* to *email* as an inline image.

    :param email: a django.core.mail.EmailMessage object
    :param img_content_id: Content-ID header value (without the <>)
    :param img_data: raw image bytes
    """
    attachment = MIMEImage(img_data)
    attachment.add_header('Content-ID', '<%s>' % img_content_id)
    attachment.add_header('Content-Disposition', 'inline')
    email.attach(attachment)
email is a django.core.mail.EmailMessage object
def _get_common_params(self, user_id, attributes):
    """Build the parameters shared by conversion and impression events.

    Args:
        user_id: ID for user.
        attributes: dict of user attributes and values to record.

    Returns:
        Dict of parameters common to both event kinds.
    """
    visitor = {
        self.EventParams.END_USER_ID: user_id,
        self.EventParams.SNAPSHOTS: [],
        self.EventParams.ATTRIBUTES: self._get_attributes(attributes),
    }
    return {
        self.EventParams.PROJECT_ID: self._get_project_id(),
        self.EventParams.ACCOUNT_ID: self._get_account_id(),
        self.EventParams.USERS: [visitor],
        self.EventParams.SOURCE_SDK_TYPE: 'python-sdk',
        self.EventParams.ENRICH_DECISIONS: True,
        self.EventParams.SOURCE_SDK_VERSION: version.__version__,
        self.EventParams.ANONYMIZE_IP: self._get_anonymize_ip(),
        self.EventParams.REVISION: self._get_revision(),
    }
Get params which are used same in both conversion and impression events. Args: user_id: ID for user. attributes: Dict representing user attributes and values which need to be recorded. Returns: Dict consisting of parameters common to both impression and conversion events.
def fprint(self, obj, stream=None, **kwargs):
    """Print the formatted representation of *obj* on *stream*.

    :param stream: output stream; defaults to sys.stdout
    :raises TypeError: when *obj*'s type has no registered formatter
    """
    if stream is None:
        stream = sys.stdout
    # Fix: merge per-call kwargs into a COPY — the original mutated
    # self.options, so kwargs leaked into every subsequent call.
    options = dict(self.options)
    options.update(kwargs)
    if isinstance(obj, dimod.SampleSet):
        self._print_sampleset(obj, stream, **options)
        return
    raise TypeError("cannot format type {}".format(type(obj)))
Prints the formatted representation of the object on stream
def view_class2(self, fatherid=''):
    """Render or redirect the publishing page for a 2nd-level category.

    Admin-only; non-admins get False.
    """
    if not self.is_admin():
        return False
    kwd = {
        'class1str': self.format_class2(fatherid),
        'parentid': '0',
        'parentlist': MCategory.get_parent_list(),
    }
    if fatherid.endswith('00'):
        # Top of a range: show the generic publish form.
        self.render('misc/publish/publish2.html',
                    userinfo=self.userinfo, kwd=kwd)
    else:
        catinfo = MCategory.get_by_uid(fatherid)
        self.redirect('/{1}/_cat_add/{0}'.format(
            fatherid, router_post[catinfo.kind]))
Publishing from 2ed range category.
def _download_from_s3(bucket, key, version=None):
    """Download a file from the given S3 location and return its bytes.

    :param bucket: S3 bucket name
    :param key: S3 object key (file path)
    :param version: optional version id of the object
    :raises botocore.exceptions.ClientError: when the download fails
    """
    s3 = boto3.client('s3')
    extra_args = {"VersionId": version} if version else {}
    with tempfile.TemporaryFile() as fp:
        try:
            s3.download_fileobj(bucket, key, fp, ExtraArgs=extra_args)
            fp.seek(0)
            return fp.read()
        except botocore.exceptions.ClientError:
            LOG.error("Unable to download Swagger document from S3 Bucket=%s Key=%s Version=%s",
                      bucket, key, version)
            raise
Download a file from given S3 location, if available. Parameters ---------- bucket : str S3 Bucket name key : str S3 Bucket Key aka file path version : str Optional Version ID of the file Returns ------- str Contents of the file that was downloaded Raises ------ botocore.exceptions.ClientError if we were unable to download the file from S3
def _get_row_tag(row, tag):
    """Parse *row* and collect the cleaned text of each *tag* element.

    :param row: HTML row
    :param tag: tag name to extract
    :return: list of labels, or None when every label is empty
    """
    labels = []
    any_content = False
    for cell in row.find_all(tag):
        labels.append(String(cell.text).strip_bad_html())
        if labels[-1]:
            any_content = True
    return labels if any_content else None
Parses row and gets columns matching tag :param row: HTML row :param tag: tag to get :return: list of labels in row
def set_mode_manual(self):
    """Enter MANUAL mode, via MAVLink 1.0 or the legacy action API."""
    if self.mavlink10():
        self.mav.command_long_send(
            self.target_system, self.target_component,
            mavlink.MAV_CMD_DO_SET_MODE, 0,
            mavlink.MAV_MODE_MANUAL_ARMED,
            0, 0, 0, 0, 0, 0)
    else:
        # Pre-1.0 protocol uses the numeric action interface.
        MAV_ACTION_SET_MANUAL = 12
        self.mav.action_send(self.target_system, self.target_component,
                             MAV_ACTION_SET_MANUAL)
enter MANUAL mode
def decrease_poolsize(self):
    """Shrink the session pool by one session, in response to server
    rate-limiting errors.

    :raises SessionPoolMinSizeReached: when the pool is already at size 1
    """
    if self._session_pool_size <= 1:
        raise SessionPoolMinSizeReached('Session pool size cannot be decreased further')
    with self._session_pool_lock:
        # Re-check under the lock: another thread may have shrunk it.
        if self._session_pool_size <= 1:
            log.debug('Session pool size was decreased in another thread')
            return
        log.warning('Lowering session pool size from %s to %s',
                    self._session_pool_size, self._session_pool_size - 1)
        self.get_session().close()
        self._session_pool_size -= 1
Decreases the session pool size in response to error messages from the server requesting to rate-limit requests. We decrease by one session per call.
def sha1_hash(string):
    """Return the hex SHA-1 digest of *string* (encoded with the default
    codec, UTF-8)."""
    return sha1(string.encode()).hexdigest()
Return the SHA1 of the input string.
def lemmatize(self):
    """Lemmatize every Unit in ``self.unit_list`` in place.

    A unit's text is replaced by its lemma only when that lemma is one
    of the permissible words in ``self.lemmas``, so each permissible
    word maps onto the canonical form that has a term vector.
    """
    for unit in self.unit_list:
        # Fix: compute the lemma once instead of twice per unit.
        lemma = lemmatizer.lemmatize(unit.text)
        if lemma in self.lemmas:
            unit.text = lemma
Lemmatize all Units in self.unit_list. Modifies: - self.unit_list: converts the .text property into its lemmatized form. This method lemmatizes all inflected variants of permissible words to those words' respective canonical forms. This is done to ensure that each instance of a permissible word will correspond to a term vector with which semantic relatedness to other words' term vectors can be computed. (Term vectors were derived from a corpus in which inflected words were similarly lemmatized, meaning that , e.g., 'dogs' will not have a term vector to use for semantic relatedness computation.)
def synchronize_simultaneous(self, node_ip):
    """Keep a passive simultaneous node's NAT mapping fresh before a TCP
    hole-punching attempt.

    If too long has passed since the node's last connection relative to
    a live candidate, ask the node to reconnect instead of proceeding,
    since adjacent mappings on some NAT types can be stolen.
    """
    node = self.factory.nodes["simultaneous"][node_ip]
    for candidate in self.factory.candidates[node_ip]:
        if not candidate["con"].connected:
            continue
        if candidate["time"] - node["time"] > self.challege_timeout:
            # Mapping may be stale: request a fresh connection.
            node["con"].send_line("RECONNECT")
            return
    self.cleanup_candidates(node_ip)
    self.propogate_candidates(node_ip)
Because adjacent mappings for certain NAT types can be stolen by other connections, the purpose of this function is to ensure the last connection by a passive simultaneous node is recent compared to the time for a candidate, to increase the chance that the predicted mappings remain active for the TCP hole punching attempt.
def create_thread(self, body):
    """Implement create_thread as required by the parent interface.

    Delegates to ``add_comment`` with ``allow_create=True``, then builds
    a Response indicating success.

    :return: Response with code "OK" and status_code 200
    """
    self.add_comment(body, allow_create=True)
    the_response = Response()
    the_response.code = "OK"
    the_response.status_code = 200
    # Fix: the response was constructed but never returned to the caller.
    return the_response
Implement create_thread as required by parent. This basically just calls add_comment with allow_create=True and then builds a response object to indicate everything is fine.
def has_perm(self, user, perm, obj=None, *args, **kwargs):
    """Test user permissions for a single action and object.

    :param user: the user to test
    :param perm: the action to test (str)
    :param obj: object path, or an object exposing get_permissions_object
    :returns: bool -- whether the action is permitted
    """
    try:
        if not self._obj_ok(obj):
            if not hasattr(obj, 'get_permissions_object'):
                raise InvalidPermissionObjectException
            obj = obj.get_permissions_object(perm)
        return user.permset_tree.allow(Action(perm), obj)
    except ObjectDoesNotExist:
        # Missing backing objects mean "not permitted", not an error.
        return False
Test user permissions for a single action and object. :param user: The user to test. :type user: ``User`` :param perm: The action to test. :type perm: ``str`` :param obj: The object path to test. :type obj: ``tutelary.engine.Object`` :returns: ``bool`` -- is the action permitted?
def on_train_end(self, **kwargs: Any) -> None:
    """Store the notebook as an MLflow artifact and terminate the run."""
    self.client.log_artifact(run_id=self.run, local_path=self.nb_path)
    self.client.set_terminated(run_id=self.run)
Store the notebook and stop run
def blockgen(bytes, block_size=16):
    """Block generator for pprp: yield successive chunks of *bytes*.

    Yields full ``block_size`` chunks and finally any short remainder.
    NOTE: the first parameter shadows the builtin ``bytes``; kept for
    backward compatibility with keyword callers.
    """
    for start in range(0, len(bytes), block_size):
        chunk = bytes[start:start + block_size]
        if not chunk:
            break
        yield chunk
        if len(chunk) < block_size:
            # A short chunk can only be the last one.
            break
a block generator for pprp
def install_ui_colorscheme(self, name, style_dict):
    """Install a new UI color scheme under *name*."""
    assert isinstance(name, six.text_type)
    assert isinstance(style_dict, dict)
    self.ui_styles[name] = style_dict
Install a new UI color scheme.
def find_seq_id(block, name, case_sensitive=True):
    """Given part of a sequence ID, return the first full ID containing it.

    Example::

        >>> find_seq_id(block, '2QG5')
        'gi|158430190|pdb|2QG5|A'

    Raises ValueError (via find_seq_rec) if no matching key is found.
    """
    record = find_seq_rec(block, name, case_sensitive)
    return record['id']
Given part of a sequence ID, find the first actual ID that contains it. Example:: >>> find_seq_id(block, '2QG5') 'gi|158430190|pdb|2QG5|A' Raise a ValueError if no matching key is found.
def backfill_unk_emb(self, E, filled_words):
    """Backfill rows of *E* with the unknown-token embedding.

    :param E: embedding matrix of shape (vocab_size, emb_dim)
    :param filled_words: words whose rows are left untouched

    NOTE: for internal use.
    """
    unk_emb = E[self[self._unk]]
    for row, word in enumerate(self):
        if word not in filled_words:
            E[row] = unk_emb
Backfills an embedding matrix with the embedding for the unknown token. :param E: original embedding matrix of dimensions `(vocab_size, emb_dim)`. :param filled_words: these words will not be backfilled with unk. NOTE: this function is for internal use.
def get_layout(self):
    """Return CSS classes for the workflow element in the template,
    determined from the workflow's characteristics."""
    layout = ['modal'] if self.request.is_ajax() else ['static_page']
    if self.workflow_class.wizard:
        layout.append('wizard')
    return layout
Returns classes for the workflow element in template. The returned classes are determied based on the workflow characteristics.
def token(cls: Type[SIGType], pubkey: str) -> SIGType:
    """Return a SIG instance built from *pubkey*.

    :param pubkey: public key of the signature issuer
    :return: new instance of *cls* with pubkey set
    """
    instance = cls()
    instance.pubkey = pubkey
    return instance
Return SIG instance from pubkey :param pubkey: Public key of the signature issuer :return:
def color_format():
    """Return a colored log formatter based on BASE_FORMAT, falling back
    to no colors when the system does not support them."""
    base = BASE_COLOR_FORMAT if supports_color() else BASE_FORMAT
    return ColoredFormatter(color_message(base))
Main entry point to get a colored formatter, it will use the BASE_FORMAT by default and fall back to no colors if the system does not support it
def request(self, url, method, body=None, headers=None, **kwargs):
    """Issue an HTTP request without authentication.

    :param content_type: (in kwargs) defaults to 'application/json'
    :return: tuple (requests.Response, response body text)
    """
    content_type = kwargs.pop('content_type', None) or 'application/json'
    # Fix: copy so the caller's headers dict is never mutated by the
    # setdefault/User-Agent writes below.
    headers = dict(headers) if headers else {}
    headers.setdefault('Accept', content_type)
    if body:
        headers.setdefault('Content-Type', content_type)
    headers['User-Agent'] = self.USER_AGENT
    resp = requests.request(
        method, url, data=body, headers=headers,
        verify=self.verify_cert, timeout=self.timeout, **kwargs)
    return resp, resp.text
Request without authentication.
def name(self):
    """Return the defrule's name decoded to a Python string."""
    raw = lib.EnvGetDefruleName(self._env, self._rule)
    return ffi.string(raw).decode()
Rule name.
def run(user, port=4242):
    """Build a temporary directory with a visualization and serve it
    over HTTP at http://0.0.0.0:<port> until interrupted.

    Restores the original working directory on exit.
    """
    original_cwd = os.getcwd()
    export_dir = export(user)
    os.chdir(export_dir)
    handler = SimpleHTTPServer.SimpleHTTPRequestHandler
    try:
        httpd = SocketServer.TCPServer(("", port), handler)
        print("Serving bandicoot visualization at http://0.0.0.0:%i" % port)
        httpd.serve_forever()
    except KeyboardInterrupt:
        print("^C received, shutting down the web server")
        httpd.server_close()
    finally:
        os.chdir(original_cwd)
Build a temporary directory with a visualization and serve it over HTTP. Examples -------- >>> bandicoot.visualization.run(U) Successfully exported the visualization to /tmp/tmpsIyncS Serving bandicoot visualization at http://0.0.0.0:4242
def initialize_segment_register_x64(self, state, concrete_target):
    """Synchronize the angr state's gs register from the concrete process.

    :param state: state which will be modified
    :param concrete_target: concrete target the register is read from
    :return: None
    """
    _l.debug("Synchronizing gs segment register")
    gs_value = self._read_gs_register_x64(concrete_target)
    state.regs.gs = gs_value
Set the gs register in the angr to the value of the fs register in the concrete process :param state: state which will be modified :param concrete_target: concrete target that will be used to read the fs register :return: None
def authenticate(self, request):
    """Authenticate via DOAC's authentication middleware, then pull the
    user and access token off the request.

    :return: (user, access_token) tuple, or None for anonymous requests
    :raises exceptions.AuthenticationFailed: handler or token errors
    """
    from doac.middleware import AuthenticationMiddleware
    try:
        AuthenticationMiddleware().process_request(request)
    except Exception:
        # Fix: was a bare 'except:', which also swallowed SystemExit
        # and KeyboardInterrupt.
        raise exceptions.AuthenticationFailed("Invalid handler")
    if not hasattr(request, "user") or not request.user.is_authenticated():
        return None
    if not hasattr(request, "access_token"):
        raise exceptions.AuthenticationFailed("Access token was not valid")
    return request.user, request.access_token
Send the request through the authentication middleware that is provided with DOAC and grab the user and token from it.
def get_rows(self):
    """Return the row index names of the first available extension matrix.

    Checks the candidate dataframe attributes in a fixed order and uses
    the first one that is present and non-None; warns and returns None
    when nothing is available.
    """
    candidates = ['F', 'FY', 'M', 'S',
                  'D_cba', 'D_pba', 'D_imp', 'D_exp',
                  'D_cba_reg', 'D_pba_reg', 'D_imp_reg', 'D_exp_reg',
                  'D_cba_cap', 'D_pba_cap', 'D_imp_cap', 'D_exp_cap']
    for attr in candidates:
        if attr in self.__dict__ and getattr(self, attr) is not None:
            return getattr(self, attr).index.get_values()
    # No candidate attribute was populated.
    logging.warn("No attributes available to get row names")
    return None
Returns the name of the rows of the extension
def _get_mine(fun):
    """Return (and cache) the mine function results from the targeted
    minions. Small helper to avoid redundant mine.get calls."""
    cached = _CACHE.get(fun)
    if cached:
        return cached
    net_runner_opts = _get_net_runner_opts()
    _CACHE[fun] = __salt__['mine.get'](net_runner_opts.get('target'),
                                       fun,
                                       tgt_type=net_runner_opts.get('expr_form'))
    return _CACHE[fun]
Return the mine function from all the targeted minions. Just a small helper to avoid redundant pieces of code.
def read_fwf(self, *args, **kwargs):
    """Fetch the target and delegate to :func:`pandas.read_fwf`.

    Do not supply read_fwf's first argument; the file path comes from
    the resolved resource target.
    """
    import pandas
    target = self.resolved_url.get_resource().get_target()
    return pandas.read_fwf(target.fspath, *args, **kwargs)
Fetch the target and pass through to pandas.read_fwf. Don't provide the first argument of read_fwf(); it is supplied internally.
def delete_entity(self, entity_id, mount_point=DEFAULT_MOUNT_POINT):
    """Delete an entity and all its associated aliases.

    Supported methods:
        DELETE: /{mount_point}/entity/id/:id. Produces: 204 (empty body)

    :param entity_id: identifier of the entity
    :param mount_point: the "path" the secret engine was mounted on
    :return: the response of the request
    :rtype: requests.Response
    """
    api_path = '/v1/{mount_point}/entity/id/{id}'.format(
        mount_point=mount_point, id=entity_id)
    return self._adapter.delete(url=api_path)
Delete an entity and all its associated aliases. Supported methods: DELETE: /{mount_point}/entity/id/:id. Produces: 204 (empty body) :param entity_id: Identifier of the entity. :type entity_id: str :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The response of the request. :rtype: requests.Response
def add(self, data_bytes):
    """Feed an ASCII string or an iterable of byte values into the signature.

    Strings are first converted to their ordinal values; each byte is then
    folded into the running CRC-64 (``self._crc``) using the polynomial
    ``Signature.POLY`` with the 64-bit mask ``Signature.MASK64``.
    """
    try:
        # Python 2 path: accept str/unicode via ``basestring``.
        if isinstance(data_bytes, basestring):
            data_bytes = map(ord, data_bytes)
    except NameError:
        # Python 3 path: ``basestring`` does not exist; accept ``str``.
        if isinstance(data_bytes, str):
            data_bytes = map(ord, data_bytes)
    for b in data_bytes:
        # XOR the byte into the top 8 bits of the 64-bit register.
        self._crc ^= (b << 56) & Signature.MASK64
        for _ in range(8):
            if self._crc & (1 << 63):
                self._crc = ((self._crc << 1) & Signature.MASK64) ^ Signature.POLY
            else:
                # NOTE(review): this branch shifts without re-applying
                # MASK64, so _crc can temporarily exceed 64 bits until the
                # next masked operation; bit-63 tests still behave the same,
                # but the asymmetry looks deliberate -- confirm before
                # "fixing" it.
                self._crc <<= 1
Feed ASCII string or bytes to the signature function
def change_password(self, new_password):
    """Set a new password for this user, persist it, and emit the
    ``password_changed`` signal.

    :param new_password: the new plaintext password to hash and store.
    """
    self.set_password(new_password)
    self.save()
    # Signal fires only after the password has been saved successfully.
    password_changed.send(sender=self.__class__, user=self)
Changes password and sends a signal
def split_volume_from_journal(citation_elements):
    """Move a series/volume fragment out of a journal title.

    Some journal titles arrive with the series attached after a semicolon,
    e.g. ``"Phys. Rev.;D"``.  For those JOURNAL elements the part after the
    last semicolon is stripped from the title and prepended to the
    element's volume.  The list is modified in place and returned.
    """
    for element in citation_elements:
        if element['type'] == 'JOURNAL' and ';' in element['title']:
            title, series = element['title'].rsplit(';', 1)
            element['title'] = title
            element['volume'] = series + element['volume']
    return citation_elements
Split volume from journal title We need this because sometimes the series/volume is attached to the journal title instead of being stored in the volume field. In those cases we move it from the title to the volume
def _get_item_class(self, url): if '/layers/' in url: return Layer elif '/tables/' in url: return Table elif '/sets/' in url: return Set else: raise NotImplementedError("No support for catalog results of type %s" % url)
Return the model class matching a URL
def trash(self, request, **kwargs):
    """Pseudo-delete a `Content` instance and remove it from the
    ElasticSearch index.

    The content row is not actually deleted; clearing the ``indexed`` flag
    merely hides it by removing it from the ES index.

    :param request: a WSGI request object
    :param kwargs: keyword arguments (optional)
    :return: `rest_framework.response.Response`
    """
    content = self.get_object()
    content.indexed = False
    content.save()
    # Record who trashed the content for auditing.
    LogEntry.objects.log(request.user, content, "Trashed")
    return Response({"status": "Trashed"})
Pseudo-deletes a `Content` instance and removes it from the ElasticSearch index. The content is not actually deleted, merely hidden by being removed from the ES index. :param request: a WSGI request object :param kwargs: keyword arguments (optional) :return: `rest_framework.response.Response`
def dump_img(fname):
    """Output the image as text, one ``str()``-formatted pixel row at a time.

    :param fname: path to an image file readable by PIL.
    :return: the concatenated textual representation of each pixel row.
    """
    img = Image.open(fname)
    width, height = img.size
    pixels = list(img.getdata())
    # getdata() is row-major: row r occupies pixels[r*width:(r+1)*width].
    # (The previous version sliced overlapping windows starting at every
    # column index, which neither aligned to rows nor covered the image.)
    txt = ''
    for row in range(height):
        txt += str(pixels[row * width:(row + 1) * width])
    return txt
output the image as text
def model_post_delete(sender, instance, **kwargs):
    """Signal handler run after any model instance is deleted via Django ORM.

    Schedules an observer notification for the deleted row once the
    surrounding transaction commits.  Deletes of this app's own models are
    skipped.

    :param sender: Model class that was deleted
    :param instance: The actual instance that was removed
    """
    # Skip our own bookkeeping models.
    if sender._meta.app_label == 'rest_framework_reactive':
        return

    def notify():
        # Read table/pk lazily, at commit time, exactly like the closure
        # semantics of the captured sender/instance.
        notify_observers(sender._meta.db_table, ORM_NOTIFY_KIND_DELETE, instance.pk)

    transaction.on_commit(notify)
Signal emitted after any model is deleted via Django ORM. :param sender: Model class that was deleted :param instance: The actual instance that was removed
def random_string(length=6, alphabet=string.ascii_letters+string.digits):
    """Return a random string of given length and alphabet.

    Default alphabet is url-friendly (base62).

    :param length: number of characters to generate.
    :param alphabet: candidate characters to draw from.
    """
    # ``range`` instead of ``xrange`` so this also runs on Python 3,
    # where ``xrange`` no longer exists.
    return ''.join(random.choice(alphabet) for _ in range(length))
Return a random string of given length and alphabet. Default alphabet is url-friendly (base62).
def remove(self, iterable, data=None, index=0):
    """Remove an element from the trie.

    Args:
        iterable (hashable): key used to find what is to be removed
        data (object): if truthy, only this datum is removed from the node;
            otherwise all data at the node is cleared
        index (int): position within ``iterable`` currently being matched
            (used internally by the recursion)

    Returns:
        bool: True if it was removed, False if it was not removed
    """
    if index == len(iterable):
        # Reached the node addressed by the full key.
        if self.is_terminal:
            if data:
                # NOTE(review): a falsy datum (0, '', etc.) falls through to
                # the clear-all branch below -- confirm ``is not None`` was
                # not intended here.
                self.data.remove(data)
                if len(self.data) == 0:
                    self.is_terminal = False
            else:
                self.data.clear()
                self.is_terminal = False
            return True
        else:
            # Key is a prefix of stored keys but not itself stored.
            return False
    elif iterable[index] in self.children:
        # Recurse into the child matching the next key element.
        # NOTE(review): emptied child nodes are never pruned from the trie.
        return self.children[iterable[index]].remove(iterable, index=index+1, data=data)
    else:
        # Key not present in the trie at all.
        return False
Remove an element from the trie Args iterable(hashable): key used to find what is to be removed data(object): data associated with the key index(int): index of what is to be removed Returns: bool: True: if it was removed False: if it was not removed
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit buy order.

    Any order with an icebergQty MUST have timeInForce set to GTC.
    All remaining keyword arguments (symbol, quantity, price, stopPrice,
    icebergQty, newClientOrderId, newOrderRespType, recvWindow, ...) are
    forwarded unchanged to :meth:`order_limit`; see the order endpoint for
    the full set of options and the response format.

    :param timeInForce: default Good till cancelled
    :type timeInForce: str
    :returns: API response
    :raises: BinanceRequestException, BinanceAPIException,
        BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException,
        BinanceOrderUnknownSymbolException,
        BinanceOrderInactiveSymbolException
    """
    params['side'] = self.SIDE_BUY
    return self.order_limit(timeInForce=timeInForce, **params)
Send in a new limit buy order Any order with an icebergQty MUST have timeInForce set to GTC. :param symbol: required :type symbol: str :param quantity: required :type quantity: decimal :param price: required :type price: str :param timeInForce: default Good till cancelled :type timeInForce: str :param newClientOrderId: A unique id for the order. Automatically generated if not sent. :type newClientOrderId: str :param stopPrice: Used with stop orders :type stopPrice: decimal :param icebergQty: Used with iceberg orders :type icebergQty: decimal :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT. :type newOrderRespType: str :param recvWindow: the number of milliseconds the request is valid for :type recvWindow: int :returns: API response See order endpoint for full response options :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
def is_valid_short_number_for_region(short_numobj, region_dialing_from):
    """Test whether a short number matches a valid pattern in a region.

    Note that this doesn't verify the number is actually in use, which is
    impossible to tell by just looking at the number itself.

    Arguments:
    short_numobj -- the short number to check as a PhoneNumber object.
    region_dialing_from -- the region from which the number is dialed

    Return whether the short number matches a valid pattern
    """
    if not _region_dialing_from_matches_number(short_numobj, region_dialing_from):
        return False
    metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
    if metadata is None:
        return False
    short_number = national_significant_number(short_numobj)
    # First check against the region's general description pattern.
    if not _matches_possible_number_and_national_number(short_number,
                                                        metadata.general_desc):
        return False
    # Then against the dedicated short-code pattern, which must exist.
    short_number_desc = metadata.short_code
    if short_number_desc.national_number_pattern is None:
        return False
    return _matches_possible_number_and_national_number(short_number,
                                                        short_number_desc)
Tests whether a short number matches a valid pattern in a region. Note that this doesn't verify the number is actually in use, which is impossible to tell by just looking at the number itself. Arguments: short_numobj -- the short number to check as a PhoneNumber object. region_dialing_from -- the region from which the number is dialed Return whether the short number matches a valid pattern
def setPriority(self, queue, priority):
    """Move a registered sub-queue to a new priority level.

    Detaches the sub-queue from its current priority queue, updates the
    bookkeeping entry in ``queueindex`` in place, and re-attaches the
    sub-queue under the (possibly newly created) priority level.
    """
    entry = self.queueindex[queue]
    old_priority, subqueue = entry[0], entry[1]
    self.queues[old_priority].removeSubQueue(subqueue)
    target = self.queues.setdefault(priority,
                                    CBQueue.MultiQueue(self, priority))
    # Mutate the shared index entry so other references see the new priority.
    entry[0] = priority
    target.addSubQueue(subqueue)
Set priority of a sub-queue
def cancel(self):
    """Instruct the cluster to cancel the running job.

    Has no effect if the job is not running (status beyond COMPLETED).
    """
    if self.status > COMPLETED:
        return
    cancel_cmd = self.backend.cmd_cancel(self)
    # The cancel command may legitimately fail (e.g. job already gone),
    # hence ignore_exit_code.
    self._run_cmd(cancel_cmd, self.remote, ignore_exit_code=True, ssh=self.ssh)
    self._status = CANCELLED
    self.dump()
Instruct the cluster to cancel the running job. Has no effect if job is not running
def has_property(obj, name):
    """Check if an object has a property with the specified name.

    The comparison is case-insensitive.

    :param obj: an object to introspect.
    :param name: a name of the property to check.
    :return: true if the object has the property and false if it doesn't.
    :raises Exception: if ``obj`` or ``name`` is None.
    """
    # ``is None`` instead of ``== None`` -- identity check, and immune to
    # objects overriding __eq__.
    if obj is None:
        raise Exception("Object cannot be null")
    if name is None:
        raise Exception("Property name cannot be null")

    name = name.lower()
    for property_name in dir(obj):
        if property_name.lower() != name:
            continue
        # Renamed from ``property`` to avoid shadowing the builtin.
        property_value = getattr(obj, property_name)
        if PropertyReflector._is_property(property_value, property_name):
            return True
    return False
Checks if object has a property with specified name. :param obj: an object to introspect. :param name: a name of the property to check. :return: true if the object has the property and false if it doesn't.
def get_playlists(self, search, start=0, max_items=100):
    """Search for playlists.

    See get_music_service_information for details on the arguments.

    Note: Un-intuitively this method returns MSAlbumList items. See note
    in class doc string for details.
    """
    search_type = 'playlists'
    return self.get_music_service_information(search_type, search,
                                              start, max_items)
Search for playlists. See get_music_service_information for details on the arguments. Note: Un-intuitively this method returns MSAlbumList items. See note in class doc string for details.
def _should_run(het_file): has_hets = False with open(het_file) as in_handle: for i, line in enumerate(in_handle): if i > 1: has_hets = True break return has_hets
Check for enough input data to proceed with analysis.
def cancel(self):
    """Cancel an EighthScheduledActivity.

    This does nothing besides set the cancelled flag and save the object.
    """
    logger.debug("Running cancel hooks: {}".format(self))
    # Guard clause: nothing to do when already cancelled.
    if self.cancelled:
        return
    logger.debug("Cancelling {}".format(self))
    self.cancelled = True
    self.save()
Cancel an EighthScheduledActivity. This does nothing besides set the cancelled flag and save the object.
def end_request(self):
    """End the request body.

    For chunked requests, writes the terminating chunk (including any
    declared trailer headers); non-chunked requests need no terminator.
    """
    if not self._chunked:
        return
    if self._trailer:
        trailers = [(name, get_header(self._headers, name))
                    for name in self._trailer]
    else:
        trailers = None
    self._protocol.writer.write(create_chunked_body_end(trailers))
End the request body.
def next_token(self):
    """Advance the tokenizer and return the next logical token.

    Tokens queued by lookahead are consumed first; otherwise a fresh token
    is parsed from the input.  ``current_token`` is updated either way.
    """
    if self.lookahead:
        token = self.lookahead.popleft()
    else:
        token = self._parse_next_token()
    self.current_token = token
    return token
Returns the next logical token, advancing the tokenizer.
def lincc(x, y):
    """Calculates Lin's concordance correlation coefficient.

    Usage:   lincc(x, y)    where x, y are equal-length arrays
    Returns: Lin's CC
    """
    nx = float(len(x))
    ny = float(len(y))
    # Rescale the sample statistics (n-1 denominator) to population form.
    covar = cov(x, y) * (len(x) - 1) / nx
    xvar = var(x) * (len(x) - 1) / nx
    yvar = var(y) * (len(y) - 1) / ny
    location_shift = (mean(x) - mean(y)) ** 2
    return (2 * covar) / (xvar + yvar + location_shift)
Calculates Lin's concordance correlation coefficient. Usage: lincc(x,y) where x, y are equal-length arrays Returns: Lin's CC
def draw(self, painter, options, widget):
    """Handle the draw event for the widget.

    Delegates all painting to the declaration object, which implements the
    actual rendering logic.

    :param painter: active painter to draw with (presumably a QPainter --
        confirm against the declaration's draw signature).
    :param options: style options for the draw call.
    :param widget: the widget being painted, if any.
    """
    self.declaration.draw(painter, options, widget)
Handle the draw event for the widget.
def lookup(self, hostname):
    """Return a dict of config options for a given hostname.

    All matching host blocks are merged with first-writer-wins semantics,
    so earlier (more specific) blocks take precedence; the catch-all block
    is demoted to the end.  Keys are normalized to lowercase.

    @param hostname: the hostname to lookup
    @type hostname: str
    """
    # All host blocks whose pattern matches the hostname.
    matches = [x for x in self._config if fnmatch.fnmatch(hostname, x['host'])]
    # NOTE(review): assumes the first match is always the "Host *" entry
    # (and that at least one match exists); it is moved to the end so its
    # values get the lowest precedence in the merge below -- confirm both
    # assumptions hold for all config files.
    _star = matches.pop(0)
    matches.append(_star)
    ret = {}
    for m in matches:
        for k,v in m.iteritems():  # Python 2 dict iteration
            # First writer wins: earlier matches take precedence.
            if not k in ret:
                ret[k] = v
    ret = self._expand_variables(ret, hostname)
    # The synthetic 'host' key is internal bookkeeping, not a config option.
    del ret['host']
    return ret
Return a dict of config options for a given hostname. The host-matching rules of OpenSSH's C{ssh_config} man page are used, which means that all configuration options from matching host specifications are merged, with more specific hostmasks taking precedence. In other words, if C{"Port"} is set under C{"Host *"} and also C{"Host *.example.com"}, and the lookup is for C{"ssh.example.com"}, then the port entry for C{"Host *.example.com"} will win out. The keys in the returned dict are all normalized to lowercase (look for C{"port"}, not C{"Port"}). No other processing is done to the keys or values. @param hostname: the hostname to lookup @type hostname: str
def cross_entropy_calc(TOP, P, POP):
    """Calculate cross entropy.

    :param TOP: test outcome positive
    :type TOP : dict
    :param P: condition positive
    :type P : dict
    :param POP: population
    :type POP : dict
    :return: cross entropy as float (the string "None" on failure)
    """
    try:
        total = 0
        for key in TOP.keys():
            reference_likelihood = P[key] / POP[key]
            response_likelihood = TOP[key] / POP[key]
            # Terms with a zero likelihood on either side contribute nothing.
            if response_likelihood != 0 and reference_likelihood != 0:
                total += reference_likelihood * math.log(response_likelihood, 2)
        return -total
    except Exception:
        # Library convention: report failures as the string "None".
        return "None"
Calculate cross entropy. :param TOP: test outcome positive :type TOP : dict :param P: condition positive :type P : dict :param POP: population :type POP : dict :return: cross entropy as float
def conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs):
    """A block of standard 1d convolutions.

    Thin wrapper that runs the shared ``conv_block_internal`` machinery
    with the 1-D convolution op.

    Args:
      inputs: input tensor.
      filters: number of output filters.
      dilation_rates_and_kernel_sizes: presumably a list of
        (dilation rate, kernel size) pairs, as the name suggests --
        see conv_block_internal for the exact contract.
      **kwargs: forwarded unchanged to ``conv_block_internal``.

    Returns:
      the output of ``conv_block_internal``.
    """
    return conv_block_internal(conv1d, inputs, filters,
                               dilation_rates_and_kernel_sizes, **kwargs)
A block of standard 1d convolutions.
def force_rerun(flag, outfile):
    """Check if we should force rerunning of a command if an output file exists.

    Args:
        flag (bool): Flag to force rerun.
        outfile (str): Path to output file which may already exist.

    Returns:
        bool: True when a rerun is needed -- the flag is set, the output
        file is missing, or the output file is empty.
    """
    if flag:
        return True
    if not op.exists(outfile):
        return True
    if not is_non_zero_file(outfile):
        return True
    return False
Check if we should force rerunning of a command if an output file exists. Args: flag (bool): Flag to force rerun. outfile (str): Path to output file which may already exist. Returns: bool: If we should force rerunning of a command Examples: >>> force_rerun(flag=True, outfile='/not/existing/file.txt') True >>> force_rerun(flag=False, outfile='/not/existing/file.txt') True >>> force_rerun(flag=True, outfile='./utils.py') True >>> force_rerun(flag=False, outfile='./utils.py') False
def md5hash(self):
    """Return the MD5 hash of the file content as a base64 string."""
    return b64_string(hashlib.md5(self.content).digest())
Return the MD5 hash string of the file content
def _indent(indent=0, quote='', indent_char=' '):
    """Indent util function, compute new indent_string.

    Builds an indentation prefix of width ``indent`` incorporating
    ``quote`` and appends it to the module-level ``INDENT_STRINGS`` list.

    :param indent: target indent width; non-positive values produce
        backspace characters (``\\x08``) instead of padding, presumably to
        outdent on a terminal -- confirm against callers.
    :param quote: prefix string included in the indent.
    :param indent_char: character used for the padding.
    """
    if indent > 0:
        # quote first, then pad out to the requested width.
        indent_string = ''.join((
            str(quote),
            (indent_char * (indent - len(quote)))
        ))
    else:
        # Non-positive indent: emit backspaces, then the quote.
        indent_string = ''.join((
            ('\x08' * (-1 * (indent - len(quote)))),
            str(quote))
        )
    # Empty prefixes are not recorded.
    if len(indent_string):
        INDENT_STRINGS.append(indent_string)
Indent util function, compute new indent_string
def parse_memory_value(s):
    """Parse a string that contains a number of bytes, optionally with a unit like MB.

    @return the number of bytes encoded by the string
    """
    number, unit = split_number_and_unit(s)
    if not unit or unit == 'B':
        return number
    # Each step up the scale multiplies by _BYTE_FACTOR once more.
    exponents = {'kB': 1, 'MB': 2, 'GB': 3, 'TB': 4}
    if unit in exponents:
        return number * _BYTE_FACTOR ** exponents[unit]
    raise ValueError('unknown unit: {} (allowed are B, kB, MB, GB, and TB)'.format(unit))
Parse a string that contains a number of bytes, optionally with a unit like MB. @return the number of bytes encoded by the string
def take_at_most_n_seconds(time_s, func, *args, **kwargs):
    """A function that returns whether a function call took less than time_s.

    NOTE: The function call is not killed and will run indefinitely if hung.

    Args:
      time_s: Maximum amount of time to take.
      func: Function to call.
      *args: Arguments to call the function with.
      **kwargs: Keyword arguments to call the function with.

    Returns:
      True if the function finished in less than time_s seconds.
    """
    worker = threading.Thread(target=func, args=args, kwargs=kwargs)
    worker.start()
    worker.join(time_s)
    # Still alive after the timed join means the call exceeded the budget.
    return not worker.is_alive()
A function that returns whether a function call took less than time_s. NOTE: The function call is not killed and will run indefinitely if hung. Args: time_s: Maximum amount of time to take. func: Function to call. *args: Arguments to call the function with. **kwargs: Keyword arguments to call the function with. Returns: True if the function finished in less than time_s seconds.
def _validate(self, value):
    """Validate a new pk value before it is set, and return it.

    The pk must always be provided (it is not auto-increment) and must be
    unique for the model.  The returned value is normalized and can be
    used without further checks.

    :raises ValueError: when no pk value was given.
    :raises UniquenessError: when the pk already exists for the model.
    """
    if value is None:
        raise ValueError('The pk for %s is not "auto-increment", you must fill it' % self._model.__name__)
    normalized = self.normalize(value)
    if self.exists(normalized):
        raise UniquenessError('PKField %s already exists for model %s)' % (normalized, self._instance.__class__))
    return normalized
Validate that a given new pk to set is always set, and return it. The returned value should be normalized, and will be used without check.