code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def end_of_line(event):
    """Move the cursor to the end of the current line."""
    buffer = event.current_buffer
    offset = buffer.document.get_end_of_line_position()
    buffer.cursor_position += offset
Move to the end of the line.
def iterator(self):
    """Yield every object from the parent iterator, attaching the default
    language information to each one first."""
    lang = getattr(self, '_default_language', None)
    parent_iter = super(MultilingualModelQuerySet, self).iterator()
    for record in parent_iter:
        record._default_language = lang
        yield record
Add the default language information to all returned objects.
def add_group(self, groupname, statements): msg = OmapiMessage.open(b"group") msg.message.append(("create", struct.pack("!I", 1))) msg.obj.append(("name", groupname)) msg.obj.append(("statements", statements)) response = self.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: raise OmapiError("add ...
Adds a group @type groupname: bytes @type statements: str
def diagonal_line(xi=None, yi=None, *, ax=None, c=None, ls=None, lw=None, zorder=3): if ax is None: ax = plt.gca() if xi is None: xi = ax.get_xlim() if yi is None: yi = ax.get_ylim() if c is None: c = matplotlib.rcParams["grid.color"] if ls is None: ls = matpl...
Plot a diagonal line. Parameters ---------- xi : 1D array-like (optional) The x axis points. If None, taken from axis limits. Default is None. yi : 1D array-like The y axis points. If None, taken from axis limits. Default is None. ax : axis (optional) Axis to plot on. If non...
def decode(self, data, password=None):
    """Decode a raw GNTP message.

    :param string data: raw GNTP message
    :param string password: optional password associated with the message
    """
    self.password = password
    self.raw = gntp.shim.u(data)
    sections = self.raw.split('\r\n\r\n')
    self.info = self._parse_info(self.raw)
    self.headers = self._parse_dict(sections[0])
Decode GNTP Message :param string data:
def as_dict(self, quiet: bool = False, infer_type_and_cast: bool = False): if infer_type_and_cast: params_as_dict = infer_and_cast(self.params) else: params_as_dict = self.params if quiet: return params_as_dict def log_recursively(parameters, history):...
Sometimes we need to just represent the parameters as a dict, for instance when we pass them to PyTorch code. Parameters ---------- quiet: bool, optional (default = False) Whether to log the parameters before returning them as a dict. infer_type_and_cast : bool, opti...
def _is_socket(cls, stream): try: fd = stream.fileno() except ValueError: return False sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW) try: sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE) except socket.error as ex: if e...
Check if the given stream is a socket.
def get_setting(*args, **kwargs): for name in args: if hasattr(settings, name): return getattr(settings, name) if kwargs.get('raise_error', False): setting_url = url % args[0].lower().replace('_', '-') raise ImproperlyConfigured('Please make sure you specified at ' ...
Get a setting and raise an appropriate user friendly error if the setting is not found.
def _response(self, pdu):
    """Route an incoming datagram to the actor for its source address,
    creating a new actor when none exists yet."""
    if _debug:
        UDPDirector._debug("_response %r", pdu)
    source = pdu.pduSource
    actor = self.peers.get(source, None)
    if not actor:
        actor = self.actorClass(self, source)
    actor.response(pdu)
Incoming datagrams are routed through an actor.
def walk(self, oid, host, port, community): ret = {} if not isinstance(oid, tuple): oid = self._convert_to_oid(oid) host = socket.gethostbyname(host) snmpAuthData = cmdgen.CommunityData( 'agent-{}'.format(community), community) snmpTransportDat...
Perform an SNMP walk on a given OID
def init_word_db(cls, name, text): text = text.replace('\n', ' ').replace('\r', ' ') words = [w.strip() for w in text.split(' ') if w.strip()] assert len(words) > 2, \ 'Database text sources must contain 3 or more words.' freqs = {} for i in range(len(words) - 2):...
Initialize a database of words for the maker with the given name
def blt(f: List[SYM], x: List[SYM]) -> Dict[str, Any]: J = ca.jacobian(f, x) nblock, rowperm, colperm, rowblock, colblock, coarserow, coarsecol = J.sparsity().btf() return { 'J': J, 'nblock': nblock, 'rowperm': rowperm, 'colperm': colperm, 'rowblock': rowblock, ...
Sort equations by dependence
def _debugGraph(self): print("Len of graph: ", len(self.rdflib_graph)) for x, y, z in self.rdflib_graph: print(x, y, z)
internal util to print out contents of graph
def parse_log_line(line): matches = re.search(r'^\[([^\]]+)\] ([^:]+: .*)', line) error = re.search(r'Traceback', line) if error: return {'line': line, 'step': 'error'} if not matches: return {'line': line, 'step': None} tstamp = matches.group(1) msg = matches.group(2) if not...
Parses a log line and returns it with more information :param line: str - A line from a bcbio-nextgen log :returns dict: A dictionary containing the line, whether it is a new step, whether it is a Traceback, or whether the analysis is finished
def draw(board, term, cells):
    """Draw a board to the terminal.

    Each cell of `board` is printed at its (x, y) screen position; `cells`
    maps a cell state to the character printed for it, and `term.location`
    positions the cursor (blessings-style terminal object).
    """
    # NOTE(review): Python 2 only — print statement and dict.iteritems()
    # will not run under Python 3.
    for (x, y), state in board.iteritems():
        with term.location(x, y):
            print cells[state],
Draw a board to the terminal.
def reverse(self): if self.closed(): raise ValueError("Attempt to call reverse() on a " "closed Queryable.") try: r = reversed(self._iterable) return self._create(r) except TypeError: pass return self._create(se...
Returns the sequence reversed. Note: This method uses deferred execution, but the whole source sequence is consumed once execution commences. Returns: The source sequence in reverse order. Raises: ValueError: If the Queryable is closed().
def vector_str(p, decimal_places=2, print_zero=True):
    """Pretty-print the vector values as a bracketed, comma-separated string.

    Zero components are rendered as a blank entry when print_zero is False.
    """
    style = '{0:.' + str(decimal_places) + 'f}'
    rendered = []
    for component in p:
        if not print_zero and component == 0:
            rendered.append(' ')
        else:
            rendered.append(style.format(component))
    return '[{0}]'.format(", ".join(rendered))
Pretty-print the vector values.
def _trigger(self): self._completed.set() for callback in self._callbacks: callback(self)
Trigger all callbacks registered to this Future. This method is called internally by the batch once the batch completes. Args: message_id (str): The message ID, as a string.
def nearest_subpackage(cls, package, all_packages): def shared_prefix(candidate): zipped = zip(package.split('.'), candidate.split('.')) matching = itertools.takewhile(lambda pair: pair[0] == pair[1], zipped) return [pair[0] for pair in matching] shared_packages = [_f for _f in map(shared_pref...
Given a package, find its nearest parent in all_packages.
def draw(self):
    """Draw the score and every sprite in the system using their renderers.

    The full GL attribute state is saved and restored around the drawing, so
    this is safe to call from a Pyglet window's on_draw handler.
    """
    glPushAttrib(GL_ALL_ATTRIB_BITS)
    self.draw_score()
    for entity in self:
        entity.draw()
    glPopAttrib()
Draw all the sprites in the system using their renderers. This method is convenient to call from your Pyglet window's on_draw handler to redraw particles when needed.
def remove_metadata_key(self, obj, key):
    """Remove `key` from the object's metadata by setting it to an empty value.

    Nothing is done if the key does not exist in the metadata.
    """
    return self.set_metadata(obj, {key: ""})
Removes the specified key from the object's metadata. If the key does not exist in the metadata, nothing is done.
def predecessors(self, node, exclude_compressed=True):
    """Return the predecessors of `node`.

    Parameters
    ----------
    node : str
        The target node.
    exclude_compressed : boolean
        If True, nodes flagged 'compressed' are filtered out of the result.

    Returns
    -------
    list
        List of predecessor nodes.
    """
    parents = super(Graph, self).predecessors(node)
    if not exclude_compressed:
        return parents
    return [p for p in parents if not self.node[p].get('compressed', False)]
Returns the list of predecessors of a given node Parameters ---------- node : str The target node exclude_compressed : boolean If true, compressed nodes are excluded from the predecessors list Returns ------- list List of pre...
def normalize_result(result, default, threshold=0.2): if result is None: return default if result.get('confidence') is None: return default if result.get('confidence') < threshold: return default return normalize_encoding(result.get('encoding'), defa...
Interpret a chardet result.
def merge_tags(left, right, factory=Tags): if isinstance(left, Mapping): tags = dict(left) elif hasattr(left, 'tags'): tags = _tags_to_dict(left.tags) else: tags = _tags_to_dict(left) if isinstance(right, Mapping): tags.update(right) elif hasattr(left, 'tags'): ...
Merge two sets of tags into a new troposphere object Args: left (Union[dict, troposphere.Tags]): dictionary or Tags object to be merged with lower priority right (Union[dict, troposphere.Tags]): dictionary or Tags object to be merged with higher priority factory (typ...
def create_exclude_rules(args): global _cached_exclude_rules if _cached_exclude_rules is not None: return _cached_exclude_rules rules = [] for excl_path in args.exclude: abspath = os.path.abspath(os.path.join(args.root, excl_path)) rules.append((abspath, True)) for incl_path ...
Creates the exclude rules
def difference(self, *others):
    """Return the difference of two or more sets as a new NGram set.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> list(a.difference(b))
    ['eggs']
    """
    diff = super(NGram, self).difference(*others)
    return self.copy(diff)
Return the difference of two or more sets as a new set. >>> from ngram import NGram >>> a = NGram(['spam', 'eggs']) >>> b = NGram(['spam', 'ham']) >>> list(a.difference(b)) ['eggs']
def hasKey(self, key, notNone=False): result = [] result_tracker = [] for counter, row in enumerate(self.table): (target, _, value) = internal.dict_crawl(row, key) if target: if notNone==False or not value is None: result.append(row) ...
Return entries where the key is present. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": None , "wigs": [...
def sub_tags(self, *tags, **kw):
    """Return the list of direct sub-records whose tag matches any of `tags`.

    :param str tags: names of the sub-record tags to match
    :param kw: only recognized keyword is `follow`; when True (the default),
        Pointer records are dereferenced to their targets.
    """
    matched = [rec for rec in self.sub_records if rec.tag in tags]
    if kw.get('follow', True):
        matched = [item.ref if isinstance(item, Pointer) else item
                   for item in matched]
    return matched
Returns list of direct sub-records matching any tag name. Unlike :py:meth:`sub_tag` method this method does not support hierarchical paths and does not resolve pointers. :param str tags: Names of the sub-record tag :param kw: Keyword arguments, only recognized keyword is `follow` ...
def createSQL(self, sql, args=()):
    """Execute an auto-committing statement such as CREATE TABLE or
    CREATE INDEX, logging it if execution takes more than two seconds."""
    started = time.time()
    self._execSQL(sql, args)
    elapsed = time.time() - started
    if elapsed > 2.0:
        log.msg('Extremely long CREATE: %s' % (elapsed,))
        log.msg(sql)
For use with auto-committing statements such as CREATE TABLE or CREATE INDEX.
def get_dependants(project_name):
    """Yield the project names of installed distributions that depend on
    `project_name`."""
    for dist in get_installed_distributions(user_only=ENABLE_USER_SITE):
        if is_dependant(dist, project_name):
            yield dist.project_name
Yield dependants of `project_name`.
def from_Suite(suite, maxwidth=80): subtitle = str(len(suite.compositions)) + ' Compositions' if suite.subtitle\ == '' else suite.subtitle result = os.linesep.join(add_headers( maxwidth, suite.title, subtitle, suite.author, suite.email, suite.description,...
Convert a mingus.containers.Suite to an ASCII tablature string, complete with headers. This function makes use of the Suite's title, subtitle, author, email and description attributes.
def _create_trial_info(self, expr_dir):
    """Build a TrialRecord from the experiment's meta and save it to the
    db backend.

    Args:
        expr_dir (str): Directory path of the experiment.
    """
    meta = self._build_trial_meta(expr_dir)
    self.logger.debug("Create trial for %s" % meta)
    record = TrialRecord.from_json(meta)
    record.save()
Create information for given trial. Meta file will be loaded if exists, and the trial information will be saved in db backend. Args: expr_dir (str): Directory path of the experiment.
def get_span_kind_as_int(self, span): kind = None if "span.kind" in span.tags: if span.tags["span.kind"] in self.entry_kind: kind = 1 elif span.tags["span.kind"] in self.exit_kind: kind = 2 else: kind = 3 return ...
Will retrieve the `span.kind` tag and return the appropriate integer value for the Instana backend or None if the tag is set to something we don't recognize. :param span: The span to search for the `span.kind` tag :return: Integer
def render_filter(self, next_filter):
    """Coroutine producing formatted output from the raw data stream.

    Primes `next_filter`, then for every record sent in, applies each
    accessor followed by cell_format and pushes the formatted row downstream.
    """
    next(next_filter)
    while True:
        record = (yield)
        row = [self.cell_format(get(record)) for get in self.accessors]
        next_filter.send(row)
Produce formatted output from the raw data stream.
def spectrogram(t_signal, frame_width=FRAME_WIDTH, overlap=FRAME_STRIDE): frame_width = min(t_signal.shape[0], frame_width) w = np.hanning(frame_width) num_components = frame_width // 2 + 1 num_frames = 1 + (len(t_signal) - frame_width) // overlap f_signal = np.empty([num_frames, num_components], dt...
Calculate the magnitude spectrogram of a single-channel time-domain signal from the real frequency components of the STFT with a hanning window applied to each frame. The frame size and overlap between frames should be specified in number of samples.
def check_status_code(response, codes=None):
    """Check that response.status_code is one of the accepted codes.

    :param requests.request response: Requests response
    :param list codes: list of accepted codes (defaults to [200])
    :raises: StatusCodeError if the code is not accepted
    """
    accepted = codes or [200]
    if response.status_code not in accepted:
        raise StatusCodeError(response.status_code)
Checks response.status_code is in codes. :param requests.request response: Requests response :param list codes: List of accepted codes or callable :raises: StatusCodeError if code invalid
def init_centers_widths(self, R): kmeans = KMeans( init='k-means++', n_clusters=self.K, n_init=10, random_state=100) kmeans.fit(R) centers = kmeans.cluster_centers_ widths = self._get_max_sigma(R) * np.ones((self.K, 1)) return cente...
Initialize prior of centers and widths Returns ------- centers : 2D array, with shape [K, n_dim] Prior of factors' centers. widths : 1D array, with shape [K, 1] Prior of factors' widths.
def recv_blocking(conn, msglen): msg = b'' while len(msg) < msglen: maxlen = msglen-len(msg) if maxlen > 4096: maxlen = 4096 tmpmsg = conn.recv(maxlen) if not tmpmsg: raise RuntimeError("socket connection broken") msg += tmpmsg logging.debu...
Receive data until msglen bytes have been received.
def result(self, timeout=None):
    """Poll until the operation completes, then return its unpacked response.

    Waits at most `timeout` seconds; raises GaxError when the operation
    finished with an error instead of a response.
    """
    polled = self._poll(timeout)
    if not polled.HasField('response'):
        raise GaxError(self._operation.error.message)
    return _from_any(self._result_type, self._operation.response)
Enters polling loop on OperationsClient.get_operation, and once Operation.done is true, then returns Operation.response if successful or throws GaxError if not successful. This method will wait up to timeout seconds. If the call hasn't completed in timeout seconds, then a RetryError wil...
def list_active_vms(cwd=None): vms = [] cmd = 'vagrant status' reply = __salt__['cmd.shell'](cmd, cwd=cwd) log.info('--->\n%s', reply) for line in reply.split('\n'): tokens = line.strip().split() if len(tokens) > 1: if tokens[1] == 'running': vms.append(to...
Return a list of machine names for active virtual machine on the host, which are defined in the Vagrantfile at the indicated path. CLI Example: .. code-block:: bash salt '*' vagrant.list_active_vms cwd=/projects/project_1
def upload(self, picture, resize=None, rotation=None, noexif=None, callback=None): if not resize: resize = self._resize if not rotation: rotation = self._rotation if not noexif: noexif = self._noexif if not callback: callback...
wraps upload function :param str/tuple/list picture: Path to picture as str or picture data. \ If data, a tuple or list with the file name as str \ and data as byte object in that order. :param str resize: A resolution in the following format: \ '80x80'(optional) ...
def replace_entity_resource(model, oldres, newres): oldrids = set() for rid, link in model: if link[ORIGIN] == oldres or link[TARGET] == oldres or oldres in link[ATTRIBUTES].values(): oldrids.add(rid) new_link = (newres if o == oldres else o, r, newres if t == oldres else t, dict...
Replace one entity in the model with another with the same links :param model: Versa model to be updated :param oldres: old/former resource IRI to be replaced :param newres: new/replacement resource IRI :return: None
def check_compatibility(self, other, check_edges=False, precision=1E-7): if self.GetDimension() != other.GetDimension(): raise TypeError("histogram dimensionalities do not match") if len(self) != len(other): raise ValueError("histogram sizes do not match") for axis in ran...
Test whether two histograms are considered compatible by the number of dimensions, number of bins along each axis, and optionally the bin edges. Parameters ---------- other : histogram A rootpy histogram check_edges : bool, optional (default=False) ...
def get_single_file_info(self, rel_path):
    """Get last-change info for a single file, resolved to its full path."""
    # Delegates to the module-level helper of the same name.
    full_path = self.get_full_file_path(rel_path)
    return get_single_file_info(full_path, rel_path)
Gets last change time for a single file
def fetch_objects(self, oids): objects = self.model.objects.in_bulk(oids) if len(objects.keys()) != len(oids): non_existants = set(oids) - set(objects.keys()) msg = _('Unknown identifiers: {identifiers}').format( identifiers=', '.join(str(ne) for ne in non_existan...
This methods is used to fetch models from a list of identifiers. Default implementation performs a bulk query on identifiers. Override this method to customize the objects retrieval.
def set_file_path(self, filePath):
    """Set the file path that needs to be locked.

    :Parameters:
        #. filePath (None, path): the file that needs to be locked; when
           None is given, the stored path is simply cleared.
    """
    # NOTE(review): `basestring` is Python 2 only — this raises NameError
    # under Python 3.
    if filePath is not None:
        assert isinstance(filePath, basestring), "filePath must be None or string"
        filePath = str(filePath)
    self.__filePath = filePath
Set the file path that needs to be locked. :Parameters: #. filePath (None, path): The file that needs to be locked. When given and a lock is acquired, the file will be automatically opened for writing or reading depending on the given mode. If None is given, the locker...
def generate_cutD_genomic_CDR3_segs(self): max_palindrome_L = self.max_delDl_palindrome max_palindrome_R = self.max_delDr_palindrome self.cutD_genomic_CDR3_segs = [] for CDR3_D_seg in [x[1] for x in self.genD]: if len(CDR3_D_seg) < min(max_palindrome_L, max_palindrome_R): ...
Add palindromic inserted nucleotides to germline V sequences. The maximum number of palindromic insertions are appended to the germline D segments so that delDl and delDr can index directly for number of nucleotides to delete from a segment. Sets the attribute cutV_gen...
def get_secondary_count(context, default=0):
    """Return the number of secondary Analysis Requests of this AR.

    Falls back to `default` when the context is not an AR or it has no
    primary Analysis Request.
    """
    if is_ar(context):
        primary = context.getPrimaryAnalysisRequest()
        if primary:
            return len(primary.getSecondaryAnalysisRequests())
    return default
Returns the number of secondary ARs of this AR
def pad_to_size(data, shape, value=0.0): shape = [data.shape[i] if shape[i] == -1 else shape[i] for i in range(len(shape))] new_data = np.empty(shape) new_data[:] = value II = [slice((shape[i] - data.shape[i])//2, (shape[i] - data.shape[i])//2 + data.shape[i]) for ...
This is similar to `pad`, except you specify the final shape of the array. Parameters ---------- data : ndarray Numpy array of any dimension and type. shape : tuple Final shape of padded array. Should be tuple of length ``data.ndim``. If it has to pad unevenly, it will pad one m...
def _finalize_memory(jvm_opts): avoid_min = 32 avoid_max = 48 out_opts = [] for opt in jvm_opts: if opt.startswith("-Xmx"): spec = opt[4:] val = int(spec[:-1]) mod = spec[-1] if mod.upper() == "M": adjust = 1024 min_...
GRIDSS does not recommend setting memory between 32 and 48Gb. https://github.com/PapenfussLab/gridss#memory-usage
def CreateTask(self, session_identifier):
    """Create a task bound to a session and register it as queued.

    Args:
        session_identifier (str): the identifier of the session the task
            is part of.

    Returns:
        Task: task attribute container.
    """
    new_task = tasks.Task(session_identifier)
    logger.debug('Created task: {0:s}.'.format(new_task.identifier))
    with self._lock:
        self._tasks_queued[new_task.identifier] = new_task
        self._total_number_of_tasks += 1
        self.SampleTaskStatus(new_task, 'created')
    return new_task
Creates a task. Args: session_identifier (str): the identifier of the session the task is part of. Returns: Task: task attribute container.
def Runs(self):
    """Return all run names in the `EventMultiplexer` mapped to their tags.

    Returns:
      ```
      {runName: { scalarValues: [tagA, tagB, tagC],
                  graph: true, meta_graph: true}}
      ```
    """
    with self._accumulators_mutex:
        snapshot = list(six.iteritems(self._accumulators))
    return {run_name: acc.Tags() for run_name, acc in snapshot}
Return all the run names in the `EventMultiplexer`. Returns: ``` {runName: { scalarValues: [tagA, tagB, tagC], graph: true, meta_graph: true}} ```
def write_sampler_metadata(self, sampler):
    """Write the standard sampler metadata, plus ntemps, to file."""
    super(MultiTemperedMetadataIO, self).write_sampler_metadata(sampler)
    group_attrs = self[self.sampler_group].attrs
    group_attrs["ntemps"] = sampler.ntemps
Adds writing ntemps to file.
def permissions(self):
    """Permissions of the file as a two-character string.

    Can be "r-" (read-only), "-w" (write-only), "rw" (read-write) or
    "--" (no rights).

    :rtype: str
    """
    flags = self._info.file.permissions
    readable = "r" if flags & lib.GP_FILE_PERM_READ else "-"
    # NOTE(review): the write bit is tested against GP_FILE_PERM_DELETE,
    # exactly as the original did — confirm this is intentional.
    writable = "w" if flags & lib.GP_FILE_PERM_DELETE else "-"
    return "{0}{1}".format(readable, writable)
Permissions of the file. Can be "r-" (read-only), "-w" (write-only), "rw" (read-write) or "--" (no rights). :rtype: str
def scan_django_settings(values, imports): if isinstance(values, (str, bytes)): if utils.is_import_str(values): imports.add(values) elif isinstance(values, dict): for k, v in values.items(): scan_django_settings(k, imports) scan_django_settings(v, imports) ...
Recursively scans Django settings for values that appear to be imported modules.
def _readline(sock, buf): chunks = [] last_char = b'' while True: if last_char == b'\r' and buf[0:1] == b'\n': chunks[-1] = chunks[-1][:-1] return buf[1:], b''.join(chunks) elif buf.find(b'\r\n') != -1: before, sep, after = buf.partition(b"\r\n") ...
Read line of text from the socket. Read a line of text (delimited by "\r\n") from the socket, and return that line along with any trailing characters read from the socket. Args: sock: Socket object, should be connected. buf: String, zero or more characters, returned from an earlier ...
def parameter_action(self, text, loc, par): exshared.setpos(loc, text) if DEBUG > 0: print("PARAM:",par) if DEBUG == 2: self.symtab.display() if DEBUG > 2: return index = self.symtab.insert_parameter(par.name, par.type) self.shared.function_para...
Code executed after recognising a parameter
def median(self, **kwargs): if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().median(**kwargs) axis = kwargs.get("axis", 0) func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs) return self._full_axis_reduce(...
Returns median of each column or row. Returns: A new QueryCompiler object containing the median of each column or row.
def pollNextEvent(self, pEvent):
    """Fill `pEvent` with the next event on the queue and return True, or
    return False when there are no events.

    uncbVREvent should be the size in bytes of the VREvent_t struct.
    """
    poll = self.function_table.pollNextEvent
    return poll(byref(pEvent), sizeof(VREvent_t)) != 0
Returns true and fills the event with the next event on the queue if there is one. If there are no events this method returns false. uncbVREvent should be the size in bytes of the VREvent_t struct
def filter(self, displayed=False, enabled=False): if self.evaluated: result = self if displayed: result = ElementSelector( result.browser, elements=[e for e in result if e.is_displayed()] ) if enabled: ...
Filter elements by visibility and enabled status. :param displayed: whether to filter out invisible elements :param enabled: whether to filter out disabled elements Returns: an :class:`ElementSelector`
def update(self, friendly_name=None, description=None, query=None):
    """Selectively update View information; None parameters are unchanged.

    Args:
        friendly_name: if not None, the new friendly name.
        description: if not None, the new description.
        query: if not None, a new query string (or Query object) for the View.
    """
    self._table._load_info()
    if query is not None:
        sql = query.sql if isinstance(query, _query.Query) else query
        self._table._info['view'] = {'query': sql}
    self._table.update(friendly_name=friendly_name, description=description)
Selectively updates View information. Any parameters that are None (the default) are not applied in the update. Args: friendly_name: if not None, the new friendly name. description: if not None, the new description. query: if not None, a new query string for the View.
def loc_to_index(self, loc): if loc is None: return self._active_renderer_index elif isinstance(loc, int): return loc elif isinstance(loc, collections.Iterable): assert len(loc) == 2, '"loc" must contain two items' return loc[0]*self.shape[0] + loc...
Return index of the render window given a location index. Parameters ---------- loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. Returns ------- idx : int Index of the ren...
def patch_func(replacement, target_mod, func_name):
    """Patch `func_name` in `target_mod` with `replacement`.

    The original is resolved by name and stashed on the replacement as
    `unpatched` (kept if already present), so patching an already-patched
    function does not lose the true original.
    """
    unpatched = getattr(target_mod, func_name)
    vars(replacement).setdefault('unpatched', unpatched)
    setattr(target_mod, func_name, replacement)
Patch func_name in target_mod with replacement Important - original must be resolved by name to avoid patching an already patched function.
def delete_project(self, id):
    """Delete a project from the Gitlab server.

    Gitlab currently returns a Boolean True when deleted, which is mapped
    to an empty dictionary.

    :param id: The ID of the project or NAMESPACE/PROJECT_NAME
    :return: Dictionary
    :raise: HttpError: If invalid response returned
    """
    endpoint = '/projects/{id}'.format(id=id)
    outcome = self.delete(endpoint)
    return {} if outcome is True else outcome
Delete a project from the Gitlab server Gitlab currently returns a Boolean True if the deleted and as such we return an empty Dictionary :param id: The ID of the project or NAMESPACE/PROJECT_NAME :return: Dictionary :raise: HttpError: If invalid response returned
def update_parameters(parameters, grads, learning_rate=1.2): W1 = parameters["W1"] b1 = parameters["b1"] W2 = parameters["W2"] b2 = parameters["b2"] dW1 = grads["dW1"] db1 = grads["db1"] dW2 = grads["dW2"] db2 = grads["db2"] W1 -= learning_rate * dW1 b1 -= learning_rate * db1 ...
Updates parameters using the gradient descent update rule given above Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients Returns: parameters -- python dictionary containing your updated parameters
def _areadist(ax, v, xr, c, bins=100, by=None, alpha=1, label=None): y, x = np.histogram(v[~np.isnan(v)], bins) x = x[:-1] if by is None: by = np.zeros((bins,)) ax.fill_between(x, y, by, facecolor=c, alpha=alpha, label=label) return y
Plot the histogram distribution but as an area plot
def entity(self, entity_type, identifier=None):
    """Factory method for creating an Entity.

    If an entity with the same type and identifier already exists, a
    reference to it is returned; otherwise a new one is created and added
    to the known entities for this ACL.

    :type entity_type: str
    """
    candidate = _ACLEntity(entity_type=entity_type, identifier=identifier)
    if self.has_entity(candidate):
        return self.get_entity(candidate)
    self.add_entity(candidate)
    return candidate
Factory method for creating an Entity. If an entity with the same type and identifier already exists, this will return a reference to that entity. If not, it will create a new one and add it to the list of known entities for this ACL. :type entity_type: str :param enti...
def download_object(container_name, object_name, destination_path, profile, overwrite_existing=False, delete_on_failure=True, **libcloud_kwargs): conn = _get_driver(profile=profile) obj = conn.get_object(container_name, object_name) libcloud_kwargs = salt.utils.args.clean_kwargs(**libclo...
Download an object to the specified destination path. :param container_name: Container name :type container_name: ``str`` :param object_name: Object name :type object_name: ``str`` :param destination_path: Full path to a file or a directory where the incoming fil...
def best_hits(self):
    """Return a dict mapping each query to its best mapped position.

    Sorts by quality first so the leading record of each query group is the
    best hit, then restores reference order before returning.
    """
    self.quality_sort()
    best = {query: next(lines)
            for query, lines in groupby(self, lambda rec: rec.query)}
    self.ref_sort()
    return best
returns a dict with query => best mapped position
def hlog_inv(y, b=500, r=_display_max, d=_l_mmax):
    """Inverse of the base 10 hyperlog transform."""
    scaled = 1. * d / r * y
    s = sign(y)
    # Zero entries get sign +1 so the power term stays well-defined;
    # handle both array-valued and scalar signs.
    if s.shape:
        s[s == 0] = 1
    elif s == 0:
        s = 1
    return s * 10 ** (s * scaled) + b * scaled - s
Inverse of base 10 hyperlog transform.
def generate_tags_multiple_files(input_files, tag, ignore_tags, ns=None):
    """Chain the xmltag generators of every input file into one iterator."""
    # Eager list comprehension kept so each file's generator is created
    # up front, exactly as before.
    generators = [generate_xmltags(fn, tag, ignore_tags, ns)
                  for fn in input_files]
    return itertools.chain.from_iterable(generators)
Calls xmltag generator for multiple files.
def parse_function(fn): try: return parse_string(inspect.getsource(fn)) except (IOError, OSError) as e: raise ValueError( 'Cannot differentiate function: %s. Tangent must be able to access the ' 'source code of the function. Functions defined in a Python ' 'interpreter and functions ...
Get the source of a function and return its AST.
def get_crawldelay(self, useragent):
    """Look for a configured crawl delay for the given user agent.

    @return: crawl delay in seconds or zero
    @rtype: integer >= 0
    """
    for rule in self.entries:
        if rule.applies_to(useragent):
            return rule.crawldelay
    return 0
Look for a configured crawl delay. @return: crawl delay in seconds or zero @rtype: integer >= 0
def parse(
        args: typing.List[str] = None,
        arg_parser: ArgumentParser = None
) -> dict:
    """Parse the command-line arguments for the cauldron server into a dict."""
    active_parser = arg_parser or create_parser()
    return vars(active_parser.parse_args(args))
Parses the arguments for the cauldron server
def _remove_dots(src): output = {} for key, val in six.iteritems(src): if isinstance(val, dict): val = _remove_dots(val) output[key.replace('.', '-')] = val return output
Remove dots from the given data structure
def save(self, f):
    """Save the pickled model (weights, tagdict, classes, clusters) to file `f`."""
    model = (self.perceptron.weights, self.tagdict, self.classes, self.clusters)
    return pickle.dump(model, f, protocol=pickle.HIGHEST_PROTOCOL)
Save pickled model to file.
def ae_core_density(self):
    """The all-electron radial density as a RadialFunction."""
    mesh, values, _attrib = self._parse_radfunc("ae_core_density")
    return RadialFunction(mesh, values)
The all-electron radial density.
def content(self, **args): self.gist_name = '' if 'name' in args: self.gist_name = args['name'] self.gist_id = self.getMyID(self.gist_name) elif 'id' in args: self.gist_id = args['id'] else: raise Exception('Either provide authenticated user\'s Unambigious Gistname or any unique Gistid') if self.g...
Doesn't require manual fetching of the gistID of a gist; passing gistName will return the content of the gist. In case names are ambiguous, provide the GistID, or it will return the contents of the most recent matching gistname
def render_pdf_file_to_image_files(pdf_file_name, output_filename_root, program_to_use): res_x = str(args.resX) res_y = str(args.resY) if program_to_use == "Ghostscript": if ex.system_os == "Windows": ex.render_pdf_file_to_image_files__ghostscript_bmp( p...
Render all the pages of the PDF file at pdf_file_name to image files with path and filename prefix given by output_filename_root. Any directories must have already been created, and the calling program is responsible for deleting any directories or image files. The program program_to_use, currently ei...
def _calc_b(w, aod700): b1 = 0.00925*aod700**2 + 0.0148*aod700 - 0.0172 b0 = -0.7565*aod700**2 + 0.5057*aod700 + 0.4557 b = b1 * np.log(w) + b0 return b
Calculate the b coefficient.
def _mangle_prefix(res): res['total_addresses'] = unicode(res['total_addresses']) res['used_addresses'] = unicode(res['used_addresses']) res['free_addresses'] = unicode(res['free_addresses']) if res['expires'].tzinfo is None: res['expires'] = pytz.utc.localize(res['expires']) if res['expires...
Mangle prefix result
def list_spiders(self, project):
    """List all known spiders for a specific project.

    First class, maps to Scrapyd's list spiders endpoint.
    """
    endpoint = self._build_url(constants.LIST_SPIDERS_ENDPOINT)
    payload = self.client.get(endpoint, params={'project': project},
                              timeout=self.timeout)
    return payload['spiders']
Lists all known spiders for a specific project. First class, maps to Scrapyd's list spiders endpoint.
def get_genes_for_hgnc_id(self, hgnc_symbol): headers = {"content-type": "application/json"} self.attempt = 0 ext = "/xrefs/symbol/homo_sapiens/{}".format(hgnc_symbol) r = self.ensembl_request(ext, headers) genes = [] for item in json.loads(r): if item["type"]...
obtain the ensembl gene IDs that correspond to a HGNC symbol
def to_python(self, reply, propagate=True):
    """Extract the value out of a reply message.

    On success returns reply['ok']. Otherwise builds self.Error from the
    'nok' payload and raises it, or returns it when propagate is False.
    """
    try:
        return reply['ok']
    except KeyError:
        failure = self.Error(*reply.get('nok') or ())
        if not propagate:
            return failure
        raise failure
Extracts the value out of the reply message. :param reply: In the case of a successful call the reply message will be:: {'ok': return_value, **default_fields} Therefore the method returns: return_value, **default_fields If the method raises an exception th...
def set_ignore_interrupts(flag=True):
    """Toggle whether emulated syscalls restart instead of raising EINTR.

    Greenhouse's syscall emulation can't support ``signal.siginterrupt``
    because it cannot differentiate between signals, so this global flag
    controls the behavior for all of them.
    """
    log.info("setting ignore_interrupts to %r" % flag)
    state.ignore_interrupts = bool(flag)
turn off EINTR-raising from emulated syscalls on interruption by signals due to the nature of greenhouse's system call emulation, ``signal.siginterrupt`` can't be made to work with it. specifically, greenhouse can't differentiate between different signals. so this function toggles whether to restart fo...
def run_pip_install(upgrade=0):
    """Install the requirements file on the given server.

    Usage::

        fab <server> run_pip_install
        fab <server> run_pip_install:upgrade=1

    :param upgrade: If set to 1, the command runs with the ``--upgrade`` flag.
    """
    command = 'pip install -r {0}'.format(
        settings.FAB_SETTING('SERVER_REQUIREMENTS_PATH'))
    if upgrade:
        command = command + ' --upgrade'
    run_workon(command)
Installs the requirement.txt file on the given server. Usage:: fab <server> run_pip_install fab <server> run_pip_install:upgrade=1 :param upgrade: If set to 1, the command will be executed with the ``--upgrade`` flag.
def values(self):
    """Read-only tuple of the float values of this series, in chart order."""
    def walk_points():
        raw = self._element.val
        if raw is None:
            return
        for idx in range(raw.ptCount_val):
            yield raw.pt_v(idx)
    return tuple(walk_points())
Read-only. A sequence containing the float values for this series, in the order they appear on the chart.
def check_enough_space(dataset_local_dir, remote_fname, local_fname,
                       max_disk_usage=0.9):
    """Check the local folder has room for the remote file.

    True when current usage plus the remote file's size stays below
    `max_disk_usage` of the total disk capacity.
    """
    needed = os.path.getsize(remote_fname)
    total, used = disk_usage(dataset_local_dir)
    return (used + needed) < (total * max_disk_usage)
Check if the given local folder has enough space. Check if the given local folder has enough space to store the specified remote file. Parameters ---------- remote_fname : str Path to the remote file remote_fname : str Path to the local folder max_disk_usage : float ...
def spaceless_pdf_plot_maker(array, filename, vmax=None, dpi=DEFAULT_DPI): if vmax is None: vmax = np.percentile(array, DEFAULT_SATURATION_THRESHOLD) plt.gca().set_axis_off() plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0) plt.margins(0, 0) plt.gca().xaxis.set_major...
Draw a pretty plot from an array A function that performs all the tedious matplotlib magic to draw a 2D array with as few parameters and as little whitespace as possible. Parameters ---------- array : array_like The input array to draw. filename : file, str or pathlib.Path ...
def build(id=None, name=None, revision=None, temporary_build=False, timestamp_alignment=False, no_build_dependencies=False, keep_pod_on_failure=False, force_rebuild=False, rebuild_mode=common.REBUILD_MODES_DEFAULT): data = build_raw(id, name, revision, temporary_bui...
Trigger a BuildConfiguration by name or ID
def __populate_repositories_of_interest(self, username): user = self.github.get_user(username) self.user_starred_repositories.extend(user.get_starred()) if self.deep_dive: for following_user in user.get_following(): self.user_following_starred_repositories.extend( ...
Method to populate repositories which will be used to suggest repositories for the user. For this purpose we use two kinds of repositories. 1. Repositories starred by user him/herself. 2. Repositories starred by the users followed by the user. :param username: Username for the ...
def estimateKronCovariances(phenos,K1r=None,K1c=None,K2r=None,K2c=None,covs=None,Acovs=None,covar_type='lowrank_diag',rank=1): print(".. Training the backgrond covariance with a GP model") vc = VAR.CVarianceDecomposition(phenos) if K1r is not None: vc.addRandomEffect(K1r,covar_type=covar_type,rank=r...
estimates the background covariance model before testing Args: phenos: [N x P] SP.array of P phenotypes for N individuals K1r: [N x N] SP.array of LMM-covariance/kinship koefficients (optional) If not provided, then linear regression analysis is performed K1c: ...
def longest_run_1d(arr):
    """Return the length of the longest consecutive run of True values.

    Parameters
    ----------
    arr : bool array
        Input array.

    Returns
    -------
    int
        Length of the longest run.
    """
    run_values, run_lengths = rle_1d(arr)[:2]
    # Zero out runs of False so only True runs compete for the max.
    masked = np.where(run_values, run_lengths, 0)
    return masked.max()
Return the length of the longest consecutive run of identical values. Parameters ---------- arr : bool array Input array Returns ------- int Length of longest run.
def copy_contents_to(self, destination):
    """Copy the contents of this directory to *destination*.

    Returns a ``Folder`` object representing the destination directory.
    """
    logger.info("Copying contents of %s to %s" % (self, destination))
    dest_folder = Folder(destination)
    dest_folder.make()
    # Mirror the directory skeleton before bulk-copying the files.
    self._create_target_tree(dest_folder)
    dir_util.copy_tree(self.path, unicode(dest_folder))
    return dest_folder
Copies the contents of this directory to the given destination. Returns a Folder object that represents the copied directory.
def remove_label(self, label, relabel=False):
    """Remove the given label number by delegating to ``remove_labels``.

    The removed label is assigned a value of zero (i.e., background).

    Parameters
    ----------
    label : int
        The label number to remove.
    relabel : bool, optional
        If `True`, the segmentation image will be relabeled after the
        removal.
    """
    self.remove_labels(label, relabel=relabel)
Remove the label number. The removed label is assigned a value of zero (i.e., background). Parameters ---------- label : int The label number to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled ...
def users(self):
    """Return a relationship to all users with permissions on this resource."""
    relationship_kwargs = dict(
        secondary="users_resources_permissions",
        passive_deletes=True,
        passive_updates=True,
    )
    return sa.orm.relationship("User", **relationship_kwargs)
Returns all users that have permissions for this resource.
def queue_stats(self, queue, tags): for mname, pymqi_value in iteritems(metrics.queue_metrics()): try: mname = '{}.queue.{}'.format(self.METRIC_PREFIX, mname) m = queue.inquire(pymqi_value) self.gauge(mname, m, tags=tags) except pymqi.Error...
Grab stats from queues
def const_shuffle(arr, seed=23980):
    """Shuffle *arr* in place with a fixed seed, leaving the global RNG intact.

    Bug fix: the original saved ``np.random.seed()`` — which returns
    ``None`` — and then "restored" it with ``np.random.seed(None)``,
    which actually reseeds from OS entropy rather than restoring the
    caller's RNG state.  The full generator state must be captured with
    ``get_state()`` and restored with ``set_state()``.

    Parameters
    ----------
    arr : array_like
        Array shuffled in place along its first axis.
    seed : int, optional
        Seed making the shuffle deterministic.
    """
    saved_state = np.random.get_state()
    try:
        np.random.seed(seed)
        np.random.shuffle(arr)
    finally:
        # Restore the caller's RNG state even if the shuffle raises.
        np.random.set_state(saved_state)
Shuffle an array in-place with a fixed seed.
def to_dict(self):
    """Return a dict of the important fields of this session token."""
    field_names = ('access_key', 'secret_key', 'session_token',
                   'expiration', 'request_id')
    return {name: getattr(self, name) for name in field_names}
Return a Python dict containing the important information about this Session Token.
def add_user(self, username, email, directoryId=1, password=None, fullname=None, notify=False, active=True, ignore_existing=False, application_keys=None, ...
Create a new JIRA user. :param username: the username of the new user :type username: str :param email: email address of the new user :type email: str :param directoryId: The directory ID the new user should be a part of (Default: 1) :type directoryId: int :param...