Columns: Unnamed: 0 (int64, 0 to 389k) · code (string, lengths 26 to 79.6k) · docstring (string, lengths 1 to 46.9k)
372,900
def _unknown_args(self, args):
    for u in args:
        # log-message literal reconstructed; it was stripped in extraction
        self.tcex.log.warning(u'Unsupported arg found ({}).'.format(u))
Log argparser unknown arguments. Args: args (list): List of unknown arguments
372,901
def lookup_camera_by_id(self, device_id):
    # guard against no match instead of indexing an empty list
    cameras = list(filter(lambda cam: cam.device_id == device_id, self.cameras))
    if cameras:
        return cameras[0]
    return None
Return camera object by device_id.
372,902
def _check_pillar_minions(self, expr, delimiter, greedy):
    # cache-name literal reconstructed from the docstring ("via pillar")
    return self._check_cache_minions(expr, delimiter, greedy, 'pillar')
Return the minions found by looking via pillar
372,903
def wait_for_binary_interface(self, **kwargs):
    # version and interface-key literals reconstructed from the docstring
    if self.cluster.version() >= '1.2':
        self.watch_log_for("Starting listening for CQL clients", **kwargs)
    binary_itf = self.network_interfaces['binary']
    if not common.check_socket_listening(binary_itf, timeout=30):
        warnings.warn("Binary interface %s:%s is not listening after 30 seconds, node may have failed to start."
                      % (binary_itf[0], binary_itf[1]))
Waits for the Binary CQL interface to be listening. If the cluster version is > 1.2, first checks the log for 'Starting listening for CQL clients' before checking that the interface is listening. Emits a warning if it is not listening after 30 seconds.
372,904
def _fullqualname_method_py3(obj):
    if inspect.isclass(obj.__self__):
        cls = obj.__self__.__qualname__
    else:
        cls = obj.__self__.__class__.__qualname__
    return obj.__self__.__module__ + '.' + cls + '.' + obj.__name__
Fully qualified name for 'method' objects in Python 3.
372,905
def for_user(self, user):
    qs = SharedMemberQuerySet(model=self.model, using=self._db, user=user)
    qs = qs.filter(Q(author=user) | Q(foldershareduser__user=user))
    return qs.distinct() & self.distinct()
All folders the given user can do something with.
372,906
def get_work_item_by_id(self, wi_id):
    work_items = self.get_work_items(id=wi_id)
    if work_items is not None:
        return work_items[0]
    return None
Retrieves a single work item based on the supplied ID :param wi_id: The work item ID number :return: Workitem or None
372,907
def remove(self, label):
    if label.id in self._labels:
        self._labels[label.id] = None
    self._dirty = True
Remove a label. Args: label (gkeepapi.node.Label): The Label object.
372,908
def get_range(self, ignore_blank_lines=True):
    ref_lvl = self.trigger_level
    first_line = self._trigger.blockNumber()
    block = self._trigger.next()
    last_line = block.blockNumber()
    lvl = self.scope_level
    if ref_lvl == lvl:
        ref_lvl -= 1
    while (block.isValid() and
            TextBlockHelper.get_fold_lvl(block) > ref_lvl):
        last_line = block.blockNumber()
        block = block.next()
    if ignore_blank_lines and last_line:
        block = block.document().findBlockByNumber(last_line)
        # rewind past trailing blank lines
        while block.blockNumber() and block.text().strip() == '':
            block = block.previous()
        last_line = block.blockNumber()
    return first_line, last_line
Gets the fold region range (start and end line). .. note:: The start line does not encompass the trigger line. :param ignore_blank_lines: True to ignore blank lines at the end of the scope (the method will rewind to find the last meaningful block that is part of the fold scope). :returns: tuple(int, int)
372,909
def add_sender(self, partition=None, operation=None, send_timeout=60,
               keep_alive=30, auto_reconnect=True):
    target = "amqps://{}{}".format(self.address.hostname, self.address.path)
    if operation:
        target = target + operation
    handler = Sender(
        self, target, partition=partition, send_timeout=send_timeout,
        keep_alive=keep_alive, auto_reconnect=auto_reconnect)
    self.clients.append(handler)
    return handler
Add a sender to the client to send EventData objects to an EventHub. :param partition: Optionally specify a particular partition to send to. If omitted, the events will be distributed to available partitions via round-robin. :type partition: str :param operation: An optional operation to be appended to the hostname in the target URL. The value must start with `/` character. :type operation: str :param send_timeout: The timeout in seconds for an individual event to be sent from the time that it is queued. Default value is 60 seconds. If set to 0, there will be no timeout. :type send_timeout: int :param keep_alive: The time interval in seconds between pinging the connection to keep it alive during periods of inactivity. The default value is 30 seconds. If set to `None`, the connection will not be pinged. :type keep_alive: int :param auto_reconnect: Whether to automatically reconnect the sender if a retryable error occurs. Default value is `True`. :rtype: ~azure.eventhub.sender.Sender
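A minimal usage sketch for the row above, assuming the old azure-eventhub 1.x client API; the namespace, hub name, and credentials are placeholders:

from azure.eventhub import EventHubClient, EventData

client = EventHubClient("amqps://mynamespace.servicebus.windows.net/myeventhub",
                        username="send_policy", password="<sas-key>")
sender = client.add_sender(partition="0", send_timeout=30)
client.run()                     # opens the connection and starts the handlers
sender.send(EventData(b"hello"))
client.stop()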
372,910
def event_params(segments, params, band=None, n_fft=None, slopes=None,
                 prep=None, parent=None):
    # NOTE: string literals were stripped in extraction; the parameter keys
    # are recovered from the docstring ('dur', 'minamp', ...); dialog text,
    # axis names, and slope level names are best-guess reconstructions.
    if parent is not None:
        progress = QProgressDialog('Computing parameters', 'Abort',
                                   0, len(segments) - 1, parent)
        progress.setWindowModality(Qt.ApplicationModal)
    param_keys = ['dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power',
                  'peakf', 'energy', 'peakef']
    if params == 'all':
        params = {k: 1 for k in param_keys}
    if prep is None:
        prep = {k: 0 for k in param_keys}
    if band is None:
        band = (None, None)
    params_out = []
    evt_output = False
    for i, seg in enumerate(segments):
        out = dict(seg)
        dat = seg['data']
        if params['dur']:
            out['dur'] = float(dat.number_of('time')) / dat.s_freq
            evt_output = True
        for key, op in [('minamp', _amin), ('maxamp', _amax), ('ptp', _ptp),
                        ('rms', (square, _mean, sqrt))]:
            if params[key]:
                dat1 = seg['trans_data'] if prep[key] else dat
                out[key] = math(dat1, operator=op, axis='time')
                evt_output = True
        for pw, pk in [('power', 'peakf'), ('energy', 'peakef')]:
            if params[pw] or params[pk]:
                evt_output = True
                if prep[pw] or prep[pk]:
                    prep_pw, prep_pk = band_power(seg['trans_data'], band,
                                                  scaling=pw, n_fft=n_fft)
                if not (prep[pw] and prep[pk]):
                    raw_pw, raw_pk = band_power(dat, band,
                                                scaling=pw, n_fft=n_fft)
                out[pw] = prep_pw if prep[pw] else raw_pw
                out[pk] = prep_pk if prep[pk] else raw_pk
        if slopes:
            evt_output = True
            out['slope'] = {}
            dat1 = seg['trans_data'] if slopes['prep'] else dat
            if slopes['invert']:
                dat1 = math(dat1, operator=negative, axis='time')
            if slopes['avg_slope'] and slopes['max_slope']:
                level = 'all'
            elif slopes['avg_slope']:
                level = 'average'
            else:
                level = 'maximum'
            for chan in dat1.axis['chan'][0]:
                d = dat1(chan=chan)[0]
                out['slope'][chan] = get_slopes(d, dat.s_freq, level=level)
        if evt_output:
            timeline = dat.axis['time'][0]
            out['start'] = timeline[0]
            out['end'] = timeline[-1]
        params_out.append(out)
        if parent:
            progress.setValue(i)
            if progress.wasCanceled():
                msg = 'Analysis canceled by user.'  # message reconstructed
                parent.statusBar().showMessage(msg)
                return
    if parent:
        progress.close()
    return params_out
Compute event parameters. Parameters ---------- segments : instance of wonambi.trans.select.Segments list of segments, with time series and metadata params : dict of bool, or str 'dur', 'minamp', 'maxamp', 'ptp', 'rms', 'power', 'peakf', 'energy', 'peakef'. If 'all', a dict will be created with these keys and all values as True, so that all parameters are returned. band : tuple of float band of interest for power and energy n_fft : int length of FFT. if shorter than input signal, signal is truncated; if longer, signal is zero-padded to length slopes : dict of bool 'avg_slope', 'max_slope', 'prep', 'invert' prep : dict of bool same keys as params. if True, segment['trans_data'] will be used as dat parent : QMainWindow for use with GUI only Returns ------- list of dict list of segments, with time series, metadata and parameters
372,911
def remove_builder(cls, builder_name: str):
    cls.builders.pop(builder_name, None)
    for hook_spec in cls.hooks.values():
        hook_spec.pop(builder_name, None)
Remove a registered builder `builder_name`. No reason to use this except for tests.
372,912
def return_hdr(self):
    # NOTE: most string literals (glob patterns, dict keys, fallback values,
    # datetime formats) were stripped in extraction; only the clearly
    # inferable '*.xml' pattern and hidden-file check ('.') are restored,
    # the rest are left blank.
    orig = {}
    for xml_file in self.filename.glob('*.xml'):
        if xml_file.stem[0] != '.':
            orig[xml_file.stem] = parse_xml(str(xml_file))
    signals = sorted(self.filename.glob())
    for signal in signals:
        block_hdr, i_data = read_all_block_hdr(signal)
        self._signal.append(signal)
        self._block_hdr.append(block_hdr)
        self._i_data.append(i_data)
        n_samples = asarray([x[][0] for x in block_hdr], )
        self._n_samples.append(n_samples)
    try:
        subj_id = orig[][0][0][]
    except KeyError:
        subj_id =
    try:
        start_time = datetime.strptime(orig[][0][][:26], )
    except KeyError:
        start_time = DEFAULT_DATETIME
    self.start_time = start_time
    videos = (list(self.filename.glob()) + list(self.filename.glob()))
    videos = [x for x in videos if x.stem[0] != '.']
    if len(videos) > 1:
        lg.warning( + .join(videos))
    self._videos = videos
    s_freq = [x[0][][0] for x in self._block_hdr]
    assert all([x == s_freq[0] for x in s_freq])
    SIGNAL = 0
    s_freq = self._block_hdr[SIGNAL][0][][0]
    n_samples = sum(self._n_samples[SIGNAL])
    chan_name, self._nchan_signal1 = _read_chan_name(orig)
    self._orig = orig
    return subj_id, start_time, s_freq, chan_name, n_samples, orig
Return the header for further use. Returns ------- subj_id : str subject identification code start_time : datetime start time of the dataset s_freq : float sampling frequency chan_name : list of str list of all the channels n_samples : int number of samples in the dataset orig : dict additional information taken directly from the header
372,913
def makeDigraph(automaton, inputAsString=repr,
                outputAsString=repr,
                stateAsString=repr):
    # attr-dict literals and edge port strings are best-guess reconstructions;
    # the original's `fontame` typo is also fixed to `fontname`.
    digraph = graphviz.Digraph(graph_attr={'pack': 'true', 'dpi': '100'},
                               node_attr={'fontname': 'Menlo'},
                               edge_attr={'fontname': 'Menlo'})
    for state in automaton.states():
        if state is automaton.initialState:
            stateShape = "bold"
            fontName = "Menlo-Bold"
        else:
            stateShape = ""
            fontName = "Menlo"
        digraph.node(stateAsString(state),
                     fontname=fontName, shape="ellipse",
                     style=stateShape, color="blue")
    for n, eachTransition in enumerate(automaton.allTransitions()):
        inState, inputSymbol, outState, outputSymbols = eachTransition
        thisTransition = "t{}".format(n)
        inputLabel = inputAsString(inputSymbol)
        port = "tableport"
        table = tableMaker(inputLabel,
                           [outputAsString(outputSymbol)
                            for outputSymbol in outputSymbols],
                           port=port)
        digraph.node(thisTransition,
                     label=_gvhtml(table), margin="0.2", shape="none")
        digraph.edge(stateAsString(inState),
                     '{}:{}:w'.format(thisTransition, port),
                     arrowhead="none")
        digraph.edge('{}:{}:e'.format(thisTransition, port),
                     stateAsString(outState))
    return digraph
Produce a L{graphviz.Digraph} object from an automaton.
372,914
def transpose_note(note, transpose, scale="C"):
    val = note_to_val(note)
    val += transpose
    return val_to_note(val, scale)
Transpose a note :param str note: note to transpose :param int transpose: transposition interval :param str scale: key scale :rtype: str :return: transposed note
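A quick usage sketch; note_to_val/val_to_note are the module's own helpers, and the exact note-naming convention is an assumption:

# transpose C up by 2 in the key of C -> D under the assumed naming
print(transpose_note("C", 2, scale="C"))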
372,915
def app_routes(app):
    # dict-key literals reconstructed; the originals were stripped
    _routes = []
    for rule in app.url_map.iter_rules():
        _routes.append({
            'rule': rule.rule,
            'endpoint': rule.endpoint,
            'methods': list(rule.methods)
        })
    return jsonify({'routes': _routes})
List the routes of an app.
372,916
def forward_ad(node, wrt, preserve_result=False, check_dims=True):
    if not isinstance(node, gast.FunctionDef):
        raise TypeError
    cfg_obj = cfg.CFG.build_cfg(node)
    cfg.Active(range(len(node.args.args))).visit(cfg_obj.entry)
    fad = ForwardAD(wrt, preserve_result, check_dims)
    node = fad.visit(node)
    node = annotate.find_stacks(node)
    node = gast.Module([node])
    anno.clearanno(node)
    return node, fad.required
Perform forward-mode AD on an AST. This function analyses the AST to determine which variables are active and proceeds by taking the naive derivative. Before returning the primal and adjoint it annotates push and pop statements as such. Args: node: A `FunctionDef` AST node. wrt: A tuple of argument indices with respect to which we take the derivative. preserve_result: A boolean indicating whether the original non-differentiated function value should be returned check_dims: A boolean indicating whether the provided derivatives should have the same shape as their corresponding arguments. Returns: mod: A `Module` node containing the naive primal and adjoint of the function which can be fed to the `split` and `joint` functions. required: A list of tuples of functions and argument indices. These functions were called by the function but did not have an adjoint.
372,917
def parse(self, target):
    if isinstance(target, ContentNode):
        if target.name:
            self.parent = target
            self.name.parse(self)
            self.name += target.name
            target.ruleset.append(self)
            # the cache-key and split-separator literals were stripped
            # in extraction and are left blank
            self.root.cache[][str(self.name).split()[0]].add(self)
    super(Ruleset, self).parse(target)
Parse nested rulesets and save it in cache.
372,918
def delete_category(category_id):
    # verb/domain literals reconstructed from the pybossa-client pattern
    try:
        res = _pybossa_req('delete', 'category', category_id)
        if type(res).__name__ == 'bool':
            return True
        else:
            return res
    except:
        raise
Delete a Category with id = category_id. :param category_id: PYBOSSA Category ID :type category_id: integer :returns: True -- the response status code
372,919
def update_virtual_meta(self):
    import astropy.units
    try:
        path = os.path.join(self.get_private_dir(create=False), "virtual_meta.yaml")
        if os.path.exists(path):
            meta_info = vaex.utils.read_json_or_yaml(path)
            if 'virtual_columns' not in meta_info:
                return
            self.virtual_columns.update(meta_info["virtual_columns"])
            self.variables.update(meta_info["variables"])
            self.ucds.update(meta_info["ucds"])
            self.descriptions.update(meta_info["descriptions"])
            units = {key: astropy.units.Unit(value)
                     for key, value in meta_info["units"].items()}
            self.units.update(units)
    except:
        logger.exception("non fatal error")
Will read back the virtual column etc, written by :func:`DataFrame.write_virtual_meta`. This will be done when opening a DataFrame.
372,920
def install_handler(self, event_type, handler, user_handle=None): return self.visalib.install_visa_handler(self.session, event_type, handler, user_handle)
Installs handlers for event callbacks in this resource. :param event_type: Logical event identifier. :param handler: Interpreted as a valid reference to a handler to be installed by a client application. :param user_handle: A value specified by an application that can be used for identifying handlers uniquely for an event type. :returns: user handle (a ctypes object)
372,921
def linewidth(self, linewidth=None):
    if linewidth is None:
        return self._linewidth
    else:
        if not is_numeric(linewidth):
            raise TypeError(
                # '%s' placeholder restored so the message formats correctly
                "linewidth must be number, not '%s'" % str(linewidth)
            )
        self._linewidth = linewidth
Returns or sets (if a value is provided) the width of the series' line. :param Number linewidth: If given, the series' linewidth will be set to this. :rtype: ``Number``
372,922
def declare(self, queue='', virtual_host='/', passive=False, durable=False,
            auto_delete=False, arguments=None):
    # payload keys reconstructed from the keyword-argument names
    if passive:
        return self.get(queue, virtual_host=virtual_host)
    queue_payload = json.dumps(
        {
            'durable': durable,
            'auto_delete': auto_delete,
            'arguments': arguments or {},
            'vhost': virtual_host
        }
    )
    return self.http_client.put(
        API_QUEUE % (
            quote(virtual_host, ''),
            queue
        ),
        payload=queue_payload)
Declare a Queue. :param str queue: Queue name :param str virtual_host: Virtual host name :param bool passive: Do not create :param bool durable: Durable queue :param bool auto_delete: Automatically delete when not in use :param dict|None arguments: Queue key/value arguments :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
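A hedged usage sketch, assuming this is the amqpstorm management API; the URL and credentials are placeholders:

from amqpstorm.management import ManagementApi

client = ManagementApi('http://localhost:15672', 'guest', 'guest')
# idempotent declare; passive=True would only check for existence
client.queue.declare('my_queue', virtual_host='/', durable=True)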
372,923
def run(self):
    old_inplace, self.inplace = self.inplace, 0
    _build_ext.run(self)
    self.inplace = old_inplace
    if old_inplace:
        self.copy_extensions_to_source()
Build extensions in build directory, then copy if --inplace
372,924
def updateData(self, state_data, action_data, reward_data):
    self.state_data[:, self.updates] = state_data
    self.action_data[:, self.updates] = action_data
    self.reward_data[0, self.updates] = reward_data
    self.updates += 1
    self._render()
Updates the data used by the renderer.
372,925
def file_key_retire(blockchain_id, file_key, config_path=CONFIG_PATH, wallet_keys=None):
    # NOTE: dict-key literals were stripped in extraction; 'status'/'error'
    # are recovered from the docstring, the other keys are best guesses.
    config_dir = os.path.dirname(config_path)
    url = file_url_expired_keys(blockchain_id)
    proxy = blockstack_client.get_default_proxy(config_path=config_path)
    old_key_bundle_res = blockstack_client.data_get(url, wallet_keys=wallet_keys, proxy=proxy)
    if 'error' in old_key_bundle_res:
        log.warn("Failed to get old key bundle: %s" % old_key_bundle_res['error'])
        old_key_list = []
    else:
        old_key_list = old_key_bundle_res['data']['keys']
    for old_key in old_key_list:
        if old_key['key_id'] == file_key['key_id']:
            log.warning("Key %s is already retired" % file_key['key_id'])
            return {'status': True}
    old_key_list.insert(0, file_key)
    res = blockstack_client.data_put(url, {'keys': old_key_list},
                                     wallet_keys=wallet_keys, proxy=proxy)
    if 'error' in res:
        log.error("Failed to append to expired key bundle: %s" % res['error'])
        return {'error': 'Failed to append to expired key bundle'}
    return {'status': True}
Retire the given key. Move it to the head of the old key bundle list @file_key should be data returned by file_key_lookup Return {'status': True} on success Return {'error': ...} on error
372,926
def sum_sp_values(self):
    if self.values is None:
        ret = IdValues()
    else:
        # the '0' key is recovered from the docstring's return example
        ret = IdValues({'0': sum(int(x) for x in self.values.values())})
    return ret
Return system-level values (spa + spb). input: "values": { "spa": 385, "spb": 505 } return: "values": { "0": 890 }
372,927
def prepare_data(data_dir, fileroot, block_pct_tokens_thresh=0.1):
    if not 0.0 <= block_pct_tokens_thresh <= 1.0:
        raise ValueError('block_pct_tokens_thresh must be in [0.0, 1.0]')
    html = read_html_file(data_dir, fileroot)
    blocks = read_gold_standard_blocks_file(data_dir, fileroot, split_blocks=True)
    content_blocks = []
    comments_blocks = []
    for block in blocks:
        block_split = block.split('\t')  # tab-separated gold-standard fields
        num_block_tokens = len(block_split[2].split())
        content_blocks.append(
            (float(block_split[0]), num_block_tokens, block_split[3].split()))
        comments_blocks.append(
            (float(block_split[1]), num_block_tokens, block_split[4].split()))
    parsed_content_blocks = _parse_content_or_comments_blocks(
        content_blocks, block_pct_tokens_thresh)
    parsed_comments_blocks = _parse_content_or_comments_blocks(
        comments_blocks, block_pct_tokens_thresh)
    return (html, parsed_content_blocks, parsed_comments_blocks)
Prepare data for a single HTML + gold standard blocks example, uniquely identified by ``fileroot``. Args: data_dir (str) fileroot (str) block_pct_tokens_thresh (float): must be in [0.0, 1.0] Returns: Tuple[str, Tuple[np.array[int], np.array[int], List[str]], Tuple[np.array[int], np.array[int], List[str]]]: The first element is simply the raw html as a string. The second and third elements are 3-tuples for content and comments, respectively, where the first element is a numpy array of 1s and 0s whose values correspond to whether or not a given block is considered non-content; the second element is a numpy integer array whose values are the total number of tokens in each block; and the third element is a flat list of content or comment tokens as strings, concatenated from all blocks. See Also: :func:`prepare_all_data`
372,928
def req_withdraw(self, address, amount, currency, fee=0, addr_tag="", _async=False):
    # dict keys reconstructed from the argument names; the endpoint path is
    # the documented Huobi withdraw-create endpoint (best guess)
    params = {
        'address': address,
        'amount': amount,
        'currency': currency,
        'fee': fee,
        'addr-tag': addr_tag
    }
    path = '/v1/dw/withdraw/api/create'
    return api_key_post(params, path, _async=_async)
Request a withdrawal of virtual currency. :param address: :param amount: :param currency: btc, ltc, bcc, eth, etc ... (currencies supported by Huobi Pro) :param fee: :param addr_tag: :return: { "status": "ok", "data": 700 }
372,929
def Liu(Tb, Tc, Pc):
    Tbr = Tb/Tc
    return R*Tb*(Tb/220.)**0.0627*(1. - Tbr)**0.38*log(Pc/101325.) \
        / (1 - Tbr + 0.38*Tbr*log(Tbr))
r'''Calculates enthalpy of vaporization at the normal boiling point using the Liu [1]_ correlation, and a chemical's critical temperature, pressure and boiling point. The enthalpy of vaporization is given by: .. math:: \Delta H_{vap} = RT_b \left[ \frac{T_b}{220}\right]^{0.0627} \frac{ (1-T_{br})^{0.38} \ln(P_c/P_A)}{1-T_{br} + 0.38 T_{br} \ln T_{br}} Parameters ---------- Tb : float Boiling temperature of the fluid [K] Tc : float Critical temperature of fluid [K] Pc : float Critical pressure of fluid [Pa] Returns ------- Hvap : float Enthalpy of vaporization, [J/mol] Notes ----- This formulation can be adjusted for lower boiling points, due to the use of a rationalized pressure relationship. The formulation is taken from the original article. A correction for alcohols and organic acids based on carbon number, which only modifies the boiling point, is available but not implemented. No sample calculations are available in the article. Internal units: Pa and K Examples -------- Same problem as in Perry's examples >>> Liu(294.0, 466.0, 5.55E6) 26378.566319606754 References ---------- .. [1] LIU, ZHI-YONG. "Estimation of Heat of Vaporization of Pure Liquid at Its Normal Boiling Temperature." Chemical Engineering Communications 184, no. 1 (February 1, 2001): 221-28. doi:10.1080/00986440108912849.
372,930
def get_argument_starttime(self):
    try:
        starttime = self.get_argument(constants.PARAM_STARTTIME)
        return starttime
    except tornado.web.MissingArgumentError as e:
        raise Exception(e.log_message)
Helper function to get starttime argument. Raises exception if argument is missing. Returns the starttime argument.
372,931
def _parse_q2r(self, f):
    natom, dim, epsilon, borns = self._parse_parameters(f)
    # dict-key literals reconstructed from the variable names
    fc_dct = {'fc': self._parse_fc(f, natom, dim),
              'dimension': dim,
              'dielectric': epsilon,
              'born': borns}
    return fc_dct
Parse q2r output file The format of q2r output is described at the mailing list below: http://www.democritos.it/pipermail/pw_forum/2005-April/002408.html http://www.democritos.it/pipermail/pw_forum/2008-September/010099.html http://www.democritos.it/pipermail/pw_forum/2009-August/013613.html https://www.mail-archive.com/pw_forum@pwscf.org/msg24388.html
372,932
def remove_namespace(self, ns_uri):
    if not self.contains_namespace(ns_uri):
        return
    ni = self.__ns_uri_map.pop(ns_uri)
    for prefix in ni.prefixes:
        del self.__prefix_map[prefix]
Removes the indicated namespace from this set.
372,933
def add_arguments(parser, default_level=logging.INFO):
    # attribute and flag literals reconstructed: falls back from argparse's
    # add_argument to optparse's add_option, as the docstring describes
    adder = (
        getattr(parser, 'add_argument', None)
        or getattr(parser, 'add_option')
    )
    adder(
        '-l', '--log-level',
        default=default_level, type=log_level,
        help="Set log level (DEBUG, INFO, WARNING, ERROR)")
Add arguments to an ArgumentParser or OptionParser for purposes of grabbing a logging level.
372,934
def _parse_arguments():
    parser = argparse.ArgumentParser(description="CMake AST Dumper")
    parser.add_argument("filename", nargs=1, metavar=("FILE"),
                        help="read FILE")
    return parser.parse_args()
Return a parser context result.
372,935
def score_x_of_a_kind_yatzy(dice: List[int], min_same_faces: int) -> int:
    for die, count in Counter(dice).most_common(1):
        if count >= min_same_faces:
            return die * min_same_faces
    return 0
Similar to yahtzee, but only return the sum of the dice that satisfy min_same_faces
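A worked check of the scoring rule above (Counter comes from collections):

# three 4s out of [4, 4, 4, 2, 1] score 4 * 3 = 12; a pair scores nothing
assert score_x_of_a_kind_yatzy([4, 4, 4, 2, 1], min_same_faces=3) == 12
assert score_x_of_a_kind_yatzy([4, 4, 2, 2, 1], min_same_faces=3) == 0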
372,936
def do_mkdir(self, line):
    args = self.line_to_args(line)
    for filename in args:
        filename = resolve_path(filename)
        if not mkdir(filename):
            # error-message literal reconstructed
            print_err('Unable to create %s' % filename)
mkdir DIRECTORY... Creates one or more directories.
372,937
def handle_http_error(self, response, custom_messages=None, raise_for_status=False):
    if not custom_messages:
        custom_messages = {}
    if response.status_code in custom_messages.keys():
        raise errors.HTTPError(custom_messages[response.status_code])
    if raise_for_status:
        response.raise_for_status()
Converts service errors to Python exceptions Parameters ---------- response : requests.Response A service response. custom_messages : dict, optional A mapping of custom exception messages to HTTP status codes. raise_for_status : bool, optional If True, the requests library provides Python exceptions. Returns ------- None
372,938
def from_point(cls, point, network=BitcoinMainNet, **kwargs):
    verifying_key = VerifyingKey.from_public_point(point, curve=SECP256k1)
    return cls.from_verifying_key(verifying_key, network=network, **kwargs)
Create a PublicKey from a point on the SECP256k1 curve. :param point: A point on the SECP256k1 curve. :type point: SECP256k1.point
372,939
def _split_index(self, key):
    if not isinstance(key, tuple):
        key = (key,)
    elif key == ():
        return (), ()
    if key[0] is Ellipsis:
        num_pad = self.ndims - len(key) + 1
        key = (slice(None),) * num_pad + key[1:]
    elif len(key) < self.ndims:
        num_pad = self.ndims - len(key)
        key = key + (slice(None),) * num_pad
    map_slice = key[:self.ndims]
    if self._check_key_type:
        map_slice = self._apply_key_type(map_slice)
    if len(key) == self.ndims:
        return map_slice, ()
    else:
        return map_slice, key[self.ndims:]
Partitions key into key and deep dimension groups. If only key indices are supplied, the data is indexed with an empty tuple. Keys with fewer indices than there are dimensions will be padded.
372,940
def is_bday(date, bday=None):
    _date = Timestamp(date)
    if bday is None:
        bday = CustomBusinessDay(calendar=USFederalHolidayCalendar())
    return _date == (_date + bday) - bday
Return true iff the given date is a business day. Parameters ---------- date : :class:`pandas.Timestamp` Any value that can be converted to a pandas Timestamp--e.g., '2012-05-01', dt.datetime(2012, 5, 1, 3) bday : :class:`pandas.tseries.offsets.CustomBusinessDay` Defaults to `CustomBusinessDay(calendar=USFederalHolidayCalendar())`. Pass this parameter in performance-sensitive contexts, such as when calling this function in a loop. The creation of the `CustomBusinessDay` object is the performance bottleneck of this function. Cf. `pandas.tseries.offsets.CustomBusinessDay <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#custom-business-days-experimental>`_. Returns ------- val : bool True iff `date` is a business day
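A usage sketch following the docstring's performance advice: build the CustomBusinessDay once and reuse it in a loop (dates are illustrative):

from pandas.tseries.offsets import CustomBusinessDay
from pandas.tseries.holiday import USFederalHolidayCalendar

bday = CustomBusinessDay(calendar=USFederalHolidayCalendar())
for d in ['2012-05-01', '2012-05-05', '2012-07-04']:
    print(d, is_bday(d, bday=bday))  # Tuesday: True; Saturday and July 4th: False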
372,941
def delete(self, *args, **kwargs):
    api = Api()
    api.authenticate()
    api.delete_video(self.video_id)
    return super(Video, self).delete(*args, **kwargs)
Deletes the video from youtube Raises: OperationError
372,942
def mount(nbd, root=None):
    # command/path/glob literals reconstructed (best guess at the Salt
    # qemu_nbd module's logic)
    __salt__['cmd.run'](
        'partprobe {0}'.format(nbd),
        python_shell=False,
    )
    ret = {}
    if root is None:
        root = os.path.join(
            tempfile.gettempdir(), 'nbd', os.path.basename(nbd)
        )
    for part in glob.glob('{0}p*'.format(nbd)):
        m_pt = os.path.join(root, os.path.basename(part))
        time.sleep(1)
        mnt = __salt__['mount.mount'](m_pt, part, True)
        if mnt is not True:
            continue
        ret[m_pt] = part
    return ret
Pass in the nbd connection device location, mount all partitions and return a dict of mount points CLI Example: .. code-block:: bash salt '*' qemu_nbd.mount /dev/nbd0
372,943
def where(self, inplace=False, **kwargs):
    # the '_index' suffix and bool dtype are recovered from the docstring
    masks = {k: np.ones(v, dtype='bool') for k, v in self.dimensions.items()}

    def index_to_mask(index, n):
        val = np.zeros(n, dtype='bool')
        val[index] = True
        return val

    def masks_and(dict1, dict2):
        return {k: dict1[k] & index_to_mask(dict2[k], len(dict1[k]))
                for k in dict1}

    for key in kwargs:
        value = kwargs[key]
        if key.endswith('_index'):
            if isinstance(value, int):
                value = [value]
            dim = key[:-len('_index')]
            m = self._propagate_dim(value, dim)
            masks = masks_and(masks, m)
        else:
            attribute = self.get_attribute(key)
            if isinstance(value, list):
                mask = reduce(operator.or_, [attribute.value == m for m in value])
            else:
                mask = attribute.value == value
            m = self._propagate_dim(mask, attribute.dim)
            masks = masks_and(masks, m)
    return masks
Return indices over every dimension that met the conditions. Condition syntax: *attribute* = value Return indices that satisfy the condition where the attribute is equal to the value e.g. type_array = 'H' *attribute* = list(value1, value2) Return indices that satisfy the condition where the attribute is equal to any of the value in the list. e.g. type_array = ['H', 'O'] *dimension_index* = value: int *dimension_index* = value: list(int) Return only elements that correspond to the index in the specified dimension: atom_index = 0 atom_index = [0, 1]
372,944
def parse(cls, expression):
    parsed = {"name": None, "arguments": [], "options": []}
    if not expression.strip():
        raise ValueError("Console command signature is empty.")
    expression = expression.replace(os.linesep, "")
    matches = re.match(r"[^\s]+", expression)
    if not matches:
        raise ValueError("Unable to determine command name from signature.")
    name = matches.group(0)
    parsed["name"] = name
    tokens = re.findall(r"\{\s*(.*?)\s*\}", expression)
    if tokens:
        parsed.update(cls._parameters(tokens))
    return parsed
Parse the given console command definition into a dict. :param expression: The expression to parse :type expression: str :rtype: dict
372,945
def generate(self, id_or_uri):
    uri = self._client.build_uri(id_or_uri) + "/generate"
    return self._client.get(uri)
Generates and returns a random range. Args: id_or_uri: ID or URI of range. Returns: dict: A dict containing a list with IDs.
372,946
def parser(key="default"):
    if key not in _parsers:
        if key == "ssh":
            _parsers["ssh"] = CodeParser(True, False)
        else:
            key = "default"
    return _parsers[key]
Returns the parser for the given key, (e.g. 'ssh')
372,947
def _reads_per_position(bam_in, loci_file, out_dir):
    data = Counter()
    a = pybedtools.BedTool(bam_in)
    b = pybedtools.BedTool(loci_file)
    c = a.intersect(b, s=True, bed=True, wo=True)
    for line in c:
        end = int(line[1]) + 1 + int(line[2]) if line[5] == "+" else int(line[1]) + 1
        start = int(line[1]) + 1 if line[5] == "+" else int(line[1]) + 1 + int(line[2])
        side5 = "%s\t5p\t%s" % (line[15], start)
        side3 = "%s\t3p\t%s" % (line[15], end)
        data[side5] += 1
        data[side3] += 1
    # output filename literal reconstructed (best guess); mode is 'w'
    counts_reads = op.join(out_dir, 'counts_reads.tsv')
    with open(counts_reads, 'w') as out_handle:
        for k in data:
            print(k, file=out_handle, end="")
    return counts_reads
Create input for computing entropy.
372,948
def _parse_sentencetree(self, tree, parent_node_id=None, ignore_traces=True):
    # NOTE: several key literals were stripped in extraction; 'label',
    # 'token', 'pos', and 'utf-8' are best-guess reconstructions.
    def get_nodelabel(node):
        if isinstance(node, nltk.tree.Tree):
            return node.label()
        elif isinstance(node, unicode):
            return node.encode('utf-8')
        else:
            raise ValueError("Unexpected node type: {0}, {1}".format(type(node), node))

    root_node_id = self._node_id
    self.node[root_node_id]['label'] = get_nodelabel(tree)
    for subtree in tree:
        self._node_id += 1
        node_label = get_nodelabel(subtree)
        node_label = PTB_BRACKET_UNESCAPE.get(node_label, node_label)
        pos_tag = self.node[parent_node_id]['label']
        token_attrs = {
            'label': node_label,
            self.ns+':token': node_label,
            self.ns+':pos': pos_tag}
        self.node[parent_node_id].update(token_attrs)
        self.tokens.append(parent_node_id)
        if isinstance(subtree, nltk.tree.Tree):
            self._parse_sentencetree(subtree, parent_node_id=self._node_id)
parse a sentence Tree into this document graph
372,949
def add_annotation(
        self, subj: URIRef, pred: URIRef, obj: Union[Literal, URIRef],
        a_p: URIRef, a_o: Union[Literal, URIRef]) -> BNode:
    bnode: BNode = self.triple2annotation_bnode.get((subj, pred, obj))
    if not bnode:
        a_s: BNode = BNode()
        self.triple2annotation_bnode[(subj, pred, obj)] = a_s
        self.g.add((a_s, RDF.type, OWL.Axiom))
        self.g.add((a_s, OWL.annotatedSource, self.process_subj_or_pred(subj)))
        self.g.add((a_s, OWL.annotatedProperty, self.process_subj_or_pred(pred)))
        self.g.add((a_s, OWL.annotatedTarget, self.process_obj(obj)))
    else:
        a_s = bnode
    self.g.add((a_s, self.process_subj_or_pred(a_p), self.process_obj(a_o)))
    # a_s is the axiom node in both the new and the cached case; returning
    # bnode here would yield None for a freshly created annotation
    return a_s
Adds an annotation to the rdflib graph. The annotation axiom will be filled in if this is a new annotation for the triple. Args: subj: Entity subject to be annotated pred: Entity's Predicate Anchor to be annotated obj: Entity's Object Anchor to be annotated a_p: Annotation predicate a_o: Annotation object Returns: A BNode which is an address to the location in the RDF graph that is storing the annotation information.
372,950
def current_changed(self, index):
    editor = self.get_current_editor()
    if editor.lsp_ready and not editor.document_opened:
        editor.document_did_open()
    if index != -1:
        editor.setFocus()
        logger.debug("Set focus to: %s" % editor.filename)
    else:
        self.reset_statusbar.emit()
    self.opened_files_list_changed.emit()
    self.stack_history.refresh()
    self.stack_history.remove_and_append(index)
    try:
        logger.debug("Current changed: %d - %s" %
                     (index, self.data[index].editor.filename))
    except IndexError:
        pass
    self.update_plugin_title.emit()
    if editor is not None:
        try:
            self.current_file_changed.emit(self.data[index].filename,
                                           editor.get_position())
        except IndexError:
            pass
Stack index has changed
372,951
def calibrate_signal(signal, resp, fs, frange):
    dc = np.mean(resp)
    resp = resp - dc
    npts = len(signal)
    f0 = np.ceil(frange[0] / (float(fs) / npts))
    f1 = np.floor(frange[1] / (float(fs) / npts))
    y = resp
    Y = np.fft.rfft(y)
    x = signal
    X = np.fft.rfft(x)
    H = Y / X
    A = X / H
    return np.fft.irfft(A)
Given original signal and recording, spits out a calibrated signal
372,952
def set_latency(self, latency):
    # dict-key literals reconstructed (best guess: the client's 'config'
    # section holds 'latency')
    self._client['config']['latency'] = latency
    yield from self._server.client_latency(self.identifier, latency)
Set client latency.
372,953
def backup(file_name, jail=None, chroot=None, root=None):
    # subcommand/option literals reconstructed from the pkg(8) backup syntax
    ret = __salt__['cmd.run'](
        _pkg(jail, chroot, root) + ['backup', '-d', file_name],
        output_loglevel='trace',
        python_shell=False
    )
    return ret.split()[1]
Export installed packages into yaml+mtree file CLI Example: .. code-block:: bash salt '*' pkg.backup /tmp/pkg jail Backup packages from the specified jail. Note that this will run the command within the jail, and so the path to the backup file will be relative to the root of the jail CLI Example: .. code-block:: bash salt '*' pkg.backup /tmp/pkg jail=<jail name or id> chroot Backup packages from the specified chroot (ignored if ``jail`` is specified). Note that this will run the command within the chroot, and so the path to the backup file will be relative to the root of the chroot. root Backup packages from the specified root (ignored if ``jail`` is specified). Note that this will run the command within the root, and so the path to the backup file will be relative to the root of the root. CLI Example: .. code-block:: bash salt '*' pkg.backup /tmp/pkg chroot=/path/to/chroot
372,954
def has_role(item):
    def predicate(ctx):
        if not isinstance(ctx.channel, discord.abc.GuildChannel):
            raise NoPrivateMessage()
        if isinstance(item, int):
            role = discord.utils.get(ctx.author.roles, id=item)
        else:
            role = discord.utils.get(ctx.author.roles, name=item)
        if role is None:
            raise MissingRole(item)
        return True
    return check(predicate)
A :func:`.check` that is added that checks if the member invoking the command has the role specified via the name or ID specified. If a string is specified, you must give the exact name of the role, including caps and spelling. If an integer is specified, you must give the exact snowflake ID of the role. If the message is invoked in a private message context then the check will return ``False``. This check raises one of two special exceptions, :exc:`.MissingRole` if the user is missing a role, or :exc:`.NoPrivateMessage` if it is used in a private message. Both inherit from :exc:`.CheckFailure`. .. versionchanged:: 1.1.0 Raise :exc:`.MissingRole` or :exc:`.NoPrivateMessage` instead of generic :exc:`.CheckFailure` Parameters ----------- item: Union[:class:`int`, :class:`str`] The name or ID of the role to check.
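A usage sketch for the check above, as it is typically applied to a discord.py command (the role name is a placeholder):

from discord.ext import commands

@commands.command()
@commands.has_role("Moderator")  # exact role name, caps included
async def purge(ctx):
    await ctx.send("Purging...")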
372,955
def certify_date(value, required=True):
    if certify_required(
        value=value,
        required=required,
    ):
        return
    if not isinstance(value, date):
        raise CertifierTypeError(
            message="expected timestamp (date), but value is of type {cls!r}".format(
                cls=value.__class__.__name__),
            value=value,
            required=required,
        )
Certifier for datetime.date values. :param value: The value to be certified. :param bool required: Whether the value can be `None` Defaults to True. :raises CertifierTypeError: The type is invalid
372,956
def _header_string(basis_dict):
    # NOTE: all string literals were stripped in extraction; the separator,
    # labels, and dict keys below are best-guess reconstructions.
    tw = textwrap.TextWrapper(initial_indent='', subsequent_indent=' ' * 20)
    header = '-' * 70 + '\n'
    header += ' Basis Set Exchange\n'
    header += ' Version ' + version() + '\n'
    header += ' ' + _main_url + '\n'
    header += '-' * 70 + '\n'
    header += '   Basis set: ' + basis_dict['name'] + '\n'
    header += tw.fill(' Description: ' + basis_dict['description']) + '\n'
    header += '        Role: ' + basis_dict['role'] + '\n'
    header += tw.fill('     Version: {}  ({})'.format(
        basis_dict['version'], basis_dict['revision_description'])) + '\n'
    header += '-' * 70 + '\n'
    return header
Creates a header with information about a basis set Information includes description, revision, etc, but not references
372,957
def dumps(self, obj, salt=None):
    payload = want_bytes(self.dump_payload(obj))
    rv = self.make_signer(salt).sign(payload)
    if self.is_text_serializer:
        rv = rv.decode()
    return rv
Returns a signed string serialized with the internal serializer. The return value can be either a byte or unicode string depending on the format of the internal serializer.
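A usage sketch with one concrete itsdangerous serializer that exposes this dumps (the key is a placeholder):

from itsdangerous import URLSafeSerializer

s = URLSafeSerializer("secret-key")
token = s.dumps({"id": 5})   # text serializer, so a str comes back
assert s.loads(token) == {"id": 5}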
372,958
def prepare_request_body(self, private_key=None, subject=None, issuer=None,
                         audience=None, expires_at=None, issued_at=None,
                         extra_claims=None, body='', scope=None,
                         include_client_id=False, **kwargs):
    # claim-key and kwargs literals reconstructed from the docstring
    # (iss/aud/sub/exp/iat, not_before -> nbf, jwt_id -> jti)
    import jwt
    key = private_key or self.private_key
    if not key:
        raise ValueError('An encryption key must be supplied to make JWT'
                         ' token requests.')
    claim = {
        'iss': issuer or self.issuer,
        'aud': audience or self.audience,
        'sub': subject or self.subject,
        'exp': int(expires_at or time.time() + 3600),
        'iat': int(issued_at or time.time()),
    }
    for attr in ('iss', 'aud', 'sub'):
        if claim[attr] is None:
            raise ValueError('Claim must include %s but none was given.' % attr)
    if 'not_before' in kwargs:
        claim['nbf'] = kwargs.pop('not_before')
    if 'jwt_id' in kwargs:
        claim['jti'] = kwargs.pop('jwt_id')
    claim.update(extra_claims or {})
    assertion = jwt.encode(claim, key, 'RS256')
    assertion = to_unicode(assertion)
    kwargs['client_id'] = self.client_id
    kwargs['include_client_id'] = include_client_id
    return prepare_token_request(self.grant_type,
                                 body=body,
                                 assertion=assertion,
                                 scope=scope,
                                 **kwargs)
Create and add a JWT assertion to the request body. :param private_key: Private key used for signing and encrypting. Must be given as a string. :param subject: (sub) The principal that is the subject of the JWT, i.e. which user is the token requested on behalf of. For example, ``foo@example.com``. :param issuer: (iss) The JWT MUST contain an "iss" (issuer) claim that contains a unique identifier for the entity that issued the JWT. For example, ``your-client@provider.com``. :param audience: (aud) A value identifying the authorization server as an intended audience, e.g. ``https://provider.com/oauth2/token``. :param expires_at: A unix expiration timestamp for the JWT. Defaults to an hour from now, i.e. ``time.time() + 3600``. :param issued_at: A unix timestamp of when the JWT was created. Defaults to now, i.e. ``time.time()``. :param extra_claims: A dict of additional claims to include in the JWT. :param body: Existing request body (URL encoded string) to embed parameters into. This may contain extra parameters. Default ''. :param scope: The scope of the access request. :param include_client_id: `True` to send the `client_id` in the body of the upstream request. This is required if the client is not authenticating with the authorization server as described in `Section 3.2.1`_. False otherwise (default). :type include_client_id: Boolean :param not_before: A unix timestamp after which the JWT may be used. Not included unless provided. * :param jwt_id: A unique JWT token identifier. Not included unless provided. * :param kwargs: Extra credentials to include in the token request. Parameters marked with a `*` above are not explicit arguments in the function signature, but are specially documented arguments for items appearing in the generic `**kwargs` keyworded input. The "scope" parameter may be used, as defined in the Assertion Framework for OAuth 2.0 Client Authentication and Authorization Grants [I-D.ietf-oauth-assertions] specification, to indicate the requested scope. Authentication of the client is optional, as described in `Section 3.2.1`_ of OAuth 2.0 [RFC6749] and consequently, the "client_id" is only needed when a form of client authentication that relies on the parameter is used. The following non-normative example demonstrates an Access Token Request with a JWT as an authorization grant (with extra line breaks for display purposes only): .. code-block:: http POST /token.oauth2 HTTP/1.1 Host: as.example.com Content-Type: application/x-www-form-urlencoded grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer &assertion=eyJhbGciOiJFUzI1NiJ9. eyJpc3Mi[...omitted for brevity...]. J9l-ZhwP[...omitted for brevity...] .. _`Section 3.2.1`: https://tools.ietf.org/html/rfc6749#section-3.2.1
372,959
def _cache_key_select_daterange(method, self, field_id, field_title, style=None):
    key = update_timer(), field_id, field_title, style
    return key
This function returns the key used to decide if method select_daterange has to be recomputed
372,960
def get_plugin_conf(self, phase, name):
    match = [x for x in self.template[phase] if x.get('name') == name]
    return match[0]
Return the configuration for a plugin. Raises KeyError if there are no plugins of that type. Raises IndexError if the named plugin is not listed.
372,961
def filter_exclude_downhole(self, threshold, filt=True):
    f = self.filt.grab_filt(filt)
    if self.n == 1:
        nfilt = filters.exclude_downhole(f, threshold)
    else:
        nfilt = []
        for i in range(self.n):
            nf = self.ns == i + 1
            nfilt.append(filters.exclude_downhole(f & nf, threshold))
        nfilt = np.apply_along_axis(any, 0, nfilt)
    # name/info literals reconstructed (best guess); the originals were stripped
    self.filt.add(name='exclude_downhole_{:.0f}'.format(threshold),
                  filt=nfilt,
                  info='Exclude data downhole of {:.0f} consecutive excluded points'.format(threshold),
                  params=(threshold, filt))
Exclude all points down-hole (after) the first excluded data. Parameters ---------- threshold : int The minimum number of contiguous excluded data points that must exist before downhole exclusion occurs. filt : valid filter string or bool Which filter to consider. If True, applies to currently active filters.
372,962
def populate_observable(self, time, kind, dataset, **kwargs):
    # kind-list and substring literals were stripped in extraction;
    # 'mesh'/'orb'/'pblum' are best-guess reconstructions
    if kind in ['mesh', 'orb']:
        return
    if time == self.time and dataset in self.populated_at_time and 'pblum' not in kind:
        self.populated_at_time.append(dataset)
TODO: add documentation
372,963
def _find_valid_index(self, how):
    # 'first'/'last' literals recovered from the docstring
    assert how in ['first', 'last']
    if len(self) == 0:
        return None
    is_valid = ~self.isna()
    if self.ndim == 2:
        is_valid = is_valid.any(1)
    if how == 'first':
        idxpos = is_valid.values[::].argmax()
    if how == 'last':
        idxpos = len(self) - 1 - is_valid.values[::-1].argmax()
    chk_notna = is_valid.iat[idxpos]
    idx = self.index[idxpos]
    if not chk_notna:
        return None
    return idx
Retrieves the index of the first valid value. Parameters ---------- how : {'first', 'last'} Use this parameter to change between the first or last valid index. Returns ------- idx_first_valid : type of index
372,964
def load_neurons(neurons, neuron_loader=load_neuron, name=None,
                 population_class=Population, ignored_exceptions=()):
    # the 'Population' default is recovered from the docstring; the log
    # message is a reconstruction
    if isinstance(neurons, (list, tuple)):
        files = neurons
        name = name if name is not None else 'Population'
    elif isinstance(neurons, StringType):
        files = get_files_by_path(neurons)
        name = name if name is not None else os.path.basename(neurons)
    ignored_exceptions = tuple(ignored_exceptions)
    pop = []
    for f in files:
        try:
            pop.append(neuron_loader(f))
        except NeuroMError as e:
            if isinstance(e, ignored_exceptions):
                L.info('Ignoring exception "%s" for file %s',
                       e, os.path.basename(f))
                continue
            raise
    return population_class(pop, name=name)
Create a population object from all morphologies in a directory or from morphologies in a list of file names Parameters: neurons: directory path or list of neuron file paths neuron_loader: function taking a filename and returning a neuron population_class: class representing populations name (str): optional name of population. By default 'Population' or filepath basename depending on whether neurons is a list or directory path respectively. Returns: neuron population object
372,965
def potential_cloud_pixels(self):
    eq1 = self.basic_test()
    eq2 = self.whiteness_test()
    eq3 = self.hot_test()
    eq4 = self.nirswir_test()
    # sensor-id literal reconstructed (best guess): only Landsat 8 carries
    # the cirrus band used by cirrus_test()
    if self.sat == 'L8':
        cir = self.cirrus_test()
        return (eq1 & eq2 & eq3 & eq4) | cir
    else:
        return eq1 & eq2 & eq3 & eq4
Determine potential cloud pixels (PCPs) Combine basic spectral tests to get a preliminary cloud mask First pass, section 3.1.1 in Zhu and Woodcock 2012 Equation 6 (Zhu and Woodcock, 2012) Parameters ---------- ndvi: ndarray ndsi: ndarray blue: ndarray green: ndarray red: ndarray nir: ndarray swir1: ndarray swir2: ndarray cirrus: ndarray tirs1: ndarray Output ------ ndarray: potential cloud mask, boolean
372,966
def parse_fn(fn):
    # NOTE: the replace/split separator literals and the regex pattern were
    # stripped in extraction and are left blank
    try:
        parts = os.path.splitext(os.path.split(fn)[-1])[0].replace(, )\
            .split()[:2]
        coords = [float(crds) for crds in re.split(, parts[0] + parts[1])[1:]]
    except:
        coords = [np.nan] * 4
    return coords
This parses the file name and returns the coordinates of the tile Parameters ----------- fn : str Filename of a GEOTIFF Returns -------- coords = [LLC.lat, LLC.lon, URC.lat, URC.lon]
372,967
def select_valid_methods_P(self, T, P):
    if self.forced_P:
        considered_methods = list(self.user_methods_P)
    else:
        considered_methods = list(self.all_methods_P)
        if self.user_methods_P:
            [considered_methods.remove(i) for i in self.user_methods_P]
    preferences = sorted([self.ranked_methods_P.index(i)
                          for i in considered_methods])
    sorted_methods = [self.ranked_methods_P[i] for i in preferences]
    if self.user_methods_P:
        [sorted_methods.insert(0, i) for i in reversed(self.user_methods_P)]
    sorted_valid_methods_P = []
    for method in sorted_methods:
        if self.test_method_validity_P(T, P, method):
            sorted_valid_methods_P.append(method)
    return sorted_valid_methods_P
r'''Method to obtain a sorted list of methods which are valid at `T` according to `test_method_validity`. Considers either only user methods if forced is True, or all methods. User methods are first tested according to their listed order, and unless forced is True, then all methods are tested and sorted by their order in `ranked_methods`. Parameters ---------- T : float Temperature at which to test methods, [K] P : float Pressure at which to test methods, [Pa] Returns ------- sorted_valid_methods_P : list Sorted lists of methods valid at T and P according to `test_method_validity`
372,968
def ResetConsoleColor() -> bool:
    if sys.stdout:
        sys.stdout.flush()
    # the docstring promises a bool, so the result must be returned
    return bool(ctypes.windll.kernel32.SetConsoleTextAttribute(
        _ConsoleOutputHandle, _DefaultConsoleColor))
Reset to the default text color on console window. Return bool, True if it succeeds otherwise False.
372,969
def random(key: str, index: Index, index_map: IndexMap = None) -> pd.Series:
    if len(index) > 0:
        random_state = np.random.RandomState(seed=get_hash(key))
        sample_size = index_map.map_size if index_map is not None else index.max() + 1
        try:
            draw_index = index_map[index]
        except (IndexError, TypeError):
            draw_index = index
        raw_draws = random_state.random_sample(sample_size)
        return pd.Series(raw_draws[draw_index], index=index)
    return pd.Series(index=index)
Produces an indexed `pandas.Series` of uniformly distributed random numbers. The index passed in typically corresponds to a subset of rows in a `pandas.DataFrame` for which a probabilistic draw needs to be made. Parameters ---------- key : A string used to create a seed for the random number generation. index : The index used for the returned series. index_map : A mapping between the provided index (which may contain ints, floats, datetimes or any arbitrary combination of them) and an integer index into the random number array. Returns ------- pd.Series A series of random numbers indexed by the provided index.
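A small usage sketch; get_hash is the module's own helper, so only the plain-index path (no index_map) is exercised, and the key string is arbitrary:

import pandas as pd

idx = pd.Index([0, 3, 7])
draws = random("fertility.propensity", idx)
# the key seeds the generator, so the same key and index give identical draws
assert draws.equals(random("fertility.propensity", idx))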
372,970
def instance(self, counter=None, pipeline_counter=None):
    # 'counter'/'stages'/'name' keys inferred from the Go pipeline-instance
    # JSON; the _get path literal is a best-guess reconstruction
    pipeline_counter = pipeline_counter or self.pipeline_counter
    pipeline_instance = None
    if not pipeline_counter:
        pipeline_instance = self.server.pipeline(self.pipeline_name).instance()
        self.pipeline_counter = int(pipeline_instance['counter'])
    if not counter:
        if pipeline_instance is None:
            pipeline_instance = (
                self.server
                .pipeline(self.pipeline_name)
                .instance(pipeline_counter)
            )
        for stages in pipeline_instance['stages']:
            if stages['name'] == self.stage_name:
                return self.instance(
                    counter=int(stages['counter']),
                    pipeline_counter=pipeline_counter
                )
    return self._get('/instance/{pipeline_counter}/{counter}'.format(
        pipeline_counter=pipeline_counter, counter=counter))
Returns all the information regarding a specific stage run See the `Go stage instance documentation`__ for examples. .. __: http://api.go.cd/current/#get-stage-instance Args: counter (int): The stage instance to fetch. If falsey returns the latest stage instance from :meth:`history`. pipeline_counter (int): The pipeline instance for which to fetch the stage. If falsey returns the latest pipeline instance. Returns: Response: :class:`gocd.api.response.Response` object
372,971
def raw_search(self, *args, **kwargs):
    # 'limit'/'date' kwargs keys recovered from the docstring; the IMAP
    # search criteria and fetch parts are reconstructions
    limit = 50
    try:
        limit = kwargs['limit']
    except KeyError:
        pass
    self._mail.select("inbox")
    try:
        date = kwargs['date']
        date_str = date.strftime("%d-%b-%Y")
        _, email_ids = self._mail.search(None, '(SENTSINCE "%s")' % date_str)
    except KeyError:
        _, email_ids = self._mail.search(None, 'ALL')
    email_ids = email_ids[0].split()
    matching_uids = []
    for _ in range(1, min(limit, len(email_ids))):
        email_id = email_ids.pop()
        rfc_body = self._mail.fetch(email_id, "(RFC822)")[1][0][1]
        match = True
        for expr in args:
            if re.search(expr, rfc_body) is None:
                match = False
                break
        if match:
            uid = re.search(
                "UID\\D*(\\d+)\\D*",
                self._mail.fetch(email_id, '(UID)')[1][0]).group(1)
            matching_uids.append(uid)
    return matching_uids
Find a set of emails matching each regular expression passed in against the (RFC822) content. Args: *args: list of regular expressions. Kwargs: limit (int) - Limit how many of the most recent emails to search through. date (datetime) - If specified, avoid checking messages older than this date.
372,972
def scheme_chunker(text, getreffs):
    level = len(text.citation)
    types = [citation.name for citation in text.citation]
    if types == ["book", "poem", "line"]:
        level = 2
    elif types == ["book", "line"]:
        return line_chunker(text, getreffs)
    return [tuple([reff.split(":")[-1]] * 2) for reff in getreffs(level=level)]
This is the scheme chunker which will resolve the reference giving a callback (getreffs) and a text object with its metadata :param text: Text Object representing either an edition or a translation :type text: MyCapytains.resources.inventory.Text :param getreffs: callback function which retrieves a list of references :type getreffs: function :return: List of urn references with their human readable version :rtype: [(str, str)]
372,973
def xml_replace(filename, **replacements):
    # NOTE: the f-string and marker literals below were stripped in
    # extraction and are reconstructed from the doctests in the docstring;
    # the enumeration helper calls are best guesses.
    keywords = set(replacements.keys())
    templatename = f'{filename}.xmlt'
    targetname = f'{filename}.xml'
    print(f'template file: {templatename}')
    print(f'target file: {targetname}')
    print('replacements:')
    with open(templatename) as templatefile:
        templatebody = templatefile.read()
    parts = templatebody.replace('|-->', '<!--|').split('<!--|')
    defaults = {}
    for idx, part in enumerate(parts):
        if idx % 2:
            subparts = part.partition('=')
            if subparts[2]:
                parts[idx] = subparts[0]
                if subparts[0] not in replacements:
                    if ((subparts[0] in defaults) and
                            (defaults[subparts[0]] != str(subparts[2]))):
                        raise RuntimeError(
                            f'Template file `{templatename}` defines '
                            f'different default values for marker '
                            f'`{subparts[0]}`.')
                    defaults[subparts[0]] = str(subparts[2])
    markers = parts[1::2]
    try:
        unused_keywords = keywords.copy()
        for idx, part in enumerate(parts):
            if idx % 2:
                argument_info = 'given argument'
                newpart = replacements.get(part)
                if newpart is None:
                    argument_info = 'default argument'
                    newpart = defaults.get(part)
                    if newpart is None:
                        raise RuntimeError(
                            f'Marker `{part}` cannot be replaced.')
                print(f'  {part} --> {newpart} ({argument_info})')
                parts[idx] = str(newpart)
                unused_keywords.discard(part)
        targetbody = ''.join(parts)
        if unused_keywords:
            raise RuntimeError(
                f'Keyword(s) `{objecttools.enumeration(sorted(unused_keywords))}` '
                f'cannot be used.')
        with open(targetname, 'w') as targetfile:
            targetfile.write(targetbody)
    except BaseException:
        objecttools.augment_excmessage(
            f'While trying to replace the markers '
            f'`{objecttools.enumeration(sorted(set(markers)))}` of the XML '
            f'template file `{templatename}` with the available keywords '
            f'`{objecttools.enumeration(sorted(keywords))}`')
Read the content of an XML template file (XMLT), apply the given `replacements` to its substitution markers, and write the result into an XML file with the same name but ending with `xml` instead of `xmlt`. First, we write an XMLT file, containing a regular HTML comment, a readily defined element `e1`, and some other elements with substitutions markers. Substitution markers are HTML comments starting and ending with the `|` character: >>> from hydpy import xml_replace, TestIO >>> with TestIO(): ... with open('test1.xmlt', 'w') as templatefile: ... _ = templatefile.write( ... '<!--a normal comment-->\\n' ... '<e1>element 1</e1>\\n' ... '<e2><!--|e2|--></e2>\\n' ... '<e3><!--|e3_|--></e3>\\n' ... '<e4><!--|e4=element 4|--></e4>\\n' ... '<e2><!--|e2|--></e2>') Function |xml_replace| can both be called within a Python session and from a command line. We start with the first type of application. Each substitution marker must be met by a keyword argument unless it holds a default value (`e4`). All arguments are converted to a |str| object (`e3`). Template files can use the same substitution marker multiple times (`e2`): >>> with TestIO(): ... xml_replace('test1', e2='E2', e3_=3, e4='ELEMENT 4') template file: test1.xmlt target file: test1.xml replacements: e2 --> E2 (given argument) e3_ --> 3 (given argument) e4 --> ELEMENT 4 (given argument) e2 --> E2 (given argument) >>> with TestIO(): ... with open('test1.xml') as targetfile: ... print(targetfile.read()) <!--a normal comment--> <e1>element 1</e1> <e2>E2</e2> <e3>3</e3> <e4>ELEMENT 4</e4> <e2>E2</e2> Without custom values, |xml_replace| applies predefined default values, if available (`e4`): >>> with TestIO(): ... xml_replace('test1', e2='E2', e3_=3) # doctest: +ELLIPSIS template file: test1.xmlt target file: test1.xml replacements: e2 --> E2 (given argument) e3_ --> 3 (given argument) e4 --> element 4 (default argument) e2 --> E2 (given argument) >>> with TestIO(): ... with open('test1.xml') as targetfile: ... print(targetfile.read()) <!--a normal comment--> <e1>element 1</e1> <e2>E2</e2> <e3>3</e3> <e4>element 4</e4> <e2>E2</e2> Missing and useless keyword arguments result in errors: >>> with TestIO(): ... xml_replace('test1', e2='E2') Traceback (most recent call last): ... RuntimeError: While trying to replace the markers `e2, e3_, and e4` \ of the XML template file `test1.xmlt` with the available keywords `e2`, \ the following error occurred: Marker `e3_` cannot be replaced. >>> with TestIO(): ... xml_replace('test1', e2='e2', e3_='E3', e4='e4', e5='e5') Traceback (most recent call last): ... RuntimeError: While trying to replace the markers `e2, e3_, and e4` \ of the XML template file `test1.xmlt` with the available keywords `e2, e3_, \ e4, and e5`, the following error occurred: Keyword(s) `e5` cannot be used. Using different default values for the same substitution marker is not allowed: >>> from hydpy import pub, TestIO, xml_replace >>> with TestIO(): ... with open('test2.xmlt', 'w') as templatefile: ... _ = templatefile.write( ... '<e4><!--|e4=element 4|--></e4>\\n' ... '<e4><!--|e4=ELEMENT 4|--></e4>') >>> with TestIO(): ... xml_replace('test2', e4=4) template file: test2.xmlt target file: test2.xml replacements: e4 --> 4 (given argument) e4 --> 4 (given argument) >>> with TestIO(): ... with open('test2.xml') as targetfile: ... print(targetfile.read()) <e4>4</e4> <e4>4</e4> >>> with TestIO(): ... xml_replace('test2') Traceback (most recent call last): ... 
RuntimeError: Template file `test2.xmlt` defines different default values \ for marker `e4`. As mentioned above, function |xml_replace| is registered as a "script function" and can thus be used via command line: >>> pub.scriptfunctions['xml_replace'].__name__ 'xml_replace' >>> pub.scriptfunctions['xml_replace'].__module__ 'hydpy.exe.replacetools' Use script |hyd| to execute function |xml_replace|: >>> from hydpy import run_subprocess >>> with TestIO(): ... run_subprocess( ... 'hyd.py xml_replace test1 e2="Element 2" e3_=3') template file: test1.xmlt target file: test1.xml replacements: e2 --> Element 2 (given argument) e3_ --> 3 (given argument) e4 --> element 4 (default argument) e2 --> Element 2 (given argument) >>> with TestIO(): ... with open('test1.xml') as targetfile: ... print(targetfile.read()) <!--a normal comment--> <e1>element 1</e1> <e2>Element 2</e2> <e3>3</e3> <e4>element 4</e4> <e2>Element 2</e2>
372,974
def _check_repo_sign_utils_support(name):
    if salt.utils.path.which(name):
        return True
    else:
        raise CommandExecutionError(
            # error-message literal reconstructed (best guess)
            'utility \'{0}\' needs to be installed or made available in '
            'search path'.format(name)
        )
Check for specified command name in search path
372,975
def geturl(environ, query=True, path=True, use_server_name=False):
    # WSGI environ keys restored per the standard PEP 333 URL reconstruction
    url = [environ['wsgi.url_scheme'] + '://']
    if use_server_name:
        url.append(environ['SERVER_NAME'])
        if environ['wsgi.url_scheme'] == 'https':
            if environ['SERVER_PORT'] != '443':
                url.append(':' + environ['SERVER_PORT'])
        else:
            if environ['SERVER_PORT'] != '80':
                url.append(':' + environ['SERVER_PORT'])
    else:
        url.append(environ['HTTP_HOST'])
    if path:
        url.append(getpath(environ))
    if query and environ.get('QUERY_STRING'):
        url.append('?' + environ['QUERY_STRING'])
    return ''.join(url)
Rebuilds a request URL (from PEP 333). You may want to choose to use the environment variables server_name and server_port instead of http_host in some cases. The parameter use_server_name allows you to choose. :param query: Is QUERY_STRING included in URI (default: True) :param path: Is path included in URI (default: True) :param use_server_name: If SERVER_NAME/SERVER_PORT should be used instead of HTTP_HOST
372,976
def _load(self):
    try:
        get = requests.get(self._ref, verify=self.http_verify,
                           auth=self.auth, timeout=self.timeout)
    except requests.exceptions.RequestException as err:
        raise NotFoundError(err)
    return get.content
Function load. :return: Response content :raises: NotFoundError
372,977
def store_magic_envelope_doc(self, payload):
    try:
        json_payload = json.loads(decode_if_bytes(payload))
    except ValueError:
        xml = unquote(decode_if_bytes(payload))
        xml = xml.lstrip().encode("utf-8")
        logger.debug("diaspora.protocol.store_magic_envelope_doc: xml payload: %s", xml)
        self.doc = etree.fromstring(xml)
    else:
        logger.debug("diaspora.protocol.store_magic_envelope_doc: json payload: %s", json_payload)
        self.doc = self.get_json_payload_magic_envelope(json_payload)
Get the Magic Envelope, trying JSON first.
372,978
def all_subclasses(cls):
    for subcls in cls.__subclasses__():
        yield subcls
        for subsubcls in all_subclasses(subcls):
            yield subsubcls
Generator yielding all subclasses of `cls` recursively
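A quick usage example (subclasses are yielded in definition order):

class A: pass
class B(A): pass
class C(B): pass

print([cls.__name__ for cls in all_subclasses(A)])  # ['B', 'C']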
372,979
def get_interfaces(self):
    result = {}
    interfaces = junos_views.junos_iface_table(self.device)
    interfaces.get()
    interfaces_logical = junos_views.junos_logical_iface_table(self.device)
    interfaces_logical.get()

    def _convert_to_dict(interfaces):
        interfaces = dict(interfaces)
        for iface, iface_data in interfaces.items():
            result[iface] = {
                "is_up": iface_data["is_up"],
                "is_enabled": (
                    True
                    if iface_data["is_enabled"] is None
                    else iface_data["is_enabled"]
                ),
                "description": (iface_data["description"] or ""),
                "last_flapped": float((iface_data["last_flapped"] or -1)),
                "mac_address": napalm.base.helpers.convert(
                    napalm.base.helpers.mac,
                    iface_data["mac_address"],
                    py23_compat.text_type(iface_data["mac_address"]),
                ),
                "speed": -1,
                "mtu": 0,
            }
            # MTU: fall back to 0 when the reported value is not numeric
            match_mtu = re.search(r"(\w+)", str(iface_data["mtu"]) or "")
            mtu = napalm.base.helpers.convert(int, match_mtu.group(0), 0)
            result[iface]["mtu"] = mtu
            # Speed: resolve "Auto" via the negotiated speed, normalise Gbps
            match = re.search(r"(\d+|[Aa]uto)(\w*)", iface_data["speed"] or "")
            if match and match.group(1).lower() == "auto":
                match = re.search(
                    r"(\d+)(\w*)", iface_data["negotiated_speed"] or ""
                )
            if match is None:
                continue
            speed_value = napalm.base.helpers.convert(int, match.group(1), -1)
            if speed_value == -1:
                continue
            speed_unit = match.group(2)
            if speed_unit.lower() == "gbps":
                speed_value *= 1000
            result[iface]["speed"] = speed_value
        return result

    result = _convert_to_dict(interfaces)
    result.update(_convert_to_dict(interfaces_logical))
    return result
Return interfaces details.
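For orientation, the mapping returned above follows the standard NAPALM
get_interfaces shape; the interface name and values below are invented
examples:

example = {
    'ge-0/0/0': {
        'is_up': True,
        'is_enabled': True,
        'description': 'uplink to core',
        'last_flapped': 1429978575.0,
        'mac_address': '2C:60:0C:01:02:03',
        'speed': 1000,   # Mbit/s; -1 when the speed cannot be parsed
        'mtu': 1514,     # 0 when the reported MTU is not numeric
    },
}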
372,980
def _prm_store_from_dict(self, fullname, store_dict, hdf5_group, store_flags, kwargs):
    # NOTE: the string literals in this entry were lost in extraction; the
    # '.' separator and the error message below are reconstructed
    # approximations.  The entry is also truncated at the end.
    for key, data_to_store in store_dict.items():
        original_hdf5_group = None
        flag = store_flags[key]
        if '.' in key:
            # Nested key: walk or create the intermediate HDF5 groups
            original_hdf5_group = hdf5_group
            split_key = key.split('.')
            key = split_key.pop()
            for inner_key in split_key:
                hdf5_group, newly_created = self._all_create_or_get_group(
                    inner_key, hdf5_group)
                if newly_created:
                    setattr(hdf5_group._v_attrs,
                            HDF5StorageService.STORAGE_TYPE,
                            HDF5StorageService.NESTED_GROUP)
                else:
                    store_type = self._all_get_from_attrs(
                        hdf5_group, HDF5StorageService.STORAGE_TYPE)
                    if store_type != HDF5StorageService.NESTED_GROUP:
                        raise ValueError(
                            'Cannot store into group `%s`, it was created '
                            'with a different storage type `%s`' %
                            (hdf5_group._v_name, store_type))
        if key in hdf5_group:
            if original_hdf5_group is not None:
                hdf5_group = original_hdf5_group
Stores a `store_dict`
372,981
def cart_to_polar(arr_c):
    if arr_c.shape[-1] == 1:
        arr_p = arr_c.copy()
    elif arr_c.shape[-1] == 2:
        arr_p = np.empty_like(arr_c)
        arr_p[..., 0] = vector_mag(arr_c)
        arr_p[..., 1] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
    elif arr_c.shape[-1] == 3:
        arr_p = np.empty_like(arr_c)
        arr_p[..., 0] = vector_mag(arr_c)
        arr_p[..., 1] = np.arccos(arr_c[..., 2] / arr_p[..., 0])
        arr_p[..., 2] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
    else:
        # Original message lost; only 1, 2 and 3 dimensions are supported
        raise Exception('Vector dimension must be 1, 2 or 3')
    return arr_p
Return cartesian vectors in their polar representation.

Parameters
----------
arr_c: array, shape (a1, a2, ..., d)
    Cartesian vectors, with last axis indexing the dimension.

Returns
-------
arr_p: array, shape of arr_c
    Polar vectors, using (radius, inclination, azimuth) convention.
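A short usage sketch.  vector_mag is a helper defined elsewhere in this
module, so a stand-in consistent with its use above (Euclidean norm over the
last axis) is included here:

import numpy as np

def vector_mag(arr):  # stand-in for the module's helper
    return np.sqrt(np.sum(arr ** 2, axis=-1))

v = np.array([[3.0, 4.0]])
print(cart_to_polar(v))  # [[5.         0.92729522]] -> (radius, azimuth)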
372,982
def add_property(self, name, value):
    with self.__properties_lock:
        if name in self.__properties:
            # Already stored: properties can't be updated
            return False
        self.__properties[name] = value
        return True
Adds a property to the framework **if it is not yet set**.

If the property already exists (same name), then nothing is done.
Properties can't be updated.

:param name: The property name
:param value: The value to set
:return: True if the property was stored, else False
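The write-once semantics can be exercised in isolation.  The class below is a
minimal stand-in that mirrors the behaviour described above; it is not the
framework's real implementation:

import threading

class Properties:
    """Minimal stand-in mirroring the write-once behaviour above."""

    def __init__(self):
        self._props = {}
        self._lock = threading.Lock()

    def add_property(self, name, value):
        with self._lock:
            if name in self._props:
                return False  # existing properties are never updated
            self._props[name] = value
            return True

props = Properties()
print(props.add_property('platform', 'demo'))   # True: stored
print(props.add_property('platform', 'other'))  # False: value unchanged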
372,983
def delete_connection(self, name, reason=None):
    # The header and endpoint literals were lost in this entry; they are
    # reconstructed following the RabbitMQ management HTTP API conventions.
    headers = {'X-Reason': reason} if reason else {}
    self._api_delete(
        '/api/connections/{0}'.format(
            urllib.parse.quote_plus(name)
        ),
        headers=headers,
    )
Closes an individual connection. Give an optional reason.

:param name: The connection name
:type name: str
:param reason: An optional reason why the connection was deleted
:type reason: str
372,984
def _update_mean_in_window(self):
    self._mean_x_in_window = numpy.mean(self._x_in_window)
    self._mean_y_in_window = numpy.mean(self._y_in_window)
Compute mean in window the slow way.  Useful for the first step; considers
all values in the window.

See Also
--------
_add_observation_to_means : fast update of mean for single observation addition
_remove_observation_from_means : fast update of mean for single observation removal
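The fast counterparts referenced under See Also update the mean in O(1)
instead of rescanning the window.  A hedged sketch of that arithmetic; the
function names here are illustrative, not the library's API:

def add_observation_to_mean(mean, n, x_new):
    # mean of n+1 values after appending x_new to n values with mean `mean`
    return mean + (x_new - mean) / (n + 1)

def remove_observation_from_mean(mean, n, x_old):
    # mean of n-1 values after removing x_old from n values with mean `mean`
    return (mean * n - x_old) / (n - 1)

vals = [2.0, 4.0, 6.0]
m = sum(vals) / len(vals)                               # 4.0
print(add_observation_to_mean(m, len(vals), 8.0))       # 5.0 == mean([2, 4, 6, 8])
print(remove_observation_from_mean(m, len(vals), 6.0))  # 3.0 == mean([2, 4])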
372,985
def delete(self, *args):
    cache = get_cache()
    key = self.get_cache_key(*args)
    if key in cache:
        del cache[key]
Remove the key from the request cache and from memcache.
372,986
def R_op(self, inputs, eval_points):
    # Scaling factor between the (possibly weighted) domain and range spaces
    try:
        dom_weight = self.operator.domain.weighting.const
    except AttributeError:
        dom_weight = 1.0
    try:
        ran_weight = self.operator.range.weighting.const
    except AttributeError:
        ran_weight = 1.0
    scale = dom_weight / ran_weight
    op = self

    class TheanoJacobianAdjoint(theano.Op):
        __props__ = ()

        def make_node(self, x, v):
            x = theano.tensor.as_tensor_variable(x)
            v = theano.tensor.as_tensor_variable(v)
            return theano.Apply(self, [x, v], [x.type()])

        def perform(self, node, inputs_storage, output_storage):
            x = inputs_storage[0]
            v = inputs_storage[1]
            out = output_storage[0]
            out[0] = np.asarray(op.operator.derivative(x).adjoint(v))
            if scale != 1.0:
                out[0] *= scale

        def infer_shape(self, node, input_shapes):
            return [tuple(native(si) for si in op.operator.domain.shape)]

    r_op = TheanoJacobianAdjoint()
    r_op_apply = r_op(inputs[0], eval_points[0])
    return [r_op_apply]
Apply the adjoint of the Jacobian at ``inputs`` to ``eval_points``.

This is the symbolic counterpart of ODL's ::

    op.derivative(x).adjoint(v)

See `grad` for its usage.

Parameters
----------
inputs : 1-element list of `theano.tensor.var.TensorVariable`
    Symbolic input to the gradient, the point at which the Jacobian is
    computed.
eval_points : 1-element list of `theano.tensor.var.TensorVariable`
    Symbolic input to the adjoint of the Jacobian, i.e., the variable to
    which the Jacobian adjoint should be applied.

Returns
-------
outputs : 1-element list of `theano.tensor.var.TensorVariable`
    Symbolic result of the application of the Jacobian adjoint. It uses a
    wrapper class ``OdlDerivativeAdjointAsTheanoROp`` for
    ``(x, v) --> op.derivative(x).adjoint(v)``.
372,987
def convert_surrogate_pair(match):
    pair = match.group(0)
    codept = 0x10000 + (ord(pair[0]) - 0xd800) * 0x400 + (ord(pair[1]) - 0xdc00)
    return chr(codept)
Convert a surrogate pair to the single codepoint it represents. This implements the formula described at: http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
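A small demonstration, assuming a regex that matches high/low surrogate pairs
(this entry does not show the pattern the module itself uses):

import re

SURROGATE_PAIR = re.compile('[\ud800-\udbff][\udc00-\udfff]')  # assumed pattern
s = '\ud83d\ude00'  # UTF-16 surrogate pair for U+1F600
fixed = SURROGATE_PAIR.sub(convert_surrogate_pair, s)
print(fixed == '\U0001F600')  # True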
372,988
def parse(args):
    # NOTE: every quoted string in this entry was lost in extraction.  The
    # option strings, defaults and messages below are reconstructed from the
    # surviving dest=/choices= values and help-text fragments; treat them as
    # approximations of the original djangocms-installer source.
    from tzlocal import get_localzone
    try:
        timezone = get_localzone()
        if isinstance(timezone, pytz.BaseTzInfo):
            timezone = timezone.zone
    except Exception:
        timezone = 'UTC'
    if timezone == 'local':
        timezone = 'UTC'
    parser = argparse.ArgumentParser(
        description='Bootstrap a django CMS project.',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--config-file', dest='config_file', action='store',
                        default=None, help='Configuration file to read options from')
    parser.add_argument('--config-dump', dest='config_dump', action='store',
                        default=None, help='Dump the configuration to the given file')
    parser.add_argument('--db', '-d', dest='db', action=DbAction,
                        default='sqlite://localhost/project.db',
                        help='Database configuration (in URL format)')
    parser.add_argument('--i18n', '-i', dest='i18n', action='store',
                        choices=('yes', 'no'), default='yes',
                        help='Activate Django I18N / L10N setting')
    parser.add_argument('--use-tz', '-z', dest='use_timezone', action='store',
                        choices=('yes', 'no'), default='yes',
                        help='Activate Django timezone support')
    parser.add_argument('--timezone', '-t', dest='timezone',
                        required=False, default=timezone, action='store',
                        help='Optional default time zone')
    parser.add_argument('--reversion', '-e', dest='reversion', action='store',
                        choices=('yes', 'no'), default='yes',
                        help='Install and configure reversion support')
    parser.add_argument('--permissions', dest='permissions', action='store',
                        choices=('yes', 'no'), default='no',
                        help='Activate CMS permission management')
    parser.add_argument('--pip-options', help='pass custom pip options', default='')
    parser.add_argument('--languages', '-l', dest='languages', action='append',
                        help='Languages to enable (repeat the option or pass a '
                             'comma-separated list)')
    parser.add_argument('--django-version', dest='django_version', action='store',
                        choices=data.DJANGO_SUPPORTED,
                        default=data.DJANGO_DEFAULT, help='Django version')
    parser.add_argument('--cms-version', '-v', dest='cms_version', action='store',
                        choices=data.DJANGOCMS_SUPPORTED,
                        default=data.DJANGOCMS_DEFAULT, help='django CMS version')
    parser.add_argument('--parent-dir', '-p', dest='project_directory',
                        default='', action='store',
                        help='Optional project parent directory')
    parser.add_argument('--bootstrap', dest='bootstrap', action='store',
                        choices=('yes', 'no'), default='no',
                        help='Use Twitter Bootstrap theme')
    parser.add_argument('--templates', dest='templates', action='store',
                        default='no', help='Use custom template set')
    parser.add_argument('--starting-page', dest='starting_page', action='store',
                        choices=('yes', 'no'), default='no',
                        help='Load a starting page with examples after installation')
    parser.add_argument(dest='project_name', action='store',
                        help='Name of the project to be created')
    parser.add_argument('--list-plugins', '-P', dest='plugins', action='store_true',
                        help="List plugins that's going to be installed and configured")
    parser.add_argument('--dump-requirements', '-R', dest='dump_reqs', action='store_true',
                        help='It dumps the requirements that would be installed '
                             'according to parameters given. Together with '
                             '--requirements argument is useful for customizing '
                             'the virtualenv')
    # Advanced options: these have predefined defaults and are not asked by
    # the config wizard
    parser.add_argument('--no-input', '-q', dest='noinput', action='store_true',
                        default=False,
                        help="Don't run the configuration wizard, just use the provided values")
    parser.add_argument('--wizard', '-w', dest='wizard', action='store_true',
                        default=False, help='Run the configuration wizard')
    parser.add_argument('--verbose', dest='verbose', action='store_true',
                        default=False,
                        help="Be more verbose and don't swallow subcommands output")
    parser.add_argument('--filer', '-f', dest='filer', action='store_true',
                        default=False,
                        help='Install and configure django-filer plugins - Always enabled')
    parser.add_argument('--requirements', '-r', dest='requirements_file', action='store',
                        default=None, help='Externally defined requirements file')
    parser.add_argument('--no-deps', '-n', dest='no_deps', action='store_true',
                        default=False, help="Don't install package dependencies")
    parser.add_argument('--no-plugins', dest='no_plugins', action='store_true',
                        default=False, help="Don't install plugins")
    parser.add_argument('--no-db-driver', dest='no_db_driver', action='store_true',
                        default=False, help="Don't install database package")
    parser.add_argument('--no-sync', '-m', dest='no_sync', action='store_true',
                        default=False, help="Don't run syncdb / migrate after bootstrapping")
    parser.add_argument('--no-user', '-u', dest='no_user', action='store_true',
                        default=False, help="Don't create the admin user")
    parser.add_argument('--template', dest='template', action='store',
                        default=None, help='The path or URL to load the template from')
    parser.add_argument('--extra-settings', dest='extra_settings', action='store',
                        default=None, help='The path to a file with extra settings')
    parser.add_argument('--skip-empty-check', '-s', dest='skip_project_dir_check',
                        action='store_true', default=False,
                        help='Skip the check if the project dir is empty')
    parser.add_argument('--delete-project-dir', '-c', dest='delete_project_dir',
                        action='store_true', default=False,
                        help='Delete the project directory on creation failure')
    parser.add_argument('--utc', dest='utc', action='store_true',
                        default=False, help='Use UTC timezone')

    # When only dumping requirements, no project name is required
    # (reconstructed condition)
    if '--dump-requirements' in args:
        for action in parser._positionals._actions:
            if action.dest == 'project_name':
                action.default = 'requirements'

    config_args = ini.parse_config_file(parser, args)
    args = parser.parse_args(config_args + args)

    if not args.wizard:
        args.noinput = True
    else:
        args.noinput = False

    if not args.project_directory:
        args.project_directory = args.project_name
    args.project_directory = os.path.abspath(args.project_directory)

    # First of all, check the project name
    if not validate_project(args.project_name):
        sys.stderr.write(
            'Project name "{0}" is not valid or it\'s already defined. '
            'Please use only numbers, letters and underscores.\n'
            ''.format(args.project_name)
        )
        sys.exit(3)

    # Check the given path
    setattr(args, 'project_path',
            os.path.join(args.project_directory, args.project_name).strip())
    if not args.skip_project_dir_check:
        if (os.path.exists(args.project_directory) and
                [path for path in os.listdir(args.project_directory)
                 if not path.startswith('.')]):
            sys.stderr.write(
                'Path "{0}" already exists and is not empty, please choose a different one\n'
                'If you want to use this path anyway use the -s flag to skip this check.\n'
                ''.format(args.project_directory)
            )
            sys.exit(4)
    if os.path.exists(args.project_path):
        sys.stderr.write(
            'Path "{0}" already exists, please choose a different one\n'
            ''.format(args.project_path)
        )
        sys.exit(4)
    if args.config_dump and os.path.isfile(args.config_dump):
        sys.stdout.write(
            'Cannot dump because given configuration file "{0}" exists.\n'
            ''.format(args.config_dump)
        )
        sys.exit(8)

    # * if languages are given as multiple arguments, let's use it as is
    # * if no languages are given, fall back to the default locale
    if not args.languages:
        try:
            args.languages = [locale.getdefaultlocale()[0].split('_')[0]]
        except Exception:
            args.languages = ['en']
    elif isinstance(args.languages, six.string_types):
        args.languages = args.languages.split(',')
    elif len(args.languages) == 1 and isinstance(args.languages[0], six.string_types):
        args.languages = args.languages[0].split(',')

    args.languages = [lang.strip().lower() for lang in args.languages]
    if len(args.languages) > 1:
        args.i18n = 'yes'
    args.aldryn = False
    args.filer = True

    # Convert version to numeric format for easier checking
    try:
        django_version, cms_version = supported_versions(
            args.django_version, args.cms_version)
        cms_package = data.PACKAGE_MATRIX.get(
            cms_version, data.PACKAGE_MATRIX[data.DJANGOCMS_LTS]
        )
    except RuntimeError as e:
        sys.stderr.write(compat.unicode(e))
        sys.exit(6)
    if django_version is None:
        sys.stderr.write(
            'Please provide a Django supported version: {0}\n'.format(
                ', '.join(data.DJANGO_SUPPORTED))
        )
        sys.exit(6)
    if cms_version is None:
        sys.stderr.write(
            'Please provide a django CMS supported version: {0}\n'.format(
                ', '.join(data.DJANGOCMS_SUPPORTED))
        )
        sys.exit(6)

    default_settings = '{}.settings'.format(args.project_name)
    env_settings = os.environ.get('DJANGO_SETTINGS_MODULE', default_settings)
    if env_settings != default_settings:
        sys.stderr.write(
            'DJANGO_SETTINGS_MODULE is currently set to \'{0}\' which is not '
            'compatible with djangocms installer.\n'
            'Please unset it and retry.\n'.format(env_settings)
        )
        sys.exit(10)

    if not getattr(args, 'requirements_file'):
        requirements = []

        # django CMS version check (the REQUIREMENTS keys below are
        # reconstructed)
        if args.cms_version == 'develop':
            requirements.append(cms_package)
            warnings.warn(data.VERSION_WARNING.format('develop', 'django CMS'))
        elif args.cms_version == 'rc':
            requirements.append(cms_package)
        elif args.cms_version == 'beta':
            requirements.append(cms_package)
            warnings.warn(data.VERSION_WARNING.format('beta', 'django CMS'))
        else:
            requirements.append(cms_package)

        if args.cms_version in ('rc', 'develop'):
            requirements.extend(data.REQUIREMENTS['cms-master'])
        elif LooseVersion(cms_version) >= LooseVersion('3.6'):
            requirements.extend(data.REQUIREMENTS['cms-3.6'])
        elif LooseVersion(cms_version) >= LooseVersion('3.5'):
            requirements.extend(data.REQUIREMENTS['cms-3.5'])
        elif LooseVersion(cms_version) >= LooseVersion('3.4'):
            requirements.extend(data.REQUIREMENTS['cms-3.4'])

        if not args.no_db_driver:
            requirements.append(args.db_driver)
        if not args.no_plugins:
            if args.cms_version in ('rc', 'develop'):
                requirements.extend(data.REQUIREMENTS['plugins-master'])
            elif LooseVersion(cms_version) >= LooseVersion('3.6'):
                requirements.extend(data.REQUIREMENTS['plugins-3.6'])
            elif LooseVersion(cms_version) >= LooseVersion('3.5'):
                requirements.extend(data.REQUIREMENTS['plugins-3.5'])
            elif LooseVersion(cms_version) >= LooseVersion('3.4'):
                requirements.extend(data.REQUIREMENTS['plugins-3.4'])
            requirements.extend(data.REQUIREMENTS['filer'])

        if args.aldryn:
            requirements.extend(data.REQUIREMENTS['aldryn'])

        # Django version check
        if args.django_version == 'develop':
            requirements.append(data.DJANGO_DEVELOP)
            warnings.warn(data.VERSION_WARNING.format('develop', 'Django'))
        elif args.django_version == 'beta':
            requirements.append(data.DJANGO_BETA)
            warnings.warn(data.VERSION_WARNING.format('beta', 'Django'))
        else:
            requirements.append('Django<{0}'.format(less_than_version(django_version)))

        if django_version == '1.8':
            requirements.extend(data.REQUIREMENTS['django-1.8'])
        elif django_version == '1.9':
            requirements.extend(data.REQUIREMENTS['django-1.9'])
        elif django_version == '1.10':
            requirements.extend(data.REQUIREMENTS['django-1.10'])
        elif django_version == '1.11':
            requirements.extend(data.REQUIREMENTS['django-1.11'])
        elif django_version == '2.0':
            requirements.extend(data.REQUIREMENTS['django-2.0'])
        elif django_version == '2.1':
            requirements.extend(data.REQUIREMENTS['django-2.1'])
        requirements.extend(data.REQUIREMENTS['default'])

        setattr(args, 'requirements', '\n'.join(requirements).strip())

    # Convenient shortcuts
    setattr(args, 'cms_version', cms_version)
    setattr(args, 'django_version', django_version)
    setattr(args, 'settings_path',
            os.path.join(args.project_directory, args.project_name, 'settings.py').strip())
    setattr(args, 'urlconf_path',
            os.path.join(args.project_directory, args.project_name, 'urls.py').strip())

    if args.config_dump:
        ini.dump_config_file(args.config_dump, args, parser)

    return args
Define the available arguments
372,989
def uintersect1d(arr1, arr2, assume_unique=False):
    v = np.intersect1d(arr1, arr2, assume_unique=assume_unique)
    v = _validate_numpy_wrapper_units(v, [arr1, arr2])
    return v
Find the sorted unique elements of the two input arrays.

A wrapper around numpy.intersect1d that preserves units.  All input arrays
must have the same units.  See the documentation of numpy.intersect1d for
full details.

Examples
--------
>>> from unyt import cm
>>> A = [1, 2, 3]*cm
>>> B = [2, 3, 4]*cm
>>> uintersect1d(A, B)
unyt_array([2, 3], 'cm')
372,990
def is_blackout(self) -> bool:
    # Config key names were lost in this entry; 'NOTIFICATION_BLACKOUT' and
    # 'BLACKOUT_ACCEPT' are reconstructed from the alerta settings.
    if not current_app.config['NOTIFICATION_BLACKOUT']:
        if self.severity in current_app.config['BLACKOUT_ACCEPT']:
            return False
    return db.is_blackout_period(self)
Does this alert match a blackout period?
372,991
def file_ops(staticfied, args):
    destination = args.o or args.output
    if destination:
        with open(destination, 'w') as file:
            file.write(staticfied)
    else:
        print(staticfied)
Write to stdout or a file
372,992
def to_dict(self):
    checks = {
        key: HealthResult.evaluate(func, self.graph)
        for key, func in self.checks.items()
    }
    dct = dict(
        name=self.name,
        ok=all(checks.values()),
    )
    if checks:
        dct["checks"] = {
            key: checks[key].to_dict()
            for key in sorted(checks.keys())
        }
    return dct
Encode the name, the status of all checks, and the current overall status.
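A hedged sketch of the aggregation rule above: the component is healthy only
if every check evaluated ok.  The check names and values are invented:

checks = {'db': True, 'cache': False}
dct = dict(name='example-service', ok=all(checks.values()))
if checks:
    dct['checks'] = {key: checks[key] for key in sorted(checks)}
print(dct)
# {'name': 'example-service', 'ok': False, 'checks': {'cache': False, 'db': True}}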
372,993
def __restrictIndex(self, i):
    if self.numRecords:
        rmax = self.numRecords - 1
        if abs(i) > rmax:
            raise IndexError("Shape or Record index out of range.")
        if i < 0:
            i = range(self.numRecords)[i]
    return i
Provides list-like handling of a record index with a clearer error message if the index is out of bounds.
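The negative-index normalization above is equivalent to indexing a range, for
example:

num_records = 5
i = -2
print(range(num_records)[i])  # 3: -2 resolves to the second-to-last record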
372,994
def _removecleaner(self, cleaner):
    oldlen = len(self._old_cleaners)
    self._old_cleaners = [
        oldc for oldc in self._old_cleaners
        if not oldc.issame(cleaner)
    ]
    return len(self._old_cleaners) != oldlen
Remove the cleaner from the list if it already exists. Returns True if the cleaner was removed.
372,995
def insert_from_segwizard(self, fileobj, instruments, name, version=None, comment=None):
    self.add(LigolwSegmentList(
        active=segmentsUtils.fromsegwizard(fileobj, coltype=LIGOTimeGPS),
        instruments=instruments, name=name,
        version=version, comment=comment))
Parse the contents of the file object fileobj as a segwizard-format segment list, and insert the result as a new list of "active" segments into this LigolwSegments object. A new entry will be created in the segment_definer table for the segment list, and instruments, name and comment are used to populate the entry's metadata. Note that the "valid" segments are left empty, nominally indicating that there are no periods of validity.
372,996
def share(self, base=None, keys=None, by=None, **kwargs):
    # String literals lost in this entry are reconstructed from the
    # docstring ('fig', 'figure', 'ax', 'axes') and from psyplot's `psy`
    # accessor convention.
    if by is not None:
        if base is not None:
            if hasattr(base, 'psy') or isinstance(base, Plotter):
                base = [base]
            if by.lower() in ['ax', 'axes']:
                bases = {ax: p[0] for ax, p in six.iteritems(
                    Project(base).axes)}
            elif by.lower() in ['fig', 'figure']:
                bases = {fig: p[0] for fig, p in six.iteritems(
                    Project(base).figs)}
            else:
                raise ValueError(
                    "*by* must be out of {'fig', 'figure', 'ax', 'axes'}. "
                    "Not %s" % (by, ))
        else:
            bases = {}
        projects = self.axes if by.lower() in ['ax', 'axes'] else self.figs
        for obj, p in projects.items():
            p.share(bases.get(obj), keys, **kwargs)
    else:
        plotters = self.plotters
        if not plotters:
            return
        if base is None:
            if len(plotters) == 1:
                return
            base = plotters[0]
            plotters = plotters[1:]
        elif not isinstance(base, Plotter):
            base = getattr(getattr(base, 'psy', base), 'plotter', base)
        base.share(plotters, keys=keys, **kwargs)
Share the formatoptions of one plotter with all the others

This method shares specified formatoptions from `base` with all the
plotters in this instance.

Parameters
----------
base: None, Plotter, xarray.DataArray, InteractiveList, or list of them
    The source of the plotter that shares its formatoptions with the
    others. It can be None (then the first instance in this project is
    used), a :class:`~psyplot.plotter.Plotter` or any data object with a
    *psy* attribute. If `by` is not None, then it is expected that `base`
    is a list of data objects for each figure/axes
%(Plotter.share.parameters.keys)s
by: {'fig', 'figure', 'ax', 'axes'}
    Share the formatoptions only with the others on the same ``'figure'``
    or the same ``'axes'``. In this case, base must either be ``None`` or
    a list of the types specified for `base`
%(Plotter.share.parameters.no_keys|plotters)s

See Also
--------
psyplot.plotter.share
372,997
def childgroup(self, field):
    grid = getattr(self, "grid", None)
    named_grid = getattr(self, "named_grid", None)
    if grid is not None:
        childgroup = self._childgroup(field.children, grid)
    elif named_grid is not None:
        childgroup = self._childgroup_by_name(field.children, named_grid)
    else:
        raise AttributeError(u"Missing the grid or named_grid argument")
    return childgroup
Return a list of fields stored by row regarding the configured grid

:param field: The original field this widget is attached to
372,998
def md_to_pdf(input_name, output_name):
    # The '.pdf' literal is reconstructed from the docstring
    if output_name[-4:] == '.pdf':
        os.system("pandoc " + input_name + " -o " + output_name)
    else:
        os.system("pandoc " + input_name + " -o " + output_name + ".pdf")
Converts an input MarkDown file to a PDF of the given output name.

Parameters
==========
input_name : String
    Relative file location of the input file to where this function is
    being called.
output_name : String
    Relative file location of the output file to where this function is
    being called. Note that .pdf can be omitted.

Examples
========
Suppose we have a directory as follows:

    data/
        doc.md

To convert the document:

>>> from aide_document import convert
>>> convert.md_to_pdf('data/doc.md', 'data/doc.pdf')

.pdf can also be omitted from the second argument.
372,999
def search_seqs(self, seqrec, in_seq, locus, run=0, partial_ann=None):
    # NOTE: this entry is truncated and its string literals were lost in
    # extraction.  Quoted feature/locus names below are reconstructed
    # approximations, and names such as seq_search, feat_name, end,
    # coordinates, mapping, blocks, exact_matches, deleted_coords and
    # added_feat are defined by code elided from this entry.
    start = seq_search[1] if feat_name != 'five_prime_UTR' else 0
    si = seq_search[1] + 1 if seq_search[1] != 0 and \
        feat_name != 'five_prime_UTR' else 0
    mapcheck = set([0 if i in coordinates else 1
                    for i in range(si, end + 1)])
    skip = False
    if found_feats and len(found_feats) > 0:
        for f in found_feats:
            o1 = structures[locus][feat_name]
            o2 = structures[locus][f]
            loctyp = loctype(found_feats[f].location.start,
                             found_feats[f].location.end,
                             start, end)
            if o1 < o2 and loctyp:
                skip = True
                if self.verbose:
                    self.logger.info("Skipping map for " + feat_name)
            elif o2 < o1 and not loctyp:
                skip = True
                if self.verbose:
                    self.logger.info("Skipping map for " + feat_name)
    if 1 not in mapcheck and not skip:
        for i in range(si, end + 1):
            if i in coordinates:
                if feat_name == "exon_8" or feat_name == 'three_prime_UTR':
                    deleted_coords.update({i: coordinates[i]})
                del coordinates[i]
            else:
                if self.verbose:
                    self.logger.error("seqsearch - shouldn't be here!!")

    # ... (elided: special handling of the three_prime_UTR; these features
    # need to be mapped separately) ...
    start = seq_search[1]
    si = seq_search[1] + 1 if seq_search[1] != 0 else 0
    mapcheck = set([0 if i in coordinates else 1
                    for i in range(si, end + 1)])
    for i in range(si, end + 1):
        if i in coordinates:
            del coordinates[i]
        else:
            if self.verbose:
                self.logger.error("seqsearch - shouldn't be here!!")

    # ... (elided: code referencing 'exon_3' and 'HLA-DRB1') ...

    # Check if it's a class II sequence and only exon_2 matched exactly
    if 'exon_2' in exact_matches and len(blocks) == 2 \
            and is_classII(locus) and seq_covered < 300:
        if self.verbose and self.verbosity > 1:
            self.logger.info("Running search for class II sequence")
        r = True
        for b in blocks:
            x = b[len(b) - 1]
            if x == max(list(mapping.keys())):
                x = b[0] - 1
            else:
                x += 1
            f = mapping[x]
            if f != 'exon_2':
                r = False
        if r:
            for b in blocks:
                x = b[len(b) - 1]
                if x == max(list(mapping.keys())):
                    featname = "intron_2"
                    found_feats.update({featname: SeqFeature(
                        FeatureLocation(
                            ExactPosition(b[0] - 1),
                            ExactPosition(b[len(b) - 1]),
                            strand=1),
                        type=featname)})
                else:
                    featname = "intron_1"
                    found_feats.update({featname: SeqFeature(
                        FeatureLocation(
                            ExactPosition(b[0]),
                            ExactPosition(b[len(b) - 1]),
                            strand=1),
                        type=featname)})
                seq_covered -= len(b)
            if self.verbose and self.verbosity > 1:
                self.logger.info("Successfully annotated class II sequence")
            return Annotation(features=found_feats,
                              covered=seq_covered,
                              seq=in_seq,
                              missing=feat_missing,
                              ambig=ambig_map,
                              method=method,
                              mapping=mapping,
                              exact_match=exact_matches)

    annotated_feats, mb, mapping = self._resolve_unmapped(blocks,
                                                          feat_missing,
                                                          ambig_map,
                                                          mapping,
                                                          found_feats,
                                                          locus,
                                                          seq_covered)

    if (not mb and blocks and len(feat_missing.keys()) == 0
            and len(ambig_map.keys()) == 0):
        mb = blocks

    if mb:
        if locus in ['HLA-DRB1', 'HLA-DQB1'] and len(in_seq.seq) < 3000 \
                and 'three_prime_UTR' in exact_matches:
            for i in deleted_coords:
                mapping[i] = 1
            coordinates.update(deleted_coords)
            mb = getblocks(coordinates)
            feat_missing.update(added_feat)
            del exact_matches[exact_matches.index('three_prime_UTR')]
            del found_feats['three_prime_UTR']
            if 'three_prime_UTR' in annotated_feats:
                del annotated_feats['three_prime_UTR']
            if 'exon_8' in found_feats:
                del found_feats['exon_8']
            if 'exon_8' in annotated_feats:
                del annotated_feats['exon_8']
        refmissing = [f for f in structures[locus]
                      if f not in annotated_feats]
        if self.verbose and self.verbosity > 1:
            self.logger.info("* Annotation not complete *")
        if self.verbose and self.verbosity > 2:
            self.logger.info("Refseq was missing these features = "
                             + ",".join(list(refmissing)))
        if self.verbose and self.verbosity > 1 and len(ambig_map) > 1:
            self.logger.info("Features with ambig matches = "
                             + ",".join(list(ambig_map)))
        if self.verbose and self.verbosity > 2 and len(exact_matches) > 1:
            self.logger.info("Features exact matches = "
                             + ",".join(list(exact_matches)))
        if self.verbose and self.verbosity > 1 and len(annotated_feats) > 1:
            self.logger.info("Features annotated = "
                             + ",".join(list(annotated_feats)))
        if self.verbose and self.verbosity > 1 and len(feat_missing) > 1:
            self.logger.info("Features missing = "
                             + ",".join(list(feat_missing)))
        annotation = Annotation(features=annotated_feats,
                                covered=seq_covered,
                                seq=in_seq,
                                missing=feat_missing,
                                ambig=ambig_map,
                                blocks=mb,
                                method=method,
                                refmissing=refmissing,
                                mapping=mapping,
                                exact_match=exact_matches,
                                annotation=None)
    else:
        mb = None
        if locus in ['HLA-DRB1', 'HLA-DQB1'] and len(in_seq.seq) < 600 \
                and 'three_prime_UTR' in exact_matches \
                and 'exon_2' in annotated_feats \
                and not 'five_prime_UTR' in exact_matches:
            for i in deleted_coords:
                mapping[i] = 1
            coordinates.update(deleted_coords)
            mb = getblocks(coordinates)
            feat_missing.update(added_feat)
            del exact_matches[exact_matches.index('three_prime_UTR')]
            del found_feats['three_prime_UTR']
            if 'three_prime_UTR' in annotated_feats:
                del annotated_feats['three_prime_UTR']
            if 'exon_8' in found_feats:
                del found_feats['exon_8']
            if 'exon_8' in annotated_feats:
                del annotated_feats['exon_8']
        if self.verbose:
            self.logger.info("* No missing blocks after seq_search *")
        if self.verbose and self.verbosity > 0 and len(ambig_map) > 1:
            self.logger.info("Features with ambig matches = "
                             + ",".join(list(ambig_map)))
        if self.verbose and self.verbosity > 0 and len(exact_matches) > 1:
            self.logger.info("Features exact matches = "
                             + ",".join(list(exact_matches)))
        if self.verbose and self.verbosity > 0 and len(annotated_feats) > 1:
            self.logger.info("Features annotated = "
                             + ",".join(list(annotated_feats)))
        if self.verbose and self.verbosity > 0 and len(feat_missing) > 1:
            self.logger.info("Features missing = "
                             + ",".join(list(feat_missing)))
        annotation = Annotation(features=annotated_feats,
                                covered=seq_covered,
                                seq=in_seq,
                                missing=feat_missing,
                                ambig=ambig_map,
                                method=method,
                                blocks=mb,
                                mapping=mapping,
                                exact_match=exact_matches,
                                annotation=None)
    return annotation
search_seqs - method for annotating a BioPython sequence without alignment

:param seqrec: The reference sequence
:type seqrec: SeqRecord
:param locus: The gene locus associated with the sequence.
:type locus: str
:param in_seq: The input sequence
:type in_seq: SeqRecord
:param run: The number of runs that have been done
:type run: int
:param partial_ann: A partial annotation from a previous step
:type partial_ann: :ref:`ann`
:rtype: :ref:`ann`

Example usage:

    >>> from Bio.Seq import Seq
    >>> from seqann.seq_search import SeqSearch
    >>> inseq = Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC')
    >>> sqsrch = SeqSearch()
    >>> ann = sqsrch.search_seqs(refseqs, inseq)