def to_input_req(self):
    """Converts the ``self`` instance to the desired input request format.

    Returns:
        dict: Containing the "WarmStartType" and "ParentHyperParameterTuningJobs" as the
            first class fields.

    Examples:
        >>> warm_start_config = WarmStartConfig(warm_start_type=WarmStartTypes.TransferLearning,
        ...                                     parents=["p1", "p2"])
        >>> warm_start_config.to_input_req()
        {
            "WarmStartType": "TransferLearning",
            "ParentHyperParameterTuningJobs": [
                {'HyperParameterTuningJobName': "p1"},
                {'HyperParameterTuningJobName': "p2"},
            ]
        }
    """
    return {
        WARM_START_TYPE: self.type.value,
        PARENT_HYPERPARAMETER_TUNING_JOBS: [
            {HYPERPARAMETER_TUNING_JOB_NAME: parent} for parent in self.parents
        ],
    }
def remove_leading_zeros(num: str) -> str:
    """Strips leading zeros while handling '-', 'M', and empty strings."""
    if not num:
        return num
    if num.startswith('M'):
        ret = 'M' + num[1:].lstrip('0')
    elif num.startswith('-'):
        ret = '-' + num[1:].lstrip('0')
    else:
        ret = num.lstrip('0')
    return '0' if ret in ('', 'M', '-') else ret
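# A few illustrative checks of the function above (inputs are hypothetical
# METAR-style fields where 'M' marks a minus sign):
assert remove_leading_zeros('0045') == '45'
assert remove_leading_zeros('M005') == 'M5'
assert remove_leading_zeros('-007') == '-7'
assert remove_leading_zeros('000') == '0'
assert remove_leading_zeros('') == ''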
def dSbr_dV(self, Yf, Yt, V, buses=None, branches=None):
    """ Based on dSbr_dV.m from MATPOWER by Ray Zimmerman, developed at
    PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more
    information.

    @return: The branch power flow vectors and the partial derivatives of
        branch power flow w.r.t voltage magnitude and voltage angle.
    @rtype: tuple
    """
    buses = self.buses if buses is None else buses
    branches = self.branches if branches is None else branches

    nl = len(branches)
    nb = len(V)
    il = range(nl)
    ib = range(nb)

    f = [l.from_bus._i for l in branches]
    t = [l.to_bus._i for l in branches]

    # Compute currents.
    If = Yf * V
    It = Yt * V

    Vnorm = V / abs(V)

    diagVf = csr_matrix((V[f], (il, il)))
    diagIf = csr_matrix((If, (il, il)))
    diagVt = csr_matrix((V[t], (il, il)))
    diagIt = csr_matrix((It, (il, il)))
    diagV = csr_matrix((V, (ib, ib)))
    diagVnorm = csr_matrix((Vnorm, (ib, ib)))

    shape = (nl, nb)
    # Partial derivative of S w.r.t voltage phase angle.
    dSf_dVa = 1j * (conj(diagIf) * csr_matrix((V[f], (il, f)), shape) -
                    diagVf * conj(Yf * diagV))
    dSt_dVa = 1j * (conj(diagIt) * csr_matrix((V[t], (il, t)), shape) -
                    diagVt * conj(Yt * diagV))

    # Partial derivative of S w.r.t. voltage amplitude.
    dSf_dVm = diagVf * conj(Yf * diagVnorm) + conj(diagIf) * \
        csr_matrix((Vnorm[f], (il, f)), shape)
    dSt_dVm = diagVt * conj(Yt * diagVnorm) + conj(diagIt) * \
        csr_matrix((Vnorm[t], (il, t)), shape)

    # Compute power flow vectors.
    Sf = V[f] * conj(If)
    St = V[t] * conj(It)

    return dSf_dVa, dSf_dVm, dSt_dVa, dSt_dVm, Sf, St
def get(self):
    """ Constructs an ExecutionContextContext

    :returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
    :rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
    """
    return ExecutionContextContext(
        self._version,
        flow_sid=self._solution['flow_sid'],
        execution_sid=self._solution['execution_sid'],
    )
def JoinPath(self, path_segments):
    """Joins the path segments into a path.

    Args:
        path_segments (list[str]): path segments.

    Returns:
        str: joined path segments prefixed with the path separator.
    """
    # For paths on Windows we need to make sure to handle the first path
    # segment correctly.
    first_path_segment = None
    if path_segments and platform.system() == 'Windows':
        # Check if the first path segment contains a "special" path definition.
        first_path_segment = path_segments[0]
        first_path_segment_length = len(first_path_segment)
        first_path_segment_prefix = None

        # In case the path starts with: \\.\C:\
        if (first_path_segment_length >= 7 and
                first_path_segment.startswith('\\\\.\\') and
                first_path_segment[5:7] == ':\\'):
            first_path_segment_prefix = first_path_segment[4:6]
            first_path_segment = first_path_segment[7:]

        # In case the path starts with: \\.\ or \\?\
        elif (first_path_segment_length >= 4 and
                first_path_segment[:4] in ['\\\\.\\', '\\\\?\\']):
            first_path_segment_prefix = first_path_segment[:4]
            first_path_segment = first_path_segment[4:]

        # In case the path starts with: C:
        elif first_path_segment_length >= 2 and first_path_segment[1] == ':':
            first_path_segment_prefix = first_path_segment[:2]
            first_path_segment = first_path_segment[2:]

        # In case the path starts with: \\server\share (UNC).
        elif first_path_segment.startswith('\\\\'):
            prefix, _, remainder = first_path_segment[2:].partition(
                self.PATH_SEPARATOR)
            first_path_segment_prefix = '\\\\{0:s}'.format(prefix)
            first_path_segment = '\\{0:s}'.format(remainder)

        if first_path_segment_prefix:
            first_path_segment, _, remainder = first_path_segment.partition(
                self.PATH_SEPARATOR)

            if not remainder:
                _ = path_segments.pop(0)
            else:
                path_segments[0] = remainder

            first_path_segment = ''.join([
                first_path_segment_prefix, first_path_segment])
        else:
            first_path_segment = None

    # We are not using os.path.join() here since it will not remove all
    # variations of successive path separators.

    # Split all the path segments based on the path (segment) separator.
    path_segments = [
        segment.split(self.PATH_SEPARATOR) for segment in path_segments]

    # Flatten the sublists into one list.
    path_segments = [
        element for sublist in path_segments for element in sublist]

    # Remove empty path segments.
    path_segments = list(filter(None, path_segments))

    if first_path_segment is None:
        path = '{0:s}{1:s}'.format(
            self.PATH_SEPARATOR, self.PATH_SEPARATOR.join(path_segments))
    else:
        path = first_path_segment
        if path_segments:
            path = '{0:s}{1:s}{2:s}'.format(
                path, self.PATH_SEPARATOR,
                self.PATH_SEPARATOR.join(path_segments))

    return path
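# A minimal runnable sketch of the joiner above. Assumption: the method lives
# on a helper class that defines PATH_SEPARATOR; the stub below only supplies
# that attribute and reuses the function as a method.
import platform

class _WindowsPathHelperStub:
    PATH_SEPARATOR = '\\'
    JoinPath = JoinPath  # reuse the function above as a method

_helper = _WindowsPathHelperStub()
if platform.system() == 'Windows':
    print(_helper.JoinPath(['C:', 'Windows', 'System32']))  # C:\Windows\System32
print(_helper.JoinPath(['usr', '', 'lib']))  # \usr\lib (empty segments dropped)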
def within_line(self, viewer, points, p_start, p_stop, canvas_radius):
    """Points `points` and line endpoints `p_start`, `p_stop` are in
    data coordinates.
    Return True for points within the line defined by a line from
    `p_start` to `p_stop` and within `canvas_radius`.
    The distance between points is scaled by the viewer's canvas scale.
    """
    scale_x, scale_y = viewer.get_scale_xy()
    new_radius = canvas_radius * 1.0 / min(scale_x, scale_y)
    return self.point_within_line(points, p_start, p_stop, new_radius)
def variant_stats_from_variant(variant, metadata,
                               merge_fn=(lambda all_stats: max(
                                   all_stats,
                                   key=(lambda stats: stats.tumor_stats.depth)))):
    """Parse the variant calling stats from a variant called from multiple variant files.

    The stats are merged based on `merge_fn`.

    Parameters
    ----------
    variant : varcode.Variant
    metadata : dict
        Dictionary of variant file to variant calling metadata from that file
    merge_fn : function
        Function from list of SomaticVariantStats to single SomaticVariantStats.
        This is used if a variant is called by multiple callers or appears in
        multiple VCFs. By default, this uses the data from the caller that had
        a higher tumor depth.

    Returns
    -------
    SomaticVariantStats
    """
    all_stats = []
    for (variant_file, variant_metadata) in metadata.items():
        if _vcf_is_maf(variant_file=variant_file):
            stats = maf_somatic_variant_stats(variant, variant_metadata)
        elif _vcf_is_strelka(variant_file=variant_file,
                             variant_metadata=variant_metadata):
            stats = strelka_somatic_variant_stats(variant, variant_metadata)
        elif _vcf_is_mutect(variant_file=variant_file,
                            variant_metadata=variant_metadata):
            stats = mutect_somatic_variant_stats(variant, variant_metadata)
        else:
            raise ValueError(
                "Cannot parse sample fields, variant file {} is from an "
                "unsupported caller.".format(variant_file))
        all_stats.append(stats)
    return merge_fn(all_stats)
def parse_name(self):
    """This function uses string patterns to match a title cased name.
    This is done in a loop until there are no more names to match, so as
    to be able to include surnames etc. in the output."""
    name = []
    while True:
        # Match the current char until it doesn't match the given pattern:
        # the first char must be an uppercase alpha and the rest must be
        # lowercase alphas.
        part = self.match_string_pattern(spat.alphau, spat.alphal)
        if part == '':
            break  # There are no more matchable strings.
        self.eat_string(part)  # Eat the found string.
        name.append(part)  # Store this name part.
        if self.get_char() == ' ':
            # If the current char is a single space, eat it.
            # This allows one space between parts.
            self.eat_length(1)
    if not len(name):  # If no name parts were detected, raise an exception.
        raise PartpyError(self, 'Expecting a title cased name')
    return ' '.join(name)
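# A loose standalone illustration of the same matching rule using re instead
# of the partpy matchers (my regex, not the library's; unlike the loop above
# it requires exactly one space between parts):
import re

_TITLE_CASED = re.compile(r'[A-Z][a-z]*(?: [A-Z][a-z]*)*')

def parse_name_standalone(text):
    match = _TITLE_CASED.match(text)
    if match is None:
        raise ValueError('Expecting a title cased name')
    return match.group(0)

print(parse_name_standalone('John Smithee Took his ride'))  # 'John Smithee Took'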
def write_uint16(self, value, little_endian=True):
    """
    Pack the value as an unsigned integer and write 2 bytes to the stream.

    Args:
        value: the value to pack.
        little_endian (bool): specify the endianness. (Default) Little endian.

    Returns:
        int: the number of bytes written.
    """
    if little_endian:
        endian = "<"
    else:
        endian = ">"
    return self.pack('%sH' % endian, value)
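# The underlying byte layout can be checked with struct directly ('H' is a
# 2-byte unsigned short; '<' / '>' select little / big endian):
import struct

assert struct.pack('<H', 0x1234) == b'\x34\x12'  # little endian
assert struct.pack('>H', 0x1234) == b'\x12\x34'  # big endian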
def hard_wrap(self):
    """Grammar for hard wrap linebreak. You don't need to add two
    spaces at the end of a line.
    """
    self.linebreak = re.compile(r'^ *\n(?!\s*$)')
    self.text = re.compile(
        r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| *\n|$)'
    )
def create_tx(self, txins=None, txouts=None, lock_time=0):
    """Create unsigned rawtx with given txins/txouts as json data.

    <txins>: '[{"txid" : hexdata, "index" : integer}, ...]'
    <txouts>: '[{"address" : hexdata, "value" : satoshis}, ...]'
    """
    txins = [] if txins is None else txins
    txouts = [] if txouts is None else txouts
    lock_time = deserialize.positive_integer(lock_time)
    txins = deserialize.txins(txins)
    txouts = deserialize.txouts(self.testnet, txouts)
    tx = control.create_tx(self.service, self.testnet, txins, txouts,
                           lock_time=lock_time)
    return serialize.tx(tx)
def do_handle_log(self, workunit, level, *msg_elements):
    """Implementation of Reporter callback."""
    if not self.is_under_main_root(workunit):
        return

    # If the element is a (msg, detail) pair, we ignore the detail. There's no
    # useful way to display it on the console.
    elements = [e if isinstance(e, six.string_types) else e[0]
                for e in msg_elements]
    msg = '\n' + ''.join(elements)
    if self.use_color_for_workunit(workunit, self.settings.color):
        msg = self._COLOR_BY_LEVEL.get(level, lambda x: x)(msg)

    self.emit(self._prefix(workunit, msg))
    self.flush()
def _fetch(self, request):
    """ Fetch using the OkHttpClient """
    client = self.client

    #: Dispatch the async call
    call = Call(__id__=client.newCall(request.request))
    call.enqueue(request.handler)

    #: Save the call reference
    request.call = call
def _make_path(self, items):
    '''Returns a relative path for the given dictionary of items.

    Uses this url rule's url pattern and replaces instances of <var_name>
    with the appropriate value from the items dict.
    '''
    for key, val in items.items():
        if not isinstance(val, basestring):
            raise TypeError('Value "%s" for key "%s" must be an instance'
                            ' of basestring' % (val, key))
        items[key] = quote_plus(val)

    try:
        path = self._url_format.format(**items)
    except AttributeError:
        # Old version of python
        path = self._url_format
        for key, val in items.items():
            path = path.replace('{%s}' % key, val)
    return path
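# A minimal sketch of the substitution step outside the class, assuming a
# pattern such as '/users/{user_id}/posts/{post_id}' (names are illustrative):
from urllib.parse import quote_plus

url_format = '/users/{user_id}/posts/{post_id}'
items = {'user_id': quote_plus('42'), 'post_id': quote_plus('a b')}
print(url_format.format(**items))  # /users/42/posts/a+b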
def evaluate_policy(self, sigma):
    """
    Compute the value of a policy.

    Parameters
    ----------
    sigma : array_like(int, ndim=1)
        Policy vector, of length n.

    Returns
    -------
    v_sigma : ndarray(float, ndim=1)
        Value vector of `sigma`, of length n.
    """
    if self.beta == 1:
        raise NotImplementedError(self._error_msg_no_discounting)

    # Solve (I - beta * Q_sigma) v = R_sigma for v
    R_sigma, Q_sigma = self.RQ_sigma(sigma)
    b = R_sigma

    A = self._I - self.beta * Q_sigma
    v_sigma = self._lineq_solve(A, b)

    return v_sigma
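# A worked example of the same linear solve, outside the class (a quantecon-
# style MDP is assumed; the 2-state rewards and transitions below are made up):
import numpy as np

beta = 0.95
R_sigma = np.array([1.0, 0.5])                 # rewards under the fixed policy
Q_sigma = np.array([[0.9, 0.1], [0.2, 0.8]])   # transitions under the policy
v_sigma = np.linalg.solve(np.eye(2) - beta * Q_sigma, R_sigma)
print(v_sigma)  # value of each state under the policy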
def _maybe_club(self, list_of_dicts):
    """
    If all keys in a list of dicts are identical, values from each ``dict``
    are clubbed, i.e. inserted under a common column heading. If the keys
    are not identical ``None`` is returned, and the list should be
    converted to HTML per the normal ``convert`` function.

    Parameters
    ----------
    list_of_dicts : list
        List to attempt to club.

    Returns
    -------
    str or None
        String of HTML if list was successfully clubbed. Returns ``None``
        otherwise.

    Example
    -------
    Given the following json object::

        {
            "sampleData": [
                {"a":1, "b":2, "c":3},
                {"a":5, "b":6, "c":7}]
        }

    Calling ``_maybe_club`` would result in the following HTML table::

        _____________________________
        |            |   |   |   |
        |            | a | c | b |
        | sampleData |---|---|---|
        |            | 1 | 3 | 2 |
        |            | 5 | 7 | 6 |
        -----------------------------

    Adapted from a contribution from @muellermichel to ``json2html``.
    """
    column_headers = JsonConverter._list_of_dicts_to_column_headers(list_of_dicts)
    if column_headers is None:
        # common headers not found, return normal markup
        html_output = self._markup(list_of_dicts)
    else:
        html_output = self._table_opening_tag
        html_output += self._markup_header_row(column_headers)
        for list_entry in list_of_dicts:
            html_output += "<tr><td>"
            html_output += "</td><td>".join(
                self._markup(list_entry[column_header])
                for column_header in column_headers)
            html_output += "</td></tr>"
        html_output += "</table>"
    return self._markup_table_cell(html_output)
def __remove_index(self, ids):
    """remove affected ids from the index"""
    if not ids:
        return
    ids = ",".join((str(id) for id in ids))
    self.execute("DELETE FROM fact_index where id in (%s)" % ids)
def _collective_with_groups(self, x, mesh_axes, collective):
    """Grouped collective, (across the given dimensions).

    Args:
        x: a LaidOutTensor
        mesh_axes: a list of integers - the mesh dimensions to be reduced
        collective: fn from list(tf.Tensor), list(device) -> list(tf.Tensor)

    Returns:
        a LaidOutTensor
    """
    if not mesh_axes:
        return x
    x = x.to_laid_out_tensor()
    if len(mesh_axes) == self.ndims:
        return self.LaidOutTensor(collective(x.tensor_list, self._devices))
    else:
        groups = mtf.processor_groups(self.shape, mesh_axes)
        ret = [None] * self.size
        for g in groups:
            inputs = [x.tensor_list[pnum] for pnum in g]
            devices = [self._devices[pnum] for pnum in g]
            reduced = collective(inputs, devices)
            for pnum, y in zip(g, reduced):
                ret[pnum] = y
        return self.LaidOutTensor(ret)
def book(symbol=None, token='', version=''):
    '''Book shows IEX’s bids and asks for given symbols.

    https://iexcloud.io/docs/api/#deep-book

    Args:
        symbol (string): Ticker to request
        token (string): Access token
        version (string): API version

    Returns:
        dict: result
    '''
    _raiseIfNotStr(symbol)
    if symbol:
        return _getJson('deep/book?symbols=' + symbol, token, version)
    return _getJson('deep/book', token, version)
def html_entity_decode_codepoint(self, m,
                                 defs=htmlentities.codepoint2name):
    """decode html entity into one of the codepoint2name"""
    try:
        char = defs[m.group(1)]
        return "&{char};".format(char=char)
    except (ValueError, KeyError):
        return m.group(0)
def besj(self, x, n):
    '''
    Function BESJ calculates Bessel function of first kind of order n.

    Arguments:
        n - an integer (>=0), the order
        x - value at which the Bessel function is required

    --------------------
    C++ Mathematical Library
    Converted from equivalent FORTRAN library
    Converted by Gareth Walker for use by course 392 computational project
    All functions tested and yield the same results as the corresponding
    FORTRAN versions.

    If you have any problems using these functions please report them to
    M.Muldoon@UMIST.ac.uk

    Documentation available on the web
    http://www.ma.umist.ac.uk/mrm/Teaching/392/libs/392.html
    Version 1.0   8/98
    29 October, 1999
    --------------------
    Adapted for use in AGG library by Andy Wilk (castor.vulgaris@gmail.com)
    Adapted for use in vispy library by
    Nicolas P. Rougier (Nicolas.Rougier@inria.fr)
    -----------------------------------------------------------------------
    '''
    if n < 0:
        return 0.0
    d = 1e-6
    b = 0
    if math.fabs(x) <= d:
        if n != 0:
            return 0
        return 1
    b1 = 0  # b1 is the value from the previous iteration

    # Set up a starting order for recurrence
    m1 = int(math.fabs(x)) + 6
    if math.fabs(x) > 5:
        m1 = int(math.fabs(1.4 * x + 60 / x))
    m2 = int(n + 2 + math.fabs(x) / 4)
    if m1 > m2:
        m2 = m1

    # Apply recurrence down from current max order
    while True:
        c3 = 0
        c2 = 1e-30
        c4 = 0
        m8 = 1
        if m2 // 2 * 2 == m2:  # integer division: check whether m2 is even
            m8 = -1
        imax = m2 - 2
        for i in range(1, imax + 1):
            c6 = 2 * (m2 - i) * c2 / x - c3
            c3 = c2
            c2 = c6
            if m2 - i - 1 == n:
                b = c6
            m8 = -1 * m8
            if m8 > 0:
                c4 = c4 + 2 * c6
        c6 = 2 * c2 / x - c3
        if n == 0:
            b = c6
        c4 += c6
        b /= c4
        if math.fabs(b - b1) < d:
            return b
        b1 = b
        m2 += 3
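# A quick sanity check against a reference implementation (assumes scipy is
# installed; a throwaway holder class exposes besj, which is written as a
# method, as a callable):
import math
from scipy.special import jv

class _Holder:
    besj = besj

print(_Holder().besj(1.0, 0))  # ~0.7651976865579666
print(jv(0, 1.0))              # reference value from scipy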
def d3logpdf_dlink3(self, inv_link_f, y, Y_metadata=None):
    """
    Third order derivative log-likelihood function at y given link(f) w.r.t link(f)

    .. math::
        \\frac{d^{3} \\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)^{3}} =
            \\frac{2(v+1)(y_{i} - \\lambda(f_{i}))((y_{i} - \\lambda(f_{i}))^{2} - 3\\sigma^{2}v)}{((y_{i} - \\lambda(f_{i}))^{2} + \\sigma^{2}v)^{3}}

    :param inv_link_f: latent variables link(f)
    :type inv_link_f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: Y_metadata which is not used in student t distribution
    :returns: third derivative of likelihood evaluated at points f
    :rtype: Nx1 array
    """
    e = y - inv_link_f
    d3lik_dlink3 = (-(2 * (self.v + 1) * (-e) * (e**2 - 3 * self.v * self.sigma2)) /
                    ((e**2 + self.sigma2 * self.v)**3))
    return d3lik_dlink3
def parse_environment_data(block):
    """
    Parse the environment block into a Python dictionary.

    @warn: Deprecated since WinAppDbg 1.5.

    @note: Values of duplicated keys are joined using null characters.

    @type  block: list of str
    @param block: List of strings as returned by L{get_environment_data}.

    @rtype:  dict(str S{->} str)
    @return: Dictionary of environment keys and values.
    """

    # Issue a deprecation warning.
    warnings.warn(
        "Process.parse_environment_data() is deprecated"
        " since WinAppDbg 1.5.", DeprecationWarning)

    # Create an empty environment dictionary.
    environment = dict()

    # End here if the environment block is empty.
    if not block:
        return environment

    # Prepare the tokens (ANSI or Unicode).
    gst = win32.GuessStringType
    if type(block[0]) == gst.t_ansi:
        equals = '='
        terminator = '\0'
    else:
        equals = u'='
        terminator = u'\0'

    # Split the blocks into key/value pairs.
    for chunk in block:
        sep = chunk.find(equals, 1)
        if sep < 0:
            ##            raise Exception()
            continue  # corrupted environment block?
        key, value = chunk[:sep], chunk[sep + 1:]

        # For duplicated keys, append the value.
        # Values are separated using null terminators.
        if key not in environment:
            environment[key] = value
        else:
            environment[key] += terminator + value

    # Return the environment dictionary.
    return environment
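# The key/value split above (searching '=' from index 1, so Windows names like
# '=C:=...' keep their leading '=') can be exercised standalone:
block = ['PATH=C:\\Windows', 'DUP=a', 'DUP=b', '=C:=C:\\Users']
env = {}
for chunk in block:
    sep = chunk.find('=', 1)
    if sep < 0:
        continue
    key, value = chunk[:sep], chunk[sep + 1:]
    env[key] = env[key] + '\0' + value if key in env else value
print(env)  # {'PATH': 'C:\\Windows', 'DUP': 'a\x00b', '=C:': 'C:\\Users'}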
def update_batch(self, **kwargs):
    """
    Simplistic batch update operation implemented in terms of `replace()`.

    Assumes that:
     -  Request and response schemas contain lists of items.
     -  Request items define a primary key identifier.
     -  The entire batch succeeds or fails together.
    """
    items = kwargs.pop("items")

    def transform(item):
        """
        Transform the dictionary expected for replace (which uses the
        URI path's id) into the resource expected from individual
        resources (which uses plain id).
        """
        item[self.identifier_key] = item.pop("id")
        return item

    return dict(
        items=[
            self.replace(**transform(item))
            for item in items
        ],
    )
def getAllClasses(self, hide_base_schemas=True, hide_implicit_types=True):
    """
    * hide_base_schemas: by default, obscure all RDF/RDFS/OWL/XML stuff
    * hide_implicit_types: don't make any inference based on rdf:type declarations
    """
    query = """SELECT DISTINCT ?x ?c WHERE {
                {
                    { ?x a owl:Class }
                    union
                    { ?x a rdfs:Class }
                    union
                    { ?x rdfs:subClassOf ?y }
                    union
                    { ?z rdfs:subClassOf ?x }
                    union
                    { ?y rdfs:domain ?x }
                    union
                    { ?y rdfs:range ?x }
                    %s
                } .
                OPTIONAL { ?x a ?c }  # get the type too if available
                %s
             }
             ORDER BY ?x"""

    BIT_BASE_SCHEMAS = """FILTER(
        !STRSTARTS(STR(?x), "http://www.w3.org/2002/07/owl")
        && !STRSTARTS(STR(?x), "http://www.w3.org/1999/02/22-rdf-syntax-ns")
        && !STRSTARTS(STR(?x), "http://www.w3.org/2000/01/rdf-schema")
        && !STRSTARTS(STR(?x), "http://www.w3.org/2001/XMLSchema")
        && !STRSTARTS(STR(?x), "http://www.w3.org/XML/1998/namespace")
        && (!isBlank(?x))
        ) ."""

    BIT_IMPLICIT_TYPES = """union { ?y rdf:type ?x }"""

    if hide_base_schemas == False:
        # ..then do not filter out XML stuff
        BIT_BASE_SCHEMAS = ""
    if hide_implicit_types == True:
        # .. then do not add the extra clause
        BIT_IMPLICIT_TYPES = ""

    query = query % (BIT_IMPLICIT_TYPES, BIT_BASE_SCHEMAS)
    # print(query)
    qres = self.rdflib_graph.query(query)
    return list(qres)
def resetPassword(self, attempt, newPassword):
    """
    @param attempt: L{_PasswordResetAttempt}

    Reset the password of the user who initiated C{attempt} to
    C{newPassword}, and afterward, delete the attempt and any persistent
    sessions that belong to the user.
    """
    self.accountByAddress(attempt.username).password = newPassword

    self.store.query(
        PersistentSession,
        PersistentSession.authenticatedAs == str(attempt.username)
    ).deleteFromStore()

    attempt.deleteFromStore()
def fit_naa(self, reject_outliers=3.0, fit_lb=1.8, fit_ub=2.4,
            phase_correct=True):
    """
    Fit a Lorentzian function to the NAA peak at ~2 ppm.

    Example of fitting inverted peak: Foerster et al. 2013, An imbalance
    between excitatory and inhibitory neurotransmitters in amyotrophic
    lateral sclerosis revealed by use of 3T proton MRS
    """
    model, signal, params = ana.fit_lorentzian(self.diff_spectra,
                                               self.f_ppm,
                                               lb=fit_lb,
                                               ub=fit_ub)

    # Store the params:
    self.naa_model = model
    self.naa_signal = signal
    self.naa_params = params
    self.naa_idx = ut.make_idx(self.f_ppm, fit_lb, fit_ub)
    mean_params = stats.nanmean(params, 0)
    self.naa_auc = self._calc_auc(ut.lorentzian, params, self.naa_idx)
def midi_outputs(self):
    """
    :return: A list of MIDI output :class:`Ports`.
    """
    return self.client.get_ports(is_midi=True, is_physical=True,
                                 is_output=True)
def get_diff_amounts(self):
    """Gets the list of total diff amounts.

    :return: List of total diffs between each pair of consecutive commits
        since the start
    """
    diffs = []
    last_commit = None
    for commit in self.repo.iter_commits():
        if last_commit is not None:
            diff = self.get_diff(commit.hexsha, last_commit.hexsha)
            total_changed = diff[Diff.ADD] + diff[Diff.DEL]
            diffs.append(total_changed)
        last_commit = commit
    return diffs
def delete(self, force=False):
    """
    Deletes the current framework.

    :param force: If True, stops the framework before deleting it
    :return: True if the framework has been deleted, False if it couldn't
    """
    if not force and self._state not in (
        Bundle.INSTALLED,
        Bundle.RESOLVED,
        Bundle.STOPPING,
    ):
        _logger.warning("Trying to delete an active framework")
        return False

    return FrameworkFactory.delete_framework(self)
def get_instance(self, payload):
    """
    Build an instance of EngagementContextInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextInstance
    :rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextInstance
    """
    return EngagementContextInstance(
        self._version,
        payload,
        flow_sid=self._solution['flow_sid'],
        engagement_sid=self._solution['engagement_sid'],
    )
def parse_mapping(mapping_file: Optional[str]) -> configparser.ConfigParser:
    """
    Parse the file containing the mappings from hosts to pass entries.

    Args:
        mapping_file:
            Name of the file to parse. If ``None``, the default file from
            the XDG location is used.
    """
    LOGGER.debug('Parsing mapping file. Command line: %s', mapping_file)

    def parse(mapping_file):
        config = configparser.ConfigParser()
        config.read_file(mapping_file)
        return config

    # give precedence to the user-specified file
    if mapping_file is not None:
        LOGGER.debug('Parsing command line mapping file')
        return parse(mapping_file)

    # fall back on XDG config location
    xdg_config_dir = xdg.BaseDirectory.load_first_config('pass-git-helper')
    if xdg_config_dir is None:
        raise RuntimeError(
            'No mapping configured so far at any XDG config location. '
            'Please create {config_file}'.format(
                config_file=DEFAULT_CONFIG_FILE))
    mapping_file = os.path.join(xdg_config_dir, CONFIG_FILE_NAME)

    LOGGER.debug('Parsing mapping file %s', mapping_file)
    with open(mapping_file, 'r') as file_handle:
        return parse(file_handle)
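# The mapping file is plain INI, one section per host. A minimal sketch of
# what parse() consumes (section and key names below are illustrative):
import configparser
import io

_sample = io.StringIO("""
[github.com]
target=dev/github

[*.example.org]
target=work/example
""")
_config = configparser.ConfigParser()
_config.read_file(_sample)
print(_config['github.com']['target'])  # dev/github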
def import_from_netcdf(network, path, skip_time=False):
    """
    Import network data from netCDF file or xarray Dataset at `path`.

    Parameters
    ----------
    path : string|xr.Dataset
        Path to netCDF dataset or instance of xarray Dataset
    skip_time : bool, default False
        Skip reading in time dependent attributes
    """
    assert has_xarray, "xarray must be installed for netCDF support."

    basename = (os.path.basename(path)
                if isinstance(path, string_types) else None)
    with ImporterNetCDF(path=path) as importer:
        _import_from_importer(network, importer, basename=basename,
                              skip_time=skip_time)
def get_node_affiliations(self, jid, node):
    """
    Return the affiliations of other jids at a node.

    :param jid: Address of the PubSub service.
    :type jid: :class:`aioxmpp.JID`
    :param node: Name of the node to query
    :type node: :class:`str`
    :raises aioxmpp.errors.XMPPError: as returned by the service
    :return: The response from the service.
    :rtype: :class:`.xso.OwnerRequest`

    The affiliations are returned as :class:`.xso.OwnerRequest` instance
    whose :attr:`~.xso.OwnerRequest.payload` is a
    :class:`.xso.OwnerAffiliations` instance.
    """
    iq = aioxmpp.stanza.IQ(
        type_=aioxmpp.structs.IQType.GET,
        to=jid,
        payload=pubsub_xso.OwnerRequest(
            pubsub_xso.OwnerAffiliations(node),
        )
    )

    return (yield from self.client.send(iq))
async def _get_descriptions(self):
    """Read a column descriptor packet for each column in the result."""
    self.fields = []
    self.converters = []
    use_unicode = self.connection.use_unicode
    conn_encoding = self.connection.encoding
    description = []

    for i in range(self.field_count):
        field = await self.connection._read_packet(FieldDescriptorPacket)
        self.fields.append(field)
        description.append(field.description())
        field_type = field.type_code
        if use_unicode:
            if field_type == FIELD_TYPE.JSON:
                # When SELECT from JSON column: charset = binary
                # When SELECT CAST(... AS JSON): charset = connection encoding
                # This behavior is different from TEXT / BLOB.
                # We should decode result by connection encoding
                # regardless charsetnr.
                # See https://github.com/PyMySQL/PyMySQL/issues/488
                encoding = conn_encoding  # SELECT CAST(... AS JSON)
            elif field_type in TEXT_TYPES:
                if field.charsetnr == 63:  # binary
                    # TEXTs with charset=binary means BINARY types.
                    encoding = None
                else:
                    encoding = conn_encoding
            else:
                # Integers, Dates and Times, and other basic data
                # is encoded in ascii
                encoding = 'ascii'
        else:
            encoding = None
        converter = self.connection.decoders.get(field_type)
        if converter is through:
            converter = None
        self.converters.append((encoding, converter))

    eof_packet = await self.connection._read_packet()
    assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF'
    self.description = tuple(description)
def resolve_for(self, node, exact=None):
    """
    Resolves this query relative to the given node.

    Args:
        node (node.Base): The node relative to which this query should be resolved.
        exact (bool, optional): Whether to exactly match text.

    Returns:
        list[Element]: A list of elements matched by this query.
    """
    from capybara.driver.node import Node
    from capybara.node.element import Element
    from capybara.node.simple import Simple

    @node.synchronize
    def resolve():
        if self.selector.format == "css":
            children = node._find_css(self.css())
        else:
            children = node._find_xpath(self.xpath(exact))

        def wrap(child):
            if isinstance(child, Node):
                return Element(node.session, child, node, self)
            else:
                return Simple(child)

        children = [wrap(child) for child in children]
        return Result(children, self)

    return resolve()
def get_connection(self, name):
    """
    An individual connection.

    :param name: The connection name
    :type name: str
    """
    return self._api_get('/api/connections/{0}'.format(
        urllib.parse.quote_plus(name)
    ))
def _maybe_start_instance(instance):
    """Starts instance if it's stopped, no-op otherwise."""
    if not instance:
        return

    if instance.state['Name'] == 'stopped':
        instance.start()

    while True:
        print(f"Waiting for {instance} to start.")
        instance.reload()
        if instance.state['Name'] == 'running':
            break
        time.sleep(10)
def to_native(key):
    """Find the native name for the language specified by key.

    >>> to_native('br')
    u'brezhoneg'
    >>> to_native('sw')
    u'Kiswahili'
    """
    item = find(whatever=key)
    if not item:
        raise NonExistentLanguageError('Language does not exist.')
    return item[u'native']
def save_raw_data_from_data_queue(data_queue, filename, mode='a', title='',
                                  scan_parameters=None):
    # mode="r+" to append data (raw_data_file_h5 must exist), "w" to overwrite
    # raw_data_file_h5, "a" to append data (raw_data_file_h5 is created if it
    # does not exist)
    '''Writing raw data file from data queue

    If you need to write raw data once in a while this function may make it
    easy for you.
    '''
    if not scan_parameters:
        scan_parameters = {}
    with open_raw_data_file(filename, mode=mode, title=title,
                            scan_parameters=list(scan_parameters.keys())) as raw_data_file:
        raw_data_file.append(data_queue, scan_parameters=scan_parameters)
def filter_macro(func, *args, **kwargs):
    """
    Promotes a function that returns a filter into its own filter type.

    Example::

        @filter_macro
        def String():
            return Unicode | Strip | NotEmpty

        # You can now use `String` anywhere you would use a regular Filter:
        (String | Split(':')).apply('...')

    You can also use ``filter_macro`` to create partials, allowing you to
    preset one or more initialization arguments::

        Minor = filter_macro(Max, max_value=18, inclusive=False)
        Minor(inclusive=True).apply(18)
    """
    filter_partial = partial(func, *args, **kwargs)

    class FilterMacroMeta(FilterMeta):
        @staticmethod
        def __new__(mcs, name, bases, attrs):
            # This is as close as we can get to running ``update_wrapper``
            # on a type.
            for attr in WRAPPER_ASSIGNMENTS:
                if hasattr(func, attr):
                    attrs[attr] = getattr(func, attr)

            # Note that we ignore the ``name`` argument, passing in
            # ``func.__name__`` instead.
            return super(FilterMacroMeta, mcs).__new__(
                mcs, func.__name__, bases, attrs)

        def __call__(cls, *runtime_args, **runtime_kwargs):
            return filter_partial(*runtime_args, **runtime_kwargs)

    class FilterMacro(with_metaclass(FilterMacroMeta, FilterMacroType)):
        # This method will probably never get called due to overloaded
        # ``__call__`` in the metaclass, but just in case, we'll include it
        # because it is an abstract method in `BaseFilter`.
        def _apply(self, value):
            # noinspection PyProtectedMember
            return self.__class__()._apply(value)

    return FilterMacro
def activate(self, resource=None, timeout=3, wait_for_finish=False):
    """
    Activate this package on the SMC

    :param list resource: node href's to activate on. Resource is only
        required for software upgrades
    :param int timeout: timeout between queries
    :raises TaskRunFailed: failure during activation (downloading, etc)
    :rtype: TaskOperationPoller
    """
    return Task.execute(self, 'activate',
                        json={'resource': resource},
                        timeout=timeout,
                        wait_for_finish=wait_for_finish)
def path_to_songname(path: str) -> str:
    """
    Extracts song name from a filepath. Used to identify which songs have
    already been fingerprinted on disk.
    """
    return os.path.splitext(os.path.basename(path))[0]
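# Quick check of the basename/splitext behavior (imports included so the
# snippet runs standalone):
import os

assert os.path.splitext(os.path.basename('/music/artist/track01.mp3'))[0] == 'track01'
assert os.path.splitext(os.path.basename('track.tar.gz'))[0] == 'track.tar'  # only the last extension is dropped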
def bschoc(value, ndim, lenvals, array, order):
    """
    Do a binary search for a given value within a character string array,
    accompanied by an order vector. Return the index of the matching array
    entry, or -1 if the key value is not found.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bschoc_c.html

    :param value: Key value to be found in array.
    :type value: str
    :param ndim: Dimension of array.
    :type ndim: int
    :param lenvals: String length.
    :type lenvals: int
    :param array: Character string array to search.
    :type array: list of strings
    :param order: Order vector.
    :type order: Array of ints
    :return: index
    :rtype: int
    """
    value = stypes.stringToCharP(value)
    ndim = ctypes.c_int(ndim)
    lenvals = ctypes.c_int(lenvals)
    array = stypes.listToCharArrayPtr(array, xLen=lenvals, yLen=ndim)
    order = stypes.toIntVector(order)
    return libspice.bschoc_c(value, ndim, lenvals, array, order)
def update_points(self):
    """Normalize the rectangle into a polygon made up of its corner points,
    used for collision handling."""
    x, y, w, h = self.x, self.y, self.w, self.h
    self.points = (x, y, x + w, y, x + w, y + h, x, y + h)
def ystep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`."""
    self.Y = np.asarray(sp.prox_l2(
        self.AX + self.U, (self.lmbda / self.rho) * self.Wtvna,
        axis=self.saxes), dtype=self.dtype)
def export_txt(obj, file_name, two_dimensional=False, **kwargs):
    """ Exports control points as a text file.

    For curves the output is always a list of control points. For surfaces,
    it is possible to generate a 2-dimensional control point output file
    using ``two_dimensional``.

    Please see :py:func:`.exchange.import_txt()` for detailed description
    of the keyword arguments.

    :param obj: a spline geometry object
    :type obj: abstract.SplineGeometry
    :param file_name: file name of the text file to be saved
    :type file_name: str
    :param two_dimensional: type of the text file (only works for Surface objects)
    :type two_dimensional: bool
    :raises GeomdlException: an error occurred writing the file
    """
    # Check if the user has set any control points
    if obj.ctrlpts is None or len(obj.ctrlpts) == 0:
        raise exch.GeomdlException("There are no control points to save!")

    # Check the usage of two_dimensional flag
    if obj.pdimension == 1 and two_dimensional:
        # Silently ignore two_dimensional flag
        two_dimensional = False

    # File delimiters
    col_sep = kwargs.get('col_separator', ";")
    sep = kwargs.get('separator', ",")

    content = exch.export_text_data(obj, sep, col_sep, two_dimensional)
    return exch.write_file(file_name, content)
def load_registered_fixtures(context):
    """
    Apply fixtures that are registered with the @fixtures decorator.
    """
    # -- SELECT STEP REGISTRY:
    # HINT: Newer behave versions use runner.step_registry
    # to be able to support multiple runners, each with its own step_registry.
    runner = context._runner  # pylint: disable=protected-access
    step_registry = getattr(runner, 'step_registry', None)
    if not step_registry:
        # -- BACKWARD-COMPATIBLE: Use module_step_registry
        step_registry = module_step_registry.registry

    # -- SETUP SCENARIO FIXTURES:
    for step in context.scenario.all_steps:
        match = step_registry.find_match(step)
        if match and hasattr(match.func, 'registered_fixtures'):
            if not context.test.fixtures:
                context.test.fixtures = []
            context.test.fixtures.extend(match.func.registered_fixtures)
def remove_users_from_account_group(self, account_id, group_id, **kwargs):  # noqa: E501
    """Remove users from a group.  # noqa: E501

    An endpoint for removing users from groups.

    **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/users -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'`  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.remove_users_from_account_group(account_id, group_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param str group_id: (required)
    :param SubjectList body:
    :return: UpdatedResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.remove_users_from_account_group_with_http_info(account_id, group_id, **kwargs)  # noqa: E501
    else:
        (data) = self.remove_users_from_account_group_with_http_info(account_id, group_id, **kwargs)  # noqa: E501
        return data
def sub_tag(self, path, follow=True):
    """Returns direct sub-record with given tag name or None.

    Path can be a simple tag name, in which case the first direct
    sub-record of this record with the matching tag is returned. Path can
    also consist of several tags separated by slashes, in that case
    sub-records are searched recursively.

    If `follow` is True then pointer records are resolved and pointed
    record is used instead of pointer record, this also works for all
    intermediate records in a path.

    :param str path: tag names separated by slashes.
    :param boolean follow: If True then resolve pointers.
    :return: `Record` instance or `None` if sub-record with a given tag
        does not exist.
    """
    tags = path.split('/')
    rec = self
    for tag in tags:
        recs = [x for x in (rec.sub_records or []) if x.tag == tag]
        if not recs:
            return None
        rec = recs[0]
        if follow and isinstance(rec, Pointer):
            rec = rec.ref
    return rec
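# A runnable sketch with a minimal stand-in record type (the real class is
# richer; only `tag` and `sub_records` are modeled here, so follow=False is
# passed to skip the Pointer check). The GEDCOM-style tags are illustrative.
class _Rec:
    def __init__(self, tag, sub_records=None):
        self.tag = tag
        self.sub_records = sub_records or []
    sub_tag = sub_tag  # reuse the function above as a method

_person = _Rec('INDI', [_Rec('BIRT', [_Rec('DATE')])])
print(_person.sub_tag('BIRT/DATE', follow=False).tag)  # DATE
print(_person.sub_tag('NO/SUCH/PATH', follow=False))   # None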
def canonical_peer(self, peer):
    """
    Get the canonical peer name
    """
    their_host, their_port = url_to_host_port(peer)
    if their_host in ['127.0.0.1', '::1']:
        their_host = 'localhost'

    return "%s:%s" % (their_host, their_port)
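# The loopback normalization can be exercised with a stand-in splitter
# (url_to_host_port is assumed to return a (host, port) tuple):
def _host_port(peer):
    host, _, port = peer.rpartition(':')
    return host, int(port)

for _peer in ('127.0.0.1:8888', 'node.example.com:6264'):
    _h, _p = _host_port(_peer)
    if _h in ('127.0.0.1', '::1'):
        _h = 'localhost'
    print('%s:%s' % (_h, _p))  # localhost:8888, then node.example.com:6264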
def delete_snmp_template(auth, url, template_name=None, template_id=None):
    """
    Takes template_name as input to issue RESTful call to HP IMC which will
    delete the specific snmp template from the IMC system

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param template_name: str value of template name
    :param template_id: str value of the template id
    :return: int HTTP response code
    :rtype: int
    """
    try:
        if template_id is None:
            if template_name is None:
                raise ValueError(
                    "Either template_name or template_id must be supplied")
            snmp_templates = get_snmp_templates(auth, url)
            for template in snmp_templates:
                if template['name'] == template_name:
                    template_id = template['id']
        f_url = url + "/imcrs/plat/res/snmp/%s/delete" % template_id
        response = requests.delete(f_url, auth=auth, headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " delete_snmp_template: An Error has occurred"
def postprocess(self, calc, with_module=None, dry_run=None):
    '''
    Invokes module(s) API
    NB: this is the PUBLIC method
    @returns apps_dict
    '''
    for appname, appclass in self.Apps.items():
        if with_module and with_module != appname:
            continue

        run_permitted = False

        # scope-conditions
        if appclass['apptarget']:
            for key in appclass['apptarget']:
                negative = False
                if str(appclass['apptarget'][key]).startswith('!'):
                    negative = True
                    scope_prop = appclass['apptarget'][key][1:]
                else:
                    scope_prop = appclass['apptarget'][key]

                if key in calc.info:
                    # non-strict comparison ("CRYSTAL" matches "CRYSTAL09 v2.0");
                    # true if only one, but not both
                    if (str(scope_prop) in str(calc.info[key]) or
                            scope_prop == calc.info[key]) != negative:
                        run_permitted = True
                    else:
                        run_permitted = False
                        break
        else:
            run_permitted = True

        # module code running
        if run_permitted:
            calc.apps[appname] = {'error': None, 'data': None}
            if dry_run:
                continue
            try:
                AppInstance = appclass['appmodule'](calc)
            except:
                exc_type, exc_value, exc_tb = sys.exc_info()
                errmsg = "Fatal error in %s module:\n %s" % (
                    appname,
                    " ".join(traceback.format_exception(
                        exc_type, exc_value, exc_tb))
                )
                calc.apps[appname]['error'] = errmsg
                calc.warning(errmsg)
            else:
                try:
                    calc.apps[appname]['data'] = getattr(
                        AppInstance, appclass['appdata'])
                except AttributeError:
                    errmsg = ('No appdata-defined property found for '
                              '%s module!' % appname)
                    calc.apps[appname]['error'] = errmsg
                    calc.warning(errmsg)

    return calc
def create_sconstruct(self, project_dir='', sayyes=False):
    """Creates a default SConstruct file"""
    project_dir = util.check_dir(project_dir)

    sconstruct_name = 'SConstruct'
    sconstruct_path = util.safe_join(project_dir, sconstruct_name)
    local_sconstruct_path = util.safe_join(
        util.get_folder('resources'), sconstruct_name)

    if isfile(sconstruct_path):
        # -- If sayyes, skip the question
        if sayyes:
            self._copy_sconstruct_file(sconstruct_name, sconstruct_path,
                                       local_sconstruct_path)
        else:
            click.secho(
                'Warning: {} file already exists'.format(sconstruct_name),
                fg='yellow')

            if click.confirm('Do you want to replace it?'):
                self._copy_sconstruct_file(sconstruct_name, sconstruct_path,
                                           local_sconstruct_path)
            else:
                click.secho('Abort!', fg='red')
    else:
        self._copy_sconstruct_file(sconstruct_name, sconstruct_path,
                                   local_sconstruct_path)
def expect(self, pattern, timeout=-1):
    """Waits on the given pattern to appear in std_out"""

    if self.blocking:
        raise RuntimeError("expect can only be used on non-blocking commands.")

    try:
        self.subprocess.expect(pattern=pattern, timeout=timeout)
    except pexpect.EOF:
        pass
def diff(cls, a, b, ignore_formatting=False):
    """Returns a string diff of two FSArrays with differences underlined."""
    def underline(x):
        return u'\x1b[4m%s\x1b[0m' % (x,)

    def blink(x):
        return u'\x1b[5m%s\x1b[0m' % (x,)

    a_rows = []
    b_rows = []
    max_width = max([len(row) for row in a] + [len(row) for row in b])
    a_lengths = []
    b_lengths = []
    for a_row, b_row in zip(a, b):
        a_lengths.append(len(a_row))
        b_lengths.append(len(b_row))
        extra_a = u'`' * (max_width - len(a_row))
        extra_b = u'`' * (max_width - len(b_row))
        a_line = u''
        b_line = u''
        for a_char, b_char in zip(a_row + extra_a, b_row + extra_b):
            if ignore_formatting:
                a_char_for_eval = a_char.s if isinstance(a_char, FmtStr) else a_char
                b_char_for_eval = b_char.s if isinstance(b_char, FmtStr) else b_char
            else:
                a_char_for_eval = a_char
                b_char_for_eval = b_char
            if a_char_for_eval == b_char_for_eval:
                a_line += actualize(a_char)
                b_line += actualize(b_char)
            else:
                a_line += underline(blink(actualize(a_char)))
                b_line += underline(blink(actualize(b_char)))
        a_rows.append(a_line)
        b_rows.append(b_line)

    hdiff = '\n'.join(
        a_line + u' %3d | %3d ' % (a_len, b_len) + b_line
        for a_line, b_line, a_len, b_len in zip(a_rows, b_rows,
                                                a_lengths, b_lengths))
    return hdiff
def _set_cos_traffic_class(self, v, load=False):
    """
    Setter method for cos_traffic_class, mapped from YANG variable
    /qos/map/cos_traffic_class (list)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_cos_traffic_class is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_cos_traffic_class() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGListType("name", cos_traffic_class.cos_traffic_class, yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}), is_container='list', yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """cos_traffic_class must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name",cos_traffic_class.cos_traffic_class, yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}), is_container='list', yang_name="cos-traffic-class", rest_name="cos-traffic-class", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CoS-to-Traffic-Class map', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'cos_traffic_class', u'cli-mode-name': u'cos-traffic-class-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='list', is_config=True)""",
        })

    self.__cos_traffic_class = t
    if hasattr(self, '_set'):
        self._set()
Setter method for cos_traffic_class, mapped from YANG variable /qos/map/cos_traffic_class (list) If this variable is read-only (config: false) in the source YANG file, then _set_cos_traffic_class is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cos_traffic_class() directly.
def transform(self, X):
    """
    Add the features calculated using the timeseries_container and add them to the corresponding rows in the input
    pandas.DataFrame X.

    To save some computing time, you should only include those time series in the container that you actually need.
    You can set the timeseries container with the method :func:`set_timeseries_container`.

    :param X: the DataFrame to which the calculated timeseries features will be added. This is *not* the dataframe
        with the timeseries itself.
    :type X: pandas.DataFrame

    :return: The input DataFrame, but with added features.
    :rtype: pandas.DataFrame
    """
    if self.timeseries_container is None:
        raise RuntimeError("You have to provide a time series using the set_timeseries_container function before.")

    # Extract only features for the IDs in X.index
    timeseries_container_X = restrict_input_to_index(self.timeseries_container, self.column_id, X.index)

    extracted_features = extract_features(timeseries_container_X,
                                          default_fc_parameters=self.default_fc_parameters,
                                          kind_to_fc_parameters=self.kind_to_fc_parameters,
                                          column_id=self.column_id, column_sort=self.column_sort,
                                          column_kind=self.column_kind, column_value=self.column_value,
                                          chunksize=self.chunksize, n_jobs=self.n_jobs,
                                          show_warnings=self.show_warnings,
                                          disable_progressbar=self.disable_progressbar,
                                          impute_function=self.impute_function,
                                          profile=self.profile,
                                          profiling_filename=self.profiling_filename,
                                          profiling_sorting=self.profiling_sorting)

    X = pd.merge(X, extracted_features, left_index=True, right_index=True, how="left")

    return X
Add the features calculated using the timeseries_container and add them to the corresponding rows in the input pandas.DataFrame X. To save some computing time, you should only include those time series in the container that you actually need. You can set the timeseries container with the method :func:`set_timeseries_container`. :param X: the DataFrame to which the calculated timeseries features will be added. This is *not* the dataframe with the timeseries itself. :type X: pandas.DataFrame :return: The input DataFrame, but with added features. :rtype: pandas.DataFrame
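A minimal usage sketch of this transform, assuming tsfresh's FeatureAugmenter wrapper, which exposes it; the column names and values below are illustrative only:

import pandas as pd
from tsfresh.transformers import FeatureAugmenter

# One row per entity in X; the raw time series live in a separate container.
X = pd.DataFrame(index=[1, 2])
ts = pd.DataFrame({
    "id":    [1, 1, 2, 2],
    "time":  [0, 1, 0, 1],
    "value": [1.0, 2.0, 3.0, 5.0],
})

augmenter = FeatureAugmenter(column_id="id", column_sort="time")
augmenter.set_timeseries_container(ts)
X_augmented = augmenter.transform(X)  # X plus one column per extracted feature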
def _sd_handler(self, desc_type, unit, desc, show_on_keypad): """Text description""" if desc_type not in self._descriptions_in_progress: LOG.debug("Text description response ignored for " + str(desc_type)) return (max_units, results, callback) = self._descriptions_in_progress[desc_type] if unit < 0 or unit >= max_units: callback(results) del self._descriptions_in_progress[desc_type] return results[unit] = desc self.send(sd_encode(desc_type=desc_type, unit=unit+1))
Text description
def request_openbus(self, service, endpoint, **kwargs):
    """Make a request to the given endpoint of the ``openbus`` server.

    This returns the plain JSON (dict) response which can then be parsed
    using one of the implemented types.

    Args:
        service (str): Service to fetch ('bus' or 'geo').
        endpoint (str): Endpoint to send the request to. This string
            corresponds to the key in the ``ENDPOINTS`` dict.
        **kwargs: Request arguments.

    Returns:
        Obtained response (dict) or None if the endpoint was not found.
    """
    if service == 'bus':
        endpoints = ENDPOINTS_BUS
    elif service == 'geo':
        endpoints = ENDPOINTS_GEO
    else:
        # Unknown service
        return None

    if endpoint not in endpoints:
        # Unknown endpoint
        return None

    url = URL_OPENBUS + endpoints[endpoint]

    # Append credentials to request
    kwargs['idClient'] = self._emt_id
    kwargs['passKey'] = self._emt_pass

    # If the server's certificate cannot be verified, verify=False is a
    # (less secure) fallback:
    # return requests.post(url, data=kwargs, verify=False).json()
    return requests.post(url, data=kwargs, verify=True).json()
Make a request to the given endpoint of the ``openbus`` server. This returns the plain JSON (dict) response which can then be parsed using one of the implemented types. Args: service (str): Service to fetch ('bus' or 'geo'). endpoint (str): Endpoint to send the request to. This string corresponds to the key in the ``ENDPOINTS`` dict. **kwargs: Request arguments. Returns: Obtained response (dict) or None if the endpoint was not found.
def enbase64(byte_str):
    """
    Encode bytes/strings to base64.

    Args:
    - ``byte_str``: The string or bytes to base64 encode.

    Returns:
    - byte_str encoded as base64.
    """
    # Python 3: base64.b64encode() expects bytes, so encode str to UTF-8 first
    if isinstance(byte_str, str) and not PYTHON2:
        byte_str = bytes(byte_str, 'utf-8')
    return base64.b64encode(byte_str)
Encode bytes/strings to base64. Args: - ``byte_str``: The string or bytes to base64 encode. Returns: - byte_str encoded as base64.
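A quick check of the behavior; the base64 encoding of "hello" is a known value:

>>> enbase64('hello')   # str input is encoded to UTF-8 bytes first on Python 3
b'aGVsbG8='
>>> enbase64(b'hello')  # bytes input passes straight through
b'aGVsbG8='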
def load_profiles_definitions(filename):
    """
    Load the registered profiles defined in the file filename. This is a
    yml file that defines the basic characteristics of each profile: its
    central class, scoping class, scoping path, type, and documentation
    string, keyed by the registering organization and profile name.

    It produces a dictionary that can be accessed with a string that
    defines the profile organization and name in the form
    <org>:<profile name>.
    """
    with open(filename, 'r') as fp:
        # safe_load avoids executing arbitrary YAML tags and silences the
        # yaml.load deprecation warning; plain scalars are all we need here
        profile_definitions = yaml.safe_load(fp)

    # assume profile definitions are case insensitive
    profiles_dict = NocaseDict()
    for profile in profile_definitions:
        value = ProfileDef(profile["central_class"],
                           profile["scoping_class"],
                           profile["scoping_path"],
                           profile['type'],
                           profile['doc'])
        key = "%s:%s" % (profile["registered_org"], profile["registered_name"])
        profiles_dict[key] = value
    return profiles_dict
Load the registered profiles defined in the file filename. This is a yml file that defines the basic characteristics of each profile: its central class, scoping class, scoping path, type, and documentation string, keyed by the registering organization and profile name. It produces a dictionary that can be accessed with a string that defines the profile organization and name in the form <org>:<profile name>.
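A hypothetical profile definition file, inferred from the keys the loader reads; the entry values are illustrative, not taken from any real registry:

EXAMPLE_PROFILES_YML = """
- registered_org: DMTF
  registered_name: Indications
  central_class: CIM_IndicationService
  scoping_class: CIM_System
  scoping_path: null
  type: autonomous
  doc: Example documentation string for the profile.
"""

# profiles = load_profiles_definitions('profiles.yml')
# profiles['DMTF:Indications'].central_class  ->  'CIM_IndicationService'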
def run(argv=argv):
    """Runs the search_google command line tool.

    This function runs the search_google command line tool in a terminal.
    It was intended for use inside a py file (.py) to be executed using
    python.

    Notes:
        * ``[q]`` reflects key ``q`` in the ``cseargs`` parameter for :class:`api.results`
        * Optional arguments with ``build_`` are keys in the ``buildargs`` parameter for :class:`api.results`

        For distribution, this function must be defined in the following files::

            # In 'search_google/search_google/__main__.py'
            from .cli import run
            run()

            # In 'search_google/search_google.py'
            from search_google.cli import run
            if __name__ == '__main__':
                run()

            # In 'search_google/__init__.py'
            __entry_points__ = {'console_scripts': ['search_google=search_google.cli:run']}

    Examples::

        # Import search_google for the cli module
        import search_google.cli

        # Create command line arguments
        argv = [
            'cli.py',
            'google',
            '--searchType=image',
            '--build_developerKey=your_dev_key',
            '--cx=your_cx_id',
            '--num=1'
        ]

        # Run command line
        search_google.cli.run(argv)
    """
    config_file = kwconfig.manage(
        file_path=resource_filename(Requirement.parse('search_google'), 'search_google/config.json'),
        defaults={
            'build_serviceName': 'customsearch',
            'build_version': 'v1',
            'num': 3,
            'fileType': 'png',
            'option_silent': 'False',
            'option_preview': 10})

    # (commands) Main command calls
    if len(argv) > 1:
        if argv[1] == '-i':  # browse docs
            open_new_tab(_doc_link)
            exit()
        elif argv[1] == '-a':  # browse arguments
            open_new_tab(_cse_link)
            exit()
    config_file.command(argv, i=1, doc=__doc__, quit=True, silent=False)

    # (parse_args) Parse command arguments into dict
    kwargs = kwconfig.parse(argv[2:])
    kwargs['q'] = argv[1]
    kwargs = config_file.add(kwargs)

    # (split_args) Split args into build, cse, and save arguments
    buildargs = {}
    cseargs = {}
    saveargs = {}
    optionargs = {}
    for k, v in kwargs.items():
        if 'build_' == k[0:6]:
            buildargs[k[6:]] = v
        elif 'save_' == k[0:5]:
            saveargs[k[5:]] = v
        elif 'option_' == k[0:7]:
            optionargs[k[7:]] = v
        else:
            cseargs[k] = v

    # (cse_results) Get google api results
    results = search_google.api.results(buildargs, cseargs)

    # (cse_print) Print a preview of results
    if 'silent' in optionargs:
        if optionargs['silent'].lower() != 'true':
            results.preview(n=int(optionargs['preview']))

    # (cse_save) Save links and metadata
    if 'links' in saveargs:
        results.save_links(saveargs['links'])
    if 'metadata' in saveargs:
        results.save_metadata(saveargs['metadata'])

    # (cse_download) Download links
    if 'downloads' in saveargs:
        results.download_links(saveargs['downloads'])
Runs the search_google command line tool. This function runs the search_google command line tool in a terminal. It was intended for use inside a py file (.py) to be executed using python. Notes: * ``[q]`` reflects key ``q`` in the ``cseargs`` parameter for :class:`api.results` * Optional arguments with ``build_`` are keys in the ``buildargs`` parameter for :class:`api.results` For distribution, this function must be defined in the following files:: # In 'search_google/search_google/__main__.py' from .cli import run run() # In 'search_google/search_google.py' from search_google.cli import run if __name__ == '__main__': run() # In 'search_google/__init__.py' __entry_points__ = {'console_scripts': ['search_google=search_google.cli:run']} Examples:: # Import search_google for the cli module import search_google.cli # Create command line arguments argv = [ 'cli.py', 'google', '--searchType=image', '--build_developerKey=your_dev_key', '--cx=your_cx_id', '--num=1' ] # Run command line search_google.cli.run(argv)
def wait_for_import_to_complete(self, import_id, region='us-east-1'):
    """
    Monitors the status of aws import, waiting for it to complete, or error out

    :param import_id: id of import task to monitor
    :param region: AWS region the import task runs in (default: us-east-1)
    """
    task_running = True
    while task_running:
        import_status_cmd = "aws ec2 --profile {} --region '{}' --output 'json' describe-import-image-tasks --import-task-ids {}".format(self.aws_project, region, import_id)
        res = subprocess.check_output(shlex.split(import_status_cmd))
        print("Current status: {}".format(res))
        res_json = json.loads(res)
        task_running, image_id = self.check_task_status_and_id(res_json)
Monitors the status of aws import, waiting for it to complete, or error out :param import_id: id of import task to monitor :param region: AWS region the import task runs in (default: us-east-1)
def extendManager(mixinClass): ''' Use as a class decorator to add extra methods to your model manager. Example usage: class Article(django.db.models.Model): published = models.DateTimeField() ... @extendManager class objects(object): def getPublished(self): return self.filter(published__lte = django.utils.timezone.now()).order_by('-published') ... publishedArticles = Article.objects.getPublished() ''' class MixinManager(models.Manager, mixinClass): class MixinQuerySet(models.query.QuerySet, mixinClass): pass def get_queryset(self): return self.MixinQuerySet(self.model, using = self._db) return MixinManager()
Use as a class decorator to add extra methods to your model manager. Example usage: class Article(django.db.models.Model): published = models.DateTimeField() ... @extendManager class objects(object): def getPublished(self): return self.filter(published__lte = django.utils.timezone.now()).order_by('-published') ... publishedArticles = Article.objects.getPublished()
def pltnp(point, v1, v2, v3): """ Find the nearest point on a triangular plate to a given point. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pltnp_c.html :param point: A point in 3-dimensional space. :type point: 3-Element Array of floats :param v1: Vertices of a triangular plate. :type v1: 3-Element Array of floats :param v2: Vertices of a triangular plate. :type v2: 3-Element Array of floats :param v3: Vertices of a triangular plate. :type v3: 3-Element Array of floats :return: the nearest point on a triangular plate to a given point and distance :rtype: tuple """ point = stypes.toDoubleVector(point) v1 = stypes.toDoubleVector(v1) v2 = stypes.toDoubleVector(v2) v3 = stypes.toDoubleVector(v3) pnear = stypes.emptyDoubleVector(3) dist = ctypes.c_double() libspice.pltnp_c(point, v1, v2, v3, pnear, ctypes.byref(dist)) return stypes.cVectorToPython(pnear), dist.value
Find the nearest point on a triangular plate to a given point. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pltnp_c.html :param point: A point in 3-dimensional space. :type point: 3-Element Array of floats :param v1: Vertices of a triangular plate. :type v1: 3-Element Array of floats :param v2: Vertices of a triangular plate. :type v2: 3-Element Array of floats :param v3: Vertices of a triangular plate. :type v3: 3-Element Array of floats :return: the nearest point on a triangular plate to a given point and distance :rtype: tuple
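A hedged usage sketch through SpiceyPy, which exposes this wrapper as spiceypy.pltnp; the plate and query point are arbitrary test values:

import spiceypy as spice

v1 = [0.0, 0.0, 0.0]
v2 = [1.0, 0.0, 0.0]
v3 = [0.0, 1.0, 0.0]
point = [0.25, 0.25, 1.0]   # directly above the plate's interior

pnear, dist = spice.pltnp(point, v1, v2, v3)
# pnear is approximately [0.25, 0.25, 0.0] and dist is 1.0:
# the query point drops perpendicularly onto the plate.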
def require_exp_directory(f): """Decorator to verify that a command is run inside a valid Dallinger experiment directory. """ error = "The current directory is not a valid Dallinger experiment." @wraps(f) def wrapper(**kwargs): if not verify_directory(kwargs.get("verbose")): raise click.UsageError(error) return f(**kwargs) return wrapper
Decorator to verify that a command is run inside a valid Dallinger experiment directory.
def get_python_symbol_icons(oedata): """Return a list of icons for oedata of a python file.""" class_icon = ima.icon('class') method_icon = ima.icon('method') function_icon = ima.icon('function') private_icon = ima.icon('private1') super_private_icon = ima.icon('private2') symbols = process_python_symbol_data(oedata) # line - 1, name, fold level fold_levels = sorted(list(set([s[2] for s in symbols]))) parents = [None]*len(symbols) icons = [None]*len(symbols) indexes = [] parent = None for level in fold_levels: for index, item in enumerate(symbols): line, name, fold_level, token = item if index in indexes: continue if fold_level == level: indexes.append(index) parent = item else: parents[index] = parent for index, item in enumerate(symbols): parent = parents[index] if item[-1] == 'def': icons[index] = function_icon elif item[-1] == 'class': icons[index] = class_icon else: icons[index] = QIcon() if parent is not None: if parent[-1] == 'class': if item[-1] == 'def' and item[1].startswith('__'): icons[index] = super_private_icon elif item[-1] == 'def' and item[1].startswith('_'): icons[index] = private_icon else: icons[index] = method_icon return icons
Return a list of icons for oedata of a python file.
def delete(self, filename, storage_type=None, bucket_name=None): """Deletes the specified file, either locally or from S3, depending on the file's storage type.""" if not (storage_type and bucket_name): self._delete_local(filename) else: if storage_type != 's3': raise ValueError('Storage type "%s" is invalid, the only supported storage type (apart from default local storage) is s3.' % storage_type) self._delete_s3(filename, bucket_name)
Deletes the specified file, either locally or from S3, depending on the file's storage type.
def mapper_from_partial_prior_arguments(self, arguments):
    """
    Creates a new model mapper from a dictionary mapping existing priors to
    new priors, keeping existing priors where no mapping is provided.

    Parameters
    ----------
    arguments: {Prior: Prior}
        A dictionary mapping priors to priors

    Returns
    -------
    model_mapper: ModelMapper
        A new model mapper with updated priors.
    """
    original_prior_dict = {prior: prior for prior in self.priors}
    return self.mapper_from_prior_arguments({**original_prior_dict, **arguments})
Creates a new model mapper from a dictionary mapping existing priors to new priors, keeping existing priors where no mapping is provided. Parameters ---------- arguments: {Prior: Prior} A dictionary mapping priors to priors Returns ------- model_mapper: ModelMapper A new model mapper with updated priors.
def analyze(self, scratch, **kwargs):
    """Run the DuplicateScripts plugin, recording any duplicated scripts.

    Only scripts with more than 3 blocks are taken into account."""
    scripts_set = set()
    for script in self.iter_scripts(scratch):
        if script[0].type.text == 'define %s':
            continue  # Ignore user defined scripts
        blocks_list = []
        for name, _, _ in self.iter_blocks(script.blocks):
            blocks_list.append(name)
        blocks_tuple = tuple(blocks_list)
        if blocks_tuple in scripts_set:
            if len(blocks_list) > 3:
                self.total_duplicate += 1
                self.list_duplicate.append(blocks_list)
        else:
            scripts_set.add(blocks_tuple)
Run the DuplicateScripts plugin, recording any duplicated scripts. Only scripts with more than 3 blocks are taken into account.
def from_connections(cls, caption, connections):
    """Create a new Data Source given a list of Connections."""

    root = ET.Element('datasource', caption=caption, version='10.0', inline='true')
    outer_connection = ET.SubElement(root, 'connection')
    outer_connection.set('class', 'federated')
    named_conns = ET.SubElement(outer_connection, 'named-connections')
    for conn in connections:
        nc = ET.SubElement(named_conns,
                           'named-connection',
                           name=_make_unique_name(conn.dbclass),
                           caption=conn.server)
        nc.append(conn._connectionXML)
    return cls(root)
Create a new Data Source given a list of Connections.
def start(self): """ Starts sending periodic HeartBeat operations. """ def _heartbeat(): if not self._client.lifecycle.is_live: return self._heartbeat() self._heartbeat_timer = self._client.reactor.add_timer(self._heartbeat_interval, _heartbeat) self._heartbeat_timer = self._client.reactor.add_timer(self._heartbeat_interval, _heartbeat)
Starts sending periodic HeartBeat operations.
def init_app(self, app): """Initialize extension to the given application. Extension will be registered to `app.extensions` with lower classname as key and instance as value. :param app: Flask application. """ self.init_extension(app) if not hasattr(app, 'extensions'): app.extensions = {} classname = self.__class__.__name__ extname = classname.replace('Flask', '').lower() app.extensions[extname] = self
Initialize extension to the given application. Extension will be registered to `app.extensions` with lower classname as key and instance as value. :param app: Flask application.
def log_start(task, logger="TaskLogger"): """Begin logging of a task Convenience function to log a task in the default TaskLogger Parameters ---------- task : str Name of the task to be started logger : str, optional (default: "TaskLogger") Unique name of the logger to retrieve Returns ------- logger : TaskLogger """ tasklogger = get_tasklogger(logger) tasklogger.start_task(task) return tasklogger
Begin logging of a task Convenience function to log a task in the default TaskLogger Parameters ---------- task : str Name of the task to be started logger : str, optional (default: "TaskLogger") Unique name of the logger to retrieve Returns ------- logger : TaskLogger
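A minimal sketch of the intended pairing, assuming the companion log_complete helper that tasklogger ships alongside log_start:

import time
import tasklogger

tasklogger.log_start("heavy computation")
time.sleep(0.1)               # stand-in for real work
tasklogger.log_complete("heavy computation")
# logs roughly: "Calculating heavy computation..." and
# "Calculated heavy computation in 0.10 seconds."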
def iterate(iterator, n=None): """Efficiently advances the iterator N times; by default goes to its end. The actual loop is done "in C" and hence it is faster than equivalent 'for'. :param n: How much the iterator should be advanced. If None, it will be advanced until the end. """ ensure_iterable(iterator) if n is None: deque(iterator, maxlen=0) else: next(islice(iterator, n, n), None)
Efficiently advances the iterator N times; by default goes to its end. The actual loop is done "in C" and hence it is faster than equivalent 'for'. :param n: How much the iterator should be advanced. If None, it will be advanced until the end.
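Two quick checks of the advancing behavior:

it = iter(range(10))
iterate(it, n=3)                     # skip the first three items
assert next(it) == 3

it = iter(range(10))
iterate(it)                          # no n: consume everything
assert next(it, 'exhausted') == 'exhausted'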
def remote_archive(class_obj: type) -> type: """ Decorator to annotate the RemoteArchive class. Registers the decorated class as the RemoteArchive known type. """ assert isinstance(class_obj, type), "class_obj is not a Class" global _remote_archive_resource_type _remote_archive_resource_type = class_obj return class_obj
Decorator to annotate the RemoteArchive class. Registers the decorated class as the RemoteArchive known type.
def build(outdir): """Blends the generated files and outputs a HTML website""" print("Building your Blended files into a website!") reload(sys) sys.setdefaultencoding('utf8') build_files(outdir) print("The files are built! You can find them in the " + outdir + "/ directory. Run the view command to see what you have created in a web browser.")
Blends the generated files and outputs a HTML website
def gamma(ranks_list1, ranks_list2):
    '''
    Goodman and Kruskal's gamma correlation coefficient

    :param ranks_list1: a list of ranks (integers)
    :param ranks_list2: a second list of ranks (integers) of equal length with corresponding entries
    :return: Gamma correlation coefficient (rank correlation ignoring ties)
    '''
    num_concordant_pairs = 0
    num_discordant_pairs = 0
    num_tied_x = 0
    num_tied_y = 0
    num_tied_xy = 0
    num_items = len(ranks_list1)
    for i in range(num_items):
        rank_1 = ranks_list1[i]
        rank_2 = ranks_list2[i]
        for j in range(i + 1, num_items):
            diff1 = ranks_list1[j] - rank_1
            diff2 = ranks_list2[j] - rank_2
            if (diff1 > 0 and diff2 > 0) or (diff1 < 0 and diff2 < 0):
                num_concordant_pairs += 1
            elif (diff1 > 0 and diff2 < 0) or (diff1 < 0 and diff2 > 0):
                num_discordant_pairs += 1
            elif diff1 == 0 and diff2 == 0:
                num_tied_xy += 1
            elif diff1 == 0:
                num_tied_x += 1
            elif diff2 == 0:
                num_tied_y += 1
    try:
        gamma_corr_coeff = float(num_concordant_pairs - num_discordant_pairs) / float(num_concordant_pairs + num_discordant_pairs)
    except ZeroDivisionError:
        # every pair is tied, so gamma is undefined
        gamma_corr_coeff = 'n/a'
    return [num_tied_x, num_tied_y, num_tied_xy, gamma_corr_coeff]
Goodman and Kruskal's gamma correlation coefficient :param ranks_list1: a list of ranks (integers) :param ranks_list2: a second list of ranks (integers) of equal length with corresponding entries :return: Gamma correlation coefficient (rank correlation ignoring ties)
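Worked examples: a perfectly concordant ranking gives gamma = 1.0 and a reversed one gives -1.0; the first three entries of the result count the tied pairs:

>>> gamma([1, 2, 3, 4], [1, 2, 3, 4])
[0, 0, 0, 1.0]
>>> gamma([1, 2, 3, 4], [4, 3, 2, 1])
[0, 0, 0, -1.0]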
def slice_rates_to_data(self, strain): ''' For the strain data, checks to see if seismicity rates have been calculated. If so, each column in the array is sliced and stored as a single vector in the strain.data dictionary with the corresponding magnitude as a key. :param strain: Instance of :class: openquake.hmtk.strain.geodetic_strain.GeodeticStrain :returns: strain - Instance of strain class with updated data dictionary output_variables - Updated list of headers ''' output_variables = list(strain.data) cond = (isinstance(strain.target_magnitudes, np.ndarray) or isinstance(strain.target_magnitudes, list)) if cond: magnitude_list = ['%.3f' % mag for mag in strain.target_magnitudes] else: return strain, output_variables # Ensure that the number of rows in the rate array corresponds to the # number of observations assert np.shape(strain.seismicity_rate)[0] == \ strain.get_number_observations() for iloc, magnitude in enumerate(magnitude_list): strain.data[magnitude] = strain.seismicity_rate[:, iloc] output_variables.extend(magnitude_list) return strain, output_variables
For the strain data, checks to see if seismicity rates have been calculated. If so, each column in the array is sliced and stored as a single vector in the strain.data dictionary with the corresponding magnitude as a key. :param strain: Instance of :class: openquake.hmtk.strain.geodetic_strain.GeodeticStrain :returns: strain - Instance of strain class with updated data dictionary output_variables - Updated list of headers
def check_event_coverage(patterns, event_list): """Calculate the ratio of patterns that were extracted.""" proportions = [] for pattern_list in patterns: proportion = 0 for pattern in pattern_list: for node in pattern.nodes(): if node in event_list: proportion += 1.0 / len(pattern_list) break proportions.append(proportion) return proportions
Calculate the ratio of patterns that were extracted.
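A small worked example, assuming the patterns are graph objects with a nodes() method (networkx graphs fit):

import networkx as nx

p1 = nx.Graph()
p1.add_nodes_from(['a', 'b'])
p2 = nx.Graph()
p2.add_nodes_from(['c'])

# One pattern list of two patterns; only p1 touches the event list,
# so the covered proportion is 1/2.
assert check_event_coverage([[p1, p2]], ['a']) == [0.5]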
def solver(A, config): """Generate an SA solver given matrix A and a configuration. Parameters ---------- A : array, matrix, csr_matrix, bsr_matrix Matrix to invert, CSR or BSR format preferred for efficiency config : dict A dictionary of solver configuration parameters that is used to generate a smoothed aggregation solver Returns ------- ml : smoothed_aggregation_solver smoothed aggregation hierarchy Notes ----- config must contain the following parameter entries for smoothed_aggregation_solver: symmetry, smooth, presmoother, postsmoother, B, strength, max_levels, max_coarse, coarse_solver, aggregate, keep Examples -------- >>> from pyamg.gallery import poisson >>> from pyamg import solver_configuration,solver >>> A = poisson((40,40),format='csr') >>> config = solver_configuration(A,verb=False) >>> ml = solver(A,config) """ # Convert A to acceptable format A = make_csr(A) # Generate smoothed aggregation solver try: return \ smoothed_aggregation_solver(A, B=config['B'], BH=config['BH'], smooth=config['smooth'], strength=config['strength'], max_levels=config['max_levels'], max_coarse=config['max_coarse'], coarse_solver=config['coarse_solver'], symmetry=config['symmetry'], aggregate=config['aggregate'], presmoother=config['presmoother'], postsmoother=config['postsmoother'], keep=config['keep']) except BaseException: raise TypeError('Failed generating smoothed_aggregation_solver')
Generate an SA solver given matrix A and a configuration. Parameters ---------- A : array, matrix, csr_matrix, bsr_matrix Matrix to invert, CSR or BSR format preferred for efficiency config : dict A dictionary of solver configuration parameters that is used to generate a smoothed aggregation solver Returns ------- ml : smoothed_aggregation_solver smoothed aggregation hierarchy Notes ----- config must contain the following parameter entries for smoothed_aggregation_solver: symmetry, smooth, presmoother, postsmoother, B, strength, max_levels, max_coarse, coarse_solver, aggregate, keep Examples -------- >>> from pyamg.gallery import poisson >>> from pyamg import solver_configuration,solver >>> A = poisson((40,40),format='csr') >>> config = solver_configuration(A,verb=False) >>> ml = solver(A,config)
def generate_not(self):
    """
    Means that the value must not be valid by this definition.

    .. code-block:: python

        {'not': {'type': 'null'}}

    Valid values for this definition are 'hello', 42, {} ... but not None.

    Since draft 06 the definition can also be a boolean. False means nothing
    is invalid, True means everything is invalid.
    """
    not_definition = self._definition['not']
    if not_definition is True:
        self.l('raise JsonSchemaException("{name} must not be there")')
    elif not_definition is False:
        return
    elif not not_definition:
        with self.l('if {}:', self._variable):
            self.l('raise JsonSchemaException("{name} must not be valid by not definition")')
    else:
        with self.l('try:'):
            self.generate_func_code_block(not_definition, self._variable, self._variable_name)
        self.l('except JsonSchemaException: pass')
        self.l('else: raise JsonSchemaException("{name} must not be valid by not definition")')
Means that the value must not be valid by this definition. .. code-block:: python {'not': {'type': 'null'}} Valid values for this definition are 'hello', 42, {} ... but not None. Since draft 06 the definition can also be a boolean. False means nothing is invalid, True means everything is invalid.
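Seen from the public API, assuming fastjsonschema's compile entry point, the generated code behaves like this:

import fastjsonschema

validate = fastjsonschema.compile({'not': {'type': 'null'}})
validate('hello')   # anything but None is valid
try:
    validate(None)
except fastjsonschema.JsonSchemaException:
    pass            # None is rejected by the "not" definition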
def transform(self, X, mean=None, lenscale=None): """ Apply the spectral mixture component basis to X. Parameters ---------- X: ndarray (N, d) array of observations where N is the number of samples, and d is the dimensionality of X. mean: ndarray, optional array of shape (d,) frequency means (one for each dimension of X). If not input, this uses the value of the initial mean. lenscale: ndarray, optional array of shape (d,) length scales (one for each dimension of X). If not input, this uses the value of the initial length scale. Returns ------- ndarray: of shape (N, 4*nbases) where nbases is number of random bases to use, given in the constructor (to nearest larger two power). """ mean = self._check_dim(X.shape[1], mean, paramind=0) lenscale = self._check_dim(X.shape[1], lenscale, paramind=1) VX = self._makeVX(X / lenscale) mX = X.dot(mean)[:, np.newaxis] Phi = np.hstack((np.cos(VX + mX), np.sin(VX + mX), np.cos(VX - mX), np.sin(VX - mX))) / \ np.sqrt(2 * self.n) return Phi
Apply the spectral mixture component basis to X. Parameters ---------- X: ndarray (N, d) array of observations where N is the number of samples, and d is the dimensionality of X. mean: ndarray, optional array of shape (d,) frequency means (one for each dimension of X). If not input, this uses the value of the initial mean. lenscale: ndarray, optional array of shape (d,) length scales (one for each dimension of X). If not input, this uses the value of the initial length scale. Returns ------- ndarray: of shape (N, 4*nbases) where nbases is number of random bases to use, given in the constructor (to nearest larger two power).
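A standalone numpy sketch of the feature layout (not the class itself): n random bases produce 4*n columns from the four cos/sin blocks:

import numpy as np

N, d, n = 5, 3, 8
X = np.random.randn(N, d)
V = np.random.randn(d, n)          # stand-in for the random spectral frequencies
mean = np.zeros(d)

VX = X.dot(V)
mX = X.dot(mean)[:, np.newaxis]
Phi = np.hstack((np.cos(VX + mX), np.sin(VX + mX),
                 np.cos(VX - mX), np.sin(VX - mX))) / np.sqrt(2 * n)
assert Phi.shape == (N, 4 * n)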
def add_directory(self, iso_path=None, rr_name=None, joliet_path=None, file_mode=None, udf_path=None): # type: (Optional[str], Optional[str], Optional[str], int, Optional[str]) -> None ''' Add a directory to the ISO. At least one of an iso_path, joliet_path, or udf_path must be provided. Providing joliet_path on a non-Joliet ISO, or udf_path on a non-UDF ISO, is an error. If the ISO contains Rock Ridge, then a Rock Ridge name must be provided. Parameters: iso_path - The ISO9660 absolute path to use for the directory. rr_name - The Rock Ridge name to use for the directory. joliet_path - The Joliet absolute path to use for the directory. file_mode - The POSIX file mode to use for the directory. This only applies for Rock Ridge ISOs. udf_path - The UDF absolute path to use for the directory. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO') if iso_path is None and joliet_path is None and udf_path is None: raise pycdlibexception.PyCdlibInvalidInput('Either iso_path or joliet_path must be passed') if file_mode is not None and not self.rock_ridge: raise pycdlibexception.PyCdlibInvalidInput('A file mode can only be specified for Rock Ridge ISOs') # For backwards-compatibility reasons, if the mode was not specified we # just assume 555. We should probably eventually make file_mode # required for Rock Ridge and remove this assumption. if file_mode is None: file_mode = 0o040555 num_bytes_to_add = 0 if iso_path is not None: iso_path_bytes = utils.normpath(iso_path) new_rr_name = self._check_rr_name(rr_name) depth = len(utils.split_path(iso_path_bytes)) if not self.rock_ridge and self.enhanced_vd is None: _check_path_depth(iso_path_bytes) (name, parent) = self._iso_name_and_parent_from_path(iso_path_bytes) _check_iso9660_directory(name, self.interchange_level) relocated = False fake_dir_rec = None orig_parent = None iso9660_name = name if self.rock_ridge and (depth % 8) == 0 and self.enhanced_vd is None: # If the depth was a multiple of 8, then we are going to have to # make a relocated entry for this record. num_bytes_to_add += self._find_or_create_rr_moved() # With a depth of 8, we have to add the directory both to the # original parent with a CL link, and to the new parent with an # RE link. Here we make the 'fake' record, as a child of the # original place; the real one will be done below. fake_dir_rec = dr.DirectoryRecord() fake_dir_rec.new_dir(self.pvd, name, parent, self.pvd.sequence_number(), self.rock_ridge, new_rr_name, self.pvd.logical_block_size(), True, False, self.xa, file_mode) num_bytes_to_add += self._add_child_to_dr(fake_dir_rec, self.pvd.logical_block_size()) # The fake dir record doesn't get an entry in the path table record. relocated = True orig_parent = parent parent = self._rr_moved_record # Since we are moving the entry underneath the RR_MOVED # directory, there is now the chance of a name collision (this # can't happen without relocation since _add_child_to_dr() below # won't allow duplicate names). Check for that here and # generate a new name. index = 0 while True: for child in self._rr_moved_record.children: if child.file_ident == iso9660_name: # Python 3.4 doesn't support substitution with a byte # array, so we do it as a string and encode to bytes. 
iso9660_name = name + ('%03d' % (index)).encode() index += 1 break else: break rec = dr.DirectoryRecord() rec.new_dir(self.pvd, iso9660_name, parent, self.pvd.sequence_number(), self.rock_ridge, new_rr_name, self.pvd.logical_block_size(), False, relocated, self.xa, file_mode) num_bytes_to_add += self._add_child_to_dr(rec, self.pvd.logical_block_size()) if rec.rock_ridge is not None: if relocated: fake_dir_rec.rock_ridge.cl_to_moved_dr = rec # type: ignore rec.rock_ridge.moved_to_cl_dr = fake_dir_rec # type: ignore num_bytes_to_add += self._update_rr_ce_entry(rec) self._create_dot(self.pvd, rec, self.rock_ridge, self.xa, file_mode) parent_file_mode = -1 if parent.rock_ridge is not None: parent_file_mode = parent.rock_ridge.get_file_mode() else: if parent.is_root: parent_file_mode = file_mode dotdot = self._create_dotdot(self.pvd, rec, self.rock_ridge, relocated, self.xa, parent_file_mode) if dotdot.rock_ridge is not None and relocated: dotdot.rock_ridge.parent_link = orig_parent # We always need to add an entry to the path table record ptr = path_table_record.PathTableRecord() ptr.new_dir(iso9660_name) num_bytes_to_add += self._add_to_ptr_size(ptr) + self.pvd.logical_block_size() rec.set_ptr(ptr) if joliet_path is not None: num_bytes_to_add += self._add_joliet_dir(self._normalize_joliet_path(joliet_path)) if udf_path is not None: if self.udf_root is None: raise pycdlibexception.PyCdlibInvalidInput('Can only specify a udf_path for a UDF ISO') log_block_size = self.pvd.logical_block_size() udf_path_bytes = utils.normpath(udf_path) (udf_name, udf_parent) = self._udf_name_and_parent_from_path(udf_path_bytes) file_ident = udfmod.UDFFileIdentifierDescriptor() file_ident.new(True, False, udf_name, udf_parent) num_new_extents = udf_parent.add_file_ident_desc(file_ident, log_block_size) num_bytes_to_add += num_new_extents * log_block_size file_entry = udfmod.UDFFileEntry() file_entry.new(0, 'dir', udf_parent, log_block_size) file_ident.file_entry = file_entry file_entry.file_ident = file_ident num_bytes_to_add += log_block_size udf_dotdot = udfmod.UDFFileIdentifierDescriptor() udf_dotdot.new(True, True, b'', udf_parent) num_new_extents = file_ident.file_entry.add_file_ident_desc(udf_dotdot, log_block_size) num_bytes_to_add += num_new_extents * log_block_size self.udf_logical_volume_integrity.logical_volume_impl_use.num_dirs += 1 self._finish_add(0, num_bytes_to_add)
Add a directory to the ISO. At least one of an iso_path, joliet_path, or udf_path must be provided. Providing joliet_path on a non-Joliet ISO, or udf_path on a non-UDF ISO, is an error. If the ISO contains Rock Ridge, then a Rock Ridge name must be provided. Parameters: iso_path - The ISO9660 absolute path to use for the directory. rr_name - The Rock Ridge name to use for the directory. joliet_path - The Joliet absolute path to use for the directory. file_mode - The POSIX file mode to use for the directory. This only applies for Rock Ridge ISOs. udf_path - The UDF absolute path to use for the directory. Returns: Nothing.
def get_last_week_range(weekday_start="Sunday"): """ Gets the date for the first and the last day of the previous complete week. :param weekday_start: Either "Monday" or "Sunday", indicating the first day of the week. :returns: A tuple containing two date objects, for the first and the last day of the week respectively. """ today = date.today() # Get the first day of the past complete week. start_of_week = snap_to_beginning_of_week(today, weekday_start) - timedelta(weeks=1) end_of_week = start_of_week + timedelta(days=6) return (start_of_week, end_of_week)
Gets the date for the first and the last day of the previous complete week. :param weekday_start: Either "Monday" or "Sunday", indicating the first day of the week. :returns: A tuple containing two date objects, for the first and the last day of the week respectively.
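For example, if today were Wednesday 2019-03-13 and the week starts on Sunday, the previous complete week runs 2019-03-03 through 2019-03-09; in all cases the two endpoints are six days apart:

start, end = get_last_week_range("Sunday")
assert (end - start).days == 6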
def add(self, factory, component, properties=None): # type: (str, str, dict) -> None """ Enqueues the instantiation of the given component :param factory: Factory name :param component: Component name :param properties: Component properties :raise ValueError: Component name already reserved in the queue :raise Exception: Error instantiating the component """ with self.__lock: if component in self.__names: raise ValueError( "Component name already queued: {0}".format(component) ) # Normalize properties if properties is None: properties = {} # Store component description self.__names[component] = factory self.__queue.setdefault(factory, {})[component] = properties try: with use_ipopo(self.__context) as ipopo: # Try to instantiate the component right now self._try_instantiate(ipopo, factory, component) except BundleException: # iPOPO not yet started pass
Enqueues the instantiation of the given component :param factory: Factory name :param component: Component name :param properties: Component properties :raise ValueError: Component name already reserved in the queue :raise Exception: Error instantiating the component
def addVariantSet(self): """ Adds a new VariantSet into this repo. """ self._openRepo() dataset = self._repo.getDatasetByName(self._args.datasetName) dataUrls = self._args.dataFiles name = self._args.name if len(dataUrls) == 1: if self._args.name is None: name = getNameFromPath(dataUrls[0]) if os.path.isdir(dataUrls[0]): # Read in the VCF files from the directory. # TODO support uncompressed VCF and BCF files vcfDir = dataUrls[0] pattern = os.path.join(vcfDir, "*.vcf.gz") dataUrls = glob.glob(pattern) if len(dataUrls) == 0: raise exceptions.RepoManagerException( "Cannot find any VCF files in the directory " "'{}'.".format(vcfDir)) dataUrls[0] = self._getFilePath(dataUrls[0], self._args.relativePath) elif self._args.name is None: raise exceptions.RepoManagerException( "Cannot infer the intended name of the VariantSet when " "more than one VCF file is provided. Please provide a " "name argument using --name.") parsed = urlparse.urlparse(dataUrls[0]) if parsed.scheme not in ['http', 'ftp']: dataUrls = map(lambda url: self._getFilePath( url, self._args.relativePath), dataUrls) # Now, get the index files for the data files that we've now obtained. indexFiles = self._args.indexFiles if indexFiles is None: # First check if all the paths exist locally, as they must # if we are making a default index path. for dataUrl in dataUrls: if not os.path.exists(dataUrl): raise exceptions.MissingIndexException( "Cannot find file '{}'. All variant files must be " "stored locally if the default index location is " "used. If you are trying to create a VariantSet " "based on remote URLs, please download the index " "files to the local file system and provide them " "with the --indexFiles argument".format(dataUrl)) # We assume that the indexes are made by adding .tbi indexSuffix = ".tbi" # TODO support BCF input properly here by adding .csi indexFiles = [filename + indexSuffix for filename in dataUrls] indexFiles = map(lambda url: self._getFilePath( url, self._args.relativePath), indexFiles) variantSet = variants.HtslibVariantSet(dataset, name) variantSet.populateFromFile(dataUrls, indexFiles) # Get the reference set that is associated with the variant set. referenceSetName = self._args.referenceSetName if referenceSetName is None: # Try to find a reference set name from the VCF header. referenceSetName = variantSet.getVcfHeaderReferenceSetName() if referenceSetName is None: raise exceptions.RepoManagerException( "Cannot infer the ReferenceSet from the VCF header. Please " "specify the ReferenceSet to associate with this " "VariantSet using the --referenceSetName option") referenceSet = self._repo.getReferenceSetByName(referenceSetName) variantSet.setReferenceSet(referenceSet) variantSet.setAttributes(json.loads(self._args.attributes)) # Now check for annotations annotationSets = [] if variantSet.isAnnotated() and self._args.addAnnotationSets: ontologyName = self._args.ontologyName if ontologyName is None: raise exceptions.RepoManagerException( "A sequence ontology name must be provided") ontology = self._repo.getOntologyByName(ontologyName) self._checkSequenceOntology(ontology) for annotationSet in variantSet.getVariantAnnotationSets(): annotationSet.setOntology(ontology) annotationSets.append(annotationSet) # Add the annotation sets and the variant set as an atomic update def updateRepo(): self._repo.insertVariantSet(variantSet) for annotationSet in annotationSets: self._repo.insertVariantAnnotationSet(annotationSet) self._updateRepo(updateRepo)
Adds a new VariantSet into this repo.
def to_json(self): """ :return: str """ json_dict = self.to_json_basic() json_dict['channels'] = self.relay_channels return json.dumps(json_dict)
:return: str
def winner(self): """Returns either x or o if one of them won, otherwise None""" for c in 'xo': for comb in [(0,3,6), (1,4,7), (2,5,8), (0,1,2), (3,4,5), (6,7,8), (0,4,8), (2,4,6)]: if all(self.spots[p] == c for p in comb): return c return None
Returns either x or o if one of them won, otherwise None
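A minimal sketch with a stand-in holder class, assuming winner() is bound to an object whose spots attribute is the usual nine-cell board indexed 0-8, row by row:

class Board(object):
    def __init__(self, spots):
        self.spots = spots

Board.winner = winner   # attach the function above for illustration

b = Board(['x', 'x', 'x',
           'o', 'o', ' ',
           ' ', ' ', ' '])
assert b.winner() == 'x'   # x owns the top row (0, 1, 2)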
def zoom_out(self): """Zooms out by zoom factor""" zoom = self.grid.grid_renderer.zoom target_zoom = zoom * (1 - config["zoom_factor"]) if target_zoom > config["minimum_zoom"]: self.zoom(target_zoom)
Zooms out by zoom factor
def airspeeds_encode(self, time_boot_ms, airspeed_imu, airspeed_pitot, airspeed_hot_wire, airspeed_ultrasonic, aoa, aoy):
    '''
    The airspeed measured by sensors and IMU

    time_boot_ms              : Timestamp (milliseconds since system boot) (uint32_t)
    airspeed_imu              : Airspeed estimate from IMU, cm/s (int16_t)
    airspeed_pitot            : Pitot measured forward airspeed, cm/s (int16_t)
    airspeed_hot_wire         : Hot wire anemometer measured airspeed, cm/s (int16_t)
    airspeed_ultrasonic       : Ultrasonic measured airspeed, cm/s (int16_t)
    aoa                       : Angle of attack sensor, degrees * 10 (int16_t)
    aoy                       : Yaw angle sensor, degrees * 10 (int16_t)

    '''
    return MAVLink_airspeeds_message(time_boot_ms, airspeed_imu, airspeed_pitot, airspeed_hot_wire, airspeed_ultrasonic, aoa, aoy)
The airspeed measured by sensors and IMU time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t) airspeed_imu : Airspeed estimate from IMU, cm/s (int16_t) airspeed_pitot : Pitot measured forward airspeed, cm/s (int16_t) airspeed_hot_wire : Hot wire anemometer measured airspeed, cm/s (int16_t) airspeed_ultrasonic : Ultrasonic measured airspeed, cm/s (int16_t) aoa : Angle of attack sensor, degrees * 10 (int16_t) aoy : Yaw angle sensor, degrees * 10 (int16_t)
async def save(self): """Save this subnet.""" if 'vlan' in self._changed_data and self._changed_data['vlan']: # Update uses the ID of the VLAN, not the VLAN object. self._changed_data['vlan'] = self._changed_data['vlan']['id'] if (self._orig_data['vlan'] and 'id' in self._orig_data['vlan'] and self._changed_data['vlan'] == ( self._orig_data['vlan']['id'])): # VLAN didn't really change, the object was just set to the # same VLAN. del self._changed_data['vlan'] await super(Subnet, self).save()
Save this subnet.
def _clean_streams(repo, mapped_streams): """Clean mapped standard streams.""" for stream_name in ('stdout', 'stderr'): stream = mapped_streams.get(stream_name) if not stream: continue path = os.path.relpath(stream, start=repo.working_dir) if (path, 0) not in repo.index.entries: os.remove(stream) else: blob = repo.index.entries[(path, 0)].to_blob(repo) with open(path, 'wb') as fp: fp.write(blob.data_stream.read())
Clean mapped standard streams.
def DSP_callback_toc(self): """ Add new toc time to the DSP_toc list. Will not be called if Tcapture = 0. """ if self.Tcapture > 0: self.DSP_toc.append(time.time()-self.start_time)
Add new toc time to the DSP_toc list. Will not be called if Tcapture = 0.
def headers_for_url(cls, url): """Return the headers only for the given URL as a dict""" response = cls.http_request(url, method='HEAD') if response.status != 200: cls.raise_http_error(response) return Resource.headers_as_dict(response)
Return the headers only for the given URL as a dict
def entropy(self, base = 2): """Compute the entropy of the distribution""" entropy = 0 if not base and self.base: base = self.base for type in self._dist: if not base: entropy += self._dist[type] * -math.log(self._dist[type]) else: entropy += self._dist[type] * -math.log(self._dist[type], base) return entropy
Compute the entropy of the distribution
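A worked check of the formula, assuming _dist maps outcomes to probabilities: a fair coin carries one bit of entropy in base 2:

import math

dist = {'heads': 0.5, 'tails': 0.5}
h = sum(p * -math.log(p, 2) for p in dist.values())
assert abs(h - 1.0) < 1e-12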
def _get_price_id_for_upgrade(self, package_items, option, value, public=True): """Find the price id for the option and value to upgrade. Deprecated in favor of _get_price_id_for_upgrade_option() :param list package_items: Contains all the items related to an VS :param string option: Describes type of parameter to be upgraded :param int value: The value of the parameter to be upgraded :param bool public: CPU will be in Private/Public Node. """ warnings.warn("use _get_price_id_for_upgrade_option() instead", DeprecationWarning) option_category = { 'memory': 'ram', 'cpus': 'guest_core', 'nic_speed': 'port_speed' } category_code = option_category[option] for item in package_items: is_private = (item.get('units') == 'PRIVATE_CORE') for price in item['prices']: if 'locationGroupId' in price and price['locationGroupId']: # Skip location based prices continue if 'categories' not in price: continue categories = price['categories'] for category in categories: if not (category['categoryCode'] == category_code and str(item['capacity']) == str(value)): continue if option == 'cpus': if public and not is_private: return price['id'] elif not public and is_private: return price['id'] elif option == 'nic_speed': if 'Public' in item['description']: return price['id'] else: return price['id']
Find the price id for the option and value to upgrade. Deprecated in favor of _get_price_id_for_upgrade_option() :param list package_items: Contains all the items related to an VS :param string option: Describes type of parameter to be upgraded :param int value: The value of the parameter to be upgraded :param bool public: CPU will be in Private/Public Node.
def walk_files(args, root, directory, action):
    """
    Recursively walk the subdirectories of the directory,
    calling the action on each file
    """
    for entry in os.listdir(directory):
        if is_hidden(args, entry):
            continue
        if is_excluded_directory(args, entry):
            continue
        if is_in_default_excludes(entry):
            continue
        if not is_included(args, entry):
            continue
        if is_excluded(args, entry, directory):
            continue
        entry = os.path.join(directory, entry)
        if os.path.isdir(entry):
            walk_files(args, root, entry, action)
        if os.path.isfile(entry):
            if is_binary(entry):
                continue
            action(entry)
Recursively walk the subdirectories of the directory, calling the action on each file
def list(context, job_id, sort, limit, where, verbose):
    """list(context, job_id, sort, limit, where, verbose)

    List all files.

    >>> dcictl file-list job-id [OPTIONS]

    :param string job_id: The job to list files for
    :param string sort: Field to apply sort
    :param integer limit: Max number of rows to return
    :param string where: An optional filter criteria
    :param boolean verbose: Display verbose output
    """
    result = job.list_files(context, id=job_id, sort=sort, limit=limit,
                            verbose=verbose, where=where)
    utils.format_output(result, context.format, verbose=verbose)
list(context, job_id, sort, limit, where, verbose) List all files. >>> dcictl file-list job-id [OPTIONS] :param string job_id: The job to list files for :param string sort: Field to apply sort :param integer limit: Max number of rows to return :param string where: An optional filter criteria :param boolean verbose: Display verbose output