Dataset schema:
  Unnamed: 0 — int64, values 0 to 389k
  code — string, lengths 26 to 79.6k
  docstring — string, lengths 1 to 46.9k
373,000
def _sum(data):
    data = iter(data)
    n = _first(data)
    if n is not None:
        data = chain([n], data)
        if isinstance(n, F):
            return math.fsum(data)
        return sum(data)
    return 0
_sum(data [, start]) -> value

Return a high-precision sum of the given numeric data. If optional
argument ``start`` is given, it is added to the total. If ``data`` is
empty, ``start`` (defaulting to 0) is returned.

Examples
--------
>>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75)
11.0

Some sources of round-off error will be avoided:

>>> _sum([1e50, 1, -1e50] * 1000)  # Built-in sum returns zero.
1000.0

Fractions and Decimals are also supported:

>>> from fractions import Fraction as F
>>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)])
Fraction(63, 20)

>>> from decimal import Decimal as D
>>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")]
>>> _sum(data)
Decimal('0.6963')

Mixed types are currently treated as an error, except that int is allowed.
373,001
def lrem(self, key, value, count=0):
    value = self._encode(value)
    redis_list = self._get_list(key, )  # second argument elided in the source
    removed_count = 0
    if self._encode(key) in self.redis:
        if count == 0:
            while redis_list.count(value):
                redis_list.remove(value)
                removed_count += 1
        elif count > 0:
            counter = 0
            while redis_list.count(value):
                redis_list.remove(value)
                counter += 1
                removed_count += 1
                if counter >= count:
                    break
        elif count < 0:
            counter = -count
            new_list = []
            for v in reversed(redis_list):
                if v == value and counter > 0:
                    counter -= 1
                    removed_count += 1
                else:
                    new_list.append(v)
            redis_list[:] = list(reversed(new_list))
    if removed_count > 0 and len(redis_list) == 0:
        self.delete(key)
    return removed_count
Emulate lrem.
373,002
def _map_arguments(self, args):
    # Most string literals (dictionary keys and link names) were lost in
    # extraction; the keys below are reconstructed from the matching keyword
    # names, and unrecoverable values are marked with `...`.
    config_yaml = args['config']
    config_dict = load_yaml(config_yaml)
    data = config_dict.get('data')
    comp = config_dict.get('comp')
    dry_run = args.get('dry_run', False)
    self._set_link(..., SplitAndMktimeChain,
                   comp=comp, data=data,
                   ft1file=config_dict['ft1file'],
                   ft2file=config_dict['ft2file'],
                   hpx_order_ccube=config_dict.get('hpx_order_ccube', 7),
                   hpx_order_expcube=config_dict.get('hpx_order_expcube', 7),
                   mktime=config_dict.get('mktime', None),
                   do_ltsum=config_dict.get('do_ltsum', False),
                   scratch=config_dict.get('scratch', None),
                   dry_run=dry_run)
    self._set_link(..., ResidualCR_SG,
                   comp=comp, data=data,
                   mktimefilter=config_dict.get('mktimefilter', None),
                   hpx_order=config_dict.get('hpx_order', 4),
                   clean=config_dict.get('clean', None),
                   dirty=config_dict.get('dirty', None),
                   select_factor=config_dict.get('select_factor', None),
                   mask_factor=config_dict.get('mask_factor', None),
                   sigma=config_dict.get('sigma', None),
                   full_output=config_dict.get('full_output', False),
                   dry_run=dry_run)
Map from the top-level arguments to the arguments provided to the individual links
373,003
def start(self):
    self.connect()
    if not self.isAlive():
        super(WAMPClient, self).start()
    self.hello()
    return self
Initialize websockets, say hello, and start listening for events
373,004
def import_trade(self, trade):
    # `item.price.item.order_model` in the source is almost certainly a
    # mangled `item.price, item.order_model`; fixed here.
    for item in trade:
        self.make_deal(item.code, item.datetime, item.amount, item.towards,
                       item.price, item.order_model, item.amount_model)
``trade`` is an iterable list/generator
373,005
def init_app(self, app, entry_point_group=..., **kwargs):
    # The default for ``entry_point_group`` was elided in the source;
    # 'invenio-workflows' as the extensions key is inferred from context.
    app.config.setdefault(
        "WORKFLOWS_OBJECT_CLASS",
        "invenio_workflows.api.WorkflowObject"
    )
    state = _WorkflowState(app, entry_point_group=entry_point_group, **kwargs)
    app.extensions['invenio-workflows'] = state
    return state
Flask application initialization.
373,006
def qualified_name_import(cls):
    parts = qualified_name(cls).split('.')
    return "from {} import {}".format('.'.join(parts[:-1]), parts[-1])
Full name of a class, including the module. Like qualified_class_name, but when you already have a class
373,007
def db_import(self, urls=None, force_download=False):
    if not urls:
        urls = [
            defaults.url_base + table_conf.tables[model]['file_name']  # key elided; name is an assumption
            for model in table_conf.tables
        ]
    log.info(..., urls)  # log message elided in the source
    self.drop_all()
    self.download_urls(urls=urls, force_download=force_download)
    self.create_all()
    self.import_tables()
    self.session.close()
Updates the CTD database:

1. downloads all files from CTD
2. drops all tables in the database
3. creates all tables in the database
4. imports all data from the CTD files

:param iter[str] urls: An iterable of URL strings
:param bool force_download: force the method to re-download the files
373,008
def count_mismatches_before_variant(reference_prefix, cdna_prefix):
    if len(reference_prefix) != len(cdna_prefix):
        # The original message had two arguments for one placeholder.
        raise ValueError(
            "Expected reference prefix %s to be same length as %s" % (
                reference_prefix, cdna_prefix))
    return sum(xi != yi for (xi, yi) in zip(reference_prefix, cdna_prefix))
Computes the number of mismatching nucleotides between two cDNA sequences before a variant locus. Parameters ---------- reference_prefix : str cDNA sequence of a reference transcript before a variant locus cdna_prefix : str cDNA sequence detected from RNAseq before a variant locus
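A minimal self-contained check of the zip/sum comparison idiom the function relies on, independent of the surrounding library:

mismatches = sum(xi != yi for xi, yi in zip("ATGCAT", "ATGGAT"))
print(mismatches)  # 1 -- only position 3 differs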
373,009
def _find(expr, sub, start=0, end=None): return _string_op(expr, Find, output_type=types.int64, _sub=sub, _start=start, _end=end)
Return the lowest index in each string of the sequence (or in the scalar) where the substring is fully contained between [start:end]. Return -1 on failure. Equivalent to the standard str.find(). :param expr: :param sub: substring being searched for :param start: left edge index :param end: right edge index :return: sequence or scalar
373,010
def groupuninstall(group, options=None):
    manager = MANAGER
    if options is None:
        options = []
    elif isinstance(options, str):
        options = [options]
    options = " ".join(options)
    run_as_root(... % locals())  # command template elided in the source
Remove an existing software group. Extra *options* may be passed to ``yum`` if necessary.
373,011
def is_function_or_method(obj): return inspect.isfunction(obj) or inspect.ismethod(obj) or is_cython(obj)
Check if an object is a function or method. Args: obj: The Python object in question. Returns: True if the object is a function or method.
373,012
def is_interesting(entry):
    # String literals reconstructed from the XML examples in the docstring:
    # 'path', 'wc-status', 'item', and the boring states 'external' and
    # 'unversioned'. The warning message is an assumption.
    if entry.get('path') == '.':
        return False
    status = entry.find('wc-status')
    if status is None:
        warning('svn status entry %s has no wc-status' % entry.get('path'))
        return False
    if status.get('item') in ('external', 'unversioned'):
        return False
    return True
Is this entry interesting?

``entry`` is an XML node representing one entry of the svn status XML
output. It looks like this::

    <entry path="unchanged.txt">
      <wc-status item="normal" revision="1" props="none">
        <commit revision="1">
          <author>mg</author>
          <date>2015-02-06T07:52:38.163516Z</date>
        </commit>
      </wc-status>
    </entry>
    <entry path="added-but-not-committed.txt">
      <wc-status item="added" revision="-1" props="none"></wc-status>
    </entry>
    <entry path="ext">
      <wc-status item="external" props="none"></wc-status>
    </entry>
    <entry path="unknown.txt">
      <wc-status props="none" item="unversioned"></wc-status>
    </entry>
373,013
def _configure_manager(self):
    self._manager = CloudDNSManager(self, resource_class=CloudDNSDomain,
                                    response_key="domains",
                                    plural_response_key="domains",
                                    uri_base="domains")
Creates a manager to handle the DNS domain instances.
373,014
def load_model_from_link(name, **overrides):
    path = get_data_path() / name / "__init__.py"
    try:
        cls = import_file(name, path)
    except AttributeError:
        raise IOError(Errors.E051.format(name=name))
    return cls.load(**overrides)
Load a model from a shortcut link, or directory in spaCy data path.
373,015
def _get_mechanism(self, rup, coeffs):
    # The coefficient key was elided in the source; 'b6' matches the
    # ``b6 * H`` term named in the docstring.
    is_strike_slip = self.get_fault_type_dummy_variables(rup)
    return coeffs['b6'] * is_strike_slip
Compute fifth term of equation (1) on p. 1200: ``b6 * H``
373,016
def logout_all(self,
               command='exit',  # default reconstructed from the docstring
               note=None,
               echo=None,
               timeout=shutit_global.shutit_global_object.default_timeout,
               nonewline=False,
               loglevel=logging.DEBUG):
    shutit_global.shutit_global_object.yield_to_draw()
    for key in self.shutit_pexpect_sessions:
        shutit_pexpect_session = self.shutit_pexpect_sessions[key]
        shutit_pexpect_session.logout_all(
            ShutItSendSpec(shutit_pexpect_session,
                           send=command,
                           note=note,
                           timeout=timeout,
                           nonewline=nonewline,
                           loglevel=loglevel,
                           echo=echo))
    return True
Logs the user out of all pexpect sessions within this ShutIt object. @param command: Command to run to log out (default=exit) @param note: See send()
373,017
def compute_score(self):
    cases = self.get_configured_tests() | set(self.result.cases)
    scores = DataFrame({"score": 0.0, "max": 1.0}, index=sorted(cases))
    self.result.setdefault("score", dict())
    self.result["score"]["sections"] = list()
    for test, result in iteritems(self.result.cases):
        metric = result["metric"]
        if hasattr(metric, "items"):
            result["score"] = test_score = dict()
            total = 0.0
            for key, value in iteritems(metric):
                value = 1.0 - value
                total += value
                test_score[key] = value
            if len(metric) == 0:
                metric = 0.0
            else:
                metric = total / len(metric)
        else:
            metric = 1.0 - metric
        scores.at[test, "score"] = metric
        scores.loc[test, :] *= self.config["weights"].get(test, 1.0)
    score = 0.0
    maximum = 0.0
    # The three configuration keys below were elided in the source.
    for section_id, card in iteritems(self.config[...][...][...]):
        cases = card.get("cases", None)
        if cases is None:
            continue
        card_score = scores.loc[cases, "score"].sum()
        card_total = scores.loc[cases, "max"].sum()
        section_score = {"section": section_id,
                         "score": card_score / card_total}
        self.result["score"]["sections"].append(section_score)
        weight = card.get("weight", 1.0)
        score += card_score * weight
        maximum += card_total * weight
    self.result["score"]["total_score"] = score / maximum
Calculate the overall test score using the configuration.
373,018
def get_coverage(config: CoverageConfig) -> 'Coverage':
    # The elided return annotation is reconstructed from the docstring.
    if config.type == C.COVERAGE_COUNT or config.type == C.COVERAGE_FERTILITY:
        utils.check_condition(config.num_hidden == 1,
                              "Count or fertility coverage requires coverage_num_hidden==1")
    if config.type == C.GRU_TYPE:
        return GRUCoverage(config.num_hidden, config.layer_normalization)
    elif config.type in {C.TANH, C.SIGMOID, C.RELU, C.SOFT_RELU}:
        return ActivationCoverage(config.num_hidden, config.type,
                                  config.layer_normalization)
    elif config.type == C.COVERAGE_COUNT:
        return CountCoverage()
    elif config.type == C.COVERAGE_FERTILITY:
        return FertilityCoverage(config.max_fertility)
    else:
        raise ValueError("Unknown coverage type %s" % config.type)
Returns a Coverage instance. :param config: Coverage configuration. :return: Instance of Coverage.
373,019
def parse(self):
    # The elided literals are reconstructed from context: this parses the
    # 'depends' lines of a metadata.rb, splitting each on commas.
    data = utils.ruby_lines(self.readlines())
    data = [tuple(j.strip() for j in line.split(None, 1)) for line in data]
    depends = {}
    for line in data:
        if not len(line) == 2:
            continue
        key, value = line
        if key == 'depends':
            value = value.split(',')
            lib = utils.ruby_strip(value[0])
            detail = [utils.ruby_strip(j) for j in value[1:]]
            depends[lib] = detail
    datamap = {key: utils.ruby_strip(val) for key, val in data}
    if depends:
        datamap['depends'] = depends
    self.seek(0)
    return datamap
Parse the metadata.rb into a dict.
373,020
def replace_suffixes_1(self, word):
    # The elided suffix strings are reconstructed following the Porter2
    # step-1a rules this code implements: sses -> ss, ied/ies -> ie or i,
    # us/ss left alone, and a trailing s dropped if a vowel precedes it.
    length = len(word)
    if word.endswith("sses"):
        return word[:-2]
    elif word.endswith("ied") or word.endswith("ies"):
        word = word[:-3]
        if len(word) == 1:
            word += "ie"
        else:
            word += "i"
        return word
    elif word.endswith("us") or word.endswith("ss"):
        return word
    for letter in word[:-2]:
        if letter in self.vowels:
            return word[:-1]
    return word
Find the longest suffix among the ones specified and perform the required action.
373,021
def is_contradictory(self, other):
    other = StringCell.coerce(other)
    if self.value is None or other.value is None:
        return False

    def sequence_in(s1, s2):
        return bool(re.search(".*".join(s1), s2))

    return not sequence_in(self.value, other.value) and \
        not sequence_in(other.value, self.value)
Can these two strings coexist?
373,022
def combine_proximals(*factory_list):
    def diag_op_factory(sigma):
        if np.isscalar(sigma):
            sigma = [sigma] * len(factory_list)
        return DiagonalOperator(
            *[factory(sigmai)
              for sigmai, factory in zip(sigma, factory_list)])

    return diag_op_factory
r"""Combine proximal operators into a diagonal product space operator. This assumes the functional to be separable across variables in order to make use of the separable sum property of proximal operators. Parameters ---------- factory_list : sequence of callables Proximal operator factories to be combined. Returns ------- diag_op : function Returns a diagonal product space operator factory to be initialized with the same step size parameter Notes ----- That two functionals :math:`F` and :math:`G` are separable across variables means that :math:`F((x, y)) = F(x)` and :math:`G((x, y)) = G(y)`, and in this case the proximal operator of the sum is given by .. math:: \mathrm{prox}_{\sigma (F(x) + G(y))}(x, y) = (\mathrm{prox}_{\sigma F}(x), \mathrm{prox}_{\sigma G}(y)).
373,023
def lookup_facade(name, version):
    for _version in range(int(version), 0, -1):
        try:
            facade = getattr(CLIENTS[str(_version)], name)
            return facade
        except (KeyError, AttributeError):
            continue
    else:
        raise ImportError("No supported version for facade: "
                          "{}".format(name))
Given a facade name and version, attempt to pull that facade out of the correct client<version>.py file.
373,024
def get_ref_bedtool(ref_file, config, chrom=None):
    broad_runner = broad.runner_from_path("picard", config)
    ref_dict = broad_runner.run_fn("picard_index_ref", ref_file)
    ref_lines = []
    with pysam.Samfile(ref_dict, "r") as ref_sam:
        for sq in ref_sam.header["SQ"]:
            if not chrom or sq["SN"] == chrom:
                ref_lines.append("%s\t%s\t%s" % (sq["SN"], 0, sq["LN"]))
    return pybedtools.BedTool("\n".join(ref_lines), from_string=True)
Retrieve a pybedtool BedTool object with reference sizes from input reference.
373,025
def get_wrap_size_limit(self, output_size, conf_req=True,
                        qop_req=C.GSS_C_QOP_DEFAULT):
    # The C type names passed to ffi.new()/ffi.cast() were elided in the
    # source and are marked with `...`.
    minor_status = ffi.new(...)
    max_input_size = ffi.new(...)
    retval = C.gss_wrap_size_limit(
        minor_status,
        self._ctx[0],
        ffi.cast(..., conf_req),
        ffi.cast(..., qop_req),
        ffi.cast(..., output_size),
        max_input_size
    )
    if GSS_ERROR(retval):
        if minor_status[0] and self.mech_type:
            raise _exception_for_status(retval, minor_status[0], self.mech_type)
        else:
            raise _exception_for_status(retval, minor_status[0])
    return max_input_size[0]
Calculates the maximum size of message that can be fed to :meth:`wrap` so that the size of the resulting wrapped token (message plus wrapping overhead) is no more than a given maximum output size. :param output_size: The maximum output size (in bytes) of a wrapped token :type output_size: int :param conf_req: Whether to calculate the wrapping overhead for confidentiality protection (if True) or just integrity protection (if False). :type conf_req: bool :returns: The maximum input size (in bytes) of message that can be passed to :meth:`wrap` :rtype: int
373,026
def get_index_nested(x, i):
    for ind in range(len(x)):
        if i == x[ind]:
            return ind
    return -1
Description:
    Returns the first index of the array (vector) x containing the value i,
    or -1 if i is not present.

Parameters:
    x: one-dimensional array
    i: search value
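A quick sanity check of the behaviour described above, using get_index_nested as defined in this entry:

x = [4, 7, 9, 7]
print(get_index_nested(x, 7))  # 1 -- first match wins
print(get_index_nested(x, 5))  # -1 -- not found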
373,027
def Wow64EnableWow64FsRedirection(Wow64FsEnableRedirection):
    _Wow64EnableWow64FsRedirection = windll.kernel32.Wow64EnableWow64FsRedirection
    _Wow64EnableWow64FsRedirection.argtypes = [BOOLEAN]
    _Wow64EnableWow64FsRedirection.restype = BOOLEAN
    _Wow64EnableWow64FsRedirection.errcheck = RaiseIfZero
    # The trailing call appears to have been lost in extraction; the
    # prototype set up above is clearly meant to be invoked.
    _Wow64EnableWow64FsRedirection(Wow64FsEnableRedirection)
This function may not work reliably when there are nested calls. Therefore, this function has been replaced by the L{Wow64DisableWow64FsRedirection} and L{Wow64RevertWow64FsRedirection} functions. @see: U{http://msdn.microsoft.com/en-us/library/windows/desktop/aa365744(v=vs.85).aspx}
373,028
def input(self, _in, out, **kwargs):
    # The regex group name, the output template and the trailing literal
    # were elided in the source and are marked with `...`.
    language_code = _re_language_code.search(_in.read()).group(...)
    _in.seek(0)
    catalog = read_po(_in)
    out.write((...).format(language_code))
    out.write(json.dumps({
        key: value.string
        for key, value in catalog._messages.items()
        if key and value.string
    }))
    out.write(...)
Process individual translation file.
373,029
def create_index_tuple(group_ids):
    max_group_id = np.max(group_ids)
    start_idx_arr = np.full(max_group_id + 1, 0)
    n_items_arr = np.full(max_group_id + 1, 0)
    current_group_id = group_ids[0]
    current_idx = 0
    item_count = 0
    for group_id in group_ids:
        if group_id != current_group_id:
            start_idx_arr[current_group_id] = current_idx
            n_items_arr[current_group_id] = item_count
            current_idx += item_count
            item_count = 0
            current_group_id = group_id
        item_count += 1
    else:
        start_idx_arr[current_group_id] = current_idx
        n_items_arr[current_group_id] = item_count
    return (start_idx_arr, n_items_arr)
A helper function to create index tuples for fast lookup in HDF5Pump
373,030
def make_present_participles(verbs):
    res = []
    for verb in verbs:
        parts = verb.split()
        if parts[0].endswith("e"):
            parts[0] = parts[0][:-1] + "ing"
        else:
            parts[0] = parts[0] + "ing"
        res.append(" ".join(parts))
    return res
Make the list of verbs into present participles E.g.: empower -> empowering drive -> driving
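For example, calling the function above:

print(make_present_participles(["empower", "drive home"]))
# ['empowering', 'driving home']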
373,031
def swipe_left(self, width: int = 1080, length: int = 1920) -> None: self.swipe(0.8*width, 0.5*length, 0.2*width, 0.5*length)
Swipe left.
373,032
def get_printoptions():
    warnings.warn(..., DeprecationWarning)  # message elided in the source
    d = {}
    # The option keys are reconstructed from the docstring; the 'print.'
    # prefix is an assumption.
    for key in ['short', 'xml', 'codestream']:
        d[key] = _options['print.' + key]
    return d
Return the current print options.

Returns
-------
dict
    Dictionary of current print options with keys

    - short : bool
    - xml : bool
    - codestream : bool

    For a full description of these options, see `set_printoptions`.

See also
--------
set_printoptions
373,033
def drop_zero_priors(self):
    self.term_doc_mat = self.term_doc_mat.remove_terms(
        self.priors[self.priors == 0].index
    )
    self._reindex_priors()
    return self
Returns ------- PriorFactory
373,034
def handle_stats(name, obj):
    # The dictionary keys were elided in the source. The first four are the
    # standard GitPython ``Stats.total`` keys; the key for ``obj.files`` is
    # a guess.
    return {'insertions': obj.total['insertions'],
            'deletions': obj.total['deletions'],
            'lines': obj.total['lines'],
            'files': obj.total['files'],
            'file_stats': obj.files}
Stats object handler. :param name: Unused String :param obj: GitPython Stats :return: Dictionary of attributes.
373,035
def draw_peaks_inverted(self, x, peaks, line_color):
    y1 = self.image_height * 0.5 - peaks[0] * (self.image_height - 4) * 0.5
    y2 = self.image_height * 0.5 - peaks[1] * (self.image_height - 4) * 0.5
    if self.previous_y and x < self.image_width - 1:
        if y1 < y2:
            self.draw.line((x, 0, x, y1), line_color)
            self.draw.line((x, self.image_height, x, y2), line_color)
        else:
            self.draw.line((x, 0, x, y2), line_color)
            self.draw.line((x, self.image_height, x, y1), line_color)
    else:
        self.draw.line((x, 0, x, self.image_height), line_color)
    self.draw_anti_aliased_pixels(x, y1, y2, line_color)
    self.previous_x, self.previous_y = x, y1
Draw 2 inverted peaks at x
373,036
def load_from_db(cls, callback_etat=print, out=None):
    dic = cls._load_remote_db(callback_etat)
    callback_etat("Chargement...", 2, 3)
    if out is None:
        return cls(dic)
    cls.__init__(out, datas=dic)
Launches the data fetching, then loads the received data. The method _load_remote_db should be overridden. If out is given, the data is set on it instead of returning a new base object.
373,037
def public_pair_to_sec(public_pair, compressed=True):
    x_str = to_bytes_32(public_pair[0])
    if compressed:
        return int2byte((2 + (public_pair[1] & 1))) + x_str
    y_str = to_bytes_32(public_pair[1])
    # The elided prefix is reconstructed as b'\4', the standard SEC marker
    # for an uncompressed public key.
    return b'\4' + x_str + y_str
Convert a public pair (a pair of bignums corresponding to a public key) to the gross internal sec binary format used by OpenSSL.
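A hypothetical standalone sketch of the same SEC encoding using plain ints (int.to_bytes stands in for to_bytes_32; the prefix bytes 0x02/0x03/0x04 are the standard SEC markers):

def sec_encode(x, y, compressed=True):
    x_bytes = x.to_bytes(32, "big")
    if compressed:
        # 0x02 for even y, 0x03 for odd y
        return bytes([2 + (y & 1)]) + x_bytes
    return b"\x04" + x_bytes + y.to_bytes(32, "big")

print(sec_encode(5, 8).hex()[:2])  # '02' -- even y selects the 0x02 prefix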
373,038
def put(self, key, value):
    self._cache[key] = value
    self._order.push(key)
    self._size += 1
>>> c = LRUCache()
>>> c.put(1, 'one')
>>> c.get(1)
'one'
>>> c.size()
1
>>> c.put(2, 'two')
>>> c.put(3, 'three')
>>> c.put(4, 'four')
>>> c.put(5, 'five')
>>> c.get(5)
'five'
>>> c.size()
5
373,039
def unpackexe(exefile, destdir):
    nullfd = open(os.devnull, "w")
    exefile = cygpath(os.path.abspath(exefile))
    try:
        # The elided 7z subcommand is almost certainly 'x' (extract with paths).
        check_call([SEVENZIP, 'x', exefile], cwd=destdir,
                   stdout=nullfd, preexec_fn=_noumask)
    except Exception:
        log.exception("Error unpacking exe %s to %s", exefile, destdir)
        raise
    nullfd.close()
Unpack the given exefile into destdir, using 7z
373,040
def j2(x):
    to_return = 2. / (x + 1e-15) * j1(x) - j0(x)
    to_return[x == 0] = 0
    return to_return
A fast j2 defined in terms of other special functions
373,041
def find_service_by_id(self, service_id):
    service_id_key = ...  # key name elided in the source
    service_id = str(service_id)
    for service in self._services:
        if service_id_key in service.values and str(
                service.values[service_id_key]) == service_id:
            return service
    try:
        int(service_id)
        return None
    except ValueError:
        pass
    return self.find_service_by_type(service_id)
Get service for a given service_id. :param service_id: Service id, str :return: Service
373,042
def _write(self, cmd, *datas):
    cmd = Command(write=cmd)
    cmd.write(self._transport, self._protocol, *datas)
Helper function to simplify writing.
373,043
def instance_query_movie_ids(self) -> List[str]:
    completions_with_desc = []
    for movie_id in utils.natural_sort(self.MOVIE_DATABASE_IDS):
        if movie_id in self.MOVIE_DATABASE:
            movie_entry = self.MOVIE_DATABASE[movie_id]
            # The elided key is assumed to be 'title'.
            completions_with_desc.append(
                argparse_completer.CompletionItem(movie_id, movie_entry['title']))
    self.matches_sorted = True
    return completions_with_desc
Demonstrates showing tabular hinting of tab completion information
373,044
def do_execute(self):
    for s in self.resolve_option("strings"):
        self._output.append(Token(s))
    return None
The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str
373,045
def ReadPostingLists(self, keywords, start_time=FIRST_TIMESTAMP, end_time=LAST_TIMESTAMP, last_seen_map=None): return data_store.DB.IndexReadPostingLists( self.urn, keywords, start_time, end_time, last_seen_map=last_seen_map)
Finds all objects associated with any of the keywords. Args: keywords: A collection of keywords that we are interested in. start_time: Only considers keywords added at or after this point in time. end_time: Only considers keywords at or before this point in time. last_seen_map: If present, is treated as a dict and populated to map pairs (keyword, name) to the timestamp of the latest connection found. Returns: A dict mapping each keyword to a set of relevant names.
373,046
def FinalizeTaskStorage(self, task):
    # Both IOError messages were elided in the source and are marked
    # with `...`.
    if self._storage_type != definitions.STORAGE_TYPE_SESSION:
        raise IOError(...)
    storage_file_path = self._GetTaskStorageFilePath(task)
    processed_storage_file_path = self._GetProcessedStorageFilePath(task)
    try:
        os.rename(storage_file_path, processed_storage_file_path)
    except OSError as exception:
        raise IOError((...).format(storage_file_path, exception))
Finalizes a processed task storage. Moves the task storage file from its temporary directory to the processed directory. Args: task (Task): task. Raises: IOError: if the storage type is not supported or if the storage file cannot be renamed. OSError: if the storage type is not supported or if the storage file cannot be renamed.
373,047
def get_group_name_nonetrick(self, group_name=None):
    groups = self.m["groups"]
    if len(groups) == 0:
        raise Exception("Cannot find any groups in metadata")
    if not group_name:
        if len(groups) > 1:
            raise Exception("We have more than one payment group in metadata, "
                            "so group_name should be specified")
        return groups[0]["group_name"]
    return group_name
Used by all getter functions: in the case of a single payment group, group_name can be None
373,048
def _gcs_list(args, _):
    # Several string literals were elided in the source and are marked with
    # `...`; the argument keys are reconstructed from the variable names,
    # and the trailing else-branch was truncated.
    target = args['target']
    project = args['project']
    if target is None:
        return _gcs_list_buckets(project, ...)
    bucket_name, key = google.datalab.storage._bucket.parse_name(target)
    if bucket_name is None:
        raise Exception(... % target)
    if target:
        if not re.search(..., target):
            key = key.strip() + ... if key else ...
        if project:
            for bucket in google.datalab.storage.Buckets(_make_context(project)):
                if bucket.name == bucket_name:
                    break
            else:
                raise Exception(... % (target, project))
        else:
            bucket = google.datalab.storage.Bucket(bucket_name)
        if bucket.exists():
            return _gcs_list_keys(bucket, key)
        else:
            raise Exception(... % target)
    else:
        pass  # truncated in the source
List the buckets or the contents of a bucket. This command is a bit different in that we allow wildcards in the bucket name and will list the buckets that match.
373,049
def vector_poly_data(orig, vec):
    # Elided exception messages are marked with `...`; the array names
    # 'vectors' and 'mag' and the np.ones dtype are assumptions. The
    # contiguity test is also corrected: the source re-copied the array when
    # it *was* contiguous, which inverts the intent.
    if not isinstance(orig, np.ndarray):
        orig = np.asarray(orig)
    if not isinstance(vec, np.ndarray):
        vec = np.asarray(vec)
    if orig.ndim != 2:
        orig = orig.reshape((-1, 3))
    elif orig.shape[1] != 3:
        raise Exception(...)
    if vec.ndim != 2:
        vec = vec.reshape((-1, 3))
    elif vec.shape[1] != 3:
        raise Exception(...)
    vpts = vtk.vtkPoints()
    vpts.SetData(numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
    npts = orig.shape[0]
    cells = np.hstack((np.ones((npts, 1), ctypes.c_int64),
                       np.arange(npts).reshape((-1, 1))))
    if cells.dtype != ctypes.c_int64 or not cells.flags.c_contiguous:
        cells = np.ascontiguousarray(cells, ctypes.c_int64)
    cells = np.reshape(cells, (2 * npts))
    vcells = vtk.vtkCellArray()
    vcells.SetCells(npts, numpy_to_vtkIdTypeArray(cells, deep=True))
    pdata = vtk.vtkPolyData()
    pdata.SetPoints(vpts)
    pdata.SetVerts(vcells)
    name = 'vectors'
    vtkfloat = numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
    vtkfloat.SetName(name)
    pdata.GetPointData().AddArray(vtkfloat)
    pdata.GetPointData().SetActiveVectors(name)
    name = 'mag'
    scalars = (vec * vec).sum(1)**0.5
    vtkfloat = numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
    vtkfloat.SetName(name)
    pdata.GetPointData().AddArray(vtkfloat)
    pdata.GetPointData().SetActiveScalars(name)
    return vtki.PolyData(pdata)
Creates a vtkPolyData object composed of vectors
373,050
def same_log10_order_of_magnitude(x, delta=0.1):
    dmin = np.log10(np.min(x) * (1 - delta))
    dmax = np.log10(np.max(x) * (1 + delta))
    return np.floor(dmin) == np.floor(dmax)
Return true if range is approximately in same order of magnitude

For example these sequences are in the same order of magnitude:

- [1, 8, 5]     # [1, 10)
- [35, 20, 80]  # [10, 100)
- [232, 730]    # [100, 1000)

Parameters
----------
x : array-like
    Values in base 10. Must be size 2 and ``rng[0] <= rng[1]``.
delta : float
    Fuzz factor for approximation. It is multiplicative.
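A quick numerical check of the helper:

import numpy as np
print(same_log10_order_of_magnitude([35, 80]))  # True -- both in [10, 100)
print(same_log10_order_of_magnitude([8, 12]))   # False -- range crosses 10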
373,051
def parse_network_osm_query(data):
    # The elided keys are reconstructed from the Overpass JSON schema:
    # 'elements', 'type', 'node'/'way' and the 'id' index column; the
    # waynodes index name and the error message are assumptions.
    if len(data['elements']) == 0:
        raise RuntimeError('OSM query returned no elements')
    nodes = []
    ways = []
    waynodes = []
    for e in data['elements']:
        if e['type'] == 'node':
            nodes.append(process_node(e))
        elif e['type'] == 'way':
            w, wn = process_way(e)
            ways.append(w)
            waynodes.extend(wn)
    nodes = pd.DataFrame.from_records(nodes, index='id')
    ways = pd.DataFrame.from_records(ways, index='id')
    waynodes = pd.DataFrame.from_records(waynodes, index='way_id')
    return (nodes, ways, waynodes)
Convert OSM query data to DataFrames of ways and way-nodes. Parameters ---------- data : dict Result of an OSM query. Returns ------- nodes, ways, waynodes : pandas.DataFrame
373,052
def help(self, *args):
    if len(args) == 0:
        return help_msg
    which = args[0].lower()
    # The elided branch strings are reconstructed from the docstring:
    # help('ginga', method) and help('channel', chname, method).
    if which == 'ginga':
        method = args[1]
        _method = getattr(self.fv, method)
        return _method.__doc__
    elif which == 'channel':
        chname = args[1]
        method = args[2]
        chinfo = self.fv.get_channel(chname)
        _method = getattr(chinfo.viewer, method)
        return _method.__doc__
    else:
        return ("Please use help('ginga', method) or "
                "help('channel', chname, method)")
Get help for a remote interface method. Examples -------- help('ginga', `method`) name of the method for which you want help help('channel', `chname`, `method`) name of the method in the channel for which you want help Returns ------- help : string a help message
373,053
def last(iterable, default=_marker):
    try:
        try:
            return iterable[-1]
        except (TypeError, AttributeError, KeyError):
            return deque(iterable, maxlen=1)[0]
    except IndexError:
        if default is _marker:
            # The message was elided in the source; this wording is an
            # assumption based on the docstring.
            raise ValueError('last() was called on an empty iterable, '
                             'and no default was provided.')
        return default
Return the last item of *iterable*, or *default* if *iterable* is empty. >>> last([0, 1, 2, 3]) 3 >>> last([], 'some default') 'some default' If *default* is not provided and there are no items in the iterable, raise ``ValueError``.
373,054
def from_bytes_list(cls, function_descriptor_list):
    assert isinstance(function_descriptor_list, list)
    if len(function_descriptor_list) == 0:
        return FunctionDescriptor.for_driver_task()
    elif (len(function_descriptor_list) == 3
            or len(function_descriptor_list) == 4):
        module_name = ensure_str(function_descriptor_list[0])
        class_name = ensure_str(function_descriptor_list[1])
        function_name = ensure_str(function_descriptor_list[2])
        if len(function_descriptor_list) == 4:
            return cls(module_name, function_name, class_name,
                       function_descriptor_list[3])
        else:
            return cls(module_name, function_name, class_name)
    else:
        raise Exception(
            "Invalid input for FunctionDescriptor.from_bytes_list")
Create a FunctionDescriptor instance from list of bytes. This function is used to create the function descriptor from backend data. Args: cls: Current class which is required argument for classmethod. function_descriptor_list: list of bytes to represent the function descriptor. Returns: The FunctionDescriptor instance created from the bytes list.
373,055
def reset(cls, newObjs):
    conn = cls.objects._get_new_connection()
    transaction = conn.pipeline()
    # The Lua script, the join separator and the key-pattern fragment passed
    # to eval() were elided in the source and are marked with `...`.
    transaction.eval(
        ... % ((...).join([INDEXED_REDIS_PREFIX, cls.KEY_NAME, ...]),), 0)
    saver = IndexedRedisSave(cls)
    nextID = 1
    for newObj in newObjs:
        saver.save(newObj, False, forceID=nextID, conn=transaction)
        nextID += 1
    transaction.set(saver._get_next_id_key(), nextID)
    transaction.execute()
    return list(range(1, nextID, 1))
reset - Remove all stored data associated with this model (i.e. all objects of this type), and then save all the provided objects in #newObjs , all in one atomic transaction. Use this method to move from one complete set of objects to another, where any querying applications will only see the complete before or complete after. @param newObjs list<IndexedRedisModel objs> - A list of objects that will replace the current dataset To just replace a specific subset of objects in a single transaction, you can do MyModel.saver.save(objs) and just the objs in "objs" will be inserted/updated in one atomic step. This method, on the other hand, will delete all previous objects and add the newly provided objects in a single atomic step, and also reset the primary key ID generator @return list<int> - The new primary keys associated with each object (same order as provided #newObjs list)
373,056
def make_wsgi_app(registry=REGISTRY):
    # Elided literals reconstructed from prometheus_client conventions:
    # 'QUERY_STRING', 'HTTP_ACCEPT', 'name[]', '200 OK' and 'Content-Type'.
    def prometheus_app(environ, start_response):
        params = parse_qs(environ.get('QUERY_STRING', ''))
        r = registry
        encoder, content_type = choose_encoder(environ.get('HTTP_ACCEPT'))
        if 'name[]' in params:
            r = r.restricted_registry(params['name[]'])
        output = encoder(r)
        status = str('200 OK')
        headers = [(str('Content-Type'), content_type)]
        start_response(status, headers)
        return [output]

    return prometheus_app
Create a WSGI app which serves the metrics from a registry.
373,057
def maskMatch(self, mask):
    if len(mask) > len(self.sequenceData):
        return False
    lim = len(mask)
    for i in range(0, lim):
        if mask[i] == "N" or mask[i] == "n":
            continue
        if mask[i] != self.sequenceData[i]:
            return False
    return True
Determine whether this sequence matches the given mask. :param mask: string to match against. Ns in the mask are considered to match anything in the sequence -- all other chars must match exactly. :return: True if the mask matches at all places, otherwise false
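A hypothetical free-function sketch of the same mask test on plain strings:

def mask_match(sequence, mask):
    if len(mask) > len(sequence):
        return False
    return all(m in "Nn" or m == s for m, s in zip(mask, sequence))

print(mask_match("ACGTAC", "ACNT"))  # True -- N matches anything
print(mask_match("ACGTAC", "AGNT"))  # False -- G != C at position 1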
373,058
def initialize(self, emt_id, emt_pass):
    self._emt_id = emt_id
    self._emt_pass = emt_pass
    self.bus = BusApi(self)
    self.geo = GeoApi(self)
    self.parking = ParkingApi(self)
Manual initialization of the interface attributes.

This is useful when the interface must be declared but initialized later
on with parsed configuration values.

Args:
    emt_id (str): ID given by the server upon registration
    emt_pass (str): Token given by the server upon registration
373,059
def estimate(self, observations, weights):
    N, M = self._output_probabilities.shape
    K = len(observations)
    self._output_probabilities = np.zeros((N, M))
    if self.__impl__ == self.__IMPL_C__:
        for k in range(K):
            dc.update_pout(observations[k], weights[k],
                           self._output_probabilities, dtype=config.dtype)
    elif self.__impl__ == self.__IMPL_PYTHON__:
        for k in range(K):
            for o in range(M):
                times = np.where(observations[k] == o)[0]
                self._output_probabilities[:, o] += np.sum(weights[k][times, :], axis=0)
    else:
        raise RuntimeError(... + str(self.__impl__) + ...)  # message elided in the source
    self._output_probabilities /= np.sum(self._output_probabilities, axis=1)[:, None]
Maximum likelihood estimation of output model given the observations and weights

Parameters
----------
observations : [ ndarray(T_k) ] with K elements
    A list of K observation trajectories, each having length T_k
weights : [ ndarray(T_k, N) ] with K elements
    A list of K weight matrices, each having length T_k and containing the
    probability of any of the states in the given time step

Examples
--------
Generate an observation model and samples from each state.

>>> import numpy as np
>>> ntrajectories = 3
>>> nobs = 1000
>>> B = np.array([[0.5,0.5],[0.1,0.9]])
>>> output_model = DiscreteOutputModel(B)

>>> from scipy import stats
>>> nobs = 1000
>>> obs = np.empty(nobs, dtype = object)
>>> weights = np.empty(nobs, dtype = object)

>>> gens = [stats.rv_discrete(values=(range(len(B[i])), B[i])) for i in range(B.shape[0])]
>>> obs = [gens[i].rvs(size=nobs) for i in range(B.shape[0])]
>>> weights = [np.zeros((nobs, B.shape[1])) for i in range(B.shape[0])]
>>> for i in range(B.shape[0]): weights[i][:, i] = 1.0

Update the observation model parameters by a maximum-likelihood fit.

>>> output_model.estimate(obs, weights)
373,060
def add_dirrecord(self, rec):
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError()
    self.dirrecords.append(rec)
A method to set the Directory Record associated with this Boot Catalog. Parameters: rec - The DirectoryRecord object to associate with this Boot Catalog. Returns: Nothing.
373,061
def p_matcharglist(p):
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[0] = p[1]
        p[0].append(p[3])
matcharglist : matcharg | matcharglist COMMA matcharg
373,062
def check_string(value, min_length=None, max_length=None, pattern=None):
    if type(value) not in [str, unicode]:
        return False
    if min_length and len(value) < min_length:
        return False
    if max_length and len(value) > max_length:
        return False
    if pattern and not re.match(pattern, value):
        return False
    return True
verify that a string has a particular size and conforms to a particular alphabet

>>> check_string(1)
False
>>> check_string(None)
False
>>> check_string(True)
False
>>> check_string({})
False
>>> check_string([])
False
>>> check_string((1,2))
False
>>> check_string('abc')
True
>>> check_string('')
True
>>> check_string(u'')
True
>>> check_string('abc', min_length=0, max_length=3)
True
>>> check_string('abc', min_length=3, max_length=3)
True
>>> check_string('abc', min_length=4, max_length=5)
False
>>> check_string('abc', min_length=0, max_length=2)
False
>>> check_string('abc', pattern='^abc$')
True
>>> check_string('abc', pattern='^abd$')
False
373,063
def list_pools(**kwargs):
    conn = __get_conn(**kwargs)
    try:
        return [pool.name() for pool in conn.listAllStoragePools()]
    finally:
        conn.close()
List all storage pools. :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.list_pools
373,064
def get_extra_info(self, name, default=None):
    if name in self._handlers:
        return self._handlers[name](self)
    return self._transport.get_extra_info(name, default=default)
Called by the client protocol to return optional transport
information. Information requests not recognized by the
``FramerProtocol`` are passed on to the underlying transport. The
values of ``name`` recognized directly by ``FramerProtocol`` are:

=============== ============================================
Value           Description
=============== ============================================
send_framer     The active framer for the send direction.
send_state      The state for the send framer.
recv_framer     The active framer for the receive direction.
recv_state      The state for the receive framer.
recv_buf        The current receive buffer.
recv_paused     ``True`` if reading is paused.
client_protocol The client ``FramedProtocol``.
transport       The underlying transport.
=============== ============================================

:param name: A string representing the piece of
             transport-specific information to get.
:param default: The value to return if the information doesn't
                exist.

:returns: The requested data.
373,065
def timestamp(self):
    "Return POSIX timestamp as float"
    if self._tzinfo is None:
        return _time.mktime((self.year, self.month, self.day,
                             self.hour, self.minute, self.second,
                             -1, -1, -1)) + self.microsecond / 1e6
    else:
        return (self - _EPOCH).total_seconds()
Return POSIX timestamp as float
373,066
def add_filter(self, name, query, color=None, item_order=None):
    # The dict keys were elided and are reconstructed from the parameter
    # names; the command name 'filter_add' is an assumption.
    args = {
        'name': name,
        'query': query,
        'color': color,
        'item_order': item_order
    }
    _perform_command(self, 'filter_add', args)
    return self.get_filter(name)
Create a new filter. .. warning:: Requires Todoist premium. :param name: The name of the filter. :param query: The query to search for. :param color: The color of the filter. :param item_order: The filter's order in the filter list. :return: The newly created filter. :rtype: :class:`pytodoist.todoist.Filter` >>> from pytodoist import todoist >>> user = todoist.login('john.doe@gmail.com', 'password') >>> overdue_filter = user.add_filter('Overdue', todoist.Query.OVERDUE)
373,067
def export(self, name, columns, points):
    # The metric dict keys were elided; host/service/metric match the
    # fields a Riemann client expects.
    for i in range(len(columns)):
        if not isinstance(points[i], Number):
            continue
        data = {'host': self.hostname,
                'service': name + " " + columns[i],
                'metric': points[i]}
        logger.debug(data)
        try:
            self.client.send(data)
        except Exception as e:
            logger.error("Cannot export stats to Riemann (%s)" % e)
Write the points in Riemann.
373,068
def askyesno(title=None, message=None, **options): return psidialogs.ask_yes_no(title=title, message=message)
Original doc: Ask a question; return true if the answer is yes
373,069
def discover(email, credentials):
    # The log messages were elided in the source and are marked with `...`.
    log.debug(..., email)
    if not isinstance(credentials, Credentials):
        raise ValueError("%r must be a Credentials instance" % credentials)
    domain = get_domain(email)
    autodiscover_key = (domain, credentials)
    log.debug(...)
    with _autodiscover_cache_lock:
        return discover(email=email, credentials=credentials)
Performs the autodiscover dance and returns the primary SMTP address of the account and a Protocol on success. The autodiscover and EWS server might not be the same, so we use a different Protocol to do the autodiscover request, and return a hopefully-cached Protocol to the callee.
373,070
def alloc(self):
    if not self._free:
        self._expand()
    id = self._free.pop()
    self._used.add(id)
    return id
Allocate an ID value and return it. Raises: ValueError: Out of capacity in ID pool.
373,071
def _validate(self):
    # The elided box identifiers are reconstructed from the JP2 format:
    # brand 'jp2 ', header box 'jp2h', colour specification box 'colr'.
    if not isinstance(self.box[1], FileTypeBox):
        msg = "{filename} does not contain a valid File Type box."
        msg = msg.format(filename=self.filename)
        raise IOError(msg)
    ftyp = self.box[1]
    if ftyp.brand == 'jp2 ':
        jp2h = [box for box in self.box if box.box_id == 'jp2h'][0]
        colrs = [box for box in jp2h.box if box.box_id == 'colr']
        for colr in colrs:
            if colr.method not in (core.ENUMERATED_COLORSPACE,
                                   core.RESTRICTED_ICC_PROFILE):
                msg = ("Color Specification box method must specify "
                       "either an enumerated colorspace or a restricted "
                       "ICC profile if the file type box brand is 'jp2 '.")
                warnings.warn(msg, UserWarning)
Validate the JPEG 2000 outermost superbox. These checks must be done at a file level.
373,072
def get_asset_lookup_session(self):
    if not self.supports_asset_lookup():
        raise errors.Unimplemented()
    return sessions.AssetLookupSession(runtime=self._runtime)
Gets the ``OsidSession`` associated with the asset lookup service. return: (osid.repository.AssetLookupSession) - the new ``AssetLookupSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_asset_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_lookup()`` is ``true``.*
373,073
def aggregate(self, search):
    for f, facet in iteritems(self.facets):
        agg = facet.get_aggregation()
        agg_filter = MatchAll()
        for field, filter in iteritems(self._filters):
            if f == field:
                continue
            agg_filter &= filter
        # The '_filter_' prefix and 'filter' bucket type follow the
        # elasticsearch-dsl FacetedSearch convention.
        search.aggs.bucket(
            '_filter_' + f, 'filter', filter=agg_filter
        ).bucket(f, agg)
Add aggregations representing the facets selected, including potential filters.
373,074
def bbox2wktpolygon(bbox):
    minx = float(bbox[0])
    miny = float(bbox[1])
    maxx = float(bbox[2])
    maxy = float(bbox[3])
    # The elided template is the standard five-point WKT polygon ring,
    # matching the ten substituted coordinates.
    return 'POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' \
        % (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
Return OGC WKT Polygon of a simple bbox list of strings
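For example, with the template reconstructed above:

print(bbox2wktpolygon(['-180', '-90', '180', '90']))
# POLYGON((-180.0 -90.0, -180.0 90.0, 180.0 90.0, 180.0 -90.0, -180.0 -90.0))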
373,075
def on(message):
    def decorator(function):
        try:
            function._callback_messages.append(message)
        except AttributeError:
            function._callback_messages = [message]
        return function
    return decorator
Decorator that registers a class method as a callback for a message.
373,076
def list_logtail_config(self, project_name, offset=0, size=100):
    if int(size) == -1 or int(size) > MAX_LIST_PAGING_SIZE:
        return list_more(self.list_logtail_config, int(offset), int(size),
                         MAX_LIST_PAGING_SIZE, project_name)
    headers = {}
    params = {}
    resource = "/configs"
    # Parameter key names reconstructed from the function's arguments.
    params['offset'] = str(offset)
    params['size'] = str(size)
    (resp, header) = self._send("GET", project_name, None, resource,
                                params, headers)
    return ListLogtailConfigResponse(resp, header)
list logtail config name in a project

An unsuccessful operation will raise a LogException.

:type project_name: string
:param project_name: the Project name

:type offset: int
:param offset: the offset of all config names

:type size: int
:param size: the max return names count, -1 means all

:return: ListLogtailConfigResponse

:raise: LogException
373,077
def lv_unpack(txt):
    # The separator was elided; the length:value format implies ':'.
    txt = txt.strip()
    res = []
    while txt:
        l, v = txt.split(':', 1)
        res.append(v[:int(l)])
        txt = v[int(l):]
    return res
Deserializes a string of the length:value format :param txt: The input string :return: a list of values
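A quick round-trip under the ':' separator assumption:

print(lv_unpack("3:abc2:de"))
# ['abc', 'de'] -- each chunk is '<length>:<value>', parsed left to right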
373,078
def conditional_entropy(X, Y, base=2): return joint_entropy(X, Y, base=base) - entropy(Y, base=base)
Calculates the conditional entropy, H(X|Y), in the given base Parameters ---------- X: array-like (# samples) An array of values for which to compute the conditional entropy Y: array-like (# samples) An array of values for which to compute the conditional entropy base: integer (default: 2) The base in which to calculate conditional entropy Returns ---------- conditional_entropy: float The conditional entropy calculated according to the equation H(X|Y) = H(X,Y) - H(Y)
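A tiny worked check of the identity H(X|Y) = H(X,Y) - H(Y), assuming entropy and joint_entropy are the plug-in estimators defined alongside this function. With X and Y independent fair coin flips, H(X,Y) = 2 bits and H(Y) = 1 bit, so H(X|Y) should come out to 1 bit:

X = [0, 0, 1, 1]
Y = [0, 1, 0, 1]
print(conditional_entropy(X, Y))  # 1.0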
373,079
def get_mobilenet(multiplier, pretrained=False, ctx=cpu(),
                  root=os.path.join(base.data_dir(), 'models'), **kwargs):
    # The default root folder 'models' is reconstructed from the docstring;
    # the version-suffix literals follow the Gluon model zoo convention.
    net = MobileNet(multiplier, **kwargs)
    if pretrained:
        from ..model_store import get_model_file
        version_suffix = '{0:.2f}'.format(multiplier)
        if version_suffix in ('1.00', '0.50'):
            version_suffix = version_suffix[:-1]
        net.load_parameters(
            get_model_file('mobilenet%s' % version_suffix, root=root), ctx=ctx)
    return net
r"""MobileNet model from the `"MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications" <https://arxiv.org/abs/1704.04861>`_ paper. Parameters ---------- multiplier : float The width multiplier for controling the model size. Only multipliers that are no less than 0.25 are supported. The actual number of channels is equal to the original channel size multiplied by this multiplier. pretrained : bool, default False Whether to load the pretrained weights for model. ctx : Context, default CPU The context in which to load the pretrained weights. root : str, default $MXNET_HOME/models Location for keeping the model parameters.
373,080
def parse_date(self, item, field_name, source_name):
    now = datetime.now().date()
    val = self.get_value(item, source_name)
    week_day, day = val.split()
    day = int(day)
    if now.day < day:
        if now.month == 1:
            now = now.replace(month=12, year=now.year - 1)
        else:
            now = now.replace(month=now.month - 1)
    now = now.replace(day=day)
    return now
Converts a date in the format: Thu 03. As only the day is provided, it tries to find the best match based on the current date, assuming that dates are in the past.
373,081
def _ws_on_open(self, ws: websocket.WebSocketApp):
    # The payload keys follow the Discord gateway IDENTIFY schema; the
    # elided property values and log message are marked with `...`.
    payload = {
        'op': WebSocketEvent.IDENTIFY.value,
        'd': {
            'token': self.token,
            'properties': {
                '$os': sys.platform,
                '$browser': ...,
                '$device': ...,
                '$referrer': ...,
                '$referring_domain': ...
            },
            'compress': True,
            'large_threshold': 250
        }
    }
    self.logger.debug(...)
    ws.send(json.dumps(payload))
    self.connected = True
Callback for sending the initial authentication data This "payload" contains the required data to authenticate this websocket client as a suitable bot connection to the Discord websocket. Args: ws: websocket connection
373,082
def _write_model_stats(self, iteration: int) -> None:
    "Writes gradient statistics to Tensorboard."
    gen_mode = self.learn.gan_trainer.gen_mode
    if gen_mode and not self.gen_stats_updated:
        self._write_gen_model_stats(iteration=iteration)
    if not gen_mode and not self.crit_stats_updated:
        self._write_critic_model_stats(iteration=iteration)
Writes gradient statistics to Tensorboard.
373,083
def get_last(self, table=None):
    if table is None:
        table = self.main_table
    query = ... % table  # SQL template elided in the source
    return self.own_cursor.execute(query).fetchone()
Just the last entry.
373,084
def sort_segment_points(Aps, Bps):
    mid = []
    j = 0
    mid.append(Aps[0])
    for i in range(len(Aps) - 1):
        dist = distance_tt_point(Aps[i], Aps[i + 1])
        for m in range(j, len(Bps)):
            distm = distance_tt_point(Aps[i], Bps[m])
            if dist > distm:
                direction = dot(
                    normalize(line(Aps[i].gen2arr(), Aps[i + 1].gen2arr())),
                    normalize(Bps[m].gen2arr()))
                if direction > 0:
                    j = m + 1
                    mid.append(Bps[m])
                    break
        mid.append(Aps[i + 1])
    for m in range(j, len(Bps)):
        mid.append(Bps[m])
    return mid
Takes two line segments and sorts all their points, so that they form a continuous path Args: Aps: Array of tracktotrip.Point Bps: Array of tracktotrip.Point Returns: Array with points ordered
373,085
def query_statements(self, query):
    params = {}
    param_keys = [
        "registration",
        "since",
        "until",
        "limit",
        "ascending",
        "related_activities",
        "related_agents",
        "format",
        "attachments",
    ]
    for k, v in query.iteritems():
        if v is not None:
            if k == "agent":
                params[k] = v.to_json(self.version)
            elif k == "verb" or k == "activity":
                params[k] = v.id
            elif k in param_keys:
                params[k] = v
    request = HTTPRequest(
        method="GET",
        resource="statements"
    )
    request.query_params = params
    lrs_response = self._send_request(request)
    if lrs_response.success:
        lrs_response.content = StatementsResult.from_json(lrs_response.data)
    return lrs_response
Query the LRS for statements with specified parameters :param query: Dictionary of query parameters and their values :type query: dict :return: LRS Response object with the returned StatementsResult object as content :rtype: :class:`tincan.lrs_response.LRSResponse` .. note:: Optional query parameters are\n **statementId:** (*str*) ID of the Statement to fetch **voidedStatementId:** (*str*) ID of the voided Statement to fetch **agent:** (*Agent* |*Group*) Filter to return Statements for which the specified Agent or Group is the Actor **verb:** (*Verb id IRI*) Filter to return Statements matching the verb id **activity:** (*Activity id IRI*) Filter to return Statements for which the specified Activity is the Object **registration:** (*UUID*) Filter to return Statements matching the specified registration ID **related_activities:** (*bool*) Include Statements for which the Object, Context Activities or any Sub-Statement properties match the specified Activity **related_agents:** (*bool*) Include Statements for which the Actor, Object, Authority, Instructor, Team, or any Sub-Statement properties match the specified Agent **since:** (*datetime*) Filter to return Statements stored since the specified datetime **until:** (*datetime*) Filter to return Statements stored at or before the specified datetime **limit:** (*positive int*) Allow <limit> Statements to be returned. 0 indicates the maximum supported by the LRS **format:** (*str* {"ids"|"exact"|"canonical"}) Manipulates how the LRS handles importing and returning the statements **attachments:** (*bool*) If true, the LRS will use multipart responses and include all attachment data per Statement returned. Otherwise, application/json is used and no attachment information will be returned **ascending:** (*bool*) If true, the LRS will return results in ascending order of stored time (oldest first)
373,086
def check_state(self):
    if self.dpad:
        x_state, y_state = self.handle_dpad()
    else:
        x_state, y_state = self.handle_abs()
    # The first element of each tuple (an event-name string) was elided in
    # the source and is marked with `...`.
    new_state = set((
        x_state,
        y_state,
        (..., 0x130, int(self.microbit.button_a.is_pressed())),
        (..., 0x131, int(self.microbit.button_b.is_pressed())),
        (..., 0x13a, int(self.microbit.pin0.is_touched())),
        (..., 0x133, int(self.microbit.pin1.is_touched())),
        (..., 0x134, int(self.microbit.pin2.is_touched())),
    ))
    events = new_state - self.state
    self.state = new_state
    return events
Tracks differences in the device state.
373,087
def text(self):
    # Most string literals (genre names, field keys, separators and
    # affixes) were elided in the source; gaps are marked with `...`, the
    # structure is reconstructed on a best-effort basis, and the final
    # else-branch was truncated.
    genre = getattr(self.genre, ..., self.genre)
    pages_at_end = genre in (..., ..., ..., ..., ...)
    thesis = genre in (..., ...)
    if self.get(...):
        editors = self[...]
        affix = ... if ... in editors or ... in editors else ...
        editors = " %s (%s.)" % (editors, affix)
    else:
        editors = None
    res = [self.get(..., editors), self.get(..., ...)]
    if genre == ...:
        res.append(self.get_with_translation(...) or
                   self.get_with_translation(...))
        series = (...).join(filter(None, [self.get(...), self.get(...)]))
        if series:
            res.append(... % series)
    elif genre == ...:
        res.append(self.get_with_translation(...) or self.get(...))
    else:
        res.append(self.get_with_translation(...))
    if genre == ...:
        atom = (...).join(filter(None, [self.get(...), self.get(...)]))
        if self.get(...):
            atom += ... % self[...]
        res.append(atom)
        res.append(self.get(...))
    elif genre == ... or genre == ...:
        prefix = ...
        atom = ...
        if editors:
            atom += editors
        if self.get(...):
            if atom:
                atom += ...
            atom += " %s" % self.get_with_translation(...)
        if self.get(...):
            atom += ", %s" % self[...]
        res.append(prefix + atom)
    else:
        pass  # truncated in the source
Linearize the bib source according to the rules of the unified style. Book: author. year. booktitle. (series, volume.) address: publisher. Article: author. year. title. journal volume(issue). pages. Incollection: author. year. title. In editor (ed.), booktitle, pages. address: publisher. .. seealso:: http://celxj.org/downloads/UnifiedStyleSheet.pdf https://github.com/citation-style-language/styles/blob/master/\ unified-style-linguistics.csl
373,088
def register_method(self, func):
    # Attribute names, log messages and error messages were elided in the
    # source; the reconstructions below are assumptions consistent with the
    # docstring (@rpc_method decorator, reserved 'rpc.' prefix).
    if not getattr(func, 'modernrpc_enabled', False):
        raise ImproperlyConfigured(
            'Method {} is not decorated with @rpc_method'.format(func.__name__))
    name = getattr(func, 'modernrpc_name', func.__name__)
    logger.debug('Registering RPC method {}'.format(name))
    if name.startswith('rpc.'):
        raise ImproperlyConfigured(
            'Method names starting with "rpc." are reserved')
    method = RPCMethod(func)
    existing_method = self.get_method(method.name, ALL, ALL)
    if existing_method is not None:
        if method == existing_method:
            return method.name
        else:
            raise ImproperlyConfigured(
                "A RPC method with name {} has already been "
                "registered".format(method.name))
    self._registry[method.name] = method
    logger.debug('Registry now contains {} methods'.format(len(self._registry)))
    return method.name
Register a function to be available as RPC method. The given function will be inspected to find external_name, protocol and entry_point values set by the decorator @rpc_method. :param func: A function previously decorated using @rpc_method :return: The name of registered method
373,089
def dump_code_line(disassembly_line, bShowAddress=True, bShowDump=True,
                   bLowercase=True, dwDumpWidth=None, dwCodeWidth=None,
                   bits=None):
    # The format-string fragments appended to ``fmt`` and the arguments to
    # ``dump.replace`` were elided in the source and are marked with `...`.
    if bits is None:
        address_size = HexDump.address_size
    else:
        address_size = bits / 4
    (addr, size, code, dump) = disassembly_line
    dump = dump.replace(..., ...)
    result = list()
    fmt = ''
    if bShowAddress:
        result.append(HexDump.address(addr, bits))
        fmt += ... % address_size
    if bShowDump:
        result.append(dump)
        if dwDumpWidth:
            fmt += ... % dwDumpWidth
        else:
            fmt += ...
    if bLowercase:
        code = code.lower()
    result.append(code)
    if dwCodeWidth:
        fmt += ... % dwCodeWidth
    else:
        fmt += ...
    return fmt % tuple(result)
Dump a single line of code. To dump a block of code use L{dump_code}. @type disassembly_line: tuple( int, int, str, str ) @param disassembly_line: Single item of the list returned by L{Process.disassemble} or L{Thread.disassemble_around_pc}. @type bShowAddress: bool @param bShowAddress: (Optional) If C{True} show the memory address. @type bShowDump: bool @param bShowDump: (Optional) If C{True} show the hexadecimal dump. @type bLowercase: bool @param bLowercase: (Optional) If C{True} convert the code to lowercase. @type dwDumpWidth: int or None @param dwDumpWidth: (Optional) Width in characters of the hex dump. @type dwCodeWidth: int or None @param dwCodeWidth: (Optional) Width in characters of the code. @type bits: int @param bits: (Optional) Number of bits of the target architecture. The default is platform dependent. See: L{HexDump.address_size} @rtype: str @return: Text suitable for logging.
373,090
def handle_startendtag(self, tag, attrs):
    # The HTML fragments were elided in the source; '<', ' %s="%s"', ' />'
    # and the escaped fallbacks are reconstructed from what the handler
    # evidently emits for empty tags.
    if tag.lower() in self.allowed_tag_whitelist:
        self.result += '<' + tag
        for (attr, value) in attrs:
            if attr.lower() in self.allowed_attribute_whitelist:
                self.result += ' %s="%s"' % \
                    (attr, self.handle_attribute_value(value))
        self.result += ' />'
    else:
        if self.render_unallowed_tags:
            self.result += '&lt;' + cgi.escape(tag)
            for (attr, value) in attrs:
                self.result += ' %s="%s"' % \
                    (attr, cgi.escape(value, True))
            self.result += ' /&gt;'
Function called for empty tags (e.g. <br />)
373,091
def setRecord(self, record):
    self._record = record
    if record is not None:
        self.loadValues(record.recordValues(autoInflate=True))
    else:
        self.loadValues({})
Sets the record instance linked with this widget. :param record | <orb.Table>
373,092
def layer_width(layer):
    if is_layer(layer, "Dense"):
        return layer.units
    if is_layer(layer, "Conv"):
        return layer.filters
    raise TypeError("The layer should be either Dense or Conv layer.")
get layer width.
373,093
def update(self):
    stats = self.get_init_value()
    # The input-method names were elided; 'local' and 'snmp' follow the
    # Glances convention.
    if self.input_method == 'local':
        stats = glances_processes.getlist()
    elif self.input_method == 'snmp':
        pass
    self.stats = stats
    self.max_values = copy.deepcopy(glances_processes.max_values())
    return self.stats
Update processes stats using the input method.
373,094
def get_cli_version():
    directory = os.path.dirname(os.path.abspath(__file__))
    version_path = os.path.join(directory, 'VERSION')  # filename reconstructed from the docstring
    if os.path.exists(version_path):
        with open(version_path) as f:
            ver = f.read()
        return ver
    return get_setup_version()
Get the CLI version. If a VERSION file exists, the version number it contains is used; otherwise :meth:`.get_setup_version` is used. :return: CLI version :rtype: str
373,095
def describe(lcdict, returndesc=False, offsetwith=None):
    # Nearly all dictionary keys and string literals in this function were
    # elided in the source and are marked with `...`; the joins are assumed
    # to be newline joins of per-definition lines.
    if ... in lcdict and ... in lcdict[...].lower():
        return describe_lcc_csv(lcdict, returndesc=returndesc)

    columndefs = []
    for colind, column in enumerate(lcdict[...]):
        if ... in column:
            colkey, colap = column.split(...)
            coldesc = COLUMNDEFS[colkey][0] % colap
        else:
            coldesc = COLUMNDEFS[column][0]
        columndefstr = ... % (colind, column, coldesc)
        columndefs.append(columndefstr)
    columndefs = '\n'.join(columndefs)

    filterdefs = []
    for row in lcdict[...]:
        filterid, filtername, filterdesc = row
        filterdefstr = ... % (filterid, filtername, filterdesc)
        filterdefs.append(filterdefstr)
    filterdefs = '\n'.join(filterdefs)

    aperturedefs = []
    for key in sorted(lcdict[...].keys()):
        aperturedefstr = ... % (key, lcdict[...][key])
        aperturedefs.append(aperturedefstr)
    aperturedefs = '\n'.join(aperturedefs)

    description = DESCTEMPLATE.format(
        objectid=lcdict[...],
        hatid=lcdict[...][...],
        twomassid=lcdict[...][...].strip(),
        ra=lcdict[...][...],
        decl=lcdict[...][...],
        pmra=lcdict[...][...],
        pmra_err=lcdict[...][...],
        pmdecl=lcdict[...][...],
        pmdecl_err=lcdict[...][...],
        jmag=lcdict[...][...],
        hmag=lcdict[...][...],
        kmag=lcdict[...][...],
        bmag=lcdict[...][...],
        vmag=lcdict[...][...],
        sdssg=lcdict[...][...],
        sdssr=lcdict[...][...],
        sdssi=lcdict[...][...],
        ndet=lcdict[...][...],
        lcsortcol=lcdict[...],
        lcbestaperture=json.dumps(lcdict[...], ensure_ascii=True),
        network=lcdict[...][...],
        stations=lcdict[...][...],
        lastupdated=lcdict[...],
        datarelease=lcdict[...],
        lcversion=lcdict[...],
        lcserver=lcdict[...],
        comment=lcdict[...],
        lcfiltersql=(lcdict[...] if ... in lcdict else ...),
        lcnormcols=(lcdict[...] if ... in lcdict else ...),
        filterdefs=filterdefs,
        columndefs=columndefs,
        aperturedefs=aperturedefs
    )

    if offsetwith is not None:
        description = textwrap.indent(
            description,
            ... % offsetwith,
            lambda line: True
        )
        print(description)
    else:
        print(description)

    if returndesc:
        return description
This describes the light curve object and columns present. Parameters ---------- lcdict : dict The input lcdict to parse for column and metadata info. returndesc : bool If True, returns the description string as an str instead of just printing it to stdout. offsetwith : str This is a character to offset the output description lines by. This is useful to add comment characters like '#' to the output description lines. Returns ------- str or None If returndesc is True, returns the description lines as a str, otherwise returns nothing.
373,096
def add_version(self, project, version, egg):
    url = self._build_url(constants.ADD_VERSION_ENDPOINT)
    # Keys reconstructed from the Scrapyd addversion API; the returned
    # JSON field name 'spiders' is an assumption.
    data = {
        'project': project,
        'version': version
    }
    files = {
        'egg': egg
    }
    json = self.client.post(url, data=data, files=files, timeout=self.timeout)
    return json['spiders']
Adds a new project egg to the Scrapyd service. First class, maps to Scrapyd's add version endpoint.
373,097
def dot_product_attention(q, k, v, bias, dropout_rate=0.0, image_shapes=None,
                          name=None, make_image_summary=True,
                          save_weights_to=None, dropout_broadcast_dims=None,
                          activation_dtype=None, weight_dtype=None,
                          hard_attention_k=0):
    with tf.variable_scope(
            name, default_name="dot_product_attention",
            values=[q, k, v]) as scope:
        logits = tf.matmul(q, k, transpose_b=True)
        if bias is not None:
            bias = common_layers.cast_like(bias, logits)
            logits += bias
        logits = maybe_upcast(logits, activation_dtype, weight_dtype)
        weights = tf.nn.softmax(logits, name="attention_weights")
        if hard_attention_k > 0:
            weights = harden_attention_weights(weights, hard_attention_k)
        weights = common_layers.cast_like(weights, q)
        if save_weights_to is not None:
            save_weights_to[scope.name] = weights
            save_weights_to[scope.name + "/logits"] = logits
        weights = common_layers.dropout_with_broadcast_dims(
            weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
        if common_layers.should_generate_summaries() and make_image_summary:
            attention_image_summary(weights, image_shapes)
        return tf.matmul(weights, v)
Dot-product attention. Args: q: Tensor with shape [..., length_q, depth_k]. k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must match with q. v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must match with q. bias: bias Tensor (see attention_bias()) dropout_rate: a float. image_shapes: optional tuple of integer scalars. see comments for attention_image_summary() name: an optional string make_image_summary: True if you want an image summary. save_weights_to: an optional dictionary to capture attention weights for visualization; the weights tensor will be appended there under a string key created from the variable scope (including name). dropout_broadcast_dims: an optional list of integers less than rank of q. Specifies in which dimensions to broadcast the dropout decisions. activation_dtype: Used to define function activation dtype when using mixed precision. weight_dtype: The dtype weights are stored in when using mixed precision hard_attention_k: integer, if > 0 triggers hard attention (picking top-k) Returns: Tensor with shape [..., length_q, depth_v].
373,098
def exp_and_normalise(lw):
    w = np.exp(lw - lw.max())
    return w / w.sum()
Exponentiate, then normalise (so that sum equals one).

Arguments
---------
lw: ndarray
    log weights.

Returns
-------
W: ndarray of the same shape as lw
    W = exp(lw) / sum(exp(lw))

Note
----
uses the log_sum_exp trick to avoid overflow (i.e. subtract the max
before exponentiating)

See also
--------
log_sum_exp
log_mean_exp
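A quick numerical check of the max-subtraction trick:

import numpy as np
lw = np.array([-1000.0, -1001.0])  # naive np.exp(lw) underflows to 0.0
w = np.exp(lw - lw.max())
print(w / w.sum())                 # [0.73105858 0.26894142]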
373,099
def run(self, scenario=None, only=None, **kwargs):
    if not exists(self.root_dir):
        raise FileNotFoundError(self.root_dir)

    if self.executable is None:
        raise ValueError(
            "MAGICC executable not found, try setting an environment variable "
            "`MAGICC_EXECUTABLE_{}=/path/to/binary`".format(self.version)
        )

    if scenario is not None:
        kwargs = self.set_emission_scenario_setup(scenario, kwargs)

    yr_config = {}
    if "startyear" in kwargs:
        yr_config["startyear"] = kwargs.pop("startyear")
    if "endyear" in kwargs:
        yr_config["endyear"] = kwargs.pop("endyear")
    if yr_config:
        self.set_years(**yr_config)

    kwargs.setdefault("rundate", get_date_time_string())
    self.update_config(**kwargs)
    self.check_config()

    exec_dir = basename(self.original_dir)
    command = [join(self.root_dir, exec_dir, self.binary_name)]
    if not IS_WINDOWS and self.binary_name.endswith(".exe"):
        command.insert(0, "wine")
    subprocess.check_call(command, cwd=self.run_dir, shell=IS_WINDOWS)

    outfiles = self._get_output_filenames()
    read_cols = {"climate_model": ["MAGICC{}".format(self.version)]}
    if scenario is not None:
        read_cols["model"] = scenario["model"].unique().tolist()
        read_cols["scenario"] = scenario["scenario"].unique().tolist()
    else:
        read_cols.setdefault("model", ["unspecified"])
        read_cols.setdefault("scenario", ["unspecified"])

    mdata = None
    for filepath in outfiles:
        try:
            openscm_var = _get_openscm_var_from_filepath(filepath)
            if only is None or openscm_var in only:
                tempdata = MAGICCData(
                    join(self.out_dir, filepath), columns=deepcopy(read_cols)
                )
                mdata = mdata.append(tempdata) if mdata is not None else tempdata
        except (NoReaderWriterError, InvalidTemporalResError):
            continue

    if mdata is None:
        error_msg = "No output found for only={}".format(only)
        raise ValueError(error_msg)

    try:
        run_paras = self.read_parameters()
        self.config = run_paras
        mdata.metadata["parameters"] = run_paras
    except FileNotFoundError:
        pass

    return mdata
Run MAGICC and parse the output. As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its parameters into ``out/PARAMETERS.OUT`` and they will then be read into ``output.metadata["parameters"]`` where ``output`` is the returned object. Parameters ---------- scenario : :obj:`pymagicc.io.MAGICCData` Scenario to run. If None MAGICC will simply run with whatever config has already been set. only : list of str If not None, only extract variables in this list. kwargs Other config values to pass to MAGICC for the run Returns ------- :obj:`pymagicc.io.MAGICCData` MAGICCData object containing that data in its ``df`` attribute and metadata and parameters (depending on the value of ``include_parameters``) in its ``metadata`` attribute. Raises ------ ValueError If no output is found which matches the list specified in ``only``.