Dataset preview (code/docstring pairs). Column summary:

    column      dtype    range
    Unnamed: 0  int64    0 - 389k
    code        string   lengths 26 - 79.6k
    docstring   string   lengths 1 - 46.9k
372,000
def hset(self, key, value): return self.r.hset(self.hash, key, value)
Create key/value pair in Redis. Args: key (string): The key to create in Redis. value (any): The value to store in Redis. Returns: (string): The response from Redis.
372,001
def initDatabase(self, db_file):
    try:
        self.__f = io.open(db_file, "rb")
    except IOError, e:  # Python 2 syntax, as in the original source
        print "[Error]: ", e
        sys.exit()
" initialize the database for search " param: dbFile
372,002
def _extract_info(archive, info):
    # Note: the endswith() arguments and the empty-bytes comparison were
    # stripped from this dataset row; '/', '\\' and b'' are reconstructed
    # assumptions for directory-entry detection.
    if isinstance(archive, zipfile.ZipFile):
        fn = info.filename
        is_dir = fn.endswith('/') or fn.endswith('\\')
        out = archive.read(info)
        if is_dir and out == b'':
            return None
        return out
    info_file = archive.extractfile(info)
    if info_file:
        return info_file.read()
    return None
Extracts the contents of an archive info object. :param archive: An archive from _open_archive() :param info: An info object from _list_archive_members() :return: None, or a byte string of the file contents
372,003
def create_cache_security_group(name, region=None, key=None, keyid=None, profile=None, **args):
    # The keyword literals below were stripped from this row; the values are
    # reconstructed assumptions based on Salt's boto3_elasticache module.
    return _create_resource(name, name_param='CacheSecurityGroupName',
                            desc='cache security group',
                            res_type='cache_security_group',
                            region=region, key=key, keyid=keyid,
                            profile=profile, **args)
Create a cache security group. Example: .. code-block:: bash salt myminion boto3_elasticache.create_cache_security_group mycachesecgrp Description='My Cache Security Group'
372,004
def import_checks(path):
    dir = internal.check_dir / path
    file = internal.load_config(dir)["checks"]
    mod = internal.import_file(dir.name, (dir / file).resolve())
    sys.modules[dir.name] = mod
    return mod
Import checks module given relative path. :param path: relative path from which to import checks module :type path: str :returns: the imported module :raises FileNotFoundError: if ``path / .check50.yaml`` does not exist :raises yaml.YAMLError: if ``path / .check50.yaml`` is not a valid YAML file This function is particularly useful when a set of checks logically extends another, as is often the case in CS50's own problems that have a "less comfy" and "more comfy" version. The "more comfy" version can include all of the "less comfy" checks like so:: less = check50.import_checks("../less") from less import * .. note:: the ``__name__`` of the imported module is given by the basename of the specified path (``less`` in the above example).
372,005
def client_mechanisms(self): return [mech for mech in self.mechs.values() if isinstance(mech, ClientMechanism)]
List of available :class:`ClientMechanism` objects.
372,006
def major_axis_endpoints(self):
    i = np.argmax(self.axlens)  # index of the longest axis
    v = self.paxes[:, i]        # corresponding principal axis vector
    return self.ctr - v, self.ctr + v
Return the endpoints of the major axis.
372,007
def _classify_no_operation(self, regs_init, regs_fini, mem_fini, written_regs, read_regs):
    matches = []
    # The assignment of regs_changed was lost from this row; comparing the
    # written registers before and after execution is a plausible
    # reconstruction, not the verbatim source.
    regs_changed = any(regs_init[r] != regs_fini[r] for r in written_regs)
    flags_changed = False
    mem_changed = mem_fini.get_write_count() != 0
    if not regs_changed and not flags_changed and not mem_changed:
        matches.append({"op": "nop"})
    return matches
Classify no-operation gadgets.
372,008
def polarity_scores(self, text):
    # Replace emojis with their textual descriptions.
    text_token_list = text.split()
    text_no_emoji_lst = []
    for token in text_token_list:
        if token in self.emojis:
            description = self.emojis[token]
            text_no_emoji_lst.append(description)
        else:
            text_no_emoji_lst.append(token)
    text = " ".join(x for x in text_no_emoji_lst)
    sentitext = SentiText(text)
    sentiments = []
    words_and_emoticons = sentitext.words_and_emoticons
    for item in words_and_emoticons:
        valence = 0
        i = words_and_emoticons.index(item)
        if item.lower() in BOOSTER_DICT:
            sentiments.append(valence)
            continue
        if (i < len(words_and_emoticons) - 1 and item.lower() == "kind" and
                words_and_emoticons[i + 1].lower() == "of"):
            sentiments.append(valence)
            continue
        sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)
    sentiments = self._but_check(words_and_emoticons, sentiments)
    valence_dict = self.score_valence(sentiments, text)
    return valence_dict
Return a float for sentiment strength based on the input text. Positive values are positive valence, negative values are negative valence.
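A minimal usage sketch. The class name and import path are assumptions based on the VADER-style API this row resembles, and the output values are illustrative only:

    from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

    analyzer = SentimentIntensityAnalyzer()
    scores = analyzer.polarity_scores("The movie was GREAT!")
    # scores is a dict like {'neg': 0.0, 'neu': 0.4, 'pos': 0.6, 'compound': 0.7}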
372,009
def _run_submission(self, metadata):
    # Most string literals in this row (docker CLI flags, metadata keys and
    # mount paths) were stripped; the values below are reconstructed
    # assumptions, not the verbatim source.
    if self._use_gpu:
        docker_binary = 'nvidia-docker'
        container_name = metadata['container_gpu']
    else:
        docker_binary = 'docker'
        container_name = metadata['container']
    if metadata['type'] == 'defense':
        cmd = [docker_binary, 'run',
               '-v', '{0}:/input_images'.format(self._sample_input_dir),
               '-v', '{0}:/output_data'.format(self._sample_output_dir),
               '-v', '{0}:/code'.format(self._extracted_submission_dir),
               '-w', '/code',
               container_name,
               './' + metadata['entry_point'],
               '/input_images', '/output_data/result.csv']
    else:
        epsilon = np.random.choice(ALLOWED_EPS)
        cmd = [docker_binary, 'run',
               '-v', '{0}:/input_images'.format(self._sample_input_dir),
               '-v', '{0}:/output_images'.format(self._sample_output_dir),
               '-v', '{0}:/code'.format(self._extracted_submission_dir),
               '-w', '/code',
               container_name,
               './' + metadata['entry_point'],
               '/input_images', '/output_images', str(epsilon)]
    logging.info('Running submission command: %s', ' '.join(cmd))
    return shell_call(cmd)
Runs submission inside Docker container. Args: metadata: dictionary with submission metadata Returns: True if status code of Docker command was success (i.e. zero), False otherwise.
372,010
def compressed_bytes2ibytes(compressed, size):
    decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
    for i in range(0, mo_math.ceiling(len(compressed), size), size):
        try:
            block = compressed[i: i + size]
            yield decompressor.decompress(block)
        except Exception as e:
            Log.error("Not expected", e)
CONVERT AN ARRAY OF BYTES TO A BYTE-BLOCK GENERATOR USEFUL IN THE CASE WHEN WE WANT TO LIMIT HOW MUCH WE FEED ANOTHER GENERATOR (LIKE A DECOMPRESSOR)
372,011
def load_map_projection(filename,
                        center=None, center_right=None, radius=None,
                        method='orthographic', registration='native',
                        chirality=None, sphere_radius=None,
                        pre_affine=None, post_affine=None, meta_data=None):
    # The default literals for method/registration were stripped from this
    # row; 'orthographic' and 'native' are reconstructed assumptions.
    return MapProjection.load(filename,
                              center=center, center_right=center_right,
                              radius=radius, method=method,
                              registration=registration, chirality=chirality,
                              sphere_radius=sphere_radius,
                              pre_affine=pre_affine, post_affine=post_affine)
load_map_projection(filename) yields the map projection indicated by the given file name. Map projections define the parameters of a projection to the 2D cortical surface via a registration name and projection parameters. This function is primarily a wrapper around the MapProjection.load() function; for information about options, see MapProjection.load.
372,012
def merge_map(a, b):
    if isinstance(a, list) and isinstance(b, list):
        return a + b
    if not isinstance(a, dict) or not isinstance(b, dict):
        return b
    for key in b:
        a[key] = merge_map(a[key], b[key]) if key in a else b[key]
    return a
Recursively merge elements of argument b into argument a. Primarily used for merging two dictionaries together, where dict b takes precedence over dict a. If two lists are provided, they are concatenated.
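A short worked example of the merge semantics described above (dict b wins on scalar conflicts, lists concatenate, nested dicts merge):

    a = {"x": 1, "tags": ["a"], "nested": {"k": 1}}
    b = {"x": 2, "tags": ["b"], "nested": {"m": 3}}
    merged = merge_map(a, b)
    # merged == {"x": 2, "tags": ["a", "b"], "nested": {"k": 1, "m": 3}}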
372,013
def scheduling_time_index(J, p, r, w):
    model = Model("scheduling: time index")
    T = max(r.values()) + sum(p.values())
    X = {}
    for j in J:
        for t in range(r[j], T - p[j] + 2):
            X[j, t] = model.addVar(vtype="B", name="x(%s,%s)" % (j, t))
    for j in J:
        model.addCons(quicksum(X[j, t] for t in range(1, T + 1) if (j, t) in X) == 1,
                      "JobExecution(%s)" % (j))
    for t in range(1, T + 1):
        ind = [(j, t2) for j in J for t2 in range(t - p[j] + 1, t + 1) if (j, t2) in X]
        if ind != []:
            model.addCons(quicksum(X[j, t2] for (j, t2) in ind) <= 1, "MachineUB(%s)" % t)
    model.setObjective(quicksum((w[j] * (t - 1 + p[j])) * X[j, t] for (j, t) in X),
                       "minimize")
    model.data = X
    return model
scheduling_time_index: model for the one machine total weighted tardiness problem Model for the one machine total weighted tardiness problem using the time index formulation Parameters: - J: set of jobs - p[j]: processing time of job j - r[j]: earliest start time of job j - w[j]: weight of job j; the objective is the sum of the weighted completion times Returns a model, ready to be solved.
372,014
def result(self):
    if self._result is None:
        self.await_result()
    chunks, exception = self._result
    if exception is None:
        return chunks
    raise exception
The result from realising the future If the result is not available, block until done. :return: result of the future :raises: any exception encountered during realising the future
372,015
def compile(code, silent=True, ignore_errors=False, optimize=True):
    assert isinstance(code, list)
    output = []
    subroutine = {}
    builtins = Machine([]).instructions
    # Gather subroutine definitions. The '%s' placeholders below were missing
    # from this row's error messages and have been restored.
    try:
        it = code.__iter__()
        while True:
            word = next(it)
            if word == ":":
                name = next(it)
                if name in builtins:
                    raise CompileError("Cannot shadow internal word definition '%s'." % name)
                if name in [":", ";"]:
                    raise CompileError("Invalid word name '%s'." % name)
                subroutine[name] = []
                while True:
                    op = next(it)
                    if op == ";":
                        subroutine[name].append(instructions.lookup(instructions.return_))
                        break
                    else:
                        subroutine[name].append(op)
            else:
                output.append(word)
    except StopIteration:
        pass
    # Expand subroutine calls in subroutine bodies and in the main loop.
    for name, code in subroutine.items():
        xcode = []
        for op in code:
            xcode.append(op)
            if op in subroutine:
                xcode.append(instructions.lookup(instructions.call))
        subroutine[name] = xcode
    xcode = []
    for op in output:
        xcode.append(op)
        if op in subroutine:
            xcode.append(instructions.lookup(instructions.call))
    output = xcode
    if len(subroutine) > 0:
        output += [instructions.lookup(instructions.exit)]
    if optimize:
        output = optimizer.optimized(output, silent=silent, ignore_errors=False)
    # Resolve subroutine addresses.
    location = {}
    for name, code in subroutine.items():
        location[name] = len(output)
        if optimize:
            output += optimizer.optimized(code, silent=silent, ignore_errors=False)
        else:
            output += code
    for i, op in enumerate(output):
        if op in location:
            output[i] = location[op]
    output = native_types(output)
    if not ignore_errors:
        check(output)
    return output
Compiles subroutine-forms into a complete working code. A program such as: : sub1 <sub1 code ...> ; : sub2 <sub2 code ...> ; sub1 foo sub2 bar is compiled into: <sub1 address> call foo <sub2 address> call exit <sub1 code ...> return <sub2 code ...> return Optimizations are first done on subroutine bodies, then on the main loop and finally, symbols are resolved (i.e., placeholders for subroutine addresses are replaced with actual addresses). Args: silent: If set to False, will print optimization messages. ignore_errors: Only applies to the optimization engine, if set to False it will not raise any exceptions. The actual compilation will still raise errors. optimize: Flag to control whether to optimize code. Raises: CompileError - Raised if invalid code is detected. Returns: An array of code that can be run by a Machine. Typically, you want to pass this to a Machine without doing optimizations. Usage: source = parse("<source code>") code = compile(source) machine = Machine(code, optimize=False) machine.run()
372,016
def get_object(self, ObjectClass, id):
    try:
        object = ObjectClass.objects.get(id=id)
    except (ObjectClass.DoesNotExist, ObjectClass.MultipleObjectsReturned):
        object = None
    return object
Retrieve object of type ``ObjectClass`` by ``id``. | Returns object on success. | Returns None otherwise.
372,017
def db_log(self, transition, from_state, instance, *args, **kwargs):
    if self.log_model:
        model_class = self._get_log_model_class()
        extras = {}
        for db_field, transition_arg, default in model_class.EXTRA_LOG_ATTRIBUTES:
            extras[db_field] = kwargs.get(transition_arg, default)
        return model_class.log_transition(
            modified_object=instance,
            transition=transition.name,
            from_state=from_state.name,
            to_state=transition.target.name,
            **extras)
Logs the transition into the database.
372,018
def build_article_from_xml(article_xml_filename, detail="brief", build_parts=None, remove_tags=None):
    # NOTE: the build_part() arguments and several string literals below were
    # stripped from this dataset row; the part names are inferred from the
    # attribute each block sets and are assumptions, not the verbatim source.
    build_part = lambda part: build_part_check(part, build_parts)
    error_count = 0
    soup = parser.parse_document(article_xml_filename)
    doi = parser.doi(soup)
    article = ea.Article(doi, title=None)
    utils.set_attr_if_value(article, 'version',
                            utils.version_from_xml_filename(article_xml_filename))
    if build_part('journal_title'):
        article.journal_title = parser.journal_title(soup)
    if build_part('journal_issn'):
        article.journal_issn = parser.journal_issn(soup, "electronic")
        if article.journal_issn is None:
            article.journal_issn = parser.journal_issn(soup)
    if build_part('related_articles'):
        article.related_articles = build_related_articles(parser.related_article(soup))
    if build_part('pii'):
        article.pii = parser.publisher_id(soup)
    if build_part('manuscript'):
        manuscript = parser.publisher_id(soup)
        if not manuscript and doi:
            manuscript = doi.split('.')[-1]
        article.manuscript = manuscript
    if build_part('article_type'):
        article_type = parser.article_type(soup)
        if article_type:
            article.article_type = article_type
    if build_part('title'):
        article.title = parser.full_title(soup)
    if build_part('publisher_name'):
        article.publisher_name = parser.publisher(soup)
    if build_part('abstract'):
        article.abstract = clean_abstract(parser.full_abstract(soup), remove_tags)
    if build_part('digest'):
        article.digest = clean_abstract(parser.full_digest(soup), remove_tags)
    if build_part('elocation_id'):
        article.elocation_id = parser.elocation_id(soup)
    if build_part('issue'):
        article.issue = parser.issue(soup)
    if build_part('self_uri_list'):
        article.self_uri_list = build_self_uri_list(parser.self_uri(soup))
    if build_part('contributors'):
        competing_interests = parser.competing_interests(soup, None)
        all_contributors = parser.contributors(soup, detail)
        author_contributors = [con for con in all_contributors
                               if con.get('type') in ['author', 'on-behalf-of']]
        contrib_type = "author"
        contributors = build_contributors(author_contributors, contrib_type,
                                          competing_interests)
        contrib_type = "author non-byline"
        authors = parser.authors_non_byline(soup, detail)
        contributors_non_byline = build_contributors(authors, contrib_type,
                                                     competing_interests)
        article.contributors = contributors + contributors_non_byline
    if build_part('license'):
        license_object = ea.License()
        license_object.href = parser.license_url(soup)
        license_object.copyright_statement = parser.copyright_statement(soup)
        article.license = license_object
    if build_part('categories'):
        article.article_categories = parser.category(soup)
    if build_part('keywords'):
        article.author_keywords = parser.keywords(soup)
    if build_part('research_organisms'):
        article.research_organisms = parser.research_organism(soup)
    if build_part('funding'):
        article.funding_awards = build_funding(parser.full_award_groups(soup))
    if build_part('datasets'):
        datasets_json = parser.datasets_json(soup)
        article.datasets = build_datasets(datasets_json)
        article.data_availability = build_data_availability(datasets_json)
    if build_part('references'):
        article.ref_list = build_ref_list(parser.refs(soup))
    if build_part('components'):
        article.component_list = build_components(parser.components(soup))
    if build_part('history'):
        date_types = ["received", "accepted"]
        for date_type in date_types:
            history_date = parser.history_date(soup, date_type)
            if history_date:
                date_instance = ea.ArticleDate(date_type, history_date)
                article.add_date(date_instance)
    if build_part('pub_dates'):
        build_pub_dates(article, parser.pub_dates(soup))
    if build_part('volume'):
        volume = parser.volume(soup)
        if volume:
            article.volume = volume
    if build_part('is_poa'):
        article.is_poa = parser.is_poa(soup)
    return article, error_count
Parse JATS XML with the elifetools parser and populate an eLifePOA article object. Basic data Crossref needs: article_id, doi, title, contributors with names set. detail="brief" is normally enough; detail="full" will populate all the contributor affiliations that are linked by xref tags.
372,019
def wait_for_participant_newbalance(
        raiden: 'RaidenService',  # type annotation reconstructed; original literal lost
        payment_network_id: PaymentNetworkID,
        token_address: TokenAddress,
        partner_address: Address,
        target_address: Address,
        target_balance: TokenAmount,
        retry_timeout: float,
) -> None:
    if target_address == raiden.address:
        balance = lambda channel_state: channel_state.our_state.contract_balance
    elif target_address == partner_address:
        balance = lambda channel_state: channel_state.partner_state.contract_balance
    else:
        # error message reconstructed; the original literal was stripped
        raise ValueError('target_address must be one of the channel participants')
    channel_state = views.get_channelstate_for(
        views.state_from_raiden(raiden),
        payment_network_id,
        token_address,
        partner_address,
    )
    while balance(channel_state) < target_balance:
        gevent.sleep(retry_timeout)
        channel_state = views.get_channelstate_for(
            views.state_from_raiden(raiden),
            payment_network_id,
            token_address,
            partner_address,
        )
Wait until a given channel's balance exceeds the target balance. Note: This does not time out, use gevent.Timeout.
372,020
def items(self, section):
    for line in self.iter_lines(section):
        if line.kind == ConfigLine.KIND_DATA:
            yield line.key, line.value
Retrieve all key/value pairs for a given section.
372,021
def reset(self):
    self._cli.reset()
    self._cli.buffers[DEFAULT_BUFFER].reset()
    self._cli.renderer.request_absolute_cursor_position()
    self._cli._redraw()
Resets terminal screen
372,022
def paintEvent(self, event):
    super(XRolloutItem, self).paintEvent(event)
    with XPainter(self) as painter:
        w = self.width() - 3
        h = self.height() - 3
        color = self.palette().color(QPalette.Midlight)
        color = color.darker(180)
        pen = QPen(color)
        pen.setWidthF(0.5)
        painter.setPen(pen)
        painter.setBrush(self.palette().color(QPalette.Midlight))
        painter.setRenderHint(XPainter.Antialiasing)
        painter.drawRoundedRect(1, 1, w, h, 10, 10)
Overloads the paint event to draw rounded edges on this widget. :param event | <QPaintEvent>
372,023
def play(self, wav=None, data=None, rate=16000, channels=1, width=2, block=True, spectrum=None):
    if wav:
        f = wave.open(wav, 'rb')  # mode literal reconstructed; stripped from this row
        rate = f.getframerate()
        channels = f.getnchannels()
        width = f.getsampwidth()

        def gen(w):
            d = w.readframes(CHUNK_SIZE)
            while d:
                yield d
                d = w.readframes(CHUNK_SIZE)
            w.close()

        data = gen(f)
    self.stop_event.clear()
    if block:
        self._play(data, rate, channels, width, spectrum)
    else:
        thread = threading.Thread(target=self._play,
                                  args=(data, rate, channels, width, spectrum))
        thread.start()
play wav file or raw audio (string or generator) Args: wav: wav file path data: raw audio data, str or iterator rate: sample rate, only for raw audio channels: channel number, only for raw data width: raw audio data width, 16 bit is 2, only for raw data block: if true, block until audio is played. spectrum: if true, use a spectrum analyzer thread to analyze data
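A usage sketch. The owning class and its constructor are assumptions; only the play() signature comes from this row:

    player = Player()  # hypothetical constructor
    player.play(wav="hello.wav")               # blocks until playback finishes
    player.play(wav="hello.wav", block=False)  # plays on a background thread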
372,024
def get_commands_from_file(self, mission_file, role):
    doc = etree.parse(mission_file)
    mission = doc.getroot()
    return self.get_commands_from_xml(mission, role)
Get commands from xml file as a list of (command_type:int, turnbased:boolean, command:string)
372,025
def return_real_id_base(dbpath, set_object):
    # The engine URL prefix was stripped from this row; 'sqlite:///' is
    # assumed because the docstring says dbpath points at a SQLite file.
    engine = create_engine('sqlite:///' + dbpath)
    session_cl = sessionmaker(bind=engine)
    session = session_cl()
    return_list = []
    for i in session.query(set_object).order_by(set_object.id):
        return_list.append(i.real_id)
    session.close()
    return return_list
Generic function which returns a list of real_id's Parameters ---------- dbpath : string, path to SQLite database file set_object : object (either TestSet or TrainSet) which is stored in the database Returns ------- return_list : list of real_id values for the dataset (a real_id is the filename minus the suffix and prefix)
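A usage sketch, where TrainSet stands in for one of the mapped ORM classes the docstring mentions:

    ids = return_real_id_base("data/features.db", TrainSet)
    print(ids[:3])  # e.g. ['rec_001', 'rec_002', 'rec_003']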
372,026
def kill(args):
    from sregistry.main import Client as cli
    if len(args.commands) > 0:
        for name in args.commands:
            cli.destroy(name)
    sys.exit(0)
kill is a helper function to call the "kill" function of the client, meaning we bring down an instance.
372,027
def make_grasp_phenotype_file(fn, pheno, out):
    import subprocess
    # The awk command string was garbled in this row; the reconstruction below
    # (keep the header row plus rows whose 12th column equals the phenotype)
    # is an assumption, not the verbatim source.
    c = 'awk \'NR == 1 || $12 == "{}"\' {} > {}'.format(
        pheno.replace("'", '\\x27'), fn, out)
    subprocess.check_call(c, shell=True)
Subset the GRASP database on a specific phenotype. Parameters ---------- fn : str Path to GRASP database file. pheno : str Phenotype to extract from database. out : str Path to output file for subset of GRASP database.
372,028
def validate_args(self, qubits: Sequence[Qid]) -> None:
    # The second and third error-message literals were stripped from this row
    # and are reconstructed paraphrases, not the verbatim source text.
    if len(qubits) == 0:
        raise ValueError(
            "Applied a gate to an empty set of qubits. Gate: {}".format(
                repr(self)))
    if len(qubits) != self.num_qubits():
        raise ValueError(
            'Wrong number of qubits for <{!r}>. '
            'Expected {} qubits but got <{!r}>.'.format(
                self, self.num_qubits(), qubits))
    if any([not isinstance(qubit, Qid) for qubit in qubits]):
        raise ValueError(
            'Gate was applied to something that is not a Qid.')
Checks if this gate can be applied to the given qubits. By default checks if input is of type Qid and qubit count. Child classes can override. Args: qubits: The collection of qubits to potentially apply the gate to. Throws: ValueError: The gate can't be applied to the qubits.
372,029
def cal_frame_according_boundaries(left, right, top, bottom, parent_size,
                                   gaphas_editor=True, group=True):
    margin = cal_margin(parent_size)
    if group:
        rel_pos = max(left - margin, 0), max(top - margin, 0)
        size = (min(right - left + 2 * margin, parent_size[0] - rel_pos[0]),
                min(bottom - top + 2 * margin, parent_size[1] - rel_pos[1]))
    else:
        rel_pos = left, top
        size = right - left, bottom - top
    return margin, rel_pos, size
Generate margin, relative position and size from the given boundary parameters and parent size.
372,030
def create_filter_predicate(self):
    # Several string literals were stripped from this row; the assert message
    # and the 'filter'/'default' names are reconstructed assumptions. The
    # tuple-unpacking lambda at the end is Python 2 syntax, kept as-is.
    assert self.query_content_id is not None, \
        'missing query content id'
    filter_names = self.query_params.getlist('filter')
    if len(filter_names) == 0 and 'default' in self._filters:
        filter_names = ['default']
    init_filters = [(n, self._filters[n]) for n in filter_names]
    preds = [lambda _: True]
    for name, p in init_filters:
        preds.append(p.set_query_id(self.query_content_id)
                      .set_query_params(self.query_params)
                      .create_predicate())
    return lambda (cid, fc): fc is not None and all(p((cid, fc)) for p in preds)
Creates a filter predicate. The list of available filters is given by calls to ``add_filter``, and the list of filters to use is given by parameters in ``params``. In this default implementation, multiple filters can be specified with the ``filter`` parameter. Each filter is initialized with the same set of query parameters given to the search engine. The returned function accepts a ``(content_id, FC)`` and returns ``True`` if and only if every selected predicate returns ``True`` on the same input.
372,031
def _redirect_output(self, statement: Statement) -> Tuple[bool, utils.RedirectionSavedState]:
    # The io.open mode literals were stripped from this row; 'r'/'w' are
    # reconstructed assumptions. Parts of the original function appear to
    # have been truncated by the dataset extraction.
    import io
    import subprocess
    redir_error = False
    saved_state = utils.RedirectionSavedState(self.stdout, sys.stdout,
                                              self.cur_pipe_proc_reader)
    if not self.allow_redirection:
        return redir_error, saved_state
    if statement.pipe_to:
        read_fd, write_fd = os.pipe()
        subproc_stdin = io.open(read_fd, 'r')
        new_stdout = io.open(write_fd, 'w')
        redir_error = True
    else:
        new_stdout = tempfile.TemporaryFile(mode="w+")
    saved_state.redirecting = True
    sys.stdout = self.stdout = new_stdout
    if statement.output == constants.REDIRECTION_APPEND:
        self.poutput(get_paste_buffer())
    return redir_error, saved_state
Handles output redirection for >, >>, and |. :param statement: a parsed statement from the user :return: A bool telling if an error occurred and a utils.RedirectionSavedState object
372,032
def _AddDependencyEdges(self, rdf_artifact):
    artifact_dependencies = artifact_registry.GetArtifactPathDependencies(rdf_artifact)
    if artifact_dependencies:
        for attribute in artifact_dependencies:
            self._AddEdge(attribute, rdf_artifact.name)
    else:
        self.reachable_nodes.add(rdf_artifact.name)
        self.graph[rdf_artifact.name].is_provided = True
Add an edge for every dependency of the given artifact. This method gets the attribute names for a given artifact and for every attribute it adds a directed edge from the attribute node to the artifact node. If an artifact does not have any dependencies it is added to the set of reachable nodes. Args: rdf_artifact: The artifact object.
372,033
def DbGetDeviceMemberList(self, argin):
    self._log.debug("In DbGetDeviceMemberList()")
    argin = replace_wildcard(argin)
    return self.db.get_device_member_list(argin)
Get a list of device name members for device name matching the specified filter :param argin: The filter :type: tango.DevString :return: Device names member list :rtype: tango.DevVarStringArray
372,034
def heartbeat(self):
    # This row was badly garbled; the body below is a reconstruction based on
    # the surviving log-message fragments and is an assumption, not verbatim.
    logger.debug('Heartbeating %s (ttl = %s)', self.jid, self.ttl)
    self.expires_at = float(
        self.client('heartbeat', self.jid, self.queue_name,
                    self.worker_name, json.dumps(self.data)) or 0)
    logger.debug('Heartbeated %s (ttl = %s)', self.jid, self.ttl)
    return self.expires_at
Renew the heartbeat, if possible, and optionally update the job's user data.
372,035
def fingerprint(self):
    if self.num_vertices == 0:
        return np.zeros(20, np.ubyte)
    else:
        return sum(self.vertex_fingerprints)
A total graph fingerprint The result is invariant under permutation of the vertex indexes. The chance that two different (molecular) graphs yield the same fingerprint is small but not zero. (See unit tests.)
372,036
def set(self, path, value, version=-1):
    value = to_bytes(value)
    super(XClient, self).set(path, value, version)
wraps the default set() and handles encoding (Py3k)
372,037
def run_osa_differ():
    args = parse_arguments()
    if args.debug:
        log.setLevel(logging.DEBUG)
    elif args.verbose:
        log.setLevel(logging.INFO)
    # The block creating the storage directory was garbled in this row; the
    # try/except below (and the prepare_storage_dir helper name) is a
    # reconstruction and an assumption, not the verbatim source.
    try:
        storage_directory = prepare_storage_dir(args.directory)
    except OSError:
        print("ERROR: Couldn't create the storage directory {0}. "
              "Please create it manually.".format(args.directory))
        sys.exit(1)
    osa_old_commit = args.old_commit[0]
    osa_new_commit = args.new_commit[0]
    osa_repo_dir = "{0}/openstack-ansible".format(storage_directory)
    report_rst = make_osa_report(osa_repo_dir, osa_old_commit, osa_new_commit, args)
    if args.release_notes:
        report_rst += ("\nRelease Notes\n"
                       "-------------")
        report_rst += get_release_notes(osa_repo_dir, osa_old_commit, osa_new_commit)
    role_yaml = get_roles(osa_repo_dir, osa_old_commit, args.role_requirements)
    role_yaml_latest = get_roles(osa_repo_dir, osa_new_commit, args.role_requirements)
    if not args.skip_roles:
        report_rst += ("\nOpenStack-Ansible Roles\n"
                       "-----------------------")
        report_rst += make_report(storage_directory, role_yaml, role_yaml_latest,
                                  args.update, args.version_mappings)
    if not args.skip_projects:
        project_yaml = get_projects(osa_repo_dir, osa_old_commit)
        project_yaml_latest = get_projects(osa_repo_dir, osa_new_commit)
        report_rst += ("\nOpenStack Projects\n"
                       "------------------")
        report_rst += make_report(storage_directory, project_yaml,
                                  project_yaml_latest, args.update)
    output = publish_report(report_rst, args, osa_old_commit, osa_new_commit)
    print(output)
Start here.
372,038
def get_nn_info(self, structure, n):
    from pymatgen.io.babel import BabelMolAdaptor
    obmol = BabelMolAdaptor(structure).openbabel_mol
    siw = []
    # Get only the atom of interest.
    site_atom = [a for i, a in enumerate(ob.OBMolAtomDFSIter(obmol))
                 if [a.GetX(), a.GetY(), a.GetZ()] == list(structure[n].coords)][0]
    for neighbor in ob.OBAtomAtomIter(site_atom):
        coords = [neighbor.GetX(), neighbor.GetY(), neighbor.GetZ()]
        site = [a for a in structure if list(a.coords) == coords][0]
        index = structure.index(site)
        bond = site_atom.GetBond(neighbor)
        if self.order:
            obmol.PerceiveBondOrders()
            weight = bond.GetBondOrder()
        else:
            weight = bond.GetLength()
        siw.append({"site": site, "image": (0, 0, 0),
                    "weight": weight, "site_index": index})
    return siw
Get all near-neighbor sites and weights (orders) of bonds for a given atom. :param structure: input Molecule. :param n: index of site for which to determine near neighbors. :return: [dict] representing a neighboring site and the type of bond present between site n and the neighboring site.
372,039
def getHomoloGene(taxfile="build_inputs/taxid_taxname",
                  genefile="homologene.data",
                  proteinsfile="build_inputs/all_proteins.data",
                  proteinsclusterfile="build_inputs/proteins_for_clustering.data",
                  baseURL="http://ftp.ncbi.nih.gov/pub/HomoloGene/current/"):
    def getDf(inputfile):
        # Read from a local file if present, otherwise download from baseURL.
        if os.path.isfile(inputfile):
            df = pd.read_table(inputfile, header=None)
        else:
            df = urllib2.urlopen(baseURL + inputfile)
            df = df.read().split("\n")
            df = [s for s in df if len(s) > 0]
            df = [s.split("\t") for s in df]
            df = pd.DataFrame(df)
        return df

    taxdf = getDf(taxfile)
    taxdf.set_index([0], inplace=True)
    taxdi = taxdf.to_dict().get(1)

    genedf = getDf(genefile)
    genecols = ["HID", "Taxonomy ID", "Gene ID", "Gene Symbol",
                "Protein gi", "Protein accession"]
    genedf.columns = genecols
    genedf["organism"] = genedf["Taxonomy ID"].apply(lambda x: taxdi.get(x))

    proteinsdf = getDf(proteinsfile)
    proteinscols = ["taxid", "entrez GeneID", "gene symbol", "gene description",
                    "protein accession.ver", "mrna accession.ver",
                    "length of protein listed in column 5",
                    "-11) contains data about gene location on the genome",
                    "starting position of gene in 0-based coordinate",
                    "end position of the gene in 0-based coordinate",
                    "strand",
                    "nucleotide gi of genomic sequence where this gene is annotated"]
    proteinsdf.columns = proteinscols
    proteinsdf["organism"] = proteinsdf["taxid"].apply(lambda x: taxdi.get(x))

    # The clustering table uses the same column layout.
    protclusdf = getDf(proteinsclusterfile)
    protclusdf.columns = proteinscols
    protclusdf["organism"] = protclusdf["taxid"].apply(lambda x: taxdi.get(x))

    return genedf, protclusdf, proteinsdf
Returns NCBI's HomoloGene tables. :param taxfile: path to local file or to baseURL/taxfile :param genefile: path to local file or to baseURL/genefile :param proteinsfile: path to local file or to baseURL/proteinsfile :param proteinsclusterfile: path to local file or to baseURL/proteinsclusterfile :param baseURL: baseURL for downloading files :returns genedf: HomoloGene Pandas dataframe :returns protclusdf: Pandas dataframe. Lists one protein per gene that were used for homologene clustering. If a gene has multiple protein accessions derived from alternative splicing, only one protein isoform that gives the most protein alignment to proteins in other species was selected for clustering and it is listed in this file. :returns proteinsdf: Pandas dataframe. Lists all proteins and their gene information. If a gene has multiple protein accessions derived from an alternative splicing event, each protein accession is listed in a separate line.
372,040
def get_area(self):
    d_z = self.lower_depth - self.upper_depth
    self.downdip_width = d_z / np.sin(self.dip * np.pi / 180.)
    self.surface_width = self.downdip_width * np.cos(self.dip * np.pi / 180.)
    self.area = self.length * self.downdip_width
    return self.area
Calculates the area of the fault (km ** 2.) as the product of length (km) and downdip width (km)
372,041
def check_contract_allowed(func):
    @wraps(func)
    def decorator(*args, **kwargs):
        contract = kwargs.get('contract')  # key literal reconstructed from context
        if (contract and current_user.is_authenticated() and
                not current_user.allowed(contract)):
            return current_app.login_manager.unauthorized()
        return func(*args, **kwargs)
    return decorator
Check if Contract is allowed by token
372,042
def add_files(self, *filenames, **kw):
    self.create()
    self.ensure_working_tree()
    logger.info("Staging changes to be committed in %s ..", format_path(self.local))
    self.context.execute(*self.get_add_files_command(*filenames))
Include added and/or removed files in the working tree in the next commit. :param filenames: The filenames of the files to include in the next commit (zero or more strings). If no arguments are given all untracked files are added. :param kw: Keyword arguments are ignored (instead of raising :exc:`~exceptions.TypeError`) to enable backwards compatibility with older versions of `vcs-repo-mgr` where the keyword argument `all` was used.
372,043
def run(self, endpoint, data=None, headers=None, extra_options=None):
    # Several literals and the non-GET branch were lost from this row; the
    # '/' join literals and the prepared-request lines are reconstructed
    # assumptions based on the surrounding code.
    extra_options = extra_options or {}
    session = self.get_conn(headers)
    if self.base_url and not self.base_url.endswith('/') and \
            endpoint and not endpoint.startswith('/'):
        url = self.base_url + '/' + endpoint
    else:
        url = (self.base_url or '') + (endpoint or '')
    if self.method == 'GET':
        req = requests.Request(self.method, url, params=data, headers=headers)
    else:
        req = requests.Request(self.method, url, data=data, headers=headers)
    prepped_request = session.prepare_request(req)
    return self.run_and_check(session, prepped_request, extra_options)
Performs the request :param endpoint: the endpoint to be called i.e. resource/v1/query? :type endpoint: str :param data: payload to be uploaded or request parameters :type data: dict :param headers: additional headers to be passed through as a dictionary :type headers: dict :param extra_options: additional options to be used when executing the request i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX or 3XX status codes :type extra_options: dict
372,044
def get_language_pack(locale):
    pack = ALL_LANGUAGE_PACKS.get(locale)
    if not pack:
        # The path-pattern literal was stripped from this row; the format
        # string below is an assumption.
        filename = DIR + '/translations/{}.json'.format(locale)
        try:
            with open(filename) as f:
                pack = json.load(f)
            ALL_LANGUAGE_PACKS[locale] = pack
        except Exception:
            pass
    return pack
Get/cache a language pack Returns the language pack from cache if it exists, caches otherwise >>> get_language_pack('fr')['Dashboards'] "Tableaux de bords"
372,045
def bash_complete(self, path, cmd, *cmds): path = pathlib.Path(path) subcmds = list(self.subcmds.keys()) with path.open() as bcf: print(.format(cmd), file=bcf) print(, file=bcf) print(r, end=, file=bcf) optstr = .join(self._bash_comp_command(None)) print(r.format(optstr), end=, file=bcf) if subcmds: print(.format(.join(subcmds)), file=bcf) print(, file=bcf) for sub in subcmds: optstr = .join(self._bash_comp_command(sub)) print(.format(sub, optstr), file=bcf) condstr = for sub in subcmds: print(condstr, r, sub, , file=bcf) print(r, sub, r, sep=, file=bcf) condstr = print(condstr, r, file=bcf) print(r, file=bcf) if subcmds: print(r, file=bcf) print(r, file=bcf) print(, file=bcf) print(, end=, file=bcf) print(.format(cmd), *cmds, file=bcf)
Write bash complete script. Args: path (path-like): desired path of the complete script. cmd (str): command name that should be completed. cmds (str): extra command names that should be completed.
372,046
def connect(self):
    # The exception-message literals were stripped from this row; the texts
    # below are reconstructed paraphrases, not the verbatim source.
    exceptions = list(__basic_ftp_exceptions__)
    exceptions.append(OSError)
    exceptions.append(ConnectionRefusedError)
    try:
        self.ftp_client().connect(**self.__ftp_connect_args)
        self.ftp_client().login(**self.__ftp_auth_args)
    except tuple(exceptions) as e:
        raise WClientConnectionError('Unable to connect/login to the FTP server') from e
    try:
        path = self.uri().path()
        if path is None:
            path = self.directory_sep()
        self.change_directory(path)
    except WClientCapabilityError as e:
        raise WClientConnectionError(
            'Unable to change to the initial directory') from e
:meth:`.WNetworkClientProto.connect` method implementation
372,047
def aggregate(self, *args, **kwargs):
    obj = self._clone()
    obj._facets = []
    from django.db.models import Avg, Max, Min
    for arg in args:
        if isinstance(arg, (Avg, Max, Min)):
            field, djfield = self._django_to_es_field(arg.lookup)
            if not djfield:
                obj = obj.annotate(field)
            from pyes.facets import StatisticalFacet
            obj = obj.annotate(StatisticalFacet(field, field))
    facets = obj.get_facets()
    result = {}
    for name, values in facets.items():
        for k, v in values.items():
            if k.startswith("_"):
                continue
            # The key format string was stripped; '%s__%s' is an assumption.
            result[u'%s__%s' % (name, k)] = v
    return result
Returns a dictionary containing the calculations (aggregation) over the current queryset If args is present the expression is passed as a kwarg using the Aggregate object's default alias.
372,048
def set_cloexec(fd):
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert fd > 2
    fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
Set the file descriptor `fd` to automatically close on :func:`os.execve`. This has no effect on file descriptors inherited across :func:`os.fork`, they must be explicitly closed through some other means, such as :func:`mitogen.fork.on_fork`.
372,049
async def container_load(self, container_type, params=None, container=None, obj=None):
    if isinstance(obj, IModel):
        obj = obj.val
    if obj is None:
        return NoSetSentinel()
    c_len = len(obj)
    elem_type = params[0] if params else None
    if elem_type is None:
        elem_type = container_type.ELEM_TYPE
    res = container if container else []
    for i in range(c_len):
        try:
            self.tracker.push_index(i)
            fvalue = await self._load_field(elem_type,
                                            params[1:] if params else None,
                                            x.eref(res, i) if container else None,
                                            obj=obj[i])
            self.tracker.pop()
        except Exception as e:
            raise helpers.ArchiveException(e, tracker=self.tracker) from e
        if not container and not isinstance(fvalue, NoSetSentinel):
            res.append(fvalue)
    return res
Loads container of elements from the reader. Supports the container ref. Returns loaded container. :param container_type: :param params: :param container: :param obj: :return:
372,050
def T_dependent_property(self, T):
    if self.method:
        # Try the user-specified method first.
        if self.test_method_validity(T, self.method):
            try:
                prop = self.calculate(T, self.method)
                if self.test_property_validity(prop):
                    return prop
            except:
                pass
    self.sorted_valid_methods = self.select_valid_methods(T)
    for method in self.sorted_valid_methods:
        try:
            prop = self.calculate(T, method)
            if self.test_property_validity(prop):
                self.method = method
                return prop
        except:
            pass
    return None
r'''Method to calculate the property with sanity checking and without specifying a specific method. `select_valid_methods` is used to obtain a sorted list of methods to try. Methods are then tried in order until one succeeds. The methods are allowed to fail, and their results are checked with `test_property_validity`. On success, the used method is stored in the variable `method`. If `method` is set, this method is first checked for validity with `test_method_validity` for the specified temperature, and if it is valid, it is then used to calculate the property. The result is checked for validity, and returned if it is valid. If either of the checks fail, the function retrieves a full list of valid methods with `select_valid_methods` and attempts them as described above. If no methods are found which succeed, returns None. Parameters ---------- T : float Temperature at which to calculate the property, [K] Returns ------- prop : float Calculated property, [`units`]
372,051
def valid_hacluster_config():
    # Most string literals in this row were stripped; the config keys,
    # messages and status values below are reconstructed assumptions based on
    # the docstring and charm-helpers conventions.
    vip = config_get('vip')
    dns = config_get('dns-ha')
    if not(bool(vip) ^ bool(dns)):
        msg = ('HA: Either vip or dns-ha must be set, but not both, in order '
               'to use high availability')
        status_set('blocked', msg)
        raise HAIncorrectConfig(msg)
    if dns:
        dns_settings = ['os-public-hostname', 'os-internal-hostname',
                        'os-admin-hostname', 'os-access-hostname']
        for setting in dns_settings:
            if config_get(setting):
                log('DNS HA: Hostname setting {} = {} is set. DNS HA is valid.'
                    .format(setting, config_get(setting)), level=DEBUG)
                return True
        msg = ('DNS HA: At least one os-*-hostname must be set to use DNS HA')
        status_set('blocked', msg)
        raise HAIncompleteConfig(msg)
    log('VIP HA: VIP is set to {}'.format(vip), level=DEBUG)
    return True
Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname must be set. Note: ha-bindiface and ha-macastport both have defaults and will always be set. We only care that either vip or dns-ha is set. :returns: boolean: valid config returns true. raises: HAIncorrectConfig if settings conflict. raises: HAIncompleteConfig if settings are missing.
372,052
def add(overlay):
    # Command and key literals were stripped from this row; the values below
    # follow Salt's layman execution module and are assumptions. A leaked
    # docstring fragment ("ALL*") has been removed.
    ret = list()
    old_overlays = list_local()
    cmd = 'layman --quietness=0 --add {0}'.format(overlay)
    add_attempt = __salt__['cmd.run_all'](cmd, python_shell=False, stdin='y')
    if add_attempt['retcode'] != 0:
        raise salt.exceptions.CommandExecutionError(add_attempt['stdout'])
    new_overlays = list_local()
    ret = [overlay for overlay in new_overlays if overlay not in old_overlays]
    return ret
Add the given overlay from the cached remote list to your locally installed overlays. Specify 'ALL' to add all overlays from the remote list. Return a list of the new overlay(s) added: CLI Example: .. code-block:: bash salt '*' layman.add <overlay name>
372,053
def process_priority(self, process_priority):
    # The log-message literal was stripped from this row; the text below is
    # an assumption matching the format arguments that survived.
    log.info('{name} ({id}): setting process priority to {priority}'.format(
        name=self._name, id=self._id, priority=process_priority))
    self._process_priority = process_priority
Sets the process priority. :param process_priority: string
372,054
def _AlignUncompressedDataOffset(self, uncompressed_data_offset):
    self._file_object.seek(0, os.SEEK_SET)
    self._decompressor = self._GetDecompressor()
    self._uncompressed_data = b''
    compressed_data_offset = 0
    compressed_data_size = self._file_object.get_size()
    while compressed_data_offset < compressed_data_size:
        read_count = self._ReadCompressedData(self._COMPRESSED_DATA_BUFFER_SIZE)
        if read_count == 0:
            break
        compressed_data_offset += read_count
        if uncompressed_data_offset < self._uncompressed_data_size:
            self._uncompressed_data_offset = uncompressed_data_offset
            break
        uncompressed_data_offset -= self._uncompressed_data_size
Aligns the compressed file with the uncompressed data offset. Args: uncompressed_data_offset (int): uncompressed data offset.
372,055
def filename(self, filename=None, ext=None, set_default=False, use_my_ext=False):
    if filename is None:
        if not hasattr(self, '_filename'):
            self._filename = None
        if self._filename:
            filename = self._filename
        else:
            raise ValueError("A file name is required because no default file name was defined.")
        my_ext = None
    else:
        filename, my_ext = os.path.splitext(filename)
        if set_default:
            self._filename = filename
    if my_ext and use_my_ext:
        ext = my_ext
    if ext is not None:
        if ext.startswith(os.extsep):
            ext = ext[1:]
        if ext != "":
            filename = filename + os.extsep + ext
    return filename
Supply a file name for the class object. Typical uses:: fn = filename() ---> <default_filename> fn = filename('name.ext') ---> 'name' fn = filename(ext='pickle') ---> <default_filename>'.pickle' fn = filename('name.inp','pdf') --> 'name.pdf' fn = filename('foo.pdf',ext='png',use_my_ext=True) --> 'foo.pdf' The returned filename is stripped of the extension (``use_my_ext=False``) and if provided, another extension is appended. Chooses a default if no filename is given. Raises a ``ValueError`` exception if no default file name is known. If ``set_default=True`` then the default filename is also set. ``use_my_ext=True`` lets the suffix of a provided filename take priority over a default ``ext`` extension. .. versionchanged:: 0.3.1 An empty string as *ext* = "" will suppress appending an extension.
372,056
def create(self, request):
    variant_id = request.data.get("variant_id", None)
    if variant_id is not None:
        variant = ProductVariant.objects.get(id=variant_id)
        quantity = int(request.data.get("quantity", 1))
        items, bid = utils.get_basket_items(request)
        in_basket = False
        for item in items:
            if item.variant.id == variant.id:
                item.increase_quantity(quantity)
                in_basket = True
                break
        if not in_basket:
            item = BasketItem(variant=variant, quantity=quantity, basket_id=bid)
            item.save()
        serializer = BasketItemSerializer(self.get_queryset(request), many=True)
        response = Response(data=serializer.data, status=status.HTTP_201_CREATED)
    else:
        # The missing-field name was stripped; 'variant_id' is the obvious value.
        response = Response({"message": "Missing 'variant_id'"},
                            status=status.HTTP_400_BAD_REQUEST)
    return response
Add an item to the basket
372,057
def removeEventListener(self, event: str, listener: _EventListenerType) -> None:
    self._remove_event_listener(event, listener)
Remove an event listener of this node. The listener is removed only when both event type and listener is matched.
372,058
def _read_file(file_name):
    with open(file_name) as config_file:
        data = json.load(config_file)
    return data
Read the file content and load it as JSON. Arguments: file_name (:py:class:`str`): The filename. Returns: :py:class:`dict`: The loaded JSON data. Raises: :py:class:`FileNotFoundError`: If the file is not found.
372,059
def nlmsg_attrlen(nlh, hdrlen): return max(nlmsg_len(nlh) - libnl.linux_private.netlink.NLMSG_ALIGN(hdrlen), 0)
Length of attributes data. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L154 nlh -- Netlink message header (nlmsghdr class instance). hdrlen -- length of family specific header (integer). Returns: Integer.
372,060
def get_file_id(db, user_id, api_path):
    return _get_file(
        db,
        user_id,
        api_path,
        [files.c.id],
        unused_decrypt_func,
    )['id']  # key literal reconstructed from the docstring
Get the value in the 'id' column for the file with the given user_id and path.
372,061
def format_stats(stats):
    stats_collector = {}
    for stat_key, stat_value in stats.items():
        stat_id, stat_field = stat_key.split(":")
        stats_collector.setdefault(stat_id, {"id": stat_id})
        stats_collector[stat_id][stat_field] = stat_value
    # The literals in this synthetic entry were stripped from the row; the
    # key names follow the per-stat layout and the values are assumptions.
    has_stats = {
        'id': 'has_stats',
        'label': 'Has Stats?',
        'value': len(stats_collector) > 0,
        'description': 'Indicates whether there are statistics for this table',
        'include': False,
    }
    stats_collector['has_stats'] = has_stats
    return stats_collector
Given a dictionary following this layout: { 'encoded:label': 'Encoded', 'encoded:value': 'Yes', 'encoded:description': 'Indicates if the column is encoded', 'encoded:include': True, 'size:label': 'Size', 'size:value': 128, 'size:description': 'Size of the table in MB', 'size:include': True, } format_stats will convert the dict into this structure: { 'encoded': { 'id': 'encoded', 'label': 'Encoded', 'value': 'Yes', 'description': 'Indicates if the column is encoded', 'include': True }, 'size': { 'id': 'size', 'label': 'Size', 'value': 128, 'description': 'Size of the table in MB', 'include': True } }
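A worked example using the docstring's own sample keys (note the reconstructed function also appends a synthetic 'has_stats' entry):

    stats = {
        "encoded:label": "Encoded",
        "encoded:value": "Yes",
        "size:label": "Size",
        "size:value": 128,
    }
    result = format_stats(stats)
    # result["encoded"] == {"id": "encoded", "label": "Encoded", "value": "Yes"}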
372,062
def _bstar_1effect(beta, alpha, yTBy, yTBX, yTBM, XTBX, XTBM, MTBM):
    # add, clip, dot, full and inf presumably came from a module-level numpy
    # import in the original source; imported here to be self-contained.
    from numpy import add, clip, dot, full, inf, sum
    from numpy_sugar import epsilon
    from numpy_sugar.linalg import dotd

    r = full(MTBM[0].shape[0], yTBy)
    r -= 2 * add.reduce([dot(i, beta) for i in yTBX])
    r -= 2 * add.reduce([i * alpha for i in yTBM])
    r += add.reduce([dotd(beta.T, dot(i, beta)) for i in XTBX])
    r += add.reduce([dotd(beta.T, i * alpha) for i in XTBM])
    r += add.reduce([sum(alpha * i * beta, axis=0) for i in XTBM])
    r += add.reduce([alpha * i.ravel() * alpha for i in MTBM])
    return clip(r, epsilon.tiny, inf)
Same as :func:`_bstar_set` but for single-effect.
372,063
def _dir_additions(self):
    additions = {c for c in self._info_axis.unique(level=0)[:100]
                 if isinstance(c, str) and c.isidentifier()}
    return super()._dir_additions().union(additions)
add the string-like attributes from the info_axis. If info_axis is a MultiIndex, its first-level values are used.
372,064
def crack(ciphertext, *fitness_functions, min_key=0, max_key=26,
          shift_function=shift_case_english):
    if min_key >= max_key:
        raise ValueError("min_key cannot exceed max_key")
    decryptions = []
    for key in range(min_key, max_key):
        plaintext = decrypt(key, ciphertext, shift_function=shift_function)
        decryptions.append(Decryption(plaintext, key,
                                      score(plaintext, *fitness_functions)))
    return sorted(decryptions, reverse=True)
Break ``ciphertext`` by enumerating keys between ``min_key`` and ``max_key``. Example: >>> decryptions = crack("KHOOR", fitness.english.quadgrams) >>> print(''.join(decryptions[0].plaintext)) HELLO Args: ciphertext (iterable): The symbols to decrypt *fitness_functions (variable length argument list): Functions to score decryption with Keyword Args: min_key (int): Key to start with max_key (int): Key to stop at (exclusive) shift_function (function(shift, symbol)): Shift function to use Returns: Sorted list of decryptions Raises: ValueError: If min_key exceeds max_key ValueError: If no fitness_functions are given
372,065
def get_operation_mtf_dimension_names(self, operation_name):
    mtf_dimension_names = set()
    for tensor_name in self.get_operation_input_names(operation_name):
        mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(tensor_name))
    for tensor_name in self.get_operation_output_names(operation_name):
        mtf_dimension_names.update(self.get_tensor_mtf_dimension_names(tensor_name))
    return mtf_dimension_names
The Mesh TensorFlow dimensions associated with an operation. Args: operation_name: a string, name of an operation in the graph. Returns: a set(string), the names of Mesh TensorFlow dimensions.
372,066
def add_flatten(self, name, mode, input_name, output_name):
    spec = self.spec
    nn_spec = self.nn_spec
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)
    spec_layer_params = spec_layer.flatten
    # The enum-name literals were stripped from this row; 'CHANNEL_FIRST' and
    # 'CHANNEL_LAST' are taken from the docstring's description of the modes.
    if mode == 0:
        spec_layer_params.mode = \
            _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_FIRST')
    elif mode == 1:
        spec_layer_params.mode = \
            _NeuralNetwork_pb2.FlattenLayerParams.FlattenOrder.Value('CHANNEL_LAST')
    else:
        raise NotImplementedError('Unknown flatten mode %d' % mode)
Add a flatten layer. Only flattens the channel, height and width axis. Leaves the sequence axis as is. Parameters ---------- name: str The name of this layer. mode: int - If mode == 0, the flatten layer is in CHANNEL_FIRST mode. - If mode == 1, the flatten layer is in CHANNEL_LAST mode. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. See Also -------- add_permute, add_reshape
372,067
def load_module_functions(module):
    module_functions = {}
    for name, item in vars(module).items():
        if validator.is_function(item):
            module_functions[name] = item
    return module_functions
load python module functions. Args: module: python module Returns: dict: functions mapping for specified python module { "func1_name": func1, "func2_name": func2 }
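A small self-contained sketch (it assumes validator.is_function accepts plain Python functions):

    import types

    mod = types.ModuleType("demo")
    exec("def greet(name): return 'hi ' + name", mod.__dict__)
    functions = load_module_functions(mod)
    print(functions["greet"]("bob"))  # -> 'hi bob'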
372,068
def save_dataset(self, dataset, filename=None, fill_value=None, compute=True, **kwargs):
    # The '%s' placeholder was missing from this row's message, which would
    # make the %-formatting fail; it has been restored.
    raise NotImplementedError(
        "Writer '%s' has not implemented dataset saving" % (self.name,))
Saves the ``dataset`` to a given ``filename``. This method must be overloaded by the subclass. Args: dataset (xarray.DataArray): Dataset to save using this writer. filename (str): Optionally specify the filename to save this dataset to. If not provided then `filename` which can be provided to the init method will be used and formatted by dataset attributes. fill_value (int or float): Replace invalid values in the dataset with this fill value if applicable to this writer. compute (bool): If `True` (default), compute and save the dataset. If `False` return either a `dask.delayed.Delayed` object or tuple of (source, target). See the return values below for more information. **kwargs: Other keyword arguments for this particular writer. Returns: Value returned depends on `compute`. If `compute` is `True` then the return value is the result of computing a `dask.delayed.Delayed` object or running `dask.array.store`. If `compute` is `False` then the returned value is either a `dask.delayed.Delayed` object that can be computed using `delayed.compute()` or a tuple of (source, target) that should be passed to `dask.array.store`. If target is provided then the caller is responsible for calling `target.close()` if the target has this method.
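A minimal sketch of the required override; the subclass and its behavior are assumptions for illustration, not part of the library:

    class SimpleTextWriter(Writer):  # hypothetical subclass
        def save_dataset(self, dataset, filename=None, fill_value=None,
                         compute=True, **kwargs):
            target = filename or "output.txt"
            with open(target, "w") as fobj:
                fobj.write(repr(dataset.values))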
372,069
def sentence_starts(self):
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    return self.starts(SENTENCES)
The list of start positions representing ``sentences`` layer elements.
372,070
def lookup_character_keycode(self, character):
    keysym = Xlib.XK.string_to_keysym(character)
    if not keysym:
        try:
            # The attribute-prefix literal was stripped; 'XK_' is an assumption.
            keysym = getattr(Xlib.keysymdef.xkb, 'XK_' + character, 0)
        except:
            keysym = 0
    if not keysym:
        keysym = Xlib.XK.string_to_keysym(KEYSYMS[character])
    return self.display.keysym_to_keycode(keysym)
Looks up the keysym for the character then returns the keycode mapping for that keysym.
372,071
def check_register(self, arg): self.check_parameter(arg) match = re.search(self.REGISTER_REGEX, arg) if match is None: raise iarm.exceptions.RuleError("Parameter {} is not a register".format(arg)) try: r_num = int(match.groups()[0]) except ValueError: r_num = int(match.groups()[0], 16) except TypeError: if arg in : return 14 elif arg in : return 13 elif arg in : return 7 else: raise if r_num > self._max_registers: raise iarm.exceptions.RuleError( "Register {} is greater than defined registers of {}".format(arg, self._max_registers)) return r_num
Is the parameter a register in the form of 'R<d>', and if so is it within the bounds of registers defined Raises an exception if 1. The parameter is not in the form of 'R<d>' 2. <d> is outside the range of registers defined in the init value registers or _max_registers :param arg: The parameter to check :return: The number of the register
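A behavioral sketch per the docstring (it assumes an object exposing this method; the exact exception text will differ):

    checker.check_register("R3")    # -> 3
    checker.check_register("0x20")  # raises iarm.exceptions.RuleError (not 'R<d>')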
372,072
def save_module(self, obj):
    mod_name = obj.__name__
    # The attribute-name literal was stripped; '__file__' is an assumption
    # consistent with how cloudpickle-style code detects non-dynamic modules.
    if hasattr(obj, '__file__'):
        is_dynamic = False
    else:
        try:
            _find_module(mod_name)
            is_dynamic = False
        except ImportError:
            is_dynamic = True
    self.modules.add(obj)
    if is_dynamic:
        self.save_reduce(dynamic_subimport, (obj.__name__, vars(obj)), obj=obj)
    else:
        self.save_reduce(subimport, (obj.__name__,), obj=obj)
Save a module as an import
372,073
def idle_task(self):
    # The module-name literal was stripped; 'console' is inferred from the
    # menu_added_console flag and is an assumption.
    if self.module('console') is not None and not self.menu_added_console:
        self.menu_added_console = True
        self.module('console').add_menu(self.menu)
called on idle
372,074
def profile_function(self):
    # The dict-key literals in this row were stripped; the names below are
    # assumptions chosen to match the values they hold.
    with _CodeHeatmapCalculator() as prof:
        result = self._run_object(*self._run_args, **self._run_kwargs)
    code_lines, start_line = inspect.getsourcelines(self._run_object)
    source_lines = []
    for line in code_lines:
        source_lines.append(('line', start_line, line))
        start_line += 1
    filename = os.path.abspath(inspect.getsourcefile(self._run_object))
    heatmap = prof.heatmap[filename]
    run_time = sum(time for time in heatmap.values())
    return {
        'objectName': self._object_name,
        'runTime': run_time,
        'result': result,
        'timestamp': int(time.time()),
        'heatmaps': [{
            'name': self._object_name,
            'heatmap': heatmap,
            'executionCount': prof.execution_count[filename],
            'srcCode': source_lines,
            'runTime': run_time,
        }],
    }
Calculates heatmap for function.
372,075
def _connect(obj):
    from .columns import MODELS
    # The MODELS key literal was stripped; 'Model' is an assumption. The
    # '_conn' and 'CONN' names are recovered from the surviving return lines.
    if isinstance(obj, MODELS['Model']):
        obj = obj.__class__
    if hasattr(obj, '_conn'):
        return obj._conn
    if hasattr(obj, 'CONN'):
        return obj.CONN
    return get_connection()
Tries to get the _conn attribute from a model. Barring that, gets the global default connection using other methods.
372,076
def _add_tabular_layer(self, tabular_layer, layer_name, save_style=False):
    # The filename-extension, encoding and driver literals were stripped from
    # this row; '.csv', 'utf-8', 'CSV' and '.qml' are reconstructed assumptions.
    output = QFileInfo(self.uri.filePath(layer_name + '.csv'))
    QgsVectorFileWriter.writeAsVectorFormat(
        tabular_layer,
        output.absoluteFilePath(),
        'utf-8',
        QgsCoordinateTransform(),
        'CSV')
    if save_style:
        style_path = QFileInfo(self.uri.filePath(layer_name + '.qml'))
        tabular_layer.saveNamedStyle(style_path.absoluteFilePath())
    assert output.exists()
    return True, output.baseName()
Add a tabular layer to the folder. :param tabular_layer: The layer to add. :type tabular_layer: QgsVectorLayer :param layer_name: The name of the layer in the datastore. :type layer_name: str :param save_style: If we have to save a QML too. Default to False. :type save_style: bool :returns: A two-tuple. The first element will be True if we could add the layer to the datastore. The second element will be the layer name which has been used or the error message. :rtype: (bool, str) .. versionadded:: 4.0
372,077
def set_composition(self, composition_id=None):
    if composition_id is None:
        raise NullArgument()
    # The metadata and map key literals were stripped from this row; the
    # names below are assumptions consistent with the method's purpose.
    metadata = Metadata(**settings.METADATA['composition_id'])
    if metadata.is_read_only():
        raise NoAccess()
    if self._is_valid_input(composition_id, metadata, array=False):
        self._my_map['compositionId'] = str(composition_id)
    else:
        raise InvalidArgument()
Sets the composition. :param composition_id: a composition :type composition_id: ``osid.id.Id`` :raise: ``InvalidArgument`` -- ``composition_id`` is invalid :raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true`` :raise: ``NullArgument`` -- ``composition_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
372,078
def mel_to_hz(mels, htk=False):
    mels = np.asanyarray(mels)
    if htk:
        return 700.0 * (10.0**(mels / 2595.0) - 1.0)
    # Fill in the linear scale.
    f_min = 0.0
    f_sp = 200.0 / 3
    freqs = f_min + f_sp * mels
    # And now the nonlinear scale.
    min_log_hz = 1000.0
    min_log_mel = (min_log_hz - f_min) / f_sp
    logstep = np.log(6.4) / 27.0
    if mels.ndim:
        log_t = (mels >= min_log_mel)
        freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel))
    elif mels >= min_log_mel:
        freqs = min_log_hz * np.exp(logstep * (mels - min_log_mel))
    return freqs
Convert mel bin numbers to frequencies Examples -------- >>> librosa.mel_to_hz(3) 200. >>> librosa.mel_to_hz([1,2,3,4,5]) array([ 66.667, 133.333, 200. , 266.667, 333.333]) Parameters ---------- mels : np.ndarray [shape=(n,)], float mel bins to convert htk : bool use HTK formula instead of Slaney Returns ------- frequencies : np.ndarray [shape=(n,)] input mels in Hz See Also -------- hz_to_mel
372,079
def set_one_time_boot(self, device):
    # The message literals were stripped from this row; the texts below are
    # reconstructed paraphrases, not the verbatim driver strings.
    sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
    if device.upper() not in DEVICE_COMMON_TO_REDFISH:
        msg = (self._('Invalid one-time boot device "%(device)s" specified.')
               % {'device': device})
        raise exception.IloInvalidInputError(msg)
    try:
        sushy_system.update_persistent_boot([device], persistent=False)
    except sushy.exceptions.SushyError as e:
        msg = (self._('Unable to set one-time boot device "%(device)s". '
                      'Error: %(error)s')
               % {'device': device, 'error': str(e)})
        LOG.debug(msg)
        raise exception.IloError(msg)
Configures a single boot from a specific device. :param device: Device to be set as a one time boot device :raises: IloError, on an error from iLO. :raises: IloInvalidInputError, if the given input is not valid.
372,080
def get_all(self, security):
    # The URL and tag-name literals were stripped from this row; the values
    # below are reconstructed assumptions for the Google Finance page scrape,
    # and the error message's missing negation has been restored.
    url = 'http://www.google.com/finance?q=%s' % security
    page = self._request(url)
    soup = BeautifulSoup(page)
    snapData = soup.find("table", {"class": "snap-data"})
    if snapData is None:
        raise UfException(Errors.STOCK_SYMBOL_ERROR,
                          "Can't find data for stock %s, security error?" % security)
    data = {}
    for row in snapData.findAll('tr'):
        keyTd, valTd = row.findAll('td')
        data[keyTd.getText()] = valTd.getText()
    return data
Get all available quote data for the given ticker security. Returns a dictionary.
372,081
def make_cookies(self, response, request):
    # Get cookie headers for both the RFC 2965 and Netscape protocols.
    headers = response.info()
    rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
    ns_hdrs = headers.get_all("Set-Cookie", [])
    rfc2965 = self._policy.rfc2965
    netscape = self._policy.netscape
    if ((not rfc2965_hdrs and not ns_hdrs) or
            (not ns_hdrs and not rfc2965) or
            (not rfc2965_hdrs and not netscape) or
            (not netscape and not rfc2965)):
        return []  # no relevant cookie headers: quick exit
    try:
        cookies = self._cookies_from_attrs_set(
            split_header_words(rfc2965_hdrs), request)
    except Exception:
        _warn_unhandled_exception()
        cookies = []
    if ns_hdrs and netscape:
        try:
            ns_cookies = self._cookies_from_attrs_set(
                parse_ns_headers(ns_hdrs), request)
        except Exception:
            _warn_unhandled_exception()
            ns_cookies = []
        self._process_rfc2109_cookies(ns_cookies)
        # Drop Netscape cookies that duplicate RFC 2965 cookies.
        if rfc2965:
            lookup = {}
            for cookie in cookies:
                lookup[(cookie.domain, cookie.path, cookie.name)] = None

            def no_matching_rfc2965(ns_cookie, lookup=lookup):
                key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
                return key not in lookup

            ns_cookies = filter(no_matching_rfc2965, ns_cookies)
        if ns_cookies:
            cookies.extend(ns_cookies)
    return cookies
Return sequence of Cookie objects extracted from response object.
372,082
def replace(self, str1, str2):
    if str2 != self._replacements.get(str1, None):
        self._replacements[str1] = str2
        self.changed(code_changed=True)
Set verbatim code replacement It is strongly recommended to use function['$foo'] = 'bar' where possible because template variables are less likely to change than the code itself in future versions of vispy. Parameters ---------- str1 : str String to replace str2 : str String to replace str1 with
372,083
def ext_pillar(minion_id, pillar, url, with_grains=False):
    # NOTE: several string literals below were stripped in extraction; the
    # '%s' placeholder, '<grain>' pattern, __salt__ keys, and log messages
    # are reconstructed to match Salt's http_json-style ext_pillar.
    url = url.replace('%s', _quote(minion_id))
    grain_pattern = r'<(?P<grain_name>.*?)>'

    if with_grains:
        # Substitute each <grain_name> token in the url with the
        # url-encoded value of that grain.
        for match in re.finditer(grain_pattern, url):
            grain_name = match.group('grain_name')
            grain_value = __salt__['grains.get'](grain_name, None)
            if not grain_value:
                log.error("Unable to get minion '%s' grain: %s",
                          minion_id, grain_name)
                return {}
            grain_value = _quote(six.text_type(grain_value))
            url = re.sub('<{0}>'.format(grain_name), grain_value, url)

    log.debug('Getting url: %s', url)
    data = __salt__['http.query'](url=url, decode=True, decode_type='json')

    if 'dict' in data:
        return data['dict']

    log.error("Error on minion '%s' http query: %s\nMore Info:\n",
              minion_id, url)
    for key in data:
        log.error('%s: %s', key, data[key])
    return {}
Read pillar data from HTTP response. :param str url: Url to request. :param bool with_grains: Whether to substitute strings in the url with their grain values. :return: A dictionary of the pillar data to add. :rtype: dict
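A direct-call sketch (in practice Salt invokes this from the master's ext_pillar configuration); the minion id, URL, and grain name are hypothetical:

pillar = ext_pillar('web01.example.com', {},
                    'https://pillar.example.com/%s/<roles>',
                    with_grains=True)
# '%s' is replaced by the minion id, '<roles>' by that minion's grain value.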
372,084
import os
import zipfile

import rarfile


def get_comic_format(filename):
    image_format = None
    filename_ext = os.path.splitext(filename)[-1].lower()
    if filename_ext in _COMIC_EXTS:
        if zipfile.is_zipfile(filename):
            image_format = _CBZ_FORMAT
        elif rarfile.is_rarfile(filename):
            image_format = _CBR_FORMAT
    return image_format
Return the comic format if it is a comic archive.
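Usage is a single call; the filenames here are hypothetical:

print(get_comic_format('issue_001.cbz'))  # comic format constant for zip archives
print(get_comic_format('notes.txt'))      # None: not a comic extension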
372,085
def pause(self):
    if self.isPlaying is True:
        self._execute("pause")
        self._changePlayingState(False)
Pauses playback
372,086
def _apply_rules_no_recurse(expr, rules):
    try:
        items = rules.items()
    except AttributeError:
        items = enumerate(rules)
    for key, (pat, replacement) in items:
        matched = pat.match(expr)
        if matched:
            try:
                return replacement(**matched)
            except CannotSimplify:
                pass
    return expr
Non-recursively match expr against all rules
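A self-contained sketch of the contract the helper expects: each rule is a (pattern, replacement) pair where pattern.match() returns a dict of bindings (or a falsy value) and the replacement callable may raise CannotSimplify to decline. All names below are illustrative stand-ins, not the library's own types:

class CannotSimplify(Exception):
    pass

class Pattern:
    """Matches strings of the form '<op> <arg>' for a fixed op."""
    def __init__(self, op):
        self.op = op

    def match(self, expr):
        head, _, arg = expr.partition(' ')
        return {'arg': arg} if head == self.op else None

rules = {'unwrap_neg': (Pattern('neg'), lambda arg: arg)}
print(_apply_rules_no_recurse('neg x', rules))  # -> 'x'
print(_apply_rules_no_recurse('pos x', rules))  # -> 'pos x' (unchanged)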
372,087
@classmethod
def get(cls, id_):
    with db.session.no_autoflush:
        query = cls.dbmodel.query.filter_by(id=id_)
        try:
            model = query.one()
        except NoResultFound:
            raise WorkflowsMissingObject(
                "No object for id {0}".format(id_))
        return cls(model)
Return a workflow object from id.
372,088
def get_numwords():
    # NOTE: the literals for the seed dict and the regex were stripped in
    # extraction; 'and'/'a'/'an' and the \b-delimited alternation below are
    # reconstructions.
    numwords = {'and': (1, 0), 'a': (1, 1), 'an': (1, 1)}
    for idx, word in enumerate(UNITS):
        numwords[word] = (1, idx)
    for idx, word in enumerate(TENS):
        numwords[word] = (1, idx * 10)
    for idx, word in enumerate(SCALES):
        # 'hundred' -> 10**2, then 10**3, 10**6, ... for larger scales
        numwords[word] = (10 ** (idx * 3 or 2), 0)
    all_numbers = ur'|'.join(ur'\b%s\b' % i for i in numwords.keys() if i)
    return all_numbers, numwords
Build the regex alternation and the word -> (scale, increment) mapping used to convert number words to integers in a given text.
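A sketch assuming the module-level UNITS, TENS, and SCALES word lists; their contents here are hypothetical stand-ins:

UNITS = ['zero', 'one', 'two', 'three', 'four', 'five', 'six',
         'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve']
TENS = ['', '', 'twenty', 'thirty', 'forty', 'fifty']
SCALES = ['hundred', 'thousand', 'million']

all_numbers, numwords = get_numwords()
print(numwords['twenty'])    # (1, 20): adds 20 at scale 1
print(numwords['thousand'])  # (1000, 0): multiplies the running total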
372,089
def remove_child_vault(self, vault_id, child_id):
    if self._catalog_session is not None:
        return self._catalog_session.remove_child_catalog(catalog_id=vault_id,
                                                          child_id=child_id)
    return self._hierarchy_session.remove_child(id_=vault_id,
                                                child_id=child_id)
Removes a child from a vault. arg: vault_id (osid.id.Id): the ``Id`` of a vault arg: child_id (osid.id.Id): the ``Id`` of the child raise: NotFound - ``vault_id`` not parent of ``child_id`` raise: NullArgument - ``vault_id`` or ``child_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
372,090
async def pixy_set_servos(self, s0, s1):
    data = [PrivateConstants.PIXY_SET_SERVOS,
            s0 & 0x7f, (s0 >> 7) & 0x7f,
            s1 & 0x7f, (s1 >> 7) & 0x7f]
    await self._send_sysex(PrivateConstants.PIXY_CONFIG, data)
Sends the setServos Pixy command. This method sets the pan/tilt servos that are plugged into Pixy's two servo ports. :param s0: value 0 to 1000 :param s1: value 0 to 1000 :returns: No return value.
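An asyncio usage sketch; board stands in for a hypothetical pymata-style instance exposing this coroutine:

import asyncio

async def sweep(board):
    # Center both servos, then pan fully left and right.
    await board.pixy_set_servos(500, 500)
    for s0 in (0, 1000):
        await asyncio.sleep(0.5)
        await board.pixy_set_servos(s0, 500)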
372,091
from typing import Any, Dict, List

import requests


def create_observations(params: Dict[str, Dict[str, Any]],
                        access_token: str) -> List[Dict[str, Any]]:
    response = requests.post(
        url="{base_url}/observations.json".format(base_url=INAT_BASE_URL),
        json=params,
        headers=_build_auth_header(access_token),
    )
    response.raise_for_status()
    return response.json()
Create a single observation, or several if passed an array. :param params: :param access_token: the access token, as returned by :func:`get_access_token()` :return: iNaturalist's JSON response, as a Python object :raise: requests.HTTPError, if the call is not successful. iNaturalist returns an error 422 (unprocessable entity) if it rejects the observation data (for example an observation date in the future or a latitude > 90). In that case the exception's `response` attribute gives details about the errors. allowed params: see https://www.inaturalist.org/pages/api+reference#post-observations Example: params = {'observation': {'species_guess': 'Pieris rapae'}, } TODO investigate: according to the docs, we should be able to pass multiple observations (in an array, renaming 'observation' to 'observations'), but as far as I saw they are not created (although a status of 200 is returned)
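A sketch built from the docstring's own example parameters; the credentials passed to get_access_token are placeholders, and the shape of the returned list is an assumption based on the List[Dict] return annotation:

from requests import HTTPError

token = get_access_token('username', 'password', 'app_id', 'app_secret')
params = {'observation': {'species_guess': 'Pieris rapae'}}
try:
    result = create_observations(params=params, access_token=token)
    print(result[0]['id'])  # id of the newly created observation
except HTTPError as err:
    print(err.response.json())  # 422: details of the rejected data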
372,092
def get_stripe_dashboard_url(self):
    if not self.stripe_dashboard_item_name or not self.id:
        return ""
    else:
        return "{base_url}{item}/{id}".format(
            base_url=self._get_base_stripe_dashboard_url(),
            item=self.stripe_dashboard_item_name,
            id=self.id,
        )
Get the stripe dashboard url for this object.
372,093
import functools


def simple_logging_config(func):
    @functools.wraps(func)
    def new_func(self, *args, **kwargs):
        if use_simple_logging(kwargs):
            # NOTE: the key name and error text were stripped in extraction;
            # 'logging_config' and the message below are reconstructions.
            if 'logging_config' in kwargs:
                raise ValueError(
                    'Cannot combine simple logging options with an explicit '
                    'logging_config.')
            _change_logging_kwargs(kwargs)
        return func(self, *args, **kwargs)
    return new_func
Decorator to allow a simple logging configuration. This encompasses giving a `log_folder` and `logger_names`, as well as `log_levels`.
372,094
def is_elected_leader(resource):
    try:
        return juju_is_leader()
    except NotImplementedError:
        # NOTE: the log messages below are reconstructions; the original
        # string literals were stripped in extraction.
        log('Juju leadership election feature not enabled; falling back to '
            'legacy leader detection.', level=WARNING)

    if is_clustered():
        if not is_crm_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            log('Deferring action to oldest service unit.', level=INFO)
            return False
    return True
Returns True if the charm executing this is the elected cluster leader. It relies on the following mechanisms to determine leadership: 1. If juju is sufficiently new and leadership election is supported, the is_leader command will be used. 2. If the charm is part of a corosync cluster, call corosync to determine leadership. 3. If the charm is not part of a corosync cluster, the leader is determined as being "the alive unit with the lowest unit number". In other words, the oldest surviving unit.
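A usage sketch; the corosync resource name (used only by fallback mechanism 2) and migrate_database are hypothetical:

if is_elected_leader('res_ks_vip'):
    # Only the leader runs one-off cluster tasks such as database migrations.
    migrate_database()
else:
    log('Not the elected leader; skipping migration.', level=INFO)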
372,095
def sibling(self, name: InstanceName) -> "ObjectMember":
    ssn = self.parinst._member_schema_node(name)
    try:
        sibs = self.siblings.copy()
        newval = sibs.pop(name)
        sibs[self.name] = self.value
        return ObjectMember(name, sibs, newval, self.parinst, ssn,
                            self.timestamp)
    except KeyError:
        raise NonexistentInstance(self.json_pointer(),
                                  f"member {name}") from None
Return an instance node corresponding to a sibling member. Args: name: Instance name of the sibling member. Raises: NonexistentSchemaNode: If member `name` is not permitted by the schema. NonexistentInstance: If sibling member `name` doesn't exist.
372,096
def data_from_archive(self):
    archive_data = super(SetupPyMetadataExtractor, self).data_from_archive
    # NOTE: the dict keys below were stripped in extraction; the names are
    # reconstructed from the attributes they store and may differ from the
    # originals.
    archive_data['has_packages'] = self.has_packages
    archive_data['packages'] = self.packages
    archive_data['has_bundled_egg_info'] = self.has_bundled_egg_info
    sphinx_dir = self.sphinx_dir
    if sphinx_dir:
        archive_data['sphinx_dir'] = "/".join(sphinx_dir.split("/")[1:])
        archive_data['doc_build_deps'].append(
            ['BuildRequires', self.name_convertor.rpm_name(
                "sphinx", self.base_python_version)])
    return archive_data
Appends setup.py specific metadata to archive_data.
372,097
def count_frames(frame, count_start=0):
    count = -count_start
    while frame:
        count += 1
        frame = frame.f_back
    return count
Return a count of the number of frames
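A quick sketch seeding the walk with the interpreter's current frame via the standard sys._getframe():

import sys

def call_depth():
    # Frames above this helper, not counting the helper itself.
    return count_frames(sys._getframe(), count_start=1)

print(call_depth())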
372,098
def search(self, filterstr, attrlist):
    return self._paged_search_ext_s(self.settings.BASE,
                                    ldap.SCOPE_SUBTREE,
                                    filterstr=filterstr,
                                    attrlist=attrlist,
                                    page_size=self.settings.PAGE_SIZE)
Query the configured LDAP server.
372,099
def _handle_reference_cable(self, reference, handler, reifier):
    cable_ref = psis.cable_psi(reference.value)
    self._assoc(psis.REFERENCES_TYPE,
                psis.SOURCE_TYPE, self._cable_psi,
                psis.TARGET_TYPE, cable_ref,
                reifier)
    handler.startTopic(cable_ref)
    handler.isa(psis.CABLE_TYPE)
    self._name(reference.value)
    handler.endTopic()
    self._sent_by(psis.origin_psi_by_cable_id(reference.value), cable_ref)
Handle a reference to another cable: create a topic for the referenced cable and a 'references' association between the current cable and the referenced one.