Dataset columns:
Unnamed: 0 — int64, values 0 to 389k
code — string, lengths 26 to 79.6k characters
docstring — string, lengths 1 to 46.9k characters
371,600
def update_compaction(model):
    logger.debug("Checking %s for compaction differences", model)
    table = get_table_settings(model)
    existing_options = table.options.copy()
    existing_compaction_strategy = existing_options['compaction_strategy_class']
    existing_options = json.loads(existing_options['compaction_strategy_options'])
    desired_options = get_compaction_options(model)
    desired_compact_strategy = desired_options.get('class', SizeTieredCompactionStrategy)
    desired_options.pop('class', None)
    do_update = False
    if desired_compact_strategy not in existing_compaction_strategy:
        do_update = True
    for k, v in desired_options.items():
        val = existing_options.pop(k, None)
        if val != v:
            do_update = True
    if do_update:
        options = get_compaction_options(model)
        # CQL expects single-quoted strings inside the compaction map.
        options = json.dumps(options).replace('"', "'")
        cf_name = model.column_family_name()
        query = "ALTER TABLE {} with compaction = {}".format(cf_name, options)
        logger.debug(query)
        execute(query)
        return True
    return False
Updates the compaction options for the given model if necessary. :param model: The model to update. :return: `True`, if the compaction options were modified in Cassandra, `False` otherwise. :rtype: bool
371,601
def load_history(self) -> List["IterationRecord"]:
    if path.isfile(self.history_filename):
        with open(self.history_filename, "r") as f:
            try:
                dicts = json.load(f)
            except json.decoder.JSONDecodeError as e:
                self.log.error(f"Got error \n{e}\n decoding JSON history, overwriting it.\n"
                               f"Former history available in {self.history_filename}.bak")
                copyfile(self.history_filename, f"{self.history_filename}.bak")
                return []
        history: List[IterationRecord] = []
        for hdict_pre in dicts:
            if "_type" in hdict_pre and hdict_pre["_type"] == IterationRecord.__name__:
                hdict = _repair(hdict_pre)
                record = IterationRecord.from_dict(hdict)
                history.append(record)
            else:
                item = IterationRecord()
                extra_keys = hdict_pre.pop("extra_keys", {})
                item.extra_keys = extra_keys
                hdict_obj = TweetRecord.from_dict(hdict_pre)
                item.timestamp = hdict_obj.timestamp
                item.output_records["birdsite"] = hdict_obj
                history.append(item)
        self.log.debug(f"Loaded history:\n {history}")
        return history
    else:
        return []
Load messaging history from disk to self. :returns: List of iteration records comprising history.
371,602
def safe_unicode(e):
    try:
        return unicode(e)
    except UnicodeError:
        pass
    try:
        return py3compat.str_to_unicode(str(e))
    except UnicodeError:
        pass
    try:
        return py3compat.str_to_unicode(repr(e))
    except UnicodeError:
        pass
    return u'Unrecoverably corrupt evalue'
unicode(e) with various fallbacks. Used for exceptions, which may not be safe to call unicode() on.
371,603
def filter(self, **search_args):
    search_args = search_args or {}
    raw_resources = []
    for url, paginator_params in self.paginator.get_urls(self.get_collection_endpoint()):
        search_args.update(paginator_params)
        response = self.paginator.process_response(self.send(url, "get", params=search_args))
        data = self.client.get_response_data(response, self.Meta.parse_json)
        if self.json_collection_attribute is not None:
            raw_resources += data[self.json_collection_attribute]
        else:
            raw_resources += data
    resources = []
    for raw_resource in raw_resources:
        try:
            resource = self.resource_class(self.client)
        except (ValueError, TypeError):
            continue
        else:
            resource.update_from_dict(raw_resource)
            resources.append(resource)
    return resources
Get a filtered list of resources :param search_args: To be translated into ?arg1=value1&arg2=value2... :return: A list of resources
371,604
def link_android(self, path, pkg):
    bundle_id = self.ctx['bundle_id']
    pkg_root = join(path, pkg)
    # The bulk of this function (editing settings.gradle, build.gradle and
    # MainApplication.java, as described in the docstring) is missing from
    # the extracted source; only this fragment survives.
    f.write(main_application_java)
    raise
Links the android project to this library. 1. Includes this project's directory in the app's android/settings.gradle It adds: include ':<project-name>' project(':<project-name>').projectDir = new File( rootProject.projectDir, '../packages/<project-name>/android') 2. Adds this project as a dependency to the android/app/build.gradle It adds: compile project(':<project-name>') to the dependencies. 3. If present, adds the import and package statement to the android/app/src/main/java/<bundle/id>/MainApplication.java
371,605
def clear_cache(self):
    errors = []
    for rdir in (self.cache_root, self.file_list_cachedir):
        if os.path.exists(rdir):
            try:
                shutil.rmtree(rdir)
            except OSError as exc:
                errors.append('Unable to remove {0}: {1}'.format(rdir, exc))
    return errors
Completely clear cache
371,606
def load_pdb(self, pdb_id, mapped_chains=None, pdb_file=None, file_type=None,
             is_experimental=True, set_as_representative=False,
             representative_chain=None, force_rerun=False):
    if self.structures.has_id(pdb_id):
        if force_rerun:
            existing = self.structures.get_by_id(pdb_id)
            self.structures.remove(existing)
        else:
            log.debug('{}: already present in list of structures'.format(pdb_id))
            pdb = self.structures.get_by_id(pdb_id)
            if pdb_file:
                pdb.load_structure_path(pdb_file, file_type)
            if mapped_chains:
                pdb.add_mapped_chain_ids(mapped_chains)
    if not self.structures.has_id(pdb_id):
        if is_experimental:
            pdb = PDBProp(ident=pdb_id, mapped_chains=mapped_chains,
                          structure_path=pdb_file, file_type=file_type)
        else:
            pdb = StructProp(ident=pdb_id, mapped_chains=mapped_chains,
                             structure_path=pdb_file, file_type=file_type)
        self.structures.append(pdb)
    if set_as_representative:
        pdb.parse_structure()
        self._representative_structure_setter(structprop=pdb,
                                              keep_chain=representative_chain,
                                              force_rerun=force_rerun)
    return self.structures.get_by_id(pdb_id)
Load a structure ID and optional structure file into the structures attribute. Args: pdb_id (str): PDB ID mapped_chains (str, list): Chain ID or list of IDs which you are interested in pdb_file (str): Path to PDB file file_type (str): Type of PDB file is_experimental (bool): If this structure file is experimental set_as_representative (bool): If this structure should be set as the representative structure representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures Returns: PDBProp: The object that is now contained in the structures attribute
371,607
def update_default_output_dir(self):
    if self.scenario_directory_radio.isChecked():
        self.output_directory.setText(self.source_directory.text())
Update output dir if set to default.
371,608
def handle_events(self):
    for event in sys.stdin:
        # The i3bar event stream opens with '[' (an infinite JSON array);
        # subsequent events are comma-prefixed JSON objects.
        if event.startswith('['):
            continue
        name = json.loads(event.lstrip(','))['name']
        for obj in self.loader.objects:
            if obj.output_options['name'] == name:
                obj.on_click(json.loads(event.lstrip(',')))
An event handler that processes events from stdin and calls the on_click function of the respective object. This function is run in another thread, so as to not stall the main thread.
371,609
def _ParsePlistKeyValue(self, knowledge_base, name, value):
    if not knowledge_base.GetValue('keyboard_layout'):
        if name in self._PLIST_KEYS:
            if isinstance(value, (list, tuple)):
                value = value[0]
            # e.g. 'com.apple.keylayout.US' -> 'US'
            _, _, keyboard_layout = value.rpartition('.')
            knowledge_base.SetValue('keyboard_layout', keyboard_layout)
Parses a plist key value. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. name (str): name of the plist key. value (str): value of the plist key.
371,610
def make_middleware_stack(middleware, base):
    for ware in reversed(middleware):
        base = ware(base)
    return base
Given a list of in-order middleware callable objects `middleware` and a base function `base`, chains them together so each middleware is fed the function below, and returns the top level ready to call. :param middleware: The middleware stack :type middleware: iterable[callable] :param base: The base callable that the lowest-order middleware wraps :type base: callable :return: The topmost middleware, which calls the next middleware ... which calls the lowest-order middleware, which calls the `base` callable. :rtype: callable
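A minimal usage sketch (the middleware functions here are hypothetical, not from the source): each middleware receives the callable below it and returns a new callable wrapping it.

def shout(nxt):
    def wrapper(text):
        return nxt(text).upper()
    return wrapper

def exclaim(nxt):
    def wrapper(text):
        return nxt(text) + "!"
    return wrapper

handler = make_middleware_stack([shout, exclaim], base=lambda t: t)
handler("hello")  # -> "HELLO!" (shout wraps exclaim, which wraps the base)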
371,611
def _log_players(self, players):
    # Log-format strings were lost in extraction; generic stand-ins below.
    self._logln('{0} players:'.format(len(players)))
    for p in self._players:
        self._logln('{0} ({1}), seat {2}'.format(p.name, p.color, p.seat))
:param players: list of catan.game.Player objects
371,612
def property_schema(self, key):
    schema = self.__class__.SCHEMA
    plain_schema = schema.get("properties", {}).get(key)
    if plain_schema is not None:
        return plain_schema
    pattern_properties = schema.get("patternProperties", {})
    for pattern, pattern_schema in pattern_properties.items():
        if match(pattern, key):
            return pattern_schema
    return schema.get("additionalProperties", True)
Lookup the schema for a specific property.
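A minimal sketch of how the lookup resolves, assuming a hypothetical class with a JSON-Schema-style SCHEMA attribute and `match` bound to `re.match` (both assumptions, not from the source):

import re
match = re.match  # the method above calls a bare `match`

class Settings:
    SCHEMA = {
        "properties": {"name": {"type": "string"}},
        "patternProperties": {r"^env_": {"type": "string"}},
        "additionalProperties": False,
    }
    property_schema = property_schema  # reuse the function above as a method

Settings().property_schema("name")   # -> {'type': 'string'} (plain property)
Settings().property_schema("env_x")  # -> {'type': 'string'} (pattern match)
Settings().property_schema("other")  # -> False (additionalProperties)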
371,613
def write_collection_from_tmpfile(self, collection_id, tmpfi, parent_sha, auth_info, commit_msg=None):
    return self.write_doc_from_tmpfile(collection_id,
                                       tmpfi,
                                       parent_sha,
                                       auth_info,
                                       commit_msg,
                                       doctype_display_name="collection")
Given a collection_id, temporary filename of content, branch and auth_info
371,614
def rfdist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0):
    tree_a = Tree(newick_string_a)
    tree_b = Tree(newick_string_b)
    return treedist.rfdist(tree_a, tree_b, normalise, min_overlap, overlap_fail_value)
Distributed version of tree_distance.rfdist Parameters: two valid newick strings and a boolean
371,615
def is_stopword(self, text):
    found_content_word = False
    for record in self.analyze(text):
        if not self.is_stopword_record(record):
            found_content_word = True
            break
    return not found_content_word
Determine whether a single word is a stopword, or whether a short phrase is made entirely of stopwords, disregarding context. Use of this function should be avoided; it's better to give the text in context and let the process determine which words are the stopwords.
371,616
def get_license_manager(service_instance):
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
Returns the license manager. service_instance The Service Instance Object from which to obtain the license manager.
371,617
def page(self, number):
    number = self.validate_number(number)
    bottom = (number - 1) * self.per_page
    top = bottom + self.per_page
    if top + self.orphans >= self.count:
        top = self.count
    return self._get_page(self.object_list[bottom:top], number, self)
Returns a Page object for the given 1-based page number.
371,618
def cli_wrapper(generator):
    first = True
    response = None
    while True:
        if not first:
            print()
        first = False
        try:
            message = generator.send(response)
            if isinstance(message, MultipleChoice):
                print(message.question)
                for num, choice in enumerate(message.options):
                    print("  {}: {}".format(num, choice))
                option = input("Select an option 0-{}{}: ".format(
                    len(message.options) - 1,
                    " (default: {})".format(message.default)
                    if message.default is not None else ""))
                if option == "" and message.default is not None:
                    option = message.default
                try:
                    response = int(option)
                except ValueError:
                    response = -1
                if not (0 <= response < len(message.options)):
                    print("ERROR: {} is not a valid option.".format(option))
                    return None
            elif isinstance(message, Text):
                print(message.question)
                response = input("> ")
            elif isinstance(message, Prompt):
                print(message.message)
                input("<Press enter to continue>")
                response = None
            elif isinstance(message, Info):
                print(message.message)
                response = None
        except Failure as f:
            print("ERROR: {}".format(str(f)))
            return None
        except Success as s:
            return s.data
Given a wizard, implements an interactive command-line human-friendly interface for it. Parameters ---------- generator A generator such as one created by calling :py:func:`rig.wizard.wizard_generator`. Returns ------- dict or None Returns a dictionary containing the results of the wizard or None if the wizard failed.
371,619
def _get_files(self, attrs=None):
    try:
        f_multicall = self._engine._rpc.f.multicall
        f_params = [self._fields["hash"], 0,
                    "f.path=", "f.size_bytes=", "f.last_touched=",
                    "f.priority=", "f.is_created=", "f.is_open=",
                    ]
        for attr in (attrs or []):
            f_params.append("f.%s=" % attr)
        rpc_result = f_multicall(*tuple(f_params))
    except xmlrpc.ERRORS as exc:
        raise error.EngineError("While %s torrent #%s: %s" % (
            "getting files for", self._fields["hash"], exc))
    else:
        result = [Bunch(
            path=i[0], size=i[1], mtime=i[2] / 1000000.0,
            prio=i[3], created=i[4], opened=i[5],
        ) for i in rpc_result]
        if attrs:
            for idx, attr in enumerate(attrs):
                if attr.startswith("get_"):
                    attr = attr[4:]
                for item, rpc_item in zip(result, rpc_result):
                    item[attr] = rpc_item[6 + idx]
        return result
Get a list of all files in this download; each entry has the attributes C{path} (relative to root), C{size} (in bytes), C{mtime}, C{prio} (0=off, 1=normal, 2=high), C{created}, and C{opened}. This is UNCACHED, use C{fetch("files")} instead. @param attrs: Optional list of additional attributes to fetch.
371,620
async def sendto(self, data, component):
    active_pair = self._nominated.get(component)
    if active_pair:
        await active_pair.protocol.send_data(data, active_pair.remote_addr)
    else:
        raise ConnectionError()
Send a datagram on the specified component. If the connection is not established, a `ConnectionError` is raised.
371,621
def query_alternative_short_name():
    # Allowed argument names follow the query parameters in the docstring.
    args = get_args(
        request_args=request.args,
        allowed_str_args=['name', 'entry_name'],
        allowed_int_args=['limit']
    )
    return jsonify(query.alternative_short_name(**args))
Returns list of alternative short name by query query parameters --- tags: - Query functions parameters: - name: name in: query type: string required: false description: Alternative short name default: CVAP - name: entry_name in: query type: string required: false description: UniProt entry name default: A4_HUMAN - name: limit in: query type: integer required: false description: limit of results numbers default: 10
371,622
def _addDatasetAction(self, dataset):
    action = QAction(dataset.name(), self)
    action.setIcon(XColorIcon(dataset.color()))
    action.setCheckable(True)
    action.setChecked(True)
    action.setData(wrapVariant(dataset))
    action.toggled.connect(self.toggleDataset)
    self.uiDatasetTBAR.addAction(action)
Adds an action for the inputed dataset to the toolbar :param dataset | <XChartDataset>
371,623
def add_hits_to_proteins(self, hmm_hit_list):
    for org in self.organisms:
        print "adding SearchIO hit objects for", org.accession
        for hit in hmm_hit_list:
            # Hit ids are assumed to be '<organism>|<protein>' pairs;
            # the original separator literal was lost in extraction.
            hit_org_id = hit.id.split('|')[0]
            hit_prot_id = hit.id.split('|')[1]
            if org.accession == hit_org_id:
                for prot in org.proteins:
                    if prot.accession == hit_prot_id:
                        prot.hmm_hit_list.append(hit)
Add HMMER results to Protein objects
371,624
def myRank(grade, badFormat, year, length):
    return int(sorted(everyonesAverage(year, badFormat, length), reverse=True).index(grade) + 1)
rank of candidateNumber in year Arguments: grade {int} -- a weighted average for a specific candidate number and year badFormat {dict} -- candNumber : [results for candidate] year {int} -- year you are in length {int} -- length of each row in badFormat divided by 2 Returns: int -- rank of candidateNumber in year
371,625
def factory(cls, endpoint, timeout, *args, **kwargs):
    start = time.time()
    kwargs['connect_timeout'] = timeout
    conn = cls(endpoint, *args, **kwargs)
    elapsed = time.time() - start
    conn.connected_event.wait(timeout - elapsed)
    if conn.last_error:
        if conn.is_unsupported_proto_version:
            raise ProtocolVersionUnsupported(endpoint, conn.protocol_version)
        raise conn.last_error
    elif not conn.connected_event.is_set():
        conn.close()
        raise OperationTimedOut("Timed out creating connection (%s seconds)" % timeout)
    else:
        return conn
A factory function which returns connections which have succeeded in connecting and are ready for service (or raises an exception otherwise).
371,626
def _matrix_adjust(self, X):
    data_matrix = X.data if sparse.issparse(X) else X
    # Shift values so none collide with the reserved sparse encodings.
    data_matrix += len(SPARSE_ENCODINGS) + 1
    # Key name assumed; the original literal was lost in extraction.
    data_matrix[~np.isfinite(data_matrix)] = SPARSE_ENCODINGS['NAN']
    return X
Adjust all values in X to encode for NaNs and infinities in the data. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Input array of type int. Returns ------- X : array-like, shape=(n_samples, n_feature) Input array without any NaNs or infinities.
371,627
def diff(self, test_id_1, test_id_2, config=None, **kwargs):
    output_directory = os.path.join(self._output_directory,
                                    'diff_' + str(test_id_1) + '_' + str(test_id_2))
    if kwargs:
        if 'output_directory' in kwargs.keys():
            output_directory = kwargs['output_directory']
    diff_report = Diff([NaaradReport(self._analyses[test_id_1].output_directory, None),
                        NaaradReport(self._analyses[test_id_2].output_directory, None)],
                       'diff', output_directory,
                       os.path.join(output_directory, self._resource_path),
                       self._resource_path)
    if config:
        naarad.utils.extract_diff_sla_from_config_file(diff_report, config)
    diff_report.generate()
    if diff_report.sla_failures > 0:
        return CONSTANTS.SLA_FAILURE
    if diff_report.status != 'OK':
        return CONSTANTS.ERROR
    return CONSTANTS.OK
Create a diff report using test_id_1 as a baseline :param: test_id_1: test id to be used as baseline :param: test_id_2: test id to compare against baseline :param: config file for diff (optional) :param: **kwargs: keyword arguments
371,628
def register_signals(self):
    from .models import Collection
    from .receivers import CollectionUpdater

    if self.app.config['COLLECTIONS_USE_PERCOLATOR']:
        from .percolator import collection_inserted_percolator, \
            collection_removed_percolator, \
            collection_updated_percolator
        listen(Collection, 'after_insert', collection_inserted_percolator)
        listen(Collection, 'after_update', collection_updated_percolator)
        listen(Collection, 'after_delete', collection_removed_percolator)
    self.update_function = CollectionUpdater(app=self.app)
    signals.before_record_insert.connect(self.update_function, weak=False)
    signals.before_record_update.connect(self.update_function, weak=False)
Register signals.
371,629
def get_engine_from_session(dbsession: Session) -> Engine:
    engine = dbsession.bind
    assert isinstance(engine, Engine)
    return engine
Gets the SQLAlchemy :class:`Engine` from a SQLAlchemy :class:`Session`.
371,630
def computePerturbedExpectation(self, u_n, A_n, compute_uncertainty=True,
                                uncertainty_method=None, warning_cutoff=1.0e-10,
                                return_theta=False):
    if len(np.shape(u_n)) == 2:
        u_n = kn_to_n(u_n, N_k=self.N_k)
    if len(np.shape(A_n)) == 2:
        A_n = kn_to_n(A_n, N_k=self.N_k)
    A_n = np.array(A_n, dtype=np.float64)
    N = self.N
    K = self.K
    # Shift A so all values are positive before taking the log below.
    A_min = np.min(A_n)
    A_n = A_n - (A_min - 1)
    Log_W_nk = np.zeros([N, K + 2], dtype=np.float64)
    N_k = np.zeros([K + 2], dtype=np.int32)
    f_k = np.zeros([K + 2], dtype=np.float64)
    Log_W_nk[:, 0:K] = self.Log_W_nk
    N_k[0:K] = self.N_k
    log_w_n = self._computeUnnormalizedLogWeights(u_n)
    f_k[K] = -_logsum(log_w_n)
    Log_W_nk[:, K] = log_w_n + f_k[K]
    Log_W_nk[:, K + 1] = np.log(A_n) + Log_W_nk[:, K]
    f_k[K + 1] = -_logsum(Log_W_nk[:, K + 1])
    Log_W_nk[:, K + 1] += f_k[K + 1]
    A = np.exp(-f_k[K + 1])
    if (compute_uncertainty or return_theta):
        Theta_ij = self._computeAsymptoticCovarianceMatrix(
            np.exp(Log_W_nk), N_k, method=uncertainty_method)
    if (compute_uncertainty):
        dA = np.abs(A) * np.sqrt(
            Theta_ij[K + 1, K + 1] + Theta_ij[K, K] - 2.0 * Theta_ij[K, K + 1])
    # Undo the shift.
    A += (A_min - 1)
    returns = []
    returns.append(A)
    if (compute_uncertainty):
        returns.append(dA)
    if (return_theta):
        returns.append(Theta_ij)
    return returns
Compute the expectation of an observable of phase space function A(x) for a single new state. Parameters ---------- u_n : np.ndarray, float, shape=(K, N_max) u_n[n] = u(x_n) - the energy of the new state at all N samples previously sampled. A_n : np.ndarray, float, shape=(K, N_max) A_n[n] = A(x_n) - the phase space function of the new state at all N samples previously sampled. If this does NOT depend on state (e.g. position), it's simply the value of the observation. If it DOES depend on the current state, then the observables from the previous states need to be reevaluated at THIS state. compute_uncertainty : bool, optional If False, the uncertainties will not be computed (default: True) uncertainty_method : string, optional Choice of method used to compute asymptotic covariance method, or None to use default See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None) warning_cutoff : float, optional Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10) return_theta : bool, optional Whether or not to return the theta matrix. Can be useful for complicated differences. Returns ------- A : float A is the estimate for the expectation of A(x) for the specified state dA : float dA is uncertainty estimate for A Notes ----- See Section IV of [1]. # Compute estimators and uncertainty. #A = sum(W_n[:,K] * A_n[:]) # Eq. 15 of [1] #dA = abs(A) * np.sqrt(Theta_ij[K,K] + Theta_ij[K+1,K+1] - 2.0 * Theta_ij[K,K+1]) # Eq. 16 of [1]
371,631
def logMsg(self, msg, printMsg=True):
    # Timestamp and log formats assumed; original literals were lost.
    time = datetime.datetime.now().strftime('%c')
    self.log = '{}\n{} | {}'.format(self.log, time, msg)
    if printMsg:
        print msg
    if self.addLogsToArcpyMessages:
        from arcpy import AddMessage
        AddMessage(msg)
logs a message and prints it to the screen
371,632
def __get_all_child_accounts_as_array(self, account: Account) -> List[Account]:
    result = []
    result.append(account)
    for child in account.children:
        sub_accounts = self.__get_all_child_accounts_as_array(child)
        result += sub_accounts
    return result
Returns the whole tree of child accounts in a list
371,633
def syncScrollbars(self):
    chart_hbar = self.uiChartVIEW.horizontalScrollBar()
    chart_vbar = self.uiChartVIEW.verticalScrollBar()
    x_hbar = self.uiXAxisVIEW.horizontalScrollBar()
    x_vbar = self.uiXAxisVIEW.verticalScrollBar()
    y_hbar = self.uiYAxisVIEW.horizontalScrollBar()
    y_vbar = self.uiYAxisVIEW.verticalScrollBar()
    x_hbar.setRange(chart_hbar.minimum(), chart_hbar.maximum())
    x_hbar.setValue(chart_hbar.value())
    x_vbar.setValue(0)
    chart_vbar.setRange(y_vbar.minimum(), y_vbar.maximum())
    chart_vbar.setValue(y_vbar.value())
    y_hbar.setValue(4)
Synchronizes the various scrollbars within this chart.
371,634
def call_requests(
    requests: Union[Request, Iterable[Request]], methods: Methods, debug: bool
) -> Response:
    if isinstance(requests, collections.Iterable):
        return BatchResponse(safe_call(r, methods, debug=debug) for r in requests)
    return safe_call(requests, methods, debug=debug)
Takes a request or list of Requests and calls them. Args: requests: Request object, or a collection of them. methods: The list of methods that can be called. debug: Include more information in error responses.
371,635
def deprecated(operation=None):
    def inner(o):
        o.deprecated = True
        return o
    return inner(operation) if operation else inner
Mark an operation deprecated.
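A short usage sketch: because `inner` is applied immediately when `operation` is given, the decorator works both bare and called with parentheses.

@deprecated
def old_api():
    pass

@deprecated()
def other_api():
    pass

assert old_api.deprecated and other_api.deprecated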
371,636
def start_msstitch(exec_drivers, sysargs):
    parser = populate_parser(exec_drivers)
    args = parser.parse_args(sysargs[1:])
    args.func(**vars(args))
Passed all drivers of executable, checks which command is passed to the executable and then gets the options for a driver, parses them from command line and runs the driver
371,637
def export_dist(self, args):
    ctx = self.ctx
    dist = dist_from_args(ctx, args)
    if dist.needs_build:
        raise BuildInterruptingException(
            'You asked to export a dist, but there is no dist with suitable '
            'recipes available. For now, you must create one first with the '
            'create argument.')
    if args.symlink:
        shprint(sh.ln, '-s', dist.dist_dir, args.output_dir)
    else:
        shprint(sh.cp, '-r', dist.dist_dir, args.output_dir)
Copies a created dist to an output dir. This makes it easy to navigate to the dist to investigate it or call build.py, though you do not in general need to do this and can use the apk command instead.
371,638
def register_blueprint(self, blueprint: Blueprint, url_prefix: Optional[str] = None) -> None:
    first_registration = False
    if blueprint.name in self.blueprints and self.blueprints[blueprint.name] is not blueprint:
        raise RuntimeError(
            f"Blueprint name '{blueprint.name}' "
            f"is already registered by {self.blueprints[blueprint.name]}. "
            "Blueprints must have unique names",
        )
    else:
        self.blueprints[blueprint.name] = blueprint
        first_registration = True
    blueprint.register(self, first_registration, url_prefix=url_prefix)
Register a blueprint on the app. This results in the blueprint's routes, error handlers etc... being added to the app. Arguments: blueprint: The blueprint to register. url_prefix: Optional prefix to apply to all paths.
371,639
def version_cli(ctx, porcelain):
    if ctx.invoked_subcommand:
        return

    from peltak.core import log
    from peltak.core import versioning

    current = versioning.current()
    if porcelain:
        print(current)
    else:
        log.info("Version: <35>{}".format(current))
Show project version. Has sub commands. For this command to work you must specify where the project version is stored. You can do that with version_file conf variable. peltak supports multiple ways to store the project version. Right now you can store it in a python file using built-in __version__ variable. You can use node.js package.json and keep the version there or you can just use a plain text file that just holds the raw project version. The appropriate storage is guessed based on the file type and name. Example Configuration:: version_file: 'src/mypackage/__init__.py' Examples: \b $ peltak version # Pretty print current version $ peltak version --porcelain # Print version as raw string $ peltak version bump patch # Bump patch version component $ peltak version bump minor # Bump minor version component $ peltak version bump major # Bump major version component $ peltak version bump release # same as version bump patch $ peltak version bump --exact=1.2.1 # Set project version to 1.2.1
371,640
def bigquery_schema(table):
    fields = OrderedDict((el.name, dt.dtype(el)) for el in table.schema)
    partition_info = table._properties.get('timePartitioning', None)
    if partition_info is not None:
        partition_field = partition_info.get('field', NATIVE_PARTITION_COL)
        fields.setdefault(partition_field, dt.timestamp)
    return sch.schema(fields)
Infer the schema of a BigQuery `table` object.
371,641
def read_roi(fileobj):
    # Based on the ImageJ RoiDecoder:
    # http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiDecoder.java.html
    # The header-parsing portion below is reconstructed from the surviving
    # error strings and the intact tail of the function.
    SUB_PIXEL_RESOLUTION = 128

    class RoiType:
        POLYGON = 0
        RECT = 1
        OVAL = 2
        LINE = 3
        FREELINE = 4
        POLYLINE = 5
        NOROI = 6
        FREEHAND = 7
        TRACED = 8
        ANGLE = 9
        POINT = 10

    def get8():
        s = fileobj.read(1)
        if not s:
            raise IOError('readroi: Unexpected EOF')
        return ord(s)

    def get16():
        b0 = get8()
        b1 = get8()
        return (b0 << 8) | b1

    def get32():
        s0 = get16()
        s1 = get16()
        return (s0 << 16) | s1

    def getfloat():
        v = np.int32(get32())
        return v.view(np.float32)

    magic = fileobj.read(4)
    if magic != b'Iout':
        raise IOError('Magic number not found')
    version = get16()

    # The ROI type field occupies two bytes, but only the first is used.
    roi_type = get8()
    get8()

    if roi_type not in [RoiType.FREEHAND, RoiType.POLYGON, RoiType.RECT, RoiType.POINT]:
        raise ValueError('roireader: ROI type %s not supported' % roi_type)

    top = get16()
    left = get16()
    bottom = get16()
    right = get16()
    n_coordinates = get16()

    x1 = getfloat()
    y1 = getfloat()
    x2 = getfloat()
    y2 = getfloat()
    stroke_width = get16()
    shape_roi_size = get32()
    stroke_color = get32()
    fill_color = get32()
    subtype = get16()
    if subtype != 0:
        raise ValueError('roireader: ROI subtype %s not supported (!= 0)' % subtype)
    options = get16()
    arrow_style = get8()
    arrow_head_size = get8()
    rect_arc_size = get16()
    position = get32()
    header2offset = get32()
    subPixelResolution = bool(options & SUB_PIXEL_RESOLUTION) and version >= 222

    if roi_type == RoiType.RECT:
        if subPixelResolution:
            return np.array(
                [[y1, x1], [y1, x1 + x2], [y1 + y2, x1 + x2], [y1 + y2, x1]],
                dtype=np.float32)
        else:
            return np.array(
                [[top, left], [top, right], [bottom, right], [bottom, left]],
                dtype=np.int16)

    if subPixelResolution:
        getc = getfloat
        points = np.empty((n_coordinates, 2), dtype=np.float32)
        fileobj.seek(4 * n_coordinates, 1)
    else:
        getc = get16
        points = np.empty((n_coordinates, 2), dtype=np.int16)
    points[:, 1] = [getc() for i in range(n_coordinates)]
    points[:, 0] = [getc() for i in range(n_coordinates)]
    if not subPixelResolution:
        points[:, 1] += left
        points[:, 0] += top
    return points
points = read_roi(fileobj) Read ImageJ's ROI format. Points are returned in a nx2 array. Each row is in [row, column] -- that is, (y,x) -- order.
371,642
def _RemoveAuthorizedKeys(self, user):
    pw_entry = self._GetUser(user)
    if not pw_entry:
        return
    home_dir = pw_entry.pw_dir
    authorized_keys_file = os.path.join(home_dir, '.ssh', 'authorized_keys')
    if os.path.exists(authorized_keys_file):
        try:
            os.remove(authorized_keys_file)
        except OSError as e:
            message = 'Could not remove authorized keys for user %s. %s.'
            self.logger.warning(message, user, str(e))
Remove a Linux user account's authorized keys file to prevent login. Args: user: string, the Linux user account to remove access.
371,643
def get_session_token(self):
    try:
        # The original appended a path suffix to the session URL, but the
        # literal was lost in extraction.
        response = requests.post(self.__session_url__,
                                 cert=(self.__crt__, self.__key__),
                                 verify=True)
    except requests.exceptions.RequestException as err:
        self.logger.error(err)
        raise
    if response.status_code == 200:
        data = json.loads(response.text)
        self.logger.debug(data)
        session_token = data['session_token']
    else:
        raise Exception('Failed to get session token, status code: %s'
                        % str(response.status_code))
    self.logger.debug(session_token)
    return session_token
get session token
371,644
def close_monomers(self, group, cutoff=4.0):
    nearby_residues = []
    for self_atom in self.atoms.values():
        nearby_atoms = group.is_within(cutoff, self_atom)
        for res_atom in nearby_atoms:
            if res_atom.parent not in nearby_residues:
                nearby_residues.append(res_atom.parent)
    return nearby_residues
Returns a list of Monomers from within a cut off distance of the Monomer Parameters ---------- group: BaseAmpal or Subclass Group to be search for Monomers that are close to this Monomer. cutoff: float Distance cut off. Returns ------- nearby_residues: [Monomers] List of Monomers within cut off distance.
371,645
def convert(area_um, deform, emodulus, channel_width_in, channel_width_out,
            flow_rate_in, flow_rate_out, viscosity_in, viscosity_out,
            inplace=False):
    copy = not inplace
    area_um_corr = np.array(area_um, dtype=float, copy=copy)
    deform_corr = np.array(deform, copy=copy)
    emodulus_corr = np.array(emodulus, copy=copy)
    if channel_width_in != channel_width_out:
        area_um_corr *= (channel_width_out / channel_width_in)**2
    if (flow_rate_in != flow_rate_out or
            viscosity_in != viscosity_out or
            channel_width_in != channel_width_out):
        emodulus_corr *= (flow_rate_out / flow_rate_in) \
            * (viscosity_out / viscosity_in) \
            * (channel_width_in / channel_width_out)**3
    return area_um_corr, deform_corr, emodulus_corr
convert area-deformation-emodulus triplet The conversion formula is described in :cite:`Mietke2015`. Parameters ---------- area_um: ndarray Convex cell area [µm²] deform: ndarray Deformation emodulus: ndarray Young's Modulus [kPa] channel_width_in: float Original channel width [µm] channel_width_out: float Target channel width [µm] flow_rate_in: float Original flow rate [µl/s] flow_rate_in: float Target flow rate [µl/s] viscosity_in: float Original viscosity [mPa*s] viscosity_out: float Target viscosity [mPa*s] inplace: bool If True, override input arrays with corrected data Returns ------- area_um_corr: ndarray Corrected cell area [µm²] deform_corr: ndarray Deformation (a copy if `inplace` is False) emodulus_corr: ndarray Corrected emodulus [kPa]
371,646
def create_roles(apps, schema_editor):
    SystemWideEnterpriseRole = apps.get_model('enterprise', 'SystemWideEnterpriseRole')
    SystemWideEnterpriseRole.objects.update_or_create(name=ENTERPRISE_ADMIN_ROLE)
    SystemWideEnterpriseRole.objects.update_or_create(name=ENTERPRISE_LEARNER_ROLE)
Create the enterprise roles if they do not already exist.
371,647
def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
    if chain is not None:
        tables = [self.db._gettables()[chain], ]
    else:
        tables = self.db._gettables()
    for i, table in enumerate(tables):
        if slicing is not None:
            burn, stop, thin = slicing.start, slicing.stop, slicing.step
        if slicing is None or stop is None:
            stop = table.nrows
        col = table.read(start=burn, stop=stop, step=thin, field=self.name)
        if i == 0:
            data = np.asarray(col)
        else:
            data = np.append(data, col, axis=0)
    return data
Return the trace (last by default). :Parameters: burn : integer The number of transient steps to skip. thin : integer Keep one in thin. chain : integer The index of the chain to fetch. If None, return all chains. The default is to return the last chain. slicing : slice object A slice overriding burn and thin assignement.
371,648
def tocimxmlstr(value, indent=None):
    if isinstance(value, Element):
        xml_elem = value
    else:
        xml_elem = tocimxml(value)
    if indent is None:
        xml_str = xml_elem.toxml()
    else:
        if isinstance(indent, six.string_types):
            pass  # use the indent string as-is
        elif isinstance(indent, six.integer_types):
            indent = ' ' * indent
        else:
            raise TypeError(
                _format("Type of indent must be string or integer, but is: {0}",
                        type(indent)))
        xml_str = xml_elem.toprettyxml(indent=indent)
    return _ensure_unicode(xml_str)
Return the CIM-XML representation of the CIM object or CIM data type, as a :term:`unicode string`. *New in pywbem 0.9.* The returned CIM-XML representation is consistent with :term:`DSP0201`. Parameters: value (:term:`CIM object` or :term:`CIM data type` or :term:`Element`): The CIM object or CIM data type to be converted to CIM-XML, or an :term:`Element` object that already is the CIM-XML representation. indent (:term:`string` or :term:`integer`): `None` indicates that a single-line version of the XML should be returned, without any whitespace between the XML elements. Other values indicate that a prettified, multi-line version of the XML should be returned. A string value specifies the indentation string to be used for each level of nested XML elements. An integer value specifies an indentation string of so many blanks. Returns: The CIM-XML representation of the value, as a :term:`unicode string`.
371,649
def is_entailed_by(self, other):
    other = BoolCell.coerce(other)
    if self.value == U or other.value == self.value:
        return True
    return False
If the other is as or more specific than self
371,650
def dial(self, number, timeout=5, callStatusUpdateCallbackFunc=None):
    if self._waitForCallInitUpdate:
        self._dialEvent = threading.Event()
    try:
        # 'ATD<number>;' initiates a voice call (the trailing ';' marks voice).
        self.write('ATD{0};'.format(number), timeout=timeout,
                   waitForResponse=self._waitForAtdResponse)
    except Exception:
        self._dialEvent = None
        raise
    else:
        if self._dialEvent.wait(timeout):
            self._dialEvent = None
            callId, callType = self._dialResponse
            call = Call(self, callId, callType, number, callStatusUpdateCallbackFunc)
            self.activeCalls[callId] = call
            return call
        else:
            self._dialEvent = None
            raise TimeoutException()
Calls the specified phone number using a voice phone call :param number: The phone number to dial :param timeout: Maximum time to wait for the call to be established :param callStatusUpdateCallbackFunc: Callback function that is executed if the call's status changes due to remote events (i.e. when it is answered, the call is ended by the remote party) :return: The outgoing call :rtype: gsmmodem.modem.Call
371,651
def get(self, key):
    value = self._results.get(key)
    if value is not None:
        return value
    option = list(filter(lambda o: o.key == key, self._option_list))
    if not option:
        raise ValueError('No such option: %s' % key)
    option = option[0]
    return option.default
Get parsed result. After :func:`parse` the argv, we can get the parsed results:: # command.option('-f', 'description of -f') command.get('-f') command.get('verbose') # we can also get ``verbose``: command.verbose
371,652
def ProgramScanner(**kw):
    kw['path_function'] = SCons.Scanner.FindPathDirs('LIBPATH')
    ps = SCons.Scanner.Base(scan, "ProgramScanner", **kw)
    return ps
Return a prototype Scanner instance for scanning executable files for static-lib dependencies
371,653
def inverse_transform(self, y, lengths=None):
    y = np.argmax(y, -1)
    inverse_y = [self._label_vocab.id2doc(ids) for ids in y]
    if lengths is not None:
        inverse_y = [iy[:l] for iy, l in zip(inverse_y, lengths)]
    return inverse_y
Return label strings. Args: y: label id matrix. lengths: sentences length. Returns: list: list of list of strings.
371,654
def patchproperty(*cls, **kwargs):
    def _patch(fun):
        m = kwargs.pop('name', None) or fun.__name__
        p = property(fun)
        for c in cls:
            setattr(c, m, p)

    def wrap(fun):
        _patch(fun)
        return fun
    return wrap
Decorator that patches a getter function onto classes as a property. EX) class B(A): pass @patchproperty(B) def prop(self): return 'hello' :param cls: classes to patch :param kwargs:
371,655
def text(self, selector):
    result = self.__bs4.select(selector)
    if len(result) > 1:
        return [r.get_text() for r in result]
    return result[0].get_text() if len(result) > 0 else None
Return text result that executed by given css selector :param selector: `str` css selector :return: `list` or `None`
371,656
def num_samples(self):
    with self.container.open_if_needed(mode='r') as cnt:
        return cnt.get(self.key)[0].shape[0]
Return the total number of samples.
371,657
def element_exists(self, element):
    if not self.__elements:
        return False
    for item in foundations.walkers.dictionaries_walker(self.__elements):
        path, key, value = item
        if key == element:
            LOGGER.debug("> '{0}' attribute exists.".format(element))
            return True
    LOGGER.debug("> '{0}' element doesn't exist.".format(element))
    return False
Checks if given element exists. Usage:: >>> plist_file_parser = PlistFileParser("standard.plist") >>> plist_file_parser.parse() True >>> plist_file_parser.element_exists("String A") True >>> plist_file_parser.element_exists("String Nemo") False :param element: Element to check existence. :type element: unicode :return: Element existence. :rtype: bool
371,658
def _run__http(self, action, replace):
    query = action['query']
    # The exact URL template was lost in extraction; it interpolates the
    # query path plus keys from the action dict.
    url = '{host}{path}'.format(path=query['path'], **action)
    content = None
    method = query.get('method', "get").lower()
    self.debug("{} {} url={}\n", action['name'], method, url)
    if method == "post":
        content = query['content']
    headers = query.get('headers', {})
    if replace and action.get('expand'):
        self.rfxcfg.macro_expand(url, replace)
        if content:
            if isinstance(content, dict):
                for key, value in content.items():
                    content[key] = self.rfxcfg.macro_expand(value, replace)
            else:
                content = self.rfxcfg.macro_expand(content, replace)
        newhdrs = dict()
        for key, value in headers.items():
            newhdrs[key.lower()] = self.rfxcfg.macro_expand(value, replace)
        headers = newhdrs
    self.debug("{} headers={}\n", action['name'], headers)
    self.debug("{} content={}\n", action['name'], content)
    if content and isinstance(content, dict):
        content = json.dumps(content)
    self.logf("Action {name} {type}\n", **action)
    result = getattr(requests, method)(url, headers=headers, data=content,
                                       timeout=action.get('timeout', 5))
    expect = action.get('expect', {})
    expected_codes = expect.get("response-codes", (200, 201, 202, 204))
    self.debug("{} expect codes={}\n", action['name'], expected_codes)
    self.debug("{} status={} content={}\n", action['name'], result.status_code, result.text)
    if result.status_code not in expected_codes:
        self.die("Unable to make {} call, unexpected result ({})",
                 action['name'], result.status_code)
    if 'content' in expect:
        self.debug("{} expect content={}\n", action['name'], expect['content'])
        if expect['content'] not in result.text:
            self.die("{} call to {} failed\nExpected: {}\nReceived:\n{}",
                     action['name'], url, expect['content'], result.text)
    if 'regex' in expect:
        self.debug("{} expect regex={}\n", action['name'], expect['regex'])
        if not re.search(expect['regex'], result.text):
            self.die("{} call to {} failed\nRegex: {}\nDid not match:\n{}",
                     action['name'], url, expect['regex'], result.text)
    self.log(result.text, level=common.log_msg)
    self.logf("Success, status={}\n", result.status_code, level=common.log_good)
    return True
More complex HTTP query.
371,659
def hmmalign_sequences(self, hmm, sequences, output_file):
    # Exact hmmalign flags were lost in extraction; the basic invocation is kept.
    cmd = 'hmmalign %s %s' % (hmm, sequences)
    output = extern.run(cmd)
    # hmmalign emits Stockholm; convert to aligned FASTA per the docstring.
    with open(output_file, 'w') as f:
        SeqIO.write(SeqIO.parse(StringIO(output), 'stockholm'), f, 'fasta')
Run hmmalign and convert output to aligned fasta format Parameters ---------- hmm: str path to hmm file sequences: str path to file of sequences to be aligned output_file: str write sequences to this file Returns ------- nothing
371,660
def get_write_fields(self):
    rec_write_fields = self.get_write_subset('record')
    if self.comments != None:
        rec_write_fields.append('comments')
    self.check_field('n_sig')
    if self.n_sig > 0:
        sig_write_fields = self.get_write_subset('signal')
    else:
        sig_write_fields = None
    return rec_write_fields, sig_write_fields
Get the list of fields used to write the header, separating record and signal specification fields. Returns the default required fields, the user defined fields, and their dependencies. Does NOT include `d_signal` or `e_d_signal`. Returns ------- rec_write_fields : list Record specification fields to be written. Includes 'comment' if present. sig_write_fields : dict Dictionary of signal specification fields to be written, with values equal to the channels that need to be present for each field.
371,661
def _group(self, group_data):
    if isinstance(group_data, dict):
        xid = group_data.get('xid')
    else:
        xid = group_data.xid
    if self.groups.get(xid) is not None:
        group_data = self.groups.get(xid)
    elif self.groups_shelf.get(xid) is not None:
        group_data = self.groups_shelf.get(xid)
    else:
        self.groups[xid] = group_data
    return group_data
Return previously stored group or new group. Args: group_data (dict|obj): An Group dict or instance of Group object. Returns: dict|obj: The new Group dict/object or the previously stored dict/object.
371,662
def args(self, *args, **kwargs):
    self._any_args = False
    self._arguments_rule.set_args(*args, **kwargs)
    return self
Creates a ArgumentsExpectationRule and adds it to the expectation
371,663
def watch(models, criterion=None, log="gradients", log_freq=100):
    global watch_called
    if run is None:
        raise ValueError(
            "You must call `wandb.init` before calling watch")
    if watch_called:
        raise ValueError(
            "You can only call `wandb.watch` once per process. "
            "If you want to watch multiple models, pass them in as a tuple."
        )
    watch_called = True
    log_parameters = False
    log_gradients = True
    if log == "all":
        log_parameters = True
    elif log == "parameters":
        log_parameters = True
        log_gradients = False
    elif log is None:
        log_gradients = False
    if not isinstance(models, (tuple, list)):
        models = (models,)
    graphs = []
    prefix = ''
    for idx, model in enumerate(models):
        if idx > 0:
            prefix = "graph_%i" % idx
        run.history.torch.add_log_hooks_to_pytorch_module(
            model, log_parameters=log_parameters, log_gradients=log_gradients,
            prefix=prefix, log_freq=log_freq)
        graph = wandb_torch.TorchGraph.hook_torch(model, criterion, graph_idx=idx)
        graphs.append(graph)
    return graphs
Hooks into the torch model to collect gradients and the topology. Should be extended to accept arbitrary ML models. :param (torch.Module) models: The model to hook, can be a tuple :param (torch.F) criterion: An optional loss value being optimized :param (str) log: One of "gradients", "parameters", "all", or None :param (int) log_freq: log gradients and parameters every N batches :return: (wandb.Graph) The graph object that will populate after the first backward pass
371,664
async def handle_action(self, action_type, payload, **kwds):
    # Attribute name assumed from the body; the original literal was lost.
    if hasattr(self, 'service'):
        await roll_call_handler(self.service, action_type, payload, **kwds)
The default action Handler has no action.
371,665
def get_cluster(
    self,
    project_id,
    region,
    cluster_name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    if "get_cluster" not in self._inner_api_calls:
        self._inner_api_calls[
            "get_cluster"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.get_cluster,
            default_retry=self._method_configs["GetCluster"].retry,
            default_timeout=self._method_configs["GetCluster"].timeout,
            client_info=self._client_info,
        )
    request = clusters_pb2.GetClusterRequest(
        project_id=project_id, region=region, cluster_name=cluster_name
    )
    return self._inner_api_calls["get_cluster"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Gets the resource representation for a cluster in a project. Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> >>> # TODO: Initialize `region`: >>> region = '' >>> >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> >>> response = client.get_cluster(project_id, region, cluster_name) Args: project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. region (str): Required. The Cloud Dataproc region in which to handle the request. cluster_name (str): Required. The cluster name. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
371,666
def read(self, output_tile):
    if self.config.mode not in ["readonly", "continue", "overwrite"]:
        raise ValueError("process mode must be readonly, continue or overwrite")
    if isinstance(output_tile, tuple):
        output_tile = self.config.output_pyramid.tile(*output_tile)
    elif isinstance(output_tile, BufferedTile):
        pass
    else:
        raise TypeError("output_tile must be tuple or BufferedTile")
    return self.config.output.read(output_tile)
Read from written process output. Parameters ---------- output_tile : BufferedTile or tile index tuple Member of the output tile pyramid (not necessarily the process pyramid, if output has a different metatiling setting) Returns ------- data : NumPy array or features process output
371,667
def download(self, content, filename=None, media_type=None, charset='UTF-8'):
    if isinstance(content, int) and content != 200:
        return content
    if filename is not None:
        filename = os.path.basename(filename)
    elif 'filename' in self.response.state:
        filename = self.response.state['filename']
    else:
        filename = os.path.basename(self.request.path)
    if filename == '':
        raise LogicError('Cannot determine filename for download')
    if media_type is not None:
        self.response.media_type = media_type
    else:
        self.response.media_type = mimetypes.guess_type(filename)[0]
    self.response.charset = charset
    self.response.add_header('Content-Disposition',
                             'attachment; filename="{}"'.format(filename))
    return content
Send content as attachment (downloadable file). The *content* is sent after setting Content-Disposition header such that the client prompts the user to save the content locally as a file. An HTTP response status code may be specified as *content*. If the status code is not ``200``, then this method does nothing and returns the status code. The filename used for the download is determined according to the following rules. The rules are followed in the specified order. 1. If *filename* is specified, then the base name from this argument, i.e. ``os.path.basename(filename)``, is used as the filename for the download. 2. If *filename* is not specified or specified as ``None`` (the default), then the base name from the file path specified to a previous :meth:`static` call made while handling the current request is used. 3. If *filename* is not specified and there was no :meth:`static` call made previously for the current request, then the base name from the current HTTP request path is used. 4. As a result of the above steps, if the resultant *filename* turns out to be empty, then :exc:`ice.LogicError` is raised. The *media_type* and *charset* arguments are used in the same manner as they are used in :meth:`static`. Arguments: content (str, bytes or int): Content to be sent as download or HTTP status code of the response to be returned. filename (str): Filename to use for saving the content media_type (str, optional): Media type of file. charset (str, optional): Character set of file. Returns: content, i.e. the first argument passed to this method. Raises: LogicError: When filename cannot be determined.
371,668
def sendEmail(sender, recipients, subject, body, attachments=None,
              cc=None, bcc=None, contentType='html', server=None,
              useMSExchange=None, encoding='utf-8', raiseErrors=False):
    # Several string literals below (defaults, header names, messages, the
    # image regex) were lost in extraction and are filled with standard
    # email/MIME idioms.
    if attachments is None:
        attachments = []
    if cc is None:
        cc = []
    if bcc is None:
        bcc = []
    if server is None:
        server = NOTIFY_SERVER
    if useMSExchange is None:
        useMSExchange = NOTIFY_SERVER_MSX

    sender = nstr(sender)
    recipients = map(nstr, recipients)

    if not isEmail(sender):
        err = errors.NotifyError('%s is not a valid email address' % sender)
        logger.error(err)
        return False
    if not recipients:
        err = errors.NotifyError('No recipients were supplied.')
        logger.error(err)
        return False
    if not server:
        err = errors.NotifyError('No email server was supplied.')
        logger.error(err)
        return False

    msg = MIMEMultipart(_subtype='related')
    msg['Subject'] = projex.text.toUtf8(subject)
    msg['From'] = sender
    msg['To'] = ','.join(recipients)
    msg['Cc'] = ','.join([nstr(addr) for addr in cc if isEmail(addr)])
    msg['Bcc'] = ','.join([nstr(addr) for addr in bcc if isEmail(addr)])
    msg['Date'] = nstr(datetime.datetime.now())
    msg['Content-type'] = 'Multipart/mixed'
    msg.preamble = 'This is a multi-part message in MIME format.'
    msg.epilogue = ''

    # Inline any local images referenced from the HTML body.
    bodyhtml = projex.text.toUtf8(body)
    eattach = []
    filepaths = re.findall(r'<img\s[^>]*src="(file:///[^"]+)"', bodyhtml)
    for filepath in filepaths:
        filename = filepath.replace('file:///', '')
        if os.path.exists(filename) and filename not in attachments:
            cid = 'cid:%s' % os.path.basename(filename)
            bodyhtml = bodyhtml.replace(filename, cid)
            fp = open(nstr(filename), 'rb')
            msgImage = MIMEImage(fp.read())
            fp.close()
            content_id = '<%s>' % os.path.basename(filename)
            inline_link = 'inline; filename="%s"' % os.path.basename(filename)
            msgImage.add_header('Content-ID', content_id)
            msgImage.add_header('Content-Disposition', inline_link)
            eattach.append(msgImage)
            attachments.append(filename)

    msgText = MIMEText(bodyhtml, contentType, encoding)
    msgText['Content-type'] = contentType

    for attach in attachments:
        fp = open(nstr(attach), 'rb')
        txt = MIMEBase('application', 'octet-stream')
        txt.set_payload(fp.read())
        fp.close()
        encode_base64(txt)
        attachment = 'attachment; filename="%s"' % os.path.basename(attach)
        txt.add_header('Content-Disposition', attachment)
        eattach.append(txt)

    eattach.insert(0, msgText)
    for attach in eattach:
        msg.attach(attach)

    try:
        smtp_server = smtplib.SMTP(nstr(server))
    except socket.gaierror, err:
        logger.error(err)
        if raiseErrors:
            raise
        return False
    except Exception, err:
        logger.error(err)
        if raiseErrors:
            raise
        return False

    if useMSExchange:
        success, response = connectMSExchange(smtp_server)
        if not success:
            logger.debug('Could not connect to MS Exchange: ' + response)

    try:
        smtp_server.sendmail(sender, recipients, msg.as_string())
        smtp_server.close()
    except Exception, err:
        logger.error(err)
        if raiseErrors:
            raise
        return False
    return True
Sends an email from the inputted email address to the list of given recipients with the inputted subject and body. This will also attach the inputted list of attachments to the email. The server value will default to mail.<sender_domain> and you can use a ':' to specify a port for the server. :param sender <str> :param recipients <list> [ <str>, .. ] :param subject <str> :param body <str> :param attachments <list> [ <str>, .. ] :param cc <list> [ <str>, .. ] :param bcc <list> [ <str>, .. ] :param contentType <str> :param server <str> :return <bool> success
371,669
def stopall(self, sudo=False, quiet=True):
    # Body reconstructed from the surviving fragments ('instance.stop',
    # 'version 3', '--all', 'return_code', '%s : return code %s', 'message').
    from spython.utils import run_command
    subgroup = 'instance.stop'
    if 'version 3' in self.version():
        subgroup = ['instance', 'stop']
    cmd = self._init_command(subgroup)
    cmd = cmd + ['--all']
    output = run_command(cmd, sudo=sudo, quiet=quiet)
    if output['return_code'] != 0:
        message = '%s : return code %s' % (output['message'], output['return_code'])
        bot.error(message)
    return output['return_code']
stop ALL instances. This command is only added to the command group as it doesn't make sense to call from a single instance Parameters ========== sudo: if the command should be done with sudo (exposes different set of instances)
371,670
def mstmap(args):
    from jcvi.assembly.geneticmap import MSTMatrix

    p = OptionParser(mstmap.__doc__)
    p.add_option("--dh", default=False, action="store_true",
                 help="Double haploid population, no het [default: %default]")
    p.add_option("--freq", default=.2, type="float",
                 help="Allele must be above frequency [default: %default]")
    p.add_option("--mindepth", default=3, type="int",
                 help="Only trust genotype calls with depth [default: %default]")
    p.add_option("--missing_threshold", default=.25, type="float",
                 help="Fraction missing must be below")
    p.add_option("--noheader", default=False, action="store_true",
                 help="Do not print MSTmap run parameters [default: %default]")
    p.add_option("--pv4", default=False, action="store_true",
                 help="Enable filtering strand-bias, tail distance bias, etc. "
                      "[default: %default]")
    p.add_option("--freebayes", default=False, action="store_true",
                 help="VCF output from freebayes")
    p.set_sep(sep=".", help="Use separator to simplify individual names")
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    vcffile, = args
    if vcffile.endswith(".bcf"):
        bcffile = vcffile
        vcffile = bcffile.rsplit(".", 1)[0] + ".vcf"
        cmd = "bcftools view {0}".format(bcffile)
        cmd += " | vcfutils.pl varFilter"
        if not opts.pv4:
            cmd += " -1 0 -2 0 -3 0 -4 0 -e 0"
        if need_update(bcffile, vcffile):
            sh(cmd, outfile=vcffile)

    freq = opts.freq
    sep = opts.sep
    depth_index = 1 if opts.freebayes else 2
    ptype = "DH" if opts.dh else "RIL6"
    nohet = ptype == "DH"

    fp = open(vcffile)
    genotypes = []
    for row in fp:
        if row[:2] == "##":  # VCF meta lines
            continue
        atoms = row.split()
        if row[0] == '#':  # VCF header line (#CHROM ...)
            ind = [x.split(sep)[0] for x in atoms[9:]]
            nind = len(ind)
            mh = ["locus_name"] + ind
            continue
        marker = "{0}.{1}".format(*atoms[:2])
        geno = atoms[9:]
        geno = [encode_genotype(x, mindepth=opts.mindepth,
                                depth_index=depth_index,
                                nohet=nohet) for x in geno]
        assert len(geno) == nind
        f = 1. / nind
        if geno.count("A") * f < freq:
            continue
        if geno.count("B") * f < freq:
            continue
        if geno.count("-") * f > opts.missing_threshold:
            continue
        genotype = [marker] + geno
        genotypes.append(genotype)

    mm = MSTMatrix(genotypes, mh, ptype, opts.missing_threshold)
    mm.write(opts.outfile, header=(not opts.noheader))
%prog mstmap bcffile/vcffile > matrixfile Convert bcf/vcf format to mstmap input.
371,671
def nearest_neighbor(x, tSet):
    assert isinstance(x, tuple) and isinstance(tSet, dict)
    current_key = ()
    min_d = float('inf')
    for key in tSet:
        d = distance(x, key)
        if d < min_d:
            min_d = d
            current_key = key
    return tSet[current_key]
[summary] Implements the nearest neighbor algorithm Arguments: x {[tupel]} -- [vector] tSet {[dict]} -- [training set] Returns: [type] -- [result of the AND-function]
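The snippet relies on an undefined `distance` helper; a Euclidean stand-in (an assumption, not from the source) makes it runnable. A sketch using the AND-function training set hinted at by the docstring:

import math

def distance(a, b):  # assumed Euclidean; not part of the original source
    return math.sqrt(sum((p - q) ** 2 for p, q in zip(a, b)))

t_set = {(0, 0): 0, (0, 1): 0, (1, 0): 0, (1, 1): 1}
nearest_neighbor((0.9, 0.8), t_set)  # -> 1 (closest key is (1, 1))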
371,672
def loadFromTemplate(template, stim=None):
    stim = StimulusModel.loadFromTemplate(template, stim=stim)
    qstim = QStimulusModel(stim)
    # Template key assumed; the original literal was lost in extraction.
    qstim.setEditor(template['testtype'])
    return qstim
Initializes this stimulus from a saved *template* :param template: doc from a previously stored stimulus via :class:`templateDoc` :type template: dict
371,673
def acceptedUser(self, logType):
    from urllib2 import urlopen, URLError, HTTPError
    import json

    isApproved = False
    userName = str(self.logui.userName.text())
    if userName == "":
        return False

    if logType == "MCC":
        networkFault = False
        data = []
        log_url = ("https://mccelog.slac.stanford.edu/elog/dev/mgibbs/"
                   "dev_json_user_list.php/?username=" + userName)
        try:
            data = urlopen(log_url, None, 5).read()
            data = json.loads(data)
        except URLError as error:
            print("URLError: " + str(error.reason))
            networkFault = True
        except HTTPError as error:
            print("HTTPError: " + str(error.reason))
            networkFault = True
        if networkFault:
            msgBox = QMessageBox()
            msgBox.setText("Cannot connect to MCC Log Server!")
            msgBox.setInformativeText("Use entered User name anyway?")
            msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
            msgBox.setDefaultButton(QMessageBox.Ok)
            if msgBox.exec_() == QMessageBox.Ok:
                isApproved = True
        if data != [] and (data is not None):
            isApproved = True
    else:
        isApproved = True
    return isApproved
Verify enetered user name is on accepted MCC logbook list.
371,674
def allocate_ids(self, partial_keys):
    conn = self.get_conn()
    resp = (conn
            .projects()
            .allocateIds(projectId=self.project_id, body={'keys': partial_keys})
            .execute(num_retries=self.num_retries))
    return resp['keys']
Allocate IDs for incomplete keys. .. seealso:: https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds :param partial_keys: a list of partial keys. :type partial_keys: list :return: a list of full keys. :rtype: list
371,675
def _prm_write_shared_table(self, key, hdf5_group, fullname, **kwargs):
    first_row = None
    description = None
    if 'first_row' in kwargs:
        first_row = kwargs.pop('first_row')
        if not 'description' in kwargs:
            description = {}
            for colname in first_row:
                data = first_row[colname]
                column = self._all_get_table_col(key, [data], fullname)
                description[colname] = column
    if 'description' in kwargs:
        description = kwargs.pop('description')
    if 'filters' in kwargs:
        filters = kwargs.pop('filters')
    else:
        filters = self._all_get_filters(kwargs)
    table = self._hdf5file.create_table(where=hdf5_group, name=key,
                                        description=description,
                                        filters=filters, **kwargs)
    table.flush()
    if first_row is not None:
        row = table.row
        for key in description:
            row[key] = first_row[key]
        row.append()
        table.flush()
Creates a new empty table
371,676
def text_(s, encoding='latin-1', errors='strict'):
    return s.decode(encoding, errors) if isinstance(s, binary_type) else s
If ``s`` is an instance of ``binary_type``, return ``s.decode(encoding, errors)``, otherwise return ``s``
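A quick sketch of the two branches (under Python 3, where six's `binary_type` is `bytes`; the inputs are illustrative):

text_(b"caf\xc3\xa9", encoding="utf-8")  # -> 'café' (bytes are decoded)
text_("already text")                    # -> 'already text' (passed through)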
371,677
def input_value(self, locator, text):
    self._info("Setting text '%s' into text field '%s'" % (text, locator))
    self._element_input_value_by_locator(locator, text)
Sets the given value into text field identified by `locator`. This is an IOS only keyword, input value makes use of set_value See `introduction` for details about locating elements.
371,678
def download(sess_id_or_alias, files, dest):
    if len(files) < 1:
        return
    with Session() as session:
        try:
            print_wait('Downloading file(s) from {0}...'.format(sess_id_or_alias))
            kernel = session.Kernel(sess_id_or_alias)
            kernel.download(files, dest, show_progress=True)
            print_done('Downloaded to {}.'.format(dest.resolve()))
        except Exception as e:
            print_error(e)
            sys.exit(1)
Download files from a running container. \b SESSID: Session ID or its alias given when creating the session. FILES: Paths inside container.
371,679
def format_block(block, nlspaces=0):
    lines = smart_text(block).split('\n')

    # Trim leading/trailing empty lines.
    while lines and not lines[0]:
        del lines[0]
    while lines and not lines[-1]:
        del lines[-1]

    # Strip the leading whitespace common to all lines.
    ws = re.match(r'\s*', lines[0]).group(0)
    if ws:
        lines = [x.replace(ws, '', 1) for x in lines]

    while lines and not lines[0]:
        del lines[0]
    while lines and not lines[-1]:
        del lines[-1]

    flines = ['%s%s' % (' ' * nlspaces, line) for line in lines]
    return '\n'.join(flines) + '\n'
Format the given block of text, trimming leading/trailing empty lines and any leading whitespace that is common to all lines. The purpose is to let us list a code block as a multiline, triple-quoted Python string, taking care of indentation concerns. http://code.activestate.com/recipes/145672/
371,680
def _walk_through(job_dir, display_progress=False):
    serial = salt.payload.Serial(__opts__)
    for top in os.listdir(job_dir):
        t_path = os.path.join(job_dir, top)
        for final in os.listdir(t_path):
            load_path = os.path.join(t_path, final, '.load.p')
            if not os.path.isfile(load_path):
                continue
            with salt.utils.files.fopen(load_path, 'rb') as rfh:
                job = serial.load(rfh)
                jid = job['jid']
                if display_progress:
                    __jid_event__.fire_event(
                        {'message': 'Found JID {0}'.format(jid)}, 'progress')
                yield jid, job, t_path, final
Walk through the job dir and return jobs
371,681
def dump_data_peek(data, base=0, separator=' ', width=16, bits=None):
    if data is None:
        return ''
    pointers = compat.keys(data)
    pointers.sort()
    result = ''
    for offset in pointers:
        dumped = HexDump.hexline(data[offset], separator, width)
        address = HexDump.address(base + offset, bits)
        # Output template reconstructed: one "address -> hexline" row
        # per pointer.
        result += '%s -> %s\n' % (address, dumped)
    return result
Dump data from pointers guessed within the given binary data. @type data: str @param data: Dictionary mapping offsets to the data they point to. @type base: int @param base: Base offset. @type bits: int @param bits: (Optional) Number of bits of the target architecture. The default is platform dependent. See: L{HexDump.address_size} @rtype: str @return: Text suitable for logging.
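A small sketch of the expected call shape; the offsets, bytes, and the exact hexline formatting are illustrative:

# Hypothetical input: offsets within a buffer, mapped to the bytes found
# at the addresses those offsets point to.
data = {0x10: b'\x48\x65\x6c\x6c\x6f', 0x30: b'\x00\x01\x02\x03'}
print(dump_data_peek(data, base=0x401000))
# one line per pointer, roughly:
# 00401010 -> 48 65 6c 6c 6f
# 00401030 -> 00 01 02 03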
371,682
def as_list(self, key):
    result = self[key]
    if isinstance(result, (tuple, list)):
        return list(result)
    return [result]
A convenience method which fetches the specified value, guaranteeing that it is a list. >>> a = ConfigObj() >>> a['a'] = 1 >>> a.as_list('a') [1] >>> a['a'] = (1,) >>> a.as_list('a') [1] >>> a['a'] = [1] >>> a.as_list('a') [1]
371,683
def cli(env, account_id, content_url, type, cname):
    manager = SoftLayer.CDNManager(env.client)
    manager.add_origin(account_id, type, content_url, cname)
Create an origin pull mapping.
371,684
def split(self):
    assert(self.status == SolverStatus.exhausted)

    scopes = []
    next_scopes = []
    split_i = None

    for i, scope in enumerate(self.scopes):
        if split_i is None:
            r = scope.split()
            if r is not None:
                scope_, next_scope = r
                scopes.append(scope_)
                next_scopes.append(next_scope)
                split_i = i
                continue
        scopes.append(scope)
        next_scopes.append(scope)

    assert split_i is not None

    phase = copy.copy(self)
    phase.scopes = scopes
    phase.status = SolverStatus.pending
    phase.changed_scopes_i = set([split_i])

    next_phase = copy.copy(phase)
    next_phase.scopes = next_scopes
    return (phase, next_phase)
Split the phase. When a phase is exhausted, it gets split into a pair of phases to be further solved. The split happens like so: 1) Select the first unsolved package scope. 2) Find some common dependency in the first N variants of the scope. 3) Split the scope into two: [:N] and [N:]. 4) Create two copies of the phase, containing each half of the split scope. The result of this split is that we have a new phase (the first phase), which contains a package scope with a common dependency. This dependency can now be intersected with the current resolve, thus progressing it. Returns: A 2-tuple of _ResolvePhase objects, where the first phase is the best contender for resolving.
371,685
def remove_pane(self, pane):
    assert isinstance(pane, Pane)

    # Iterate over a copy: the loop may remove windows from self.windows.
    for w in list(self.windows):
        w.remove_pane(pane)

        # If the window became empty, move focus on for any CLI that had
        # it active, then drop the window.
        if not w.has_panes:
            for app, active_w in self._active_window_for_cli.items():
                if w == active_w:
                    with set_app(app):
                        self.focus_next_window()
            self.windows.remove(w)
Remove a :class:`.Pane`. (Look in all windows.)
371,686
def step(self, path=None, peer_table=None):
    if path is None:
        path = self.atlasdb_path

    num_fetched = 0
    missing_zfinfo = None
    peer_hostports = None

    with AtlasPeerTableLocked(peer_table) as ptbl:
        missing_zfinfo = atlas_find_missing_zonefile_availability(
            peer_table=ptbl, path=path)
        peer_hostports = ptbl.keys()[:]

    # Rank missing zonefiles rarest-first.  The ranking key name is a
    # reconstruction; any per-zonefile popularity measure fits here.
    zonefile_ranking = [(missing_zfinfo[zfhash]['popularity'], zfhash)
                        for zfhash in missing_zfinfo.keys()]
    zonefile_ranking.sort()

    zonefile_hashes = list(set([zfhash for (_, zfhash) in zonefile_ranking]))
    zonefile_names = dict([(zfhash, missing_zfinfo[zfhash]['names'])
                           for zfhash in zonefile_hashes])
    zonefile_txids = dict([(zfhash, missing_zfinfo[zfhash]['txid'])
                           for zfhash in zonefile_hashes])
    zonefile_block_heights = dict([(zfhash, missing_zfinfo[zfhash]['block_heights'])
                                   for zfhash in zonefile_hashes])

    zonefile_origins = self.find_zonefile_origins(missing_zfinfo, peer_hostports)

    # Skip zonefiles we already have cached
    for i in xrange(0, len(zonefile_hashes)):
        zfhash = zonefile_hashes[i]
        present = is_zonefile_cached(zfhash, self.zonefile_dir, validate=True)
        if present:
            log.debug("%s: zonefile %s already cached. Marking present" % (self.hostport, zfhash))
            zonefile_hashes[i] = None
            self.set_zonefile_present(zfhash, min(zonefile_block_heights[zfhash]), path=path)

    zonefile_hashes = filter(lambda zfh: zfh is not None, zonefile_hashes)

    if len(zonefile_hashes) > 0:
        log.debug("%s: missing %s unique zonefiles" % (self.hostport, len(zonefile_hashes)))

    while len(zonefile_hashes) > 0 and self.running:
        zfhash = zonefile_hashes[0]
        zfnames = zonefile_names[zfhash]
        zftxid = zonefile_txids[zfhash]
        peers = missing_zfinfo[zfhash]['peers']

        # NOTE: the per-peer loop header was lost in extraction and is
        # reconstructed; the body plainly iterates over candidate peers.
        for peer_hostport in peers:
            # Batch: ask this peer for every missing zonefile it claims to have
            peer_zonefile_hashes = []
            for zfh in zonefile_origins[peer_hostport]:
                if zfh in zonefile_hashes:
                    peer_zonefile_hashes.append(zfh)

            if len(peer_zonefile_hashes) == 0:
                log.debug("%s: No zonefiles available from %s" % (self.hostport, peer_hostport))
                continue

            log.debug("%s: get %s zonefiles from %s" % (self.hostport, len(peer_zonefile_hashes), peer_hostport))
            zonefiles = atlas_get_zonefiles(self.hostport, peer_hostport,
                                            peer_zonefile_hashes, peer_table=peer_table)
            if zonefiles is not None:
                stored_zfhashes = self.store_zonefiles(zonefile_names, zonefiles,
                                                       zonefile_txids, zonefile_block_heights,
                                                       peer_zonefile_hashes, peer_hostport, path)
                # Bookkeeping loop reconstructed: the original per-hash
                # accounting after storage was partially lost in extraction.
                for zfh in stored_zfhashes:
                    num_fetched += 1
                    if zfh in zonefile_origins[peer_hostport]:
                        zonefile_origins[peer_hostport].remove(zfh)
                    if zfh in zonefile_hashes:
                        zonefile_hashes.remove(zfh)

        if zfhash in zonefile_hashes:
            # No peer could supply it this pass; drop it for now
            zonefile_hashes.remove(zfhash)

    if len(zonefile_hashes) > 0 or num_fetched > 0:
        log.debug("%s: fetched %s zonefiles" % (self.hostport, num_fetched))

    return num_fetched
Run one step of this algorithm: * find the set of missing zonefiles * try to fetch each of them * store them * update our zonefile database Fetch rarest zonefiles first, but batch whenever possible. Return the number of zonefiles fetched
371,687
def namedb_get_all_revealed_namespace_ids(self, current_block):
    # Cursor acquisition reconstructed: the original referenced a cursor
    # (`cur`) whose creation was lost in extraction.
    cur = self.db.cursor()
    query = "SELECT namespace_id FROM namespaces WHERE op = ? AND reveal_block < ?;"
    args = (NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE)

    namespace_rows = namedb_query_execute(cur, query, args)
    ret = []
    for namespace_row in namespace_rows:
        ret.append(namespace_row['namespace_id'])
    return ret
Get all non-expired revealed namespaces.
371,688
def epilogue(app_name):
    app_name = clr.stringc(app_name, "bright blue")
    command = clr.stringc("command", "cyan")
    help_opt = clr.stringc("--help", "green")  # renamed from `help` to avoid shadowing the builtin
    return "\n%s %s %s for more info on a command\n" % (app_name, command, help_opt)
Return the epilogue for the help command.
371,689
def fqscreen_plot(self):
    categories = list()
    getCats = True
    data = list()

    # The category keys, colours and display names below were lost in
    # extraction; these reconstructions mirror FastQ Screen's four hit
    # classes and are illustrative, not verbatim.
    p_types = OrderedDict()
    p_types['one_hit_one_library_percentage'] = {'col': '#0000ff', 'name': 'One Hit, One Genome'}
    p_types['multiple_hits_one_library_percentage'] = {'col': '#00007f', 'name': 'Multiple Hits, One Genome'}
    p_types['one_hit_multiple_libraries_percentage'] = {'col': '#ff0000', 'name': 'One Hit, Multiple Genomes'}
    p_types['multiple_hits_multiple_libraries_percentage'] = {'col': '#7f0000', 'name': 'Multiple Hits, Multiple Genomes'}

    for k, t in p_types.items():
        first = True
        for s in sorted(self.fq_screen_data.keys()):
            thisdata = list()
            if len(categories) > 0:
                getCats = False
            for org in sorted(self.fq_screen_data[s]):
                if org == 'total_reads':  # assumption: skip the non-organism entry
                    continue
                try:
                    thisdata.append(self.fq_screen_data[s][org]['percentages'][k])
                except KeyError:
                    thisdata.append(None)
                if getCats:
                    categories.append(org)
            td = {
                'name': t['name'],
                'stack': s,
                'data': thisdata,
                'color': t['col'],
            }
            if first:
                first = False
            else:
                td['linkedTo'] = ':previous'  # assumption: HighCharts-style series linking
            data.append(td)

    # The HTML/JS template string that embedded the plot was lost in
    # extraction; PLOT_TEMPLATE is a hypothetical stand-in for it.
    html = PLOT_TEMPLATE.format(json.dumps(data), json.dumps(categories))
    return html
Makes a fancy custom plot which replicates the plot seen in the main FastQ Screen program. Not useful with many samples, as the plot becomes too wide.
371,690
def compile_and_process(self, in_path):
    out_path = self.path_mapping[in_path]

    if not self.embed:
        pdebug("[%s::%s] %s -> %s" % (
            self.compiler_name, self.name,
            os.path.relpath(in_path), os.path.relpath(out_path)),
            groups=["build_task"], autobreak=True)
    else:
        pdebug("[%s::%s] %s -> <cache>" % (
            self.compiler_name, self.name, os.path.relpath(in_path)),
            groups=["build_task"], autobreak=True)

    compiled_string = self.compile_file(in_path)

    if not self.embed:
        if compiled_string != "":
            with open(out_path, "w") as f:
                f.write(compiled_string)

    return compiled_string
Compile a file; save the result to the output file unless the embed flag is set, in which case the compiled string is only returned.
371,691
def h2o_explained_variance_score(y_actual, y_predicted, weights=None):
    ModelBase._check_targets(y_actual, y_predicted)
    _, numerator = _mean_var(y_actual - y_predicted, weights)
    _, denominator = _mean_var(y_actual, weights)
    if denominator == 0.0:
        return 1. if numerator == 0 else 0.
    return 1 - numerator / denominator
Explained variance regression score function. :param y_actual: H2OFrame of actual response. :param y_predicted: H2OFrame of predicted response. :param weights: (Optional) sample weights :returns: the explained variance score.
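For reference, the quantity computed above is the standard explained-variance score; a perfect predictor gives 1, and a predictor no better than the mean of y gives 0:

\mathrm{EV}(y, \hat{y}) = 1 - \frac{\operatorname{Var}(y - \hat{y})}{\operatorname{Var}(y)}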
371,692
def _get_and_count_containers(self, custom_cgroups=False, healthchecks=False):
    # NOTE: the opening of this function (listing containers from the API,
    # filtering them, and building `containers_by_id` plus the two per-tag
    # counters) was lost in extraction.  The preamble below is a sketch of
    # what must have been there, not the original code.
    containers = self.docker_util.client.containers(all=True)
    self._filter_containers(containers)

    running_containers_count = Counter()
    all_containers_count = Counter()
    containers_by_id = {}

    for container in containers:
        container_name = DockerUtil.container_name_extractor(container)[0]
        container_tags = tuple(sorted(self._get_tags(container, CONTAINER)))
        all_containers_count[container_tags] += 1
        if self._is_container_running(container):
            running_containers_count[container_tags] += 1
        containers_by_id[container['Id']] = container

        if custom_cgroups or healthchecks:
            try:
                inspect_dict = self.docker_util.client.inspect_container(container_name)
                container['_pid'] = inspect_dict['State']['Pid']
                container['health'] = inspect_dict['State'].get('Health', {})
            except Exception as e:
                self.log.debug("Unable to inspect Docker container: %s", e)

    total_count = 0
    for tags, count in running_containers_count.iteritems():
        total_count += count
        self.gauge("docker.containers.running", count, tags=list(tags))
    self.gauge("docker.containers.running.total", total_count, tags=self.custom_tags)

    total_count = 0
    for tags, count in all_containers_count.iteritems():
        stopped_count = count - running_containers_count[tags]
        total_count += stopped_count
        self.gauge("docker.containers.stopped", stopped_count, tags=list(tags))
    self.gauge("docker.containers.stopped.total", total_count, tags=self.custom_tags)

    return containers_by_id
List all the containers from the API, filter and count them.
371,693
def _create_chrome_options(self):
    options = webdriver.ChromeOptions()

    # Config section/option names and Chrome flags reconstructed from context.
    if self.config.getboolean_optional('Driver', 'headless'):
        self.logger.debug("Running Chrome in headless mode")
        options.add_argument('--headless')
        if os.name == 'nt':  # '--disable-gpu' is needed on Windows
            options.add_argument('--disable-gpu')

    self._add_chrome_options(options, 'prefs')
    self._add_chrome_options(options, 'mobileEmulation')
    self._add_chrome_arguments(options)
    return options
Create and configure a chrome options object :returns: chrome options object
371,694
def _cldf2wld(dataset):
    # Field and column names reconstructed from CLDF conventions ('ID',
    # 'Segments', 'FormTable') and the 'lid' header label; treat them all
    # as assumptions.
    header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
    D = {0: ['lid'] + [h.lower() for h in header]}
    for idx, row in enumerate(dataset.objects['FormTable']):
        row = deepcopy(row)
        row['Segments'] = ' '.join(row['Segments'])
        D[idx + 1] = [row['ID']] + [row[h] for h in header]
    return D
Make a lingpy-compatible dictionary out of CLDF main data.
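A sketch of the output shape this function produces; the column names and values are illustrative, per the assumptions noted in the code:

# D maps 0 -> the lowered header row and i -> the i-th lexeme's values,
# which is the wordlist-dictionary format lingpy expects.
D = {
    0: ['lid', 'language_id', 'concept', 'segments'],   # illustrative header
    1: ['1', 'deu', 'hand', 'h a n t'],
    2: ['2', 'eng', 'hand', 'h ae n d'],
}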
371,695
def build_mine_matrix(self, w, h, minenum):
    self.minecount = 0
    matrix = [[Cell(30, 30, x, y, self) for x in range(w)] for y in range(h)]

    for i in range(0, minenum):
        x = random.randint(0, w - 1)
        y = random.randint(0, h - 1)
        # Collisions are skipped rather than retried, so the board may end
        # up with fewer than `minenum` mines; minecount tracks the actual total.
        if matrix[y][x].has_mine:
            continue
        self.minecount += 1
        matrix[y][x].has_mine = True
        for coord in [[-1, -1], [-1, 0], [-1, 1], [0, -1],
                      [0, 1], [1, -1], [1, 0], [1, 1]]:
            _x, _y = coord
            if not self.coord_in_map(x + _x, y + _y, w, h):
                continue
            matrix[y + _y][x + _x].add_nearest_mine()
    return matrix
Randomly fill cells with mines and increment the nearest-mine count in adjacent cells.
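A quick usage sketch for a classic beginner board; `board` stands in for whatever object carries coord_in_map and minecount:

matrix = board.build_mine_matrix(w=9, h=9, minenum=10)
mines = sum(cell.has_mine for row in matrix for cell in row)
assert mines == board.minecount  # may be < 10 if random picks collided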
371,696
def _l_cv_weight(self, donor_catchment):
    try:
        dist = donor_catchment.similarity_dist
    except AttributeError:
        dist = self._similarity_distance(self.catchment, donor_catchment)
    b = 0.0047 * sqrt(dist) + 0.0023 / 2
    c = 0.02609 / (donor_catchment.record_length - 1)
    return 1 / (b + c)
Return L-CV weighting for a donor catchment. Methodology source: Science Report SC050050, eqn. 6.18 and 6.22a
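Written out, the weight computed above is (with d the similarity distance and n the donor's record length; the code is the ground truth here, the notation is ours):

b = 0.0047\,\sqrt{d} + \frac{0.0023}{2}, \qquad
c = \frac{0.02609}{n - 1}, \qquad
w = \frac{1}{b + c}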
371,697
def bait(self, maskmiddle='t', k=19):
    # Defaults, the log message, and the bbduk.sh command strings below are
    # reconstructions: the original literals were lost in extraction.
    logging.info('Performing kmer baiting of fastq files with {at} targets'.format(at=self.analysistype))
    if self.kmer_size is None:
        kmer = k
    else:
        kmer = self.kmer_size

    with progressbar(self.runmetadata) as bar:
        for sample in bar:
            if sample.general.bestassemblyfile != 'NA' and sample[self.analysistype].runanalysis:
                make_path(sample[self.analysistype].outputdir)
                if len(sample.general.fastqfiles) == 2:
                    # Paired-end reads: bait both files against the targets
                    sample[self.analysistype].bbdukcmd = (
                        'bbduk.sh -Xmx{mem} ref={ref} in1={in1} in2={in2} k={kmer} '
                        'maskmiddle={mm} threads={c} outm={om}'.format(
                            mem=self.mem,
                            ref=sample[self.analysistype].baitfile,
                            in1=sample.general.trimmedcorrectedfastqfiles[0],
                            in2=sample.general.trimmedcorrectedfastqfiles[1],
                            kmer=kmer,
                            mm=maskmiddle,
                            c=str(self.cpus),
                            om=sample[self.analysistype].baitedfastq))
                else:
                    # Single-end reads
                    sample[self.analysistype].bbdukcmd = (
                        'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} '
                        'maskmiddle={mm} threads={cpus} outm={outm}'.format(
                            mem=self.mem,
                            ref=sample[self.analysistype].baitfile,
                            in1=sample.general.trimmedcorrectedfastqfiles[0],
                            kmer=kmer,
                            mm=maskmiddle,
                            cpus=str(self.cpus),
                            outm=sample[self.analysistype].baitedfastq))
                # Run the system call only if the baited file does not already exist
                if not os.path.isfile(sample[self.analysistype].baitedfastq):
                    out, err = run_subprocess(sample[self.analysistype].bbdukcmd)
                    write_to_logfile(sample[self.analysistype].bbdukcmd,
                                     sample[self.analysistype].bbdukcmd,
                                     self.logfile, sample.general.logout, sample.general.logerr,
                                     sample[self.analysistype].logout, sample[self.analysistype].logerr)
                    write_to_logfile(out, err,
                                     self.logfile, sample.general.logout, sample.general.logerr,
                                     sample[self.analysistype].logout, sample[self.analysistype].logerr)
Use bbduk to perform baiting :param maskmiddle: boolean argument; treat the middle base of a kmer as a wildcard to increase sensitivity in the presence of errors :param k: keyword argument for the length of kmers to use in the analyses
371,698
def split_param_vec(param_vec, rows_to_alts, design, return_all_types=False):
    num_index_coefs = design.shape[1]
    betas = param_vec[-1 * num_index_coefs:]

    remaining_idx = param_vec.shape[0] - num_index_coefs
    if remaining_idx > 0:
        intercepts = param_vec[:remaining_idx]
    else:
        intercepts = None

    if return_all_types:
        return None, None, intercepts, betas
    else:
        return None, intercepts, betas
Parameters ---------- param_vec : 1D ndarray. Elements should all be ints, floats, or longs. Should have as many elements as there are parameters being estimated. rows_to_alts : 2D scipy sparse matrix. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. All elements should be zeros or ones. design : 2D ndarray. There should be one row per observation per available alternative. There should be one column per utility coefficient being estimated. All elements should be ints, floats, or longs. return_all_types : bool, optional. Determines whether or not a tuple of 4 elements will be returned (with one element for the nest, shape, intercept, and index parameters for this model). If False, a tuple of 3 elements will be returned, as described below. Returns ------- `(None, intercepts, betas)` : tuple. The first element will be None since the clog-log model has no shape parameters. The second element will either be a 1D array of "outside" intercept parameters for this model or None, depending on whether outside intercepts are being estimated or not. The third element will be a 1D array of the index coefficients. Note ---- If `return_all_types == True` then the function will return a tuple of four objects. In order, these objects will either be None or the arrays representing the arrays corresponding to the nest, shape, intercept, and index parameters.
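A small numeric sketch of the split; the values are made up, and `rows_to_alts` may be None here since the function never touches it:

import numpy as np

design = np.zeros((6, 2))                     # two index coefficients
param_vec = np.array([0.5, -1.0, 2.0, 3.0])   # 2 intercepts + 2 betas

shapes, intercepts, betas = split_param_vec(param_vec, None, design)
print(shapes)      # None -- the clog-log model has no shape parameters
print(intercepts)  # [ 0.5 -1. ]
print(betas)       # [2. 3.]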
371,699
def seq_site_length(self):
    relative_positions_set = set()
    for peak_descr in self:
        relative_positions_set.update(peak_descr.relative_positions)
    return len(relative_positions_set)
Calculate length of a single sequence site based upon relative positions specified in peak descriptions. :return: Length of sequence site. :rtype: :py:class:`int`
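A tiny sketch of the counting behaviour; the peak-description objects are mocked up, not the library's real class:

from collections import namedtuple

PeakDescr = namedtuple('PeakDescr', 'relative_positions')  # hypothetical stand-in
peaks = [PeakDescr({0, 1, 2}), PeakDescr({2, 3})]
positions = set()
for p in peaks:
    positions.update(p.relative_positions)
print(len(positions))  # 4 -- overlapping positions are counted once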