positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def _as_log_entry(self, name, now): """Makes a `LogEntry` from this instance for the given log_name. Args: rules (:class:`ReportingRules`): determines what labels, metrics and logs to include in the report request. now (:class:`datetime.DateTime`): the current time Return: a ``LogEntry`` generated from this instance with the given name and timestamp Raises: ValueError: if the fields in this instance are insufficient to to create a valid ``ServicecontrolServicesReportRequest`` """ # initialize the struct with fields that are always present d = { u'http_response_code': self.response_code, u'timestamp': time.mktime(now.timetuple()) } # compute the severity severity = _SEVERITY.INFO if self.response_code >= 400: severity = _SEVERITY.ERROR d[u'error_cause'] = self.error_cause.name # add 'optional' fields to the struct if self.request_size > 0: d[u'request_size'] = self.request_size if self.response_size > 0: d[u'response_size'] = self.response_size if self.method: d[u'http_method'] = self.method if self.request_time: d[u'request_latency_in_ms'] = self.request_time.total_seconds() * 1000 # add 'copyable' fields to the struct for key in self.COPYABLE_LOG_FIELDS: value = getattr(self, key, None) if value: d[key] = value return sc_messages.LogEntry( name=name, timestamp=timestamp.to_rfc3339(now), severity=severity, structPayload=_struct_payload_from(d))
Makes a `LogEntry` from this instance for the given log_name. Args: rules (:class:`ReportingRules`): determines what labels, metrics and logs to include in the report request. now (:class:`datetime.DateTime`): the current time Return: a ``LogEntry`` generated from this instance with the given name and timestamp Raises: ValueError: if the fields in this instance are insufficient to create a valid ``ServicecontrolServicesReportRequest``
def _update(collection_name, upsert, multi, spec, doc, check_keys, opts):
    """Get an OP_UPDATE message.

    Returns a ``(message_bytes, encoded_update_length)`` tuple.
    """
    flags = 0
    if upsert:
        flags += 1  # wire-protocol bit 0: Upsert
    if multi:
        flags += 2  # wire-protocol bit 1: MultiUpdate
    encode = _dict_to_bson  # Make local. Uses extensions.
    encoded_update = encode(doc, check_keys, opts)
    # OP_UPDATE layout: ZERO int32, full collection name (cstring),
    # flags int32, selector document, update document.
    return b"".join([
        _ZERO_32,
        _make_c_string(collection_name),
        _pack_int(flags),
        encode(spec, False, opts),
        encoded_update]), len(encoded_update)
Get an OP_UPDATE message.
def get_padding_lengths(self) -> Dict[str, int]:
    """
    The ``TextField`` has a list of ``Tokens``, and each ``Token`` gets converted into arrays by
    (potentially) several ``TokenIndexers``.  This method gets the max length (over tokens)
    associated with each of these arrays.
    """
    # Our basic outline: we will iterate over `TokenIndexers`, and aggregate lengths over tokens
    # for each indexer separately. Then we will combine the results for each indexer into a single
    # dictionary, resolving any (unlikely) key conflicts by taking a max.
    lengths = []
    if self._indexed_tokens is None:
        raise ConfigurationError("You must call .index(vocabulary) on a "
                                 "field before determining padding lengths.")

    # Each indexer can return a different sequence length, and for indexers that return
    # multiple arrays each can have a different length.  We'll keep track of them here.
    for indexer_name, indexer in self._token_indexers.items():
        indexer_lengths = {}

        for indexed_tokens_key in self._indexer_name_to_indexed_token[indexer_name]:
            # This is a list of dicts, one for each token in the field.
            token_lengths = [indexer.get_padding_lengths(token)
                             for token in self._indexed_tokens[indexed_tokens_key]]
            if not token_lengths:
                # This is a padding edge case and occurs when we want to pad a ListField of
                # TextFields. In order to pad the list field, we need to be able to have an
                # _empty_ TextField, but if this is the case, token_lengths will be an empty
                # list, so we add the default empty padding dictionary to the list instead.
                token_lengths = [{}]
            # Iterate over the keys and find the maximum token length.
            # It's fine to iterate over the keys of the first token since all tokens
            # have the same keys.
            for key in token_lengths[0]:
                indexer_lengths[key] = max(x[key] if key in x else 0 for x in token_lengths)
        lengths.append(indexer_lengths)

    padding_lengths = {}
    num_tokens = set()
    for indexer_name, token_list in self._indexed_tokens.items():
        padding_lengths[f"{indexer_name}_length"] = len(token_list)
        num_tokens.add(len(token_list))

    # We don't actually use this for padding anywhere, but we used to.  We add this key back in
    # so that older configs still work if they sorted by this key in a BucketIterator.  Taking
    # the max of all of these should be fine for that purpose.
    padding_lengths['num_tokens'] = max(num_tokens)

    # Get all keys which have been used for padding for each indexer and
    # take the max if there are duplicates.
    padding_keys = {key for d in lengths for key in d.keys()}
    for padding_key in padding_keys:
        padding_lengths[padding_key] = max(x[padding_key] if padding_key in x else 0
                                           for x in lengths)
    return padding_lengths
The ``TextField`` has a list of ``Tokens``, and each ``Token`` gets converted into arrays by (potentially) several ``TokenIndexers``. This method gets the max length (over tokens) associated with each of these arrays.
def apply_changes(self, other, with_buffer=False):
    """Applies updates from the buffer of another filter.

    Params:
        other (MeanStdFilter): Other filter to apply info from
        with_buffer (bool): Flag for specifying if the buffer should be
            copied from other.

    Examples:
        >>> a = MeanStdFilter(())
        >>> a(1)
        >>> a(2)
        >>> print([a.rs.n, a.rs.mean, a.buffer.n])
        [2, 1.5, 2]
        >>> b = MeanStdFilter(())
        >>> b(10)
        >>> a.apply_changes(b, with_buffer=False)
        >>> print([a.rs.n, a.rs.mean, a.buffer.n])
        [3, 4.333333333333333, 2]
        >>> a.apply_changes(b, with_buffer=True)
        >>> print([a.rs.n, a.rs.mean, a.buffer.n])
        [4, 5.75, 1]
    """
    # Fold the other filter's buffered running stats into our own.
    self.rs.update(other.buffer)
    if with_buffer:
        # Replace (not merge) our buffer with a copy of the other's.
        self.buffer = other.buffer.copy()
Applies updates from the buffer of another filter. Params: other (MeanStdFilter): Other filter to apply info from with_buffer (bool): Flag for specifying if the buffer should be copied from other. Examples: >>> a = MeanStdFilter(()) >>> a(1) >>> a(2) >>> print([a.rs.n, a.rs.mean, a.buffer.n]) [2, 1.5, 2] >>> b = MeanStdFilter(()) >>> b(10) >>> a.apply_changes(b, with_buffer=False) >>> print([a.rs.n, a.rs.mean, a.buffer.n]) [3, 4.333333333333333, 2] >>> a.apply_changes(b, with_buffer=True) >>> print([a.rs.n, a.rs.mean, a.buffer.n]) [4, 5.75, 1]
def GuinierPorod(q, G, Rg, alpha):
    """Empirical Guinier-Porod scattering

    Inputs:
    -------
        ``q``: independent variable
        ``G``: factor of the Guinier-branch
        ``Rg``: radius of gyration
        ``alpha``: power-law exponent

    Formula:
    --------
        ``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise.
        ``q_sep`` and ``a`` are determined from conditions of smoothness at
        the cross-over.

    Literature:
    -----------
        B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
        716-719.
    """
    # Single-population special case of the generic multi-branch implementation.
    return GuinierPorodMulti(q, G, Rg, alpha)
Empirical Guinier-Porod scattering Inputs: ------- ``q``: independent variable ``G``: factor of the Guinier-branch ``Rg``: radius of gyration ``alpha``: power-law exponent Formula: -------- ``G * exp(-q^2*Rg^2/3)`` if ``q<q_sep`` and ``a*q^alpha`` otherwise. ``q_sep`` and ``a`` are determined from conditions of smoothness at the cross-over. Literature: ----------- B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43, 716-719.
def update_dict(self, label: Dict, pred: Dict):
    """
    If label is missing the right name, copy it from the prediction.
    """
    # NOTE(review): when *any* expected name is missing, this copies *all*
    # of self.label_names from pred, overwriting entries already in label —
    # confirm this is intended rather than copying only the missing keys.
    if not set(self.label_names).issubset(set(label.keys())):
        label.update({name: pred[name] for name in self.label_names})
    super().update_dict(label, pred)
If label is missing the right name, copy it from the prediction.
def get_page(search_text):
    """Format every search result for *search_text* as one HTML table string."""
    rows = ''.join('<TR><TD>' + hit + '</TD></TR>'
                   for hit in search_aikif(search_text))
    return ('<table class="as-table as-table-zebra as-table-horizontal">'
            + rows + '</TABLE>\n\n')
formats the entire search result in a table output
def docker_monitor(self, cidfile, tmpdir_prefix, cleanup_cidfile, process):
    # type: (Text, Text, bool, subprocess.Popen) -> None
    """Record memory usage of the running Docker container."""
    # Todo: consider switching to `docker create` / `docker start`
    # instead of `docker run` as `docker create` outputs the container ID
    # to stdout, but the container is frozen, thus allowing us to start the
    # monitoring process without dealing with the cidfile or too-fast
    # container execution
    cid = None
    # Poll once a second until the container writes its ID to the cidfile,
    # or until the container process has already exited.
    while cid is None:
        time.sleep(1)
        if process.returncode is not None:
            if cleanup_cidfile:
                os.remove(cidfile)
            return
        try:
            with open(cidfile) as cidhandle:
                cid = cidhandle.readline().strip()
        except (OSError, IOError):
            cid = None
    max_mem = self.docker_get_memory(cid)
    tmp_dir, tmp_prefix = os.path.split(tmpdir_prefix)
    stats_file = tempfile.NamedTemporaryFile(prefix=tmp_prefix, dir=tmp_dir)
    # Stream `docker stats` output (memory percentage only) into a temp file
    # for the lifetime of the container process.
    with open(stats_file.name, mode="w") as stats_file_handle:
        stats_proc = subprocess.Popen(
            ['docker', 'stats', '--no-trunc', '--format', '{{.MemPerc}}', cid],
            stdout=stats_file_handle,
            stderr=subprocess.DEVNULL)
        process.wait()
        stats_proc.kill()
    max_mem_percent = 0
    with open(stats_file.name, mode="r") as stats:
        for line in stats:
            try:
                # Strip terminal control codes and the trailing '%' sign.
                mem_percent = float(re.sub(
                    CONTROL_CODE_RE, '', line).replace('%', ''))
                if mem_percent > max_mem_percent:
                    max_mem_percent = mem_percent
            except ValueError:
                # NOTE(review): `break` abandons the remaining samples on the
                # first unparseable line; `continue` may be intended — confirm.
                break
    _logger.info(u"[job %s] Max memory used: %iMiB", self.name,
                 int((max_mem_percent * max_mem) / (2 ** 20)))
    if cleanup_cidfile:
        os.remove(cidfile)
Record memory usage of the running Docker container.
def generate_downloader(headers: Dict[str, str], args: Any, max_per_hour: int=30
                        ) -> Callable[..., None]:
    """Create function to download with rate limiting and text progress.

    Args:
        headers: HTTP headers sent with every request.
        args: forwarded to the ``rate_limited`` decorator.
        max_per_hour: maximum number of downloads allowed per hour.

    Returns:
        A ``_downloader(url, dest)`` callable that streams ``url`` into the
        file ``dest``, creating parent directories as needed.
    """
    def _downloader(url: str, dest: str) -> None:
        @rate_limited(max_per_hour, args)
        def _rate_limited_download() -> None:
            # Create parent directory of file, and its parents, if they don't exist.
            parent = os.path.dirname(dest)
            if not os.path.exists(parent):
                os.makedirs(parent)

            response = requests.get(url, headers=headers, stream=True)
            LOG.info(f"Downloading from '{url}'.")
            LOG.info(f"Trying to save to '{dest}'.")

            length = response.headers.get("content-length")
            if length is None:
                total_length = 0
            else:
                total_length = int(length)
            expected_size = (total_length / CHUNK_SIZE) + 1
            chunks = response.iter_content(chunk_size=CHUNK_SIZE)

            # per http://stackoverflow.com/a/20943461
            open(dest, "a", encoding=FORCED_ENCODING).close()
            with open(dest, "wb") as stream:
                for chunk in tui.progress.bar(chunks, expected_size=expected_size):
                    if not chunk:
                        # BUG FIX: the original `return`ed here, which silently
                        # truncated the download on an empty keep-alive chunk.
                        # Skip the empty chunk and keep streaming instead.
                        continue
                    stream.write(chunk)
                    stream.flush()
        _rate_limited_download()
    return _downloader
Create function to download with rate limiting and text progress.
def paired_reader_from_bamfile(args, log, usage_logger, annotated_writer):
    '''Given a BAM file, return a generator that yields filtered, paired reads'''
    # Total alignment count is computed up front (used for progress reporting
    # by the paired reader).
    total_aligns = pysamwrapper.total_align_count(args.input_bam)
    bamfile_generator = _bamfile_generator(args.input_bam)
    return _paired_reader(args.umt_length, bamfile_generator,
                          total_aligns, log, usage_logger, annotated_writer)
Given a BAM file, return a generator that yields filtered, paired reads
def data_import(self, json_response):
    """Import scenes from JSON response.

    Raises:
        PyVLXException: if the response contains no 'data' element.
    """
    if 'data' not in json_response:
        raise PyVLXException('no element data found: {0}'.format(
            json.dumps(json_response)))
    data = json_response['data']
    for item in data:
        self.load_scene(item)
Import scenes from JSON response.
def wr_tex(self, fout_tex="gos_depth01.tex"):
    """Write a LaTeX table of depth-01 GO terms and their letter representation.

    Args:
        fout_tex: name of the .tex file to write.
    """
    data_nts = self.get_d1nts()
    joinchr = " & "
    eol = " \\\\\n"  # LaTeX row terminator
    with open(fout_tex, 'w') as prt:
        prt.write("\\begin{table}[!ht]\n")
        prt.write("\\begin{tabular}{|p{.5cm} | p{.5cm} | >{\\raggedleft}p{.9cm} ")
        prt.write("|p{.7cm} |p{1.8cm} |p{9cm}|}\n")
        prt.write("\\multicolumn{6}{c}{} \\\\\n")
        prt.write("\\hline\n")
        prt.write("\\rowcolor{gray!10}\n")
        # Header row: field names of the namedtuples.
        prt.write("{HDRS}{EOL}".format(
            HDRS=joinchr.join(next(iter(data_nts))._fields), EOL=eol))
        prt.write("\\hline\n")
        for idx, line in enumerate(get_lines(data_nts, joinchr=joinchr, eol=eol)):
            if idx % 2 == 1:
                prt.write("\\rowcolor{gray!7}\n")
            # BUG FIX: str.replace returns a new string; the original discarded
            # the result, so underscores reached LaTeX unescaped.
            line = line.replace('_', '\\_')
            prt.write(line)
        prt.write("\\hline\n")
        prt.write("\\end{tabular}\n")
        # BUG FIX: in the original non-raw string, "\textit" began with the
        # escape sequence \t and emitted a literal TAB character into the
        # caption; all backslashes are now written explicitly.
        caption = ("The descendant counts of GO terms at depth-01 are highly skewed. The "
                   "root term, \\textit{biological\\_process} has over twenty GO children at "
                   "depth-01 shown in the table sorted by their number of descendants "
                   "(dcnt) with \\textit{cellular process} at the top having 18k+ "
                   "descendants and \\textit{cell killing} near the bottom having only "
                   "about 100 descendants. The first column (D1) contains a letter used as "
                   "an alias for each depth-01 GO term. The second column represents the "
                   "number of descendants from the specified GO term from down to the total "
                   "of its descendant leaf-level GO terms, which have no child GO terms.")
        prt.write("\\caption{{{TEXT}}}\n\n".format(TEXT=caption))
        prt.write("\\label{table:supptbl_d1}\n")
        prt.write("\\end{table}\n")
    sys.stdout.write("  {N:>5} items WROTE: {TXT}\n".format(
        N=len(data_nts), TXT=fout_tex))
write text table of depth-01 GO terms and their letter representation.
def copyFile(input, output, replace=None):
    """Copy a file whole from input to output, preserving metadata.

    The copy is skipped when *output* already exists, unless *replace* is
    truthy.

    NOTE(review): parameter ``input`` shadows the builtin of the same name;
    kept unchanged for caller compatibility.
    """
    _found = findFile(output)
    # Simplified from `not _found or (_found and replace)`: the second
    # conjunct's `_found` is redundant, since `not _found` already covers
    # the falsy case.
    if not _found or replace:
        shutil.copy2(input, output)
Copy a file whole from input to output.
def compile_binary(source):
    """Build the chkrootkit binary from a source tarball and install it.

    Equivalent shell steps:
        $ tar xzvf chkrootkit.tar.gz
        $ cd chkrootkit-<version>
        $ make sense
        $ sudo mv chkrootkit-<version> /usr/local/chkrootkit
        $ sudo ln -s /usr/local/chkrootkit/chkrootkit /usr/local/bin/chkrootkit

    Returns:
        True on success, False if the tarball listing was unusable.
    """
    cmd = 'make sense'
    slink = '/usr/local/bin/chkrootkit'
    target = '/usr/local/chkrootkit/chkrootkit'

    # Tar Extraction.  NOTE(review): extractall on an untrusted tarball can
    # write outside TMPDIR (path traversal) — confirm `source` is trusted.
    t = tarfile.open(source, 'r')
    t.extractall(TMPDIR)
    if isinstance(t.getnames(), list):
        extract_dir = t.getnames()[0].split('/')[0]
        os.chdir(TMPDIR + '/' + extract_dir)
        logger.info('make output: \n%s' % subprocess.getoutput(cmd))
        # BUG FIX: the destination was the *relative* path
        # 'usr/local/chkrootkit' (resolved under the build directory);
        # install to /usr/local/chkrootkit as `target`, `slink` and the
        # docstring expect.
        os.rename(TMPDIR + '/' + extract_dir, '/usr/local/chkrootkit')
        # create symlink to binary in directory
        os.symlink(target, slink)
        return True
    return False
Prepare chkrootkit binary $ tar xzvf chkrootkit.tar.gz $ cd chkrootkit-0.52 $ make sense sudo mv chkrootkit-0.52 /usr/local/chkrootkit sudo ln -s
def ComponentsToPath(components):
    """Converts a list of path components to a canonical path representation.

    Args:
        components: A sequence of path components.

    Returns:
        A canonical MySQL path representation.
    """
    precondition.AssertIterableType(components, Text)

    # Validate every component before assembling the path.
    for part in components:
        if not part:
            raise ValueError("Empty path component in: {}".format(components))
        if "/" in part:
            raise ValueError("Path component with '/' in: {}".format(components))

    # An empty sequence canonicalizes to the empty path.
    if not components:
        return ""
    return "/" + "/".join(components)
Converts a list of path components to a canonical path representation. Args: components: A sequence of path components. Returns: A canonical MySQL path representation.
def cares_about(self, delta):
    """Return True if this observer "cares about" (i.e. wants to be
    called for) this delta.
    """
    # entity_id is treated as a regular expression matched against the
    # delta's id (anchored at the start, per re.match).
    if (self.entity_id and delta.get_id() and
            not re.match(self.entity_id, str(delta.get_id()))):
        return False
    if self.entity_type and self.entity_type != delta.entity:
        return False
    if self.action and self.action != delta.type:
        return False
    # An optional user-supplied predicate gets the final say.
    if self.predicate and not self.predicate(delta):
        return False
    return True
Return True if this observer "cares about" (i.e. wants to be called for) this delta.
def get(self, singleExposure=False):
    """
    *get the orbfitPositions object*

    **Key Arguments:**
        - ``singleExposure`` -- only execute for a single exposure (useful for debugging)

    **Return:**
        - None

    **Usage:** See class docstring
    """
    self.log.info('starting the ``get`` method')

    if singleExposure:
        batchSize = 1
    else:
        batchSize = int(self.settings["orbfit"]["batch size"])

    # Process exposures batch by batch until no exposure still requires
    # orbfit positions.
    exposureCount = 1
    while exposureCount > 0:
        expsoureObjects, astorbString, exposureCount = self._get_exposures_requiring_orbfit_positions(
            batchSize=batchSize)
        if exposureCount:
            orbfitPositions = self._get_orbfit_positions(
                expsoureObjects, astorbString)
            self._add_orbfit_eph_to_database(
                orbfitPositions, expsoureObjects)
        if singleExposure:
            # Debug mode: stop after a single batch.
            exposureCount = 0

    self.log.info('completed the ``get`` method')
    return None
*get the orbfitPositions object* **Key Arguments:** - ``singleExposure`` -- only execute for a single exposure (useful for debugging) **Return:** - None **Usage:** See class docstring
def run_experiment(self):
    """ Run the job specified in experiment_script """
    # These locals are deliberately in scope for the exec'd script below.
    data = self.data
    options = self.options
    result = self.result
    # NOTE(review): the file handle is never closed; consider a `with` block.
    command = open(self.options.experiment_script).read()
    result["experiment_script"] = command
    t0 = time.time()
    # SECURITY: exec runs arbitrary code from experiment_script with full
    # interpreter privileges — the script must be trusted.
    exec(command)  # creates variable result
    t1 = time.time()
    print(("Elapsed time for running the experiment is %.2f seconds" % (t1 - t0)))
    self.result = result
    return self.result
Run the job specified in experiment_script
def create(self, path, mode):  # pragma: no cover
    """
    This is currently a read-only filesystem.
    GetAttr will return a stat for everything; if getattr raises
    FuseOSError(ENOENT) the OS may call this function and the write function.
    """
    # print("create {}".format(path))
    now_time = time()
    with self.attr_lock:
        base = NoStat()
        base.staged = True  # entry exists only in the staging buffer so far
        base.st_mode = stat.S_IFREG | OBJ_PERMISSION
        base.st_nlink = 1
        base.st_size = -1  # size unknown until staged data is flushed
        self.attr[path] = {TIMESTAMP_KEY: now_time,
                           BASE_KEY: base,
                           STAGED_KEY: BytesIO()}
    return mode
This is currently a read-only filessytem. GetAttr will return a stat for everything if getattr raises FuseOSError(ENOENT) OS may call this function and the write function
def remove(self):
    """remove item from its class"""
    DoubleLinkedListItem.remove(self)  # remove from double linked list
    if self.succ is self:  # list was a singleton
        self.theclass.items = None  # class is empty
    elif self.theclass.items is self:  # oops, we removed the head
        self.theclass.items = self.succ
remove item from its class
def multi_path_generator(pathnames):
    """
    Yield (name, chunkgen) pairs for all of the files found under the
    given list of pathnames.  Directories are walked recursively, so
    their contents are emitted as well.  chunkgen is a callable whose
    result can be iterated to read the file's contents in several reads.
    """
    for name in pathnames:
        if not isdir(name):
            yield name, file_chunk(name)
        else:
            yield from directory_generator(name)
yields (name,chunkgen) for all of the files found under the list of pathnames given. This is recursive, so directories will have their contents emitted. chunkgen is a function that can called and iterated over to obtain the contents of the file in multiple reads.
async def blobize(self, elem=None, elem_type=None, params=None):
    """
    Main blobbing
    :param elem: element to serialize (when writing)
    :param elem_type: type descriptor for the element
    :param params: extra serialization parameters
    :return: serialized bytes when writing, otherwise the parsed field
    """
    if self.writing:
        # Serialize into the internal buffer, then snapshot it as bytes.
        await self.field(elem=elem, elem_type=elem_type, params=params)
        return bytes(self.iobj.buffer)
    else:
        return await self.field(elem=elem, elem_type=elem_type, params=params)
Main blobbing :param elem: :param elem_type: :param params: :return:
def create_token_mapping(docgraph_with_old_names, docgraph_with_new_names,
                         verbose=False):
    """
    given two document graphs which annotate the same text and which use the
    same tokenization, creates a dictionary with a mapping from the token
    IDs used in the first graph to the token IDs used in the second graph.

    Parameters
    ----------
    docgraph_with_old_names : DiscourseDocumentGraph
        a document graph with token IDs that will be replaced later on
    docgraph_with_new_names : DiscourseDocumentGraph
        a document graph with token IDs that will replace the token IDs
        used in ``docgraph_with_old_names`` later on
    verbose : bool
        if True, a tokenization mismatch raises with keyword-in-context
        output instead of the plain token pair.

    Returns
    -------
    old2new : dict
        maps from a token ID used in ``docgraph_with_old_names`` to the
        token ID used in ``docgraph_with_new_names`` to reference the same
        token

    Raises
    ------
    ValueError
        if the two graphs' token streams disagree at any position
    """
    def kwic_string(docgraph, keyword_index):
        # Keyword-in-context line used only for verbose error reporting.
        tokens = [tok for (tokid, tok) in list(docgraph.get_tokens())]
        before, keyword, after = get_kwic(tokens, keyword_index)
        return "{0} (Index: {1}): {2} [[{3}]] {4}\n".format(
            docgraph.name, keyword_index, ' '.join(before), keyword,
            ' '.join(after))

    # generators of (token ID, token) tuples; iter() also tolerates
    # get_tokens() implementations that return plain sequences.
    old_token_gen = iter(docgraph_with_old_names.get_tokens())
    new_token_gen = iter(docgraph_with_new_names.get_tokens())

    old2new = {}
    for i, (new_tok_id, new_tok) in enumerate(new_token_gen):
        # BUG FIX: `generator.next()` was removed in Python 3;
        # the next() builtin works on both Python 2 and 3.
        old_tok_id, old_tok = next(old_token_gen)
        if new_tok != old_tok:  # token mismatch
            if verbose:
                raise ValueError(u"Tokenization mismatch:\n{0}{1}".format(
                    kwic_string(docgraph_with_old_names, i),
                    kwic_string(docgraph_with_new_names, i)))
            raise ValueError(
                u"Tokenization mismatch: {0} ({1}) vs. {2} ({3})\n"
                "\t{4} != {5}".format(
                    docgraph_with_new_names.name, docgraph_with_new_names.ns,
                    docgraph_with_old_names.name, docgraph_with_old_names.ns,
                    new_tok, old_tok).encode('utf-8'))
        else:
            old2new[old_tok_id] = new_tok_id
    return old2new
given two document graphs which annotate the same text and which use the same tokenization, creates a dictionary with a mapping from the token IDs used in the first graph to the token IDs used in the second graph. Parameters ---------- docgraph_with_old_names : DiscourseDocumentGraph a document graph with token IDs that will be replaced later on docgraph_with_new_names : DiscourseDocumentGraph a document graph with token IDs that will replace the token IDs used in ``docgraph_with_old_names`` later on Returns ------- old2new : dict maps from a token ID used in ``docgraph_with_old_names`` to the token ID used in ``docgraph_with_new_names`` to reference the same token
def stop(self, container, instances=None, map_name=None, **kwargs):
    """
    Stops instances for a container configuration.

    :param container: Container name.
    :type container: unicode | str
    :param instances: Instance names to stop. If not specified, will stop all instances as specified in the
     configuration (or just one default instance).
    :type instances: collections.Iterable[unicode | str | NoneType]
    :param map_name: Container map name. Optional - if not provided the default map is used.
    :type map_name: unicode | str
    :param raise_on_error: Errors on stop and removal may result from Docker volume problems, that do not further
     affect further actions. Such errors are always logged, but do not raise an exception unless this is set to
     ``True``. Please note that 404 errors (on non-existing containers) are always ignored on stop and removal.
    :type raise_on_error: bool
    :param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to
     the main container stop.
    :return: Return values of stopped containers.
    :rtype: list[dockermap.map.runner.ActionOutput]
    """
    # Thin delegation to the generic action runner with the 'stop' action.
    return self.run_actions('stop', container, instances=instances, map_name=map_name, **kwargs)
Stops instances for a container configuration. :param container: Container name. :type container: unicode | str :param instances: Instance names to stop. If not specified, will stop all instances as specified in the configuration (or just one default instance). :type instances: collections.Iterable[unicode | str | NoneType] :param map_name: Container map name. Optional - if not provided the default map is used. :type map_name: unicode | str :param raise_on_error: Errors on stop and removal may result from Docker volume problems, that do not further affect further actions. Such errors are always logged, but do not raise an exception unless this is set to ``True``. Please note that 404 errors (on non-existing containers) are always ignored on stop and removal. :type raise_on_error: bool :param kwargs: Additional kwargs. If multiple actions are resulting from this, they will only be applied to the main container stop. :return: Return values of stopped containers. :rtype: list[dockermap.map.runner.ActionOutput]
def copy_and_sum_families(family_source, family_target):
    """Merge *family_source* into *family_target* in place.

    Keys absent from the target are copied over; keys present in both
    have their values added (via ``+=``) onto the target's entry.
    """
    for key in family_source:
        if key in family_target:
            family_target[key] += family_source[key]
        else:
            family_target[key] = family_source[key]
methods iterates thru source family and copies its entries to target family in case key already exists in both families - then the values are added
def _bbvi_fit(self, posterior, optimizer='RMSProp', iterations=1000, map_start=True, batch_size=12, mini_batch=None, learning_rate=0.001, record_elbo=False, quiet_progress=False, **kwargs): """ Performs Black Box Variational Inference Parameters ---------- posterior : method Hands bbvi_fit a posterior object optimizer : string Stochastic optimizer: one of RMSProp or ADAM. iterations: int How many iterations for BBVI map_start : boolean Whether to start values from a MAP estimate (if False, uses default starting values) Returns ---------- BBVIResults object """ # Starting values phi = self.latent_variables.get_z_starting_values() phi = kwargs.get('start',phi).copy() # If user supplied if self.model_type not in ['GPNARX','GPR','GP','GASRank'] and map_start is True and mini_batch is None: p = optimize.minimize(posterior, phi, method='L-BFGS-B') # PML starting values start_loc = 0.8*p.x + 0.2*phi else: start_loc = phi start_ses = None # Starting values for approximate distribution for i in range(len(self.latent_variables.z_list)): approx_dist = self.latent_variables.z_list[i].q if isinstance(approx_dist, Normal): if start_ses is None: self.latent_variables.z_list[i].q.mu0 = start_loc[i] self.latent_variables.z_list[i].q.sigma0 = np.exp(-3.0) else: self.latent_variables.z_list[i].q.mu0 = start_loc[i] self.latent_variables.z_list[i].q.sigma0 = start_ses[i] q_list = [k.q for k in self.latent_variables.z_list] if mini_batch is None: bbvi_obj = BBVI(posterior, q_list, batch_size, optimizer, iterations, learning_rate, record_elbo, quiet_progress) else: bbvi_obj = BBVIM(posterior, self.neg_logposterior, q_list, mini_batch, optimizer, iterations, learning_rate, mini_batch, record_elbo, quiet_progress) q, q_z, q_ses, elbo_records = bbvi_obj.run() self.latent_variables.set_z_values(q_z,'BBVI',np.exp(q_ses),None) for k in range(len(self.latent_variables.z_list)): self.latent_variables.z_list[k].q = q[k] self.latent_variables.estimation_method = 'BBVI' theta, Y, scores, states, 
states_var, X_names = self._categorize_model_output(q_z) # Change this in future try: latent_variables_store = self.latent_variables.copy() except: latent_variables_store = self.latent_variables return BBVIResults(data_name=self.data_name, X_names=X_names, model_name=self.model_name, model_type=self.model_type, latent_variables=latent_variables_store, data=Y, index=self.index, multivariate_model=self.multivariate_model, objective_object=self.neg_logposterior, method='BBVI', ses=q_ses, signal=theta, scores=scores, elbo_records=elbo_records, z_hide=self._z_hide, max_lag=self.max_lag, states=states, states_var=states_var)
Performs Black Box Variational Inference Parameters ---------- posterior : method Hands bbvi_fit a posterior object optimizer : string Stochastic optimizer: one of RMSProp or ADAM. iterations: int How many iterations for BBVI map_start : boolean Whether to start values from a MAP estimate (if False, uses default starting values) Returns ---------- BBVIResults object
def can_be_executed_by(self, thread_id):
    '''By default, it must be in the same thread to be executed
    '''
    own_id = self.thread_id
    return own_id == thread_id or own_id.endswith('|' + thread_id)
By default, it must be in the same thread to be executed
def randomprune(self,n):
    """prune down to n items at random, disregarding their score"""
    # random.sample picks without replacement and returns a new list;
    # it raises ValueError if n exceeds len(self.data).
    self.data = random.sample(self.data, n)
prune down to n items at random, disregarding their score
def allocate(self, nodes, append=True):
    """Allocates all nodes from `nodes` list in this route

    Any node already allocated elsewhere is first deallocated from its
    current route.  The route's cached demand total is updated.

    Parameters
    ----------
    nodes : iterable
        Nodes to allocate to this route.
    append : bool, defaults to True
        If True, nodes are appended to the route; otherwise each node is
        inserted at the front.
        NOTE(review): inserting several nodes at index 0 reverses their
        relative order — confirm that is intended.
    """
    nodes_demand = 0
    # Iterate over a snapshot (the original built the same copy with a
    # redundant identity comprehension) so deallocation side effects
    # cannot disturb iteration.
    for node in list(nodes):
        if node._allocation:
            node._allocation.deallocate([node])
        node._allocation = self
        nodes_demand += node.demand()
        if append:
            self._nodes.append(node)
        else:
            self._nodes.insert(0, node)

    self._demand += nodes_demand
Allocates all nodes from `nodes` list in this route Parameters ---------- nodes : type Desc append : bool, defaults to True Desc
def delete_agile_board(self, board_id):
    """
    Delete agile board by id
    :param board_id: id of the board to delete
    :return: response of the DELETE request
    """
    url = 'rest/agile/1.0/board/{}'.format(str(board_id))
    return self.delete(url)
Delete agile board by id :param board_id: :return:
def _ConvertParamType(self, paramType):
    """ Convert vmodl.reflect.DynamicTypeManager.ParamTypeInfo to pyVmomi
    param definition
    """
    if paramType:
        name = paramType.name
        version = paramType.version
        aType = paramType.type
        flags = self._ConvertAnnotations(paramType.annotation)
        privId = paramType.privId
        # pyVmomi param tuple order: (name, type, version, flags, privId)
        param = (name, aType, version, flags, privId)
    else:
        param = None
    return param
Convert vmodl.reflect.DynamicTypeManager.ParamTypeInfo to pyVmomi param definition
def start_dag(self, dag, *, data=None):
    """ Schedule the execution of a dag by sending a signal to the workflow.

    Args:
        dag (Dag, str): The dag object or the name of the dag that should be started.
        data (MultiTaskData): The data that should be passed on to the new dag.

    Returns:
        str: The name of the successfully started dag.
    """
    # Accept either a Dag object or its name; non-MultiTaskData payloads
    # are dropped (sent as None).
    return self._client.send(
        Request(
            action='start_dag',
            payload={'name': dag.name if isinstance(dag, Dag) else dag,
                     'data': data if isinstance(data, MultiTaskData) else None}
        )
    ).payload['dag_name']
Schedule the execution of a dag by sending a signal to the workflow. Args: dag (Dag, str): The dag object or the name of the dag that should be started. data (MultiTaskData): The data that should be passed on to the new dag. Returns: str: The name of the successfully started dag.
def run_network_operation(self, task, wait_timeout=None, close_timeout=None,
                          name='Network operation'):
    '''Run the task and raise appropriate exceptions.

    Coroutine.
    '''
    # NOTE(review): a generic Exception for this programmer error; a
    # ValueError would be more precise — confirm callers.
    if wait_timeout is not None and close_timeout is not None:
        raise Exception(
            'Cannot use wait_timeout and close_timeout at the same time')

    try:
        if close_timeout is not None:
            # Close-timer based timeout: run under the timer, then check it.
            with self._close_timer.with_timeout():
                data = yield from task

                if self._close_timer.is_timeout():
                    raise NetworkTimedOut(
                        '{name} timed out.'.format(name=name))
                else:
                    return data
        elif wait_timeout is not None:
            data = yield from asyncio.wait_for(task, wait_timeout)
            return data
        else:
            return (yield from task)
    except asyncio.TimeoutError as error:
        self.close()
        raise NetworkTimedOut(
            '{name} timed out.'.format(name=name)) from error
    except (tornado.netutil.SSLCertificateError, SSLVerificationError) \
            as error:
        self.close()
        raise SSLVerificationError(
            '{name} certificate error: {error}'
            .format(name=name, error=error)) from error
    except AttributeError as error:
        self.close()
        raise NetworkError(
            '{name} network error: connection closed unexpectedly: {error}'
            .format(name=name, error=error)) from error
    except (socket.error, ssl.SSLError, OSError, IOError) as error:
        self.close()
        if isinstance(error, NetworkError):
            raise

        if error.errno == errno.ECONNREFUSED:
            raise ConnectionRefused(
                error.errno, os.strerror(error.errno)) from error

        # XXX: This quality case brought to you by OpenSSL and Python.
        # Example: _ssl.SSLError: [Errno 1] error:14094418:SSL
        # routines:SSL3_READ_BYTES:tlsv1 alert unknown ca
        error_string = str(error).lower()

        if 'certificate' in error_string or 'unknown ca' in error_string:
            raise SSLVerificationError(
                '{name} certificate error: {error}'
                .format(name=name, error=error)) from error
        else:
            if error.errno:
                raise NetworkError(
                    error.errno, os.strerror(error.errno)) from error
            else:
                raise NetworkError(
                    '{name} network error: {error}'
                    .format(name=name, error=error)) from error
Run the task and raise appropriate exceptions. Coroutine.
def fix_orientation(image):
    """ adapted from https://stackoverflow.com/a/30462851/318857

        Apply Image.transpose to ensure 0th row of pixels is at the visual
        top of the image, and 0th column is the visual left-hand side.
        Return the original image if unable to determine the orientation.

        As per CIPA DC-008-2012, the orientation field contains an integer,
        1 through 8. Other values are reserved.
    """
    exif_orientation_tag = 0x0112
    # Index 0 is unused; indices 1-8 map each EXIF orientation value to the
    # sequence of transposes that undoes it.
    exif_transpose_sequences = [
        [],
        [],
        [PIL.Image.FLIP_LEFT_RIGHT],
        [PIL.Image.ROTATE_180],
        [PIL.Image.FLIP_TOP_BOTTOM],
        [PIL.Image.FLIP_LEFT_RIGHT, PIL.Image.ROTATE_90],
        [PIL.Image.ROTATE_270],
        [PIL.Image.FLIP_TOP_BOTTOM, PIL.Image.ROTATE_90],
        [PIL.Image.ROTATE_90],
    ]

    try:
        # pylint:disable=protected-access
        orientation = image._getexif()[exif_orientation_tag]
        sequence = exif_transpose_sequences[orientation]
        return functools.reduce(type(image).transpose, sequence, image)
    except (TypeError, AttributeError, KeyError, IndexError):
        # either no EXIF tags, no orientation tag, or a reserved
        # orientation value outside 1-8 (IndexError was previously
        # uncaught and crashed on such values)
        pass
    return image
adapted from https://stackoverflow.com/a/30462851/318857 Apply Image.transpose to ensure 0th row of pixels is at the visual top of the image, and 0th column is the visual left-hand side. Return the original image if unable to determine the orientation. As per CIPA DC-008-2012, the orientation field contains an integer, 1 through 8. Other values are reserved.
def plt_goids(gosubdag, fout_img, goids, **kws_plt):
    """Plot GO IDs in a DAG (Directed Acyclic Graph).

    Builds a sub-DAG restricted to *goids*, writes the rendered image to
    *fout_img*, and returns the plot object.
    """
    sub_dag = GoSubDag(goids, gosubdag.go2obj, rcntobj=gosubdag.rcntobj, **kws_plt)
    plotter = GoSubDagPlot(sub_dag, **kws_plt)
    plotter.plt_dag(fout_img)
    return plotter
Plot GO IDs in a DAG (Directed Acyclic Graph).
def forum_id(self):
    """The forum_id required when posting to the group/team."""
    page_html = self.request(self.team_url).text
    parsed = BeautifulSoup(page_html)
    # The id is carried in a hidden form element named 'forum_id'.
    return parsed.find(id='forum_id').attrs['value']
小组发帖要用的 forum_id
def visitIriRange(self, ctx: ShExDocParser.IriRangeContext):
    """ iriRange: iri (STEM_MARK iriExclusion*)? """
    base_ref = self.context.iri_to_iriref(ctx.iri())
    if not ctx.STEM_MARK():
        # Plain objectValue: the IRI itself.
        member = base_ref
    elif ctx.iriExclusion():
        # IriStemRange: a stem plus one or more exclusions.
        member = IriStemRange(base_ref, exclusions=[])
        self._iri_exclusions(member, ctx.iriExclusion())
    else:
        # Bare stem: IriStem {stem: IRI}.
        member = IriStem(base_ref)
    self.nodeconstraint.values.append(member)
iriRange: iri (STEM_MARK iriExclusion*)?
def elementTypeName(self):
    """ String representation of the element type. """
    if self._array is None:
        # Fall back to the parent implementation when no array is loaded.
        return super(ArrayRti, self).elementTypeName
    dtype = self._array.dtype
    if dtype.names:
        # Structured (record) dtypes are summarized rather than spelled out.
        return '<structured>'
    return str(dtype)
String representation of the element type.
def upsert_users_from_legacy_publication_trigger(plpy, td):
    """A compatibility trigger to upsert users from legacy persons table.

    Args:
        plpy: the PL/Python database access object.
        td: trigger data; ``td['new']`` is the row being inserted.

    Returns:
        str: the trigger modification state, always ``"OK"``.
    """
    modified_state = "OK"
    new_row = td['new']
    # `x or []` normalizes None/falsy role lists (replaces the old
    # `x and x or []` idiom, which is equivalent for these values).
    authors = new_row['authors'] or []
    maintainers = new_row['maintainers'] or []
    licensors = new_row['licensors'] or []
    # Only legacy publications carry a version; anything else is a no-op.
    is_legacy_publication = new_row['version'] is not None
    if not is_legacy_publication:
        return modified_state

    # Upsert all roles into the users table.
    users = list(set(authors + maintainers + licensors))
    plan = plpy.prepare("""\
SELECT username FROM users WHERE username = any($1)""", ['text[]'])
    existing_users = set([r['username'] for r in plpy.execute(plan, (users,))])

    new_users = set(users).difference(existing_users)
    # Prepare the INSERT plan once, not once per user as before.
    insert_plan = plpy.prepare("""\
INSERT INTO users (username, first_name, last_name, full_name, title)
SELECT personid, firstname, surname, fullname, honorific
FROM persons where personid = $1""", ['text'])
    for username in new_users:
        plpy.execute(insert_plan, (username,))

    return modified_state
A compatibility trigger to upsert users from legacy persons table.
def _ctypes_out(parameter): """Returns a parameter variable declaration for an output variable for the specified parameter. """ if (parameter.dimension is not None and ":" in parameter.dimension and "out" in parameter.direction and ("allocatable" in parameter.modifiers or "pointer" in parameter.modifiers)): if parameter.direction == "(inout)": return ("type(C_PTR), intent(inout) :: {}_o".format(parameter.name), True) else: #self.direction == "(out)" since that is the only other option. return ("type(C_PTR), intent(inout) :: {}_c".format(parameter.name), True)
Returns a parameter variable declaration for an output variable for the specified parameter.
def _show_or_dump(self, dump=False, indent=3, lvl="", label_lvl="", first_call=True):
    """ Reproduced from packet.py

    Build (and optionally print) a human-readable dump of this layer and
    its payload.

    Args:
        dump: when True, return the string rendered with the plain ANSI
            theme instead of printing with the configured color theme.
        indent: spaces per payload indentation level.
        lvl: current field indentation prefix.
        label_lvl: prefix placed in front of every emitted line.
        first_call: True on the outermost call; controls print vs return.

    Returns:
        The formatted string when ``dump`` is True or this is a nested
        call; otherwise prints the string and returns None.
    """
    ct = AnsiColorTheme() if dump else conf.color_theme
    s = "%s%s %s %s \n" % (label_lvl,
                           ct.punct("###["),
                           ct.layer_name(self.name),
                           ct.punct("]###"))
    # The original emitted this body once per field in fields_desc[:-1]
    # and then repeated the identical statements verbatim for the last
    # field; a single loop over all fields is equivalent and no longer
    # raises IndexError when fields_desc is empty.
    for f in self.fields_desc:
        ncol = ct.field_name
        vcol = ct.field_value
        fvalue = self.getfieldval(f.name)
        begn = "%s %-10s%s " % (label_lvl + lvl, ncol(f.name), ct.punct("="),)
        reprval = f.i2repr(self, fvalue)
        if isinstance(reprval, str):
            # Continuation lines are aligned under the value column.
            reprval = reprval.replace("\n",
                                      "\n" + " " * (len(label_lvl) + len(lvl) +
                                                    len(f.name) + 4))
        s += "%s%s\n" % (begn, vcol(reprval))
    if self.payload:
        s += self.payload._show_or_dump(dump=dump, indent=indent,
                                        lvl=lvl + (" " * indent * self.show_indent),  # noqa: E501
                                        label_lvl=label_lvl, first_call=False)  # noqa: E501

    if first_call and not dump:
        print(s)
    else:
        return s
Reproduced from packet.py
def get_orthology_matrix(self, pid_cutoff=None, bitscore_cutoff=None, evalue_cutoff=None, filter_condition='OR', remove_strains_with_no_orthology=True, remove_strains_with_no_differences=False, remove_genes_not_in_base_model=True): """Create the orthology matrix by finding best bidirectional BLAST hits. Genes = rows, strains = columns Runs run_makeblastdb, run_bidirectional_blast, and calculate_bbh for protein sequences. Args: pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100] bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits evalue_cutoff (float): Maximum E-value allowed between BLAST hits filter_condition (str): 'OR' or 'AND', how to combine cutoff filters. 'OR' gives more results since it is less stringent, as you will be filtering for hits with (>80% PID or >30 bitscore or <0.0001 evalue). remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model. Default is False because since orthology is found using a PID cutoff, all genes may be present but differences may be on the sequence level. remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our base model. This happens if we use a genome file for our model that has other genes in it. Returns: DataFrame: Orthology matrix calculated from best bidirectional BLAST hits. 
""" # TODO: document and test other cutoffs # Get the path to the reference genome r_file = self.reference_gempro.genome_path bbh_files = {} log.info('Running bidirectional BLAST and finding best bidirectional hits (BBH)...') for strain_gempro in tqdm(self.strains): g_file = strain_gempro.genome_path # Run bidirectional BLAST log.debug('{} vs {}: Running bidirectional BLAST'.format(self.reference_gempro.id, strain_gempro.id)) r_vs_g, g_vs_r = ssbio.protein.sequence.utils.blast.run_bidirectional_blast(reference=r_file, other_genome=g_file, dbtype='prot', outdir=self.sequences_by_organism_dir) # Using the BLAST files, find the BBH log.debug('{} vs {}: Finding BBHs'.format(self.reference_gempro.id, strain_gempro.id)) bbh = ssbio.protein.sequence.utils.blast.calculate_bbh(blast_results_1=r_vs_g, blast_results_2=g_vs_r, outdir=self.sequences_by_organism_dir) bbh_files[strain_gempro.id] = bbh # Make the orthologous genes matrix log.info('Creating orthology matrix from BBHs...') ortho_matrix = ssbio.protein.sequence.utils.blast.create_orthology_matrix(r_name=self.reference_gempro.id, genome_to_bbh_files=bbh_files, pid_cutoff=pid_cutoff, bitscore_cutoff=bitscore_cutoff, evalue_cutoff=evalue_cutoff, filter_condition=filter_condition, outname='{}_{}_orthology.csv'.format(self.reference_gempro.id, 'prot'), outdir=self.data_dir) log.info('Saved orthology matrix at {}. See the "df_orthology_matrix" attribute.'.format(ortho_matrix)) self.df_orthology_matrix = pd.read_csv(ortho_matrix, index_col=0) # Filter the matrix to genes only in our analysis, and also check for strains with no differences or no orthologous genes self._filter_orthology_matrix(remove_strains_with_no_orthology=remove_strains_with_no_orthology, remove_strains_with_no_differences=remove_strains_with_no_differences, remove_genes_not_in_base_model=remove_genes_not_in_base_model)
Create the orthology matrix by finding best bidirectional BLAST hits. Genes = rows, strains = columns Runs run_makeblastdb, run_bidirectional_blast, and calculate_bbh for protein sequences. Args: pid_cutoff (float): Minimum percent identity between BLAST hits to filter for in the range [0, 100] bitscore_cutoff (float): Minimum bitscore allowed between BLAST hits evalue_cutoff (float): Maximum E-value allowed between BLAST hits filter_condition (str): 'OR' or 'AND', how to combine cutoff filters. 'OR' gives more results since it is less stringent, as you will be filtering for hits with (>80% PID or >30 bitscore or <0.0001 evalue). remove_strains_with_no_orthology (bool): Remove strains which have no orthologous genes found remove_strains_with_no_differences (bool): Remove strains which have all the same genes as the base model. Default is False because since orthology is found using a PID cutoff, all genes may be present but differences may be on the sequence level. remove_genes_not_in_base_model (bool): Remove genes from the orthology matrix which are not present in our base model. This happens if we use a genome file for our model that has other genes in it. Returns: DataFrame: Orthology matrix calculated from best bidirectional BLAST hits.
def set_selection(self, time, freqs, blarr, calname='', radec=(), dist=1, spwind=[], pols=['XX','YY']):
    """ Set select parameter that defines time, spw, and pol solutions to apply.

    time defines the time to find solutions near in mjd.
    freqs defines frequencies to select bandpass solution
    blarr is array of size 2xnbl that gives pairs of antennas in each baseline (a la tpipe.blarr).
    radec (radian tuple) and dist (deg) define optional location of source for filtering solutions.
    spwind is list of indices to be used (e.g., [0,2,4,10])
    pols is from d['pols'] (e.g., ['RR']). single or dual parallel allowed. calname not used. here for uniformity with telcal_sol.

    NOTE(review): spwind=[] and pols=['XX','YY'] are mutable default
    arguments — shared across calls; they appear read-only here, but
    confirm no caller mutates them.
    """
    self.spwind = spwind
    if calname:
        self.logger.warn('calname option not used for casa_sol. Applied based on radec.')

    # define pol index
    if 'X' in ''.join(pols) or 'Y' in ''.join(pols):
        polord = ['XX', 'YY']
    elif 'R' in ''.join(pols) or 'L' in ''.join(pols):
        polord = ['RR', 'LL']
    self.polind = [polord.index(pol) for pol in pols]

    # Map each baseline's antenna pair to indices into the unique antenna list.
    self.ant1ind = [n.where(ant1 == n.unique(blarr))[0][0] for (ant1,ant2) in blarr]
    self.ant2ind = [n.where(ant2 == n.unique(blarr))[0][0] for (ant1,ant2) in blarr]

    # select by smallest time distance for source within some angular region of target
    if radec:
        ra, dec = radec
        calra = n.array(self.radec)[:,0]
        caldec = n.array(self.radec)[:,1]
        # NOTE(review): the RA comparison does not wrap at 0/2pi or scale
        # by cos(dec) — confirm this is acceptable for the expected fields.
        fields = n.where( (n.abs(calra - ra) < n.radians(dist)) & (n.abs(caldec - dec) < n.radians(dist)) )[0]
        if len(fields) == 0:
            self.logger.warn('Warning: no close calibrator found. Removing radec restriction.')
            fields = n.unique(self.uniquefield)
    else:
        fields = n.unique(self.uniquefield)

    # Collect all solution rows belonging to the selected fields.
    sel = []
    for field in fields:
        sel += list(n.where(field == self.uniquefield)[0])

    # Pick the solution closest in time to the requested mjd.
    mjddist = n.abs(time - self.uniquemjd[sel])
    closestgain = n.where(mjddist == mjddist.min())[0][0]

    self.logger.info('Using gain solution for field %d at MJD %.5f, separated by %d min ' % (self.uniquefield[n.where(self.uniquemjd == self.uniquemjd[sel][closestgain])], self.uniquemjd[closestgain], mjddist[closestgain]*24*60))
    self.gain = self.gain.take(self.spwind, axis=2).take(self.polind, axis=3)[closestgain]

    if hasattr(self, 'bandpass'):
        # For each requested frequency, find the nearest bandpass bin.
        bins = [n.where(n.min(n.abs(self.bpfreq-selfreq)) == n.abs(self.bpfreq-selfreq))[0][0] for selfreq in freqs]
        self.bandpass = self.bandpass.take(bins, axis=1).take(self.polind, axis=2)
        self.freqs = freqs
        self.logger.debug('Using bandpass at BP bins (1000 bins per spw): %s', str(bins))
Set select parameter that defines time, spw, and pol solutions to apply. time defines the time to find solutions near in mjd. freqs defines frequencies to select bandpass solution blarr is array of size 2xnbl that gives pairs of antennas in each baseline (a la tpipe.blarr). radec (radian tuple) and dist (deg) define optional location of source for filtering solutions. spwind is list of indices to be used (e.g., [0,2,4,10]) pols is from d['pols'] (e.g., ['RR']). single or dual parallel allowed. calname not used. here for uniformity with telcal_sol.
def _raw_sql(self, values):
    """Prepare SQL statement consisting of a sequence of WHEN .. THEN statements."""
    pk_field = self.model._meta.pk
    # Character primary keys must be quoted inside the CASE expression.
    if isinstance(pk_field, CharField):
        clauses = [self._when("'{}'".format(pk), then) for (pk, then) in values]
    else:
        clauses = [self._when(pk, then) for (pk, then) in values]
    return 'SELECT CASE {}."{}" {} ELSE 0 END'.format(
        self.model._meta.db_table, pk_field.column, " ".join(clauses)
    )
Prepare SQL statement consisting of a sequence of WHEN .. THEN statements.
def get_input_kwargs(self, key=None, default=None):
    """
    Deprecated. Use `get_catalog_info` instead.

    Get information from the catalog config file.
    If *key* is `None`, return the full dict.
    """
    message = "`get_input_kwargs` is deprecated; use `get_catalog_info` instead."
    warnings.warn(message, DeprecationWarning)
    return self.get_catalog_info(key, default)
Deprecated. Use `get_catalog_info` instead. Get information from the catalog config file. If *key* is `None`, return the full dict.
def assessModel(self, target: str, prediction: str, nominal: bool = True, event: str = '', **kwargs):
    """
    This method will calculate assessment measures using the SAS AA_Model_Eval Macro used for SAS Enterprise Miner.
    Not all datasets can be assessed. This is designed for scored data that includes a target and prediction columns
    TODO: add code example of build, score, and then assess

    :param target: string that represents the target variable in the data
    :param prediction: string that represents the numeric prediction column in the data. For nominal targets this should a probability between (0,1).
    :param nominal: boolean to indicate if the Target Variable is nominal because the assessment measures are different.
    :param event: string which indicates which value of the nominal target variable is the event vs non-event
    :param kwargs:
    :return: SAS result object
    """
    # submit autocall macro
    self.sas.submit("%aamodel;")
    objtype = "datastep"
    objname = '{s:{c}^{n}}'.format(s=self.table[:3], n=3, c='_') + self.sas._objcnt()  # translate to a libname so needs to be less than 8
    code = "%macro proccall(d);\n"

    # build parameters
    score_table = str(self.libref + '.' + self.table)
    binstats = str(objname + '.' + "ASSESSMENTSTATISTICS")
    out = str(objname + '.' + "ASSESSMENTBINSTATISTICS")
    level = 'interval'
    # var = 'P_' + target
    if nominal:
        level = 'class'
        # the user didn't specify the event for a nominal Give them the possible choices
        try:
            if len(event) < 1:
                raise Exception(event)
        except Exception:
            print("No event was specified for a nominal target. Here are possible options:\n")
            event_code = "proc hpdmdb data=%s.%s %s classout=work._DMDBCLASSTARGET(keep=name nraw craw level frequency nmisspercent);" % (
                self.libref, self.table, self._dsopts())
            event_code += "\nclass %s ; \nrun;" % target
            event_code += "data _null_; set work._DMDBCLASSTARGET; where ^(NRAW eq . and CRAW eq '') and lowcase(name)=lowcase('%s');" % target
            ec = self.sas._io.submit(event_code)
            HTML(ec['LST'])
            # TODO: Finish output of the list of nominals variables

    if nominal:
        code += "%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s, EVENT=%s);" \
                % (score_table, self._dsopts(), target, prediction, level, binstats, out, event)
    else:
        code += "%%aa_model_eval(DATA=%s%s, TARGET=%s, VAR=%s, level=%s, BINSTATS=%s, bins=100, out=%s);" \
                % (score_table, self._dsopts(), target, prediction, level, binstats, out)
    # Post-process: give the assessment statistics readable column names.
    rename_char = """
    data {0};
        set {0};
        if level in ("INTERVAL", "INT") then do;
            rename _sse_ = SumSquaredError
                   _div_ = Divsor
                   _ASE_ = AverageSquaredError
                   _RASE_ = RootAverageSquaredError
                   _MEANP_ = MeanPredictionValue
                   _STDP_ = StandardDeviationPrediction
                   _CVP_ = CoefficientVariationPrediction;
        end;
        else do;
            rename CR = MaxClassificationRate
                   KSCut = KSCutOff
                   CRDEPTH = MaxClassificationDepth
                   MDepth = MedianClassificationDepth
                   MCut = MedianEventDetectionCutOff
                   CCut = ClassificationCutOff
                   _misc_ = MisClassificationRate;
        end;
    run;
    """
    code += rename_char.format(binstats)
    if nominal:
        # TODO: add graphics code here to return to the SAS results object
        graphics = """
        ODS PROCLABEL='ERRORPLOT' ;
        proc sgplot data={0};
            title "Error and Correct rate by Depth";
            series x=depth y=correct_rate;
            series x=depth y=error_rate;
            yaxis label="Percentage" grid;
        run;
        /* roc chart */
        ODS PROCLABEL='ROCPLOT' ;
        proc sgplot data={0};
            title "ROC Curve";
            series x=one_minus_specificity y=sensitivity;
            yaxis grid;
        run;
        /* Lift and Cumulative Lift */
        ODS PROCLABEL='LIFTPLOT' ;
        proc sgplot data={0};
            Title "Lift and Cumulative Lift";
            series x=depth y=c_lift;
            series x=depth y=lift;
            yaxis grid;
        run;
        """
        code += graphics.format(out)
    code += "run; quit; %mend;\n"
    code += "%%mangobj(%s,%s,%s);" % (objname, objtype, self.table)

    if self.sas.nosub:
        print(code)
        return

    ll = self.sas.submit(code, 'text')
    obj1 = sp2.SASProcCommons._objectmethods(self, objname)
    return sp2.SASresults(obj1, self.sas, objname, self.sas.nosub, ll['LOG'])
This method will calculate assessment measures using the SAS AA_Model_Eval Macro used for SAS Enterprise Miner. Not all datasets can be assessed. This is designed for scored data that includes a target and prediction columns TODO: add code example of build, score, and then assess :param target: string that represents the target variable in the data :param prediction: string that represents the numeric prediction column in the data. For nominal targets this should a probability between (0,1). :param nominal: boolean to indicate if the Target Variable is nominal because the assessment measures are different. :param event: string which indicates which value of the nominal target variable is the event vs non-event :param kwargs: :return: SAS result object
def format_price(self, price):
    """Formats the price with the set decimal mark and correct currency

    *price* is a (units, cents) pair; cents are zero-padded to two digits.
    """
    cents = "{:02d}".format(price[1])
    return u"{} {}{}{}".format(self.currency_symbol, price[0],
                               self.decimal_mark, cents)
Formats the price with the set decimal mark and correct currency
def _cross_check(self, pub_key): """ In Ecdsa, both the key and the algorithm define the curve. Therefore, we must cross check them to make sure they're the same. :param key: :raises: ValueError is the curves are not the same """ if self.curve_name != pub_key.curve.name: raise ValueError( "The curve in private key {} and in algorithm {} don't " "match".format(pub_key.curve.name, self.curve_name))
In Ecdsa, both the key and the algorithm define the curve. Therefore, we must cross check them to make sure they're the same. :param key: :raises: ValueError is the curves are not the same
def map_gid2(self, tiled_gid):
    """ WIP.  need to refactor the gid code

    :param tiled_gid:
    :return:
    """
    key = int(tiled_gid)
    # self.gidmap is a defaultdict, so indexing a missing key would insert
    # it; membership must be tested explicitly instead of catching KeyError.
    if key not in self.gidmap:
        fresh_gid = self.register_gid(key)
        return [(fresh_gid, None)]
    return self.gidmap[key]
WIP. need to refactor the gid code :param tiled_gid: :return:
def eye(N, M=0, k=0, dtype=None, **kwargs):
    """Returns a new symbol of 2-D shape, filled with ones on the diagonal
    and zeros elsewhere.

    Parameters
    ----------
    N: int
        Number of rows in the output.
    M: int, optional
        Number of columns in the output. If 0, defaults to N.
    k: int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal,
        and a negative value to a lower diagonal.
    dtype : str or numpy.dtype, optional
        The value type of the inner value, default to ``np.float32``.

    Returns
    -------
    out : Symbol
        The created Symbol.
    """
    resolved_dtype = _numpy.float32 if dtype is None else dtype
    return _internal._eye(N, M, k, dtype=resolved_dtype, **kwargs)
Returns a new symbol of 2-D shpae, filled with ones on the diagonal and zeros elsewhere. Parameters ---------- N: int Number of rows in the output. M: int, optional Number of columns in the output. If 0, defaults to N. k: int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : str or numpy.dtype, optional The value type of the inner value, default to ``np.float32``. Returns ------- out : Symbol The created Symbol.
def render(self, mode='human'):
    """
    Render the environment.

    Args:
        mode (str): the mode to render with:
        - human: render to the current display
        - rgb_array: Return an numpy.ndarray with shape (x, y, 3),
          representing RGB values for an x-by-y pixel image

    Returns:
        a numpy array if mode is 'rgb_array', None otherwise
    """
    if mode == 'rgb_array':
        return self.screen
    if mode == 'human':
        # Lazily create the viewer on the first human render.
        if self.viewer is None:
            from ._image_viewer import ImageViewer
            # Caption falls back to the ROM filename when no spec exists.
            caption = self.spec.id if self.spec is not None \
                else self._rom_path.split('/')[-1]
            self.viewer = ImageViewer(
                caption=caption,
                height=SCREEN_HEIGHT,
                width=SCREEN_WIDTH,
            )
        self.viewer.show(self.screen)
        return None
    # Unknown mode: report the modes this environment supports.
    render_modes = [repr(x) for x in self.metadata['render.modes']]
    msg = 'valid render modes are: {}'.format(', '.join(render_modes))
    raise NotImplementedError(msg)
Render the environment. Args: mode (str): the mode to render with: - human: render to the current display - rgb_array: Return an numpy.ndarray with shape (x, y, 3), representing RGB values for an x-by-y pixel image Returns: a numpy array if mode is 'rgb_array', None otherwise
def calculate_single_terms(self):
    """Lines of model method with the same name."""
    code_lines = self._call_methods('calculate_single_terms',
                                    self.model.PART_ODE_METHODS)
    if code_lines:
        # Inject a counter increment right after the method header so each
        # evaluation of the differential equations is counted.
        code_lines.insert(1, (' self.numvars.nmb_calls ='
                              'self.numvars.nmb_calls+1'))
    return code_lines
Lines of model method with the same name.
def remove_files(root_dir):
    """
    Remove all files and directories in supplied root directory.

    The root directory itself is kept; only its contents are deleted.

    The original implementation walked the tree but joined every filename
    against ``root_dir`` instead of the walked ``dirpath``, which only
    worked because each top-level subdirectory was rmtree'd before the
    walk descended into it. Iterating the top level directly is both
    correct and clearer.
    """
    root = os.path.abspath(root_dir)
    for entry in os.listdir(root):
        path = os.path.join(root, entry)
        if os.path.isdir(path) and not os.path.islink(path):
            # Real directory: remove it and everything beneath it.
            shutil.rmtree(path)
        else:
            # Regular file or symlink (rmtree refuses symlinks).
            os.remove(path)
Remove all files and directories in supplied root directory.
def gauge(self, stats, value):
    """
    Log gauges

    >>> client = StatsdClient()
    >>> client.gauge('example.gauge', 47)
    >>> client.gauge(('example.gauge41', 'example.gauge43'), 47)
    """
    gauge_type = self.SC_GAUGE
    self.update_stats(stats, value, gauge_type)
Log gauges >>> client = StatsdClient() >>> client.gauge('example.gauge', 47) >>> client.gauge(('example.gauge41', 'example.gauge43'), 47)
def handle_error(self, error, req, schema, error_status_code, error_headers):
    """Handles errors during parsing. Raises a `tornado.web.HTTPError`
    with a 400 error.
    """
    status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
    # Tornado has no default reason phrase for 422, so supply one.
    reason = "Unprocessable Entity" if status_code == 422 else None
    raise HTTPError(
        status_code,
        log_message=str(error.messages),
        reason=reason,
        messages=error.messages,
        headers=error_headers,
    )
Handles errors during parsing. Raises a `tornado.web.HTTPError` with a 400 error.
def operands(self, value):
    """Set instruction operands.

    Exactly three operands are required.
    """
    if len(value) == 3:
        self._operands = value
    else:
        raise Exception("Invalid instruction operands : %s" % str(value))
Set instruction operands.
def field_types(self):
    """
    Access the field_types

    :returns: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
    :rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
    """
    cached = self._field_types
    if cached is None:
        # Build the list lazily and memoize it for subsequent accesses.
        cached = FieldTypeList(self._version, assistant_sid=self._solution['sid'], )
        self._field_types = cached
    return cached
Access the field_types :returns: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList :rtype: twilio.rest.autopilot.v1.assistant.field_type.FieldTypeList
def profile_solver(ml, accel=None, **kwargs):
    """Profile a particular multilevel object.

    Parameters
    ----------
    ml : multilevel
        Fully constructed multilevel object
    accel : function pointer
        Pointer to a valid Krylov solver (e.g. gmres, cg)

    Returns
    -------
    residuals : array
        Array of residuals for each iteration

    See Also
    --------
    multilevel.psolve, multilevel.solve

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import spdiags, csr_matrix
    >>> from scipy.sparse.linalg import cg
    >>> from pyamg.classical import ruge_stuben_solver
    >>> from pyamg.util.utils import profile_solver
    >>> n=100
    >>> e = np.ones((n,1)).ravel()
    >>> data = [ -1*e, 2*e, -1*e ]
    >>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
    >>> b = A*np.ones(A.shape[0])
    >>> ml = ruge_stuben_solver(A, max_coarse=10)
    >>> res = profile_solver(ml,accel=cg)

    """
    A = ml.levels[0].A
    # scipy.rand (sp.rand) was deprecated and removed from SciPy's top-level
    # namespace; use numpy's RNG directly instead.
    b = A * np.random.rand(A.shape[0], 1)
    residuals = []

    if accel is None:
        ml.solve(b, residuals=residuals, **kwargs)
    else:
        def callback(x):
            # Record the residual norm after each Krylov iteration.
            residuals.append(norm(np.ravel(b) - np.ravel(A * x)))
        M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V'))
        accel(A, b, M=M, callback=callback, **kwargs)

    return np.asarray(residuals)
Profile a particular multilevel object. Parameters ---------- ml : multilevel Fully constructed multilevel object accel : function pointer Pointer to a valid Krylov solver (e.g. gmres, cg) Returns ------- residuals : array Array of residuals for each iteration See Also -------- multilevel.psolve, multilevel.solve Examples -------- >>> import numpy as np >>> from scipy.sparse import spdiags, csr_matrix >>> from scipy.sparse.linalg import cg >>> from pyamg.classical import ruge_stuben_solver >>> from pyamg.util.utils import profile_solver >>> n=100 >>> e = np.ones((n,1)).ravel() >>> data = [ -1*e, 2*e, -1*e ] >>> A = csr_matrix(spdiags(data,[-1,0,1],n,n)) >>> b = A*np.ones(A.shape[0]) >>> ml = ruge_stuben_solver(A, max_coarse=10) >>> res = profile_solver(ml,accel=cg)
def get_all_parents(self):
    """Return all parent GO IDs."""
    ancestors = set()
    for parent in self.parents:
        # Each direct parent contributes itself plus its own ancestors.
        ancestors.add(parent.item_id)
        ancestors.update(parent.get_all_parents())
    return ancestors
Return all parent GO IDs.
def item_land_adapter(obj, request):
    """
    Adapter for rendering an item of
    :class: `pycountry.db.Data` to json.
    """
    alpha2 = obj.alpha_2
    return {
        'id': alpha2,
        'alpha2': alpha2,
        'alpha3': obj.alpha_3,
        # Country name is passed through the translation machinery.
        'naam': _(obj.name)
    }
Adapter for rendering an item of :class: `pycountry.db.Data` to json.
def generate_data(path, tokenizer, char_vcb, word_vcb, is_training=False):
    '''
    Generate data: load QA pairs from *path*, tokenize them, and populate
    the character and word vocabularies in place.

    Also records the maximum question/passage token lengths on the global
    ``cfg`` object. Returns the list of tokenized QA pairs.
    '''
    # NOTE(review): root_path is declared global but never read or written
    # in this function — confirm whether it can be removed.
    global root_path
    qp_pairs = data.load_from_file(path=path, is_training=is_training)

    # NOTE(review): tokenized_sent counts processed pairs but is unused.
    tokenized_sent = 0
    # debugging aid: qp_pairs = qp_pairs[:1000]
    for qp_pair in qp_pairs:
        tokenized_sent += 1
        data.tokenize(qp_pair, tokenizer, is_training)
        # Every token feeds the word vocabulary; every character of every
        # token feeds the character vocabulary.
        for word in qp_pair['question_tokens']:
            word_vcb.add(word['word'])
            for char in word['word']:
                char_vcb.add(char)
        for word in qp_pair['passage_tokens']:
            word_vcb.add(word['word'])
            for char in word['word']:
                char_vcb.add(char)

    max_query_length = max(len(x['question_tokens']) for x in qp_pairs)
    max_passage_length = max(len(x['passage_tokens']) for x in qp_pairs)
    #min_passage_length = min(len(x['passage_tokens']) for x in qp_pairs)
    cfg.max_query_length = max_query_length
    cfg.max_passage_length = max_passage_length

    return qp_pairs
Generate data
def runExperiment5A(dirName):
    """
    This runs the first experiment in the section "Simulations with
    Sensorimotor Sequences", an example sensorimotor sequence.

    Results are put into a pkl file which can be used to generate the plots.
    dirName is the absolute path where the pkl file will be placed.
    """
    resultsFilename = os.path.join(dirName, "sensorimotor_sequence_example.pkl")
    params = {
        "numSequences": 0,
        "seqLength": 10,
        "numFeatures": 100,
        "trialNum": 4,
        "numObjects": 50,
        "numLocations": 100,
    }
    results = runExperiment(params)

    # Pickle results for plotting and possible later debugging
    with open(resultsFilename, "wb") as f:
        cPickle.dump(results, f)
This runs the first experiment in the section "Simulations with Sensorimotor Sequences", an example sensorimotor sequence.
def _get_stddevs(self, C, stddev_types, num_sites): """ Return total standard deviation. """ stddevs = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES stddevs.append(np.log(10 ** C['sigma']) + np.zeros(num_sites)) return stddevs
Return total standard deviation.
def change_env(name, val):
    """
    Args:
        name(str), val(str):

    Returns:
        a context where the environment variable ``name`` being set to
        ``val``. It will be set back after the context exits.
    """
    oldval = os.environ.get(name, None)
    os.environ[name] = val
    try:
        yield
    finally:
        # Restore even if the managed body raised (the original skipped
        # restoration on exceptions because it had no try/finally).
        if oldval is None:
            # pop() instead of del: tolerant of the body deleting the var.
            os.environ.pop(name, None)
        else:
            os.environ[name] = oldval
Args: name(str), val(str): Returns: a context where the environment variable ``name`` being set to ``val``. It will be set back after the context exits.
def reindex(self, ind):
    """reindex

    Arguments:
        ind {pd.MultiIndex} -- the new index to apply to the underlying data

    Raises:
        RuntimeError -- when the underlying reindex fails
        RuntimeError -- when *ind* is not a pandas MultiIndex

    Returns:
        a new data structure wrapping the reindexed data
    """
    if not isinstance(ind, pd.MultiIndex):
        raise RuntimeError(
            'QADATASTRUCT ERROR: ONLY ACCEPT MULTI-INDEX FORMAT'
        )
    try:
        return self.new(self.data.reindex(ind))
    except Exception as exc:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) and chained for debuggability.
        raise RuntimeError('QADATASTRUCT ERROR: CANNOT REINDEX') from exc
reindex Arguments: ind {[type]} -- [description] Raises: RuntimeError -- [description] RuntimeError -- [description] Returns: [type] -- [description]
def analyze(self, scratch, **kwargs):
    """Categorize instances of attempted say and sound synchronization.

    Walks every script in the project and inspects consecutive
    same-depth block pairs for say/think blocks paired with
    play-sound blocks, tallying correct, incorrect, hackish and
    error cases into a Counter. Returns ``{'sound': Counter}``.
    """
    errors = Counter()
    for script in self.iter_scripts(scratch):
        # Track the previously seen block so adjacent pairs can be compared.
        prev_name, prev_depth, prev_block = '', 0, script.blocks[0]
        gen = self.iter_blocks(script.blocks)
        for name, depth, block in gen:
            # Only sibling blocks (same nesting depth) form a pair.
            if prev_depth == depth:
                if prev_name in self.SAY_THINK:
                    if name == 'play sound %s until done':
                        if not self.is_blank(prev_block.args[0]):
                            # Delegate the rest of the pattern to check().
                            errors += self.check(gen)
                    # TODO: What about play sound?
                elif prev_name in self.SAY_THINK_DURATION and \
                        'play sound %s' in name:
                    # NOTE(review): the key '1' looks inconsistent with the
                    # named categories (INCORRECT/ERROR/HACKISH) used
                    # elsewhere — confirm it is intentional.
                    errors['1'] += 1
                elif prev_name == 'play sound %s':
                    if name in self.SAY_THINK:
                        errors[self.INCORRECT] += 1
                    elif name in self.SAY_THINK_DURATION:
                        if self.is_blank(block.args[0]):
                            errors[self.ERROR] += 1
                        else:
                            errors[self.HACKISH] += 1
                elif prev_name == 'play sound %s until done' and \
                        name in self.ALL_SAY_THINK:
                    if not self.is_blank(block.args[0]):
                        errors[self.INCORRECT] += 1
                # TODO: Should there be an else clause here?
            prev_name, prev_depth, prev_block = name, depth, block
    return {'sound': errors}
Categorize instances of attempted say and sound synchronization.
def _expand_endpoint_name(endpoint_name, flags): """ Populate any ``{endpoint_name}`` tags in the flag names for the given handler, based on the handlers module / file name. """ return tuple(flag.format(endpoint_name=endpoint_name) for flag in flags)
Populate any ``{endpoint_name}`` tags in the flag names for the given handler, based on the handlers module / file name.
def encrypt(self, plaintext):
    """Return ciphertext for given plaintext."""
    # Compress the UTF-8 encoded plaintext before encryption.
    compressed = zlib.compress(plaintext.encode('utf8'))
    # AES-GCM with a fresh 96-bit random nonce for every message.
    cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce=random_bytes(12))
    encrypted, tag = cipher.encrypt_and_digest(compressed)
    # Wire layout: nonce || tag || ciphertext, Base64-encoded to text.
    combined = cipher.nonce + tag + encrypted
    return base64.b64encode(combined).decode('utf8')
Return ciphertext for given plaintext.
def _auth(profile=None):
    '''
    Set up neutron credentials.

    Reads keystone settings either from the named config ``profile`` or
    from top-level ``keystone.*`` config options, and returns a
    ``SaltNeutron`` client built from them.
    '''
    if profile:
        credentials = __salt__['config.option'](profile)
        user = credentials['keystone.user']
        password = credentials['keystone.password']
        tenant = credentials['keystone.tenant']
        auth_url = credentials['keystone.auth_url']
        region_name = credentials.get('keystone.region_name', None)
        service_type = credentials.get('keystone.service_type', 'network')
        os_auth_system = credentials.get('keystone.os_auth_system', None)
        use_keystoneauth = credentials.get('keystone.use_keystoneauth', False)
        verify = credentials.get('keystone.verify', True)
    else:
        user = __salt__['config.option']('keystone.user')
        password = __salt__['config.option']('keystone.password')
        tenant = __salt__['config.option']('keystone.tenant')
        auth_url = __salt__['config.option']('keystone.auth_url')
        region_name = __salt__['config.option']('keystone.region_name')
        service_type = __salt__['config.option']('keystone.service_type')
        os_auth_system = __salt__['config.option']('keystone.os_auth_system')
        use_keystoneauth = __salt__['config.option']('keystone.use_keystoneauth')
        verify = __salt__['config.option']('keystone.verify')
    if use_keystoneauth is True:
        # BUG FIX: the original read ``credentials`` here unconditionally,
        # which raised NameError whenever no profile was supplied.
        if profile:
            project_domain_name = credentials['keystone.project_domain_name']
            user_domain_name = credentials['keystone.user_domain_name']
        else:
            project_domain_name = __salt__['config.option'](
                'keystone.project_domain_name')
            user_domain_name = __salt__['config.option'](
                'keystone.user_domain_name')
        kwargs = {
            'username': user,
            'password': password,
            'tenant_name': tenant,
            'auth_url': auth_url,
            'region_name': region_name,
            'service_type': service_type,
            'os_auth_plugin': os_auth_system,
            'use_keystoneauth': use_keystoneauth,
            'verify': verify,
            'project_domain_name': project_domain_name,
            'user_domain_name': user_domain_name
        }
    else:
        kwargs = {
            'username': user,
            'password': password,
            'tenant_name': tenant,
            'auth_url': auth_url,
            'region_name': region_name,
            'service_type': service_type,
            'os_auth_plugin': os_auth_system
        }
    return suoneu.SaltNeutron(**kwargs)
Set up neutron credentials
def sequence(start, stop, step=None):
    """
    Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
    If `step` is not set, incrementing by 1 if `start` is less than or equal to
    `stop`, otherwise -1.

    >>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
    >>> df1.select(sequence('C1', 'C2').alias('r')).collect()
    [Row(r=[-2, -1, 0, 1, 2])]
    >>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
    >>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
    [Row(r=[4, 2, 0, -2, -4])]
    """
    sc = SparkContext._active_spark_context
    # Build the JVM column arguments, appending the optional step.
    jcols = [_to_java_column(start), _to_java_column(stop)]
    if step is not None:
        jcols.append(_to_java_column(step))
    return Column(sc._jvm.functions.sequence(*jcols))
Generate a sequence of integers from `start` to `stop`, incrementing by `step`. If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`, otherwise -1. >>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2')) >>> df1.select(sequence('C1', 'C2').alias('r')).collect() [Row(r=[-2, -1, 0, 1, 2])] >>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3')) >>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect() [Row(r=[4, 2, 0, -2, -4])]
def parse(self, parser):
    """Main method to render data into the template.

    Consumes this extension's tag from the jinja2 token stream and
    returns an Output node that calls ``_commit_hash`` at render time.
    """
    lineno = next(parser.stream).lineno
    # Optional ``short`` modifier: consume the name token, then the
    # following token (presumably '=' — TODO confirm), then parse the
    # expression supplying the flag value.
    if parser.stream.skip_if('name:short'):
        parser.stream.skip(1)
        short = parser.parse_expression()
    else:
        # No modifier: default to the full-length hash.
        short = nodes.Const(False)
    result = self.call_method('_commit_hash', [short], [], lineno=lineno)
    return nodes.Output([result], lineno=lineno)
Main method to render data into the template.
def remove_rally(self, key):
    '''remove a rally point'''
    parts = key.split(' ')
    # A valid key looks like "Rally <n>".
    if len(parts) != 2 or parts[0] != 'Rally':
        print("Bad rally object %s" % key)
        return
    index = int(parts[1])
    self.mpstate.functions.process_stdin('rally remove %u' % index)
remove a rally point
def c32checkDecode(c32data):
    """
    Decode a c32check string into ``(version, hex payload)``, verifying
    its trailing 4-byte checksum. Raises ValueError on malformed input.

    >>> c32checkDecode('P2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7')
    (22, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
    >>> c32checkDecode('02J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPVKG2CE')
    (0, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
    >>> c32checkDecode('Z2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR')
    (31, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
    >>> c32checkDecode('B2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNGTQ5XV')
    (11, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
    >>> c32checkDecode('H2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPZJKGHG')
    (17, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
    >>> c32checkDecode('22J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKMQMB2T9')
    (2, 'a46ff88886c2ef9762d970b4d2c63678835bd39d')
    >>> c32checkDecode('P37JJX3D')
    (22, '')
    >>> c32checkDecode('P000000000000000000002Q6VF78')
    (22, '0000000000000000000000000000000000000000')
    >>> c32checkDecode('P00000000000000000005JA84HQ')
    (22, '0000000000000000000000000000000000000001')
    >>> c32checkDecode('P80000000000000000000000000000004R0CMNV')
    (22, '1000000000000000000000000000000000000001')
    >>> c32checkDecode('P800000000000000000000000000000033H8YKK')
    (22, '1000000000000000000000000000000000000000')
    >>> c32checkDecode('04C407K6')
    (0, '01')
    >>> c32checkDecode('049Q1W6AP')
    (0, '22')
    >>> c32checkDecode('006NZP224')
    (0, '0001')
    >>> c32checkDecode('Z004720442')
    (31, '000001')
    >>> c32checkDecode('Z00073C2AR7')
    (31, '00000001')
    >>> c32checkDecode('B20QX4FW0')
    (11, '10')
    >>> c32checkDecode('B102PC6RCC')
    (11, '0100')
    >>> c32checkDecode('BG02G1QXCQ')
    (11, '1000')
    >>> c32checkDecode('H40003YJA8JD')
    (17, '100000')
    >>> c32checkDecode('H200001ZTRYYH')
    (17, '01000000')
    >>> c32checkDecode('H1000002QFX7E6')
    (17, '10000000')
    >>> c32checkDecode('2G000003FNKA3P')
    (2, '0100000000')
    """
    # NOTE(review): validation happens before c32normalize, so inputs
    # must already use the C32 alphabet — confirm that is intended.
    if not re.match(r'^[' + C32 + ']*$', c32data):
        raise ValueError('Must be c32 data')
    c32data = c32normalize(c32data)
    # First character encodes the version; the rest is payload+checksum.
    data_hex = c32decode(c32data[1:])
    if len(data_hex) < 8:
        raise ValueError('Not a c32check string')
    version_chr = c32data[0]
    version = C32.index(version_chr)
    version_hex = '{:02x}'.format(version)
    # Last 8 hex chars (4 bytes) are the checksum over version+payload.
    checksum = data_hex[-8:]
    if c32checksum('{}{}'.format(version_hex, data_hex[0:len(data_hex)-8])) != checksum:
        raise ValueError('Invalid c32check string: checksum mismatch')
    return (version, data_hex[0:len(data_hex)-8])
>>> c32checkDecode('P2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7') (22, 'a46ff88886c2ef9762d970b4d2c63678835bd39d') >>> c32checkDecode('02J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPVKG2CE') (0, 'a46ff88886c2ef9762d970b4d2c63678835bd39d') >>> c32checkDecode('Z2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR') (31, 'a46ff88886c2ef9762d970b4d2c63678835bd39d') >>> c32checkDecode('B2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNGTQ5XV') (11, 'a46ff88886c2ef9762d970b4d2c63678835bd39d') >>> c32checkDecode('H2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPZJKGHG') (17, 'a46ff88886c2ef9762d970b4d2c63678835bd39d') >>> c32checkDecode('22J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKMQMB2T9') (2, 'a46ff88886c2ef9762d970b4d2c63678835bd39d') >>> c32checkDecode('P37JJX3D') (22, '') >>> c32checkDecode('P000000000000000000002Q6VF78') (22, '0000000000000000000000000000000000000000') >>> c32checkDecode('P00000000000000000005JA84HQ') (22, '0000000000000000000000000000000000000001') >>> c32checkDecode('P80000000000000000000000000000004R0CMNV') (22, '1000000000000000000000000000000000000001') >>> c32checkDecode('P800000000000000000000000000000033H8YKK') (22, '1000000000000000000000000000000000000000') >>> c32checkDecode('04C407K6') (0, '01') >>> c32checkDecode('049Q1W6AP') (0, '22') >>> c32checkDecode('006NZP224') (0, '0001') >>> c32checkDecode('Z004720442') (31, '000001') >>> c32checkDecode('Z00073C2AR7') (31, '00000001') >>> c32checkDecode('B20QX4FW0') (11, '10') >>> c32checkDecode('B102PC6RCC') (11, '0100') >>> c32checkDecode('BG02G1QXCQ') (11, '1000') >>> c32checkDecode('H40003YJA8JD') (17, '100000') >>> c32checkDecode('H200001ZTRYYH') (17, '01000000') >>> c32checkDecode('H1000002QFX7E6') (17, '10000000') >>> c32checkDecode('2G000003FNKA3P') (2, '0100000000')
def delete_view(self, request, object_id, extra_context=None):
    """
    Overrides the default to enable redirecting to the directory view
    after deletion of a image.

    we need to fetch the object and find out who the parent is
    before super, because super will delete the object and make it
    impossible to find out the parent folder to redirect to.
    """
    parent_folder = None
    try:
        obj = self.get_queryset(request).get(pk=unquote(object_id))
        parent_folder = obj.folder
    except self.model.DoesNotExist:
        obj = None
    r = super(FileAdmin, self).delete_view(
        request=request, object_id=object_id,
        extra_context=extra_context)
    # Only rewrite the redirect when the base class actually deleted the
    # object and issued its default changelist redirect.
    url = r.get("Location", None)
    # Check against filer_file_changelist as file deletion is always made by
    # the base class
    if (url in ["../../../../", "../../"]
            or url == reverse("admin:media_file_changelist")
            or url == reverse("admin:media_image_changelist")):
        if parent_folder:
            # Redirect into the folder the deleted file lived in.
            url = reverse('admin:filer-directory_listing',
                          kwargs={'folder_id': parent_folder.id})
        else:
            url = reverse('admin:filer-directory_listing-unfiled_images')
        # Preserve popup / select-folder query parameters.
        url = "%s%s%s" % (url, popup_param(request),
                          selectfolder_param(request, "&"))
        return HttpResponseRedirect(url)
    return r
Overrides the default to enable redirecting to the directory view after deletion of a image. we need to fetch the object and find out who the parent is before super, because super will delete the object and make it impossible to find out the parent folder to redirect to.
def _make_request(self, method, params):
    """Make request to API endpoint

    Note: Ignoring SSL cert validation due to intermittent failures
    http://requests.readthedocs.org/en/latest/user/advanced/#ssl-cert-verification
    """
    # Honor a previously-set rate-limit block before hitting the API.
    now = datetime.datetime.utcnow()
    if self.blocked_until is not None and now < self.blocked_until:
        raise SlackError("Too many requests - wait until {0}" \
            .format(self.blocked_until))

    url = "%s/%s" % (SlackClient.BASE_URL, method)
    params['token'] = self.token
    response = requests.post(url, data=params, verify=self.verify)

    if response.status_code == 429:
        # Rate limited: remember when we may try again.
        retry_after = int(response.headers.get('retry-after', '1'))
        self.blocked_until = (datetime.datetime.utcnow()
                              + datetime.timedelta(seconds=retry_after))
        raise SlackError("Too many requests - retry after {0} second(s)" \
            .format(retry_after))

    result = response.json()
    if not result['ok']:
        raise SlackError(result['error'])
    return result
Make request to API endpoint Note: Ignoring SSL cert validation due to intermittent failures http://requests.readthedocs.org/en/latest/user/advanced/#ssl-cert-verification
def get_qpimage(self, idx=0):
    """Return background-corrected QPImage"""
    if self._bgdata:
        # Explicit background data was chosen via `get_qpimage_raw`.
        qpi = super(SingleHdf5Qpimage, self).get_qpimage()
    else:
        # Fall back to the background stored in the qpimage hdf5 file.
        qpi = qpimage.QPImage(h5file=self.path,
                              h5mode="r",
                              h5dtype=self.as_type,
                              ).copy()
    # Force meta data onto the image, then stamp the identifier.
    for key, value in self.meta_data.items():
        qpi[key] = value
    qpi["identifier"] = self.get_identifier(idx)
    return qpi
Return background-corrected QPImage
def _equaBreaks(self, orbit_index_period=24.):
    """Determine where breaks in an equatorial satellite orbit occur.

    Looks for negative gradients in local time (or longitude) as well
    as breaks in UT.

    Parameters
    ----------
    orbit_index_period : float
        The change in value of supplied index parameter for a single orbit

    Side effects: sets ``self._orbit_breaks`` (indices where each orbit
    starts, always beginning with 0) and ``self.num`` (orbit count).
    """
    if self.orbit_index is None:
        raise ValueError('Orbit properties must be defined at ' +
                         'pysat.Instrument object instantiation.' +
                         'See Instrument docs.')
    else:
        try:
            self.sat[self.orbit_index]
        except ValueError:
            raise ValueError('Provided orbit index does not exist in ' +
                             'loaded data')
    # get difference in orbit index around the orbit
    lt_diff = self.sat[self.orbit_index].diff()
    # universal time values, from datetime index
    ut_vals = Series(self.sat.data.index)
    # UT difference
    ut_diff = ut_vals.diff()
    # get locations where orbit index derivative is less than 0
    # then do some basic checks on these locations
    ind, = np.where((lt_diff < -0.1))
    if len(ind) > 0:
        ind = np.hstack((ind, np.array([len(self.sat[self.orbit_index])])))
        # look at distance between breaks
        dist = ind[1:] - ind[0:-1]
        # only keep orbit breaks with a distance greater than 1
        # done for robustness
        if len(ind) > 1:
            if min(dist) == 1:
                print('There are orbit breaks right next to each other')
            ind = ind[:-1][dist > 1]
        # check for large positive gradients around the break that would
        # suggest not a true orbit break, but rather bad orbit_index values
        new_ind = []
        for idx in ind:
            # inspect an 11-sample window centered on the candidate break
            tidx, = np.where(lt_diff[idx - 5:idx + 6] > 0.1)
            if len(tidx) != 0:
                # there are large changes, suggests a false alarm
                # iterate over samples and check
                for tidx in tidx:
                    # look at time change vs local time change
                    if(ut_diff[idx - 5:idx + 6].iloc[tidx] <
                       lt_diff[idx - 5:idx + 6].iloc[tidx] /
                       orbit_index_period * self.orbit_period):
                        # change in ut is small compared to the change in
                        # the orbit index this is flagged as a false alarm,
                        # or dropped from consideration
                        pass
                    else:
                        # change in UT is significant, keep orbit break
                        new_ind.append(idx)
                        break
            else:
                # no large positive gradients, current orbit break passes
                # the first test
                new_ind.append(idx)
        # replace all breaks with those that are 'good'
        ind = np.array(new_ind)
    # now, assemble some orbit breaks that are not triggered by changes in
    # the orbit index
    # check if there is a UT break that is larger than orbital period, aka
    # a time gap
    ut_change_vs_period = (ut_diff > self.orbit_period)
    # characterize ut change using orbital period
    norm_ut = ut_diff / self.orbit_period
    # now, look for breaks because the length of time between samples is
    # too large, thus there is no break in slt/mlt/etc, lt_diff is small
    # but UT change is big
    norm_ut_vs_norm_lt = norm_ut.gt(np.abs(lt_diff.values /
                                           orbit_index_period))
    # indices when one or other flag is true
    ut_ind, = np.where(ut_change_vs_period |
                       (norm_ut_vs_norm_lt & (norm_ut > 0.95)))
    # added the or and check after or on 10/20/2014
    # & lt_diff.notnull() ))# & (lt_diff != 0) ) )
    # combine these UT determined orbit breaks with the orbit index orbit
    # breaks
    if len(ut_ind) > 0:
        ind = np.hstack((ind, ut_ind))
        ind = np.sort(ind)
        ind = np.unique(ind)
        print('Time Gap')
    # now that most problems in orbits should have been caught, look at
    # the time difference between orbits (not individual orbits)
    orbit_ut_diff = ut_vals[ind].diff()
    orbit_lt_diff = self.sat[self.orbit_index][ind].diff()
    # look for time gaps between partial orbits. The full orbital time
    # period is not required between end of one orbit and begining of next
    # if first orbit is partial. Also provides another general test of the
    # orbital breaks determined.
    idx, = np.where((orbit_ut_diff / self.orbit_period -
                     orbit_lt_diff.values / orbit_index_period) > 0.97)
    # pull out breaks that pass the test, need to make sure the first one
    # is always included it gets dropped via the nature of diff
    if len(idx) > 0:
        if idx[0] != 0:
            idx = np.hstack((0, idx))
    else:
        idx = np.array([0])
    # only keep the good indices
    if len(ind) > 0:
        ind = ind[idx]
        # create orbitbreak index, ensure first element is always 0
        if ind[0] != 0:
            ind = np.hstack((np.array([0]), ind))
    else:
        ind = np.array([0])
    # number of orbits
    num_orbits = len(ind)
    # set index of orbit breaks
    self._orbit_breaks = ind
    # set number of orbits for the day
    self.num = num_orbits
Determine where breaks in an equatorial satellite orbit occur. Looks for negative gradients in local time (or longitude) as well as breaks in UT. Parameters ---------- orbit_index_period : float The change in value of supplied index parameter for a single orbit
def gen_psi(self, x):
    """Generate the activity of the basis functions for a given
    canonical system rollout.

    x : float or numpy array — the canonical system state or path.
    Returns the Gaussian activation of each basis function.
    """
    if isinstance(x, np.ndarray):
        # Broadcast a rollout path against the basis-function centers.
        x = x[:, None]
    offset = x - self.c
    return np.exp(-self.h * offset ** 2)
Generates the activity of the basis functions for a given canonical system rollout. x float, array: the canonical system state or path
def import_single_vpn_path_to_all_vrfs(self, vpn_path, path_rts=None):
    """Imports *vpn_path* to qualifying VRF tables.

    Import RTs of VRF table is matched with RTs from *vpn4_path* and if
    we have any common RTs we import the path into VRF.
    """
    LOG.debug('Importing path %s to qualifying VRFs', vpn_path)

    # Nothing to match against if the path carries no RTs.
    if not path_rts:
        LOG.info('Encountered a path with no RTs: %s', vpn_path)
        return

    # Map the VPN route family to the VRF route family this path may be
    # imported into; unknown families are rejected.
    vpn_to_vrf_rf = {
        RF_IPv4_VPN: RF_IPv4_UC,
        RF_IPv6_VPN: RF_IPv6_UC,
        RF_L2_EVPN: RF_L2_EVPN,
        RF_VPNv4_FLOWSPEC: RF_IPv4_FLOWSPEC,
        RF_VPNv6_FLOWSPEC: RF_IPv6_FLOWSPEC,
        RF_L2VPN_FLOWSPEC: RF_L2VPN_FLOWSPEC,
    }
    try:
        route_family = vpn_to_vrf_rf[vpn_path.route_family]
    except KeyError:
        raise ValueError('Unsupported route family for VRF: %s' %
                         vpn_path.route_family)

    # Collect every VRF table interested in any of the path's RTs.
    interested_tables = set()
    for rt in path_rts:
        rt_rf_id = rt + ':' + str(route_family)
        vrf_rt_tables = self._tables_for_rt.get(rt_rf_id)
        if vrf_rt_tables:
            interested_tables.update(vrf_rt_tables)

    if not interested_tables:
        # If we do not have any VRF with import RT that match with path RT
        LOG.debug('No VRF table found that imports RTs: %s', path_rts)
        return

    # Import the path into every interested VRF table.
    route_dist = vpn_path.nlri.route_dist
    for vrf_table in interested_tables:
        if (vpn_path.source is not None
                or route_dist != vrf_table.vrf_conf.route_dist):
            update_vrf_dest = vrf_table.import_vpn_path(vpn_path)
            # Queue the destination for further processing.
            if update_vrf_dest is not None:
                self._signal_bus.dest_changed(update_vrf_dest)
Imports *vpn_path* to qualifying VRF tables. Import RTs of VRF table is matched with RTs from *vpn4_path* and if we have any common RTs we import the path into VRF.
def qsub(script, job_name, dryrun=False, *args, **kwargs):
    """Submit a job via qsub."""
    print("Preparing job script...")
    job_string = gen_job(script=script, job_name=job_name, *args, **kwargs)
    env = os.environ.copy()
    if dryrun:
        print(
            "This is a dry run! Here is the generated job file, which will "
            "not be submitted:"
        )
        print(job_string)
        return
    print("Calling qsub with the generated job script.")
    # Feed the generated script to qsub on stdin.
    proc = subprocess.Popen(
        'qsub -V', stdin=subprocess.PIPE, env=env, shell=True
    )
    proc.communicate(input=job_string.encode('ascii'))
Submit a job via qsub.
def collapse(self, remove=False):
    """
    Move all ``sequence_ids`` in the subtree below this node to this node.

    If ``remove`` is True, nodes below this one are deleted from the
    taxonomy.
    """
    descendants = iter(self)
    # Skip this node
    assert next(descendants) is self
    for descendant in descendants:
        self.sequence_ids.update(descendant.sequence_ids)
        descendant.sequence_ids.clear()
    if remove:
        # Iterate over a copy: the original looped over ``self.children``
        # directly while ``remove_child`` mutated it, which skipped every
        # other child and left part of the subtree attached.
        for node in list(self.children):
            self.remove_child(node)
Move all ``sequence_ids`` in the subtree below this node to this node. If ``remove`` is True, nodes below this one are deleted from the taxonomy.
def _cwl_workflow_template(inputs, top_level=False):
    """Retrieve CWL inputs shared amongst different workflows.
    """
    ready_inputs = []
    for inp in inputs:
        # Work on a deep copy so callers' input dicts are untouched.
        prepared = copy.deepcopy(inp)
        for attr in ["source", "valueFrom", "wf_duplicate"]:
            prepared.pop(attr, None)
        if top_level:
            prepared = workflow._flatten_nested_input(prepared)
        ready_inputs.append(_clean_record(prepared))
    return {"class": "Workflow",
            "cwlVersion": "v1.0",
            "hints": [],
            "requirements": [{"class": "EnvVarRequirement",
                              "envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]},
                             {"class": "ScatterFeatureRequirement"},
                             {"class": "SubworkflowFeatureRequirement"}],
            "inputs": ready_inputs,
            "outputs": [],
            "steps": []}
Retrieve CWL inputs shared amongst different workflows.
def values(self): """Return a list of all the message's header values. These will be sorted in the order they appeared in the original message, or were added to the message, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. """ return [self.policy.header_fetch_parse(k, v) for k, v in self._headers]
Return a list of all the message's header values. These will be sorted in the order they appeared in the original message, or were added to the message, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list.
def _board_from_game_image(self, game_image):
    """Return a board object matching the board in the game image.

    Return None if any tiles are not identified.
    """
    # Crop the board region out of the full game image.
    t, l, b, r = self._board_tools['board_region'].region_in(game_image)
    board_image = game_image[t:b, l:r]
    # Identify every tile in the grid and fill in a Board object.
    board = Board()
    grid = self._board_tools['grid']
    tile_id = self._board_tools['tile_id']
    for position, borders in grid.borders_by_grid_position(board_image):
        top, left, bottom, right = borders
        tile_image = board_image[top:bottom, left:right]
        tile_character = tile_id.identify(tile_image)
        if tile_character is None:
            return None  # soft failure
        board[position] = Tile.singleton(tile_character)
    return board
Return a board object matching the board in the game image. Return None if any tiles are not identified.
def script_and_notebook_to_rst(spth, npth, rpth):
    """
    Convert a script and the corresponding executed notebook to rst.
    The script is converted to notebook format *without* replacement of
    sphinx cross-references with links to online docs, and the resulting
    markdown cells are inserted into the executed notebook, which is then
    converted to rst.
    """
    # Read and preprocess the source script.
    with open(spth) as f:
        script_text = preprocess_script_string(f.read())
    notebook_from_script = script_string_to_notebook_object(script_text)
    # Load the executed notebook and overwrite its markdown cells.
    executed_notebook = nbformat.read(npth, as_version=4)
    try:
        replace_markdown_cells(notebook_from_script, executed_notebook)
    except ValueError:
        raise ValueError('mismatch between source script %s and notebook %s'
                         % (spth, npth))
    notebook_object_to_rst(executed_notebook, rpth)
Convert a script and the corresponding executed notebook to rst. The script is converted to notebook format *without* replacement of sphinx cross-references with links to online docs, and the resulting markdown cells are inserted into the executed notebook, which is then converted to rst.
def inasafe_analysis_summary_field_value(field, feature, parent):
    """Retrieve a value from a field in the analysis summary layer.

    e.g. inasafe_analysis_summary_field_value('total_not_exposed') -> 3
    """
    _ = feature, parent  # NOQA
    scope = QgsExpressionContextUtils.projectScope(QgsProject.instance())
    registry = QgsProject.instance()
    key = provenance_layer_analysis_impacted_id['provenance_key']
    # Bail out at the first missing piece: variable, layer, or field.
    if not scope.hasVariable(key):
        return None
    layer = registry.mapLayer(scope.variable(key))
    if not layer:
        return None
    index = layer.fields().lookupField(field)
    if index < 0:
        return None
    # The summary layer holds a single feature; read the field from it.
    first_feature = next(layer.getFeatures())
    return first_feature[index]
Retrieve a value from a field in the analysis summary layer. e.g. inasafe_analysis_summary_field_value('total_not_exposed') -> 3
def forward(self, x, **kwargs):
    """Perform a forward pass through the network.

    The activation combines the distance of the input to the weights
    with the distance of the previous time-step's activation to the
    context weights.

    Parameters
    ----------
    x : numpy array
        The input data.
    prev_activation : numpy array
        The activation of the network in the previous time-step
        (passed via ``kwargs``).

    Returns
    -------
    tuple
        (activation, input differences, context differences).
    """
    prev_activation = kwargs['prev_activation']
    distance_x, diff_x = self.distance_function(x, self.weights)
    distance_y, diff_y = self.distance_function(prev_activation,
                                                self.context_weights)
    # Weighted sum of both distances drives a Gaussian-style activation.
    combined = distance_x * self.alpha + distance_y * self.beta
    return np.exp(-combined), diff_x, diff_y
Perform a forward pass through the network. The forward pass in recursive som is based on a combination between the activation in the last time-step and the current time-step. Parameters ---------- x : numpy array The input data. prev_activation : numpy array. The activation of the network in the previous time-step. Returns ------- activations : tuple of activations and differences A tuple containing the activation of each unit, the differences between the weights and input and the differences between the context input and context weights.
def pos3(self):
    '''Use pos-sc1-sc2 as POS'''
    parts = [self.pos]
    # Append each subcategory that is present and not a wildcard.
    for sub in (self.sc1, self.sc2):
        if sub and sub != '*':
            parts.append(sub)
    return '-'.join(parts)
Use pos-sc1-sc2 as POS
def search_individuals(self, dataset_id, name=None):
    """
    Returns an iterator over the Individuals fulfilling the specified
    conditions.

    :param str dataset_id: The dataset to search within.
    :param str name: Only Individuals matching the specified name will
        be returned.
    :return: An iterator over the :class:`ga4gh.protocol.Biosample`
        objects defined by the query parameters.
    """
    request = protocol.SearchIndividualsRequest()
    request.dataset_id = dataset_id
    request.page_size = pb.int(self._page_size)
    request.name = pb.string(name)
    return self._run_search_request(
        request, "individuals", protocol.SearchIndividualsResponse)
Returns an iterator over the Individuals fulfilling the specified conditions. :param str dataset_id: The dataset to search within. :param str name: Only Individuals matching the specified name will be returned. :return: An iterator over the :class:`ga4gh.protocol.Biosample` objects defined by the query parameters.
def _set_apply_exp_dscp_map_name(self, v, load=False):
    """
    Setter method for apply_exp_dscp_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_dscp_map_name (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_apply_exp_dscp_map_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_apply_exp_dscp_map_name() directly.
    """
    # Auto-generated pyangbind-style setter: coerce ``v`` into the YANG
    # container type and store it, or reject incompatible values.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=apply_exp_dscp_map_name.apply_exp_dscp_map_name, is_container='container', presence=False, yang_name="apply-exp-dscp-map-name", rest_name="exp-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp dscp map', u'cli-sequence-commands': None, u'alt-name': u'exp-dscp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """apply_exp_dscp_map_name must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=apply_exp_dscp_map_name.apply_exp_dscp_map_name, is_container='container', presence=False, yang_name="apply-exp-dscp-map-name", rest_name="exp-dscp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply exp dscp map', u'cli-sequence-commands': None, u'alt-name': u'exp-dscp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)""",
        })
    self.__apply_exp_dscp_map_name = t
    if hasattr(self, '_set'):
        self._set()
Setter method for apply_exp_dscp_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_exp_dscp_map_name (container) If this variable is read-only (config: false) in the source YANG file, then _set_apply_exp_dscp_map_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_apply_exp_dscp_map_name() directly.
def fetch(self, url, listener, chunk_size_bytes=None, timeout_secs=None):
    """Fetches data from the given URL notifying listener of all lifecycle events.

    :param string url: the url to GET data from
    :param listener: the listener to notify of all download lifecycle events
    :param chunk_size_bytes: the chunk size to use for buffering data, 10 KB by default
    :param timeout_secs: the maximum time to wait for data to be available, 1 second by default
    :raises: Fetcher.Error if there was a problem fetching all data from the given url
    """
    if not isinstance(listener, self.Listener):
        raise ValueError('listener must be a Listener instance, given {}'.format(listener))

    chunk_size = chunk_size_bytes or 10 * 1024
    timeout = timeout_secs or 1.0

    with closing(self._fetch(url, timeout_secs=timeout)) as resp:
        if resp.status_code != requests.codes.ok:
            listener.status(resp.status_code)
            raise self.PermanentError('Fetch of {} failed with status code {}'
                                      .format(url, resp.status_code),
                                      response_code=resp.status_code)
        listener.status(resp.status_code, content_length=resp.size)

        # Stream the body, counting bytes so a short read can be detected.
        total_read = 0
        for chunk in resp.iter_content(chunk_size_bytes=chunk_size):
            listener.recv_chunk(chunk)
            total_read += len(chunk)

        if resp.size and total_read != resp.size:
            raise self.Error('Expected {} bytes, read {}'.format(resp.size, total_read))

        listener.finished()
Fetches data from the given URL notifying listener of all lifecycle events. :param string url: the url to GET data from :param listener: the listener to notify of all download lifecycle events :param chunk_size_bytes: the chunk size to use for buffering data, 10 KB by default :param timeout_secs: the maximum time to wait for data to be available, 1 second by default :raises: Fetcher.Error if there was a problem fetching all data from the given url
def add_candidate_peer_endpoints(self, peer_endpoints):
    """Adds candidate endpoints to the list of endpoints to attempt
    to peer with.

    Args:
        peer_endpoints ([str]): A list of public uri's which the
            validator can attempt to peer with.
    """
    if not self._topology:
        # No topology yet; nothing to record the candidates in.
        LOGGER.debug("Could not add peer endpoints to topology. "
                     "ConnectionManager does not exist.")
        return
    self._topology.add_candidate_peer_endpoints(peer_endpoints)
Adds candidate endpoints to the list of endpoints to attempt to peer with. Args: peer_endpoints ([str]): A list of public uri's which the validator can attempt to peer with.
def flatten_dict(x):
    """Flatten a dict.

    Flatten an arbitrarily nested dict as output by to_dict.

    .. note:: Keys in the flattened dict may get very long.

    Args:
        x (dict): Arbitrarily nested dict (maybe resembling a tree)
            with literal/scalar leaf values.

    Returns:
        dict: flattened 1D dict.
    """
    flat = {}
    # Fold each top-level entry into the accumulator recursively.
    for key, value in x.items():
        flat = _recur_flatten(key, value, flat)
    return flat
Flatten a dict Flatten an arbitrarily nested dict as output by to_dict .. note:: Keys in the flattened dict may get very long. Args: x (dict): Arbitrarily nested dict (maybe resembling a tree) with literal/scalar leaf values Returns: dict: flattened 1D dict
def run(self):
    """Threading callback: poll lines via ``self._readline`` and log them
    until ``self.viewing`` is cleared or the lock cannot be acquired.

    Each iteration takes the lock around the read, logs the line on
    success, and yields the scheduler with ``time.sleep(0)``.
    """
    self.viewing = True
    while self.viewing and self._lock.acquire():
        try:
            try:
                line = self._readline()
            except Exception:
                # Best-effort read: swallow ordinary errors but (unlike the
                # previous bare `except:`) let SystemExit/KeyboardInterrupt
                # propagate.
                pass
            else:
                logger.info(line)
        finally:
            # Always release, even if logging raises — previously a failure
            # in logger.info would leave the lock held forever.
            self._lock.release()
        time.sleep(0)
Threading callback
def info(self):
    """Delivers some info to you about the class."""
    # Fixed preamble, then the instance itself and its experiments.
    for line in ("Sorry, but I don't have much to share.", "This is me:"):
        print(line)
    print(self)
    print("And these are the experiments assigned to me:")
    print(self.experiments)
Delivers some info to you about the class.
def _op_to_matrix(self,
                  op: Optional[ops.Operation],
                  qubits: Tuple[ops.Qid, ...]
                  ) -> Optional[np.ndarray]:
    """Determines the effect of an operation on the given qubits.

    If the operation is a 1-qubit operation on one of the given qubits,
    or a 2-qubit operation on both of the given qubits, and also the
    operation has a known matrix, then a matrix is returned. Otherwise
    None is returned.

    Args:
        op: The operation to understand.
        qubits: The qubits we care about. Order determines matrix tensor
            order.

    Returns:
        None, or else a matrix equivalent to the effect of the operation.
    """
    first, second = qubits

    unitary = protocols.unitary(op, None)
    if unitary is None:
        return None
    assert op is not None

    actual = op.qubits
    if actual == qubits:
        return unitary
    if actual == (second, first):
        # Same two qubits, opposite tensor order: swap the kron factors.
        return MergeInteractions._flip_kron_order(unitary)
    if actual == (first,):
        return np.kron(unitary, np.eye(2))
    if actual == (second,):
        return np.kron(np.eye(2), unitary)
    return None
Determines the effect of an operation on the given qubits. If the operation is a 1-qubit operation on one of the given qubits, or a 2-qubit operation on both of the given qubits, and also the operation has a known matrix, then a matrix is returned. Otherwise None is returned. Args: op: The operation to understand. qubits: The qubits we care about. Order determines matrix tensor order. Returns: None, or else a matrix equivalent to the effect of the operation.
def weather_history_at_place(self, name, start=None, end=None):
    """
    Queries the OWM Weather API for weather history for the specified
    location (eg: "London,uk"). A list of *Weather* objects is returned.
    It is possible to query for weather history in a closed time period,
    whose boundaries can be passed as optional parameters.

    :param name: the location's toponym
    :type name: str or unicode
    :param start: the object conveying the time value for the start query
        boundary (defaults to ``None``)
    :type start: int, ``datetime.datetime`` or ISO8601-formatted string
    :param end: the object conveying the time value for the end query
        boundary (defaults to ``None``)
    :type end: int, ``datetime.datetime`` or ISO8601-formatted string
    :returns: a list of *Weather* instances or ``None`` if history data is
        not available for the specified location
    :raises: *ParseResponseException* when OWM Weather API responses' data
        cannot be parsed, *APICallException* when OWM Weather API can not be
        reached, *ValueError* if the time boundaries are not in the correct
        chronological order, if one of the time boundaries is not ``None``
        and the other is or if one or both of the time boundaries are after
        the current time
    """
    assert isinstance(name, str), "Value must be a string"
    params = {'q': name, 'lang': self._language}

    # Boundaries must be given together or not at all.
    if (start is None) != (end is None):
        raise ValueError("Error: one of the time boundaries is None, " \
                         "while the other is not!")
    if start is not None:
        unix_start = timeformatutils.to_UNIXtime(start)
        unix_end = timeformatutils.to_UNIXtime(end)
        if unix_start >= unix_end:
            raise ValueError("Error: the start time boundary must " \
                             "precede the end time!")
        if unix_start > time():
            raise ValueError("Error: the start time boundary must " \
                             "precede the current time!")
        params['start'] = str(unix_start)
        params['end'] = str(unix_end)

    uri = http_client.HttpClient.to_url(CITY_WEATHER_HISTORY_URL,
                                        self._API_key,
                                        self._subscription_type,
                                        self._use_ssl)
    _, json_data = self._wapi.cacheable_get_json(uri, params=params)
    return self._parsers['weather_history'].parse_JSON(json_data)
Queries the OWM Weather API for weather history for the specified location (eg: "London,uk"). A list of *Weather* objects is returned. It is possible to query for weather history in a closed time period, whose boundaries can be passed as optional parameters. :param name: the location's toponym :type name: str or unicode :param start: the object conveying the time value for the start query boundary (defaults to ``None``) :type start: int, ``datetime.datetime`` or ISO8601-formatted string :param end: the object conveying the time value for the end query boundary (defaults to ``None``) :type end: int, ``datetime.datetime`` or ISO8601-formatted string :returns: a list of *Weather* instances or ``None`` if history data is not available for the specified location :raises: *ParseResponseException* when OWM Weather API responses' data cannot be parsed, *APICallException* when OWM Weather API can not be reached, *ValueError* if the time boundaries are not in the correct chronological order, if one of the time boundaries is not ``None`` and the other is or if one or both of the time boundaries are after the current time
def _get_sorted_actions_list(self, raw_set): """ This returns a list of dictionaries with the actions got in the raw_set. :raw_set: is the dict representing a set of rules and conditions. """ keys_list = raw_set.keys() # actions_dicts_l is the final list which will contain the the # dictionaries with the actions. # The dictionaries will be sorted by the index obtained in the # template. actions_dicts_l = [] # a_count is a counter which is incremented every time a new action is # added to the list, so we can give it a index. a_count = 0 # actions_list will contain the keys starting with 'action-' but sorted # by their index actions_list = self._get_sorted_action_keys(keys_list) for key in actions_list: # Getting the key for otherWS element otherWS_key = 'otherWS-'+str(a_count) # Getting the value for otherWS selector otherWS = raw_set.get(otherWS_key, '') # Getting the key for analyst element analyst_key = 'analyst-'+str(a_count) # Getting the value for analyst analyst = raw_set.get(analyst_key, '') # Getting which analysis should has its result set setresulton_key = 'setresulton-'+str(a_count) setresulton = raw_set.get(setresulton_key, '') # Getting the discrete result to set setresultdiscrete_key = 'setresultdiscrete-'+str(a_count) setresultdiscrete = raw_set.get(setresultdiscrete_key, '') # Getting the numeric result to set setresultvalue_key = 'setresultvalue-'+str(a_count) setresultvalue = raw_set.get(setresultvalue_key, '') # Getting the local analysis id local_id_key = 'an_result_id-'+str(a_count) local_id = raw_set.get(local_id_key, '') # Getting the local analysis id worksheettemplate_key = 'worksheettemplate-'+str(a_count) worksheettemplate = raw_set.get(worksheettemplate_key, '') # Getting the visibility in report showinreport_key = 'showinreport-'+str(a_count) showinreport = raw_set.get(showinreport_key, '') # Getting the analysis to show or hide in report setvisibilityof_key = 'setvisibilityof-'+str(a_count) setvisibilityof = 
raw_set.get(setvisibilityof_key, '') # Building the action dict action_dict = { 'action': raw_set[key], 'act_row_idx': a_count, 'otherWS': otherWS, 'worksheettemplate': worksheettemplate, 'analyst': analyst, 'setresulton': setresulton, 'setresultdiscrete': setresultdiscrete, 'setresultvalue': setresultvalue, 'an_result_id': local_id, 'showinreport': showinreport, 'setvisibilityof': setvisibilityof, } # Saves the action as a new dict inside the actions list actions_dicts_l.append(action_dict) a_count += 1 return actions_dicts_l
This returns a list of dictionaries with the actions got in the raw_set. :raw_set: is the dict representing a set of rules and conditions.
def CryptProtectData(
    data,
    description=None,
    optional_entropy=None,
    prompt_struct=None,
    flags=0,
):
    """
    Encrypt data
    """
    blob_in = DATA_BLOB(data)
    # Entropy is optional; pass NULL when the caller supplied none.
    blob_entropy = DATA_BLOB(optional_entropy) if optional_entropy else None
    blob_out = DATA_BLOB()

    status = _CryptProtectData(
        blob_in,
        description,
        blob_entropy,
        None,  # reserved
        prompt_struct,
        flags,
        blob_out,
    )
    handle_nonzero_success(status)

    # Copy out the ciphertext, then free the API-allocated buffer.
    encrypted = blob_out.get_data()
    blob_out.free()
    return encrypted
Encrypt data
def disable_host_flap_detection(self, host):
    """Disable flap detection for a host

    Format of the line that triggers function call::

        DISABLE_HOST_FLAP_DETECTION;<host_name>

    :param host: host to edit
    :type host: alignak.objects.host.Host
    :return: None
    """
    # Nothing to do if flap detection is already disabled.
    if not host.flap_detection_enabled:
        return

    host.modified_attributes |= DICT_MODATTR["MODATTR_FLAP_DETECTION_ENABLED"].value
    host.flap_detection_enabled = False

    # If the host was flapping, stop the flapping now.
    if host.is_flapping:
        host.is_flapping = False
        host.flapping_changes = []

    self.send_an_element(host.get_update_status_brok())
Disable flap detection for a host Format of the line that triggers function call:: DISABLE_HOST_FLAP_DETECTION;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None