code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def uint8sc(im):
    """Linearly rescale an image to the full uint8 range.

    Parameters
    ----------
    im : 2d array
        The image.

    Returns
    -------
    im : 2d array (dtype uint8)
        The image rescaled so its minimum maps to 0 and its maximum to 255.
    """
    im = np.asarray(im)
    immin = im.min()
    immax = im.max()
    imrange = immax - immin
    if imrange == 0:
        # Bug fix: a constant image previously caused a division by zero.
        # Map every pixel to 0 instead.
        return np.zeros_like(im, dtype=np.uint8)
    return cv2.convertScaleAbs(im - immin, alpha=255 / imrange)
Scale the image to uint8 Parameters: ----------- im: 2d array The image Returns: -------- im: 2d array (dtype uint8) The scaled image to uint8
def add_efac(psr, efac=1.0, flagid=None, flags=None, seed=None):
    """Add nominal TOA errors, multiplied by `efac` factor.

    Optionally takes a pseudorandom-number-generator seed.

    :param psr: pulsar object; ``psr.stoas`` is perturbed in place
    :param efac: scalar (applied to all TOAs) or per-flag sequence
    :param flagid: flag name used to look up ``psr.flagvals``
    :param flags: flag values matching the entries of `efac`
    :param seed: optional seed for numpy's global RNG
    """
    if seed is not None:
        N.random.seed(seed)
    # Default multiplier of 1 for every observation.
    efacvec = N.ones(psr.nobs)
    if flags is None:
        if not N.isscalar(efac):
            raise ValueError('ERROR: If flags is None, efac must be a scalar')
        else:
            efacvec = N.ones(psr.nobs) * efac
    # Per-flag efacs: assign each efac value to the TOAs carrying its flag.
    # NOTE(review): silently does nothing if len(efac) != len(flags) — confirm.
    if flags is not None and flagid is not None and not N.isscalar(efac):
        if len(efac) == len(flags):
            for ct, flag in enumerate(flags):
                ind = flag == N.array(psr.flagvals(flagid))
                efacvec[ind] = efac[ct]
    # Perturb the TOAs; toaerrs presumably in microseconds, hence the
    # 1e-6/day conversion to days — TODO confirm units.
    psr.stoas[:] += efacvec * psr.toaerrs * (1e-6 / day) * N.random.randn(psr.nobs)
Add nominal TOA errors, multiplied by `efac` factor. Optionally take a pseudorandom-number-generator seed.
def contains(self, expected): return self._encode_invoke(atomic_reference_contains_codec, expected=self._to_data(expected))
Checks if the reference contains the value. :param expected: (object), the value to check (is allowed to be ``None``). :return: (bool), ``true`` if the value is found, ``false`` otherwise.
def disable(self): w.ActButton.disable(self) g = get_root(self).globals if self._expert: self.config(bg=g.COL['start']) else: self.config(bg=g.COL['startD'])
Disable the button, colouring it according to whether we are in expert mode.
def _table_to_csv(table_): f = cStringIO.StringIO() try: _write_csv(f, table_) return f.getvalue() finally: f.close()
Return the given table converted to a CSV string. :param table: the table to convert :type table: list of OrderedDicts each with the same keys in the same order :rtype: UTF8-encoded, CSV-formatted string
def enable(instrumentation_key, *args, **kwargs):
    """Enable automatic collection of unhandled exceptions.

    Captured exceptions are sent to the Application Insights service before
    being re-thrown.  Multiple calls with different instrumentation keys
    result in one telemetry submission per key.

    Args:
        instrumentation_key (str): the instrumentation key to use when
            sending telemetry to the service.

    Keyword Args:
        telemetry_channel: optional channel used when submitting telemetry.

    Raises:
        Exception: if no instrumentation key is provided.
    """
    if not instrumentation_key:
        raise Exception('Instrumentation key was required but not provided')
    global original_excepthook
    global telemetry_channel
    telemetry_channel = kwargs.get('telemetry_channel')
    # Install the intercepting hook only once, keeping a reference to the
    # previously installed hook so it can still be chained to.
    if not original_excepthook:
        original_excepthook = sys.excepthook
        sys.excepthook = intercept_excepthook
    if instrumentation_key not in enabled_instrumentation_keys:
        enabled_instrumentation_keys.append(instrumentation_key)
Enables the automatic collection of unhandled exceptions. Captured exceptions will be sent to the Application Insights service before being re-thrown. Multiple calls to this function with different instrumentation keys result in multiple instances being submitted, one for each key. .. code:: python from applicationinsights.exceptions import enable # set up exception capture enable('<YOUR INSTRUMENTATION KEY GOES HERE>') # raise an exception (this will be sent to the Application Insights service as an exception telemetry object) raise Exception('Boom!') Args: instrumentation_key (str). the instrumentation key to use while sending telemetry to the service.
def fuzzy_match(self, other):
    """Check whether this token loosely matches ``other``.

    Returns True when this alias equals ``other.magic``, or when this alias
    is dotted and its major component equals ``other.alias``.
    """
    magic, fuzzy = False, False
    try:
        magic = self.alias == other.magic
    except AttributeError:
        # ``other`` has no ``magic`` attribute; fall through to the
        # major-component comparison below.
        pass
    if '.' in self.alias:
        major = self.alias.split('.')[0]
        # NOTE(review): this compares our major component against the FULL
        # other.alias, not other's major component — confirm intended.
        fuzzy = major == other.alias
    return magic or fuzzy
Given another token, see if either the major alias identifier matches the other alias, or if magic matches the alias.
def save_to_files(self, directory: str) -> None:
    """Persist this Vocabulary to files so it can be reloaded later.

    Each namespace corresponds to one file in ``directory``.

    Parameters
    ----------
    directory : ``str``
        The directory where we save the serialized vocabulary.
    """
    os.makedirs(directory, exist_ok=True)
    if os.listdir(directory):
        logging.warning("vocabulary serialization directory %s is not empty", directory)
    # Record the non-padded namespaces so loading can reconstruct them.
    with codecs.open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'w', 'utf-8') as namespace_file:
        for namespace_str in self._non_padded_namespaces:
            print(namespace_str, file=namespace_file)
    # One file per namespace, one token per line, in index order.
    for namespace, mapping in self._index_to_token.items():
        with codecs.open(os.path.join(directory, namespace + '.txt'), 'w', 'utf-8') as token_file:
            num_tokens = len(mapping)
            # Index 0 is skipped when it holds the padding token; it is implicit.
            start_index = 1 if mapping[0] == self._padding_token else 0
            for i in range(start_index, num_tokens):
                # Newlines inside a token would break the one-per-line format.
                print(mapping[i].replace('\n', '@@NEWLINE@@'), file=token_file)
Persist this Vocabulary to files so it can be reloaded later. Each namespace corresponds to one file. Parameters ---------- directory : ``str`` The directory where we save the serialized vocabulary.
def _shorten_render(renderer, max_len): def short_renderer(expr): res = renderer(expr) if len(res) > max_len: return '...' else: return res return short_renderer
Return a modified renderer that returns the representation of expr, or '...' if that representation is longer than `max_len`
def executable_path(conn, executable):
    """Locate ``executable`` on the remote host via the connection object.

    Returns the full path reported by the remote ``which``; raises
    ExecutableNotFound (carrying the hostname for context) when the
    executable is not available.
    """
    found = conn.remote_module.which(executable)
    if found:
        return found
    raise ExecutableNotFound(executable, conn.hostname)
Remote validator that accepts a connection object to ensure that a certain executable is available returning its full path if so. Otherwise an exception with thorough details will be raised, informing the user that the executable was not found.
def match_case_tokens(loc, tokens, check_var, top): if len(tokens) == 2: matches, stmts = tokens cond = None elif len(tokens) == 3: matches, cond, stmts = tokens else: raise CoconutInternalException("invalid case match tokens", tokens) matching = Matcher(loc, check_var) matching.match(matches, match_to_var) if cond: matching.add_guard(cond) return matching.build(stmts, set_check_var=top)
Build code for matching the given case.
def bytes_to_string(raw_data: bytes, prefix: bool = False) -> str:
    """Convert a byte sequence to an upper-case hex string.

    :param raw_data: the bytes to render
    :param prefix: when True, prepend '0x' to the result
    :return: hex representation, two upper-case digits per byte
    """
    hex_digits = ''.join('{:02X}'.format(byte) for byte in raw_data)
    return ('0x' + hex_digits) if prefix else hex_digits
Convert a byte array to a hex string.
def list_releases(): response = requests.get(PYPI_URL.format(package=PYPI_PACKAGE_NAME)) if response: data = response.json() releases_dict = data.get('releases', {}) if releases_dict: for version, release in releases_dict.items(): release_formats = [] published_on_date = None for fmt in release: release_formats.append(fmt.get('packagetype')) published_on_date = fmt.get('upload_time') release_formats = ' | '.join(release_formats) print('{:<10}{:>15}{:>25}'.format(version, published_on_date, release_formats)) else: print('No releases found for {}'.format(PYPI_PACKAGE_NAME)) else: print('Package "{}" not found on Pypi.org'.format(PYPI_PACKAGE_NAME))
List all releases of the package published on PyPI.
def make_hmap(pmap, imtls, poes):
    """Compute the hazard maps associated to the passed probability map.

    :param pmap: hazard curves in the form of a ProbabilityMap
    :param imtls: DictArray with M intensity measure types
    :param poes: P PoEs where to compute the maps
    :returns: a ProbabilityMap with size (N, M, P)
    """
    M, P = len(imtls), len(poes)
    hmap = probability_map.ProbabilityMap.build(M, P, pmap, dtype=F32)
    if len(pmap) == 0:
        # No sites: return the empty (but correctly shaped) map.
        return hmap
    for i, imt in enumerate(imtls):
        # One curve per site for this IMT; ``imtls(imt)`` presumably yields
        # the level slice for `imt` — TODO confirm against the DictArray API.
        curves = numpy.array([pmap[sid].array[imtls(imt), 0] for sid in pmap.sids])
        data = compute_hazard_maps(curves, imtls[imt], poes)
        # Scatter the per-site, per-PoE values into the output map.
        for sid, value in zip(pmap.sids, data):
            array = hmap[sid].array
            for j, val in enumerate(value):
                array[i, j] = val
    return hmap
Compute the hazard maps associated to the passed probability map. :param pmap: hazard curves in the form of a ProbabilityMap :param imtls: DictArray with M intensity measure types :param poes: P PoEs where to compute the maps :returns: a ProbabilityMap with size (N, M, P)
def alter_poms(pom_dir, additional_params, repo_url=None, mvn_repo_local=None):
    """Run ``mvn clean`` with the given additional parameters so that
    pom-manipulation-ext performs the POM updates.

    :param pom_dir: directory containing the pom.xml to process
    :param additional_params: space-separated extra arguments passed to mvn
    :param repo_url: optional mirror repository URL; when given, a mirror
                     settings file pointing at it is created and used
    :param mvn_repo_local: optional path used as maven.repo.local
    """
    work_dir = os.getcwd()
    os.chdir(pom_dir)
    try:
        if repo_url:
            settings_filename = create_mirror_settings(repo_url)
        else:
            settings_filename = None
        args = ["mvn", "clean"]
        # Bug fix: the settings file must be used whenever it was created
        # (keyed on repo_url), not when mvn_repo_local is set -- previously
        # a repo_url without mvn_repo_local was silently ignored, and
        # mvn_repo_local without repo_url passed "-s None" to mvn.
        if settings_filename:
            args.extend(["-s", settings_filename])
        if mvn_repo_local:
            args.append("-Dmaven.repo.local=%s" % mvn_repo_local)
        param_list = additional_params.split(" ")
        args.extend(param_list)
        logging.debug("Running command: %s", " ".join(args))
        command = Popen(args, stdout=PIPE, stderr=STDOUT)
        stdout = command.communicate()[0]
        if command.returncode:
            logging.error("POM manipulation failed. Output:\n%s" % stdout)
        else:
            logging.debug("POM manipulation succeeded. Output:\n%s" % stdout)
    finally:
        # Always restore the caller's working directory.
        os.chdir(work_dir)
Runs mvn clean command with provided additional parameters to perform pom updates by pom-manipulation-ext.
def render(self, hosts, vars=None):
    """Render a Mako (``.tpl``) or Python (``.py``) template file.

    :param hosts: hosts passed through to the concrete renderer
    :param vars: optional dict of template variables; defaults to an empty
        dict.  (Fix: the previous ``vars={}`` default was a shared mutable
        default argument; a fresh dict is now created per call.)
    :raises ValueError: if the template file extension is not recognized
    """
    if vars is None:
        vars = {}
    if self.tpl_file.endswith(".tpl"):
        return self._render_mako(hosts, vars)
    elif self.tpl_file.endswith(".py"):
        return self._render_py(hosts, vars)
    else:
        raise ValueError("Don't know how to handle '{0}'".format(self.tpl_file))
Render a mako or .py file.
def dump_database(id): tmp_dir = tempfile.mkdtemp() current_dir = os.getcwd() os.chdir(tmp_dir) FNULL = open(os.devnull, "w") heroku_app = HerokuApp(dallinger_uid=id, output=FNULL) heroku_app.backup_capture() heroku_app.backup_download() for filename in os.listdir(tmp_dir): if filename.startswith("latest.dump"): os.rename(filename, "database.dump") os.chdir(current_dir) return os.path.join(tmp_dir, "database.dump")
Dump the database to a temporary directory.
def _normalize_words(words, acronyms): for i, _ in enumerate(words): if words[i].upper() in acronyms: words[i] = words[i].upper() else: if not words[i].isupper(): words[i] = words[i].capitalize() return words
Normalize case of each word to PascalCase.
def find_path_package_name(thepath):
    """Return the name of the python package a filesystem path belongs to.

    Walks up the directory tree, remembering the last path component that
    was a python module; returns None if no package can be determined.
    """
    module_found = False
    last_module_found = None
    continue_ = True
    while continue_:
        module_found = is_path_python_module(thepath)
        next_path = path.dirname(thepath)
        if next_path == thepath:
            # Reached the filesystem root; this is the last iteration.
            continue_ = False
        if module_found:
            init_names = ['__init__%s' % suffix.lower() for suffix in _py_suffixes]
            if path.basename(thepath).lower() in init_names:
                # An __init__ file names its containing directory's package.
                last_module_found = path.basename(path.dirname(thepath))
            else:
                last_module_found = path.basename(thepath)
        if last_module_found and not module_found:
            # We stepped above the package boundary; stop walking.
            continue_ = False
        thepath = next_path
    return last_module_found
Takes a file system path and returns the name of the python package the said path belongs to. If the said path can not be determined, it returns None.
def bodc2s(code, lenout=_default_len_out): code = ctypes.c_int(code) name = stypes.stringToCharP(" " * lenout) lenout = ctypes.c_int(lenout) libspice.bodc2s_c(code, lenout, name) return stypes.toPythonString(name)
Translate a body ID code to either the corresponding name or if no name to ID code mapping exists, the string representation of the body ID value. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodc2s_c.html :param code: Integer ID code to translate to a string. :type code: int :param lenout: Maximum length of output name. :type lenout: int :return: String corresponding to 'code'. :rtype: str
def excel_key(index):
    """Convert a zero-based column index to an Excel column key.

    The key is the base-26 bijective representation using A-Z:
    0 -> 'A', 25 -> 'Z', 26 -> 'AA', ...

    Fix: the original lambda-assigned ``~n and X(...) or ''`` recursion was
    opaque (and recursed without bound for negative inputs below -1); this
    iterative form is equivalent for all valid inputs and returns '' for
    any negative index.

    :param index: zero-based column number (anything accepted by ``int``)
    :return: the Excel-style column letters as a string
    """
    n = int(index)
    if n < 0:
        return ''
    letters = []
    while n >= 0:
        n, remainder = divmod(n, 26)
        letters.append(chr(65 + remainder))
        n -= 1
    return ''.join(reversed(letters))
create a key for index by converting index into a base-26 number, using A-Z as the characters.
def save_summaries(frames, keys, selected_summaries, batch_dir, batch_name):
    """Write the summaries to csv-files.

    Args:
        frames: list of ``cellpy`` summary DataFrames
        keys: list of indexes (typically run-names) for the different runs
        selected_summaries: list defining which summary data to save
        batch_dir: directory to save to
        batch_name: the batch name (used when creating the file-name(s))

    Returns:
        a pandas DataFrame with the selected summaries, or None when there
        was nothing to save.
    """
    if not frames:
        # Fix: messages previously read "Could save summaries", inverting
        # the intended meaning.
        logger.info("Could not save summaries - no summaries to save!")
        logger.info("You have no frames - aborting")
        return None
    if not keys:
        logger.info("Could not save summaries - no summaries to save!")
        logger.info("You have no keys - aborting")
        return None
    selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
    summary_df = pd.concat(frames, keys=keys, axis=1)
    for key, value in selected_summaries_dict.items():
        _summary_file_name = os.path.join(
            batch_dir, "summary_%s_%s.csv" % (key, batch_name))
        # Select only the columns belonging to this summary type.
        _summary_df = summary_df.iloc[
            :, summary_df.columns.get_level_values(1) == value]
        _summary_df.to_csv(_summary_file_name, sep=";")
        logger.info(
            "saved summary (%s) to:\n %s" % (key, _summary_file_name))
    logger.info("finished saving summaries")
    return summary_df
Writes the summaries to csv-files Args: frames: list of ``cellpy`` summary DataFrames keys: list of indexes (typically run-names) for the different runs selected_summaries: list defining which summary data to save batch_dir: directory to save to batch_name: the batch name (will be used for making the file-name(s)) Returns: a pandas DataFrame with your selected summaries.
def write_to_file(path, contents, file_type='text'):
    """Write ``contents`` to ``path``, optionally formatting first.

    Args:
        path (str): destination file path
        contents (str, object, or bytes): what to write
        file_type (str, optional): ``text`` or ``binary`` (contents written
            unchanged) or ``json`` (contents run through ``format_json``).
            Defaults to ``text``.

    Raises:
        ScriptWorkerException: for an unknown ``file_type``
        TypeError: if ``file_type`` is ``json`` and ``contents`` isn't
            JSON serializable
    """
    known_types = ('json', 'text', 'binary')
    if file_type not in known_types:
        raise ScriptWorkerException("Unknown file_type {} not in {}!".format(file_type, known_types))
    if file_type == 'json':
        contents = format_json(contents)
    if file_type == 'binary':
        with open(path, 'wb') as fh:
            fh.write(contents)
        return
    with open(path, 'w') as fh:
        print(contents, file=fh, end="")
Write ``contents`` to ``path`` with optional formatting. Small helper function to write ``contents`` to ``file`` with optional formatting. Args: path (str): the path to write to contents (str, object, or bytes): the contents to write to the file file_type (str, optional): the type of file. Currently accepts ``text`` or ``binary`` (contents are unchanged) or ``json`` (contents are formatted). Defaults to ``text``. Raises: ScriptWorkerException: with an unknown ``file_type`` TypeError: if ``file_type`` is ``json`` and ``contents`` isn't JSON serializable
def register_writer(klass): if not callable(klass): raise ValueError("Can only register callables as engines") engine_name = klass.engine _writers[engine_name] = klass
Add engine to the excel writer registry.io.excel. You must use this method to integrate with ``to_excel``. Parameters ---------- klass : ExcelWriter
def add_path_to_sys_path(self):
    """Prepend the Spyder PYTHONPATH entries to ``sys.path``.

    Entries are inserted at position 1 in reverse order, so the final
    ordering in ``sys.path`` matches ``get_spyder_pythonpath()``.
    """
    spyder_paths = self.get_spyder_pythonpath()
    for entry in spyder_paths[::-1]:
        sys.path.insert(1, entry)
Add Spyder path to sys.path
def zero(self):
    """Return the function element mapping anything to zero."""
    def zero_vec(x, out=None, **kwargs):
        # Determine the scalar output shape from the kind of input.
        if is_valid_input_meshgrid(x, self.domain.ndim):
            scalar_out_shape = out_shape_from_meshgrid(x)
        elif is_valid_input_array(x, self.domain.ndim):
            scalar_out_shape = out_shape_from_array(x)
        else:
            raise TypeError('invalid input type')
        out_shape = self.out_shape + scalar_out_shape
        if out is None:
            return np.zeros(out_shape, dtype=self.scalar_out_dtype)
        else:
            # In-place path: fill with a zero scalar of the right dtype.
            # NOTE(review): nothing is returned in this branch — confirm
            # callers expect None when ``out`` is supplied.
            fill_value = np.zeros(1, dtype=self.scalar_out_dtype)[0]
            out.fill(fill_value)
    return self.element_type(self, zero_vec)
Function mapping anything to zero.
def transform(self, raw_X, y=None):
    """Transform sequence pairs into per-pair feature arrays.

    Parameters
    ----------
    raw_X : list of (sequence1_n, sequence2_n) pairs, one per example.
    y : ignored; present for estimator-API compatibility.

    Returns
    -------
    list of feature matrices, one per pair, as produced by
    ``self._extract_features``.
    """
    features = []
    for first, second in raw_X:
        features.append(self._extract_features(first, second))
    return features
Transform sequence pairs to feature arrays that can be used as input to `Hacrf` models. Parameters ---------- raw_X : List of (sequence1_n, sequence2_n) pairs, one for each training example n. y : (ignored) Returns ------- X : List of numpy ndarrays, each with shape = (I_n, J_n, K), where I_n is the length of sequence1_n, J_n is the length of sequence2_n, and K is the number of features. Feature matrix list, for use with estimators or further transformers.
def set_nsxcontroller_port(self, **kwargs): name = kwargs.pop('name') port = str(kwargs.pop('port')) port_args = dict(name=name, port=port) method_name = 'nsx_controller_connection_addr_port' method_class = self._brocade_tunnels nsxcontroller_attr = getattr(method_class, method_name) config = nsxcontroller_attr(**port_args) output = self._callback(config) return output
Set the NSX Controller port on the switch Args: port (int): 1 to 65535. callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
def rms(self, stride=1): stridesamp = int(stride * self.sample_rate.value) nsteps = int(self.size // stridesamp) data = numpy.zeros(nsteps) for step in range(nsteps): idx = int(stridesamp * step) idx_end = idx + stridesamp stepseries = self[idx:idx_end] rms_ = numpy.sqrt(numpy.mean(numpy.abs(stepseries.value)**2)) data[step] = rms_ name = '%s %.2f-second RMS' % (self.name, stride) return self.__class__(data, channel=self.channel, t0=self.t0, name=name, sample_rate=(1/float(stride)))
Calculate the root-mean-square value of this `TimeSeries` once per stride. Parameters ---------- stride : `float` stride (seconds) between RMS calculations Returns ------- rms : `TimeSeries` a new `TimeSeries` containing the RMS value with dt=stride
def confirm_push(self, coord, version): if not self.get_options().prompt: return True try: isatty = os.isatty(sys.stdin.fileno()) except ValueError: isatty = False if not isatty: return True push = input('\nPublish {} with revision {} ? [y|N] '.format( coord, version )) print('\n') return push.strip().lower() == 'y'
Ask the user if a push should be done for a particular version of a particular coordinate. Return True if the push should be done
def save_plots(self, directory, format="png", recommended_only=False): for i, session in enumerate(self): session.save_plots( directory, prefix=str(i), format=format, recommended_only=recommended_only )
Save images of dose-response curve-fits for each model. Parameters ---------- directory : str Directory where the PNG files will be saved. format : str, optional Image output format. Valid options include: png, pdf, svg, ps, eps recommended_only : bool, optional If True, only recommended models for each session are included. If no model is recommended, then a row with it's ID will be included, but all fields will be null. Returns ------- None
def lower_comparisons_to_between(match_query): new_match_traversals = [] for current_match_traversal in match_query.match_traversals: new_traversal = [] for step in current_match_traversal: if step.where_block: expression = step.where_block.predicate new_where_block = Filter(_lower_expressions_to_between(expression)) new_traversal.append(step._replace(where_block=new_where_block)) else: new_traversal.append(step) new_match_traversals.append(new_traversal) return match_query._replace(match_traversals=new_match_traversals)
Return a new MatchQuery, with all eligible comparison filters lowered to between clauses.
def reload(self, r=None, pr=None, timeout=None, basic_quorum=None,
           notfound_ok=None, head_only=False):
    """Reload the object from Riak.

    When this operation completes, the object may contain new metadata and
    a new value, if the object was updated in Riak since it was last
    retrieved.

    :param r: R-value, wait for this many partitions to respond
    :param pr: PR-value, primary partitions required before the read
    :param timeout: a timeout value in milliseconds
    :param basic_quorum: whether to use the "basic quorum" policy
    :param notfound_ok: whether to treat not-found responses as successful
    :param head_only: whether to fetch only metadata, not the value
    :rtype: this object (for chaining)

    NOTE(review): ``basic_quorum`` and ``notfound_ok`` are accepted but
    never forwarded to ``client.get`` — confirm whether they should be.
    """
    self.client.get(self, r=r, pr=pr, timeout=timeout, head_only=head_only)
    return self
Reload the object from Riak. When this operation completes, the object could contain new metadata and a new value, if the object was updated in Riak since it was last retrieved. .. note:: Even if the key is not found in Riak, this will return a :class:`RiakObject`. Check the :attr:`exists` property to see if the key was found. :param r: R-Value, wait for this many partitions to respond before returning to client. :type r: integer :param pr: PR-value, require this many primary partitions to be available before performing the read that precedes the put :type pr: integer :param timeout: a timeout value in milliseconds :type timeout: int :param basic_quorum: whether to use the "basic quorum" policy for not-founds :type basic_quorum: bool :param notfound_ok: whether to treat not-found responses as successful :type notfound_ok: bool :param head_only: whether to fetch without value, so only metadata (only available on PB transport) :type head_only: bool :rtype: :class:`RiakObject`
def rename_with_num(self, prefix="", new_path=None, remove_desc=True):
    """Rename every sequence based on a prefix and a running number.

    Writes the renamed (and upper-cased) reads to ``new_path`` if given,
    otherwise to a temporary file that then replaces this file in place.

    :param prefix: string prepended to each sequential id
    :param new_path: optional destination path; None means replace in place
    :param remove_desc: when True, clear each read's description
    :returns: the new file object
    """
    if new_path is None:
        numbered = self.__class__(new_temp_path())
    else:
        numbered = self.__class__(new_path)
    def numbered_iterator():
        for i,read in enumerate(self):
            read.id = prefix + str(i)
            read.seq = read.seq.upper()
            if remove_desc:
                read.description = ""
            yield read
    numbered.write(numbered_iterator())
    numbered.close()
    if new_path is None:
        # Replace the original file with the freshly numbered one.
        # NOTE(review): shutil.move receives the file OBJECT, not a path —
        # confirm __class__ supports str/fspath conversion.
        os.remove(self.path)
        shutil.move(numbered, self.path)
    return numbered
Rename every sequence based on a prefix and a number.
def _get_ann(dbs, features): value = "" for db, feature in zip(dbs, features): value += db + ":" + feature return value
Format db/feature annotations as ``db:feature`` pairs for HTML table output
def help(self, *args): if len(args) == 0: return help_msg which = args[0].lower() if which == 'ginga': method = args[1] _method = getattr(self.fv, method) return _method.__doc__ elif which == 'channel': chname = args[1] method = args[2] chinfo = self.fv.get_channel(chname) _method = getattr(chinfo.viewer, method) return _method.__doc__ else: return ("Please use 'help ginga <method>' or " "'help channel <chname> <method>'")
Get help for a remote interface method. Examples -------- help('ginga', `method`) name of the method for which you want help help('channel', `chname`, `method`) name of the method in the channel for which you want help Returns ------- help : string a help message
def get_attribute_references(instring): parsed = ConditionGrammar().conditions.parseString(instring) result = parsed if isinstance(parsed[0], str) else parsed[0] return result.attribute_reference.asList() if result.attribute_reference else []
Return a list of attribute references in the condition expression. attribute_reference ::= relation_name "." attribute_name | attribute_name :param instring: a condition expression. :return: a list of attribute references.
def update(self, fmt={}, replot=False, auto_update=False, draw=None, force=False, todefault=False, **kwargs): if self.disabled: return fmt = dict(fmt) if kwargs: fmt.update(kwargs) if not self._initialized: for key, val in six.iteritems(fmt): self[key] = val return self._register_update(fmt=fmt, replot=replot, force=force, todefault=todefault) if not self.no_auto_update or auto_update: self.start_update(draw=draw)
Update the formatoptions and the plot If the :attr:`data` attribute of this plotter is None, the plotter is updated like a usual dictionary (see :meth:`dict.update`). Otherwise the update is registered and the plot is updated if `auto_update` is True or if the :meth:`start_update` method is called (see below). Parameters ---------- %(Plotter._register_update.parameters)s %(InteractiveBase.start_update.parameters)s %(InteractiveBase.update.parameters.auto_update)s ``**kwargs`` Any other formatoption that shall be updated (additionally to those in `fmt`) Notes ----- %(InteractiveBase.update.notes)s
def __metadata_helper(json_path):
    """Return the JSON for specific cluster metadata, or None.

    dcos-metadata was introduced in DC/OS 1.9; older clusters (or clusters
    missing the metadata) yield None.

    :param json_path: path fragment appended to ``dcos-metadata/``
    :return: decoded JSON on HTTP 200, otherwise None
    """
    url = shakedown.dcos_url_path('dcos-metadata/{}'.format(json_path))
    try:
        response = dcos.http.request('get', url)
        if response.status_code == 200:
            return response.json()
    except Exception:
        # Fix: the previous bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt.  Best-effort behaviour (None on any ordinary
        # request failure) is preserved.
        pass
    return None
Returns json for specific cluster metadata. Important to realize that this was introduced in dcos-1.9. Clusters prior to 1.9 and missing metadata will return None
def get_required_kwonly_args(argspecs):
    """Return the keyword-only argument names that carry no default.

    Works on full argspecs (``inspect.getfullargspec`` results); returns an
    empty list for argspecs lacking keyword-only support (e.g. the legacy
    ``getargspec`` result, which has no such attributes).
    """
    try:
        kwonly = argspecs.kwonlyargs
        defaults = argspecs.kwonlydefaults
    except AttributeError:
        return []
    if defaults is None:
        return kwonly
    return [name for name in kwonly if name not in defaults]
Determines whether given argspecs implies required keywords-only args and returns them as a list. Returns empty list if no such args exist.
def insert(self, tag, identifier, parent, data): if self.global_plate_definitions.contains(identifier): raise KeyError("Identifier {} already exists in tree".format(identifier)) self.global_plate_definitions.create_node(tag=tag, identifier=identifier, parent=parent, data=data) with switch_db(MetaDataModel, 'hyperstream'): meta_data = MetaDataModel(tag=tag, parent=parent, data=data) meta_data.save() logging.info("Meta data {} inserted".format(identifier))
Insert the given meta data into the database :param tag: The tag (equates to meta_data_id) :param identifier: The identifier (a combination of the meta_data_id and the plate value) :param parent: The parent plate identifier :param data: The data (plate value) :return: None
def RunWMIQuery(query, baseobj=r"winmgmts:\root\cimv2"): pythoncom.CoInitialize() wmi_obj = win32com.client.GetObject(baseobj) wmi_obj.Security_.Privileges.AddAsString("SeDebugPrivilege") try: query_results = wmi_obj.ExecQuery(query) except pythoncom.com_error as e: raise RuntimeError("Failed to run WMI query \'%s\' err was %s" % (query, e)) try: for result in query_results: response = rdf_protodict.Dict() properties = ( list(result.Properties_) + list(getattr(result, "SystemProperties_", []))) for prop in properties: if prop.Name not in IGNORE_PROPS: response.SetItem(prop.Name, prop.Value, raise_on_error=False) yield response except pythoncom.com_error as e: raise RuntimeError("WMI query data error on query \'%s\' err was %s" % (e, query))
Run a WMI query and return a result. Args: query: the WMI query to run. baseobj: the base object for the WMI query. Yields: rdf_protodict.Dicts containing key value pairs from the resulting COM objects.
def condense(input_string):
    """Trim leading and trailing whitespace between tags in an HTML document.

    Args:
        input_string: A (possibly unicode) string representing HTML.

    Returns:
        A (possibly unicode) string representing HTML.

    Raises:
        TypeError: Raised if input_string isn't a unicode string or string.
    """
    # Fix: validate explicitly instead of via ``assert`` (asserts are
    # stripped under ``python -O``, silently disabling the check).
    if not isinstance(input_string, basestring):
        raise TypeError
    # Fix: raw strings for the regexes -- ``'>\s+'`` contains an invalid
    # escape sequence, a DeprecationWarning (eventually an error) in
    # modern Python.
    removed_leading_whitespace = re.sub(r'>\s+', '>', input_string).strip()
    removed_trailing_whitespace = re.sub(r'\s+<', '<', removed_leading_whitespace).strip()
    return removed_trailing_whitespace
Trims leadings and trailing whitespace between tags in an html document Args: input_string: A (possible unicode) string representing HTML. Returns: A (possibly unicode) string representing HTML. Raises: TypeError: Raised if input_string isn't a unicode string or string.
def remove_if_same(self, key, value): check_not_none(key, "key can't be None") check_not_none(value, "value can't be None") key_data = self._to_data(key) value_data = self._to_data(value) return self._remove_if_same_internal_(key_data, value_data)
Removes the entry for a key only if it is currently mapped to a given value. This is equivalent to: >>> if map.contains_key(key) and map.get(key).equals(value): >>> map.remove(key) >>> return true >>> else: >>> return false except that the action is performed atomically. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), the specified key. :param value: (object), remove the key if it has this value. :return: (bool), ``true`` if the value was removed.
def get_offset_topic_partition_count(kafka_config): metadata = get_topic_partition_metadata(kafka_config.broker_list) if CONSUMER_OFFSET_TOPIC not in metadata: raise UnknownTopic("Consumer offset topic is missing.") return len(metadata[CONSUMER_OFFSET_TOPIC])
Given a kafka cluster configuration, return the number of partitions in the offset topic. It will raise an UnknownTopic exception if the topic cannot be found.
def patch(self, path, data, **options):
    """Parse PATCH request options and dispatch the request."""
    parsed_data, parsed_options = self._update_request(data, options)
    return self.request('patch', path, data=parsed_data, **parsed_options)
Parses PATCH request options and dispatches a request
def minute_to_session(column, close_locs, data, out): if column == 'open': _minute_to_session_open(close_locs, data, out) elif column == 'high': _minute_to_session_high(close_locs, data, out) elif column == 'low': _minute_to_session_low(close_locs, data, out) elif column == 'close': _minute_to_session_close(close_locs, data, out) elif column == 'volume': _minute_to_session_volume(close_locs, data, out) return out
Resample an array with minute data into an array with session data. This function assumes that the minute data is the exact length of all minutes in the sessions in the output. Parameters ---------- column : str The `open`, `high`, `low`, `close`, or `volume` column. close_locs : array[intp] The locations in `data` which are the market close minutes. data : array[float64|uint32] The minute data to be sampled into session data. The first value should align with the market open of the first session, containing values for all minutes for all sessions. With the last value being the market close of the last session. out : array[float64|uint32] The output array into which to write the sampled sessions.
def nn_getsockopt(socket, level, option, value): if memoryview(value).readonly: raise TypeError('Writable buffer is required') size_t_size = ctypes.c_size_t(len(value)) rtn = _nn_getsockopt(socket, level, option, ctypes.addressof(value), ctypes.byref(size_t_size)) return (rtn, size_t_size.value)
retrieve a socket option socket - socket number level - option level option - option value - a writable byte buffer (e.g. a bytearray) which the option value will be copied to returns - number of bytes copied or on error nunber < 0
def _start_connection_setup(self): logger.info('We are connected[%s], request connection setup', self.link_uri) self.platform.fetch_platform_informations(self._platform_info_fetched)
Start the connection setup by refreshing the TOCs
def validate_json_schema(data, schema, name="task"): try: jsonschema.validate(data, schema) except jsonschema.exceptions.ValidationError as exc: raise ScriptWorkerTaskException( "Can't validate {} schema!\n{}".format(name, str(exc)), exit_code=STATUSES['malformed-payload'] )
Given data and a jsonschema, let's validate it. This happens for tasks and chain of trust artifacts. Args: data (dict): the json to validate. schema (dict): the jsonschema to validate against. name (str, optional): the name of the json, for exception messages. Defaults to "task". Raises: ScriptWorkerTaskException: on failure
def _stripslashes(s): r = re.sub(r"\\(n|r)", "\n", s) r = re.sub(r"\\", "", r) return r
Removes backslashes from the string, converting literal ``\n``/``\r`` escape sequences into real newlines first
def insert(self, row, ensure=None, types=None): row = self._sync_columns(row, ensure, types=types) res = self.db.executable.execute(self.table.insert(row)) if len(res.inserted_primary_key) > 0: return res.inserted_primary_key[0] return True
Add a ``row`` dict by inserting it into the table. If ``ensure`` is set, any of the keys of the row are not table columns, they will be created automatically. During column creation, ``types`` will be checked for a key matching the name of a column to be created, and the given SQLAlchemy column type will be used. Otherwise, the type is guessed from the row value, defaulting to a simple unicode field. :: data = dict(title='I am a banana!') table.insert(data) Returns the inserted row's primary key.
def search(self, **kwargs): return super(ApiV4Neighbor, self).get(self.prepare_url( 'api/v4/neighbor/', kwargs))
Method to search neighbors based on extends search. :param search: Dict containing QuerySets to find neighbors. :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing neighbors
def register(id, url=None): bucket = registration_s3_bucket() key = registration_key(id) obj = bucket.Object(key) obj.put(Body=url or "missing") return _generate_s3_url(bucket, key)
Register a UUID key in the global S3 bucket.
def base_url(self, base_url=None): if base_url: if urlparse(base_url).scheme != 'https': raise ValueError( '`base_url`s need to be over `https`. Update your ' '`base_url`.' ) self._base_url = base_url else: return self._base_url
Return or set `base_url`. Args: base_url (str, optional): If set, updates the `base_url`. Otherwise returns current `base_url`. Note: This does not update the `username` attribute. Separately update the username with ``Credentials.username`` or update `base_url` and `username` at the same time with ``Credentials.set``. Example: .. code:: >>> from cartoframes import Credentials # load credentials saved in previous session >>> creds = Credentials() # returns current base_url >>> creds.base_url() 'https://eschbacher.carto.com/' # updates base_url with new value >>> creds.base_url('new_base_url')
def meth_wdl(args): r = fapi.get_repository_method(args.namespace, args.method, args.snapshot_id, True) fapi._check_response_code(r, 200) return r.text
Retrieve WDL for given version of a repository method
def __generate_really (self, prop_set): assert isinstance(prop_set, property_set.PropertySet) best_alternative = self.__select_alternatives (prop_set, debug=0) self.best_alternative = best_alternative if not best_alternative: self.manager_.errors()( "No best alternative for '%s'.\n" % (self.full_name(),)) result = best_alternative.generate (prop_set) return result
Generates the main target with the given property set and returns a list which first element is property_set object containing usage_requirements of generated target and with generated virtual target in other elements. It's possible that no targets are generated.
def scroll_deck_x(self, decknum, scroll_x):
    """Move deck *decknum* left or right by *scroll_x* and relayout.

    :raises IndexError: if *decknum* is not a valid deck index.
    """
    if decknum >= len(self.decks):
        raise IndexError("I have no deck at {}".format(decknum))
    offsets = self.deck_x_hint_offsets
    if decknum >= len(offsets):
        # Grow the offsets list with zeros up to and including decknum.
        padding = [0] * (decknum - len(offsets) + 1)
        self.deck_x_hint_offsets = list(offsets) + padding
    self.deck_x_hint_offsets[decknum] += scroll_x
    self._trigger_layout()
def wait_for_states(self, timeout=40, *states):
    """Wait until the port reaches one of the requested states.

    Polls the 'state' attribute once per second.

    :param timeout: max seconds to wait for one of the requested states.
    :param states: acceptable state values (positional, after timeout).
    :raises TgnError: if none of the states is reached within *timeout*.
    """
    state = self.get_attribute('state')
    for _ in range(timeout):
        if state in states:
            return
        time.sleep(1)
        state = self.get_attribute('state')
    # BUGFIX: the state fetched on the last poll was never compared
    # before raising; check it once more here.
    if state in states:
        return
    raise TgnError('Failed to reach states {}, port state is {} after {} seconds'.format(states, state, timeout))
def create_user(self, projects=None, tasks=None):
    """Open a user-creation dialog and return the created user.

    A tree item for the new user is appended to ``self.users_model``.

    :param projects: projects for the user
        (list of :class:`jukeboxcore.djadapter.models.Project`)
    :param tasks: tasks for the user
        (list of :class:`jukeboxcore.djadapter.models.Task`)
    :returns: the created user, or None when the dialog was cancelled
    """
    projects = projects or []
    tasks = tasks or []
    dialog = UserCreatorDialog(projects=projects, tasks=tasks, parent=self)
    dialog.exec_()  # modal: blocks until the dialog is closed
    user = dialog.user
    if user:
        userdata = djitemdata.UserItemData(user)
        treemodel.TreeItem(userdata, self.users_model.root)
    return user
def get_all_tags(self, filters=None, max_records=None, next_token=None):
    """List the Auto Scaling group tags (paginated via NextToken).

    :param filters: NOT IMPLEMENTED - accepted but currently ignored.
    :param max_records: maximum number of tags to return.
    :param next_token: pagination token returned by a previous call.
    :returns: list of :class:`boto.ec2.autoscale.tag.Tag` instances.
    """
    params = {}
    if max_records:
        params['MaxRecords'] = max_records
    if next_token:
        params['NextToken'] = next_token
    return self.get_list('DescribeTags', params, [('member', Tag)])
def from_extension(extension):
    """Look up the BioPython file type for *extension* (case-insensitive).

    :param extension: file extension including the leading period.
    :raises ValueError: if the extension does not start with a period.
    :raises UnknownExtensionError: if the extension is not recognized.
    """
    if not extension.startswith('.'):
        raise ValueError("Extensions must begin with a period.")
    try:
        return EXTENSION_TO_TYPE[extension.lower()]
    except KeyError:
        raise UnknownExtensionError(
            "seqmagick does not know how to handle " +
            "files with extensions like this: " + extension)
def find(self, datum):
    """Return *datum* with its value sorted, when it is a list or dict.

    A dict value combined with configured sort expressions is returned
    unchanged; otherwise list/dict values are sorted (using the
    comparator built from ``self.expressions`` when present) and
    wrapped back into a DatumInContext. Anything else passes through.
    """
    if isinstance(datum.value, dict) and self.expressions:
        return datum
    if isinstance(datum.value, dict) or isinstance(datum.value, list):
        # Use the configured comparator, or natural ordering if none.
        key = (functools.cmp_to_key(self._compare)
               if self.expressions else None)
        return [jsonpath_rw.DatumInContext.wrap(
            [value for value in sorted(datum.value, key=key)])]
    return datum
def run_script_with_context(script_path, cwd, context):
    """Execute a script after rendering it with Jinja.

    The script is rendered into a temporary file (kept on disk so the
    interpreter can execute it) and then run from *cwd*.

    :param script_path: Absolute path to the script to run.
    :param cwd: The directory to run the script from.
    :param context: Cookiecutter project template context.
    """
    _, extension = os.path.splitext(script_path)
    # BUGFIX: close the script file after reading (handle was leaked).
    with io.open(script_path, 'r', encoding='utf-8') as script_file:
        contents = script_file.read()
    with tempfile.NamedTemporaryFile(
        delete=False,
        mode='wb',
        suffix=extension
    ) as temp:
        env = StrictEnvironment(
            context=context,
            keep_trailing_newline=True,
        )
        template = env.from_string(contents)
        output = template.render(**context)
        temp.write(output.encode('utf-8'))
    run_script(temp.name, cwd)
def process_allow_action(processors, action, argument):
    """Run every processor on an allow *action*, then commit the session.

    :param processors: callables invoked as ``processor(action, argument)``.
    :param action: the allow action being processed.
    :param argument: argument forwarded to each processor.
    """
    for processor in processors:
        processor(action, argument)
    db.session.commit()
def shape(self) -> Tuple[int, int]:
    """Required shape of the aggregated NetCDF array.

    One axis is the number of timesteps of the initialisation period,
    the other the number of logged sequences (devices); their order is
    decided by ``sort_timeplaceentries`` according to the configured
    time axis.
    """
    return self.sort_timeplaceentries(
        len(hydpy.pub.timegrids.init), len(self.sequences))
def find_pattern(search_base, pattern='*.rpm'):
    """Yield full paths under *search_base* matching glob *pattern*.

    If *search_base* is an existing non-directory path it is yielded
    directly, regardless of the pattern. A generator: paths are
    produced one at a time while walking the tree.

    :param search_base: directory to walk (or a single file path).
    :param pattern: fnmatch-style glob to filter file names.
    """
    if os.path.exists(search_base) and not os.path.isdir(search_base):
        yield search_base
        return
    for root, _dirs, names in os.walk(search_base):
        for match in fnmatch.filter(names, pattern):
            yield os.path.join(root, match)
async def peers(self):
    """Return the current Raft peer set for this datacenter.

    :return: a set of "host:port" address strings.
        NOTE(review): implicitly returns None when the endpoint does
        not answer with HTTP 200 - confirm callers handle that.
    """
    response = await self._api.get("/v1/status/peers")
    if response.status == 200:
        return set(response.body)
def name_replace(self, to_replace, replacement):
    """Substitute *replacement* for *to_replace* within the tag name."""
    updated = self.name.replace(to_replace, replacement)
    self.name = updated
def list_qos_policies(self, retrieve_all=True, **_params):
    """Fetch the list of all QoS policies for a project.

    :param retrieve_all: follow pagination and return all results.
    :param _params: extra query parameters forwarded to the API.
    """
    return self.list('policies', self.qos_policies_path, retrieve_all,
                     **_params)
def _retrieve_tag(self, text):
    """POS-tag *text* with the configured Latin tagger.

    The text is lower-cased before tagging.

    :param text: string to tag.
    :return: list of (word, tag) tuples.
        NOTE(review): implicitly returns None when ``self.tagger`` is
        not one of the three supported tagger names.
    """
    if self.tagger == 'tag_ngram_123_backoff':
        tags = POSTag('latin').tag_ngram_123_backoff(text.lower())
        return [(tag[0], tag[1]) for tag in tags]
    elif self.tagger == 'tag_tnt':
        tags = POSTag('latin').tag_tnt(text.lower())
        return [(tag[0], tag[1]) for tag in tags]
    elif self.tagger == 'tag_crf':
        tags = POSTag('latin').tag_crf(text.lower())
        return [(tag[0], tag[1]) for tag in tags]
def CreateAllStaticECMWFRAPIDFiles(in_drainage_line,
                                   river_id,
                                   length_id,
                                   slope_id,
                                   next_down_id,
                                   in_catchment,
                                   catchment_river_id,
                                   rapid_output_folder,
                                   kfac_celerity=1000.0/3600.0,
                                   kfac_formula_type=3,
                                   kfac_length_units="km",
                                   lambda_k=0.35,
                                   x_value=0.3,
                                   nhdplus=False,
                                   taudem_network_connectivity_tree_file=None,
                                   file_geodatabase=None):
    """Create all static RAPID files plus the ECMWF grid weight tables.

    First generates the standard static RAPID inputs into
    *rapid_output_folder*, then builds the ECMWF weight tables from the
    catchments using the generated ``rapid_connect.csv``.

    :param in_drainage_line: path to the stream network shapefile.
    :param river_id: field with the river ID (e.g. 'HydroID', 'COMID').
    :param length_id: field with the river segment length.
    :param slope_id: field with the river segment slope.
    :param next_down_id: field with the next downstream river ID.
    :param in_catchment: path to the catchment shapefile.
    :param catchment_river_id: catchment field with the river ID.
    :param rapid_output_folder: folder for all generated RAPID output.
    :param kfac_celerity: flow wave celerity in m/s (default 1 km/hr).
    :param kfac_formula_type: kfac formula variant, default 3.
    :param kfac_length_units: units of *length_id*, "m" or "km".
    :param lambda_k: calibrated RAPID lambda value.
    :param x_value: muskingum X parameter [0-0.5].
    :param nhdplus: True when the drainage line is NHDPlus with VAA
        fields (COMID, FROMNODE, TONODE, DIVERGENCE).
    :param taudem_network_connectivity_tree_file: optional TauDEM tree
        file used to build the connectivity file.
    :param file_geodatabase: optional file geodatabase path; then
        *in_drainage_line* is a feature class name.
    """
    CreateAllStaticRAPIDFiles(in_drainage_line,
                              river_id,
                              length_id,
                              slope_id,
                              next_down_id,
                              rapid_output_folder,
                              kfac_celerity,
                              kfac_formula_type,
                              kfac_length_units,
                              lambda_k,
                              x_value,
                              nhdplus,
                              taudem_network_connectivity_tree_file,
                              file_geodatabase)
    rapid_connect_file = os.path.join(rapid_output_folder,
                                      'rapid_connect.csv')
    CreateAllStaticECMWFFiles(in_catchment,
                              catchment_river_id,
                              rapid_output_folder,
                              rapid_connect_file,
                              file_geodatabase)
def recv_from_app(timeout=_default_timeout):
    """Receive application-layer data queued for network transmission.

    :param timeout: seconds to wait for queued data.
    :return: 2-tuple (flowaddr, data) taken from the application queue,
        where flowaddr is (protocol, localaddr, localport, remoteaddr,
        remoteport).
    :raises NoPackets: if nothing arrives within *timeout*.
    """
    try:
        return ApplicationLayer._from_app.get(timeout=timeout)
    except Empty:
        pass
    raise NoPackets()
def _write_html_file(word, translations, data_dir, native=False):
    """Render the translations of *word* into an HTML file.

    The file goes to ``data_dir/translations`` (or
    ``translations_native`` when *native* is True); parent directories
    are created as needed.

    :param word: the translated word; also used as the file name.
    :param translations: dictionary of word translations.
    :param data_dir: pathlib.Path under which html files are saved.
    :param native: write into the native-language translations folder.
    """
    content_str = _create_html_file_content(translations)
    html_string = HTML_TEMPLATE.replace("{% word %}", word)
    html_string = html_string.replace("{% content %}", content_str)
    trans_dir = "translations"
    if native:
        trans_dir += "_native"
    translations_dir = data_dir.joinpath(trans_dir)
    fname = translations_dir.joinpath("{word}.html".format(word=word))
    save_file(fname, html_string, mk_parents=True)
def createUser(self, localpart, domain, password=None):
    """Create a new, blank user account.

    :param localpart: local part of the username (the 'alice' in
        'alice@example.com').
    :param domain: domain part (the 'example.com').
    :param password: password for the account; when None an 8-character
        alphanumeric password is generated automatically.
        NOTE(review): uses ``random`` (not cryptographically secure)
        and the Python-2-only ``xrange`` - confirm target interpreter
        before changing.
    """
    loginSystem = self.browser.store.parent.findUnique(userbase.LoginSystem)
    if password is None:
        password = u''.join([random.choice(string.ascii_letters + string.digits)
                             for i in xrange(8)])
    loginSystem.addAccount(localpart, domain, password)
def update_dns(self, new_ip):
    """Call the No-IP update API with *new_ip* for this hostname.

    Token auth (``auth_type == 'T'``) embeds the token in the URL;
    otherwise HTTP Basic auth headers are sent. The stripped response
    text is cached on ``self.last_ddns_response``.

    :param new_ip: IP address to publish.
    :return: (status_code, response_text) tuple.
    """
    headers = None
    if self.auth_type == 'T':
        api_call_url = self._base_url.format(hostname=self.hostname,
                                             token=self.auth.token,
                                             ip=new_ip)
    else:
        api_call_url = self._base_url.format(hostname=self.hostname,
                                             ip=new_ip)
        headers = {
            'Authorization': "Basic %s" % self.auth.base64key.decode('utf-8'),
            'User-Agent': "%s/%s %s" % (__title__, __version__, __email__)
        }
    r = requests.get(api_call_url, headers=headers)
    self.last_ddns_response = str(r.text).strip()
    return r.status_code, r.text
def _add_edge(self, layer, input_id, output_id): if layer in self.layer_to_id: layer_id = self.layer_to_id[layer] if input_id not in self.layer_id_to_input_node_ids[layer_id]: self.layer_id_to_input_node_ids[layer_id].append(input_id) if output_id not in self.layer_id_to_output_node_ids[layer_id]: self.layer_id_to_output_node_ids[layer_id].append(output_id) else: layer_id = len(self.layer_list) self.layer_list.append(layer) self.layer_to_id[layer] = layer_id self.layer_id_to_input_node_ids[layer_id] = [input_id] self.layer_id_to_output_node_ids[layer_id] = [output_id] self.adj_list[input_id].append((output_id, layer_id)) self.reverse_adj_list[output_id].append((input_id, layer_id))
Add a new layer to the graph. The nodes should be created in advance.
def _updateToDeleted(self,directory,fn,dentry,db,service): if fn not in db: print("%s - rm: not in DB, skipping!"%(fn)) return services=self.sman.GetServices(fn) if (not services): print("%s - no manger of this file type found"%(fn)) return if service: if db[fn]['services'].has_key(service): db[fn]['services'][service]['status']=self.ST_DELETED else: print("%s - Service %s doesn't exist, can't delete"%(fn,service)) return for service in db[fn]['services']: db[fn]['services'][service]['status']=self.ST_DELETED return
Changes to status to 'D' as long as a handler exists, directory - DIR where stuff is happening fn - File name to be added dentry - dictionary entry as returned by GetStatus for this file db - pusher DB for this directory service - service to delete, None means all
def _logfile_sigterm_handler(*_):
    """Handle SIGTERM: log the event, dump the log file, and exit.

    :raises SystemExit: with the SIGTERM signal number as the exit
        status. (Previously ``sys.exit(signal)`` passed the ``signal``
        *module* object, which exits with status 1 and prints the
        module repr instead of the signal number.)
    """
    logging.error('Received SIGTERM.')
    write_logfile()
    print('Received signal. Please see the log file for more information.',
          file=sys.stderr)
    sys.exit(signal.SIGTERM)
def _ensure_product_string(cls, product): if isinstance(product, str): return product if isinstance(product, list): return os.path.join(*product) raise DataError("Unknown object (not str or list) specified as a component product", product=product)
Ensure that all product locations are strings. Older components specify paths as lists of path components. Join those paths into a normal path string.
def read_mesh(fname):
    """Read mesh data from file.

    The format is inferred from the filename; only '.obj' and
    '.obj.gz' are currently supported.

    :param fname: file name to read.
    :return: (vertices, faces, normals, texcoords) from the reader.
    :raises ValueError: when the format is missing or unsupported.
    """
    fmt = op.splitext(fname)[1].lower()
    if fmt == '.gz':
        # Peel off the compression suffix to find the real format.
        fmt = op.splitext(op.splitext(fname)[0])[1].lower()
    if fmt == '.obj':
        # BUGFIX: was `fmt in ('.obj')` - a substring test against the
        # string '.obj' (so '.ob' would also have matched).
        return WavefrontReader.read(fname)
    elif not fmt:
        # BUGFIX: was `not format` (the builtin, always truthy), which
        # made this branch unreachable.
        raise ValueError('read_mesh needs could not determine format.')
    else:
        raise ValueError('read_mesh does not understand format %s.' % fmt)
def from_config(cls, name, config):
    """Return a configured instance built from *config*.

    Validates the config, constructs the instance, fills in the name
    (config "name" key, falling back to *name*) and applies the
    configuration. ``Pluggable`` subclasses override this to also check
    that the plugin is installed correctly.

    :param name: fallback name when the config has no "name" key.
    :param config: configuration mapping to validate and apply.
    """
    cls.validate_config(config)
    instance = cls()
    if not instance.name:
        instance.name = config.get("name", name)
    instance.apply_config(config)
    return instance
def get_resource(self, path):
    """GET *path* from the API and return the decoded JSON body.

    :param path: resource path to request.
    :raises exception.ServiceException: when the response body is not
        valid JSON.
    """
    response = self._http_request(path)
    try:
        return response.json()
    except ValueError:
        raise exception.ServiceException("Invalid service response.")
def redirect_stream(system_stream, target_stream):
    """Redirect *system_stream* (e.g. ``sys.stdout``) to *target_stream*.

    The redirection happens at the file-descriptor level via
    ``os.dup2``, so it also affects child processes and C-level writes.
    When *target_stream* is None, the stream is redirected to the
    operating system's null device.
    """
    if target_stream is None:
        target_fd = os.open(os.devnull, os.O_RDWR)
    else:
        target_fd = target_stream.fileno()
    os.dup2(target_fd, system_stream.fileno())
def get_clouds(wxdata: [str]) -> ([str], list):
    """Split cloud-layer tokens out of a report token list.

    Tokens whose prefix is in ``CLOUD_LIST`` or that start with 'VV'
    (vertical visibility) are removed from *wxdata* in place and parsed
    with ``make_cloud``.

    :return: (remaining report list, clouds sorted by altitude then type)
    """
    clouds = []
    # Iterate in reverse so popping by index keeps earlier indices valid.
    for i, item in reversed(list(enumerate(wxdata))):
        if item[:3] in CLOUD_LIST or item[:2] == 'VV':
            cloud = wxdata.pop(i)
            clouds.append(make_cloud(cloud))
    return wxdata, sorted(clouds, key=lambda cloud: (cloud.altitude, cloud.type))
def get_time(self, force_uptime=False):
    """Return the device uptime, or encoded UTC time when available.

    When UTC is reported (``self.is_utc``), the offset-adjusted value
    has bit 31 set to flag it as an encoded UTC timestamp rather than
    a plain uptime.

    :param force_uptime: always return the raw uptime, default False.
    :return: the current uptime or encoded UTC time as an int.
    """
    if force_uptime:
        return self.uptime
    encoded = self.uptime + self.time_offset
    if self.is_utc:
        encoded |= (1 << 31)
    return encoded
def started_tasks(self, task_registry_id=None, task_cls=None):
    """Return started tasks, optionally filtered.

    :param task_registry_id: when given, return the single started task
        whose ``__registry_tag__`` matches (None when absent);
        *task_cls* then acts as an extra isinstance filter.
    :param task_cls: restrict results to instances of this class.
    :return: a task or None when *task_registry_id* is given, otherwise
        a tuple of the matching started tasks.
    """
    if task_registry_id is not None:
        task = None
        # Keep the *last* match if several tasks share the tag.
        for registered_task in self.__started:
            if registered_task.__registry_tag__ == task_registry_id:
                task = registered_task
        if task_cls is not None and task is not None:
            if isinstance(task, task_cls) is True:
                return task
            return None
        return task
    # No id given: return all started tasks (minus any None slots).
    result = filter(lambda x: x is not None, self.__started)
    if task_cls is not None:
        result = filter(lambda x: isinstance(x, task_cls), result)
    return tuple(result)
def parse_unifrac_v1_8(unifrac, file_data):
    """Parse data from a Qiime <= 1.8 unifrac results file.

    Sample coordinate rows are read until the first empty line and
    stored under ``unifrac['pcd']``; the last two lines hold the
    eigenvalues and the percent variation explained.

    :param unifrac: dict to fill (must already contain a 'pcd' dict).
    :param file_data: unifrac result lines, whitespace-stripped.
    :return: the same *unifrac* dict, updated in place.
    """
    for line in file_data:
        if not line:
            break
        fields = line.split("\t")
        unifrac["pcd"][fields[0]] = [float(v) for v in fields[1:]]
    eig_fields = file_data[-2].split("\t")[1:]
    var_fields = file_data[-1].split("\t")[1:]
    unifrac["eigvals"] = [float(v) for v in eig_fields]
    unifrac["varexp"] = [float(v) for v in var_fields]
    return unifrac
def server_document(url="default", relative_urls=False, resources="default", arguments=None):
    """Return a <script> tag that embeds content from a Bokeh server.

    Apps embedded this way do NOT set the browser window title.

    :param url: URL of the Bokeh application; "default" uses the
        default server URL.
    :param relative_urls: use relative links for BokehJS resources
        (needed behind some reverse-proxy setups).
    :param resources: "default" loads the standard JS/CSS files; None
        skips them (serve matching-version resources yourself).
    :param arguments: dict of HTTP request arguments forwarded to the
        Bokeh application code.
    :return: UTF-8 encoded autoload ``<script>`` tag.
    """
    url = _clean_url(url)
    app_path = _get_app_path(url)
    elementid = make_id()
    # Assemble the autoload src query string piece by piece.
    src_path = _src_path(url, elementid)
    src_path += _process_app_path(app_path)
    src_path += _process_relative_urls(relative_urls, url)
    src_path += _process_resources(resources)
    src_path += _process_arguments(arguments)
    tag = AUTOLOAD_TAG.render(
        src_path = src_path,
        app_path = app_path,
        elementid = elementid,
    )
    return encode_utf8(tag)
def tag_myself(project='cwc', **other_tags):
    """Tag the EC2 instance this code is running on.

    Queries the EC2 instance-metadata service for the instance id and
    applies *project* plus any extra tags. Logs a warning and returns
    early when the metadata service is unreachable (i.e. when not
    running inside an EC2/batch environment).
    """
    base_url = "http://169.254.169.254"
    try:
        resp = requests.get(base_url + "/latest/meta-data/instance-id")
    except requests.exceptions.ConnectionError:
        logger.warning("Could not connect to service. Note this should only "
                       "be run from within a batch job.")
        return
    instance_id = resp.text
    tag_instance(instance_id, project=project, **other_tags)
    return
def extract_xz(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract an XZ archive with the lzma Python module.

    Thin wrapper delegating to the shared ``_extract`` helper with the
    'xz' format; *interactive* is accepted for interface parity with
    the other extractors but is not forwarded.
    """
    return _extract(archive, compression, cmd, 'xz', verbosity, outdir)
def percent_records_missing_location(user, method=None):
    """Return the fraction of *user*'s records missing a location.

    :param method: unused; kept for interface compatibility.
    :return: float in [0, 1]; 0.0 when the user has no records.
    """
    if len(user.records) == 0:
        return 0.
    missing_locations = sum([1 for record in user.records
                             if record.position._get_location(user) is None])
    return float(missing_locations) / len(user.records)
def construct_user_list(raw_users=None):
    """Construct a Users collection from a list of user dicts.

    :param raw_users: iterable of user dicts; ``None`` is treated as
        empty. (BUGFIX: previously ``None`` - the default - was iterated
        directly and raised TypeError.)
    :return: a ``Users`` collection of ``User`` objects.
    """
    users = Users(oktypes=User)
    for user_dict in raw_users or []:
        public_keys = None
        if user_dict.get('public_keys'):
            # Keys arrive base64-encoded; wrap each in a PublicKey.
            public_keys = [PublicKey(b64encoded=x, raw=None)
                           for x in user_dict.get('public_keys')]
        users.append(User(name=user_dict.get('name'),
                          passwd=user_dict.get('passwd'),
                          uid=user_dict.get('uid'),
                          gid=user_dict.get('gid'),
                          home_dir=user_dict.get('home_dir'),
                          gecos=user_dict.get('gecos'),
                          shell=user_dict.get('shell'),
                          public_keys=public_keys,
                          sudoers_entry=user_dict.get('sudoers_entry')))
    return users
def has_variantcalls(data):
    """Return truthy when *data* is configured for variant calling.

    True when the analysis name starts with a variant pipeline prefix,
    or when a variantcaller is configured.
    NOTE(review): when only a variantcaller is set, the variantcaller
    value itself (not a bool) is returned - callers should rely only
    on its truthiness.
    """
    analysis = get_analysis(data).lower()
    variant_pipeline = analysis.startswith(("standard", "variant", "variant2"))
    variantcaller = get_variantcaller(data)
    return variant_pipeline or variantcaller
def validate_ok_for_replace(replacement):
    """Validate a replacement document.

    The document must be a mapping and, unless it is a
    RawBSONDocument, its first key must not be a '$'-prefixed update
    operator (only the first key is checked, matching server behavior).

    :raises ValueError: if the first key starts with '$'.
    """
    validate_is_mapping("replacement", replacement)
    if replacement and not isinstance(replacement, RawBSONDocument):
        first = next(iter(replacement))
        if first.startswith('$'):
            raise ValueError('replacement can not include $ operators')
def batch_delete_intents(self,
                         parent,
                         intents,
                         retry=google.api_core.gapic_v1.method.DEFAULT,
                         timeout=google.api_core.gapic_v1.method.DEFAULT,
                         metadata=None):
    """Delete intents in the specified agent (long-running operation).

    :param parent: agent resource name
        (``projects/<Project ID>/agent``).
    :param intents: collection of intents to delete; only each
        intent's ``name`` needs to be filled in.
    :param retry: retry policy; None disables retries.
    :param timeout: per-attempt timeout in seconds.
    :param metadata: additional metadata provided to the method.
    :return: an operation future resolving to ``google.protobuf.Empty``.
    :raises google.api_core.exceptions.GoogleAPICallError: on failure.
    :raises ValueError: if the parameters are invalid.
    """
    # Wrap the transport method once and cache the wrapped callable.
    if 'batch_delete_intents' not in self._inner_api_calls:
        self._inner_api_calls[
            'batch_delete_intents'] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.batch_delete_intents,
                default_retry=self._method_configs[
                    'BatchDeleteIntents'].retry,
                default_timeout=self._method_configs['BatchDeleteIntents']
                .timeout,
                client_info=self._client_info,
            )
    request = intent_pb2.BatchDeleteIntentsRequest(
        parent=parent,
        intents=intents,
    )
    operation = self._inner_api_calls['batch_delete_intents'](
        request, retry=retry, timeout=timeout, metadata=metadata)
    return google.api_core.operation.from_gapic(
        operation,
        self.transport._operations_client,
        empty_pb2.Empty,
        metadata_type=struct_pb2.Struct,
    )
def merge(self, df: pd.DataFrame, on: str, how: str="outer", **kwargs):
    """Merge the main dataframe with *df* and store the result.

    :param df: the pandas dataframe to merge into the main one
    :param on: column to join on (``pd.merge`` *on* parameter)
    :param how: join type for ``pd.merge``, defaults to "outer"
    :param kwargs: extra keyword arguments for ``pd.merge``
    """
    try:
        merged = pd.merge(self.df, df, on=on, how=how, **kwargs)
        self.df = merged
    except Exception as e:
        # Route failures through the instance's error reporter.
        self.err(e, self.merge, "Can not merge dataframes")
def get_cached_element_by_path(data_tree, path):
    """Look up an element in an ArTree by a '/'-separated path.

    :param data_tree: ArTree root to search from.
    :param path: slash-separated child names; empty/whitespace-only
        segments are skipped.
    :return: the ``ref`` of the resolved node, or None when *data_tree*
        is not an ArTree or the path does not resolve.
    """
    if not isinstance(data_tree, ArTree):
        logger.warning("%s not called with ArTree, return None",
                       get_cached_element_by_path.__name__)
        return None
    ptr = data_tree
    for name in path.split('/'):
        if ptr is None:
            return None
        if name.strip():
            ptr = ptr.get_child_by_name(name)
    return ptr.ref if ptr else None
def isin(self, values):
    """Select samples whose metadata attribute is one of *values*.

    Builds an OR-chain of equality conditions, one per element.

    :param values: a list of elements to match against.
    :return: a new complex condition (None when *values* is empty).
    :raises TypeError: if *values* is not a list.
    :raises SyntaxError: when called on an already-complex condition.
    """
    if not isinstance(values, list):
        # BUGFIX: the message previously said "a string" although the
        # check requires (and the docs promise) a list.
        raise TypeError("Input should be a list. {} was provided".format(type(values)))
    if not (self.name.startswith("(") and self.name.endswith(")")):
        first = True
        new_condition = None
        for v in values:
            if first:
                first = False
                new_condition = self.__eq__(v)
            else:
                new_condition = new_condition.__or__(self.__eq__(v))
        return new_condition
    else:
        raise SyntaxError("You cannot use 'isin' with a complex condition")
def _check_transition_target(self, transition): to_state_id = transition.to_state to_outcome_id = transition.to_outcome if to_state_id == self.state_id: if to_outcome_id not in self.outcomes: return False, "to_outcome is not existing" else: if to_state_id not in self.states: return False, "to_state is not existing" if to_outcome_id is not None: return False, "to_outcome must be None as transition goes to child state" return True, "valid"
Checks the validity of a transition target Checks whether the transition target is valid. :param rafcon.core.transition.Transition transition: The transition to be checked :return bool validity, str message: validity is True, when the transition is valid, False else. message gives more information especially if the transition is not valid