Columns: code (string, lengths 20 to 4.93k), docstring (string, lengths 33 to 1.27k), source (string, 3 classes)
def split(cls, n, contiguous, can_query=itertools.chain(itertools.repeat(True, 50), itertools.repeat(False)).next, _app=None):
    if n < 1:
        raise ValueError('n must be >= 1')

    ranges = None
    if can_query():
        if not contiguous:
            ns_keys = get_namespace_keys(_app, n + 1)
            if not ns_keys:
                return []
            else:
                if len(ns_keys) <= n:
                    ns_range = []
                    for ns_key in ns_keys:
                        ns_range.append(NamespaceRange(ns_key.name() or '',
                                                       ns_key.name() or '',
                                                       _app=_app))
                    return sorted(ns_range,
                                  key=lambda ns_range: ns_range.namespace_start)
                ranges = [NamespaceRange(ns_keys[0].name() or '', _app=_app)]
        else:
            ns_range = NamespaceRange(_app=_app).normalized_start()
            if ns_range is None:
                return [NamespaceRange(_app=_app)]
            ranges = [ns_range]
    else:
        ranges = [NamespaceRange(_app=_app)]

    singles = []
    while ranges and (len(ranges) + len(singles)) < n:
        namespace_range = ranges.pop(0)
        if namespace_range.is_single_namespace:
            singles.append(namespace_range)
        else:
            left, right = namespace_range.split_range()
            if can_query():
                right = right.normalized_start()
            if right is not None:
                ranges.append(right)
            ranges.append(left)
    ns_ranges = sorted(singles + ranges,
                       key=lambda ns_range: ns_range.namespace_start)

    if contiguous:
        if not ns_ranges:
            return [NamespaceRange(_app=_app)]

        continuous_ns_ranges = []
        for i in range(len(ns_ranges)):
            if i == 0:
                namespace_start = MIN_NAMESPACE
            else:
                namespace_start = ns_ranges[i].namespace_start

            if i == len(ns_ranges) - 1:
                namespace_end = MAX_NAMESPACE
            else:
                namespace_end = _ord_to_namespace(
                    _namespace_to_ord(ns_ranges[i + 1].namespace_start) - 1)

            continuous_ns_ranges.append(NamespaceRange(namespace_start,
                                                       namespace_end,
                                                       _app=_app))
        return continuous_ns_ranges
    else:
        return ns_ranges
Splits the complete NamespaceRange into n equally-sized NamespaceRanges. Args: n: The maximum number of NamespaceRanges to return. Fewer than n namespaces may be returned. contiguous: If True then the returned NamespaceRanges will cover the entire space of possible namespaces (i.e. from MIN_NAMESPACE to MAX_NAMESPACE) without gaps. If False then the returned NamespaceRanges may exclude namespaces that don't appear in the datastore. can_query: A function that returns True if split() can query the datastore to generate more fair namespace range splits, and False otherwise. If not set then split() is allowed to make 50 datastore queries. Returns: A list of at most n NamespaceRanges representing a near-equal distribution of actual existing datastore namespaces. The returned list will be sorted lexicographically. Raises: ValueError: if n is < 1.
codesearchnet
def get_raw_entry(self, variant_line=None, variant_dict=None, vcf_header=None, individual_id=None, dict_key=None):
    if variant_line:
        variant_line = variant_line.rstrip().split()

    entry = None
    if self.field == 'CHROM':
        if variant_line:
            entry = variant_line[0]
        elif variant_dict:
            entry = variant_dict['CHROM']
    elif self.field == 'POS':
        if variant_line:
            entry = variant_line[1]
        elif variant_dict:
            entry = variant_dict['POS']
    elif self.field == 'ID':
        if variant_line:
            entry = variant_line[2]
        elif variant_dict:
            entry = variant_dict['ID']
    elif self.field == 'REF':
        if variant_line:
            entry = variant_line[3]
        elif variant_dict:
            entry = variant_dict['REF']
    elif self.field == 'ALT':
        if variant_line:
            entry = variant_line[4]
        elif variant_dict:
            entry = variant_dict['ALT']
    elif self.field == 'QUAL':
        if variant_line:
            entry = variant_line[5]
        elif variant_dict:
            entry = variant_dict['QUAL']
    elif self.field == 'FILTER':
        if variant_line:
            entry = variant_line[6]
        elif variant_dict:
            entry = variant_dict['FILTER']
    elif self.field == 'INFO':
        if variant_line:
            for info_annotation in variant_line[7].split(';'):
                splitted_annotation = info_annotation.split('=')
                if self.info_key == splitted_annotation[0]:
                    if len(splitted_annotation) == 2:
                        entry = splitted_annotation[1]
        elif variant_dict:
            entry = variant_dict.get('info_dict', {}).get(self.info_key)

        if self.dict_entry and entry:
            first_split = entry.split(self.separators[0])
            for annotation in first_split:
                splitted_entry = annotation.split(self.separators[1])
                key = splitted_entry[0]
                value = splitted_entry[1]
                if dict_key:
                    if key == dict_key:
                        entry = value
                else:
                    entry = value
    elif self.field == 'FORMAT':
        if variant_line:
            entry = variant_line[8]
        elif variant_dict:
            entry = variant_dict['FORMAT']
    elif self.field == "sample_id":
        if not individual_id:
            raise IOError("If 'sample_id' a individual id must be provided")
        if not self.gt_key:
            raise IOError("If 'sample_id' a genotype key must be provided")
        if variant_line:
            if not vcf_header:
                raise IOError("If 'sample_id' the vcf header must be provided")
            format_info = variant_line[8]
            for i, head in enumerate(vcf_header):
                if head == individual_id:
                    raw_gt_call = variant_line[i]
        elif variant_dict:
            format_info = variant_dict['FORMAT']
            raw_gt_call = variant_dict[individual_id]
        entry_dict = dict(zip(format_info.split(':'), raw_gt_call.split(':')))
        entry = entry_dict.get(self.gt_key, '.')

    return entry
Return the raw entry from the vcf field. If no entry was found, return None. Args: variant_line (str): A vcf formatted variant line variant_dict (dict): A dictionary with the variant information vcf_header (list): A list with the vcf header line individual_id (str): The individual id to get gt call dict_key (str): The key to look up when the entry is a dict-like annotation Returns: The raw entry found in variant line
juraj-google-style
def tf_initialize(self, x_init, base_value, target_value, estimated_improvement):
    self.base_value = base_value

    if estimated_improvement is None:
        estimated_improvement = tf.abs(x=base_value)

    first_step = super(LineSearch, self).tf_initialize(x_init)

    improvement = tf.divide(
        x=(target_value - self.base_value),
        y=tf.maximum(x=estimated_improvement, y=util.epsilon))
    last_improvement = improvement - 1.0

    if self.mode == 'linear':
        deltas = [-t * self.parameter for t in x_init]
        self.estimated_incr = -estimated_improvement * self.parameter
    elif self.mode == 'exponential':
        deltas = [-t * self.parameter for t in x_init]

    return first_step + (deltas, improvement, last_improvement, estimated_improvement)
Initialization step preparing the arguments for the first iteration of the loop body. Args: x_init: Initial solution guess $x_0$. base_value: Value $f(x')$ at $x = x'$. target_value: Value $f(x_0)$ at $x = x_0$. estimated_improvement: Estimated value at $x = x_0$, $f(x')$ if None. Returns: Initial arguments for tf_step.
codesearchnet
def split(self):
    ranges = []
    for bound in self.bounds:
        range = VersionRange(None)
        range.bounds = [bound]
        ranges.append(range)
    return ranges
Split into separate contiguous ranges. Returns: A list of VersionRange objects. For example, the range "3|5+" will be split into ["3", "5+"].
codesearchnet
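A toy stand-in illustrating the same split-by-bounds idea without rez's VersionRange class (ToyRange and its string bounds are hypothetical):

class ToyRange:
    def __init__(self, bounds):
        self.bounds = bounds  # each bound represents one contiguous sub-range

    def split(self):
        # One new range per bound, mirroring the method above.
        return [ToyRange([b]) for b in self.bounds]

combined = ToyRange(['3', '5+'])
print([r.bounds for r in combined.split()])  # [['3'], ['5+']]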
def MakeZip(self, input_dir, output_file):
    logging.info("Generating zip template file at %s", output_file)
    basename, _ = os.path.splitext(output_file)
    shutil.make_archive(
        basename, "zip", base_dir=".", root_dir=input_dir, verbose=True)
Creates a ZIP archive of the files in the input directory. Args: input_dir: the name of the input directory. output_file: the name of the output ZIP archive without extension.
juraj-google-style
def sheets_tab_id(config, auth, sheet_url_or_name, sheet_tab):
    sheet_id = None
    tab_id = None
    spreadsheet = sheets_get(config, auth, sheet_url_or_name)
    if spreadsheet:
        sheet_id = spreadsheet['spreadsheetId']
        for tab in spreadsheet.get('sheets', []):
            if tab['properties']['title'] == sheet_tab:
                tab_id = tab['properties']['sheetId']
                break
    return (sheet_id, tab_id)
Pull sheet tab id from URL, name, or id itself. Args: config - see starthinker/util/configuration.py auth - user or service sheet_url_or_name - one of: URL, document title, or id sheet_tab - name of tab to get id for Returns: Pair of sheet id and tab id.
github-repos
def locked_get(self):
    credentials = None
    if self._cache:
        json = self._cache.get(self._key_name)
        if json:
            credentials = client.Credentials.new_from_json(json)
    if credentials is None:
        entity = self._get_entity()
        if entity is not None:
            credentials = getattr(entity, self._property_name)
            if self._cache:
                self._cache.set(self._key_name, credentials.to_json())
    if credentials and hasattr(credentials, 'set_store'):
        credentials.set_store(self)
    return credentials
Retrieve Credential from datastore. Returns: oauth2client.Credentials
codesearchnet
def _count_nonzero(input_tensor, dtype=dtypes.int64):
    with ops.name_scope('count_nonzero', values=[input_tensor]):
        zero = array_ops.zeros([], dtype=input_tensor.dtype)
        nonzero_count = math_ops.reduce_sum(
            math_ops.cast(math_ops.not_equal(input_tensor, zero), dtype=dtype),
            name='nonzero_count')
        return nonzero_count
Same as math_ops.count_nonzero. The reduction is done in dtype, which can be faster for 32-bit dtypes. Args: input_tensor: numeric tensor dtype: reduction dtype Returns: number of nonzero values with type dtype
github-repos
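For intuition, the same cast-and-sum reduction written in plain NumPy (an analogue, not the TF internals used above):

import numpy as np

x = np.array([[0.0, 1.5, 0.0], [2.0, 0.0, 3.0]], dtype=np.float32)
count = np.sum((x != 0).astype(np.int64))  # cast the boolean mask, then sum it
print(count)  # 3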
def one_hot_encoding(labels, num_classes, scope=None):
    with tf.name_scope(scope, 'OneHotEncoding', [labels]):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat(axis=1, values=[indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels
Transform numeric labels into onehot_labels. Args: labels: [batch_size] target labels. num_classes: total number of classes. scope: Optional scope for name_scope. Returns: one hot encoding of the labels.
codesearchnet
def db_en010(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float for field `db_en010`'.format(value))
    self._db_en010 = value
Corresponds to IDD Field `db_en010` mean coincident dry-bulb temperature to Enthalpy corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `db_en010` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def fold_point(p, lattice, coords_are_cartesian=False):
    if coords_are_cartesian:
        p = lattice.get_fractional_coords(p)
    else:
        p = np.array(p)

    p = np.mod(p + 0.5 - 1e-10, 1) - 0.5 + 1e-10
    p = lattice.get_cartesian_coords(p)

    closest_lattice_point = None
    smallest_distance = 10000
    for i in (-1, 0, 1):
        for j in (-1, 0, 1):
            for k in (-1, 0, 1):
                lattice_point = np.dot((i, j, k), lattice.matrix)
                dist = np.linalg.norm(p - lattice_point)
                if closest_lattice_point is None or dist < smallest_distance:
                    closest_lattice_point = lattice_point
                    smallest_distance = dist

    if not np.allclose(closest_lattice_point, (0, 0, 0)):
        p = p - closest_lattice_point
    return p
Folds a point with coordinates p inside the first Brillouin zone of the lattice. Args: p: coordinates of one point lattice: Lattice object used to convert from reciprocal to cartesian coordinates coords_are_cartesian: Set to True if you are providing coordinates in cartesian coordinates. Defaults to False. Returns: The cartesian coordinates folded inside the first Brillouin zone
codesearchnet
def __init__(self, resolver_context):
    super(VShadowFileSystem, self).__init__(resolver_context)
    self._file_object = None
    self._vshadow_volume = None
Initializes a file system. Args: resolver_context (Context): resolver context.
juraj-google-style
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    stores = match.get('Stores', {})
    for volume_name, volume in iter(stores.items()):
        datetime_value = volume.get('CreationDate', None)
        if not datetime_value:
            continue

        partial_path = volume['PartialPath']

        event_data = plist_event.PlistTimeEventData()
        event_data.desc = 'Spotlight Volume {0:s} ({1:s}) activated.'.format(
            volume_name, partial_path)
        event_data.key = ''
        event_data.root = '/Stores'

        event = time_events.PythonDatetimeEvent(
            datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts relevant Volume Configuration Spotlight entries. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
juraj-google-style
async def update_server_data(server):
    data = datatools.get_data()

    send_welcome_message = False
    if server.id not in data["discord"]["servers"]:
        logger.debug("Adding new server to serverdata")
        data["discord"]["servers"][server.id] = {"prefix": "!"}
        if "mute_intro" not in data or not data["mute_intro"]:
            send_welcome_message = True

    _dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    _dir_modules = "{}/../".format(_dir)
    for module_name in os.listdir(_dir_modules):
        if module_name.startswith("_") or module_name.startswith("!"):
            continue
        if not os.path.isfile("{}/{}/_data.py".format(_dir_modules, module_name)):
            logger.warning("No _data.py file found for module {}".format(module_name))
            continue

        try:
            import_name = ".discord_modis.modules.{}.{}".format(module_name, "_data")
            _data = importlib.import_module(import_name, "modis")
            if _data.modulename not in data["discord"]["servers"][server.id]:
                data["discord"]["servers"][server.id][_data.modulename] = _data.sd_structure
                datatools.write_data(data)
        except Exception as e:
            logger.error("Could not initialise module {}".format(module_name))
            logger.exception(e)

    datatools.write_data(data)

    if send_welcome_message:
        default_channel = server.default_channel
        if not default_channel:
            for channel in server.channels:
                if channel.name == "general":
                    default_channel = channel
                    break
        if not default_channel:
            for channel in server.channels:
                if "general" in channel.name:
                    default_channel = channel
                    break
        if not default_channel:
            for channel in server.channels:
                if channel.type == discord.ChannelType.text:
                    default_channel = channel
                    break
        if default_channel:
            hello_message = "Hello! I'm Modis.\n\n" + \
                "The prefix is currently `!`, and can be changed at any time using `!prefix`\n\n" + \
                "You can use `!help` to get help commands for all modules, " + \
                "or {} me to get the server prefix and help commands.".format(server.me.mention)
            await client.send_message(default_channel, hello_message)
Updates the server info for the given server Args: server: The Discord server to update info for
juraj-google-style
def release(self):
    if not self.acquired:
        return False
    os.close(self.fd)
    if os.path.exists(self.path):
        os.remove(self.path)
    self.acquired = False
    return True
Cleans up the lockfile if it was acquired. Args: self (JLock): the ``JLock`` instance Returns: ``False`` if the lock was not released or the lock is not acquired, otherwise ``True``.
juraj-google-style
def scan_message(self, message, regex):
    for line in message.split('\n'):
        if re.search(regex, line, flags=re.IGNORECASE | re.MULTILINE):
            return line
    return ''
Scans the message with regex and returns the first line that matches. Keyword arguments: message -- A (long) string, e.g. an email body, that will be scanned. regex -- A regular expression string that the message will be scanned against. Returns: Matching line or empty string
codesearchnet
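A quick standalone check of the scanning behavior, with the method above rewritten as a free function (the class context is omitted):

import re

def scan_message(message, regex):
    for line in message.split('\n'):
        if re.search(regex, line, flags=re.IGNORECASE | re.MULTILINE):
            return line
    return ''

body = 'Hello\nYour Tracking Number is 1Z999\nRegards'
print(scan_message(body, r'tracking number'))  # 'Your Tracking Number is 1Z999' (IGNORECASE match)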
def join(self, *args, **kwargs):
    super(ThreadReturn, self).join(*args, **kwargs)
    return self._return
Joins the thread. Args: self (ThreadReturn): the ``ThreadReturn`` instance args: optional list of arguments kwargs: optional key-word arguments Returns: The return value of the exited thread.
codesearchnet
def get_type(name, env, non_generic):
    if name in env:
        if isinstance(env[name], MultiType):
            return clone(env[name])
        return fresh(env[name], non_generic)
    else:
        print("W: Undefined symbol {0}".format(name))
        return TypeVariable()
Get the type of identifier name from the type environment env. Args: name: The identifier name env: The type environment mapping from identifier names to types non_generic: A set of non-generic TypeVariables Raises: ParseError: Raised if name is an undefined symbol in the type environment.
juraj-google-style
def generate(passphrase, trees=['primary']):
    seeds, multi_wallet = MultiWallet.generate(trees, entropy=True)
    result = {}
    for tree in trees:
        result[tree] = dict(
            private_seed=seeds[tree],
            public_seed=multi_wallet.public_wif(tree),
            encrypted_seed=PassphraseBox.encrypt(passphrase, seeds[tree]))
    return result
Generate a seed for the primary tree of a Gem wallet. You may choose to store the passphrase for a user so the user doesn't have to type it in every time. This is okay (although the security risks should be obvious) but Gem strongly discourages storing even the encrypted private seed, and storing both the passphrase and the private seed is completely insane. Don't do it. Args: passphrase (str): The passphrase that will be used to encrypt the seed before it's sent to Gem. Key-stretching is done with PBKDF2 and encryption is done with nacl's SecretBox. trees (list of str): A list of names to generate trees for. For User Wallets this will be ['primary'], for Application Wallets it will be ['primary', 'backup']. Returns: A dict of dicts containing the serialized public master node, and a sub-dict with the encrypted private seed for each tree in `trees`.
codesearchnet
def binary_n(total_N, min_n=50):
    max_exp = np.log2(1.0 * total_N / min_n)
    max_exp = int(np.floor(max_exp))
    return [int(np.floor(1.0 * total_N / (2 ** i))) for i in range(1, max_exp + 1)]
Creates a list of values by successively halving the total length total_N until the resulting value is less than min_n. Non-integer results are rounded down. Args: total_N (int): total length Kwargs: min_n (int): minimal length after division Returns: list of integers: total_N/2, total_N/4, total_N/8, ... until total_N/2^i < min_n
codesearchnet
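Assuming the function above is in scope with numpy imported as np, the halving sequence for 1000 samples and min_n=50 works out as:

print(binary_n(1000, min_n=50))  # [500, 250, 125, 62]; the next halving (31) would fall below min_n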
def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):
    summary_ops = ops.get_collection(key)
    if not summary_ops:
        return None
    else:
        return merge_summary(summary_ops)
Merges all summaries collected in the default graph. This op is deprecated. Please switch to tf.compat.v1.summary.merge_all, which has identical behavior. Args: key: `GraphKey` used to collect the summaries. Defaults to `GraphKeys.SUMMARIES`. Returns: If no summaries were collected, returns None. Otherwise returns a scalar `Tensor` of type `string` containing the serialized `Summary` protocol buffer resulting from the merging.
github-repos
def _maybe_download_corpora(tmp_dir, dataset_split):
    cnn_filename = 'cnn_stories.tgz'
    cnn_finalpath = os.path.join(tmp_dir, 'cnn/stories/')
    dailymail_filename = 'dailymail_stories.tgz'
    dailymail_finalpath = os.path.join(tmp_dir, 'dailymail/stories/')
    if not tf.gfile.Exists(cnn_finalpath):
        cnn_file = generator_utils.maybe_download_from_drive(
            tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)
        with tarfile.open(cnn_file, 'r:gz') as cnn_tar:
            cnn_tar.extractall(tmp_dir)
    if not tf.gfile.Exists(dailymail_finalpath):
        dailymail_file = generator_utils.maybe_download_from_drive(
            tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)
        with tarfile.open(dailymail_file, 'r:gz') as dailymail_tar:
            dailymail_tar.extractall(tmp_dir)

    cnn_files = tf.gfile.Glob(cnn_finalpath + '*')
    dailymail_files = tf.gfile.Glob(dailymail_finalpath + '*')
    all_files = cnn_files + dailymail_files

    if dataset_split == problem.DatasetSplit.TRAIN:
        urls_path = generator_utils.maybe_download(tmp_dir, 'all_train.txt', _TRAIN_URLS)
    elif dataset_split == problem.DatasetSplit.EVAL:
        urls_path = generator_utils.maybe_download(tmp_dir, 'all_val.txt', _DEV_URLS)
    else:
        urls_path = generator_utils.maybe_download(tmp_dir, 'all_test.txt', _TEST_URLS)

    return (all_files, urls_path)
Download corpora if necessary and unzip them. Args: tmp_dir: directory containing dataset. dataset_split: whether we're in train/dev/test mode. Returns: List of all files generated and path to file containing train/dev/test split info.
codesearchnet
def validate(self):
    if not isinstance(self.value, bytes):
        raise TypeError('opaque value must be bytes')
    elif not isinstance(self.opaque_type, enums.OpaqueDataType):
        raise TypeError('opaque data type must be an OpaqueDataType enumeration')

    name_count = len(self.names)
    for i in range(name_count):
        name = self.names[i]
        if not isinstance(name, six.string_types):
            position = '({0} in list)'.format(i)
            raise TypeError('opaque data name {0} must be a string'.format(position))
Verify that the contents of the OpaqueObject are valid. Raises: TypeError: if the types of any OpaqueObject attributes are invalid.
codesearchnet
def _upload_artifacts_to_path(self, mirror=False):
    if not os.listdir(self.artifact_path) or not self.artifact_path:
        raise S3ArtifactNotFound

    uploaded = False
    if self.s3props.get('content_metadata'):
        LOG.info('Uploading in multiple parts to set metadata')
        uploaded = self.content_metadata_uploads(mirror=mirror)

    if not uploaded:
        cmd = self._get_upload_cmd(mirror=mirror)
        result = subprocess.run(cmd, check=True, shell=True, stdout=subprocess.PIPE)
        LOG.debug('Upload Command Ouput: %s', result.stdout)

    LOG.info('Uploaded artifacts to %s bucket', self.bucket)
Recursively upload directory contents to S3. Args: mirror (bool): If true, uses a flat directory structure instead of nesting under a version.
codesearchnet
def from_file(cls, path, directory=None, modules=None, active=None):
    name = basename(path)
    if name.endswith('.rpp'):
        name = name[:-4]
    lines = _repp_lines(path)
    directory = dirname(path) if directory is None else directory
    r = cls(name=name, modules=modules, active=active)
    _parse_repp(lines, r, directory)
    return r
Instantiate a REPP from a `.rpp` file. The *path* parameter points to the top-level module. Submodules are loaded from *directory*. If *directory* is not given, it is the directory part of *path*. A REPP module may utilize external submodules, which may be defined in two ways. The first method is to map a module name to an instantiated REPP instance in *modules*. The second method assumes that an external group call `>abc` corresponds to a file `abc.rpp` in *directory* and loads that file. The second method only happens if the name (e.g., `abc`) does not appear in *modules*. Only one module may define a tokenization pattern. Args: path (str): the path to the base REPP file to load directory (str, optional): the directory in which to search for submodules modules (dict, optional): a mapping from identifiers to REPP modules active (iterable, optional): an iterable of default module activations
codesearchnet
def delete(script, layer_num=None):
    filter_xml = ' <filter name="Delete Current Mesh"/>\n'
    if isinstance(script, mlx.FilterScript):
        if (layer_num is None) or (layer_num == script.current_layer()):
            util.write_filter(script, filter_xml)
            script.del_layer(script.current_layer())
        else:
            cur_layer = script.current_layer()
            change(script, layer_num)
            util.write_filter(script, filter_xml)
            if layer_num < script.current_layer():
                change(script, cur_layer - 1)
            else:
                change(script, cur_layer)
            script.del_layer(layer_num)
    else:
        util.write_filter(script, filter_xml)
    return None
Delete layer Args: script: the mlx.FilterScript object or script filename to write the filter to. layer_num (int): the number of the layer to delete. Default is the current layer. Not supported on the file base API. Layer stack: Deleting a layer will change the current layer if the deleted layer is lower in the stack MeshLab versions: 2016.12 1.3.4BETA
juraj-google-style
def post_command(self, command, args):
    self._loop.log_coroutine(self.send_command(command, args, Verifier()))
Post a command asynchronously and don't wait for a response. There is no notification of any error that could happen during command execution. A log message will be generated if an error occurred. The command's response is discarded. This method is thread-safe and may be called from inside or outside of the background event loop. If there is no websockets connection, no error will be raised (though an error will be logged). Args: command (string): The command name args (dict): Optional arguments
codesearchnet
def change(script, layer_num=None):
    if layer_num is None:
        if isinstance(script, mlx.FilterScript):
            layer_num = script.last_layer()
        else:
            layer_num = 0
    filter_xml = ''.join([
        ' <filter name="Change the current layer">\n',
        ' <Param name="mesh" ',
        'value="{:d}" '.format(layer_num),
        'description="Mesh" ',
        'type="RichMesh" ',
        '/>\n',
        ' </filter>\n'])
    util.write_filter(script, filter_xml)
    if isinstance(script, mlx.FilterScript):
        script.set_current_layer(layer_num)
    return None
Change the current layer by specifying the new layer number. Args: script: the mlx.FilterScript object or script filename to write the filter to. layer_num (int): the number of the layer to change to. Default is the last layer if script is a mlx.FilterScript object; if script is a filename the default is the first layer. Layer stack: Modifies current layer MeshLab versions: 2016.12 1.3.4BETA
juraj-google-style
def push(self, x):
    self._queue.append(x)
Adds a new value to the data window. Args: x: The value to be added to the window.
github-repos
def _shuffle_single(fname, extra_fn=None):
    records = read_records(fname)
    random.shuffle(records)
    if extra_fn is not None:
        records = extra_fn(records)
    out_fname = fname.replace(UNSHUFFLED_SUFFIX, '')
    write_records(records, out_fname)
    tf.gfile.Remove(fname)
Shuffle a single file of records. Args: fname: a string extra_fn: an optional function from list of TFRecords to list of TFRecords to be called after shuffling.
codesearchnet
def poll(self, timeout=None):
    p = select.poll()
    p.register(self._fd, select.POLLIN | select.POLLPRI)
    # A None timeout means block indefinitely, per the docstring; converting
    # None with int(timeout * 1000) would raise, so guard it.
    events = p.poll(int(timeout * 1000) if timeout is not None else None)
    return len(events) > 0
Poll for data available for reading from the serial port. `timeout` can be positive for a timeout in seconds, 0 for a non-blocking poll, or negative or None for a blocking poll. Default is a blocking poll. Args: timeout (int, float, None): timeout duration in seconds. Returns: bool: ``True`` if data is available for reading from the serial port, ``False`` if not.
juraj-google-style
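The same select.poll pattern can be exercised without serial hardware by registering one end of a socketpair (a POSIX-only sketch):

import select
import socket

a, b = socket.socketpair()
p = select.poll()
p.register(a.fileno(), select.POLLIN | select.POLLPRI)
print(bool(p.poll(0)))  # False: nothing buffered yet
b.send(b'x')
print(bool(p.poll(0)))  # True: one byte is now readable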
def update_variant_compounds(self, variant, variant_objs=None):
    compound_objs = []
    for compound in variant.get('compounds', []):
        not_loaded = True
        gene_objs = []
        if variant_objs:
            variant_obj = variant_objs.get(compound['variant'])
        else:
            variant_obj = self.variant_collection.find_one(
                {'_id': compound['variant']})
        if variant_obj:
            not_loaded = False
            compound['rank_score'] = variant_obj['rank_score']
            for gene in variant_obj.get('genes', []):
                gene_obj = {
                    'hgnc_id': gene['hgnc_id'],
                    'hgnc_symbol': gene.get('hgnc_symbol'),
                    'region_annotation': gene.get('region_annotation'),
                    'functional_annotation': gene.get('functional_annotation'),
                }
                gene_objs.append(gene_obj)
            compound['genes'] = gene_objs
        compound['not_loaded'] = not_loaded
        compound_objs.append(compound)
    return compound_objs
Update compounds for a variant. This will add all the necessary information of a variant on a compound object. Args: variant(scout.models.Variant) variant_objs(dict): A dictionary with _ids as keys and variant objs as values. Returns: compound_objs(list(dict)): A dictionary with updated compound objects.
juraj-google-style
def einsum_vecmul_index(gate_indices, number_of_qubits):
    mat_l, mat_r, tens_lin, tens_lout = _einsum_matmul_index_helper(
        gate_indices, number_of_qubits)
    return ('{mat_l}{mat_r}, '.format(mat_l=mat_l, mat_r=mat_r) +
            '{tens_lin}->{tens_lout}'.format(tens_lin=tens_lin, tens_lout=tens_lout))
Return the index string for numpy.einsum matrix-vector multiplication. The returned indices are to perform a matrix multiplication A.v where the matrix A is an M-qubit matrix, vector v is an N-qubit vector, and M <= N, and identity matrices are implied on the subsystems where A has no support on v. Args: gate_indices (list[int]): the indices of the right matrix subsystems to contract with the left matrix. number_of_qubits (int): the total number of qubits for the right matrix. Returns: str: An indices string for the numpy.einsum function.
codesearchnet
def is_dsub_operation(op):
    if not is_pipeline(op):
        return False
    for name in ['dsub-version', 'job-id', 'job-name', 'user-id']:
        if not get_label(op, name):
            return False
    return True
Determine if a pipelines operation is a dsub request. We don't have a rigorous way to identify an operation as being submitted by dsub. Our best option is to check for certain fields that have always been part of dsub operations. - labels: job-id, job-name, and user-id have always existed. The dsub-version label has always existed for the google-v2 provider. Args: op: a pipelines operation. Returns: Boolean, true if the pipeline run was generated by dsub.
juraj-google-style
def get_export_outputs(export_outputs, predictions):
    if export_outputs is None:
        default_output = export_output_lib.PredictOutput(predictions)
        export_outputs = {
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: default_output}
    if not isinstance(export_outputs, dict):
        raise TypeError('export_outputs must be dict, given: {}'.format(export_outputs))
    for v in export_outputs.values():
        if not isinstance(v, export_output_lib.ExportOutput):
            raise TypeError(
                'Values in export_outputs must be ExportOutput objects. '
                'Given: {}'.format(export_outputs))
    _maybe_add_default_serving_output(export_outputs)
    return export_outputs
Validate export_outputs or create default export_outputs. Args: export_outputs: Describes the output signatures to be exported to `SavedModel` and used during serving. Should be a dict or None. predictions: Predictions `Tensor` or dict of `Tensor`. Returns: Valid export_outputs dict Raises: TypeError: if export_outputs is not a dict or its values are not ExportOutput instances.
github-repos
def set_timer(self, num_secs):
    status = self.status()
    devices = status['dps']
    devices_numbers = list(devices.keys())
    devices_numbers.sort()
    dps_id = devices_numbers[-1]

    payload = self.generate_payload(SET, {dps_id: num_secs})
    data = self._send_receive(payload)
    log.debug('set_timer received data=%r', data)
    return data
Set a timer. Args: num_secs(int): Number of seconds
juraj-google-style
def plane_xz(size=(10, 10), resolution=(10, 10)) -> VAO:
    sx, sz = size
    rx, rz = resolution
    dx, dz = sx / rx, sz / rz
    ox, oz = -sx / 2, -sz / 2

    def gen_pos():
        for z in range(rz):
            for x in range(rx):
                yield ox + x * dx
                yield 0
                yield oz + z * dz

    def gen_uv():
        for z in range(rz):
            for x in range(rx):
                yield x / (rx - 1)
                yield 1 - z / (rz - 1)

    def gen_normal():
        for _ in range(rx * rz):
            yield 0.0
            yield 1.0
            yield 0.0

    def gen_index():
        for z in range(rz - 1):
            for x in range(rx - 1):
                yield z * rz + x + 1
                yield z * rz + x
                yield z * rz + x + rx
                yield z * rz + x + 1
                yield z * rz + x + rx
                yield z * rz + x + rx + 1

    pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32)
    uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32)
    normal_data = numpy.fromiter(gen_normal(), dtype=numpy.float32)
    index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32)

    vao = VAO("plane_xz", mode=moderngl.TRIANGLES)

    vao.buffer(pos_data, '3f', ['in_position'])
    vao.buffer(uv_data, '2f', ['in_uv'])
    vao.buffer(normal_data, '3f', ['in_normal'])

    vao.index_buffer(index_data, index_element_size=4)

    return vao
Generates a plane on the xz axis of a specific size and resolution. Normals and texture coordinates are also included. Args: size: (x, y) tuple resolution: (x, y) tuple Returns: A :py:class:`demosys.opengl.vao.VAO` instance
juraj-google-style
def log2(x):
    if any_symbolic_tensors((x,)):
        return Log2().symbolic_call(x)
    return backend.numpy.log2(x)
Base-2 logarithm of `x`, element-wise. Args: x: Input tensor. Returns: Output tensor, element-wise base-2 logarithm of `x`.
github-repos
def _ProcessEvent(self, mediator, event):
    try:
        self._analysis_plugin.ExamineEvent(mediator, event)
    except Exception as exception:
        self.SignalAbort()

        if self._debug_output:
            logger.warning('Unhandled exception while processing event object.')
            logger.exception(exception)
Processes an event. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event.
codesearchnet
def get_default_padding(self):
    high = (1024 * 10) + (self.size // 100)  # 10 KiB plus 1% of trailing data
    low = 1024 + (self.size // 100)  # 1 KiB plus 1% of trailing data
    if self.padding >= 0:
        if self.padding > high:
            return low
        return self.padding
    else:
        return low
The default implementation which tries to select a reasonable amount of padding and which might change in future versions. Returns: int: Amount of padding after saving
codesearchnet
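Treating the method above as a free function (a hypothetical stand-in, assuming the 1%-of-size bounds shown) makes the three branches easy to exercise:

def default_padding(size, padding):
    high = (1024 * 10) + (size // 100)  # upper bound: 10 KiB plus 1% of size
    low = 1024 + (size // 100)          # lower bound: 1 KiB plus 1% of size
    if padding >= 0:
        return low if padding > high else padding
    return low

print(default_padding(1000000, -1))     # 11024: no padding yet, add the low amount
print(default_padding(1000000, 50000))  # 11024: too much padding, shrink to low
print(default_padding(1000000, 12000))  # 12000: within bounds, keep as-is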
def get_definition_directive(self, node, directive, arg, default):
    defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
    if not defs:
        return default

    arg_values_found = []
    for def_ in defs:
        if directive in def_.directives and arg in def_.directives[directive]:
            arg_values_found.append(def_.directives[directive][arg])

    if not arg_values_found:
        return default
    if len(arg_values_found) == 1:
        return arg_values_found[0]

    first_value = arg_values_found[0]
    for other_value in arg_values_found[1:]:
        if not ast_util.matches(first_value, other_value):
            qn = anno.getanno(node, anno.Basic.QN)
            raise ValueError(
                '%s has ambiguous annotations for %s(%s): %s, %s' %
                (qn, directive.__name__, arg,
                 parser.unparse(other_value).strip(),
                 parser.unparse(first_value).strip()))
    return first_value
Returns the unique directive argument for a symbol. See lang/directives.py for details on directives. Example: # Given a directive in the code: ag.foo_directive(bar, baz=1) # One can write for an AST node Name(id='bar'): get_definition_directive(node, ag.foo_directive, 'baz') Args: node: ast.AST, the node representing the symbol for which the directive argument is needed. directive: Callable[..., Any], the directive to search. arg: str, the directive argument to return. default: Any Raises: ValueError: if conflicting annotations have been found
github-repos
def integers(start, count):
    if count < 0:
        raise ValueError("integers() count cannot be negative")
    return query(irange(start, start + count))
Generates in sequence the integral numbers within a range. Note: This method uses deferred execution. Args: start: The first integer in the sequence. count: The number of sequential integers to generate. Returns: A Queryable over the specified range of integers. Raises: ValueError: If count is negative.
juraj-google-style
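An eager stand-in without the Queryable wrapper (plain range instead of query(irange(...)); the name integers_plain is hypothetical):

def integers_plain(start, count):
    if count < 0:
        raise ValueError("integers() count cannot be negative")
    return range(start, start + count)

print(list(integers_plain(5, 3)))  # [5, 6, 7]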
def find(self, id):
    url = "{}/{}/{}".format(__endpoint__, self.type.RESOURCE, id)
    response = RestClient.get(url)[self.type.RESOURCE[:-1]]
    return self.type(response)
Get a resource by its id Args: id (string): Resource id Returns: object: Instance of the resource type
juraj-google-style
def call(self, inputs, **kwargs):
    return inputs
This is where the layer's logic lives. Args: inputs: Input tensor, or list/tuple of input tensors. **kwargs: Additional keyword arguments. Returns: A tensor or list/tuple of tensors.
github-repos
def status(self, job_ids):
    if job_ids:
        self._status()
    return [self.resources[jid]['status'] for jid in job_ids]
Get the status of a list of jobs identified by the job identifiers returned from the submit request. Args: - job_ids (list) : A list of job identifiers Returns: - A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED', 'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list. Raises: - ExecutionProviderException or its subclasses
juraj-google-style
def verify_abort(func, *args, **kwargs):
    expected_exception = kwargs.pop("expected_exception", runez.system.AbortException)
    with CaptureOutput() as logged:
        try:
            value = func(*args, **kwargs)
            assert False, "%s did not raise, but returned %s" % (func, value)
        except expected_exception:
            return str(logged)
Convenient wrapper around functions that should exit or raise an exception Example: assert "Can't create folder" in verify_abort(ensure_folder, "/dev/null/not-there") Args: func (callable): Function to execute *args: Args to pass to 'func' **kwargs: Named args to pass to 'func' Returns: (str): Chatter from call to 'func', if it did indeed raise
juraj-google-style
def prepare_background_data(self):
    self.background_data = []
    background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)
    if not gfile.Exists(background_dir):
        return self.background_data
    with tf.compat.v1.Session(graph=tf.Graph()) as sess:
        wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])
        wav_loader = io_ops.read_file(wav_filename_placeholder)
        wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)
        search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME, '*.wav')
        for wav_path in gfile.Glob(search_path):
            wav_data = sess.run(
                wav_decoder,
                feed_dict={wav_filename_placeholder: wav_path}).audio.flatten()
            self.background_data.append(wav_data)
        if not self.background_data:
            raise Exception('No background wav files were found in ' + search_path)
Searches a folder for background noise audio, and loads it into memory. It's expected that the background audio samples will be in a subdirectory named '_background_noise_' inside the 'data_dir' folder, as .wavs that match the sample rate of the training data, but can be much longer in duration. If the '_background_noise_' folder doesn't exist at all, this isn't an error, it's just taken to mean that no background noise augmentation should be used. If the folder does exist, but it's empty, that's treated as an error. Returns: List of raw PCM-encoded audio samples of background noise. Raises: Exception: If files aren't found in the folder.
github-repos
def load_file_to_str(path):
    with open(path, 'rt') as f:
        string = f.read().replace(linesep, '')
    if not string:
        raise LoadError('%s file is empty!' % path)
    return string
Load file into a string removing newlines Args: path (str): Path to file Returns: str: String contents of file
codesearchnet
def sort_dependencies(self, image, dependencies=None):
    if dependencies is None:
        dependencies = OrderedDict()

    if image in dependencies:
        return

    requires = self.ymldefs[image].get('requires', [])
    for dep in requires:
        self.sort_dependencies(dep, dependencies)

    dependencies[image] = None
    return dependencies.keys()
Topologically sort the docker commands by their requirements Note: Circular "requires" dependencies are assumed to have already been checked in get_external_base_image, they are not checked here Args: image (str): process this docker image's dependencies dependencies (OrderedDict): running cache of sorted dependencies (ordered dict) Returns: List[str]: list of dependencies a topologically-sorted build order
codesearchnet
def histogram(self, tag, values, bins, step=None):
    if step is None:
        step = self._step
    else:
        self._step = step
    values = onp.array(values)
    bins = onp.array(bins)
    values = onp.reshape(values, -1)
    counts, limits = onp.histogram(values, bins=bins)
    cum_counts = onp.cumsum(onp.greater(counts, 0, dtype=onp.int32))
    start, end = onp.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side='right')
    start, end = int(start), int(end) + 1
    counts = (counts[start - 1:end] if start > 0
              else onp.concatenate([[0], counts[:end]]))
    limits = limits[start:end + 1]
    sum_sq = values.dot(values)
    histo = HistogramProto(min=values.min(),
                           max=values.max(),
                           num=len(values),
                           sum=values.sum(),
                           sum_squares=sum_sq,
                           bucket_limit=limits.tolist(),
                           bucket=counts.tolist())
    summary = Summary(value=[Summary.Value(tag=tag, histo=histo)])
    self.add_summary(summary, step)
Saves histogram of values. Args: tag: str: label for this data values: ndarray: will be flattened by this routine bins: number of bins in histogram, or array of bins for onp.histogram step: int: training step
codesearchnet
def __replaceSpecialValues(self, decisions):
    error = []
    for row, line in enumerate(decisions):
        if '.' in line:
            for i, element in enumerate(line):
                if row == 0:
                    error.append("Row: {}Column: {}==> don't have parent value".format(
                        str(row).ljust(4), str(i).ljust(4)))
                if element == self.__parentSymbol:
                    if decisions[row - 1][i] == '.':
                        error.append("Row: {}Column: {}==> don't have parent value".format(
                            str(row).ljust(4), str(i).ljust(4)))
                    decisions[row][i] = decisions[row - 1][i]
    if error:
        view.Tli.showErrors('ReplaceSpecialValuesError', error)
    else:
        return decisions
Will replace special values in the decisions array. Args: decisions (array of array of str): Standard decision array format. Raises: ValueError: Raised if a row element doesn't have a parent value. Returns: New decision array with updated values.
codesearchnet
def iter_archive(self, resource):
    if isinstance(resource, six.string_types):
        resource = resource_lib.Resource(path=resource)
    return extractor.iter_archive(resource.path, resource.extract_method)
Returns iterator over files within archive. **Important Note**: caller should read files as they are yielded. Reading out of order is slow. Args: resource: path to archive or `tfds.download.Resource`. Returns: Generator yielding tuple (path_within_archive, file_obj).
juraj-google-style
def _flatten_tensors(tensors):
    if not tensors:
        raise ValueError('tensors cannot be empty')
    shape = tensors[0].shape
    for tensor in tensors:
        shape = shape.merge_with(tensor.shape)
    if not shape.is_fully_defined():
        raise ValueError('Tensors must have statically known shape.')
    if len(shape) != 1:
        reshaped = []
        for t in tensors:
            with ops.colocate_with(t):
                reshaped.append(array_ops.reshape(t, [-1]))
        tensors = reshaped
    return (tensors, shape)
Check tensors for isomorphism and flatten. Args: tensors: list of `tf.Tensor` which must all have the same shape. Returns: tensors: a list of `tf.Tensor` which are flattened (1D) views of tensors shape: the original shape of each element of input tensors Raises: ValueError: tensors are empty or non-isomorphic or have unknown shape.
github-repos
def get_kerberos_ticket(username, password):
    cache = '/tmp/ion-%s' % uuid.uuid4()

    logger.debug("Setting KRB5CCNAME to 'FILE:{}'".format(cache))
    os.environ['KRB5CCNAME'] = 'FILE:' + cache

    try:
        realm = settings.CSL_REALM
        kinit = pexpect.spawnu('/usr/bin/kinit {}@{}'.format(username, realm),
                               timeout=settings.KINIT_TIMEOUT)
        kinit.expect(':')
        kinit.sendline(password)
        returned = kinit.expect([pexpect.EOF, 'password:'])
        if returned == 1:
            logger.debug('Password for {}@{} expired, needs reset'.format(username, realm))
            return 'reset'
        kinit.close()
        exitstatus = kinit.exitstatus
    except pexpect.TIMEOUT:
        KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)
        exitstatus = 1

    if exitstatus != 0:
        try:
            realm = settings.AD_REALM
            kinit = pexpect.spawnu('/usr/bin/kinit {}@{}'.format(username, realm),
                                   timeout=settings.KINIT_TIMEOUT)
            kinit.expect(':')
            kinit.sendline(password)
            returned = kinit.expect([pexpect.EOF, 'password:'])
            if returned == 1:
                return False
            kinit.close()
            exitstatus = kinit.exitstatus
        except pexpect.TIMEOUT:
            KerberosAuthenticationBackend.kinit_timeout_handle(username, realm)
            exitstatus = 1

    if 'KRB5CCNAME' in os.environ:
        subprocess.check_call(['kdestroy', '-c', os.environ['KRB5CCNAME']])
        del os.environ['KRB5CCNAME']

    if exitstatus == 0:
        logger.debug('Kerberos authorized {}@{}'.format(username, realm))
        return True
    else:
        logger.debug('Kerberos failed to authorize {}'.format(username))
        return False
Attempts to create a Kerberos ticket for a user. Args: username: The username. password: The password. Returns: Boolean indicating success or failure of ticket creation
codesearchnet
def post_process(self, dir_name, d):
    logger.info('Post-processing dir:{}'.format(dir_name))
    fullpath = os.path.abspath(dir_name)

    transformations = {}
    filenames = glob.glob(os.path.join(fullpath, 'transformations.json*'))
    if len(filenames) >= 1:
        with zopen(filenames[0], 'rt') as f:
            transformations = json.load(f)
            try:
                m = re.match('(\\d+)-ICSD', transformations['history'][0]['source'])
                if m:
                    d['icsd_id'] = int(m.group(1))
            except Exception as ex:
                logger.warning('Cannot parse ICSD from transformations file.')
                pass
    else:
        logger.warning('Transformations file does not exist.')

    other_parameters = transformations.get('other_parameters')
    new_tags = None
    if other_parameters:
        new_tags = other_parameters.pop('tags', None)
        new_author = other_parameters.pop('author', None)
        if new_author:
            d['author'] = new_author
        if not other_parameters:
            transformations.pop('other_parameters')
    d['transformations'] = transformations

    filenames = glob.glob(os.path.join(fullpath, 'custodian.json*'))
    if len(filenames) >= 1:
        with zopen(filenames[0], 'rt') as f:
            d['custodian'] = json.load(f)

    try:
        run_stats = {}
        for filename in glob.glob(os.path.join(fullpath, 'OUTCAR*')):
            outcar = Outcar(filename)
            i = 1 if re.search('relax2', filename) else 0
            taskname = 'relax2' if re.search('relax2', filename) else 'relax1'
            d['calculations'][i]['output']['outcar'] = outcar.as_dict()
            run_stats[taskname] = outcar.run_stats
    except:
        logger.error('Bad OUTCAR for {}.'.format(fullpath))

    try:
        overall_run_stats = {}
        for key in ['Total CPU time used (sec)', 'User time (sec)',
                    'System time (sec)', 'Elapsed time (sec)']:
            overall_run_stats[key] = sum([v[key] for v in run_stats.values()])
        run_stats['overall'] = overall_run_stats
    except:
        logger.error('Bad run stats for {}.'.format(fullpath))
    d['run_stats'] = run_stats

    if self.use_full_uri:
        d['dir_name'] = get_uri(dir_name)

    if new_tags:
        d['tags'] = new_tags

    logger.info('Post-processed ' + fullpath)
Simple post-processing for various files other than the vasprun.xml. Called by generate_task_doc. Modify this if your runs have other kinds of processing requirements. Args: dir_name: The dir_name. d: Current doc generated.
codesearchnet
def argsort(*args, **kwargs):
    if len(args) == 1 and isinstance(args[0], dict):
        dict_ = args[0]
        index_list = list(dict_.keys())
        value_list = list(dict_.values())
        return sortedby2(index_list, value_list)
    else:
        index_list = list(range(len(args[0])))
        return sortedby2(index_list, *args, **kwargs)
like np.argsort but for lists Args: *args: multiple lists to sort by **kwargs: reverse (bool): sort order is descending if True else acscending CommandLine: python -m utool.util_list argsort Example: >>> # DISABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> result = ut.argsort({'a': 3, 'b': 2, 'c': 100}) >>> print(result)
codesearchnet
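The underlying index-sort is one line of plain Python; this mirrors the default ascending case without utool's sortedby2:

values = [30, 10, 20]
order = sorted(range(len(values)), key=values.__getitem__)
print(order)  # [1, 2, 0]: position of 10 first, then 20, then 30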
def items(self, prefix=None, delimiter=None):
    return _item.Items(self._name, prefix, delimiter, context=self._context)
Get an iterator for the items within this bucket. Args: prefix: an optional prefix to match items. delimiter: an optional string to simulate directory-like semantics. The returned items will be those whose names do not contain the delimiter after the prefix. For the remaining items, the names will be returned truncated after the delimiter with duplicates removed (i.e. as pseudo-directories). Returns: An iterable list of items within this bucket.
codesearchnet
def add_primitives_path(path):
    if path not in _PRIMITIVES_PATHS:
        if not os.path.isdir(path):
            raise ValueError('Invalid path: {}'.format(path))

        LOGGER.debug('Adding new primitives path %s', path)
        _PRIMITIVES_PATHS.insert(0, os.path.abspath(path))
Add a new path to look for primitives. The new path will be inserted in the first place of the list, so any primitive found in this new folder will take precedence over any other primitive with the same name that existed in the system before. Args: path (str): path to add Raises: ValueError: A `ValueError` will be raised if the path is not valid.
codesearchnet
def zero_state(self, batch_size, dtype):
    state_size = self.state_size
    is_eager = context.executing_eagerly()
    if is_eager and _hasattr(self, '_last_zero_state'):
        last_state_size, last_batch_size, last_dtype, last_output = getattr(
            self, '_last_zero_state')
        if (last_batch_size == batch_size and last_dtype == dtype
                and last_state_size == state_size):
            return last_output
    with backend.name_scope(type(self).__name__ + 'ZeroState'):
        output = _zero_state_tensors(state_size, batch_size, dtype)
    if is_eager:
        self._last_zero_state = (state_size, batch_size, dtype, output)
    return output
Return zero-filled state tensor(s). Args: batch_size: int, float, or unit Tensor representing the batch size. dtype: the data type to use for the state. Returns: If `state_size` is an int or TensorShape, then the return value is a `N-D` tensor of shape `[batch_size, state_size]` filled with zeros. If `state_size` is a nested list or tuple, then the return value is a nested list or tuple (of the same structure) of `2-D` tensors with the shapes `[batch_size, s]` for each s in `state_size`.
github-repos
def clean_headers(headers):
    clean = {}
    try:
        for k, v in six.iteritems(headers):
            if not isinstance(k, six.binary_type):
                k = str(k)
            if not isinstance(v, six.binary_type):
                v = str(v)
            clean[_helpers._to_bytes(k)] = _helpers._to_bytes(v)
    except UnicodeEncodeError:
        from oauth2client.client import NonAsciiHeaderError
        raise NonAsciiHeaderError(k, ': ', v)
    return clean
Forces header keys and values to be strings, i.e. not unicode. The httplib module just concatenates the header keys and values in a way that may make the message header a unicode string, which, if it then tries to concatenate to a binary request body, may result in a unicode decode error. Args: headers: dict, A dictionary of headers. Returns: The same dictionary but with all the keys converted to strings.
codesearchnet
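The same bytes coercion sketched with a dict comprehension (simplified; this drops the six compatibility layer and the error translation):

headers = {'Content-Type': 'application/json', 'X-Count': 3}
clean = {str(k).encode('ascii'): str(v).encode('ascii')
         for k, v in headers.items()}
print(clean)  # {b'Content-Type': b'application/json', b'X-Count': b'3'}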
def _get_linear_trajectory(x0, velocity, t):
    x0 = tf.convert_to_tensor(x0)
    velocity = tf.convert_to_tensor(velocity)
    t = tf.convert_to_tensor(t)
    if x0.shape.ndims != 1:
        raise ValueError('x0 must be a rank 1 tensor')
    if velocity.shape.ndims != 1:
        raise ValueError('velocity must be a rank 1 tensor')
    if t.shape.ndims != 1:
        raise ValueError('t must be a rank 1 tensor')
    x0 = tf.expand_dims(x0, axis=0)
    velocity = tf.expand_dims(velocity, axis=0)
    dx = velocity * tf.expand_dims(t, axis=-1)
    linear_trajectories = x0 + dx
    assert linear_trajectories.shape.ndims == 2, \
        'linear_trajectories should be a rank 2 tensor'
    return linear_trajectories
Construct a linear trajectory from x0. Args: x0: N-D float tensor. velocity: N-D float tensor t: [sequence_length]-length float tensor Returns: x: [sequence_length, ndims] float tensor.
codesearchnet
def get_item(self, *key):
    item = self._get_item_or_section(key)
    if not item.is_item:
        raise RuntimeError('{} is a section, not an item'.format(key))
    return item
The recommended way of retrieving an item by key when extending configmanager's behaviour. Attribute and dictionary key access is configurable and may not always return items (see PlainConfig for example), whereas this method will always return the corresponding Item as long as NOT_FOUND hook callbacks don't break this convention. Args: *key Returns: item (:class:`.Item`):
juraj-google-style
def export_msdt(self, filename):
    fmt = 'csv' if filename.lower().endswith('.csv') else 'dat'
    delimiter = ', ' if fmt == 'csv' else ' '
    with open(filename, 'wt') as f:
        if fmt == 'dat':
            f.write('# ')
        f.write(delimiter.join(['t', 'MSD', 'MSD_a', 'MSD_b', 'MSD_c', 'MSCD']))
        f.write('\n')
        for dt, msd, msdc, mscd in zip(self.dt, self.msd, self.msd_components, self.mscd):
            f.write(delimiter.join(['%s' % v for v in [dt, msd] + list(msdc) + [mscd]]))
            f.write('\n')
Writes MSD data to a csv file that can be easily plotted in other software. Args: filename (str): Filename. Supported formats are csv and dat. If the extension is csv, a csv file is written. Otherwise, a dat format is assumed.
codesearchnet
def convert_to_shape(x):
    if x is None:
        return None
    if isinstance(x, Shape):
        return x
    if isinstance(x, str):
        x = _parse_string_to_list_of_pairs(x, seconds_to_int=True)
    return Shape(x)
Converts input to a Shape. Args: x: Shape, str, or None. Returns: Shape or None. Raises: ValueError: If x cannot be converted to a Shape.
codesearchnet
def _ReduceParserFilters(cls, includes, excludes):
    if not includes or not excludes:
        return

    for parser_name in set(includes).intersection(excludes):
        if includes[parser_name] == excludes[parser_name]:
            logger.warning(
                'Parser {0:s} was in both the inclusion and exclusion lists. '
                'Ignoring included parser.'.format(parser_name))
            includes.pop(parser_name)
            continue

        plugin_includes = includes[parser_name]
        plugin_excludes = excludes[parser_name]
        intersection = set(plugin_includes).intersection(plugin_excludes)
        if not intersection:
            continue

        logger.warning(
            'Parser {0:s} plugins: {1:s} in both the inclusion and exclusion '
            'lists. Ignoring included plugins.'.format(
                parser_name, ', '.join(intersection)))
        plugins_list = list(set(plugin_includes).difference(intersection))
        includes[parser_name] = plugins_list

    parsers_to_pop = []
    for parser_name in excludes:
        if parser_name in includes:
            continue

        logger.warning(
            'The excluded parser: {0:s} is not associated with the included '
            'parsers: {1:s}. Ignoring excluded parser.'.format(
                parser_name, ', '.join(includes.keys())))
        parsers_to_pop.append(parser_name)

    for parser_name in parsers_to_pop:
        excludes.pop(parser_name)
Reduces the parsers and plugins to include and exclude. If an intersection is found, the parser or plugin is removed from the inclusion set. If a parser is not in inclusion set there is no need to have it in the exclusion set. Args: includes (dict[str, BaseParser]): included parsers and plugins by name. excludes (dict[str, BaseParser]): excluded parsers and plugins by name.
codesearchnet
def playback_trajectory(env, ep_dir):
    xml_path = os.path.join(ep_dir, "model.xml")
    with open(xml_path, "r") as f:
        env.reset_from_xml_string(f.read())

    state_paths = os.path.join(ep_dir, "state_*.npz")

    t = 0
    for state_file in sorted(glob(state_paths)):
        print(state_file)
        dic = np.load(state_file)
        states = dic["states"]
        for state in states:
            env.sim.set_state_from_flattened(state)
            env.sim.forward()
            env.render()
            t += 1
            if t % 100 == 0:
                print(t)
Playback data from an episode. Args: ep_dir: The path to the directory containing data for an episode.
juraj-google-style
def remove_node(self, node_id, force=False):
    url = self._url('/nodes/{0}', node_id)
    params = {'force': force}
    res = self._delete(url, params=params)
    self._raise_for_status(res)
    return True
Remove a node from the swarm. Args: node_id (string): ID of the node to be removed. force (bool): Force remove an active node. Default: `False` Raises: :py:class:`docker.errors.NotFound` If the node referenced doesn't exist in the swarm. :py:class:`docker.errors.APIError` If the server returns an error. Returns: `True` if the request was successful.
codesearchnet
def from_dict(d):
    i = Tags()
    for k, v in d.items():
        if k not in ("@module", "@class"):
            i[k] = v
    return i
Creates Tags object from a dictionary. Args: d: Dict of feff parameters and values. Returns: Tags object
juraj-google-style
def _get_ngram_counter(ids, n):
    ids = [token_id for token_id in ids if token_id != 0]
    ngram_list = [tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)]
    ngrams = set(ngram_list)
    counts = collections.Counter()
    for ngram in ngrams:
        counts[ngram] = 1
    return counts
Get a Counter with the ngrams of the given ID list. Args: ids: np.array or a list corresponding to a single sentence n: n-gram size Returns: collections.Counter with ID tuples as keys and 1s as values.
codesearchnet
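A worked example of the clipped counts (each distinct n-gram is counted once, regardless of how often it repeats):

import collections

ids = [7, 8, 9, 8, 9]  # zeros (padding) would be filtered out first
n = 2
ngrams = {tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)}
counts = collections.Counter({g: 1 for g in ngrams})
print(sorted(counts))  # [(7, 8), (8, 9), (9, 8)], each with count 1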
def first(self):
    def _transform(xs):
        try:
            return [six.next(iter(xs))]
        except StopIteration:
            return []
    return self.transform(_transform, 'first')
Return a Query that selects only the first element of this Query. If no elements are available, returns a query with no results. Example usage: .. code:: python >> q = Query(lambda: list(range(5))) >> q.first.results [0] Returns: Query
codesearchnet
def _define_loop(graph, eval_steps):
    loop = tools.Loop(
        None, graph.step, graph.should_log, graph.do_report, graph.force_reset)
    loop.add_phase(
        'eval', graph.done, graph.score, graph.summary, eval_steps,
        report_every=eval_steps,
        log_every=None,
        checkpoint_every=None,
        feed={graph.is_training: False})
    return loop
Create and configure an evaluation loop. Args: graph: Object providing graph elements via attributes. eval_steps: Number of evaluation steps per epoch. Returns: Loop object.
juraj-google-style
def option(self, key, value=None, **kwargs):
    if not isinstance(self._container, Section):
        raise ValueError('Options can only be added inside a section!')
    option = Option(key, value, container=self._container, **kwargs)
    option.value = value
    self._container.structure.insert(self._idx, option)
    self._idx += 1
    return self
Creates a new option inside a section Args: key (str): key of the option value (str or None): value of the option **kwargs: are passed to the constructor of :class:`Option` Returns: self for chaining
codesearchnet
def _reraise_with_traceback(f):
    def wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            traceback_str = traceback.format_exc()
            e.traceback = traceback_str
            raise e
    return wrap
Call the function normally. But if the function raises an error, attach the str(traceback) into the function.traceback attribute, then reraise the error. Args: f: The function to run. Returns: A function that wraps f, attaching the traceback if an error occurred.
juraj-google-style
def diff_commonPrefix(self, text1, text2):
    # Quick check for common null cases.
    if not text1 or not text2 or text1[0] != text2[0]:
        return 0
    # Binary search over the length of the common prefix.
    pointermin = 0
    pointermax = min(len(text1), len(text2))
    pointermid = pointermax
    pointerstart = 0
    while pointermin < pointermid:
        if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
            pointermin = pointermid
            pointerstart = pointermin
        else:
            pointermax = pointermid
        pointermid = (pointermax - pointermin) // 2 + pointermin
    return pointermid
Determine the common prefix of two strings.

Args:
    text1: First string.
    text2: Second string.

Returns:
    The number of characters common to the start of each string.
juraj-google-style
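A standalone version of the same binary search over prefix lengths, handy for a quick sanity check:

def common_prefix(text1, text2):
    if not text1 or not text2 or text1[0] != text2[0]:
        return 0
    lo, hi = 0, min(len(text1), len(text2))
    mid, start = hi, 0
    while lo < mid:
        if text1[start:mid] == text2[start:mid]:
            lo, start = mid, mid
        else:
            hi = mid
        mid = (hi - lo) // 2 + lo
    return mid

assert common_prefix("1234abcdef", "1234xyz") == 4
assert common_prefix("abc", "xyz") == 0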
def postprocess_image(x, rows, cols, hparams):
    batch = common_layers.shape_list(x)[0]
    x = tf.reshape(x, [batch, rows, cols, hparams.hidden_size])
    likelihood = getattr(hparams, 'likelihood', DistributionType.CAT)
    if likelihood == DistributionType.DMOL:
        depth = hparams.num_mixtures * 10
        targets = tf.layers.dense(
            x, depth, use_bias=False, activation=None, name='output_conv')
    else:
        depth = 256
        targets = tf.layers.dense(
            x, depth, use_bias=True, activation=None, name='output_conv')
    if (hparams.mode == tf.estimator.ModeKeys.PREDICT
            and hparams.block_raster_scan):
        y = targets
        yshape = common_layers.shape_list(y)
        block_length = hparams.query_shape[0]
        block_width = hparams.query_shape[1]
        # Break into block rows.
        y = tf.reshape(
            y,
            [batch, yshape[1] // block_length, block_length, yshape[2], depth])
        yshape = common_layers.shape_list(y)
        # Break each row of blocks into the blocks themselves, width-wise.
        y_blocks = tf.reshape(
            y,
            [batch, yshape[1], yshape[2], yshape[3] // block_width,
             block_width, depth])
        # Reshape targets as [batch, num_blocks_rows, num_block_cols,
        # block_length, block_width, depth].
        targets = tf.transpose(y_blocks, [0, 1, 3, 2, 4, 5])
    return targets
Postprocessing after decoding.

Args:
    x: Tensor of shape [batch, ...], where ... can be any rank such that the
        number of elements in x is batch * rows * cols * hparams.hidden_size.
    rows: Integer representing number of rows in a 2-D data point.
    cols: Integer representing number of columns in a 2-D data point.
    hparams: HParams set.

Returns:
    Tensor of shape [batch, rows, cols, depth], where depth is
    hparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256.
    In the special case of inference and block raster scan order, it is a
    Tensor of shape [batch, num_blocks_rows, num_block_cols, block_length,
    block_width, depth].
codesearchnet
def enable(self, information, id_or_uri, timeout=-1):
    uri = self._client.build_uri(id_or_uri)
    return self._client.update(information, uri, timeout=timeout)
Enables or disables a range.

Args:
    information (dict): Information to update.
    id_or_uri: ID or URI of range.
    timeout: Timeout in seconds. Wait for task completion by default.
        The timeout does not abort the operation in OneView; it just stops
        waiting for its completion.

Returns:
    dict: Updated resource.
codesearchnet
def cosine_similarity(y_true, y_pred, axis=-1):
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
    y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
    y_pred = normalize(y_pred, axis=axis)
    y_true = normalize(y_true, axis=axis)
    return ops.sum(y_true * y_pred, axis=axis)
Computes the cosine similarity between labels and predictions.

Formula:

```python
loss = sum(l2_norm(y_true) * l2_norm(y_pred))
```

Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.
    axis: Axis along which to determine similarity. Defaults to `-1`.

Returns:
    Cosine similarity tensor.

Example:

>>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
>>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
>>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1)
[0., 0.99999994, -0.99999994]
github-repos
def read(self, viewport=None, components=3, *, attachment=0, alignment=1,
         dtype='f1') -> bytes:
    return self.mglo.read(viewport, components, attachment, alignment, dtype)
Read the content of the framebuffer.

Args:
    viewport (tuple): The viewport.
    components (int): The number of components to read.

Keyword Args:
    attachment (int): The color attachment.
    alignment (int): The byte alignment of the pixels.
    dtype (str): Data type.

Returns:
    bytes
codesearchnet
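A hedged ModernGL sketch: reading back an offscreen framebuffer as raw RGB bytes (standalone context creation needs a working GL driver):

import moderngl

ctx = moderngl.create_standalone_context()
fbo = ctx.simple_framebuffer((16, 16))
fbo.use()
ctx.clear(1.0, 0.0, 0.0)       # solid red
data = fbo.read(components=3)  # 16 * 16 * 3 bytes
print(len(data), data[:3])     # 768 b'\xff\x00\x00'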
def get_outputs_filtered(self, owner, spent=None):
    outputs = self.fastquery.get_outputs_by_public_key(owner)
    if spent is None:
        return outputs
    elif spent is True:
        return self.fastquery.filter_unspent_outputs(outputs)
    elif spent is False:
        return self.fastquery.filter_spent_outputs(outputs)
Get a list of output links filtered on some criteria.

Args:
    owner (str): base58 encoded public_key.
    spent (bool): If ``True`` return only the spent outputs. If ``False``
        return only unspent outputs. If spent is not specified (``None``)
        return all outputs.

Returns:
    :obj:`list` of TransactionLink: list of ``txid`` s and ``output`` s
    pointing to another transaction's condition
juraj-google-style
def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
    if tensor_type is None:
        return self
    is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type)
    for key, value in self.items():
        try:
            if not is_tensor(value):
                tensor = as_tensor(value)
                self[key] = tensor
        except Exception:
            if key == 'overflowing_values':
                raise ValueError(
                    'Unable to create tensor returning overflowing values '
                    'of different lengths. ')
            raise ValueError(
                "Unable to create tensor, you should probably activate "
                "padding with 'padding=True' to have batched tensors with "
                "the same length.")
    return self
Convert the inner content to tensors.

Args:
    tensor_type (`str` or [`~utils.TensorType`], *optional*):
        The type of tensors to use. If `str`, should be one of the values
        of the enum [`~utils.TensorType`]. If `None`, no modification is
        done.
github-repos
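A hedged usage sketch with Hugging Face transformers; a tokenizer's batched output exposes this method, and calling it is equivalent to asking for tensors at tokenization time:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
batch = tokenizer(["a short sentence", "another one"], padding=True)
batch.convert_to_tensors("pt")   # same effect as return_tensors="pt"
print(batch["input_ids"].shape)  # torch.Size([2, ...])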
def joinpaths(self, *paths):
    if sys.version_info >= (3, 6):
        paths = [os.fspath(path) for path in paths]
    if len(paths) == 1:
        return paths[0]
    if self.is_windows_fs:
        return self._join_paths_with_drive_support(*paths)
    joined_path_segments = []
    sep = self._path_separator(paths[0])
    for path_segment in paths:
        if self._starts_with_root_path(path_segment):
            # An absolute path segment discards everything before it.
            joined_path_segments = [path_segment]
        else:
            if (joined_path_segments
                    and not joined_path_segments[-1].endswith(sep)):
                joined_path_segments.append(sep)
            if path_segment:
                joined_path_segments.append(path_segment)
    return self._matching_string(paths[0], '').join(joined_path_segments)
Mimic os.path.join using the specified path_separator.

Args:
    *paths: (str) Zero or more paths to join.

Returns:
    (str) The paths joined by the path separator, starting with the last
    absolute path in paths.
juraj-google-style
def requires(self, require=None):
    if require is None:
        return self._requires
    if not isinstance(require, dict):
        raise ValueError('__require__')
    for k, v in iteritems(require):
        if k not in self._nodes:
            raise ValueError('__require__[%s]' % str(k))
        if isinstance(v, basestring):
            v = [v]
        elif not isinstance(v, (tuple, list)):
            raise ValueError('__require__[%s]' % str(k))
        for s in v:
            if s not in self._nodes:
                raise ValueError('__require__[%s]: %s' % (str(k), str(v)))
        self._requires[k] = v
Requires

Sets the require rules used to validate the Parent.

Arguments:
    require {dict} -- A dictionary expressing requirements of fields

Raises:
    ValueError

Returns:
    None
juraj-google-style
def ip_mask(ip_addr_and_mask, return_tuple=True):
    regex_ip_and_mask = __re.compile(
        r"^((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\."
        r"((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\."
        r"((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\."
        r"((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))"
        r"/((3[0-2])|([1-2]?[0-9]))$")
    if return_tuple:
        while not regex_ip_and_mask.match(ip_addr_and_mask):
            print("Not a good IP and CIDR mask combo.")
            print("Please try again.")
            ip_addr_and_mask = input(
                "Please enter an IP address and mask in the following "
                "format x.x.x.x/x: ")
        ip_cidr_split = ip_addr_and_mask.split("/")
        ip_addr = ip_cidr_split[0]
        cidr = ip_cidr_split[1]
        return ip_addr, cidr
    elif not return_tuple:
        if not regex_ip_and_mask.match(ip_addr_and_mask):
            return False
        else:
            return True
Function to check if an address and CIDR mask is good.

Args:
    ip_addr_and_mask: IP address and mask in the following format
        192.168.1.1/24
    return_tuple: Set to True it returns an IP and mask in a tuple,
        set to False returns True or False

Returns:
    see return_tuple for return options
juraj-google-style
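With return_tuple=False the function validates without prompting, which makes it easy to exercise non-interactively:

assert ip_mask("192.168.1.1/24", return_tuple=False) is True
assert ip_mask("192.168.1.1/33", return_tuple=False) is False  # mask > 32
ip, cidr = ip_mask("10.0.0.5/8")  # valid input returns ("10.0.0.5", "8")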
def set_weights(self, weights):
    params = self.weights
    if len(params) != len(weights):
        raise ValueError(
            'Length of the specified weight list (' + str(len(weights)) +
            ') does not match the number of weights of the optimizer (' +
            str(len(params)) + ')')
    weight_value_tuples = []
    param_values = backend.batch_get_value(params)
    for pv, p, w in zip(param_values, params, weights):
        if pv.shape != w.shape:
            raise ValueError(
                'Optimizer weight shape ' + str(pv.shape) +
                ' not compatible with provided weight shape ' + str(w.shape))
        weight_value_tuples.append((p, w))
    backend.batch_set_value(weight_value_tuples)
Sets the weights of the optimizer, from Numpy arrays.

Should only be called after computing the gradients (otherwise the
optimizer has no weights).

Args:
    weights: a list of Numpy arrays. The number of arrays and their shape
        must match number of the dimensions of the weights of the optimizer
        (i.e. it should match the output of `get_weights`).

Raises:
    ValueError: in case of incompatible weight shapes.
github-repos
def get_inst_info(qry_string):
    qry_prefix = "EC2C.describe_instances("
    qry_real = qry_prefix + qry_string + ")"
    # The query string is evaluated as Python code against the EC2 client,
    # so it must come from a trusted source.
    qry_results = eval(qry_real)
    return qry_results
Get details for instances that match the qry_string.

Execute a query against the AWS EC2 client object, that is based on the
contents of qry_string.

Args:
    qry_string (str): the query to be used against the aws ec2 client.

Returns:
    qry_results (dict): raw information returned from AWS.
juraj-google-style
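The eval() indirection can be avoided entirely; a sketch of the equivalent direct boto3 call, assuming EC2C is a boto3 EC2 client as the snippet implies:

import boto3

EC2C = boto3.client("ec2")

def get_inst_info_direct(filters):
    # filters: list of {"Name": ..., "Values": [...]} dicts, the same
    # structure a describe_instances query string would express.
    return EC2C.describe_instances(Filters=filters)

results = get_inst_info_direct(
    [{"Name": "instance-state-name", "Values": ["running"]}])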
def is_scalar(value):
    return np.isscalar(value) or (
        isinstance(value, np.ndarray) and len(np.squeeze(value).shape) == 0)
Test if the given value is a scalar.

This function also works with memory mapped array values, in contrast to
the numpy is_scalar method.

Args:
    value: the value to test for being a scalar value

Returns:
    boolean: if the given value is a scalar or not
codesearchnet
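A runnable check of the scalar test, including the zero-dimensional ndarray case that np.isscalar alone misses:

import numpy as np

def is_scalar(value):
    return np.isscalar(value) or (
        isinstance(value, np.ndarray) and len(np.squeeze(value).shape) == 0)

assert is_scalar(3.5)
assert is_scalar(np.array(3.5))         # 0-d array
assert not np.isscalar(np.array(3.5))   # why the extra branch exists
assert not is_scalar(np.array([1.0, 2.0]))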
def _obj_to_path(obj):
    if obj is None:
        return obj
    if inspect.isclass(obj) or inspect.isfunction(obj):
        fetched = getattr(sys.modules[obj.__module__], obj.__name__, None)
        if fetched is None:
            raise ValueError(
                "Object %r must be defined on the top level of a module." % obj)
        return "%s.%s" % (obj.__module__, obj.__name__)
    raise TypeError("Unexpected type %s." % type(obj))
Returns the fully qualified path to the object.

Args:
    obj: obj must be a new style top level class, or a top level function.
        No inner function or static method.

Returns:
    Fully qualified path to the object.

Raises:
    TypeError: when argument obj has unsupported type.
    ValueError: when obj can't be discovered on the top level.
juraj-google-style
def get_sequence_length_feature_key_name_from_feature_key_name(feature_name):
    return feature_name + _SEQUENCE_FEATURE_LENGTH_POSTFIX
Gets the name of the sequence length feature from that of the base feature.

Args:
    feature_name: The feature key of a sequence column.

Returns:
    A string which is the feature key for the associated feature length
    column.
github-repos
def _output_dir(self, ext, is_instance=False, interpolatable=False,
                autohinted=False, is_variable=False):
    assert not (is_variable and any([is_instance, interpolatable]))
    if is_variable:
        dir_prefix = "variable_"
    elif is_instance:
        dir_prefix = "instance_"
    else:
        dir_prefix = "master_"
    dir_suffix = "_interpolatable" if interpolatable else ""
    output_dir = dir_prefix + ext + dir_suffix
    if autohinted:
        output_dir = os.path.join("autohinted", output_dir)
    return output_dir
Generate an output directory.

Args:
    ext: extension string.
    is_instance: The output is instance font or not.
    interpolatable: The output is interpolatable or not.
    autohinted: The output is autohinted or not.
    is_variable: The output is variable font or not.

Return:
    output directory string.
juraj-google-style
def convert_elementwise_sub(params, w_name, scope_name, inputs, layers,
                            weights, names):
    print('Converting elementwise_sub ...')
    model0 = layers[inputs[0]]
    model1 = layers[inputs[1]]

    if names == 'short':
        tf_name = 'S' + random_string(7)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())

    sub = keras.layers.Subtract(name=tf_name)
    layers[scope_name] = sub([model0, model1])
Convert elementwise subtraction.

Args:
    params: dictionary with layer parameters
    w_name: name prefix in state_dict
    scope_name: pytorch scope name
    inputs: pytorch node inputs
    layers: dictionary with keras tensors
    weights: pytorch state_dict
    names: use short names for keras layers
juraj-google-style
def _flag_value_as_list(self, wanted_flag_name):
    string_value_list = []
    found, flag_value = self.get_flag_value(wanted_flag_name)
    if found:
        assert flag_value is not None
        string_value_list = flag_value.split(',')
    return string_value_list
Returns the string list of a TensorTracer flag.

Args:
    wanted_flag_name: the name of the flag we are looking for.

Returns:
    The list value of the flag.
github-repos
def map_shape_structure(func, structure):
    return tree_impl.map_shape_structure(func, structure)
Variant of keras.tree.map_structure that operates on shape tuples.

Tuples containing ints and Nones are considered shapes and passed to
`func`.

Args:
    func: Callable applied to each shape tuple found in the structure.
    structure: Arbitrarily nested structure.

Returns:
    The same structure with `func` applied.
github-repos
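A small illustration of the intended semantics; the import path is an assumption (the function lives in Keras 3's private keras.src.tree package), so treat this as a sketch rather than a stable API:

from keras.src.tree import map_shape_structure  # assumed import path

structure = {"a": (None, 32), "b": [(8,), (None, 4)]}
batched = map_shape_structure(lambda shape: (1,) + tuple(shape), structure)
# {"a": (1, None, 32), "b": [(1, 8), (1, None, 4)]}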
def detect_response_encoding(response, is_html=False, peek=131072):
    encoding = get_heading_encoding(response)
    encoding = wpull.string.detect_encoding(
        wpull.util.peek_file(response.body, peek),
        encoding=encoding,
        is_html=is_html
    )
    _logger.debug(__('Got encoding: {0}', encoding))
    return encoding
Return the likely encoding of the response document.

Args:
    response (Response): An instance of :class:`.http.Response`.
    is_html (bool): See :func:`.util.detect_encoding`.
    peek (int): The maximum number of bytes of the document to be analyzed.

Returns:
    ``str``, ``None``: The codec name.
juraj-google-style
def _build_case(branch_index, branch_graphs, branch_inputs, name=None,
                lower_using_switch_merge=None):
    _make_indexed_slices_indices_types_match(_CASE, branch_graphs)
    _check_same_outputs(_CASE, branch_graphs)
    case_inputs = _make_inputs_match(branch_graphs, branch_inputs)

    stateful_ops = []
    for bg in branch_graphs:
        stateful_ops.extend([
            op for op in bg.get_operations()
            if auto_control_deps.op_is_stateful(op)
        ])

    if stateful_ops:
        op_fn = gen_functional_ops.case
    else:
        op_fn = gen_functional_ops.stateless_case

    with ops.control_dependencies(
        sum((list(bg.function_captures.control) for bg in branch_graphs), [])):

        def _make_op(inputs):
            case_op, tensors = util.get_op_and_outputs(
                op_fn(
                    branch_index,
                    inputs,
                    [t.dtype for t in branch_graphs[0].outputs],
                    [util.create_new_tf_function(g) for g in branch_graphs],
                    output_shapes=_get_output_shapes(
                        *[g.outputs for g in branch_graphs]),
                    name=name))
            _copy_handle_data(tensors, *[g.outputs for g in branch_graphs])
            if case_op is not None:
                util.maybe_set_lowering_attr(case_op, lower_using_switch_merge)
                util.maybe_propagate_compile_time_consts_in_xla(case_op)
                _set_read_only_resource_inputs_attr(case_op, branch_graphs)
                # Prevent fetching since the variant outputs can't be fetched
                # directly.
                case_op.graph.prevent_fetching(case_op)
                # Store the branch graphs so they can be reused during the
                # gradient computation.
                for i, bg in enumerate(branch_graphs):
                    bg.outer_graph = ops.get_default_graph()
                    setattr(case_op, '_branch_graph_{}'.format(i), bg)
            return tensors

        tensors = util.run_as_function_for_tape_gradients(_make_op, case_inputs)

    tensors = [array_ops.identity(t) for t in tensors]
    return _pack_sequence_as(branch_graphs[0].structured_outputs, tensors)
Creates an `Case` op from `branch_index`, branch graphs and inputs.

Note that this modifies `branch_graphs` to make the inputs match, and to
output all intermediates values so they're available for the gradient
computation.

`branch_graphs` need not have the same input types, but they must have the
same output types.

Args:
    branch_index: integer Tensor
    branch_graphs: List of FuncGraph
    branch_inputs: List of lists of Tensors to be passed to corresponding
        branch_graph as input.
    name: the name for the Case op.
    lower_using_switch_merge: Lower this op using switch merge ops
        (optional).

Returns:
    A list of Tensors which are the outputs of the Case op. Does not
    include added intermediate outputs.
github-repos
def Delete(self, request, global_params=None):
    config = self.GetMethodConfig('Delete')
    return self._RunMethod(config, request, global_params=global_params)
Delete an association between a GCP project and a GitHub Enterprise server.

Args:
    request: (CloudbuildProjectsLocationsGithubEnterpriseConfigsDeleteRequest)
        input message
    global_params: (StandardQueryParameters, default: None) global arguments

Returns:
    (Operation) The response message.
github-repos
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
    if not plist_name.startswith(self.PLIST_PATH):
        raise errors.WrongPlistPlugin(self.NAME, plist_name)
    super(AppleAccountPlugin, self).Process(
        parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)
Check if it is a valid Apple account plist file name.

Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    plist_name (str): name of the plist.
    top_level (dict[str, object]): plist top-level key.
juraj-google-style
def _parse_ospf_process_id(self, config):
    match = re.search(r'^router ospf (\d+)', config)
    return dict(ospf_process_id=int(match.group(1)))
Parses config file for the OSPF proc ID.

Args:
    config (str): Running configuration

Returns:
    dict: key: ospf_process_id (int)
codesearchnet
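A standalone check of the regex against a config fragment; re.M is added here so the ^ anchor also matches mid-document, whereas the method above expects the config block to start at the command:

import re

def parse_ospf_process_id(config):
    match = re.search(r'^router ospf (\d+)', config, re.M)
    return dict(ospf_process_id=int(match.group(1)))

config = """hostname leaf1
router ospf 42
   router-id 1.1.1.1
"""
assert parse_ospf_process_id(config) == {'ospf_process_id': 42}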
def __init__(self, action, *, payload=None):
    self.action = action
    self.payload = payload if payload is not None else {}
    self.uid = uuid.uuid4()
Initialise the request object.

Args:
    action (str): A string representing the requested action that should
        be executed by the server.
    payload (dict): A dictionary with data that is available to the action.
juraj-google-style
def replace_symbols(text, form='NFKD', excluded=None, replacement=''):
    if excluded is None:
        excluded = set()
    categories = set(['Mn', 'Sc', 'Sk', 'Sm', 'So'])
    return ''.join(
        c if unicodedata.category(c) not in categories or c in excluded
        else replacement
        for c in unicodedata.normalize(form, text))
Replace symbols in text.

Removes symbols from input text or replaces them with a string if
specified.

Args:
    text: The text to be processed.
    form: Unicode form.
    excluded: Set of unicode characters to exclude.
    replacement: New text that will replace symbols.

Returns:
    The text without symbols.
codesearchnet
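Runnable examples: currency and math signs fall in the stripped Unicode categories, and NFKD also splits accents (category Mn) off their letters:

import unicodedata

def replace_symbols(text, form='NFKD', excluded=None, replacement=''):
    if excluded is None:
        excluded = set()
    categories = {'Mn', 'Sc', 'Sk', 'Sm', 'So'}
    return ''.join(
        c if unicodedata.category(c) not in categories or c in excluded
        else replacement
        for c in unicodedata.normalize(form, text))

assert replace_symbols("10€") == "10"
assert replace_symbols("10€", excluded={"€"}) == "10€"
assert replace_symbols("café") == "cafe"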
def pop(self, name, defval=None):
    valu = self.info.pop(name, defval)
    lkey = self.pref + name.encode('utf8')
    self.slab.pop(lkey, db=self.db)
    return valu
Pop a name from the SlabDict.

Args:
    name (str): The name to remove.
    defval (obj): The default value to return if the name is not present.

Returns:
    object: The object stored in the SlabDict, or defval if the object was
    not present.
codesearchnet