code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __init__(self, cache_folder, genome_build):
    """Initialise the class with the local cache folder.

    Creates the cache folder and the sqlite cache database on first use,
    then opens a connection to it.

    Args:
        cache_folder: path to the cache folder.
        genome_build: genome build identifier stored alongside cached rows.
    """
    self.api_version = ('1')
    self.genome_build = genome_build
    # Timestamp of when the cache was opened -- presumably used for
    # cache-expiry checks elsewhere in the class (TODO confirm).
    self.today = datetime.today()
    if not os.path.exists(cache_folder):
        os.mkdir(cache_folder)
    path = os.path.join(cache_folder, "ensembl_cache.db")
    if not os.path.exists(path):
        try:
            with sqlite3.connect(path) as conn:
                with conn as cursor:
                    cursor.execute("CREATE TABLE ensembl " \
                        "(key text PRIMARY KEY, genome_build text, " \
                        "cache_date text, api_version text, data blob)")
        except sqlite3.OperationalError:
            # Another process may be creating the table concurrently;
            # back off briefly before opening the connection below.
            time.sleep(random.uniform(1, 5))
    self.conn = sqlite3.connect(path)
    self.conn.row_factory = sqlite3.Row
initialise the class with the local cache folder Args: cache_folder: path to the cache
juraj-google-style
def get_historical_data(nmr_problems):
    """Get the historical tank data.

    Args:
        nmr_problems (int): the number of problems.

    Returns:
        tuple: (observations, nmr_tanks_ground_truth)
    """
    base_observation = np.array([[10, 256, 202, 97]])
    observations = np.tile(base_observation, (nmr_problems, 1))
    nmr_tanks_ground_truth = np.full((nmr_problems,), 276.0)
    return observations, nmr_tanks_ground_truth
Get the historical tank data. Args: nmr_problems (int): the number of problems Returns: tuple: (observations, nmr_tanks_ground_truth)
codesearchnet
def force_rerun(flag, outfile):
    """Check if we should force rerunning of a command if an output file exists.

    Args:
        flag (bool): Flag to force rerun.
        outfile (str): Path to output file which may already exist.

    Returns:
        bool: True if the command should be rerun -- either because it was
        explicitly forced, or because the output file is missing or empty.

    Examples:
        >>> force_rerun(flag=True, outfile='/not/existing/file.txt')
        True
        >>> force_rerun(flag=False, outfile='/not/existing/file.txt')
        True
        >>> force_rerun(flag=True, outfile='./utils.py')
        True
        >>> force_rerun(flag=False, outfile='./utils.py')
        False
    """
    # The original if/elif chain returned literal booleans; the same logic,
    # with the same short-circuit order, is a single boolean expression.
    # (is_non_zero_file is a module-level helper in this file.)
    return flag or not op.exists(outfile) or not is_non_zero_file(outfile)
Check if we should force rerunning of a command if an output file exists. Args: flag (bool): Flag to force rerun. outfile (str): Path to output file which may already exist. Returns: bool: If we should force rerunning of a command Examples: >>> force_rerun(flag=True, outfile='/not/existing/file.txt') True >>> force_rerun(flag=False, outfile='/not/existing/file.txt') True >>> force_rerun(flag=True, outfile='./utils.py') True >>> force_rerun(flag=False, outfile='./utils.py') False
codesearchnet
def quantile_for_single_value(self, **kwargs):
    """Returns quantile of each column or row.

    Returns:
        A new QueryCompiler object containing the quantile of each column
        or row.
    """
    if self._is_transposed:
        # Flip the axis (0 <-> 1) and delegate to the non-transposed form.
        kwargs['axis'] = (kwargs.get('axis', 0) ^ 1)
        return self.transpose().quantile_for_single_value(**kwargs)
    axis = kwargs.get('axis', 0)
    q = kwargs.get('q', 0.5)
    # Only a single scalar quantile is supported by this code path.
    assert (type(q) is float)

    def quantile_builder(df, **kwargs):
        try:
            return pandas.DataFrame.quantile(df, **kwargs)
        except ValueError:
            # Non-numeric partition: contribute an empty result.
            return pandas.Series()
    func = self._build_mapreduce_func(quantile_builder, **kwargs)
    result = self._full_axis_reduce(axis, func)
    # Label the single row/column of the result with the quantile value.
    if (axis == 0):
        result.index = [q]
    else:
        result.columns = [q]
    return result
Returns quantile of each column or row. Returns: A new QueryCompiler object containing the quantile of each column or row.
codesearchnet
def guaranteed_no_diff(modular_file_path, dependencies, models_in_diff):
    """Return whether the modular file is guaranteed to match its modeling file.

    Model is in the diff -> not guaranteed to have no differences.
    Dependency is in the diff -> not guaranteed to have no differences.
    Otherwise -> guaranteed to have no differences.

    Args:
        modular_file_path: The path to the modular file.
        dependencies: A dictionary containing the dependencies of each
            modular file.
        models_in_diff: A set containing the names of the models that have
            been modified.

    Returns:
        A boolean indicating whether the model (code and tests) is
        guaranteed to have no differences.
    """
    model_name = modular_file_path.rsplit('modular_', 1)[1].replace('.py', '')
    if model_name in models_in_diff:
        return False
    # The model is clean iff none of its dependencies' models are modified.
    return all(
        dep.split('.')[-2] not in models_in_diff
        for dep in dependencies[modular_file_path]
    )
Returns whether it is guaranteed to have no differences between the modular file and the modeling file. Model is in the diff -> not guaranteed to have no differences Dependency is in the diff -> not guaranteed to have no differences Otherwise -> guaranteed to have no differences Args: modular_file_path: The path to the modular file. dependencies: A dictionary containing the dependencies of each modular file. models_in_diff: A set containing the names of the models that have been modified. Returns: A boolean indicating whether the model (code and tests) is guaranteed to have no differences.
github-repos
def set_disk_usage(self, total_size, path=None):
    """Change the total size of the file system, preserving the used space.

    Example usage: set the size of an auto-mounted Windows drive.

    Args:
        total_size: The new total size of the filesystem in bytes.
        path: The disk space is changed for the file system device where
            `path` resides. Defaults to the root path.

    Raises:
        IOError: if the new total size is smaller than the used size.
    """
    if path is None:
        path = self.root.name
    mount_point = self._mount_point_for_path(path)
    current_total = mount_point['total_size']
    shrinking_below_usage = (
        current_total is not None and mount_point['used_size'] > total_size)
    if shrinking_below_usage:
        self.raise_io_error(errno.ENOSPC, path)
    mount_point['total_size'] = total_size
Changes the total size of the file system, preserving the used space. Example usage: set the size of an auto-mounted Windows drive. Args: total_size: The new total size of the filesystem in bytes. path: The disk space is changed for the file system device where `path` resides. Defaults to the root path (e.g. '/' on Unix systems). Raises: IOError: if the new space is smaller than the used size.
juraj-google-style
def movies_box_office(self, **kwargs):
    """Get the top box office earning movies from the API.

    Sorted by most recent weekend gross ticket sales.

    Args:
        limit (optional): limits the number of movies returned, default=10.
        country (optional): localized data for selected country, default="us".

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self._get_path('movies_box_office')
    payload = self._GET(endpoint, kwargs)
    self._set_attrs_to_values(payload)
    return payload
Gets the top box office earning movies from the API. Sorted by most recent weekend gross ticket sales. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict representation of the JSON returned from the API.
codesearchnet
def get_data(self, url, *args, **kwargs):
    """Get content under the provided url as text.

    Args:
        url: address of the wanted data.
        **kwargs: optional extras (e.g. ``additional_headers``) forwarded to
            the header-building helper.

    Returns:
        The response body as a string, or ``None`` for any non-200 status.
    """
    headers = self._prepare_headers(**kwargs)
    response = self._conn.get(url, headers=headers)
    return response.text if response.status_code == 200 else None
Gets data from url as text Returns content under the provided url as text Args: **url**: address of the wanted data .. versionadded:: 0.3.2 **additional_headers**: (optional) Additional headers to be used with request Returns: string
juraj-google-style
def base_multinode_parser():
    """Create a parser with arguments specific to sending HTTP requests
    to multiple REST APIs.

    Returns:
        {ArgumentParser}: Base parser with default HTTP args.
    """
    parser = ArgumentParser(add_help=False)
    parser.add_argument(
        'urls',
        type=str,
        nargs='+',
        help="The URLs of the validator's REST APIs of interest, separated by commas or spaces. (no default)")
    parser.add_argument(
        '--users',
        type=str,
        action='append',
        metavar='USERNAME[:PASSWORD]',
        help='Specify the users to authorize requests, in the same order as the URLs, separate by commas. Passing empty strings between commas is supported.')
    return parser
Creates a parser with arguments specific to sending HTTP requests to multiple REST APIs. Returns: {ArgumentParser}: Base parser with default HTTP args
codesearchnet
def _get_flags(osm_obj): flags = [] if osm_obj.visible: flags.append('visible') if osm_obj.user: flags.append('user: %s' % osm_obj.user) if osm_obj.timestamp: flags.append('timestamp: %s' % osm_obj.timestamp.isoformat()) if osm_obj.tags: flags.append(', '.join('%s: %s' % (k, v) for k, v in sorted(osm_obj.tags.items()))) return flags
Create element independent flags output. Args: osm_obj (Node): Object with OSM-style metadata Returns: list: Human readable flags output
juraj-google-style
def _ConstructAndTestGradient(self, pool_func, input_sizes, output_sizes, window_rows, window_cols, row_stride, col_stride, padding, data_format, use_gpu, x_init_value=None):
    """Verifies the gradients of the max or avg pooling function.

    Args:
        pool_func: Function to be called, e.g. nn_ops.max_pool or
            nn_ops.avg_pool.
        input_sizes: Input tensor dimensions (NHWC order).
        output_sizes: Output tensor dimensions (NHWC order).
        window_rows: kernel size in row dim.
        window_cols: kernel size in col dim.
        row_stride: Row Stride.
        col_stride: Col Stride.
        padding: Padding type (or explicit padding list).
        data_format: Data format ('NHWC' or 'NCHW').
        use_gpu: whether we are running on GPU.
        x_init_value: Values to be passed to the gradient checker.
    """
    # Batch and channel dims must match between input and output.
    assert input_sizes[0] == output_sizes[0]
    assert input_sizes[3] == output_sizes[3]
    total_size = 1
    for s in input_sizes:
        total_size *= s
    # Deterministic ramp input 1..total_size.
    x = [f * 1.0 for f in range(1, total_size + 1)]
    with self.cached_session(use_gpu=use_gpu):
        input_tensor = constant_op.constant(x, shape=input_sizes, name='input')
        if pool_func == nn_ops.avg_pool:
            func_name = 'avg_pool'
            err_tolerance = 0.0001
        else:
            # Max pool: distinct input values avoid gradient ties.
            if x_init_value is None:
                x_init_value = np.asarray(np.arange(1, total_size + 1), dtype=np.float32).reshape(input_sizes)
            func_name = 'max_pool'
            err_tolerance = 0.001
        if data_format == 'NCHW':
            # Transpose kernel/stride specs and the input itself to NCHW.
            ksize = [1, 1, window_rows, window_cols]
            strides = [1, 1, row_stride, col_stride]
            if isinstance(padding, list):
                padding = test_util.NHWCToNCHW(padding)
            t = test_util.NHWCToNCHW(input_tensor)
        else:
            ksize = [1, window_rows, window_cols, 1]
            strides = [1, row_stride, col_stride, 1]
            t = input_tensor
        t = pool_func(t, ksize=ksize, strides=strides, padding=padding, data_format=data_format, name=func_name)
        if data_format == 'NCHW':
            # Convert back so the error is computed against NHWC sizes.
            t = test_util.NCHWToNHWC(t)
        err = gradient_checker.compute_gradient_error(input_tensor, input_sizes, t, output_sizes, x_init_value=x_init_value, delta=0.01)
        tf_logging.info('%s gradient error = %.4f' % (func_name, err))
        self.assertLess(err, err_tolerance)
Verifies the gradients of the max or avg pooling function. Args: pool_func: Function to be called, co.MaxPool, co.AvgPool, or the Lua version. input_sizes: Input tensor dimensions. output_sizes: Output tensor dimensions. window_rows: kernel size in row dim window_cols: kernel size in col dim row_stride: Row Stride. col_stride: Col Stride. padding: Padding type. data_format: Data format. use_gpu: whether we are running on GPU x_init_value: Values to be passed to the gradient checker.
github-repos
def Reference(uri, meaning=None):
    """Represents external information, typically original obs data and
    metadata.

    Args:
        uri (str): Uniform resource identifier for external data, e.g. a
            FITS file.
        meaning (str): The nature of the document referenced, e.g. what
            instrument and filter was used to create the data?

    Returns:
        An lxml ``objectify`` Reference element carrying the attributes.
    """
    attributes = {'uri': uri}
    if meaning is not None:
        attributes['meaning'] = meaning
    return objectify.Element('Reference', attributes)
Represents external information, typically original obs data and metadata. Args: uri(str): Uniform resource identifier for external data, e.g. FITS file. meaning(str): The nature of the document referenced, e.g. what instrument and filter was used to create the data?
juraj-google-style
def _parse_schema_resource(info): if ('fields' not in info): return () schema = [] for r_field in info['fields']: name = r_field['name'] field_type = r_field['type'] mode = r_field.get('mode', 'NULLABLE') description = r_field.get('description') sub_fields = _parse_schema_resource(r_field) schema.append(SchemaField(name, field_type, mode, description, sub_fields)) return schema
Parse a resource fragment into a schema field. Args: info: (Mapping[str->dict]): should contain a "fields" key to be parsed Returns: (Union[Sequence[:class:`google.cloud.bigquery.schema.SchemaField`],None]) a list of parsed fields, or ``None`` if no "fields" key found.
codesearchnet
def SetValue(self, value, raise_on_error=True):
    """Receives a value and fills it into a DataBlob.

    Args:
        value: value to set.
        raise_on_error: if True, raise if we can't serialize. If False, set
            the key to an error string.

    Returns:
        self

    Raises:
        TypeError: if the value can't be serialized and raise_on_error is
            True.
    """
    # Maps primitive Python types to DataBlob member names. Text/long
    # indicate Python 2 compatibility -- TODO confirm target version.
    type_mappings = [(Text, "string"), (bytes, "data"), (bool, "boolean"), (int, "integer"), (long, "integer"), (dict, "dict"), (float, "float")]
    if value is None:
        self.none = "None"
    elif isinstance(value, rdfvalue.RDFValue):
        self.rdf_value.data = value.SerializeToString()
        self.rdf_value.age = int(value.age)
        self.rdf_value.name = value.__class__.__name__
    elif isinstance(value, (list, tuple)):
        # Recursively serialize each element of the sequence.
        self.list.content.Extend([
            DataBlob().SetValue(v, raise_on_error=raise_on_error) for v in value
        ])
    elif isinstance(value, set):
        self.set.content.Extend([
            DataBlob().SetValue(v, raise_on_error=raise_on_error) for v in value
        ])
    elif isinstance(value, dict):
        self.dict.FromDict(value, raise_on_error=raise_on_error)
    else:
        for type_mapping, member in type_mappings:
            if isinstance(value, type_mapping):
                setattr(self, member, value)
                return self
        message = "Unsupported type for ProtoDict: %s" % type(value)
        if raise_on_error:
            raise TypeError(message)
        # Best-effort mode: store the error message itself as the value.
        setattr(self, "string", message)
    return self
Receives a value and fills it into a DataBlob. Args: value: value to set raise_on_error: if True, raise if we can't serialize. If False, set the key to an error string. Returns: self Raises: TypeError: if the value can't be serialized and raise_on_error is True
juraj-google-style
def remove_profile(self, profile=None):
    """Remove profile from credentials file.

    Args:
        profile (str): Credentials profile to remove.

    Returns:
        list: List of affected document IDs.
    """
    with self.db:
        condition = self.query.profile == profile
        return self.db.remove(condition)
Remove profile from credentials file. Args: profile (str): Credentials profile to remove. Returns: list: List of affected document IDs.
juraj-google-style
def protect(self, developers_can_push=False, developers_can_merge=False, **kwargs):
    """Protect the branch.

    Args:
        developers_can_push (bool): Set to True if developers are allowed
            to push to the branch.
        developers_can_merge (bool): Set to True if developers are allowed
            to merge to the branch.
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabProtectError: If the branch could not be protected.
    """
    # URL-encode slashes in the id (e.g. "group/project").
    encoded_id = self.get_id().replace('/', '%2F')
    url = '%s/%s/protect' % (self.manager.path, encoded_id)
    payload = {
        'developers_can_push': developers_can_push,
        'developers_can_merge': developers_can_merge,
    }
    self.manager.gitlab.http_put(url, post_data=payload, **kwargs)
    self._attrs['protected'] = True
Protect the branch. Args: developers_can_push (bool): Set to True if developers are allowed to push to the branch developers_can_merge (bool): Set to True if developers are allowed to merge to the branch **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabProtectError: If the branch could not be protected
codesearchnet
def get_paths(self):
    """Get all paths from the root to the leaves.

    For example, given a chain like ``{'a': {'b': {'c': None}}}``, this
    method would return ``[['a', 'b', 'c']]``.

    Returns:
        A list of lists of paths.
    """
    paths = []
    for key, child in six.iteritems(self):
        if isinstance(child, TreeMap) and child:
            # Non-empty subtree: prefix this key onto each child path.
            for sub_path in child.get_paths():
                paths.append([key] + sub_path)
        else:
            # Leaf (or empty subtree): the key alone is a full path.
            paths.append([key])
    return paths
Get all paths from the root to the leaves. For example, given a chain like `{'a':{'b':{'c':None}}}`, this method would return `[['a', 'b', 'c']]`. Returns: A list of lists of paths.
codesearchnet
def load_dot_env_file(dot_env_path):
    """Load environment variables from a .env file.

    Each meaningful line must look like ``NAME=value`` or ``NAME: value``.
    Blank lines and ``#`` comment lines are ignored (previously they raised
    FileFormatError, which broke on perfectly ordinary .env files).

    Args:
        dot_env_path (str): .env file path.

    Returns:
        dict: environment variables mapping, e.g.
            {"UserName": "debugtalk", "Password": "123456"}

    Raises:
        exceptions.FileFormatError: If a non-comment, non-blank line has no
            '=' or ':' separator.
    """
    if not os.path.isfile(dot_env_path):
        return {}

    logger.log_info('Loading environment variables from {}'.format(dot_env_path))
    env_variables_mapping = {}
    with io.open(dot_env_path, 'r', encoding='utf-8') as fp:
        for line in fp:
            stripped = line.strip()
            # Robustness fix: tolerate blank lines and comments.
            if not stripped or stripped.startswith('#'):
                continue
            if '=' in stripped:
                variable, value = stripped.split('=', 1)
            elif ':' in stripped:
                variable, value = stripped.split(':', 1)
            else:
                raise exceptions.FileFormatError('.env format error')
            env_variables_mapping[variable.strip()] = value.strip()

    utils.set_os_environ(env_variables_mapping)
    return env_variables_mapping
load .env file. Args: dot_env_path (str): .env file path Returns: dict: environment variables mapping { "UserName": "debugtalk", "Password": "123456", "PROJECT_KEY": "ABCDEFGH" } Raises: exceptions.FileFormatError: If .env file format is invalid.
codesearchnet
def get_meshes_vec(step, var):
    """Return vector field components along with coordinates meshes.

    Only works properly in 2D geometry.

    Args:
        step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
            instance.
        var (str): vector field name.

    Returns:
        tuple of :class:`numpy.array`: xmesh, ymesh, fldx, fldy
            2D arrays containing respectively the x position, y position,
            x component and y component of the requested vector field.
    """
    if step.geom.twod_xz:
        # XZ plane: use components '1' (x) and '3' (z).
        xmesh, ymesh = step.geom.x_mesh[:, 0, :], step.geom.z_mesh[:, 0, :]
        vec1 = step.fields[var + '1'][:, 0, :, 0]
        vec2 = step.fields[var + '3'][:, 0, :, 0]
    elif step.geom.cartesian and step.geom.twod_yz:
        # Cartesian YZ plane: use components '2' (y) and '3' (z).
        xmesh, ymesh = step.geom.y_mesh[0, :, :], step.geom.z_mesh[0, :, :]
        vec1 = step.fields[var + '2'][0, :, :, 0]
        vec2 = step.fields[var + '3'][0, :, :, 0]
    else:
        # Curvilinear YZ case: rotate the (phi, r) components into
        # cartesian (x, y) using the phi mesh.
        xmesh, ymesh = step.geom.x_mesh[0, :, :], step.geom.y_mesh[0, :, :]
        pmesh = step.geom.p_mesh[0, :, :]
        vec_phi = step.fields[var + '2'][0, :, :, 0]
        vec_r = step.fields[var + '3'][0, :, :, 0]
        vec1 = vec_r * np.cos(pmesh) - vec_phi * np.sin(pmesh)
        vec2 = vec_phi * np.cos(pmesh) + vec_r * np.sin(pmesh)
    return xmesh, ymesh, vec1, vec2
Return vector field components along with coordinates meshes. Only works properly in 2D geometry. Args: step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData instance. var (str): vector field name. Returns: tuple of :class:`numpy.array`: xmesh, ymesh, fldx, fldy 2D arrays containing respectively the x position, y position, x component and y component of the requested vector field.
juraj-google-style
def option(self, key, value=None, **kwargs):
    """Creates a new option inside a section.

    Args:
        key (str): key of the option.
        value (str or None): value of the option.
        **kwargs: are passed to the constructor of :class:`Option`.

    Returns:
        self for chaining.

    Raises:
        ValueError: if the current container is not a section.
    """
    if not isinstance(self._container, Section):
        raise ValueError("Options can only be added inside a section!")
    new_option = Option(key, value, container=self._container, **kwargs)
    # Re-assign through the attribute as the original did -- presumably to
    # trigger value normalisation in Option (TODO confirm).
    new_option.value = value
    self._container.structure.insert(self._idx, new_option)
    self._idx += 1
    return self
Creates a new option inside a section Args: key (str): key of the option value (str or None): value of the option **kwargs: are passed to the constructor of :class:`Option` Returns: self for chaining
juraj-google-style
def truncated_normal_log_likelihood(params, low, high, data):
    """Calculate the negative log likelihood of the truncated normal
    distribution.

    Args:
        params: tuple with (mean, std), the parameters under which we
            evaluate the model.
        low (float): the lower truncation bound.
        high (float): the upper truncation bound.
        data (ndarray): the one dimensional list of data points for which
            we want to calculate the likelihood.

    Returns:
        float: the negative log likelihood of observing the given data
            under the given parameters. Meant for use in minimization
            routines.
    """
    mu, sigma = params[0], params[1]
    if sigma == 0:
        # Degenerate scale: report as maximally unlikely for the minimizer.
        return np.inf
    log_pdf_sum = np.sum(norm.logpdf(data, mu, sigma))
    # Normalise by the probability mass inside the truncation bounds.
    truncation_mass = norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma)
    log_likelihood = log_pdf_sum - len(data) * np.log(truncation_mass)
    return -log_likelihood
Calculate the log likelihood of the truncated normal distribution. Args: params: tuple with (mean, std), the parameters under which we evaluate the model low (float): the lower truncation bound high (float): the upper truncation bound data (ndarray): the one dimension list of data points for which we want to calculate the likelihood Returns: float: the negative log likelihood of observing the given data under the given parameters. This is meant to be used in minimization routines.
codesearchnet
def __init__(self, maxsize, out_deque=None, **kw):
    """Constructor.

    Args:
        maxsize (int): the maximum number of entries in the queue.
        out_deque (:class:`collections.deque`): a deque in which to add
            items that expire from the cache.
        **kw: the other keyword args supported by the constructor to
            :class:`cachetools.LRUCache`.

    Raises:
        ValueError: if out_deque is not a collections.deque.
    """
    super(DequeOutLRUCache, self).__init__(maxsize, **kw)
    if out_deque is None:
        self._out_deque = collections.deque()
    elif isinstance(out_deque, collections.deque):
        self._out_deque = out_deque
    else:
        raise ValueError(u'out_deque should be collections.deque')
    self._tracking = {}
Constructor. Args: maxsize (int): the maximum number of entries in the queue out_deque :class:`collections.deque`: a `deque` in which to add items that expire from the cache **kw: the other keyword args supported by constructor to :class:`cachetools.LRUCache` Raises: ValueError: if out_deque is not a collections.deque
juraj-google-style
def alternative_titles(self, **kwargs):
    """Get the alternative titles for a specific movie id.

    Args:
        country: (optional) ISO 3166-1 code.
        append_to_response: (optional) Comma separated, any movie method.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self._get_id_path('alternative_titles')
    payload = self._GET(endpoint, kwargs)
    self._set_attrs_to_values(payload)
    return payload
Get the alternative titles for a specific movie id. Args: country: (optional) ISO 3166-1 code. append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def get_gene_info(ensembl_ids=None, hgnc_symbols=None):
    """Return the genes info based on the transcripts found.

    Ensembl ids take precedence over HGNC symbols when both are given.

    Args:
        ensembl_ids (Optional[list]): list of Ensembl gene ids.
        hgnc_symbols (Optional[list]): list of HGNC gene symbols.

    Returns:
        iterable: an iterable with `Gene` objects.
    """
    # De-duplicate the incoming identifiers.
    uniq_ensembl_ids = set(ensembl_id for ensembl_id in (ensembl_ids or []))
    uniq_hgnc_symbols = set(hgnc_symbol for hgnc_symbol in (hgnc_symbols or []))
    genes = []
    gene_data = []
    if uniq_ensembl_ids:
        for ensembl_id in uniq_ensembl_ids:
            for res in query_gene(ensembl_id=ensembl_id):
                gene_data.append(res)
    elif uniq_hgnc_symbols:
        for hgnc_symbol in uniq_hgnc_symbols:
            query_res = query_gene(hgnc_symbol=hgnc_symbol)
            if query_res:
                for res in query_res:
                    gene_data.append(res)
            else:
                # Unknown symbol: emit a placeholder record so the caller
                # still gets one Gene per requested symbol.
                gene_data.append({
                    'hgnc_symbol': hgnc_symbol,
                    'hgnc_id': None,
                    'ensembl_id': None,
                    'description': None,
                    'chrom': 'unknown',
                    'start': 0,
                    'stop': 0,
                    'hi_score': None,
                    'constraint_score': None,
                })
    for gene in gene_data:
        genes.append(Gene(
            symbol=gene['hgnc_symbol'],
            hgnc_id=gene['hgnc_id'],
            ensembl_id=gene['ensembl_id'],
            description=gene['description'],
            chrom=gene['chrom'],
            start=gene['start'],
            stop=gene['stop'],
            location=get_cytoband_coord(gene['chrom'], gene['start']),
            hi_score=gene['hi_score'],
            constraint_score=gene['constraint_score'],
            omim_number=get_omim_number(gene['hgnc_symbol'])
        ))
    return genes
Return the genes info based on the transcripts found Args: ensembl_ids (Optional[list]): list of Ensembl gene ids hgnc_symbols (Optional[list]): list of HGNC gene symbols Returns: iterable: an iterable with `Gene` objects
juraj-google-style
def _WriteRow(self, output_writer, values):
    """Writes a row of values aligned to the column width.

    Long values are word-wrapped onto continuation lines indented past the
    label column.

    Args:
        output_writer (OutputWriter): output writer.
        values (list[object]): values; values[0] is the label and
            values[1] the value to print.
    """
    # Width left for the value after the label column and " : " separator.
    maximum_row_width = ((self._MAXIMUM_WIDTH - self._column_width) - 3)
    primary_format_string = '{{0:>{0:d}s}} : {{1:s}}\n'.format(self._column_width)
    secondary_format_string = '{{0:<{0:d}s}}{{1:s}}\n'.format((self._column_width + 3))
    if isinstance(values[1], py2to3.STRING_TYPES):
        value_string = values[1]
    else:
        value_string = '{0!s}'.format(values[1])
    if (len(value_string) < maximum_row_width):
        output_writer.Write(primary_format_string.format(values[0], value_string))
        return
    # Greedy word-wrap of the value into lines of at most
    # maximum_row_width characters.
    words = value_string.split()
    current = 0
    lines = []
    word_buffer = []
    for word in words:
        current += (len(word) + 1)
        if (current >= maximum_row_width):
            current = len(word)
            lines.append(' '.join(word_buffer))
            word_buffer = [word]
        else:
            word_buffer.append(word)
    lines.append(' '.join(word_buffer))
    # The first wrapped line carries the label; the rest use a blank label.
    output_writer.Write(primary_format_string.format(values[0], lines[0]))
    for line in lines[1:]:
        output_writer.Write(secondary_format_string.format('', line))
Writes a row of values aligned to the column width. Args: output_writer (OutputWriter): output writer. values (list[object]): values.
codesearchnet
def DeregisterMountPoint(cls, mount_point):
    """Deregisters a path specification mount point.

    Args:
        mount_point (str): mount point identifier.

    Raises:
        KeyError: if the corresponding mount point is not set.
    """
    try:
        del cls._mount_points[mount_point]
    except KeyError:
        raise KeyError('Mount point: {0:s} not set.'.format(mount_point))
Deregisters a path specification mount point. Args: mount_point (str): mount point identifier. Raises: KeyError: if the corresponding mount point is not set.
codesearchnet
def run_multiple_processes(args_list: List[List[str]], die_on_failure: bool = True) -> None:
    """Fire up multiple processes, and wait for them to finish.

    Args:
        args_list: command arguments for each process.
        die_on_failure: see :func:`wait_for_processes`.
    """
    for process_args in args_list:
        start_process(process_args)
    wait_for_processes(die_on_failure=die_on_failure)
Fire up multiple processes, and wait for them to finish. Args: args_list: command arguments for each process die_on_failure: see :func:`wait_for_processes`
codesearchnet
def ask_when_work_is_populated(self, work):
    """When work is already populated asks whether we should continue.

    Prints a warning message that work is populated and asks whether the
    user wants to continue or not.

    Args:
        work: instance of WorkPiecesBase.

    Returns:
        True if we should continue and populate datastore, False if we
        should stop.
    """
    work.read_all_from_datastore()
    # Guard clause: nothing stored yet, safe to proceed without asking.
    if not work.work:
        return True
    print('Work is already written to datastore.\n'
          'If you continue these data will be overwritten and possible corrupted.')
    answer = input_str('Do you want to continue? (type "yes" without quotes to confirm): ')
    return answer == 'yes'
When work is already populated asks whether we should continue. This method prints warning message that work is populated and asks whether user wants to continue or not. Args: work: instance of WorkPiecesBase Returns: True if we should continue and populate datastore, False if we should stop
codesearchnet
def stats(path, hash_type='sha256', follow_symlinks=True):
    """Return a dict containing the stats about a given file.

    Under Windows, `gid` will equal `uid` and `group` will equal `user`.
    The rarely-used Windows 'primary group' is still exposed through the
    `pgroup` and `pgid` properties; the remapping keeps some compatibility
    with Unix behavior.

    Args:
        path (str): The path to the file or directory.
        hash_type (str): The type of hash to return.
        follow_symlinks (bool): If the object specified by ``path`` is a
            symlink, get attributes of the linked file instead of the
            symlink itself. Default is True.

    Returns:
        dict: A dictionary of file/directory stats.

    Raises:
        CommandExecutionError: if the path does not exist.
    """
    if (not os.path.exists(path)):
        raise CommandExecutionError('Path not found: {0}'.format(path))
    # Symlink resolution requires Windows Vista (NT 6.0) or later.
    if (follow_symlinks and (sys.getwindowsversion().major >= 6)):
        path = _resolve_symlink(path)
    pstat = os.stat(path)
    ret = {}
    ret['inode'] = pstat.st_ino
    ret['uid'] = get_uid(path, follow_symlinks=False)
    # Windows remapping: group mirrors owner (see docstring).
    ret['gid'] = ret['uid']
    ret['user'] = uid_to_user(ret['uid'])
    ret['group'] = ret['user']
    ret['pgid'] = get_pgid(path, follow_symlinks)
    ret['pgroup'] = gid_to_group(ret['pgid'])
    ret['atime'] = pstat.st_atime
    ret['mtime'] = pstat.st_mtime
    ret['ctime'] = pstat.st_ctime
    ret['size'] = pstat.st_size
    ret['mode'] = six.text_type(oct(stat.S_IMODE(pstat.st_mode)))
    if hash_type:
        ret['sum'] = get_sum(path, hash_type)
    # Classify the entry; later checks overwrite the default 'file'.
    ret['type'] = 'file'
    if stat.S_ISDIR(pstat.st_mode):
        ret['type'] = 'dir'
    if stat.S_ISCHR(pstat.st_mode):
        ret['type'] = 'char'
    if stat.S_ISBLK(pstat.st_mode):
        ret['type'] = 'block'
    if stat.S_ISREG(pstat.st_mode):
        ret['type'] = 'file'
    if stat.S_ISLNK(pstat.st_mode):
        ret['type'] = 'link'
    if stat.S_ISFIFO(pstat.st_mode):
        ret['type'] = 'pipe'
    if stat.S_ISSOCK(pstat.st_mode):
        ret['type'] = 'socket'
    ret['target'] = os.path.realpath(path)
    return ret
Return a dict containing the stats about a given file Under Windows, `gid` will equal `uid` and `group` will equal `user`. While a file in Windows does have a 'primary group', this rarely used attribute generally has no bearing on permissions unless intentionally configured and is only used to support Unix compatibility features (e.g. Services For Unix, NFS services). Salt, therefore, remaps these properties to keep some kind of compatibility with Unix behavior. If the 'primary group' is required, it can be accessed in the `pgroup` and `pgid` properties. Args: path (str): The path to the file or directory hash_type (str): The type of hash to return follow_symlinks (bool): If the object specified by ``path`` is a symlink, get attributes of the linked file instead of the symlink itself. Default is True Returns: dict: A dictionary of file/directory stats CLI Example: .. code-block:: bash salt '*' file.stats /etc/passwd
codesearchnet
def json_compat_obj_encode(data_type, obj, caller_permissions=None, alias_validators=None, old_style=False, for_msgpack=False, should_redact=False):
    """Encodes an object into a JSON-compatible dict based on its type.

    Args:
        data_type (Validator): Validator for obj.
        obj (object): Object to be serialized.
        caller_permissions (list): The list of raw-string caller
            permissions with which to serialize.

    Returns:
        An object that when passed to json.dumps() will produce a string
        giving the JSON-encoded object. See json_encode() for additional
        information about validation.
    """
    return StoneToPythonPrimitiveSerializer(
        caller_permissions, alias_validators, for_msgpack, old_style, should_redact
    ).encode(data_type, obj)
Encodes an object into a JSON-compatible dict based on its type. Args: data_type (Validator): Validator for obj. obj (object): Object to be serialized. caller_permissions (list): The list of raw-string caller permissions with which to serialize. Returns: An object that when passed to json.dumps() will produce a string giving the JSON-encoded object. See json_encode() for additional information about validation.
codesearchnet
def like_shared_file(self, sharekey=None):
    """'Like' a SharedFile.

    mlkshk doesn't allow you to unlike a sharedfile, so this is permanent.

    Args:
        sharekey (str): Sharekey for the file you want to 'like'.

    Returns:
        Either a SharedFile on success, or an exception on error.
    """
    if not sharekey:
        # Bug fix: the original adjacent string literals concatenated to
        # "...of the file youwant to 'like'." (missing space).
        raise Exception(
            "You must specify a sharekey of the file you "
            "want to 'like'.")
    endpoint = '/api/sharedfile/{sharekey}/like'.format(sharekey=sharekey)
    data = self._make_request("POST", endpoint=endpoint, data=None)
    try:
        sf = SharedFile.NewFromJSON(data)
        sf.liked = True
        return sf
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # propagate; surface the API error payload instead.
        raise Exception("{0}".format(data['error']))
'Like' a SharedFile. mlkshk doesn't allow you to unlike a sharedfile, so this is ~~permanent~~. Args: sharekey (str): Sharekey for the file you want to 'like'. Returns: Either a SharedFile on success, or an exception on error.
juraj-google-style
def delete_resource_group(access_token, subscription_id, rgname):
    """Delete the named resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.

    Returns:
        HTTP response.
    """
    endpoint = '{}/subscriptions/{}/resourcegroups/{}?api-version={}'.format(
        get_rm_endpoint(), subscription_id, rgname, RESOURCE_API)
    return do_delete(endpoint, access_token)
Delete the named resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response.
codesearchnet
def generate_private_key(self):
    """Generate a private key from freshly gathered OS randomness.

    4096 random bytes are base64-encoded, hashed with SHA-256 (a member of
    the SHA-2 family), and the digest is returned in its hexadecimal
    representation.

    Returns:
        bytes: The hexadecimal representation of the hashed binary data.
    """
    entropy = base64.b64encode(os.urandom(4096)).decode('utf-8')
    digest = hashlib.sha256(entropy.encode('utf-8')).digest()
    return binascii.hexlify(digest)
Generates a private key based on the password. SHA-256 is a member of the SHA-2 cryptographic hash functions designed by the NSA. SHA stands for Secure Hash Algorithm. The password is converted to bytes and hashed with SHA-256. The binary output is converted to a hex representation. Args: data (str): The data to be hashed with SHA-256. Returns: bytes: The hexadecimal representation of the hashed binary data.
codesearchnet
def __init__(self, unexpected_method, expected):
    """Init exception.

    Args:
        unexpected_method: MockMethod that was called but was not at the
            head of the expected_method queue.
        expected: MockMethod or UnorderedGroup the method should have
            been in.
    """
    Error.__init__(self)
    self._unexpected_method = unexpected_method
    self._expected = expected
Init exception. Args: # unexpected_method: MockMethod that was called but was not at the head of # the expected_method queue. # expected: MockMethod or UnorderedGroup the method should have # been in. unexpected_method: MockMethod expected: MockMethod or UnorderedGroup
juraj-google-style
def RetrievePluginAsset(self, plugin_name, asset_name):
    """Return the contents of a given plugin asset.

    Args:
        plugin_name: The string name of a plugin.
        asset_name: The string name of an asset.

    Returns:
        The string contents of the plugin asset.

    Raises:
        KeyError: If the asset is not available.
    """
    asset_contents = plugin_asset_util.RetrieveAsset(
        self.path, plugin_name, asset_name)
    return asset_contents
Return the contents of a given plugin asset. Args: plugin_name: The string name of a plugin. asset_name: The string name of an asset. Returns: The string contents of the plugin asset. Raises: KeyError: If the asset is not available.
juraj-google-style
def watch(self, key, pipeline=False):
    """Watch the given key.

    Marks the given key to be watched for conditional execution of a
    transaction.

    Args:
        key (str): Key that needs to be watched.
        pipeline (bool): True, watch within the transaction pipeline.
            Default false.
    """
    target = self._pipeline if pipeline else self._db
    target.watch(key)
Watch the given key. Marks the given key to be watch for conditional execution of a transaction. Args: key (str): Key that needs to be watched pipeline (bool): True, start a transaction block. Default false.
codesearchnet
def _prepare_lambada_data(tmp_dir, data_dir, vocab_size, vocab_filename):
    """Downloading and preparing the dataset.

    Extracts the downloaded archive (and the nested train-novels tar) into
    ``tmp_dir`` and writes a truncated vocabulary file into ``data_dir``.

    Args:
        tmp_dir: temp directory.
        data_dir: data directory.
        vocab_size: size of vocabulary.
        vocab_filename: name of vocab file.
    """
    if not tf.gfile.Exists(data_dir):
        tf.gfile.MakeDirs(data_dir)
    file_path = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
    tar_all = tarfile.open(file_path)
    tar_all.extractall(tmp_dir)
    tar_all.close()
    # The main archive contains a nested tar with the training novels.
    tar_train = tarfile.open(os.path.join(tmp_dir, "train-novels.tar"))
    tar_train.extractall(tmp_dir)
    tar_train.close()
    vocab_path = os.path.join(data_dir, vocab_filename)
    if not tf.gfile.Exists(vocab_path):
        with tf.gfile.GFile(os.path.join(tmp_dir, _VOCAB), "r") as infile:
            reader = csv.reader(infile, delimiter="\t")
            words = [row[0] for row in reader]
            # Prepend UNK and truncate to the requested vocabulary size.
            words = [_UNK] + words[:vocab_size]
        with tf.gfile.GFile(vocab_path, "w") as outfile:
            outfile.write("\n".join(words))
Downloading and preparing the dataset. Args: tmp_dir: tem directory data_dir: data directory vocab_size: size of vocabulary vocab_filename: name of vocab file
juraj-google-style
def _replace_tensors_for_gradient(x, grad):
    """Replaces the tensors in `x` that should be differentiated with `grad`.

    Args:
        x: A `Tensor` or `CompositeTensor`.
        grad: A nested structure of `Tensor`, with the same structure as
            the value returned by `_get_tensors_for_gradient(x)`.

    Returns:
        A `Tensor` or `CompositeTensor`.

    Raises:
        ValueError: if `x` is a `CompositeTensor` that does not implement
            the composite-tensor gradient protocol.
    """
    if not isinstance(x, composite_tensor.CompositeTensor):
        return grad
    if not isinstance(x, CompositeTensorGradientProtocol):
        raise ValueError(f'Type {type(x).__name__} is not supported as a gradient source.')
    composite_gradient = x.__composite_gradient__
    x_components = composite_gradient.get_gradient_components(x)
    if x_components is x:
        grad_components = grad
    else:
        # Recurse into the components, preserving x's structure.
        grad_components = nest.map_structure_up_to(x_components, _replace_tensors_for_gradient, x_components, grad)
    if grad_components is None:
        return None
    return composite_gradient.replace_gradient_components(x, grad_components)
Replaces the tensors in `x` that should be differentiated with `grad`. Args: x: A `Tensor` or `CompositeTensor`. grad: A nested structure of `Tensor`, with the same structure as the value returned by `_get_tensors_for_gradient(x)`. Returns: A `Tensor` or `CompositeTensor`.
github-repos
def get_organization(self):
    """Get the dataset's organization.

    Returns:
        Organization: Dataset's organization.
    """
    owner_org_id = self.data['owner_org']
    return hdx.data.organization.Organization.read_from_hdx(
        owner_org_id, configuration=self.configuration)
Get the dataset's organization. Returns: Organization: Dataset's organization
codesearchnet
def _run_static_range_ptq(src_saved_model_path: str, dst_saved_model_path: str, quant_opts: _QuantizationOptions, representative_dataset: Mapping[str, _RepresentativeDatasetFile], signature_def_map: _SignatureDefMap) -> None:
    """Runs static-range Post-Training Quantization.

    Runs the calibration step with `representative_dataset` to collect
    statistics required for quantization. This produces the quantized
    GraphDef along with the SignatureDefs which might have been modified
    according to the changes in the graph.

    Args:
        src_saved_model_path: Path to the source SavedModel directory.
        dst_saved_model_path: Path to the destination SavedModel directory.
        quant_opts: Quantization options.
        representative_dataset: A map from signature key to the saved
            representative dataset file.
        signature_def_map: Signature def key -> SignatureDef mapping.

    Raises:
        ValueError if the graph doesn't contain a valid signature.
    """
    logging.info('Running static-range post-training quantization.')
    # The pywrap layer takes serialized protos, so serialize everything up
    # front.
    signature_def_map_serialized = _serialize_signature_def_map(signature_def_map)
    dataset_file_map_serialized = {signature_key: dataset_file.SerializeToString() for signature_key, dataset_file in representative_dataset.items()}
    pywrap_quantize_model.quantize_ptq_static_range(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quant_opts.SerializeToString(), signature_keys=list(quant_opts.signature_keys), signature_def_map_serialized=signature_def_map_serialized, py_function_library=py_function_lib.PyFunctionLibrary(), representative_dataset_file_map_serialized=dataset_file_map_serialized)
Runs static-range Post-Training Quantization. Runs static-range PTQ for the model. Runs the calibration step with `representative_dataset` to collect statistics required for quantization. This produces the quantized GraphDef along with the SignatureDefs which might have been modified according to the changes in the graph. Args: src_saved_model_path: Path to the source SavedModel directory. dst_saved_model_path: Path to the destination SavedModel directory. quant_opts: Quantization options. representative_dataset: A map from signature key to the saved representative dataset file. signature_def_map: Signature def key -> SignatureDef mapping. Raises: ValueError if the graph doesn't contain a valid signature.
github-repos
def psd(data, dt, ndivide=1, window=hanning, overlap_half=False):
    """Calculate power spectrum density of data.

    Args:
        data (np.ndarray): Input data.
        dt (float): Time between each data.
        ndivide (int): Do averaging: split data into ndivide segments, get
            the psd of each, and average them.
        window: Window function applied to each segment (None for boxcar).
        overlap_half (bool): Split data to half-overlapped regions.

    Returns:
        vk (np.ndarray): Frequency.
        psd (np.ndarray): PSD.
    """
    logger = getLogger('decode.utils.ndarray.psd')
    if overlap_half:
        step = int((len(data) / (ndivide + 1)))
        size = (step * 2)
    else:
        step = int((len(data) / ndivide))
        size = step
    # Warn when lengths are not powers of two (FFT efficiency concern).
    if (bin(len(data)).count('1') != 1):
        logger.warning('warning: length of data is not power of 2: {}'.format(len(data)))
        size = int((len(data) / ndivide))
        if (bin(size).count('1') != 1.0):
            if overlap_half:
                logger.warning('warning: ((length of data) / (ndivide+1)) * 2 is not power of 2: {}'.format(size))
            else:
                logger.warning('warning: (length of data) / ndivide is not power of 2: {}'.format(size))
    psd = np.zeros(size)
    T = ((size - 1) * dt)
    vs = (1 / dt)
    # Keep only the non-negative frequencies.
    vk_ = fftfreq(size, dt)
    vk = vk_[np.where((vk_ >= 0))]
    for i in range(ndivide):
        d = data[(i * step):((i * step) + size)]
        if (window is None):
            w = np.ones(size)
            corr = 1.0
        else:
            w = window(size)
            # Correction for the power removed by the window.
            corr = np.mean((w ** 2))
        psd = (psd + ((((2 * (np.abs(fft((d * w))) ** 2)) / size) * dt) / corr))
    return (vk, (psd[:len(vk)] / ndivide))
Calculate power spectrum density of data. Args: data (np.ndarray): Input data. dt (float): Time between each data. ndivide (int): Do averaging (split data into ndivide, get psd of each, and average them). ax (matplotlib.axes): Axis you want to plot on. doplot (bool): Plot how averaging works. overlap_half (bool): Split data to half-overlapped regions. Returns: vk (np.ndarray): Frequency. psd (np.ndarray): PSD
codesearchnet
def observe_reward_value(self, state_key, action_key):
    """Compute the reward value for a state.

    Args:
        state_key: The (x, y) key of state.
        action_key: The key of action.

    Returns:
        Reward value.

    Raises:
        ValueError: if the state is a wall cell.
    """
    x, y = state_key
    cell = self.__map_arr[y][x]
    if cell == self.__end_point_label:
        return 100.0
    if cell == self.__start_point_label:
        return 0.0
    if cell == self.__wall_label:
        raise ValueError("It is the wall. (x, y)=(%d, %d)" % (x, y))
    # Ordinary cell: its label encodes the reward directly.
    reward_value = float(cell)
    self.save_r_df(state_key, reward_value)
    return reward_value
Compute the reward value. Args: state_key: The key of state. action_key: The key of action. Returns: Reward value.
juraj-google-style
def edgelist_to_adjacency(edgelist):
    """Convert an iterable of edges into an adjacency dict.

    Args:
        edgelist (iterable): An iterable of 2-tuples, each an undirected edge.

    Returns:
        dict: Mapping ``{v: Nv, ...}`` where ``Nv`` is the set of neighbors
        of node ``v``. A self-loop adds the node as its own neighbor.
    """
    adjacency = {}
    for u, v in edgelist:
        # setdefault creates the neighbor set on first sight of each node,
        # replacing the four-way membership if/else of the manual version.
        adjacency.setdefault(u, set()).add(v)
        adjacency.setdefault(v, set()).add(u)
    return adjacency
Converts an iterator of edges to an adjacency dict. Args: edgelist (iterable): An iterator over 2-tuples where each 2-tuple is an edge. Returns: dict: The adjacency dict. A dict of the form {v: Nv, ...} where v is a node in a graph and Nv is the neighbors of v as an set.
juraj-google-style
def get_available_transcript_languages(video_id):
    """Return the transcript language codes available for a video.

    Args:
        video_id (unicode): An id identifying the Video.

    Returns:
        list: Language codes (str) of all transcripts attached to the video.
    """
    # values_list(..., flat=True) yields bare language codes instead of 1-tuples.
    available_languages = VideoTranscript.objects.filter(video__edx_video_id=video_id).values_list('language_code', flat=True)
    # Force evaluation of the lazy queryset into a plain list.
    return list(available_languages)
Get available transcript languages Arguments: video_id(unicode): An id identifying the Video. Returns: A list containing transcript language codes for the Video.
codesearchnet
def __discovery_doc_descriptor(self, services, hostname=None):
    """Build a discovery doc dict for an API implemented by *services*.

    Args:
        services: List of protorpc.remote.Service instances implementing an
            api/version.
        hostname: string, hostname of the API, overriding the value set on
            the current service. Defaults to None.

    Returns:
        dict: The API descriptor document, serializable to discovery-doc JSON.

    Raises:
        api_exceptions.ApiConfigurationError: If a method id or an
            (http_method, path) pair is used by more than one service class.
    """
    merged_api_info = self.__get_merged_api_info(services)
    descriptor = self.get_descriptor_defaults(merged_api_info, hostname=hostname)
    description = merged_api_info.description
    if ((not description) and (len(services) == 1)):
        # Single-service API with no explicit description: fall back to
        # the service class docstring.
        description = services[0].__doc__
    if description:
        descriptor['description'] = description
    descriptor['parameters'] = self.__standard_parameters_descriptor()
    descriptor['auth'] = self.__standard_auth_descriptor(services)
    if merged_api_info.namespace:
        # Namespace info wins over the individual owner_* fields.
        descriptor['ownerDomain'] = merged_api_info.namespace.owner_domain
        descriptor['ownerName'] = merged_api_info.namespace.owner_name
        descriptor['packagePath'] = (merged_api_info.namespace.package_path or '')
    else:
        if (merged_api_info.owner_domain is not None):
            descriptor['ownerDomain'] = merged_api_info.owner_domain
        if (merged_api_info.owner_name is not None):
            descriptor['ownerName'] = merged_api_info.owner_name
        if (merged_api_info.package_path is not None):
            descriptor['packagePath'] = merged_api_info.package_path
    method_map = {}
    # Trackers detect the same method id / REST path being declared twice.
    method_collision_tracker = {}
    rest_collision_tracker = {}
    resource_index = collections.defaultdict(list)
    resource_map = {}
    for service in services:
        remote_methods = service.all_remote_methods()
        for (protorpc_meth_name, protorpc_meth_info) in remote_methods.iteritems():
            method_info = getattr(protorpc_meth_info, 'method_info', None)
            if (method_info is None):
                # Skip entries that are not API methods.
                continue
            path = method_info.get_path(service.api_info)
            method_id = method_info.method_id(service.api_info)
            canonical_method_id = self._get_canonical_method_id(method_id)
            resource_path = self._get_resource_path(method_id)
            if (method_id in method_collision_tracker):
                raise api_exceptions.ApiConfigurationError(('Method %s used multiple times, in classes %s and %s' % (method_id, method_collision_tracker[method_id], service.__name__)))
            else:
                method_collision_tracker[method_id] = service.__name__
            rest_identifier = (method_info.http_method, path)
            if (rest_identifier in rest_collision_tracker):
                raise api_exceptions.ApiConfigurationError(('%s path "%s" used multiple times, in classes %s and %s' % (method_info.http_method, path, rest_collision_tracker[rest_identifier], service.__name__)))
            else:
                rest_collision_tracker[rest_identifier] = service.__name__
            if resource_path:
                # Methods under a resource path are grouped per resource.
                resource_index[resource_path[0]].append((service, protorpc_meth_info))
            else:
                method_map[canonical_method_id] = self.__method_descriptor(service, method_info, protorpc_meth_info)
    for (resource, resource_methods) in resource_index.items():
        resource_map[resource] = self.__resource_descriptor(resource, resource_methods)
    if method_map:
        descriptor['methods'] = method_map
    if resource_map:
        descriptor['resources'] = resource_map
    schemas = self.__schemas_descriptor()
    if schemas:
        descriptor['schemas'] = schemas
    return descriptor
Builds a discovery doc for an API. Args: services: List of protorpc.remote.Service instances implementing an api/version. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: A dictionary that can be deserialized into JSON in discovery doc format. Raises: ApiConfigurationError: If there's something wrong with the API configuration, such as a multiclass API decorated with different API descriptors (see the docstring for api()), or a repeated method signature.
codesearchnet
def include_revision(revision_num, skip_factor=1.1):
    """Decide whether a revision should be kept.

    Revisions are subsampled so consecutive kept revision numbers grow
    roughly geometrically with ratio ``skip_factor``, bounding the number
    of kept revisions logarithmically in the total count.

    Args:
        revision_num: Integer index of the revision.
        skip_factor: A floating point number >= 1.0; values <= 1.0 keep
            every revision.

    Returns:
        bool: True if the revision should be included.
    """
    if skip_factor <= 1.0:
        # No subsampling requested: keep everything.
        return True
    log_base = math.log(skip_factor)
    lower_bucket = int(math.log1p(revision_num) / log_base)
    upper_bucket = int(math.log(revision_num + 2.0) / log_base)
    # Keep the revision exactly when the log-bucket index changes across it.
    return lower_bucket != upper_bucket
Decide whether to include a revision. If the number of revisions is large, we exclude some revisions to avoid a quadratic blowup in runtime, since the article is likely also large. We make the ratio between consecutive included revision numbers approximately equal to ``skip_factor``. Args: revision_num: an integer skip_factor: a floating point number >= 1.0 Returns: a boolean
juraj-google-style
def _get_tables(self, base_dir):
    """Load every table marked ``use`` in the metadata, anonymizing PII fields.

    Args:
        base_dir (str): Root folder of the dataset files.

    Returns:
        dict: Mapping table name -> tuple(pandas.DataFrame, table metadata dict).
    """
    table_dict = {}
    for table in self.metadata['tables']:
        if table['use']:
            # Table paths in the metadata are relative to the dataset root.
            relative_path = os.path.join(base_dir, self.metadata['path'], table['path'])
            data_table = pd.read_csv(relative_path)
            # Anonymize fields flagged as Personally Identifiable Information.
            pii_fields = self._get_pii_fields(table)
            data_table = self._anonymize_table(data_table, pii_fields)
            table_dict[table['name']] = (data_table, table)
    return table_dict
Load the contents of meta_file and the corresponding data. If fields containing Personally Identifiable Information are detected in the metadata, they are anonymized before being assigned into `table_dict`. Args: base_dir(str): Root folder of the dataset files. Returns: dict: Mapping str -> tuple(pandas.DataFrame, dict)
juraj-google-style
def fn(x: tuple[int]):
    """Identity test function.

    Args:
        x: The input tuple.

    Returns:
        The input, unchanged.
    """
    result = x
    return result
Test function Args: x: The input Returns: The output
github-repos
def find(self, _id, instance=None):
    """Look up a service instance or a binding by id.

    Args:
        _id (str): Instance id or binding id.

    Keyword Arguments:
        instance (AtlasServiceInstance.Instance): Existing instance; when
            given, ``_id`` is treated as a binding id on that instance.

    Returns:
        AtlasServiceInstance.Instance or AtlasServiceBinding.Binding.
    """
    # No instance given -> the id names a service instance; otherwise it
    # names a binding scoped to that instance.
    if instance is not None:
        return self.service_binding.find(_id, instance)
    return self.service_instance.find(_id)
Find Args: _id (str): instance id or binding Id Keyword Arguments: instance (AtlasServiceInstance.Instance): Existing instance Returns: AtlasServiceInstance.Instance or AtlasServiceBinding.Binding: An instance or binding.
juraj-google-style
def hide_tool(self, context_name, tool_name):
    """Hide a tool so that it is not exposed in the suite.

    Args:
        context_name (str): Context containing the tool.
        tool_name (str): Name of tool to hide.
    """
    data = self._context(context_name)
    hidden_tools = data['hidden_tools']
    if (tool_name not in hidden_tools):
        # Only validate and persist when the tool is not already hidden.
        self._validate_tool(context_name, tool_name)
        hidden_tools.add(tool_name)
        self._flush_tools()
Hide a tool so that it is not exposed in the suite. Args: context_name (str): Context containing the tool. tool_name (str): Name of tool to hide.
codesearchnet
def __init__(self, context_type=ContextType.PATH, debug=False):
    """Initialize a libinput context.

    Args:
        context_type (~libinput.constant.ContextType): If UDEV, devices are
            added/removed from the udev seat. If PATH, devices have to be
            added/removed manually.
        debug (bool): If False, only errors are printed.
    """
    self._selector = DefaultSelector()
    self._interface = Interface()
    if context_type == ContextType.UDEV:
        # udev-backed context: libinput discovers devices via udev.
        self._udev = self._libudev.udev_new()
        self._li = self._libinput.libinput_udev_create_context(
            byref(self._interface), None, self._udev)
    elif context_type == ContextType.PATH:
        self._li = self._libinput.libinput_path_create_context(
            byref(self._interface), None)
    # Default log handler prints the priority name and the message.
    self._log_handler = lambda pr, strn: print(pr.name, ': ', strn)
    self._set_default_log_handler()
    if debug:
        self._libinput.libinput_log_set_priority(
            self._li, LogPriority.DEBUG)
    # Register libinput's fd with the selector so events can be polled.
    self._selector.register(
        self._libinput.libinput_get_fd(self._li), EVENT_READ)
Initialize context. Args: context_type (~libinput.constant.ContextType): If :attr:`~libinput.constant.ContextType.UDEV` devices are added/removed from udev seat. If :attr:`~libinput.constant.ContextType.PATH` devices have to be added/removed manually. debug (bool): If false, only errors are printed.
juraj-google-style
def _get_predictions(self, data, break_ties='random', return_probs=False, **kwargs):
    """Compute predictions in batches over a labeled dataset.

    Args:
        data: A PyTorch DataLoader, Dataset, or tuple of Tensors (X, Y),
            where Y holds target labels in {1, ..., k}.
        break_ties: Policy for breaking ties when making predictions.
        return_probs: Also return the predicted probabilities.

    Returns:
        (Y_p, Y) or (Y_p, Y, Y_s): predictions, gold labels, and optionally
        an [n, k] array of predicted probabilities.
    """
    data_loader = self._create_data_loader(data)
    Y_p = []
    Y = []
    Y_s = []
    for (batch_num, data) in enumerate(data_loader):
        (Xb, Yb) = data
        Y.append(self._to_numpy(Yb))
        if (self.config['device'] != 'cpu'):
            # Move the batch inputs onto the configured GPU device.
            Xb = place_on_gpu(Xb)
        # Always request probabilities; drop them at the end if not wanted.
        (Y_pb, Y_sb) = self.predict(Xb, break_ties=break_ties, return_probs=True, **kwargs)
        Y_p.append(self._to_numpy(Y_pb))
        Y_s.append(self._to_numpy(Y_sb))
    # Concatenate the per-batch results into single arrays.
    (Y_p, Y, Y_s) = map(self._stack_batches, [Y_p, Y, Y_s])
    if return_probs:
        return (Y_p, Y, Y_s)
    else:
        return (Y_p, Y)
Computes predictions in batch, given a labeled dataset Args: data: a Pytorch DataLoader, Dataset, or tuple with Tensors (X,Y): X: The input for the predict method Y: An [n] or [n, 1] torch.Tensor or np.ndarray of target labels in {1,...,k} break_ties: How to break ties when making predictions return_probs: Return the predicted probabilities as well Returns: Y_p: A Tensor of predictions Y: A Tensor of labels [Optionally: Y_s: An [n, k] np.ndarray of predicted probabilities]
codesearchnet
def write_to_file(src, dst):
    """Copy blocks from *src* into *dst*.

    Args:
        src (iterable): Yields blocks of data to write.
        dst (file-like object): Must support ``.write(block)``.

    Returns:
        int: Total number of items written (sum of block lengths).
    """
    total = 0
    for chunk in src:
        dst.write(chunk)
        total += len(chunk)
    return total
Write data from `src` into `dst`. Args: src (iterable): iterable that yields blocks of data to write dst (file-like object): file-like object that must support .write(block) Returns: number of bytes written to `dst`
codesearchnet
def _Open(self, path_spec, mode='rb'):
    """Opens the file system defined by path specification.

    Args:
        path_spec (PathSpec): a path specification.
        mode (Optional[str]): file access mode; 'rb' is read-only binary.

    Raises:
        PathSpecError: if the path specification has no parent or no
            compression method.
    """
    if not path_spec.HasParent():
        raise errors.PathSpecError(
            'Unsupported path specification without parent.')
    compression_method = getattr(path_spec, 'compression_method', None)
    if not compression_method:
        raise errors.PathSpecError(
            'Unsupported path specification without compression method.')
    # Store the compression method for later use by the file system.
    self._compression_method = compression_method
Opens the file system defined by path specification. Args: path_spec (PathSpec): a path specification. mode (Optional[str]): file access mode. The default is 'rb' which represents read-only binary. Raises: AccessError: if the access to open the file was denied. IOError: if the file system could not be opened. PathSpecError: if the path specification is incorrect. ValueError: if the path specification is invalid.
juraj-google-style
def make_block_creator(yaml_path, filename=None):
    """Make a creator function that instantiates blocks defined in a YAML file.

    Args:
        yaml_path (str): File path to YAML file, or a file in the same dir.
        filename (str): If given, use this filename as the last element in
            the yaml_path (so yaml_path can be __file__).

    Returns:
        function: A creator with a signature derived from the YAML sections.
        Calling it instantiates the controller and its parts and returns the
        list of all created controllers.
    """
    (sections, yamlname, docstring) = Section.from_yaml(yaml_path, filename)
    yamldir = os.path.dirname(yaml_path)
    controller_sections = [s for s in sections if (s.section == 'controllers')]
    # Exactly one controller section must be declared in the YAML.
    assert (len(controller_sections) == 1), ('Expected exactly 1 controller, got %s' % (controller_sections,))
    controller_section = controller_sections[0]

    def block_creator(kwargs):
        # Resolve the YAML defines against the caller-supplied kwargs, then
        # build all sub-blocks and parts before the controller itself.
        defines = _create_defines(sections, yamlname, yamldir, kwargs)
        (controllers, parts) = _create_blocks_and_parts(sections, defines)
        controller = controller_section.instantiate(defines)
        for part in parts:
            controller.add_part(part)
        controllers.append(controller)
        return controllers

    # Wrap the creator so it exposes a nice signature and docstring.
    creator = creator_with_nice_signature(block_creator, sections, yamlname, yaml_path, docstring)
    return creator
Make a collection function that will create a list of blocks Args: yaml_path (str): File path to YAML file, or a file in the same dir filename (str): If give, use this filename as the last element in the yaml_path (so yaml_path can be __file__) Returns: function: A collection function decorated with @takes. This can be used in other blocks or instantiated by the process. If the YAML text specified controllers or parts then a block instance with the given name will be instantiated. If there are any blocks listed then they will be called. All created blocks by this or any sub collection will be returned
codesearchnet
def __init__(self, name, collections=None, capture_by_value=None, structured_input_signature=None, structured_outputs=None):
    """Construct a new FuncGraph.

    The graph inherits its collections, seed, and colocation stack from the
    current context or outer graph.

    Args:
        name: the name of the function.
        collections: a dictionary of collections this FuncGraph should start
            with. If None, non-allowlisted collections of the outer graph
            are copied (read-only) and allowlisted ones are shared by
            reference.
        capture_by_value: optional boolean; if True, Variables are captured
            by value instead of reference. Defaults to inheriting from an
            outer FuncGraph, else False.
        structured_input_signature: optional structured input signature.
        structured_outputs: optional structured outputs.
    """
    super().__init__()
    self.name = name
    self.inputs = []
    self.outputs = []
    self.control_outputs = []
    self.structured_input_signature = structured_input_signature
    self.structured_outputs = structured_outputs
    self._resource_tensor_inputs = object_identity.ObjectIdentitySet()
    self._weak_variables = []
    self._watched_variables = object_identity.ObjectIdentityWeakSet()
    self.is_control_flow_graph = False
    self._function_captures = capture_container.FunctionCaptures()
    outer_graph = ops.get_default_graph()
    self._weak_outer_graph = weakref.ref(outer_graph)
    # Walk out to the first non-function graph as a fallback in case the
    # weak reference to the immediate outer graph dies.
    while outer_graph.building_function:
        outer_graph = outer_graph.outer_graph
    self._fallback_outer_graph = outer_graph
    self._output_names = None
    # Inherit capture_by_value from an enclosing FuncGraph unless given.
    if capture_by_value is not None:
        self.capture_by_value = capture_by_value
    elif self.outer_graph is not None and isinstance(self.outer_graph, FuncGraph):
        self.capture_by_value = self.outer_graph.capture_by_value
    else:
        self.capture_by_value = False
    self._building_function = True
    graph = self.outer_graph
    if context.executing_eagerly():
        self.seed = context.global_seed()
        self._seed_used = False
    else:
        self.seed = graph.seed
        self._seed_used = False
        self._colocation_stack = graph._colocation_stack.copy()
    if collections is None:
        # Copy non-allowlisted collections by value (read-only view) and
        # share allowlisted ones by reference with the outer graph.
        for collection_name in graph.get_all_collection_keys():
            if collection_name not in ALLOWLIST_COLLECTIONS:
                self._collections[collection_name] = graph.get_collection(collection_name)
        for collection_name in ALLOWLIST_COLLECTIONS:
            self._collections[collection_name] = graph.get_collection_ref(collection_name)
    else:
        self._collections = collections
    self._saveable = True
    self._saving_errors = set()
    self._scope_exit_callbacks = None
Construct a new FuncGraph. The graph will inherit its graph key, collections, seed, and distribution strategy stack from the current context or graph. Args: name: the name of the function. collections: a dictionary of collections this FuncGraph should start with. If not specified (None), the FuncGraph will read (but not write to) the outer graph's collections that are not allowlisted, and both read and write to the outer graph's collections that are allowlisted. The current allowlisted collections are the global variables, the local variables, and the trainable variables. Defaults to None. capture_by_value: An optional boolean. If True, the func graph will capture Variables by value instead of reference. By default inherit from outer graphs, and failing that will default to False. structured_input_signature: Optional. The structured input signature to use for initializing the FuncGraph. See the docstring for FuncGraph for more information. structured_outputs: Optional. The structured outputs to use for initializing the FuncGraph. See the docstring for FuncGraph for more information.
github-repos
def join_pretty_tensors(tensors, output, join_function=None, name='join'):
    """Joins the list of pretty_tensors and sets the head of *output*.

    Args:
        tensors: A sequence of Layers or SequentialLayerBuilders to join.
        output: A pretty_tensor to set the head with the result.
        join_function: A function to join the tensors; defaults to concat
            on the last dimension.
        name: A name used for the name_scope.

    Returns:
        The result of calling with_tensor on output.

    Raises:
        ValueError: if tensors is None or empty.
    """
    if not tensors:
        raise ValueError('pretty_tensors must be a non-empty sequence.')
    with output.g.name_scope(name):
        if join_function is None:
            # Default join: concatenate along the last axis of the first tensor.
            last_dim = len(tensors[0].shape) - 1
            return output.with_tensor(tf.concat(tensors, last_dim))
        else:
            return output.with_tensor(join_function(tensors))
Joins the list of pretty_tensors and sets head of output_pretty_tensor. Args: tensors: A sequence of Layers or SequentialLayerBuilders to join. output: A pretty_tensor to set the head with the result. join_function: A function to join the tensors, defaults to concat on the last dimension. name: A name that is used for the name_scope Returns: The result of calling with_tensor on output Raises: ValueError: if pretty_tensors is None or empty.
juraj-google-style
def decode_terminated(data, encoding, strict=True):
    """Decode *data* up to the first NULL terminator.

    Args:
        data (bytes): data to decode.
        encoding (str): the codec to use.
        strict (bool): If True, raise ValueError when no NULL is found even
            though the available data decoded successfully.

    Returns:
        Tuple[text, bytes]: the decoded text and the remaining data after
        the NULL terminator.

    Raises:
        UnicodeError: In case the data can't be decoded.
        LookupError: In case the encoding is not found.
        ValueError: In case the data isn't NULL-terminated (unless strict
            is False, in which case the decoded string is returned anyway).
    """
    codec_info = codecs.lookup(encoding)
    # Normalize the codec name so the fast-path check below is reliable.
    encoding = codec_info.name
    if (encoding in ('utf-8', 'iso8859-1')):
        # Fast path: in these encodings a zero byte can only be the NULL
        # character, so a plain byte search is safe.
        index = data.find(b'\x00')
        if (index == (- 1)):
            res = (data.decode(encoding), b'')
            if strict:
                raise ValueError('not null terminated')
            else:
                return res
        return (data[:index].decode(encoding), data[(index + 1):])
    # Slow path: decode incrementally one byte at a time until a NULL
    # character appears (multi-byte encodings may contain zero bytes that
    # are not terminators).
    decoder = codec_info.incrementaldecoder()
    r = []
    for (i, b) in enumerate(iterbytes(data)):
        c = decoder.decode(b)
        if (c == u'\x00'):
            return (u''.join(r), data[(i + 1):])
        r.append(c)
    else:
        # Flush the decoder; raises if trailing bytes are an incomplete char.
        r.append(decoder.decode(b'', True))
    if strict:
        raise ValueError('not null terminated')
    return (u''.join(r), b'')
Returns the decoded data until the first NULL terminator and all data after it. Args: data (bytes): data to decode encoding (str): The codec to use strict (bool): If True will raise ValueError in case no NULL is found but the available data decoded successfully. Returns: Tuple[`text`, `bytes`]: A tuple containing the decoded text and the remaining data after the found NULL termination. Raises: UnicodeError: In case the data can't be decoded. LookupError:In case the encoding is not found. ValueError: In case the data isn't null terminated (even if it is encoded correctly) except if strict is False, then the decoded string will be returned anyway.
codesearchnet
def convert(self, graph_def, input_tensors, output_tensors):
    """Convert a frozen TensorFlow GraphDef using this instance's settings.

    Args:
        graph_def: Frozen TensorFlow GraphDef.
        input_tensors: List of input tensors.
        output_tensors: List of output tensors.

    Returns:
        The converted model data in serialized format.
    """
    self._validate_inputs(graph_def, input_tensors)
    converter_kwargs = self._get_base_converter_args()
    # Quantization settings are merged on top of the base converter flags.
    converter_kwargs.update(self._quant_mode.converter_flags())
    if not self.experimental_new_converter:
        logging.warning('Please consider switching to the new converter by setting experimental_new_converter=True. The old converter is deprecated.')
    else:
        logging.info('Using new converter: If you encounter a problem please file a bug. You can opt-out by setting experimental_new_converter=False')
    result = _convert_graphdef(input_data=graph_def, input_tensors=input_tensors, output_tensors=output_tensors, **converter_kwargs)
    # Apply post-conversion optimizations (e.g. quantization) before returning.
    return self._optimize_tflite_model(result, self._quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)
Converts a TensorFlow GraphDef based on instance variables. Args: graph_def: Frozen TensorFlow GraphDef. input_tensors: List of input tensors. output_tensors: List of output tensors. Returns: The converted data in serialized format. Raises: ValueError: No concrete function is specified. Multiple concrete functions are specified. Input shape is not specified. Invalid quantization parameters.
github-repos
def get_space_group_info(self, symprec=0.01, angle_tolerance=5.0):
    """Convenience method to quickly get the spacegroup of a structure.

    Args:
        symprec (float): Symmetry-finding tolerance, as in SpacegroupAnalyzer.
        angle_tolerance (float): Angle tolerance in degrees, as in
            SpacegroupAnalyzer.

    Returns:
        tuple: (spacegroup_symbol, international_number)
    """
    # Local import -- presumably avoids a circular import with the
    # symmetry package; confirm before moving to module level.
    from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
    a = SpacegroupAnalyzer(self, symprec=symprec, angle_tolerance=angle_tolerance)
    return (a.get_space_group_symbol(), a.get_space_group_number())
Convenience method to quickly get the spacegroup of a structure. Args: symprec (float): Same definition as in SpacegroupAnalyzer. Defaults to 1e-2. angle_tolerance (float): Same definition as in SpacegroupAnalyzer. Defaults to 5 degrees. Returns: spacegroup_symbol, international_number
codesearchnet
def random_density_matrix(length, rank=None, method='Hilbert-Schmidt', seed=None):
    """Generate a random density matrix rho.

    Args:
        length (int): Dimension of the density matrix.
        rank (int or None): Rank of the matrix; None means full rank.
        method (string): 'Hilbert-Schmidt' to sample from the
            Hilbert-Schmidt metric, 'Bures' to sample from the Bures metric.
        seed (int): Optional random seed.

    Returns:
        ndarray: rho, a (length, length) density matrix.

    Raises:
        QiskitError: If the method is not valid.
    """
    # Dispatch table replaces the if/elif chain.
    samplers = {
        'Hilbert-Schmidt': __random_density_hs,
        'Bures': __random_density_bures,
    }
    try:
        sampler = samplers[method]
    except KeyError:
        raise QiskitError('Error: unrecognized method {}'.format(method))
    return sampler(length, rank, seed)
Generate a random density matrix rho. Args: length (int): the length of the density matrix. rank (int or None): the rank of the density matrix. The default value is full-rank. method (string): the method to use. 'Hilbert-Schmidt': sample rho from the Hilbert-Schmidt metric. 'Bures': sample rho from the Bures metric. seed (int): Optional. To set a random seed. Returns: ndarray: rho (length, length) a density matrix. Raises: QiskitError: if the method is not valid.
codesearchnet
def get_coding_intervals(self, build='37', genes=None):
    """Build per-chromosome interval trees of merged coding regions.

    Each interval covers the (padded) coding region of one or more
    overlapping genes.

    Args:
        build (str): The genome build.
        genes (iterable(scout.models.HgncGene)): Genes to use; defaults to
            all genes for the build.

    Returns:
        dict: Chromosome name -> intervaltree.IntervalTree of merged regions.
    """
    intervals = {}
    if not genes:
        genes = self.all_genes(build=build)

    LOG.info("Building interval trees...")
    for i, hgnc_obj in enumerate(genes):
        chrom = hgnc_obj['chromosome']
        # Pad each gene by 5 kb on both sides, clamping the start at 1.
        start = max((hgnc_obj['start'] - 5000), 1)
        end = hgnc_obj['end'] + 5000

        # First gene seen on this chromosome: create its tree.
        if chrom not in intervals:
            intervals[chrom] = intervaltree.IntervalTree()
            intervals[chrom].addi(start, end, i)
            continue

        res = intervals[chrom].search(start, end)

        # No overlap with existing intervals: insert as-is.
        if not res:
            intervals[chrom].addi(start, end, i)
            continue

        # Merge the new interval with all overlapping ones: grow the bounds
        # and remove each old interval before inserting the merged one.
        for interval in res:
            if interval.begin < start:
                start = interval.begin

            if interval.end > end:
                end = interval.end

            intervals[chrom].remove(interval)

        intervals[chrom].addi(start, end, i)

    return intervals
Return a dictionary with chromosomes as keys and interval trees as values Each interval represents a coding region of overlapping genes. Args: build(str): The genome build genes(iterable(scout.models.HgncGene)): Returns: intervals(dict): A dictionary with chromosomes as keys and overlapping genomic intervals as values
juraj-google-style
def query_dns(domain, record_type, cache=None, nameservers=None, timeout=2.0):
    """Query DNS for a record, with optional caching.

    Args:
        domain (str): The domain or subdomain to query about.
        record_type (str): The record type to query for.
        cache (ExpiringDict): Cache storage.
        nameservers (list): One or more nameservers to use (Cloudflare's
            public DNS resolvers by default).
        timeout (float): DNS timeout in seconds.

    Returns:
        list: A list of answers.
    """
    domain = str(domain).lower()
    record_type = record_type.upper()
    cache_key = "{0}_{1}".format(domain, record_type)
    if cache:
        # Serve from cache when a previous answer is available.
        records = cache.get(cache_key, None)
        if records:
            return records
    resolver = dns.resolver.Resolver()
    timeout = float(timeout)
    if nameservers is None:
        # Cloudflare public resolvers (IPv4 and IPv6).
        nameservers = ["1.1.1.1", "1.0.0.1",
                       "2606:4700:4700::1111",
                       "2606:4700:4700::1001",
                       ]
    resolver.nameservers = nameservers
    resolver.timeout = timeout
    resolver.lifetime = timeout
    if record_type == "TXT":
        # TXT answers can be split into multiple character-strings; join the
        # fragments of each answer back together before decoding.
        resource_records = list(map(
            lambda r: r.strings,
            resolver.query(domain, record_type, tcp=True)))
        _resource_record = [
            resource_record[0][:0].join(resource_record)
            for resource_record in resource_records
            if resource_record]
        records = [r.decode() for r in _resource_record]
    else:
        # Normalize other answers: strip quotes and the trailing root dot.
        records = list(map(
            lambda r: r.to_text().replace('"', '').rstrip("."),
            resolver.query(domain, record_type, tcp=True)))
    if cache:
        cache[cache_key] = records
    return records
Queries DNS Args: domain (str): The domain or subdomain to query about record_type (str): The record type to query for cache (ExpiringDict): Cache storage nameservers (list): A list of one or more nameservers to use (Cloudflare's public DNS resolvers by default) timeout (float): Sets the DNS timeout in seconds Returns: list: A list of answers
juraj-google-style
def hwvtep_add_rbridgeid(self, **kwargs):
    """Attach a range of rbridge-ids to an overlay gateway.

    Args:
        name (str): Gateway name.
        rb_range (str): rbridge-id range to add.

    Returns:
        Return value of the configured callback.
    """
    name = kwargs.pop('name')
    id = kwargs.pop('rb_range')
    ip_args = dict(name=name, rb_add=id)
    method_name = 'overlay_gateway_attach_rbridge_id_rb_add'
    method_class = self._brocade_tunnels
    # Look up the generated binding method and build the config payload.
    gw_attr = getattr(method_class, method_name)
    config = gw_attr(**ip_args)
    output = self._callback(config)
    return output
Add a range of rbridge-ids Args: name (str): gateway-name vlan (str): rbridge-ids range callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
juraj-google-style
def get_connection_id(self, conn_or_int_id):
    """Return the connection id for a connection.

    Args:
        conn_or_int_id (int or str): The external integer connection id or
            an internal string connection id.

    Returns:
        The ``conn_id`` stored for that connection.

    Raises:
        ArgumentError: If the key is neither an int nor a str, or is not a
            known connection.
    """
    key = conn_or_int_id
    # String keys index the internal-id table; int keys the external one.
    if isinstance(key, str):
        lookup = self._int_connections
    elif isinstance(key, int):
        lookup = self._connections
    else:
        raise ArgumentError('You must supply either an int connection id or a string internal id to _get_connection_state', id=key)

    if key not in lookup:
        raise ArgumentError('Could not find connection by id', id=key)
    return lookup[key]['conn_id']
Get the connection id. Args: conn_or_int_id (int, string): The external integer connection id or an internal string connection id Returns: The conn_id associated with that connection. Raises: ArgumentError: When the key is not found in the list of active connections or is invalid.
codesearchnet
def allsame(list_, strict=True):
    """Check whether every item in the list equals the first item.

    Args:
        list_ (list): Items to compare.
        strict (bool): Passed through to ``list_all_eq_to``.

    Returns:
        bool: True if all items are equal (vacuously True when empty).
    """
    # Truthiness idiom instead of len(list_) == 0; an empty list has no
    # unequal items, so the answer is vacuously True.
    if not list_:
        return True
    first_item = list_[0]
    return list_all_eq_to(list_, first_item, strict)
checks to see if list is equal everywhere Args: list_ (list): Returns: True if all items in the list are equal
codesearchnet
def _secant_step(x1, x2, y1, y2):
    """Return the secant-method step size from the current position.

    Assumes ``y1 != y2``; callers must guarantee this (there is no
    division guard). An overflow caused by a tiny ``y1 - y2`` is tolerated:
    the oversized step is rejected downstream in favor of bisection.

    Args:
        x1: `Tensor` containing the current position.
        x2: `Tensor` containing the previous position.
        y1: `Tensor` containing the value of `objective_fn` at `x1`.
        y2: `Tensor` containing the value of `objective_fn` at `x2`.

    Returns:
        A `Tensor` with the same shape and dtype as the current position.
    """
    position_delta = x1 - x2
    value_delta = y1 - y2
    return -y1 * position_delta / value_delta
Returns the step size at the current position if using the secant method. This function is meant for exclusive use by the `_brent_loop_body` function: - It does not guard against divisions by zero, and instead assumes that `y1` is distinct from `y2`. The `_brent_loop_body` function guarantees this property. - It does not guard against overflows which may occur if the difference between `y1` and `y2` is small while that between `x1` and `x2` is not. In this case, the resulting step size will be larger than `bisection_step` and thus ignored by the `_brent_loop_body` function. Args: x1: `Tensor` containing the current position. x2: `Tensor` containing the previous position. y1: `Tensor` containing the value of `objective_fn` at `x1`. y2: `Tensor` containing the value of `objective_fn` at `x2`. Returns: A `Tensor` with the same shape and dtype as `current`.
github-repos
def __format__(self, format_spec='dms'):
    """Extended pretty printing for location strings.

    Args:
        format_spec (str): Coordinate formatting system to use (passed to
            the parent class formatter).

    Returns:
        str: Human-readable representation of the ``Trigpoint`` object,
        e.g. ``"name (coords alt 123m)"``.
    """
    # Start from the parent class's coordinate formatting.
    location = [super(Trigpoint, self).__format__(format_spec), ]
    if self.altitude:
        location.append('alt %im' % self.altitude)
    if self.name:
        return '%s (%s)' % (self.name, ' '.join(location))
    else:
        return ' '.join(location)
Extended pretty printing for location strings. Args: format_spec (str): Coordinate formatting system to use Returns: str: Human readable string representation of ``Trigpoint`` object Raises: ValueError: Unknown value for ``format_spec``
juraj-google-style
def find_interface_by_mac(self, **kwargs):
    """Find the interface(s) through which a MAC can be reached.

    Args:
        mac_address (str): A MAC address in 'xx:xx:xx:xx:xx:xx' format.

    Returns:
        list[dict]: MAC-table entries whose address matches.

    Raises:
        KeyError: If ``mac_address`` is not supplied.
    """
    wanted = kwargs.pop('mac_address')
    # Filter the device's MAC table down to entries for the requested address.
    return [entry for entry in self.mac_table if entry['mac_address'] == wanted]
Find the interface through which a MAC can be reached. Args: mac_address (str): A MAC address in 'xx:xx:xx:xx:xx:xx' format. Returns: list[dict]: a list of mac table data. Raises: KeyError: if `mac_address` is not specified. Examples: >>> from pprint import pprint >>> import pynos.device >>> conn = ('10.24.39.211', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... x = dev.find_interface_by_mac( ... mac_address='10:23:45:67:89:ab') ... pprint(x) # doctest: +ELLIPSIS [{'interface'...'mac_address'...'state'...'type'...'vlan'...}]
codesearchnet
def sum(self, selector=identity):
    """Return the arithmetic sum of the (projected) values in the sequence.

    Consumes the entire source sequence (immediate execution).

    Args:
        selector: Optional single-argument projection applied to each
            element; defaults to the identity function.

    Returns:
        The total of the projected sequence, or zero for an empty sequence.

    Raises:
        ValueError: If the Queryable has been closed.
        TypeError: If ``selector`` is not callable.
    """
    if self.closed():
        raise ValueError('Attempt to call sum() on a closed Queryable.')
    if (not is_callable(selector)):
        raise TypeError('sum() parameter selector={0} is not callable'.format(repr(selector)))
    # Delegate projection to select(); the builtin sum yields 0 for an
    # empty sequence.
    return sum(self.select(selector))
Return the arithmetic sum of the values in the sequence.. All of the source sequence will be consumed. Note: This method uses immediate execution. Args: selector: An optional single argument function which will be used to project the elements of the sequence. If omitted, the identity function is used. Returns: The total value of the projected sequence, or zero for an empty sequence. Raises: ValueError: If the Queryable has been closed.
codesearchnet
def get_config_dict(self, services, hostname=None):
    """JSON dict description of a protorpc.remote.Service in API format.

    Args:
        services: A single protorpc.remote.Service subclass or a list of
            them implementing an api/version.
        hostname (str): Hostname of the API, overriding the value set on
            the current service. Defaults to None.

    Returns:
        dict: The API descriptor document as a JSON-compatible dict.
    """
    if not isinstance(services, (tuple, list)):
        # Normalize the single-service case to a list.
        services = [services]
    # Validate that every entry is a remote service class.
    endpoints_util.check_list_type(services, remote._ServiceClass, 'services', allow_none=False)
    return self.__api_descriptor(services, hostname=hostname)
JSON dict description of a protorpc.remote.Service in API format. Args: services: Either a single protorpc.remote.Service or a list of them that implements an api/version. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: dict, The API descriptor document as a JSON dict.
juraj-google-style
def list(self, **kwargs):
    """List services.

    Args:
        filters (dict): Filters to process on the services list. Valid
            filters: ``id``, ``name``, ``label`` and ``mode``.
            Default: ``None``.

    Returns:
        list of :py:class:`Service`: The services.

    Raises:
        :py:class:`docker.errors.APIError`: If the server returns an error.
    """
    # Wrap each raw API record in a Service model.
    return [
        self.prepare_model(s)
        for s in self.client.api.services(**kwargs)
    ]
List services. Args: filters (dict): Filters to process on the nodes list. Valid filters: ``id``, ``name`` , ``label`` and ``mode``. Default: ``None``. Returns: list of :py:class:`Service`: The services. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def make_repr(inst, attrs):
    """Create a repr string for *inst* from the given attribute names.

    Attribute names missing on the instance are silently skipped.

    Args:
        inst: The object we are generating a repr of.
        attrs: Iterable of attribute names to include.

    Returns:
        str: e.g. ``"ClassName(attr1=value1, attr2=value2)"``.
    """
    pairs = []
    for attr in attrs:
        if hasattr(inst, attr):
            pairs.append('%s=%r' % (attr, getattr(inst, attr)))
    return '%s(%s)' % (inst.__class__.__name__, ', '.join(pairs))
Create a repr from an instance of a class Args: inst: The class instance we are generating a repr of attrs: The attributes that should appear in the repr
codesearchnet
def assign_methods(self, resource_class):
    """Attach communication methods for a resource class.

    Args:
        resource_class: A single resource class whose ``Meta.methods``
            tuple lists the HTTP methods to expose.
    """
    # All declared methods must be recognized HTTP verbs.
    assert all([(x.upper() in VALID_METHODS) for x in resource_class.Meta.methods])
    for method in resource_class.Meta.methods:
        self._assign_method(resource_class, method.upper())
Given a resource_class and it's Meta.methods tuple, assign methods for communicating with that resource. Args: resource_class: A single resource class
codesearchnet
def is_allowed(self, filepath, excludes=None):
    """Check whether a relative filepath passes the exclude patterns.

    Exclude patterns are always evaluated relative to the source directory.

    Args:
        filepath (str): A relative file path.

    Keyword Arguments:
        excludes (list): Glob patterns; if ``filepath`` matches any of
            them it is not allowed. Defaults to no patterns.

    Raises:
        boussole.exception.FinderException: If the given filepath is
            absolute.

    Returns:
        bool: True if the filepath is allowed, False otherwise.
    """
    if os.path.isabs(filepath):
        raise FinderException("'Finder.is_allowed()' only accept relative filepath")

    # Fix: the default used to be a mutable list ([]); use None so no list
    # instance is shared between calls.
    for pattern in excludes or ():
        if fnmatch.fnmatch(filepath, pattern):
            return False

    return True
Check from exclude patterns if a relative filepath is allowed Args: filepath (str): A relative file path. (exclude patterns are always based from the source directory). Keyword Arguments: excludes (list): A list of excluding (glob) patterns. If filepath matches one of the patterns, filepath is not allowed. Raises: boussole.exception.FinderException: If given filepath is absolute. Returns: bool: True if the filepath is allowed, False otherwise.
codesearchnet
def mix_in_audio_sample(track_data, track_offset, sample_data, sample_offset, clip_duration, sample_volume, ramp_in, ramp_out):
    """Mix *sample_data* into *track_data* in place, with a volume envelope.

    The envelope ramps linearly up over the first ``ramp_in`` samples,
    holds at 1.0, and ramps down over the final ``ramp_out`` samples of
    the clip.

    Args:
        track_data: Numpy array holding main audio data; modified in place.
        track_offset: Index in the main track where mixing starts.
        sample_data: Numpy array of audio data to mix into the main track.
        sample_offset: Starting index within ``sample_data``.
        clip_duration: Length of the mixed segment in samples.
        sample_volume: Overall loudness multiplier for the sample.
        ramp_in: Length in samples of the volume-increase stage.
        ramp_out: Length in samples of the volume-decrease stage.
    """
    fade_out_start = clip_duration - ramp_out
    # Clip the mix range to both the track's end and the sample's end.
    end_index = min(track_offset + clip_duration, track_data.shape[0])
    end_index = min(end_index, track_offset + (sample_data.shape[0] - sample_offset))
    mix_length = end_index - track_offset
    for offset in range(mix_length):
        if offset < ramp_in:
            gain = offset / ramp_in
        elif offset > fade_out_start:
            gain = (clip_duration - offset) / ramp_out
        else:
            gain = 1
        track_data[track_offset + offset] += (
            sample_data[sample_offset + offset] * gain * sample_volume)
Mixes the sample data into the main track at the specified offset. Args: track_data: Numpy array holding main audio data. Modified in-place. track_offset: Where to mix the sample into the main track. sample_data: Numpy array of audio data to mix into the main track. sample_offset: Where to start in the audio sample. clip_duration: How long the sample segment is. sample_volume: Loudness to mix the sample in at. ramp_in: Length in samples of volume increase stage. ramp_out: Length in samples of volume decrease stage.
github-repos
def qhalf(options, halfspaces, interior_point):
    """Run qhull's qhalf on a set of halfspaces.

    Args:
        options: An options string; up to two options separated by spaces
            are supported (see qhull's qhalf help; ``Fp`` is typical).
        halfspaces: List of Halfspace objects as input.
        interior_point: An interior point (see the qhalf documentation).

    Returns:
        list: Output lines (str) from qhalf.
    """
    # Each halfspace is encoded as its normal coefficients plus its offset.
    points = [(list(h.normal) + [h.offset]) for h in halfspaces]
    # Build the qhalf input: interior-point header, dimension, count, rows.
    data = [[len(interior_point), 1]]
    data.append(map(repr, interior_point))
    data.append([len(points[0])])
    data.append([len(points)])
    data.extend([map(repr, row) for row in points])
    prep_str = [' '.join(map(str, line)) for line in data]
    output = getattr(hull, 'qhalf')(options, '\n'.join(prep_str))
    return list(map(str.strip, output.strip().split('\n')))
Similar to qvoronoi command in command-line qhull. Args: option: An options string. Up to two options separated by spaces are supported. See Qhull's qhalf help for info. Typically used options are: Fp halfspaces: List of Halfspaces as input. interior_point: An interior point (see qhalf documentation) Returns: Output as a list of strings. E.g., ['3', '4', ' 1 1 0 ', ' 1 -1 2 ', ' -1 1 2 ', ' 1 1 2 ']
codesearchnet
def call_rpc(self, address, rpc_id, payload=b""):
    """Call an RPC by its address and ID.

    RPC overlays take precedence over registered tiles at the same address.

    Args:
        address (int): The address of the mock tile this RPC is for.
        rpc_id (int): The 16-bit number of the RPC.
        payload (bytes): Up to 20 bytes of payload parameters.

    Returns:
        bytes: The response payload from the RPC.

    Raises:
        RPCInvalidIDError: If ``rpc_id`` is outside the 16-bit range.
        TileNotFoundError: If no tile or overlay is registered at
            ``address``.
        RPCNotFoundError: If neither the overlay nor the tile implements
            the RPC.
    """
    if rpc_id < 0 or rpc_id > 0xFFFF:
        raise RPCInvalidIDError("Invalid RPC ID: {}".format(rpc_id))

    if address not in self._rpc_overlays and address not in self._tiles:
        raise TileNotFoundError("Unknown tile address, no registered handler", address=address)

    overlay = self._rpc_overlays.get(address, None)
    tile = self._tiles.get(address, None)
    # Overlays get first chance to handle the RPC; fall back to the tile.
    if overlay is not None and overlay.has_rpc(rpc_id):
        return overlay.call_rpc(rpc_id, payload)
    elif tile is not None and tile.has_rpc(rpc_id):
        return tile.call_rpc(rpc_id, payload)

    raise RPCNotFoundError("Could not find RPC 0x%X at address %d" % (rpc_id, address))
Call an RPC by its address and ID. Args: address (int): The address of the mock tile this RPC is for rpc_id (int): The number of the RPC payload (bytes): A byte string of payload parameters up to 20 bytes Returns: bytes: The response payload from the RPC
juraj-google-style
def __init__(self, filename=None):
    """Create a Config instance.

    Args:
        filename (str): Optional filename of the config file. If empty,
            defaults to MACKUP_CONFIG_FILE.
    """
    assert isinstance(filename, str) or filename is None
    # Initialize the parser from the file and warn about legacy configs.
    self._parser = self._setup_parser(filename)
    self._warn_on_old_config()
    # Cache each parsed setting on the instance.
    self._engine = self._parse_engine()
    self._path = self._parse_path()
    self._directory = self._parse_directory()
    self._apps_to_ignore = self._parse_apps_to_ignore()
    self._apps_to_sync = self._parse_apps_to_sync()
Create a Config instance. Args: filename (str): Optional filename of the config file. If empty, defaults to MACKUP_CONFIG_FILE
juraj-google-style
def create_event_model(event):
    """Factory that turns a celery event into an event object.

    Args:
        event (dict): A dictionary that represents a celery event.

    Returns:
        object: An event object representing the received event.

    Raises:
        JobEventTypeUnsupported: If an unsupported celery job event was received.
        WorkerEventTypeUnsupported: If an unsupported celery worker event was
            received.
        EventTypeUnknown: If an unknown event type (neither job nor worker)
            was received.
    """
    event_type = event['type']
    if event_type.startswith('task'):
        # Map known job event names to their model classes.
        model_map = {JobEventName.Started: JobStartedEvent,
                     JobEventName.Succeeded: JobSucceededEvent,
                     JobEventName.Stopped: JobStoppedEvent,
                     JobEventName.Aborted: JobAbortedEvent}
        if event_type in model_map:
            return model_map[event_type].from_event(event)
        raise JobEventTypeUnsupported('Unsupported event type {}'.format(event_type))
    if event_type.startswith('worker'):
        # Worker events are recognised but not modelled yet.
        raise WorkerEventTypeUnsupported('Unsupported event type {}'.format(event_type))
    raise EventTypeUnknown('Unknown event type {}'.format(event_type))
Factory function that turns a celery event into an event object. Args: event (dict): A dictionary that represents a celery event. Returns: object: An event object representing the received event. Raises: JobEventTypeUnsupported: If an unsupported celery job event was received. WorkerEventTypeUnsupported: If an unsupported celery worker event was received. EventTypeUnknown: If an unknown event type (neither job nor worker) was received.
codesearchnet
def load(self, filename, bs=512):
    """Loads GPT partition table.

    Args:
        filename (str): path to file or device to open for reading
        bs (int): Block size of the volume, default: 512

    Raises:
        IOError: If file does not exist or not readable
        Exception: If the GPT header signature is invalid
    """
    with open(filename, 'rb') as f:
        # The header size field lives at offset 0x0C inside the GPT header.
        f.seek(GPT_HEADER_OFFSET + 0x0C)
        header_size = struct.unpack("<I", f.read(4))[0]
        # Re-read the full header now that its actual size is known.
        f.seek(GPT_HEADER_OFFSET)
        header_data = f.read(header_size)
        self.header = GPT_HEADER(header_data)
        if (self.header.signature != GPT_SIGNATURE):
            raise Exception("Invalid GPT signature")
        # Partition entries follow; parsing is delegated to a private helper.
        self.__load_partition_entries(f, bs)
Loads GPT partition table. Args: filename (str): path to file or device to open for reading bs (uint): Block size of the volume, default: 512 Raises: IOError: If file does not exist or not readable
juraj-google-style
def _GetTypeFromScope(self, package, type_name, scope):
    """Finds a given type name in the current scope.

    If the bare name is not in scope, the package path is prepended and
    progressively shortened (dropping the innermost component each try)
    until a qualified match is found.

    Args:
        package: The package the proto should be located in.
        type_name: The name of the type to be found in the scope.
        scope: Dict mapping short and full symbols to message and enum types.

    Returns:
        The descriptor for the requested type.
    """
    if type_name not in scope:
        components = _PrefixWithDot(package).split('.')
        while components:
            candidate = '.'.join(components + [type_name])
            if candidate in scope:
                type_name = candidate
                break
            components.pop()
    return scope[type_name]
Finds a given type name in the current scope. Args: package: The package the proto should be located in. type_name: The name of the type to be found in the scope. scope: Dict mapping short and full symbols to message and enum types. Returns: The descriptor for the requested type.
juraj-google-style
def wulff_gform_and_r(self, wulffshape, bulk_entry, r, from_sphere_area=False, r_units='nanometers', e_units='keV', normalize=False, scale_per_atom=False):
    """Calculates the formation energy of the particle with arbitrary radius r.

    Args:
        wulffshape (WulffShape): Initial, unscaled WulffShape.
        bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk.
        r (float): Arbitrary effective radius of the WulffShape in Angstrom.
        from_sphere_area (bool): If True, treat the volume and surface area
            of the particle as a perfect sphere instead of a Wulff shape.
        r_units (str): Can be nanometers or Angstrom.
        e_units (str): Can be keV or eV.
        normalize (bool): Whether or not to normalize energy by volume.
        scale_per_atom (bool): Whether or not to normalize by number of
            atoms in the particle.

    Returns:
        tuple: (particle formation energy (float, in keV by default),
        effective radius in the requested units)
    """
    miller_se_dict = wulffshape.miller_energy_dict
    # Rescale the Wulff shape so its effective radius matches r.
    new_wulff = self.scaled_wulff(wulffshape, r)
    new_wulff_area = new_wulff.miller_area_dict
    if (not from_sphere_area):
        # Surface contribution: sum over facets of energy * area.
        w_vol = new_wulff.volume
        tot_wulff_se = 0
        for hkl in new_wulff_area.keys():
            tot_wulff_se += (miller_se_dict[hkl] * new_wulff_area[hkl])
        Ebulk = (self.bulk_gform(bulk_entry) * w_vol)
        new_r = new_wulff.effective_radius
    else:
        # Approximate the particle as a perfect sphere of radius r.
        w_vol = (((4 / 3) * np.pi) * (r ** 3))
        sphere_sa = ((4 * np.pi) * (r ** 2))
        tot_wulff_se = (wulffshape.weighted_surface_energy * sphere_sa)
        Ebulk = (self.bulk_gform(bulk_entry) * w_vol)
        new_r = r
    # Unit conversions: Angstrom -> nm and eV -> keV when requested.
    new_r = ((new_r / 10) if (r_units == 'nanometers') else new_r)
    e = (Ebulk + tot_wulff_se)
    e = ((e / 1000) if (e_units == 'keV') else e)
    # Optionally normalize by particle volume (sphere of radius new_r).
    e = ((e / (((4 / 3) * np.pi) * (new_r ** 3))) if normalize else e)
    bulk_struct = bulk_entry.structure
    # Atom density of the bulk, used to express energy per atom.
    density = (len(bulk_struct) / bulk_struct.lattice.volume)
    e = ((e / (density * w_vol)) if scale_per_atom else e)
    return (e, new_r)
Calculates the formation energy of the particle with arbitrary radius r. Args: wulffshape (WulffShape): Initial, unscaled WulffShape bulk_entry (ComputedStructureEntry): Entry of the corresponding bulk. r (float (Ang)): Arbitrary effective radius of the WulffShape from_sphere_area (bool): There are two ways to calculate the bulk formation energy. Either by treating the volume and thus surface area of the particle as a perfect sphere, or as a Wulff shape. r_units (str): Can be nanometers or Angstrom e_units (str): Can be keV or eV normalize (bool): Whether or not to normalize energy by volume scale_per_atom (True): Whether or not to normalize by number of atoms in the particle Returns: particle formation energy (float in keV), effective radius
codesearchnet
def lookup(self, keys, name=None):
    """Looks up `keys` in the table, outputs the corresponding values.

    It assigns out-of-vocabulary keys to buckets based on their hashes.

    Args:
        keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
        name: Optional name for the op.

    Returns:
        A `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are
        ragged, otherwise a dense `Tensor`.

    Raises:
        TypeError: when `keys` doesn't match the table key data type.
    """
    if keys.dtype.base_dtype != self._key_dtype:
        raise TypeError(f'Dtype of argument `keys` must be {self._key_dtype}, received: {keys.dtype}')
    values = keys
    # For composite tensors, operate on the flat values component.
    if isinstance(keys, (sparse_tensor.SparseTensor, internal.RaggedTensor)):
        values = keys.values
    if self._table and self._table.key_dtype.base_dtype == dtypes.int64:
        values = math_ops.cast(values, dtypes.int64)
    with ops.name_scope(name, '%s_Lookup' % self.name):
        # Hash every key; used for OOV assignment (offset past the vocab).
        buckets = string_ops.string_to_hash_bucket_fast(_as_string(values), num_buckets=self._num_oov_buckets, name='hash_bucket')
        if self._table:
            ids = self._table.lookup(values)
            # OOV bucket ids start after the in-vocabulary id range.
            buckets = math_ops.add(buckets, self._table.size())
            # Keep the table id where found, else fall back to the hash bucket.
            is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
            ids = array_ops.where_v2(is_id_non_default, ids, buckets)
        else:
            ids = buckets
    # Re-wrap the ids in the same composite structure as the input keys.
    if isinstance(keys, sparse_tensor.SparseTensor):
        return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
    elif isinstance(keys, internal.RaggedTensor):
        return keys.with_values(ids)
    return ids
Looks up `keys` in the table, outputs the corresponding values. It assigns out-of-vocabulary keys to buckets based in their hashes. Args: keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`. name: Optional name for the op. Returns: A `SparseTensor` if keys are sparse, a `RaggedTensor` if keys are ragged, otherwise a dense `Tensor`. Raises: TypeError: when `keys` doesn't match the table key data type.
github-repos
def resolve(self, strict=None):
    """Make the path absolute, resolving all symlinks on the way and also
    normalizing it (for example turning slashes into backslashes under
    Windows).

    Args:
        strict: If False (default) no exception is raised if the path
            does not exist. New in Python 3.6.

    Raises:
        IOError: if the path doesn't exist (strict=True or Python < 3.6)
    """
    # `strict` is only a valid keyword on Python >= 3.6 (or with pathlib2);
    # earlier versions always behaved strictly.
    if sys.version_info >= (3, 6) or pathlib2:
        if strict is None:
            strict = False
    else:
        if strict is not None:
            raise TypeError(
                "resolve() got an unexpected keyword argument 'strict'")
        strict = True
    if self._closed:
        self._raise_closed()
    path = self._flavour.resolve(self, strict=strict)
    if path is None:
        # Non-strict resolution failed; stat() raises if the path is
        # missing, otherwise fall back to the absolute path string.
        self.stat()
        path = str(self.absolute())
    path = self.filesystem.absnormpath(path)
    return FakePath(path)
Make the path absolute, resolving all symlinks on the way and also normalizing it (for example turning slashes into backslashes under Windows). Args: strict: If False (default) no exception is raised if the path does not exist. New in Python 3.6. Raises: IOError: if the path doesn't exist (strict=True or Python < 3.6)
juraj-google-style
def get_filename(self, **kwargs):
    """Create a filename where output data will be saved.

    Args:
        kwargs (dict): Attributes and other metadata to use for formatting
            the previously provided `filename`.

    Returns:
        str: The composed output filename. Any missing parent directory
        is created as a side effect.

    Raises:
        RuntimeError: If no filename pattern or specific filename was
            provided when this object was created.
    """
    if self.filename_parser is None:
        raise RuntimeError('No filename pattern or specific filename provided')
    output_filename = self.filename_parser.compose(kwargs)
    dirname = os.path.dirname(output_filename)
    if dirname and not os.path.isdir(dirname):
        LOG.info('Creating output directory: {}'.format(dirname))
        # exist_ok avoids a race if the directory appears between the
        # isdir() check above and this call (e.g. a concurrent writer).
        os.makedirs(dirname, exist_ok=True)
    return output_filename
Create a filename where output data will be saved. Args: kwargs (dict): Attributes and other metadata to use for formatting the previously provided `filename`.
codesearchnet
def create_summary_metadata(display_name, description, num_thresholds):
    """Create a `summary_pb2.SummaryMetadata` proto for pr_curves plugin data.

    Arguments:
        display_name: The display name used in TensorBoard.
        description: The description to show in TensorBoard.
        num_thresholds: The number of thresholds to use for PR curves.

    Returns:
        A `summary_pb2.SummaryMetadata` protobuf object.
    """
    # Serialize the plugin-specific payload first, then wrap it in the
    # generic SummaryMetadata envelope.
    plugin_content = plugin_data_pb2.PrCurvePluginData(
        version=PROTO_VERSION,
        num_thresholds=num_thresholds).SerializeToString()
    plugin_data = summary_pb2.SummaryMetadata.PluginData(
        plugin_name=PLUGIN_NAME,
        content=plugin_content)
    return summary_pb2.SummaryMetadata(
        display_name=display_name,
        summary_description=description,
        plugin_data=plugin_data)
Create a `summary_pb2.SummaryMetadata` proto for pr_curves plugin data. Arguments: display_name: The display name used in TensorBoard. description: The description to show in TensorBoard. num_thresholds: The number of thresholds to use for PR curves. Returns: A `summary_pb2.SummaryMetadata` protobuf object.
juraj-google-style
def __strict(self):
    """Private method. Checks if striplog is monotonically increasing in depth.

    Returns:
        bool: True if the flattened sequence of (top, base) depths never
        decreases. An empty striplog is considered strict (the original
        implementation raised TypeError via an unseeded reduce()).
    """
    # Flatten each interval's (top.z, base.z) pair in order; this replaces
    # the functools.reduce(concat, ...) idiom and tolerates empty input.
    boundaries = np.array([z for interval in self
                           for z in (interval.top.z, interval.base.z)])
    return all(np.diff(boundaries) >= 0)
Private method. Checks if striplog is monotonically increasing in depth. Returns: Bool.
codesearchnet
def dict_values(src):
    """Recursively yield all leaf values in a dict.

    Unlike the builtin dict.values() method, this generator descends into
    nested dicts, yielding every non-dict value at any depth, in insertion
    order.

    Args:
        src (dict): Source dict.

    Yields:
        The leaf (non-dict) values.
    """
    for value in src.values():
        if isinstance(value, dict):
            # Delegate to the nested dict instead of shadowing the loop
            # variable as the original `for v in dict_values(v)` did.
            yield from dict_values(value)
        else:
            yield value
Recursively get values in dict. Unlike the builtin dict.values() method, this generator will descend into nested dicts, yielding all nested non-dict values. Arguments: src (dict): Source dict. Yields: The leaf values found at any nesting depth.
codesearchnet
def __init__(self, message='Hello!'):
    """Constructor of the test class.

    Constructs a new ClassWithDocstring object.

    Args:
        message: The default message to print.
    """
    # Remember the message so other methods can use it later.
    self.message = message
Constructor of the test class. Constructs a new ClassWithDocstring object. Args: message: The default message to print.
github-repos
def get_channel_id(turn_context: TurnContext) -> str:
    """Get the Channel Id from the current Activity on the Turn Context.

    Args:
        turn_context (TurnContext): The Turn Context to retrieve the
            Activity's Channel Id from.

    Returns:
        str: The Channel Id from the Turn Context's Activity, or an empty
        string when the Activity carries no Channel Id.
    """
    channel_id = turn_context.activity.channel_id
    return "" if channel_id is None else channel_id
Get the Channel Id from the current Activity on the Turn Context. Args: turn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from. Returns: str: The Channel Id from the Turn Context's Activity.
juraj-google-style
def get_function_descriptor_list(self):
    """Return a list of bytes representing the function descriptor.

    This function is used to pass this function descriptor to backend.

    Returns:
        A list of bytes. Empty for driver tasks; otherwise the ASCII-encoded
        module, class and function names, plus the source hash when present.
    """
    if self.is_for_driver_task:
        return []

    descriptors = [
        self.module_name.encode('ascii'),
        self.class_name.encode('ascii'),
        self.function_name.encode('ascii'),
    ]
    # An empty hash is omitted entirely rather than appended as b''.
    if self._function_source_hash:
        descriptors.append(self._function_source_hash)
    return descriptors
Return a list of bytes representing the function descriptor. This function is used to pass this function descriptor to backend. Returns: A list of bytes.
codesearchnet
def __init__(self, root=None, **kwargs):
    """Creates a training checkpoint for a single or group of objects.

    Args:
        root: The root object to checkpoint. `root` may be a trackable object
            or `WeakRef` of a trackable object.
        **kwargs: Keyword arguments are set as attributes of this object, and
            are saved with the checkpoint. All `kwargs` must be trackable
            objects, or a nested structure of trackable objects (`list`,
            `dict`, or `tuple`).

    Raises:
        ValueError: If `root` or the objects in `kwargs` are not trackable. A
            `ValueError` is also raised if the `root` object tracks different
            objects from the ones listed in attributes in kwargs (e.g.
            `root.child = A` and `tf.train.Checkpoint(root, child=B)` are
            incompatible).
    """
    super().__init__()
    # Lazily initialise the module-level write timestamp under its lock.
    global _END_TIME_OF_LAST_WRITE
    with _END_TIME_OF_LAST_WRITE_LOCK:
        if _END_TIME_OF_LAST_WRITE is None:
            _END_TIME_OF_LAST_WRITE = time.time()
    self._root = root
    # Keep kwargs for reference but exclude them from dependency tracking.
    self._kwargs = kwargs
    self._delete_tracking('_kwargs')
    self._async_checkpointer_impl = None
    self._checkpoint_options = None
    attached_dependencies = None
    self._save_counter = None
    self._save_assign_op = None
    if root:
        # Dereference a weakref root before validating trackability.
        trackable_root = root() if isinstance(root, weakref.ref) else root
        _assert_trackable(trackable_root, 'root')
        attached_dependencies = []
        kwargs['root'] = root
        trackable_root._maybe_initialize_trackable()
        self._save_counter = data_structures.NoDependency(trackable_root._lookup_dependency('save_counter'))
    # Sorted iteration keeps attribute attachment order deterministic.
    for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
        setattr(self, k, v)
        # Re-read the attribute: setattr may have wrapped/converted it.
        converted_v = getattr(self, k)
        if isinstance(converted_v, weakref.ref):
            converted_v = converted_v()
        _assert_trackable(converted_v, k)
        if root:
            # Each kwarg must either be new to root or identical to what
            # root already tracks under that name.
            child = trackable_root._lookup_dependency(k)
            if child is None:
                attached_dependencies.append(base.WeakTrackableReference(k, converted_v))
            elif child != converted_v:
                raise ValueError(f'Cannot create a Checkpoint with keyword argument {k} if root.{k} already exists.')
    self._saver = TrackableSaver(graph_view_lib.ObjectGraphView(root if root else self, attached_dependencies=attached_dependencies))
    self._attached_dependencies = data_structures.NoDependency(attached_dependencies)
Creates a training checkpoint for a single or group of objects. Args: root: The root object to checkpoint. `root` may be a trackable object or `WeakRef` of a trackable object. **kwargs: Keyword arguments are set as attributes of this object, and are saved with the checkpoint. All `kwargs` must be trackable objects, or a nested structure of trackable objects (`list`, `dict`, or `tuple`). Raises: ValueError: If `root` or the objects in `kwargs` are not trackable. A `ValueError` is also raised if the `root` object tracks different objects from the ones listed in attributes in kwargs (e.g. `root.child = A` and `tf.train.Checkpoint(root, child=B)` are incompatible).
github-repos
def swf2png(swf_path, png_path, swfrender_path='swfrender'):
    """Convert SWF slides into a PNG image using the swfrender tool.

    Args:
        swf_path: Path of the SWF file to convert.
        png_path: Path of the PNG file to create.
        swfrender_path: Name or path of the swfrender executable.

    Raises:
        OSError: if swfrender is not available.
        ConversionError: if the image cannot be created.
    """
    cmd = [swfrender_path, swf_path, '-o', png_path]
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        message = ('Failed to convert SWF file %s.\n'
                   '\tCommand: %s\n'
                   '\tExit status: %s.\n'
                   '\tOutput:\n%s'
                   % (swf_path, ' '.join(cmd), e.returncode, e.output))
        raise ConversionError(message)
Convert SWF slides into a PNG image Raises: OSError is raised if swfrender is not available. ConversionError is raised if image cannot be created.
codesearchnet
def __init__(self, _args):
    """Init Class properties.

    Args:
        _args (namespace): The argparser args Namespace.
    """
    super(TcExValidate, self).__init__(_args)

    self._app_packages = []
    self._install_json_schema = None
    self._layout_json_schema = None
    self.config = {}

    # Schema files can only be located when setuptools' pkg_resources
    # machinery has been imported by the host application.
    if 'pkg_resources' in sys.modules:
        self.install_json_schema_file = pkg_resources.resource_filename(
            __name__, '/'.join(['schema', 'install-json-schema.json'])
        )
        self.layout_json_schema_file = pkg_resources.resource_filename(
            __name__, '/'.join(['schema', 'layout-json-schema.json'])
        )
    else:
        self.install_json_schema_file = None
        self.layout_json_schema_file = None

    self.validation_data = self._validation_data
Init Class properties. Args: _args (namespace): The argparser args Namespace.
juraj-google-style
def parse_raw_fact(raw_fact):
    """Extract semantically meaningful sub-components from a ``raw fact`` text.

    Args:
        raw_fact (text_type): ``raw fact`` text to be parsed.

    Returns:
        dict: dict with 'timeinfo', 'category', 'activity' and 'description'
        sub-components as values.
    """

    def split_once(text, separator):
        """Split on the first separator; a missing separator yields a None tail."""
        pieces = text.split(separator, 1)
        if len(pieces) == 1:
            return pieces[0].strip(), None
        head, tail = pieces
        return head.strip(), tail.strip()

    time_info, remainder = time_helpers.extract_time_info(raw_fact)
    # Activity comes before the first '@'; category/description after it.
    activity_name, tail = split_once(remainder, '@')
    if tail:
        category_name, description = split_once(tail, ',')
    else:
        category_name, description = None, None

    return {
        'timeinfo': time_info,
        'category': category_name,
        'activity': activity_name,
        'description': description,
    }
Extract semantically meaningful sub-components from a ``raw fact`` text. Args: raw_fact (text_type): ``raw fact`` text to be parsed. Returns: dict: dict with sub-components as values.
juraj-google-style
def _parse_ports(port_values: dict) -> dict:
    """Parse ports key into a Docker endpoint specification.

    Args:
        port_values (dict): ports configuration values. Each entry is either
            a single port ("8080") or a "published:target" pair ("8080:80").

    Returns:
        docker.types.EndpointSpec: endpoint specification containing the
        published-to-target port mapping.
    """
    endpoints = {}
    for port_element in port_values:
        parts = str(port_element).split(':')
        if len(parts) == 2:
            # "published:target" form. The previous implementation mapped
            # each port to itself, silently dropping the association.
            published, target = parts
        else:
            # Single port: published and target are the same.
            published = target = parts[0]
        # EndpointSpec ports are keyed by published port, value target port.
        endpoints[int(published)] = int(target)
    endpoint_spec = docker.types.EndpointSpec(ports=endpoints)
    return endpoint_spec
Parse ports key. Args: port_values (dict): ports configuration values Returns: dict, Ports specification which contains exposed ports
juraj-google-style
def load_default(self):
    """Loads the default J-Link SDK DLL.

    The default J-Link SDK is determined by first checking if ``ctypes``
    can find the DLL, then by searching the platform-specific paths.

    Args:
        self (Library): the ``Library`` instance

    Returns:
        ``True`` if the DLL was loaded, otherwise ``False``.
    """
    path = ctypes_util.find_library(self._sdk)

    if path is None:
        # ctypes could not locate the library; fall back to scanning the
        # platform-specific install locations.
        if self._windows or self._cygwin:
            finder = self.find_library_windows
        elif sys.platform.startswith('linux'):
            finder = self.find_library_linux
        elif sys.platform.startswith('darwin'):
            finder = self.find_library_darwin
        else:
            finder = None

        if finder is not None:
            path = next(finder(), None)

    if path is None:
        return False
    return self.load(path)
Loads the default J-Link SDK DLL. The default J-Link SDK is determined by first checking if ``ctypes`` can find the DLL, then by searching the platform-specific paths. Args: self (Library): the ``Library`` instance Returns: ``True`` if the DLL was loaded, otherwise ``False``.
juraj-google-style
def onkeydown(self, key, keycode, ctrl, shift, alt):
    """Called when user types and releases a key.

    The widget should be able to receive the focus in order to emit the
    event. Assign a 'tabindex' attribute to make it focusable.

    Args:
        key (str): the character value
        keycode (str): the numeric char code
        ctrl: modifier state for the Control key (presumably a boolean
            flag -- confirm against the event dispatcher)
        shift: modifier state for the Shift key (see note on ctrl)
        alt: modifier state for the Alt key (see note on ctrl)

    Returns:
        tuple: the (key, keycode, ctrl, shift, alt) parameters, forwarded
        unchanged to registered listeners.
    """
    return (key, keycode, ctrl, shift, alt)
Called when user types and releases a key. The widget should be able to receive the focus in order to emit the event. Assign a 'tabindex' attribute to make it focusable. Args: key (str): the character value keycode (str): the numeric char code
juraj-google-style
def detect_deprecated_references_in_node(self, node):
    """Detects if a node makes use of any deprecated standards.

    Expression-level deprecations are collected first, followed by any
    node-type deprecations whose signature matches the node's type.

    Returns:
        list of tuple: (detecting_signature, original_text, recommended_text)
    """
    results = []
    if node.expression:
        results.extend(self.detect_deprecation_in_expression(node.expression))
    results.extend(entry for entry in self.DEPRECATED_NODE_TYPES
                   if node.type == entry[0])
    return results
Detects if a node makes use of any deprecated standards. Returns: list of tuple: (detecting_signature, original_text, recommended_text)
codesearchnet