code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def fix(self, value=None):
    """Fix all instances of this variable.

    If ``value`` is provided the variable is fixed to that value;
    otherwise it is fixed to its current value.

    Args:
        value: value to be set, or None to fix at the current value.
    """
    # Forward either zero or one argument to the implementation.
    args = () if value is None else (value,)
    self._impl.fix(*args)
Fix all instances of this variable to a value if provided or to their current value otherwise. Args: value: value to be set.
juraj-google-style
def build_estimator(tf_transform_output, config, hidden_units=None): transformed_feature_spec = tf_transform_output.transformed_feature_spec().copy() transformed_feature_spec.pop(taxi.transformed_name(taxi.LABEL_KEY)) real_valued_columns = [tf.feature_column.numeric_column(key, shape=()) for key in taxi.transformed_names(taxi.DENSE_FLOAT_FEATURE_KEYS)] categorical_columns = [tf.feature_column.categorical_column_with_identity(key, num_buckets=taxi.VOCAB_SIZE + taxi.OOV_SIZE, default_value=0) for key in taxi.transformed_names(taxi.VOCAB_FEATURE_KEYS)] categorical_columns += [tf.feature_column.categorical_column_with_identity(key, num_buckets=taxi.FEATURE_BUCKET_COUNT, default_value=0) for key in taxi.transformed_names(taxi.BUCKET_FEATURE_KEYS)] categorical_columns += [tf.feature_column.categorical_column_with_identity(key, num_buckets=num_buckets, default_value=0) for key, num_buckets in zip(taxi.transformed_names(taxi.CATEGORICAL_FEATURE_KEYS), taxi.MAX_CATEGORICAL_FEATURE_VALUES)] return tf_estimator.DNNLinearCombinedClassifier(config=config, linear_feature_columns=categorical_columns, dnn_feature_columns=real_valued_columns, dnn_hidden_units=hidden_units or [100, 70, 50, 25])
Build an estimator for predicting the tipping behavior of taxi riders. Args: tf_transform_output: A TFTransformOutput. config: tf.contrib.learn.RunConfig defining the runtime environment for the estimator (including model_dir). hidden_units: [int], the layer sizes of the DNN (input layer first) Returns: Resulting DNNLinearCombinedClassifier.
github-repos
def delete_container_service(access_token, subscription_id, resource_group, service_name): endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', resource_group, '/providers/Microsoft.ContainerService/ContainerServices/', service_name, '?api-version=', ACS_API]) return do_delete(endpoint, access_token)
Delete a named container. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. service_name (str): Name of container service. Returns: HTTP response.
juraj-google-style
async def logs(self, service_id: str, *, details: bool=False, follow: bool=False, stdout: bool=False, stderr: bool=False, since: int=0, timestamps: bool=False, is_tty: bool=False, tail: str='all') -> Union[(str, AsyncIterator[str])]: if ((stdout is False) and (stderr is False)): raise TypeError('Need one of stdout or stderr') params = {'details': details, 'follow': follow, 'stdout': stdout, 'stderr': stderr, 'since': since, 'timestamps': timestamps, 'tail': tail} response = (await self.docker._query('services/{service_id}/logs'.format(service_id=service_id), method='GET', params=params)) return (await multiplexed_result(response, follow, is_tty=is_tty))
Retrieve logs of the given service Args: details: show service context and extra details provided to logs follow: return the logs as a stream. stdout: return logs from stdout stderr: return logs from stderr since: return logs since this time, as a UNIX timestamp timestamps: add timestamps to every log line is_tty: the service has a pseudo-TTY allocated tail: only return this number of log lines from the end of the logs, specify as an integer or `all` to output all log lines.
codesearchnet
def _transform_value_range(self, images, original_range, target_range, dtype='float32'): if original_range[0] == target_range[0] and original_range[1] == target_range[1]: return images images = self.backend.cast(images, dtype=dtype) original_min_value, original_max_value = self._unwrap_value_range(original_range, dtype=dtype) target_min_value, target_max_value = self._unwrap_value_range(target_range, dtype=dtype) images = (images - original_min_value) / (original_max_value - original_min_value) scale_factor = target_max_value - target_min_value return images * scale_factor + target_min_value
Convert input values from `original_range` to `target_range`. This function is intended to be used in preprocessing layers that rely upon color values. This allows us to assume internally that the input tensor is always in the range `(0, 255)`. Args: images: the set of images to transform to the target range. original_range: the value range to transform from. target_range: the value range to transform to. dtype: the dtype to compute the conversion with, defaults to "float32". Returns: a new Tensor with values in the target range. Example: ```python original_range = [0, 1] target_range = [0, 255] images = layer.preprocessing.transform_value_range( images, original_range, target_range ) images = ops.minimum(images + 10, 255) images = layer.preprocessing.transform_value_range( images, target_range, original_range ) ```
github-repos
def RemapOperatorType(operator_type):
    """Remap operator structs from old names to new names.

    Args:
        operator_type: String representing the builtin operator data type
            name (see :schema.fbs).

    Returns:
        The upgraded builtin operator data type name, or the input
        unchanged when no remapping applies.
    """
    old_to_new = {
        'PoolOptions': 'Pool2DOptions',
        'DepthwiseConvolutionOptions': 'DepthwiseConv2DOptions',
        'ConvolutionOptions': 'Conv2DOptions',
        'LocalResponseNormOptions': 'LocalResponseNormalizationOptions',
        'BasicRNNOptions': 'RNNOptions',
    }
    # dict.get with a default replaces the explicit membership test.
    return old_to_new.get(operator_type, operator_type)
Remap operator structs from old names to new names. Args: operator_type: String representing the builtin operator data type string. (see :schema.fbs). Raises: ValueError: When the model has consistency problems. Returns: Upgraded builtin operator data type as a string.
github-repos
def DownloadCollection(coll_path, target_path, token=None, overwrite=False, dump_client_info=False, flatten=False, max_threads=10): completed_clients = set() coll = _OpenCollectionPath(coll_path) if (coll is None): logging.error('%s is not a valid collection. Typo? Are you sure something was written to it?', coll_path) return thread_pool = threadpool.ThreadPool.Factory('Downloader', max_threads) thread_pool.Start() try: collection_urn = coll.collection_id except AttributeError: collection_urn = coll.urn try: original_client_id = rdf_client.ClientURN(collection_urn.Split()[0]) except IOError: original_client_id = None logging.info('Expecting to download %s files', len(coll)) for grr_message in coll: source = None if isinstance(grr_message, rdf_flows.GrrMessage): source = grr_message.source grr_message = grr_message.payload if isinstance(grr_message, rdfvalue.RDFURN): urn = grr_message elif isinstance(grr_message, rdf_client_fs.StatEntry): urn = rdfvalue.RDFURN(grr_message.AFF4Path((source or original_client_id))) elif isinstance(grr_message, rdf_file_finder.FileFinderResult): urn = rdfvalue.RDFURN(grr_message.stat_entry.AFF4Path((source or original_client_id))) elif isinstance(grr_message, collectors.ArtifactFilesDownloaderResult): if grr_message.HasField('downloaded_file'): urn = grr_message.downloaded_file.AFF4Path((source or original_client_id)) else: continue elif isinstance(grr_message, rdfvalue.RDFBytes): try: os.makedirs(target_path) except OSError: pass try: client_id = source.Split()[0] with open(os.path.join(target_path, client_id), 'wb') as fd: fd.write(str(grr_message)) except AttributeError: pass continue else: continue if dump_client_info: client_id = urn.Split()[0] re_match = aff4_grr.VFSGRRClient.CLIENT_ID_RE.match(client_id) if (re_match and (client_id not in completed_clients)): args = (rdf_client.ClientURN(client_id), target_path, token, overwrite) thread_pool.AddTask(target=DumpClientYaml, args=args, name='ClientYamlDownloader') 
completed_clients.add(client_id) args = (urn, target_path, token, overwrite) if flatten: target = CopyAndSymlinkAFF4ToLocal else: target = CopyAFF4ToLocal thread_pool.AddTask(target=target, args=args, name='Downloader') thread_pool.Stop(join_timeout=THREADPOOL_JOIN_TIMEOUT)
Iterate through a Collection object downloading all files. Args: coll_path: Path to an AFF4 collection. target_path: Base directory to write to. token: Token for access. overwrite: If True, overwrite existing files. dump_client_info: If True, this will detect client paths, and dump a yaml version of the client object to the root path. This is useful for seeing the hostname/users of the machine the client id refers to. flatten: If True, produce a "files" flat folder with links to all the found files. max_threads: Use this many threads to do the downloads.
codesearchnet
def gather_data(options): (qry_string, param_str) = qry_create(options) qry_results = awsc.get_inst_info(qry_string) i_info = process_results(qry_results) return (i_info, param_str)
Get Data specific for command selected. Create ec2 specific query and output title based on options specified, retrieves the raw response data from aws, then processes it into the i_info dict, which is used throughout this module. Args: options (object): contains args and data from parser, that has been adjusted by the command specific functions as appropriate. Returns: i_info (dict): information on instances and details. param_str (str): the title to display before the list.
codesearchnet
def _collect_data(directory): data_files = [] transcripts = [ filename for filename in os.listdir(directory) if filename.endswith(".csv") ] for transcript in transcripts: transcript_path = os.path.join(directory, transcript) with open(transcript_path, "r") as transcript_file: transcript_reader = csv.reader(transcript_file) _ = next(transcript_reader) for transcript_line in transcript_reader: media_name, label = transcript_line[0:2] filename = os.path.join(directory, media_name) data_files.append((media_name, filename, label)) return data_files
Traverses directory collecting input and target files. Args: directory: base path to extracted audio and transcripts. Returns: list of (media_base, media_filepath, label) tuples
juraj-google-style
def create(self, data, **kwargs): self._check_missing_create_attrs(data) server_data = self.gitlab.http_post(self.path, post_data=data, **kwargs) source_issue = ProjectIssue(self._parent.manager, server_data['source_issue']) target_issue = ProjectIssue(self._parent.manager, server_data['target_issue']) return source_issue, target_issue
Create a new object. Args: data (dict): parameters to send to the server to create the resource **kwargs: Extra options to send to the server (e.g. sudo) Returns: RESTObject, RESTObject: The source and target issues Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server cannot perform the request
juraj-google-style
def fillna(self: EventSetOrNode, value: float = 0.0) -> EventSetOrNode:
    """Replaces all the NaN values with `value`.

    Features that cannot contain NaN values (e.g. integer or bytes
    features) are not impacted.

    Args:
        value: Value to replace NaNs with.

    Returns:
        EventSet without NaNs.
    """
    # Local import, presumably to avoid a circular import at module load.
    from temporian.core.operators.fillna import fillna as fillna_operator

    return fillna_operator(self, value)
Replaces all the NaN values with `value`. Features that cannot contain NaN values (e.g. integer or bytes features) are not impacted. Usage example: ```python >>> import math >>> a = tp.event_set( ... timestamps=[0, 1, 3], ... features={ ... "f1": [0., 10., math.nan], ... "f2": ["a","b",""]}, ... ) >>> a.fillna() indexes: [] features: [('f1', float64), ('f2', str_)] events: (3 events): timestamps: [0. 1. 3.] 'f1': [ 0. 10. 0.] 'f2': [b'a' b'b' b''] ... ``` Args: value: Value to replace Nans with? Returns: EventSet without NaNs.
github-repos
def merge_resources(resource1, resource2):
    """Merge two resource dictionaries into a new one.

    Values from ``resource2`` override those of ``resource1``; neither
    input is modified.

    Args:
        resource1: original resource.
        resource2: resource whose entries take precedence.

    Returns:
        dict: merged resource.
    """
    # Dict unpacking builds the copy and applies the overrides in one step.
    return {**resource1, **resource2}
Updates a copy of resource1 with resource2 values and returns the merged dictionary. Args: resource1: original resource resource2: resource to update resource1 Returns: dict: merged resource
juraj-google-style
def _restore_and_log_checkpoint(self, actor):
    """Restore an actor from a checkpoint if available and log any errors.

    This should only be called on workers that have just executed an actor
    creation task.

    Args:
        actor: The actor to restore from a checkpoint.
    """
    actor_id = self._worker.actor_id
    try:
        checkpoints = ray.actor.get_checkpoints_for_actor(actor_id)
        if len(checkpoints) > 0:
            # Let the actor decide which (if any) checkpoint to load.
            checkpoint_id = actor.load_checkpoint(actor_id, checkpoints)
            if checkpoint_id is not None:
                # Fixed typo in the assertion message: "eone" -> "None".
                msg = ('`load_checkpoint` must return a checkpoint id that ' +
                       'exists in the `available_checkpoints` list, or None.')
                assert any((checkpoint_id == checkpoint.checkpoint_id)
                           for checkpoint in checkpoints), msg
                self._worker.raylet_client.notify_actor_resumed_from_checkpoint(
                    actor_id, checkpoint_id)
    except Exception:
        # Restoration is best-effort: report the failure to the driver
        # instead of crashing the worker.
        traceback_str = ray.utils.format_error_message(traceback.format_exc())
        ray.utils.push_error_to_driver(
            self._worker,
            ray_constants.CHECKPOINT_PUSH_ERROR,
            traceback_str,
            driver_id=self._worker.task_driver_id)
Restore an actor from a checkpoint if available and log any errors. This should only be called on workers that have just executed an actor creation task. Args: actor: The actor to restore from a checkpoint.
codesearchnet
def get(self):
    """Return the current quantile value from the sorted items.

    Delegates to ``_get_helper`` with the sorted values and the target
    quantile; the helper performs linear interpolation.

    Returns:
        float: The calculated quantile value (NaN if the window is empty).
    """
    items, quantile = self._sorted_items, self._q
    return self._get_helper(items, quantile)
Returns the current quantile value using the sorted list. Calculates the quantile using linear interpolation on the sorted values. Returns: float: The calculated quantile value. Returns NaN if the window is empty.
github-repos
def matches_to_marker_results(df): assert isinstance(df, pd.DataFrame) from collections import defaultdict d = defaultdict(list) for (idx, row) in df.iterrows(): marker = row['marker'] d[marker].append(row) marker_results = {} for (k, v) in d.items(): if (len(v) > 1): logging.debug('Multiple potential cgMLST allele matches (n=%s) found for marker %s. Selecting match on longest contig.', len(v), k) df_marker = pd.DataFrame(v) df_marker.sort_values('slen', ascending=False, inplace=True) for (i, r) in df_marker.iterrows(): allele = r['allele_name'] slen = r['slen'] logging.debug('Selecting allele %s from contig with length %s', allele, slen) seq = r['sseq'] if ('-' in seq): logging.warning('Gaps found in allele. Removing gaps. %s', r) seq = seq.replace('-', '').upper() allele = allele_name(seq) marker_results[k] = allele_result_dict(allele, seq, r.to_dict()) break elif (len(v) == 1): row = v[0] seq = row['sseq'] if ('-' in seq): logging.warning('Gaps found in allele. Removing gaps. %s', row) seq = seq.replace('-', '').upper() allele = allele_name(seq) marker_results[k] = allele_result_dict(allele, seq, row.to_dict()) else: err_msg = 'Empty list of matches for marker {}'.format(k) logging.error(err_msg) raise Exception(err_msg) return marker_results
Parse perfect BLAST matches into a marker results dict. Args: df (pandas.DataFrame): DataFrame of perfect BLAST matches Returns: dict: cgMLST330 marker names to matching allele numbers
codesearchnet
def apply_transformations(collection, transformations, select=None): for t in transformations: kwargs = dict(t) func = kwargs.pop('name') cols = kwargs.pop('input', None) if isinstance(func, string_types): if func in ('and', 'or'): func += '_' if not hasattr(transform, func): raise ValueError("No transformation '%s' found!" % func) func = getattr(transform, func) func(collection, cols, **kwargs) if select is not None: transform.Select(collection, select) return collection
Apply all transformations to the variables in the collection. Args: transformations (list): List of transformations to apply. select (list): Optional list of names of variables to retain after all transformations are applied.
juraj-google-style
def get_el(el):
    """Get value of given `el` tag element.

    Args:
        el (obj): Element reference whose value should be read.

    Returns:
        str: Value of the element.

    Raises:
        ValueError: if no getter is implemented for the element's tag.
    """
    tag_name = el.elt.tagName.lower()
    # Guard clause: only form controls expose a usable `.value`.
    if tag_name not in {'input', 'textarea', 'select'}:
        raise ValueError(('Getter for %s (%s) not implemented!' % (tag_name, el.id)))
    return el.value
Get value of given `el` tag element. Automatically choose proper method to get the `value` based on the type of the `el`. Args: el (obj): Element reference to the input you want to read the value from. Returns: str: Value of the object.
codesearchnet
def __init__(self, candidates: typing.Sequence[ValueSpecOrAnnotation], default: typing.Any=MISSING_VALUE, is_noneable: bool=False, frozen: bool=False): if not isinstance(candidates, (tuple, list)) or len(candidates) < 2: raise ValueError(f"Argument 'candidates' must be a list of at least 2 elements. Encountered {candidates}.") candidates = [ValueSpec.from_annotation(c, auto_typing=True) for c in candidates] candidates_by_type = {} has_noneable_candidate = False for i, c in enumerate(candidates): if not isinstance(c, ValueSpec): raise ValueError(f"Items in 'candidates' must be ValueSpec objects.Encountered {c} at {i}.") if c.is_noneable: has_noneable_candidate = True spec_type = (c.__class__, getattr(c, '_value_type')) if spec_type not in candidates_by_type: candidates_by_type[spec_type] = [] candidates_by_type[spec_type].append(c) for spec_type, cs in candidates_by_type.items(): if len(cs) > 1: raise ValueError(f'Found {len(cs)} value specs of the same type {spec_type}.') candidate_types = set() no_value_type_check = False for c in candidates: child_value_type = getattr(c, '_value_type') if child_value_type is None: no_value_type_check = True elif isinstance(child_value_type, tuple): candidate_types.update(child_value_type) else: candidate_types.add(child_value_type) self._candidates = candidates union_value_type = None if no_value_type_check else tuple(candidate_types) super().__init__(union_value_type, default, is_noneable=is_noneable or has_noneable_candidate, frozen=frozen)
Constructor. Args: candidates: A sequence of value spec objects or their equivalence as the spec for candidate types. default: (Optional) default value of this spec. is_noneable: (Optional) If True, None is acceptable for this spec. frozen: If True, values other than the default value is not accceptable.
github-repos
def address(self, compressed=True, testnet=False):
    """Return the Base58Check-encoded address for this key's HASH160.

    Args:
        compressed (bool): Whether or not the compressed key should be
            used. NOTE(review): this argument is currently ignored --
            ``True`` is hard-coded in the call below, so the compressed
            address is always produced. Possibly deliberate (e.g. if these
            keys are always compressed); confirm, or forward the argument.
        testnet (bool): Whether or not the key is intended for testnet
            usage. False indicates mainnet usage.

    Returns:
        bytes: Base58Check encoded string.
    """
    return self._key.address(True, testnet)
Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string
juraj-google-style
def _PrintTasksStatus(self, processing_status): if (processing_status and processing_status.tasks_status): tasks_status = processing_status.tasks_status table_view = views.CLITabularTableView(column_names=['Tasks:', 'Queued', 'Processing', 'Merging', 'Abandoned', 'Total'], column_sizes=[15, 7, 15, 15, 15, 0]) table_view.AddRow(['', tasks_status.number_of_queued_tasks, tasks_status.number_of_tasks_processing, tasks_status.number_of_tasks_pending_merge, tasks_status.number_of_abandoned_tasks, tasks_status.total_number_of_tasks]) self._output_writer.Write('\n') table_view.Write(self._output_writer)
Prints the status of the tasks. Args: processing_status (ProcessingStatus): processing status.
codesearchnet
def GetMessages(file_protos):
    """Builds a dictionary of all the messages available in a set of files.

    Args:
        file_protos: A sequence of file protos to build messages out of.

    Returns:
        A dictionary mapping proto names to the message classes. This will
        include any dependent messages as well as any messages defined in
        the same file as a specified message.
    """
    # Register every file with the factory pool, remembering the names so
    # the message classes can be resolved in a single call afterwards.
    file_names = []
    for file_proto in file_protos:
        _FACTORY.pool.Add(file_proto)
        file_names.append(file_proto.name)
    return _FACTORY.GetMessages(file_names)
Builds a dictionary of all the messages available in a set of files. Args: file_protos: A sequence of file protos to build messages out of. Returns: A dictionary mapping proto names to the message classes. This will include any dependent messages as well as any messages defined in the same file as a specified message.
codesearchnet
def __init__(self, field_types):
    """Create a struct-like Weld type from an ordered list of field types.

    Args:
        field_types (TYPE): sequence of WeldType instances, one per field.
    """
    # Equivalent to the `False not in [...]` membership check, but
    # short-circuits and avoids building an intermediate list.
    assert all(isinstance(field, WeldType) for field in field_types)
    self.field_types = field_types
Summary Args: field_types (TYPE): Description
juraj-google-style
def apply(self, score: Optional[float]) -> Optional[int]: if score is None: self._tracker.push(float('NaN')) return None self._tracker.push(score) if math.isnan(score): return self._missing_label if score < self.threshold: return self._normal_label return self._outlier_label
Applies the quantile-based threshold to an anomaly score. Updates the quantile tracker with the given score and classifies the score as normal or outlier based on the current quantile threshold. Args: score (Optional[float]): The input anomaly score. Returns: Optional[int]: The anomaly label: - `normal_label` if the score is less than the threshold. - `outlier_label` if the score is at or above the threshold. - `missing_label` if the score is `NaN` (detector not ready). - `None` if the score is `None` (detector ready, but unable to produce score).
github-repos
def top_k_with_unique(inputs, k): unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32)) (top_values, indices) = _create_topk_unique(unique_inputs, k) top_values = tf.cast(top_values, inputs.dtype) return (top_values, indices)
Finds the values and indices of the k largests entries. Instead of doing sort like tf.nn.top_k, this function finds the max value k times. The running time is proportional to k, which is be faster when k is small. The current implementation supports only inputs of rank 2. In addition, iota is used to replace the lower bits of each element, this makes the selection more stable when there are equal elements. The overhead is that output values are approximated. Args: inputs: A tensor with rank of 2. [batch_size, original_size]. k: An integer, number of top elements to select. Returns: top_values: A tensor, the k largest elements in sorted order. [batch_size, k]. indices: A tensor, indices of the top_values. [batch_size, k].
codesearchnet
def AddFileEntry(self, path, file_entry_type=definitions.FILE_ENTRY_TYPE_FILE, file_data=None, link_data=None): if (path in self._paths): raise KeyError('File entry already set for path: {0:s}.'.format(path)) if (file_data and (file_entry_type != definitions.FILE_ENTRY_TYPE_FILE)): raise ValueError('File data set for non-file file entry type.') if (link_data and (file_entry_type != definitions.FILE_ENTRY_TYPE_LINK)): raise ValueError('Link data set for non-link file entry type.') if (file_data is not None): path_data = file_data elif (link_data is not None): path_data = link_data else: path_data = None self._paths[path] = (file_entry_type, path_data)
Adds a fake file entry. Args: path (str): path of the file entry. file_entry_type (Optional[str]): type of the file entry object. file_data (Optional[bytes]): data of the fake file-like object. link_data (Optional[bytes]): link data of the fake file entry object. Raises: KeyError: if the path already exists. ValueError: if the file data is set but the file entry type is not a file or if the link data is set but the file entry type is not a link.
codesearchnet
def refresh(self, token, timeout): assert (token in self._dict), 'Lock must exist' assert ((timeout == (- 1)) or (timeout > 0)) if ((timeout < 0) or (timeout > LockStorageDict.LOCK_TIME_OUT_MAX)): timeout = LockStorageDict.LOCK_TIME_OUT_MAX self._lock.acquire_write() try: lock = self._dict[token] lock['timeout'] = timeout lock['expire'] = (time.time() + timeout) self._dict[token] = lock self._flush() finally: self._lock.release() return lock
Modify an existing lock's timeout. token: Valid lock token. timeout: Suggested lifetime in seconds (-1 for infinite). The real expiration time may be shorter than requested! Returns: Lock dictionary. Raises ValueError, if token is invalid.
codesearchnet
def raise_for_api_error(headers: MutableMapping, data: MutableMapping) -> None:
    """Check request response for Slack API error.

    Args:
        headers: Response headers.
        data: Response data.

    Raises:
        :class:`slack.exceptions.SlackAPIError`: if the response's ``ok``
            field is falsy.
    """
    if (not data['ok']):
        # Fixed typo in the fallback error code: "unknow_error" -> "unknown_error".
        raise exceptions.SlackAPIError(data.get('error', 'unknown_error'), headers, data)
    if ('warning' in data):
        LOG.warning('Slack API WARNING: %s', data['warning'])
Check request response for Slack API error Args: headers: Response headers data: Response data Raises: :class:`slack.exceptions.SlackAPIError`
codesearchnet
def styled_plot(*style_sheets):
    """Return a decorator that will apply matplotlib style sheets to a plot.

    ``style_sheets`` is a base set of styles, which will be ignored if
    ``no_base_style`` is set in the decorated function arguments. The style
    will further be overwritten by any styles in the ``style`` optional
    argument of the decorated function, and by ``fonts`` (a list of
    sans-serif font names) last.

    Args:
        style_sheets (:obj:`list`, :obj:`str`, or :obj:`dict`): Any
            matplotlib supported definition of a style sheet.
    """
    # Local import: no top-level import block is visible in this module.
    import functools

    def decorator(get_plot):
        # functools.wraps preserves the wrapped function's name/docstring,
        # which the original implementation lost.
        @functools.wraps(get_plot)
        def wrapper(*args, fonts=None, style=None, no_base_style=False, **kwargs):
            if no_base_style:
                list_style = []
            else:
                list_style = list(style_sheets)
            if style is not None:
                if isinstance(style, list):
                    list_style += style
                else:
                    list_style += [style]
            if fonts is not None:
                list_style += [{'font.family': 'sans-serif', 'font.sans-serif': fonts}]
            matplotlib.pyplot.style.use(list_style)
            return get_plot(*args, **kwargs)
        return wrapper
    return decorator
Return a decorator that will apply matplotlib style sheets to a plot. ``style_sheets`` is a base set of styles, which will be ignored if ``no_base_style`` is set in the decorated function arguments. The style will further be overwritten by any styles in the ``style`` optional argument of the decorated function. Args: style_sheets (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib supported definition of a style sheet. Can be a single style sheet or a list of style sheets.
codesearchnet
def search(self, search_phrase, limit=None): query_parts = ['SELECT identifier, type, name, similarity(name, :word) AS sml', 'FROM identifier_index', 'WHERE name % :word', 'ORDER BY sml DESC, name'] query_params = {'word': search_phrase} if limit: query_parts.append('LIMIT :limit') query_params['limit'] = limit query_parts.append(';') query = text('\n'.join(query_parts)) self.backend.library.database.set_connection_search_path() results = self.execute(query, **query_params).fetchall() for result in results: (vid, type, name, score) = result (yield IdentifierSearchResult(score=score, vid=vid, type=type, name=name))
Finds identifiers by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to return. None means without limit. Returns: list of IdentifierSearchResult instances.
codesearchnet
def from_origin_axis_angle(origin, axis, angle, angle_in_radians=False): theta = (((angle * pi) / 180) if (not angle_in_radians) else angle) a = origin[0] b = origin[1] c = origin[2] u = axis[0] v = axis[1] w = axis[2] u2 = (u * u) v2 = (v * v) w2 = (w * w) cos_t = cos(theta) sin_t = sin(theta) l2 = ((u2 + v2) + w2) l = sqrt(l2) m11 = ((u2 + ((v2 + w2) * cos_t)) / l2) m12 = ((((u * v) * (1 - cos_t)) - ((w * l) * sin_t)) / l2) m13 = ((((u * w) * (1 - cos_t)) + ((v * l) * sin_t)) / l2) m14 = (((((a * (v2 + w2)) - (u * ((b * v) + (c * w)))) + (((u * ((b * v) + (c * w))) - (a * (v2 + w2))) * cos_t)) + ((((b * w) - (c * v)) * l) * sin_t)) / l2) m21 = ((((u * v) * (1 - cos_t)) + ((w * l) * sin_t)) / l2) m22 = ((v2 + ((u2 + w2) * cos_t)) / l2) m23 = ((((v * w) * (1 - cos_t)) - ((u * l) * sin_t)) / l2) m24 = (((((b * (u2 + w2)) - (v * ((a * u) + (c * w)))) + (((v * ((a * u) + (c * w))) - (b * (u2 + w2))) * cos_t)) + ((((c * u) - (a * w)) * l) * sin_t)) / l2) m31 = ((((u * w) * (1 - cos_t)) - ((v * l) * sin_t)) / l2) m32 = ((((v * w) * (1 - cos_t)) + ((u * l) * sin_t)) / l2) m33 = ((w2 + ((u2 + v2) * cos_t)) / l2) m34 = (((((c * (u2 + v2)) - (w * ((a * u) + (b * v)))) + (((w * ((a * u) + (b * v))) - (c * (u2 + v2))) * cos_t)) + ((((a * v) - (b * u)) * l) * sin_t)) / l2) return SymmOp([[m11, m12, m13, m14], [m21, m22, m23, m24], [m31, m32, m33, m34], [0, 0, 0, 1]])
Generates a SymmOp for a rotation about a given axis through an origin. Args: origin (3x1 array): The origin which the axis passes through. axis (3x1 array): The axis of rotation in cartesian space. For example, [1, 0, 0]indicates rotation about x-axis. angle (float): Angle of rotation. angle_in_radians (bool): Set to True if angles are given in radians. Or else, units of degrees are assumed. Returns: SymmOp.
codesearchnet
def markdown(self, text, gfm=False, project=None, **kwargs): post_data = {'text': text, 'gfm': gfm} if (project is not None): post_data['project'] = project data = self.http_post('/markdown', post_data=post_data, **kwargs) return data['html']
Render an arbitrary Markdown document. Args: text (str): The markdown text to render gfm (bool): Render text using GitLab Flavored Markdown. Default is False project (str): Full path of a project used a context when `gfm` is True **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabMarkdownError: If the server cannot perform the request Returns: str: The HTML rendering of the markdown text.
codesearchnet
def map_structure_with_atomic(is_atomic_fn, map_fn, nested): if is_atomic_fn(nested): return map_fn(nested) if not nest.is_nested(nested): raise ValueError('Received non-atomic and non-sequence element: {}'.format(nested)) if nest.is_mapping(nested): values = [nested[k] for k in sorted(nested.keys())] elif nest.is_attrs(nested): values = _astuple(nested) else: values = nested mapped_values = [map_structure_with_atomic(is_atomic_fn, map_fn, ele) for ele in values] return nest._sequence_like(nested, mapped_values)
Maps the atomic elements of a nested structure. Args: is_atomic_fn: A function that determines if an element of `nested` is atomic. map_fn: The function to apply to atomic elements of `nested`. nested: A nested structure. Returns: The nested structure, with atomic elements mapped according to `map_fn`. Raises: ValueError: If an element that is neither atomic nor a sequence is encountered.
github-repos
def set_fog_density(self, density): if ((density < 0) or (density > 1)): raise HolodeckException('Fog density should be between 0 and 1') self._should_write_to_command_buffer = True command_to_send = ChangeFogDensityCommand(density) self._commands.add_command(command_to_send)
Queue up a change fog density command. It will be applied when `tick` or `step` is called next. By the next tick, the exponential height fog in the world will have the new density. If there is no fog in the world, it will be automatically created with the given density. Args: density (float): The new density value, between 0 and 1. The command will not be sent if the given density is invalid.
codesearchnet
def _check_validity(cls, text): if not text[0].lstrip().startswith('1 ') or not text[1].lstrip().startswith('2 '): raise ValueError("Line number check failed") for line in text: line = line.strip() if str(cls._checksum(line)) != line[-1]: raise ValueError("Checksum validation failed")
Check the validity of a TLE Args: text (tuple of str) Raise: ValueError
juraj-google-style
def get_num_bytes(self, batch: Sequence[numpy.ndarray]) -> int:
    """Return the total size in bytes of the elements in a batch.

    Sizes are shallow, as reported by ``sys.getsizeof`` per element.

    Returns:
        The number of bytes of data for a batch.
    """
    return sum(map(sys.getsizeof, batch))
Returns: The number of bytes of data for a batch.
github-repos
def attention_mask_same_segment(query_segment, memory_segment=None, dtype=tf.float32): memory_segment = rename_length_to_memory_length((memory_segment or query_segment)) return (mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * (- 1000000000.0))
Bias for attention where attention between segments is disallowed. Args: query_segment: a mtf.Tensor with shape [..., length_dim] memory_segment: a mtf.Tensor with shape [..., memory_length_dim] dtype: a tf.dtype Returns: a mtf.Tensor with shape [..., length_dim, memory_length_dim]
codesearchnet
def references_json(references):
    """Given a list of all models in a graph, return JSON representing
    them and their properties.

    Args:
        references (seq[Model]): A list of models to convert to JSON.

    Returns:
        list
    """
    def _ref_with_attributes(model):
        # Note: mutates and returns the model's own ref dict, matching the
        # original behavior.
        ref = model.ref
        ref['attributes'] = model._to_json_like(include_defaults=False)
        return ref

    return [_ref_with_attributes(model) for model in references]
Given a list of all models in a graph, return JSON representing them and their properties. Args: references (seq[Model]) : A list of models to convert to JSON Returns: list
codesearchnet
def lock(vcs, lock_object, wait=True):
    """A context manager that grabs the lock and releases it when done.

    Blocks until the lock can be acquired unless ``wait`` is False, in
    which case acquisition fails immediately if the lock is held.

    Args:
        vcs (easyci.vcs.base.Vcs)
        lock_object (Lock)
        wait (boolean) - whether to wait for the lock or error out

    Raises:
        Timeout
    """
    # timeout=-1 waits forever; timeout=0 fails immediately.
    timeout = -1 if wait else 0
    file_lock = filelock.FileLock(_get_lock_path(vcs, lock_object))
    with file_lock.acquire(timeout=timeout):
        yield
A context manager that grabs the lock and releases it when done. This blocks until the lock can be acquired. Args: vcs (easyci.vcs.base.Vcs) lock_object (Lock) wait (boolean) - whether to wait for the lock or error out Raises: Timeout
juraj-google-style
def ScanSource(self, source_path):
    """Scans the source path for volume and file systems.

    This function sets the internal source path specification and source type
    values.

    Args:
        source_path (str): path to the source.

    Returns:
        dfvfs.SourceScannerContext: source scanner context.

    Raises:
        SourceScannerError: if the format of or within the source is not
            supported.
    """
    # Resolve symlinks so the scanner operates on the real device/file.
    if os.path.islink(source_path):
        source_path = os.path.realpath(source_path)
    # Windows device paths (\\.\) are exempt from the existence check since
    # os.path.exists does not report them.
    if ((not source_path.startswith('\\\\.\\')) and (not os.path.exists(source_path))):
        raise errors.SourceScannerError('No such device, file or directory: {0:s}.'.format(source_path))
    scan_context = source_scanner.SourceScannerContext()
    scan_context.OpenSourcePath(source_path)
    try:
        self._source_scanner.Scan(scan_context)
    except (ValueError, dfvfs_errors.BackEndError) as exception:
        raise errors.SourceScannerError('Unable to scan source with error: {0!s}.'.format(exception))
    # Plain files/directories need no volume handling; use the root node.
    if (scan_context.source_type not in (scan_context.SOURCE_TYPE_STORAGE_MEDIA_DEVICE, scan_context.SOURCE_TYPE_STORAGE_MEDIA_IMAGE)):
        scan_node = scan_context.GetRootScanNode()
        self._source_path_specs.append(scan_node.path_spec)
        return scan_context
    # Walk down single-child chains (e.g. image -> volume) to the branch point.
    scan_node = scan_context.GetRootScanNode()
    while (len(scan_node.sub_nodes) == 1):
        scan_node = scan_node.sub_nodes[0]
    base_path_specs = []
    if (scan_node.type_indicator != dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION):
        self._ScanVolume(scan_context, scan_node, base_path_specs)
    else:
        # TSK partition table: scan each identified partition separately.
        partition_identifiers = self._GetTSKPartitionIdentifiers(scan_node)
        if (not partition_identifiers):
            raise errors.SourceScannerError('No partitions found.')
        for partition_identifier in partition_identifiers:
            location = '/{0:s}'.format(partition_identifier)
            sub_scan_node = scan_node.GetSubNodeByLocation(location)
            self._ScanVolume(scan_context, sub_scan_node, base_path_specs)
    if (not base_path_specs):
        raise errors.SourceScannerError('No supported file system found in source.')
    self._source_path_specs = base_path_specs
    return scan_context
Scans the source path for volume and file systems. This function sets the internal source path specification and source type values. Args: source_path (str): path to the source. Returns: dfvfs.SourceScannerContext: source scanner context. Raises: SourceScannerError: if the format of or within the source is not supported.
codesearchnet
def step_preprocess(x, step, hparams):
    """Preprocess the input at the beginning of each step.

    Args:
        x: input tensor
        step: step
        hparams: model hyper-parameters

    Returns:
        preprocessed input.
    """
    original_channel_size = common_layers.shape_list(x)[-1]

    if hparams.add_position_timing_signal:
        x = add_position_timing_signal(x, step, hparams)

    if hparams.add_step_timing_signal:
        x = add_step_timing_signal(x, step, hparams)

    # Bug fix: the second operand previously repeated
    # `add_position_timing_signal`, so a concatenated *step* timing signal
    # alone never triggered the re-projection below.
    if ((hparams.add_position_timing_signal or hparams.add_step_timing_signal)
        and hparams.add_or_concat_timing_signal == "concat"):
        # Concatenation grew the channel dimension; project back down.
        x = common_layers.dense(
            x, original_channel_size, activation=None, use_bias=False)

    if hparams.add_sru:
        x = common_layers.sru(x)

    return x
Preprocess the input at the beginning of each step. Args: x: input tensor step: step hparams: model hyper-parameters Returns: preprocessed input.
juraj-google-style
def output(self):
    """Retrieves the output tensor(s) of a layer.

    Only returns the tensor(s) corresponding to the *first time*
    the operation was called.

    Returns:
        Output tensor or list of output tensors.
    """
    # Node index 0 corresponds to the layer's first invocation.
    return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
Retrieves the output tensor(s) of a layer. Only returns the tensor(s) corresponding to the *first time* the operation was called. Returns: Output tensor or list of output tensors.
github-repos
def convert_argument(self, arg_name, arg_value):
    """Given a parameter with type information, convert and validate it.

    Args:
        arg_name (str): The name of the argument to convert and validate
        arg_value (object): The value to convert and validate

    Returns:
        object: The converted value.

    Raises:
        ValidationError: if a declared validator is missing on the type
            object, or if a validator rejects the converted value.
    """
    self._ensure_loaded()
    type_name = self.param_type(arg_name)
    # Untyped parameters pass through unchanged.
    if type_name is None:
        return arg_value
    val = typeinfo.type_system.convert_to_type(arg_value, type_name)
    validators = self.annotated_params[arg_name].validators
    if len(validators) == 0:
        return val
    type_obj = typeinfo.type_system.get_type(type_name)
    try:
        # Validators are looked up by name on the type object itself.
        for validator_name, extra_args in validators:
            if not hasattr(type_obj, validator_name):
                raise ValidationError("Could not find validator specified for argument", argument=arg_name, validator_name=validator_name, type=str(type_obj), method=dir(type_obj))
            validator = getattr(type_obj, validator_name)
            validator(val, *extra_args)
    except (ValueError, TypeError) as exc:
        # Normalize validator failures into a uniform ValidationError.
        raise ValidationError(exc.args[0], argument=arg_name, arg_value=val)
    return val
Given a parameter with type information, convert and validate it. Args: arg_name (str): The name of the argument to convert and validate arg_value (object): The value to convert and validate Returns: object: The converted value.
juraj-google-style
def get_variables_in_scope(scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):
    """Returns a tuple `tf.Variable`s in a scope for a given collection.

    Args:
        scope: `tf.VariableScope` or string to retrieve variables from.
        collection: Collection to restrict query to. By default this is
            `tf.Graphkeys.TRAINABLE_VARIABLES`, which doesn't include
            non-trainable variables such as moving averages.

    Returns:
        A tuple of `tf.Variable` objects.
    """
    name = get_variable_scope_name(scope)
    if name:
        # The trailing slash makes the regex match this scope exactly,
        # not scopes that merely share a name prefix.
        name = "{}/".format(re.escape(name))
    return tuple(tf.get_collection(collection, name))
Returns a tuple `tf.Variable`s in a scope for a given collection. Args: scope: `tf.VariableScope` or string to retrieve variables from. collection: Collection to restrict query to. By default this is `tf.Graphkeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable variables such as moving averages. Returns: A tuple of `tf.Variable` objects.
juraj-google-style
class Identity(Initializer):
    """Initializer that generates the identity matrix.

    Only use for 2D matrices.

    Args:
        gain: Multiplicative factor to apply to the identity matrix.
        dtype: Default data type, used if no `dtype` argument is provided
            when calling the initializer. Only floating point types are
            supported.
    """

    @deprecated_args(None, 'Call initializer instance with the dtype argument instead of passing it to the constructor', 'dtype')
    def __init__(self, gain=1.0, dtype=dtypes.float32):
        self.gain = gain
        # Reject non-floating-point dtypes up front.
        self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

    def __call__(self, shape, dtype=None, partition_info=None):
        # For partitioned variables the identity is built at the full shape
        # and then sliced down to this partition's window.
        full_shape = shape if partition_info is None else partition_info.full_shape
        if len(full_shape) != 2:
            raise ValueError(f'The tensor to initialize, specified by argument `shape` must be at least two-dimensional. Received shape={shape}')
        if dtype is None:
            dtype = self.dtype
        if isinstance(full_shape, tensor_shape.TensorShape):
            full_shape = full_shape.as_list()
        initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype)
        if partition_info is not None:
            initializer = array_ops.slice(initializer, partition_info.var_offset, shape)
        return self.gain * initializer

    def get_config(self):
        # Serializable constructor arguments for re-instantiation.
        return {'gain': self.gain, 'dtype': self.dtype.name}
Initializer that generates the identity matrix. Only use for 2D matrices. Args: gain: Multiplicative factor to apply to the identity matrix. dtype: Default data type, used if no `dtype` argument is provided when calling the initializer. Only floating point types are supported.
github-repos
def __init__(self, encoding='utf-8'):
    """Initializes an output writer.

    Args:
        encoding (Optional[str]): output encoding.
    """
    super(CLIOutputWriter, self).__init__()
    # Encoding used when writing text to the output stream.
    self._encoding = encoding
Initializes an output writer. Args: encoding (Optional[str]): output encoding.
juraj-google-style
def serialize(obj):
    """Serialize the given object into JSON.

    Args:
        obj: the object to be serialized.

    Returns:
        (str): JSON representation of the given object.
    """
    LOGGER.debug('serialize(%s)', obj)
    # Choose an encoder matching the object's shape; plain JSON-native
    # values need none at all.
    if isinstance(obj, datetime.date):
        encoder = encoders.as_date
    elif hasattr(obj, '__dict__'):
        encoder = encoders.as_object
    else:
        encoder = None
    if encoder is None:
        return simplejson.dumps(obj)
    return simplejson.dumps(obj, default=encoder)
Serialize the given object into JSON. Args: obj: the object to be serialized. Returns: (str): JSON representation of the given object.
codesearchnet
def pr_curves_impl(self, runs, tag):
    """Creates the JSON object for the PR curves response for a run-tag combo.

    Arguments:
        runs: A list of runs to fetch the curves for.
        tag: The tag to fetch the curves for.

    Raises:
        ValueError: If no PR curves could be fetched for a run and tag.

    Returns:
        The JSON object for the PR curves route response.
    """
    if self._db_connection_provider:
        # Database-backed mode: query tensor events from SQLite.
        db = self._db_connection_provider()
        # NOTE(review): the SQL string literal appears to be missing before
        # the `%` operator below (likely lost during extraction) -- restore
        # it from the original module before relying on this path.
        cursor = db.execute(
            % ','.join(['?'] * len(runs)), runs + [tag, metadata.PLUGIN_NAME])
        response_mapping = {}
        for (run, step, wall_time, data, dtype, shape, plugin_data) in cursor:
            if run not in response_mapping:
                response_mapping[run] = []
            # Rebuild the tensor from raw bytes plus dtype/shape columns.
            buf = np.frombuffer(data, dtype=tf.DType(dtype).as_numpy_dtype)
            data_array = buf.reshape([int(i) for i in shape.split(',')])
            plugin_data_proto = plugin_data_pb2.PrCurvePluginData()
            string_buffer = np.frombuffer(plugin_data, dtype=np.dtype('b'))
            plugin_data_proto.ParseFromString(tf.compat.as_bytes(
                string_buffer.tostring()))
            thresholds = self._compute_thresholds(plugin_data_proto.num_thresholds)
            entry = self._make_pr_entry(step, wall_time, data_array, thresholds)
            response_mapping[run].append(entry)
    else:
        # Event-multiplexer mode: read tensor events from event files.
        response_mapping = {}
        for run in runs:
            try:
                tensor_events = self._multiplexer.Tensors(run, tag)
            except KeyError:
                raise ValueError(
                    'No PR curves could be found for run %r and tag %r' % (run, tag))

            content = self._multiplexer.SummaryMetadata(
                run, tag).plugin_data.content
            pr_curve_data = metadata.parse_plugin_metadata(content)
            thresholds = self._compute_thresholds(pr_curve_data.num_thresholds)
            response_mapping[run] = [
                self._process_tensor_event(e, thresholds) for e in tensor_events]
    return response_mapping
Creates the JSON object for the PR curves response for a run-tag combo. Arguments: runs: A list of runs to fetch the curves for. tag: The tag to fetch the curves for. Raises: ValueError: If no PR curves could be fetched for a run and tag. Returns: The JSON object for the PR curves route response.
juraj-google-style
def write_csv(data, file_name, encoding='utf-8'):
    """Writes out to csv format.

    Args:
        data: 2D list of tables/worksheets.
        file_name: Name of the output file.
        encoding (str): text encoding passed to the csv writer.
    """
    # With more than one worksheet, emit one numbered file per sheet.
    name_extension = (len(data) > 1)
    (root, ext) = os.path.splitext(file_name)
    for (i, sheet) in enumerate(data):
        fname = (file_name if (not name_extension) else (((root + '_') + str(i)) + ext))
        # NOTE(review): binary mode plus csv.writer(..., encoding=...) suggests
        # the `csv` name is bound to the Python 2 `unicodecsv` package -- the
        # stdlib csv module on Python 3 accepts neither. Confirm the imports.
        with open(fname, 'wb') as date_file:
            csv_file = csv.writer(date_file, encoding=encoding)
            for line in sheet:
                csv_file.writerow(line)
Writes out to csv format. Args: data: 2D list of tables/worksheets. file_name: Name of the output file.
codesearchnet
def go_from(self, vertex): if self.vertex_out: self.vertex_out.edges_out.remove(self) self.vertex_out = vertex vertex.edges_out.add(self)
Tell the edge to go out from this vertex. Args: vertex (Vertex): vertex to go from.
juraj-google-style
def plot_projectors(self, ax=None, fontsize=12, **kwargs):
    """Plot the PAW projectors.

    Args:
        ax: matplotlib :class:`Axes` or None if a new figure should be created.
        fontsize (int): legend font size.

    Returns:
        `matplotlib` figure
    """
    (ax, fig, plt) = get_ax_fig_plt(ax)
    # NOTE(review): `title` is popped from kwargs but never applied to the
    # axes -- confirm whether a `ax.set_title(title)` call was intended.
    title = kwargs.pop('title', 'Projectors')
    ax.grid(True)
    ax.set_xlabel('r [Bohr]')
    ax.set_ylabel('$r\\tilde p\\, [Bohr]^{-\\frac{1}{2}}$')
    for (state, rfunc) in self.projector_functions.items():
        # Plot r * p(r), the conventional form for radial functions.
        ax.plot(rfunc.mesh, (rfunc.mesh * rfunc.values), label=('TPROJ: ' + state))
    ax.legend(loc='best', shadow=True, fontsize=fontsize)
    return fig
Plot the PAW projectors. Args: ax: matplotlib :class:`Axes` or None if a new figure should be created. Returns: `matplotlib` figure
codesearchnet
def __init__(self, X, y, batch_size, process_fn=None):
    """A `Sequence` implementation that returns balanced `y` by undersampling
    the majority class.

    Args:
        X: The numpy array of inputs.
        y: The numpy array of targets.
        batch_size: The generator mini-batch size.
        process_fn: The preprocessing function to apply on `X`
    """
    self.X = X
    self.y = y
    self.batch_size = batch_size
    # Fall back to the identity transform when no preprocessor is given.
    self.process_fn = process_fn if process_fn else (lambda x: x)
    positives = np.where(y == 1)[0]
    negatives = np.where(y == 0)[0]
    self.pos_indices = positives
    self.neg_indices = negatives
    # Undersample: each epoch uses as many samples per class as the
    # smaller class provides.
    self.n = min(len(positives), len(negatives))
    self._index_array = None
A `Sequence` implementation that returns balanced `y` by undersampling majority class. Args: X: The numpy array of inputs. y: The numpy array of targets. batch_size: The generator mini-batch size. process_fn: The preprocessing function to apply on `X`
juraj-google-style
def add_timeline_to_sketch(self, sketch_id, index_id):
    """Associate the specified timeline and sketch.

    Args:
        sketch_id (int): ID of sketch
        index_id (int): ID of timeline to add to sketch
    """
    url = '{0:s}/sketches/{1:d}/timelines/'.format(self.api_base_url, sketch_id)
    # The API expects the timeline ids wrapped in a list.
    self.session.post(url, json={'timeline': [index_id]})
Associate the specified timeline and sketch. Args: sketch_id (int): ID of sketch index_id (int): ID of timeline to add to sketch
juraj-google-style
def fetch_local_package(self, config):
    """Make a local path available to current stacker config.

    Args:
        config (dict): 'local' path config dictionary
    """
    source_dir = config['source']
    # Local packages are cached relative to the current working directory.
    cache_dir = os.getcwd()
    self.update_paths_and_config(config=config,
                                 pkg_dir_name=source_dir,
                                 pkg_cache_dir=cache_dir)
Make a local path available to current stacker config. Args: config (dict): 'local' path config dictionary
juraj-google-style
def word_score(word, input_letters, questions=0):
    """Checks the Scrabble score of a single word.

    Args:
        word: a string to check the Scrabble score of
        input_letters: the letters in our rack
        questions: integer of the tiles already on the board to build on

    Returns:
        an integer Scrabble score amount for the word
    """
    rack = list(input_letters)
    tiles_from_rack = 0
    score = 0
    blank_scores = []
    for letter in word:
        value = letter_score(letter)
        if letter in rack:
            rack.remove(letter)
            tiles_from_rack += 1
            score += value
        else:
            # Letters not in the rack must come from tiles on the board.
            blank_scores.append(value)
    # Spend the available board tiles on the highest-value letters first.
    blank_scores.sort(reverse=True)
    for value in blank_scores[:max(questions, 0)]:
        score += value
    # Bingo bonus for playing all seven rack tiles.
    if tiles_from_rack > 6:
        score += 50
    return score
Checks the Scrabble score of a single word. Args: word: a string to check the Scrabble score of input_letters: the letters in our rack questions: integer of the tiles already on the board to build on Returns: an integer Scrabble score amount for the word
juraj-google-style
def create_virtual_env(venv_path: str, requirements_paths: Iterable[str], python_path: str, verbose: bool) -> None:
    """Creates a new virtual environment and then installs dependencies.

    Args:
        venv_path: Where to put the virtual environment's state.
        requirements_paths: Location of requirements files to -r install.
        python_path: The python binary to use.
        verbose: When set, more progress output is produced.
    """
    # A None argument is dropped by run_cmd; --quiet suppresses output.
    quiet_flag = None if verbose else '--quiet'
    shell_tools.run_cmd(
        'virtualenv', quiet_flag, '-p', python_path, venv_path, out=sys.stderr)
    pip_path = os.path.join(venv_path, 'bin', 'pip')
    for requirements_file in requirements_paths:
        shell_tools.run_cmd(
            pip_path, 'install', quiet_flag, '-r', requirements_file,
            out=sys.stderr)
Creates a new virtual environment and then installs dependencies. Args: venv_path: Where to put the virtual environment's state. requirements_paths: Location of requirements files to -r install. python_path: The python binary to use. verbose: When set, more progress output is produced.
juraj-google-style
def FoldByteStream(self, mapped_value, **unused_kwargs):
    """Folds the data type into a byte stream.

    Args:
        mapped_value (object): mapped value.

    Returns:
        bytes: byte stream.

    Raises:
        FoldingError: always, since this mapping does not support folding.
    """
    # Serialization is unsupported here; report which data type failed.
    raise errors.FoldingError(
        'Unable to fold {0:s} data type into byte stream'.format(
            self._data_type_definition.TYPE_INDICATOR))
Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream.
juraj-google-style
def _ParseTokenType(self, file_object, file_offset):
    """Parses a token type.

    Args:
        file_object (dfvfs.FileIO): file-like object.
        file_offset (int): offset of the token relative to the start of
            the file-like object.

    Returns:
        int: token type
    """
    # Token types are stored as a single unsigned byte.
    token_type_map = self._GetDataTypeMap('uint8')

    token_type, _ = self._ReadStructureFromFileObject(
        file_object, file_offset, token_type_map)

    return token_type
Parses a token type. Args: file_object (dfvfs.FileIO): file-like object. file_offset (int): offset of the token relative to the start of the file-like object. Returns: int: token type
juraj-google-style
def _get_color(self, age):
    """Get the fill color depending on age.

    Args:
        age (int): The age of the branch/es

    Returns:
        tuple: (r, g, b)
    """
    tree = self.tree
    # Fully-grown branches take the leaf color.
    if age == tree.age:
        return self.leaf_color
    color = self.stem_color
    if len(color) == 3:
        # Plain RGB triple: no gradient requested.
        return color
    # A six-value color encodes a start/end gradient; interpolate
    # linearly between the two endpoints by age.
    channels = []
    for i in range(3):
        step = (color[i + 3] - color[i]) / (tree.age - 1)
        channels.append(int(color[i] + step * age))
    return tuple(channels)
Get the fill color depending on age. Args: age (int): The age of the branch/es Returns: tuple: (r, g, b)
codesearchnet
def next(self):
    """Get the next response from the stream.

    Returns:
        protobuf.Message: A single response from the stream.
    """
    try:
        return six.next(self._wrapped)
    except grpc.RpcError as exc:
        # Translate low-level gRPC errors into the friendlier exceptions
        # hierarchy, chaining the original error as the cause.
        six.raise_from(exceptions.from_grpc_error(exc), exc)
Get the next response from the stream. Returns: protobuf.Message: A single response from the stream.
codesearchnet
def _create_rand_mask_from_inputs(self, from_blocked_mask, to_blocked_mask, broadcasted_rand_attn, num_attention_heads, num_random_blocks, batch_size, from_seq_length, from_block_size):
    """Create 3D attention mask from a 2D tensor mask.

    Args:
        from_blocked_mask: 2D Tensor of shape
            [batch_size, from_seq_length//from_block_size, from_block_size].
        to_blocked_mask: int32 Tensor of shape
            [batch_size, to_seq_length//to_block_size, to_block_size].
        broadcasted_rand_attn: [batch_size, num_attention_heads,
            from_seq_length//from_block_size-2, num_rand_blocks]
        num_attention_heads: int. Number of attention heads.
        num_random_blocks: int. Number of random chunks per row.
        batch_size: int. Batch size for computation.
        from_seq_length: int. length of from sequence.
        from_block_size: int. size of block in from sequence.

    Returns:
        float Tensor of shape [batch_size, num_attention_heads,
        from_seq_length//from_block_size-2, from_block_size,
        num_rand_blocks*to_block_size].
    """
    # NOTE(review): given the documented window count of
    # from_seq_length//from_block_size - 2, this assignment looks truncated
    # (possibly lost in extraction) -- confirm against the original module.
    num_windows = from_seq_length
    # Gather the randomly attended to-blocks for every window and head.
    rand_mask = self.jax_gather(to_blocked_mask, broadcasted_rand_attn, batch_dims=1)
    rand_mask = rand_mask.reshape(batch_size, num_attention_heads, num_windows, num_random_blocks * from_block_size)
    # Combine query-side validity (middle blocks only) with key-side validity.
    rand_mask = jnp.einsum('blq,bhlk->bhlqk', from_blocked_mask[:, 1:-1], rand_mask)
    return rand_mask
Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. broadcasted_rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_random_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size].
github-repos
def destroy_elb(app='', env='dev', region='us-east-1', **_):
    """Destroy ELB Resources.

    Args:
        app (str): Spinnaker Application name.
        env (str): Deployment environment.
        region (str): AWS region.

    Returns:
        True upon successful completion.
    """
    # The deployment environment doubles as the AWS account name.
    vpc_id = get_vpc_id(account=env, region=region)
    task_json = get_template(
        template_file='destroy/destroy_elb.json.j2',
        app=app,
        env=env,
        region=region,
        vpc=vpc_id)
    wait_for_task(task_json)
    return True
Destroy ELB Resources. Args: app (str): Spinnaker Application name. env (str): Deployment environment. region (str): AWS region. Returns: True upon successful completion.
codesearchnet
def replace_batch_norm(model):
    """Recursively replace all `torch.nn.BatchNorm2d` with
    `DFineFrozenBatchNorm2d`.

    Args:
        model (torch.nn.Module):
            input model
    """
    for child_name, child in model.named_children():
        if isinstance(child, nn.BatchNorm2d):
            frozen = DFineFrozenBatchNorm2d(child.num_features)
            # Meta-device tensors carry no data, so there is nothing to copy.
            if child.weight.device != torch.device('meta'):
                frozen.weight.data.copy_(child.weight)
                frozen.bias.data.copy_(child.bias)
                frozen.running_mean.data.copy_(child.running_mean)
                frozen.running_var.data.copy_(child.running_var)
            model._modules[child_name] = frozen
        # Recurse into any submodule that itself has children.
        if list(child.children()):
            replace_batch_norm(child)
Recursively replace all `torch.nn.BatchNorm2d` with `DFineFrozenBatchNorm2d`. Args: model (torch.nn.Module): input model
github-repos
def _spanner_io_read_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):
    """Preprocessor for tests that involve reading from Spanner.

    This preprocessor replaces any ReadFromSpanner transform with a Create
    transform that reads from a predefined in-memory dictionary. This allows
    the test to verify the pipeline's correctness without relying on external
    Spanner instances.

    Args:
        test_spec: The dictionary representation of the YAML pipeline
            specification.
        expected: A list of strings representing the expected output of the
            pipeline.
        env: The TestEnvironment object providing utilities for creating
            temporary files.

    Returns:
        The modified test_spec dictionary with ReadFromSpanner transforms
        replaced.
    """
    if (pipeline := test_spec.get('pipeline', None)):
        for transform in pipeline.get('transforms', []):
            if transform.get('type', '').startswith('ReadFromSpanner'):
                config = transform['config']
                instance, database = (config['instance_id'], config['database_id'])
                # Bug fix: the walrus previously captured the *boolean* of the
                # `is None` test, so a configured table name was discarded.
                if (table := config.get('table', None)) is None:
                    # No explicit table: take it from the FROM clause.
                    table = config.get('query', '').split('FROM')[-1].strip()
                transform['type'] = 'Create'
                # Keep only internal (double-underscore) config entries.
                transform['config'] = {k: v for k, v in config.items() if k.startswith('__')}
                elements = INPUT_TABLES[str(instance), str(database), str(table)]
                if config.get('query', None):
                    # Bug fix: str.replace returns a new string; the result was
                    # previously discarded, leaving lowercase keywords unparsed.
                    config['query'] = config['query'].replace('select ', 'SELECT ').replace(' from ', ' FROM ')
                    columns = set(''.join(config['query'].split('SELECT ')[1:]).split(' FROM', maxsplit=1)[0].split(', '))
                    if columns != {'*'}:
                        # Project each element down to the selected columns.
                        elements = [{column: element[column] for column in element if column in columns} for element in elements]
                transform['config']['elements'] = elements
    return test_spec
Preprocessor for tests that involve reading from Spanner. This preprocessor replaces any ReadFromSpanner transform with a Create transform that reads from a predefined in-memory dictionary. This allows the test to verify the pipeline's correctness without relying on external Spanner instances. Args: test_spec: The dictionary representation of the YAML pipeline specification. expected: A list of strings representing the expected output of the pipeline. env: The TestEnvironment object providing utilities for creating temporary files. Returns: The modified test_spec dictionary with ReadFromSpanner transforms replaced.
github-repos
def check_end_blocks(frame):
    """Performs end-block check.

    Args:
        frame: A frame object of the module to be checked.

    Raises:
        SyntaxError: If check failed.
    """
    try:
        try:
            module_name = frame.f_globals['__name__']
        except KeyError:
            # NOTE(review): "uknown" typo in this user-facing message;
            # fix at source if messages are ever asserted on.
            warnings.warn(
                'Can not get the source of an uknown module. '
                'End-of-block syntax check is skipped.', EndSyntaxWarning)
            return
        end

        filename = frame.f_globals.get('__file__', '<unknown>')

        try:
            source = inspect.getsource(sys.modules[module_name])
        except Exception:
            warnings.warn(
                'Can not get the source of module "%s". '
                'End-of-block syntax check is skipped.' % (module_name,),
                EndSyntaxWarning)
            return
        end
    finally:
        # Drop the frame reference to break the frame->locals cycle.
        del frame
    end

    root = ast.parse(source)
    for node in ast.walk(root):
        bodies = get_compound_bodies(node)
        if not bodies:
            continue
        end
        # An `elif` chain parses as a nested If inside orelse; it needs
        # no separate "end".
        if (isinstance(node, ast.If) and
                len(node.orelse) == 1 and
                isinstance(node.orelse[0], ast.If)):
            continue
        end
        # Python 2 parses try/except/finally as TryFinally wrapping
        # TryExcept; only the outer construct takes an "end".
        if (PY2 and
                isinstance(node, ast.TryFinally) and
                len(node.body) == 1 and
                isinstance(node.body[0], ast.TryExcept)):
            continue
        end
        for body in bodies:
            skip_next = False
            for i, child in enumerate(body):
                if skip_next:
                    # This "end" was consumed by the previous compound child.
                    skip_next = False
                elif is_end_node(child):
                    raise SyntaxError(
                        '"end" does not close a block.',
                        [filename, child.lineno, child.col_offset,
                         source.splitlines()[child.lineno - 1] + '\n'])
                elif get_compound_bodies(child):
                    # A compound statement must be followed by an "end".
                    try:
                        ok = is_end_node(body[i + 1])
                    except IndexError:
                        ok = False
                    end
                    if not ok:
                        raise SyntaxError(
                            'This block is not closed with "end".',
                            [filename, child.lineno, child.col_offset,
                             source.splitlines()[child.lineno - 1] + '\n'])
                    end
                    skip_next = True
                end
            end
        end
    end
Performs end-block check. Args: frame: A frame object of the module to be checked. Raises: SyntaxError: If check failed.
juraj-google-style
def AddForwardedIp(self, address, interface):
    """Configure a new IP address on the network interface.

    Args:
        address: string, the IP address to configure.
        interface: string, the output device to use.
    """
    network = netaddr.IPNetwork(address)
    # Alias every host address in the (possibly single-address) network
    # onto the interface as a /32.
    for host in network:
        alias = '%s/32' % str(host)
        self._RunIfconfig(args=[interface, 'alias', alias])
Configure a new IP address on the network interface. Args: address: string, the IP address to configure. interface: string, the output device to use.
juraj-google-style
def get_license_from_url(url):
    """Get the license abbreviation from an URL.

    Args:
        url(str): canonical url of the license.

    Returns:
        str: the corresponding license abbreviation.

    Raises:
        ValueError: when the url is not recognized
    """
    if not url:
        return
    parts = urlsplit(url, scheme='http')
    host = parts.netloc.lower()
    if host == 'creativecommons.org':
        if 'publicdomain' in parts.path:
            match = _RE_PUBLIC_DOMAIN_URL.match(parts.path)
            if match is None:
                license = ['public domain']
            else:
                # A matched public-domain path maps to the CC0 dedication.
                license = ['CC0']
                license.extend(part for part in match.groups() if part)
        else:
            match = _RE_LICENSE_URL.match(parts.path)
            license = ['CC'] + [part.upper() for part in match.groups() if part]
    elif parts.netloc == 'arxiv.org':
        match = _RE_LICENSE_URL.match(parts.path)
        license = ['arXiv'] + [part for part in match.groups() if part]
    else:
        raise ValueError('Unknown license URL')
    return u' '.join(license)
Get the license abbreviation from an URL. Args: url(str): canonical url of the license. Returns: str: the corresponding license abbreviation. Raises: ValueError: when the url is not recognized
juraj-google-style
def deepgetattr(obj, name, default=_UNSPECIFIED):
    """Try to retrieve the given attribute of an object, digging on '.'.

    This is an extended getattr, digging deeper if '.' is found.

    Args:
        obj (object): the object of which an attribute should be read
        name (str): the name of an attribute to look up.
        default (object): the default value to use if the attribute wasn't found

    Returns:
        the attribute pointed to by 'name', splitting on '.'.

    Raises:
        AttributeError: if obj has no 'name' attribute.
    """
    try:
        if '.' not in name:
            return getattr(obj, name)
        # Resolve one level, then recurse on the remainder of the path.
        head, tail = name.split('.', 1)
        return deepgetattr(getattr(obj, head), tail, default)
    except AttributeError:
        # Without an explicit default, surface the original error.
        if default is _UNSPECIFIED:
            raise
        return default
Try to retrieve the given attribute of an object, digging on '.'. This is an extended getattr, digging deeper if '.' is found. Args: obj (object): the object of which an attribute should be read name (str): the name of an attribute to look up. default (object): the default value to use if the attribute wasn't found Returns: the attribute pointed to by 'name', splitting on '.'. Raises: AttributeError: if obj has no 'name' attribute.
codesearchnet
def process(self, tensor):
    """Process state.

    Args:
        tensor: tensor to process

    Returns:
        processed state
    """
    # Thread the tensor through each preprocessor in registration order.
    result = tensor
    for preprocessor in self.preprocessors:
        result = preprocessor.process(tensor=result)
    return result
Process state. Args: tensor: tensor to process Returns: processed state
codesearchnet
def __init__(self, host: str, port: int, time_to_live: Union[int, timedelta], *, kwargs: Optional[Dict[str, Any]]=None, request_coder: Optional[coders.Coder], response_coder: Optional[coders.Coder], source_caller: Optional[Caller[RequestT, ResponseT]]=None):
    """Args:
        host (str): The hostname or IP address of the Redis server.
        port (int): The port number of the Redis server.
        time_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for
            records stored in Redis. Provide an integer (in seconds) or a
            `datetime.timedelta` object.
        kwargs: Optional(Dict[str, Any]) additional keyword arguments that
            are required to connect to your redis server. Same as
            `redis.Redis()`.
        request_coder: (Optional[`coders.Coder`]) coder for requests stored
            in Redis.
        response_coder: (Optional[`coders.Coder`]) coder for decoding
            responses received from Redis.
        source_caller: (Optional[`Caller`]): The source caller using this
            Redis cache in case of fetching the cache request to store in
            Redis.
    """
    self.request_coder = request_coder
    self.response_coder = response_coder
    # The underlying caller operates in READ mode: lookups only, with the
    # source caller available to populate cache misses.
    self.redis_caller = _RedisCaller(host, port, time_to_live, request_coder=self.request_coder, response_coder=self.response_coder, kwargs=kwargs, source_caller=source_caller, mode=_RedisMode.READ)
Args: host (str): The hostname or IP address of the Redis server. port (int): The port number of the Redis server. time_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for records stored in Redis. Provide an integer (in seconds) or a `datetime.timedelta` object. kwargs: Optional(Dict[str, Any]) additional keyword arguments that are required to connect to your redis server. Same as `redis.Redis()`. request_coder: (Optional[`coders.Coder`]) coder for requests stored in Redis. response_coder: (Optional[`coders.Coder`]) coder for decoding responses received from Redis. source_caller: (Optional[`Caller`]): The source caller using this Redis cache in case of fetching the cache request to store in Redis.
github-repos
def __init__(self, status, reason, message):
    """Construct the error message and attributes for the KMIP operation
    failure.

    Args:
        status: a ResultStatus enumeration
        reason: a ResultReason enumeration
        message: a string providing additional error information
    """
    # Compose a single human-readable message from the enum names.
    msg = "{0}: {1} - {2}".format(status.name, reason.name, message)
    super(KmipOperationFailure, self).__init__(msg)
    self.status = status
    self.reason = reason
    self.message = message
Construct the error message and attributes for the KMIP operation failure. Args: status: a ResultStatus enumeration reason: a ResultReason enumeration message: a string providing additional error information
juraj-google-style
def evaluate(self, index):
    """Given a custom attribute audience condition and user attributes,
    evaluate the condition against the attributes.

    Args:
        index: Index of the condition to be evaluated.

    Returns:
        Boolean:
            - True if the user attributes match the given condition.
            - False if the user attributes don't match the given condition.
        None: if the user attributes and condition can't be evaluated.
    """
    # Only custom-attribute conditions are supported.
    if (self.condition_data[index][2] != self.CUSTOM_ATTRIBUTE_CONDITION_TYPE):
        self.logger.warning(audience_logs.UNKNOWN_CONDITION_TYPE.format(self._get_condition_json(index)))
        return None

    condition_match = self.condition_data[index][3]
    # A missing match type defaults to exact matching.
    if (condition_match is None):
        condition_match = ConditionMatchTypes.EXACT

    if (condition_match not in self.EVALUATORS_BY_MATCH_TYPE):
        self.logger.warning(audience_logs.UNKNOWN_MATCH_TYPE.format(self._get_condition_json(index)))
        return None

    # Every match type except EXISTS requires a usable attribute value.
    if (condition_match != ConditionMatchTypes.EXISTS):
        attribute_key = self.condition_data[index][0]
        if (attribute_key not in self.attributes):
            self.logger.debug(audience_logs.MISSING_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key))
            return None

        if (self.attributes.get(attribute_key) is None):
            self.logger.debug(audience_logs.NULL_ATTRIBUTE_VALUE.format(self._get_condition_json(index), attribute_key))
            return None

    # Dispatch to the evaluator registered for this match type.
    return self.EVALUATORS_BY_MATCH_TYPE[condition_match](self, index)
Given a custom attribute audience condition and user attributes, evaluate the condition against the attributes. Args: index: Index of the condition to be evaluated. Returns: Boolean: - True if the user attributes match the given condition. - False if the user attributes don't match the given condition. None: if the user attributes and condition can't be evaluated.
codesearchnet
def _convert_param_to_tensor(param):
    """Converts `param` to a `Tensor`.

    Args:
        param: `Scalar` or `Tensor` with shape `batch_shape` + [1].

    Returns:
        `param` as a `Tensor`: scalars are broadcast via multiplication
        with a ones tensor of shape `dim`.
    """
    # NOTE(review): `dtype` and `dim` are free variables captured from the
    # enclosing scope -- confirm their definitions at the call site.
    param_t = tf.convert_to_tensor(param, dtype=dtype)
    return param_t * tf.ones(shape=dim, dtype=dtype)
Converts `param` to `Tensor`. Args: param: `Scalar` or `Tensor` with shape `batch_shape` + [1]. Returns: `param` if it is a `Tensor`; if it is a `Scalar`, it is converted to a `Tensor` of shape [1].
github-repos
def check_version(version, range_=None):
    """Check that the found software version is within supplied range.

    Args:
        version: Version of the package as a Version object.
        range_: Allowable version range as a VersionRange object.
    """
    # No range means nothing to validate.
    if not range_:
        return
    if version not in range_:
        message = "found version %s is not within range %s" % (str(version), str(range_))
        raise RezBindError(message)
Check that the found software version is within supplied range. Args: version: Version of the package as a Version object. range_: Allowable version range as a VersionRange object.
juraj-google-style
def set_lacp_mode(self, name, mode):
    """Configures the LACP mode of the member interfaces

    Args:
        name(str): The Port-Channel interface name to configure the
            LACP mode
        mode(str): The LACP mode to configure the member interfaces to.
            Valid values are 'on, 'passive', 'active'

    Returns:
        True if the operation succeeds otherwise False
    """
    valid_modes = ('on', 'passive', 'active')
    if mode not in valid_modes:
        return False
    # The channel-group id is the numeric part of the Port-Channel name.
    grpid = re.search(r'(\d+)', name).group()
    removals = []
    additions = []
    # Members must leave the channel group before rejoining with the new
    # mode, so removals are applied first, then additions.
    for member in self.get_members(name):
        removals.extend(['interface %s' % member,
                         'no channel-group %s' % grpid])
        additions.extend(['interface %s' % member,
                          'channel-group %s mode %s' % (grpid, mode)])
    return self.configure(removals + additions)
Configures the LACP mode of the member interfaces Args: name(str): The Port-Channel interface name to configure the LACP mode mode(str): The LACP mode to configure the member interfaces to. Valid values are 'on, 'passive', 'active' Returns: True if the operation succeeds otherwise False
codesearchnet
def __init__(self, channel, service_name, stub=None):
    """Create a Sender.

    Args:
        channel: The grpc.Channel over which we should send messages.
        service_name: The name of the service that we are running as.
        stub: If set, used instead of AdminStub(channel). Intended to ease
            unit tests.
    """
    if stub:
        self._stub = stub
    else:
        self._stub = admin_pb2_grpc.AdminStub(channel)
    self._service_name = service_name
    # Shutdown flag is guarded by a condition variable so the keep-alive
    # thread can be woken promptly on shutdown.
    self._shutdown = False
    self._shutdown_cv = threading.Condition()
    # Daemon thread: must not keep the process alive on interpreter exit.
    self._keep_alive_thread = threading.Thread(target=self._KeepAliveLoop)
    self._keep_alive_thread.daemon = True
    self._keep_alive_thread.start()
Create a Sender. Args: channel: The grpc.Channel over which we should send messages. service_name: The name of the service that we are running as. stub: If set, used instead of AdminStub(channel). Intended to ease unit tests.
juraj-google-style
def add_tree(self, tree, parent=None):
    """Add `tree` into database.

    Args:
        tree (obj): :class:`.Tree` instance.
        parent (ref, default None): Reference to parent tree. This is used
            for all sub-trees in recursive call.
    """
    # Replace any previously stored tree at the same path.
    if (tree.path in self.path_db):
        self.remove_tree_by_path(tree.path)

    # Register the tree under each non-empty index attribute it declares;
    # the backing store is the matching `<index>_db` attribute.
    for index in tree.indexes:
        if (not getattr(tree, index)):
            continue

        self._add_to(getattr(self, (index + '_db')), getattr(tree, index), tree)

    if parent:
        self._add_to(self.parent_db, tree.path, parent)

    # Sub-tree paths must nest under this tree's path.
    for sub_tree in tree.sub_trees:
        assert sub_tree.path.startswith(tree.path)

    for sub_tree in tree.sub_trees:
        self.add_tree(sub_tree, parent=tree)
Add `tree` into database. Args: tree (obj): :class:`.Tree` instance. parent (ref, default None): Reference to parent tree. This is used for all sub-trees in recursive call.
codesearchnet
def _resolve_attribute(self, attribute):
    """Recursively replaces references to other attributes with their value.

    Args:
        attribute (str): The name of the attribute to resolve.

    Returns:
        str: The resolved value of 'attribute', or None when the raw
            value is empty/falsy.
    """
    value = self.attributes[attribute]
    if not value:
        return None
    # Each $(other_attr) token is substituted via _resolve_attribute_match,
    # which re-enters this resolution for nested references.
    resolved_value = re.sub('\$\((.*?)\)',self._resolve_attribute_match, value)
    return resolved_value
Recursively replaces references to other attributes with their value. Args: attribute (str): The name of the attribute to resolve. Returns: str: The resolved value of 'attribute'.
juraj-google-style
def angle(x):
    """Element-wise angle of a complex tensor.

    Arguments:
        x: Input tensor. Can be real or complex.

    Returns:
        Output tensor of same shape as x, containing the angle of each
        element (in radians).
    """
    # Defer to a symbolic op when the input is a symbolic (graph) tensor.
    if any_symbolic_tensors((x,)):
        return Angle().symbolic_call(x)
    return backend.numpy.angle(x)
Element-wise angle of a complex tensor. Arguments: x: Input tensor. Can be real or complex. Returns: Output tensor of same shape as x. containing the angle of each element (in radians). Example: >>> x = keras.ops.convert_to_tensor([[1 + 3j, 2 - 5j], [4 - 3j, 3 + 2j]]) >>> keras.ops.angle(x) array([[ 1.2490457, -1.19029 ], [-0.6435011, 0.5880026]], dtype=float32)
github-repos
def cancel(self, invoice_id, **kwargs):
    """Cancel an unpaid Invoice with given ID via API.

    It can only be called on an invoice that is not in the paid state.

    Args:
        invoice_id : Id for cancel the invoice

    Returns:
        The response for the API will be the invoice entity, similar to
        create/update API response, with status attribute's value as
        cancelled
    """
    # Cancellation is a POST with an empty body to the /cancel endpoint.
    url = '{base}/{invoice}/cancel'.format(base=self.base_url, invoice=invoice_id)
    return self.post_url(url, {}, **kwargs)
Cancel an unpaid Invoice with given ID via API It can only be called on an invoice that is not in the paid state. Args: invoice_id : Id for cancel the invoice Returns: The response for the API will be the invoice entity, similar to create/update API response, with status attribute's value as cancelled
codesearchnet
def mkdir_p(dirname):
    """Like "mkdir -p": make a dir recursively, but do nothing if it exists.

    Args:
        dirname(str): path of the directory to create; '' is a no-op.

    Raises:
        ValueError: if dirname is None.
        OSError: if creation fails for a reason other than the directory
            already existing (e.g. a permission error).
    """
    # Raise explicitly instead of using `assert`, which is stripped when
    # Python runs with -O.
    if dirname is None:
        raise ValueError('dirname must not be None')
    if (dirname == '') or os.path.isdir(dirname):
        return
    try:
        os.makedirs(dirname)
    except OSError as e:
        # Tolerate a concurrent creation of the same directory.
        if (e.errno != errno.EEXIST):
            raise e
Like "mkdir -p", make a dir recursively, but do nothing if the dir exists Args: dirname(str):
codesearchnet
def configure_bigchaindb(command):
    """Decorator to be used by command line functions, such that the
    configuration of bigchaindb is performed before the execution of the
    command.

    Args:
        command: The command to decorate.

    Returns:
        The command wrapper function.
    """
    @functools.wraps(command)
    def configure(args):
        config_from_cmdline = None
        try:
            # A --log-level flag, when present, overrides both the
            # console/file log levels and the server log level.
            if args.log_level is not None:
                config_from_cmdline = {
                    'log': {
                        'level_console': args.log_level,
                        'level_logfile': args.log_level,
                    },
                    'server': {'loglevel': args.log_level},
                }
        except AttributeError:
            # Not every sub-command defines --log-level; ignore its absence.
            pass
        bigchaindb.config_utils.autoconfigure(
            filename=args.config, config=config_from_cmdline, force=True)
        command(args)

    return configure
Decorator to be used by command line functions, such that the configuration of bigchaindb is performed before the execution of the command. Args: command: The command to decorate. Returns: The command wrapper function.
juraj-google-style
def triangle(times: np.ndarray, amp: complex, period: float,
             phase: float = 0) -> np.ndarray:
    """Continuous triangle wave.

    Args:
        times: Times to output wave for.
        amp: Pulse amplitude. Wave range is [-amp, amp].
        period: Pulse period, units of dt.
        phase: Pulse phase.

    Returns:
        Complex-valued triangle wave sampled at `times`.
    """
    # A triangle is a folded sawtooth: |saw| rescaled into [-1, 1].
    # np.complex128 replaces np.complex_, which was removed in NumPy 2.0.
    return amp*(-2*np.abs(sawtooth(times, 1, period, (phase-np.pi/2)/2)) + 1).astype(np.complex128)
Continuous triangle wave. Args: times: Times to output wave for. amp: Pulse amplitude. Wave range is [-amp, amp]. period: Pulse period, units of dt. phase: Pulse phase.
juraj-google-style
def snake_to_camel(name):
    """Takes a snake_field_name and returns a camelCaseFieldName

    Args:
        name (str): E.g. snake_field_name or SNAKE_FIELD_NAME

    Returns:
        str: camelCase converted name. E.g. capsFieldName. An empty or
            underscore-only input returns '' instead of raising.
    """
    camel = "".join(part.title() for part in name.split("_"))
    # Guard: name='' or '_' joins to '', and camel[0] would raise IndexError.
    if not camel:
        return camel
    return camel[0].lower() + camel[1:]
Takes a snake_field_name and returns a camelCaseFieldName Args: name (str): E.g. snake_field_name or SNAKE_FIELD_NAME Returns: str: camelCase converted name. E.g. capsFieldName
juraj-google-style
def basic_train_loop(supervisor, train_step_fn, args=None, kwargs=None, master=''):
    """Basic loop to train a model.

    Calls `train_step_fn` in a loop to train a model. The function is called
    as:

    ```python
    train_step_fn(session, *args, **kwargs)
    ```

    It is passed a `tf.compat.v1.Session` in addition to `args` and `kwargs`.
    The function typically runs one training step in the session.

    Args:
        supervisor: `tf.compat.v1.train.Supervisor` to run the training
            services.
        train_step_fn: Callable to execute one training step. Called
            repeatedly as `train_step_fn(session, *args **kwargs)`.
        args: Optional positional arguments passed to `train_step_fn`.
        kwargs: Optional keyword arguments passed to `train_step_fn`.
        master: Master to use to create the training session. Defaults to
            `""` which causes the session to be created in the local process.
    """
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    should_retry = True
    # AbortedError signals a recoverable cluster event (e.g. a preempted
    # worker); recreate the session and keep training.
    while should_retry:
        try:
            should_retry = False
            with supervisor.managed_session(master) as sess:
                while not supervisor.should_stop():
                    train_step_fn(sess, *args, **kwargs)
        except errors.AbortedError:
            should_retry = True
Basic loop to train a model. Calls `train_step_fn` in a loop to train a model. The function is called as: ```python train_step_fn(session, *args, **kwargs) ``` It is passed a `tf.compat.v1.Session` in addition to `args` and `kwargs`. The function typically runs one training step in the session. Args: supervisor: `tf.compat.v1.train.Supervisor` to run the training services. train_step_fn: Callable to execute one training step. Called repeatedly as `train_step_fn(session, *args **kwargs)`. args: Optional positional arguments passed to `train_step_fn`. kwargs: Optional keyword arguments passed to `train_step_fn`. master: Master to use to create the training session. Defaults to `""` which causes the session to be created in the local process.
github-repos
def _build_commands(self, ip_dest, next_hop, **kwargs): commands = ('ip route %s %s' % (ip_dest, next_hop)) next_hop_ip = kwargs.get('next_hop_ip', None) distance = kwargs.get('distance', None) tag = kwargs.get('tag', None) route_name = kwargs.get('route_name', None) if (next_hop_ip is not None): commands += (' %s' % next_hop_ip) if (distance is not None): commands += (' %s' % distance) if (tag is not None): commands += (' tag %s' % tag) if (route_name is not None): commands += (' name %s' % route_name) return commands
Build the EOS command string for ip route interactions. Args: ip_dest (string): The ip address of the destination in the form of A.B.C.D/E next_hop (string): The next hop interface or ip address **kwargs['next_hop_ip'] (string): The next hop address on destination interface **kwargs['distance'] (string): Administrative distance for this route **kwargs['tag'] (string): Route tag **kwargs['route_name'] (string): Route name Returns the ip route command string to be sent to the switch for the given set of parameters.
codesearchnet
def GetStatus(self): STATUS_FORMAT = '>BBBhhhHhhhHBBBxBbHBHHHHBbbHHBBBbbbbbbbbbBH' STATUS_FIELDS = ['packetType', 'firmwareVersion', 'protocolVersion', 'mainFineCurrent', 'usbFineCurrent', 'auxFineCurrent', 'voltage1', 'mainCoarseCurrent', 'usbCoarseCurrent', 'auxCoarseCurrent', 'voltage2', 'outputVoltageSetting', 'temperature', 'status', 'leds', 'mainFineResistor', 'serialNumber', 'sampleRate', 'dacCalLow', 'dacCalHigh', 'powerUpCurrentLimit', 'runTimeCurrentLimit', 'powerUpTime', 'usbFineResistor', 'auxFineResistor', 'initialUsbVoltage', 'initialAuxVoltage', 'hardwareRevision', 'temperatureLimit', 'usbPassthroughMode', 'mainCoarseResistor', 'usbCoarseResistor', 'auxCoarseResistor', 'defMainFineResistor', 'defUsbFineResistor', 'defAuxFineResistor', 'defMainCoarseResistor', 'defUsbCoarseResistor', 'defAuxCoarseResistor', 'eventCode', 'eventData'] self._SendStruct('BBB', 1, 0, 0) while 1: read_bytes = self._ReadPacket() if (not read_bytes): return None calsize = struct.calcsize(STATUS_FORMAT) if ((len(read_bytes) != calsize) or (read_bytes[0] != 16)): logging.warning('Wanted status, dropped type=0x%02x, len=%d', read_bytes[0], len(read_bytes)) continue status = dict(zip(STATUS_FIELDS, struct.unpack(STATUS_FORMAT, read_bytes))) p_type = status['packetType'] if (p_type != 16): raise MonsoonError(('Package type %s is not 0x10.' % p_type)) for k in status.keys(): if k.endswith('VoltageSetting'): status[k] = (2.0 + (status[k] * 0.01)) elif k.endswith('FineCurrent'): pass elif k.endswith('CoarseCurrent'): pass elif (k.startswith('voltage') or k.endswith('Voltage')): status[k] = (status[k] * 0.000125) elif k.endswith('Resistor'): status[k] = (0.05 + (status[k] * 0.0001)) if (k.startswith('aux') or k.startswith('defAux')): status[k] += 0.05 elif k.endswith('CurrentLimit'): status[k] = ((8 * (1023 - status[k])) / 1023.0) return status
Requests and waits for status. Returns: status dictionary.
codesearchnet
def get_unique_graph(tops, check_types=None, none_if_empty=False): if isinstance(tops, ops.Graph): return tops if not is_iterable(tops): raise TypeError('{} is not iterable'.format(type(tops))) if check_types is None: check_types = (ops.Operation, tensor_lib.Tensor) elif not is_iterable(check_types): check_types = (check_types,) g = None for op in tops: if not isinstance(op, check_types): raise TypeError('Expected a type in ({}), got: {}'.format(', '.join([str(t) for t in check_types]), type(op))) if g is None: g = op.graph elif g._graph_key != op.graph._graph_key: raise ValueError('Operation {} does not belong to given graph'.format(op)) if g is None and (not none_if_empty): raise ValueError("Can't find the unique graph of an empty list") return g
Return the unique graph used by the all the elements in tops. Args: tops: iterable of elements to check (usually a list of tf.Operation and/or tf.Tensor). Or a tf.Graph. check_types: check that the element in tops are of given type(s). If None, the types (tf.Operation, tf.Tensor) are used. none_if_empty: don't raise an error if tops is an empty list, just return None. Returns: The unique graph used by all the tops. Raises: TypeError: if tops is not a iterable of tf.Operation. ValueError: if the graph is not unique.
github-repos
def easeInOutQuart(n): _checkRange(n) n = 2 * n if n < 1: return 0.5 * n**4 else: n = n - 2 return -0.5 * (n**4 - 2)
A quartic tween function that accelerates, reaches the midpoint, and then decelerates. Args: n (float): The time progress, starting at 0.0 and ending at 1.0. Returns: (float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
juraj-google-style
def fetch_friends(self, user): if USING_ALLAUTH: social_app = SocialApp.objects.get_current('twitter') consumer_key = social_app.key consumer_secret = social_app.secret oauth_token = SocialToken.objects.get(account=user, app=social_app).token oauth_token_secret = SocialToken.objects.get(account=user, app=social_app).token_secret else: t = TwitterBackend() tokens = t.tokens(user) oauth_token_secret = tokens['oauth_token_secret'] oauth_token = tokens['oauth_token'] consumer_key = settings.TWITTER_CONSUMER_KEY consumer_secret = settings.TWITTER_CONSUMER_SECRET api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret, access_token_key=oauth_token, access_token_secret=oauth_token_secret) return api.GetFriends()
fetches the friends from twitter using the information on django-social-auth models user is an instance of UserSocialAuth Returns: collection of friend objects fetched from facebook
codesearchnet
def _get_mpr_table(self, connection, partition): virtual_table = partition.vid table = '{}_v'.format(virtual_table) logger.debug( 'Looking for materialized table of the partition.\n partition: {}'.format(partition.name)) table_exists = self._relation_exists(connection, table) if table_exists: logger.debug( 'Materialized table of the partition found.\n partition: {}, table: {}' .format(partition.name, table)) return table logger.debug( 'Looking for a virtual table of the partition.\n partition: {}'.format(partition.name)) virtual_exists = self._relation_exists(connection, virtual_table) if virtual_exists: logger.debug( 'Virtual table of the partition found.\n partition: {}, table: {}' .format(partition.name, table)) return virtual_table raise MissingTableError('sqlite database does not have table for mpr of {} partition.' .format(partition.vid))
Returns name of the sqlite table who stores mpr data. Args: connection (apsw.Connection): connection to sqlite database who stores mpr data. partition (orm.Partition): Returns: str: Raises: MissingTableError: if partition table not found in the db.
juraj-google-style
def trace(fun: Callable[[], Any], *, where: Optional[Callable[[base.HyperPrimitive], bool]]=None, require_hyper_name: bool=False, per_thread: bool=True) -> DynamicEvaluationContext: context = DynamicEvaluationContext(where=where, require_hyper_name=require_hyper_name, per_thread=per_thread) with context.collect(): fun() return context
Trace the hyper primitives called within a function by executing it. See examples in :class:`pyglove.hyper.DynamicEvaluationContext`. Args: fun: Function in which the search space is defined. where: A callable object that decide whether a hyper primitive should be included when being instantiated under `collect`. If None, all hyper primitives under `collect` will be included. require_hyper_name: If True, all hyper primitives defined in this scope will need to carry their names, which is usually a good idea when the function that instantiates the hyper primtives need to be called multiple times. per_thread: If True, the context manager will be applied to current thread only. Otherwise, it will be applied on current process. Returns: An DynamicEvaluationContext that can be passed to `pg.sample`.
github-repos
def price(self, valuation_date, market, model=None): del model, valuation_date reference_curve = market.reference_curve discount_curve = market.discount_curve fwd_rate = reference_curve.get_forward_rate(self._accrual_start_date, self._accrual_end_date, self._daycount_fraction) discount_at_settlement = discount_curve.get_discount_factor(self._settlement_date) return discount_at_settlement * self._notional * (fwd_rate - self._fixed_rate) * self._daycount_fraction / (1.0 + self._daycount_fraction * fwd_rate)
Returns the present value of the instrument on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the FRA instrument. model: Reserved for future use. Returns: A Rank 1 `Tensor` of real type containing the modeled price of each FRA contract based on the input market data.
github-repos
def __init__(self, src_file, src_line, message): self.message = message self.src_file = src_file self.src_line = src_line
Basic constructor for :class:`AbinitEvent`. Args: message: String with human-readable message providing info on the event. src_file: String with the name of the Fortran file where the event is raised. src_line Integer giving the line number in src_file.
juraj-google-style
def spence(x, name=None): with ops.name_scope(name, 'spence', [x]): return gen_special_math_ops.spence(x)
Computes Spence's integral of `x` element-wise. Spence's integral is defined as the integral of `log(t) / (1 - t)` from `1` to `x`, with the domain of definition all non-negative real numbers. >>> tf.math.special.spence([0.5, 1., 2., 3.]).numpy() array([ 0.58224034, 0. , -0.82246685, -1.4367464], dtype=float32) This implementation is based off of the Cephes math library. Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.spence @end_compatibility
github-repos
def run(self, **kwargs): if (not super().run(**kwargs)): return if kwargs['list']: self.log.info('--- List of Scheduler Modules ---') for (name, scheduler) in list(self.scheduler_plugins.items()): if (self.active_scheduler == name): self.log.info('{} (active)'.format(name)) else: self.log.info(name) self.log.info('--- End list of Scheduler Modules ---') return scheduler = self.scheduler_plugins[self.active_scheduler]() scheduler.execute_scheduler()
Execute the scheduler. Returns: `None`
codesearchnet
def tftp_update_bios(server=None, path=None): if not server: raise salt.exceptions.CommandExecutionError("The server name must be specified.") if not path: raise salt.exceptions.CommandExecutionError("The TFTP path must be specified.") dn = "sys/rack-unit-1/bios/fw-updatable" inconfig = .format(server, path) ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False) return ret
Update the BIOS firmware through TFTP. Args: server(str): The IP address or hostname of the TFTP server. path(str): The TFTP path and filename for the BIOS image. CLI Example: .. code-block:: bash salt '*' cimc.tftp_update_bios foo.bar.com HP-SL2.cap
juraj-google-style
def device_path_to_device_name(device_dir): path_items = os.path.basename(device_dir)[len(METADATA_FILE_PREFIX) + len(DEVICE_TAG):].split(',') return '/'.join([path_item.replace('device_', 'device:').replace('_', ':', 1) for path_item in path_items])
Parse device name from device path. Args: device_dir: (str) a directory name for the device. Returns: (str) parsed device name.
github-repos
def mutual_info(rho: Density, qubits0: Qubits, qubits1: Qubits=None, base: float=None) -> float: if (qubits1 is None): qubits1 = tuple((set(rho.qubits) - set(qubits0))) rho0 = rho.partial_trace(qubits1) rho1 = rho.partial_trace(qubits0) ent = entropy(rho, base) ent0 = entropy(rho0, base) ent1 = entropy(rho1, base) return ((ent0 + ent1) - ent)
Compute the bipartite von-Neumann mutual information of a mixed quantum state. Args: rho: A density matrix of the complete system qubits0: Qubits of system 0 qubits1: Qubits of system 1. If none, taken to be all remaining qubits base: Optional logarithm base. Default is base e Returns: The bipartite von-Neumann mutual information.
codesearchnet
def hwvtep_set_overlaygw_type(self, **kwargs): name = kwargs.pop('name') type = kwargs.pop('type') ip_args = dict(name=name, gw_type=type) method_name = 'overlay_gateway_gw_type' method_class = self._brocade_tunnels gw_attr = getattr(method_class, method_name) config = gw_attr(**ip_args) output = self._callback(config) return output
Set gateway type Args: name (str): gateway-name type (str): gateway-type callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
codesearchnet
def generate_output_nir(self, json_data=None, hr=True, show_name=False, colorize=True): if (json_data is None): json_data = {} output = generate_output(line='0', short=(HR_WHOIS_NIR['nets']['_short'] if hr else 'nir_nets'), name=(HR_WHOIS_NIR['nets']['_name'] if (hr and show_name) else None), is_parent=True, colorize=colorize) count = 0 if json_data['nir']: for net in json_data['nir']['nets']: if (count > 0): output += self.generate_output_newline(line='1', colorize=colorize) count += 1 output += generate_output(line='1', short=net['handle'], is_parent=True, colorize=colorize) for (key, val) in net.items(): if (val and (isinstance(val, dict) or ('\n' in val) or (key == 'nameservers'))): output += generate_output(line='2', short=(HR_WHOIS_NIR['nets'][key]['_short'] if hr else key), name=(HR_WHOIS_NIR['nets'][key]['_name'] if (hr and show_name) else None), is_parent=(False if ((val is None) or (len(val) == 0)) else True), value=('None' if ((val is None) or (len(val) == 0)) else None), colorize=colorize) if (key == 'contacts'): for (k, v) in val.items(): if v: output += generate_output(line='3', is_parent=(False if (len(v) == 0) else True), name=k, colorize=colorize) for (contact_key, contact_val) in v.items(): if (v is not None): tmp_out = '{0}{1}{2}'.format(contact_key, ': ', contact_val) output += generate_output(line='4', value=tmp_out, colorize=colorize) elif (key == 'nameservers'): for v in val: output += generate_output(line='3', value=v, colorize=colorize) else: for v in val.split('\n'): output += generate_output(line='3', value=v, colorize=colorize) else: output += generate_output(line='2', short=(HR_WHOIS_NIR['nets'][key]['_short'] if hr else key), name=(HR_WHOIS_NIR['nets'][key]['_name'] if (hr and show_name) else None), value=val, colorize=colorize) else: output += 'None' return output
The function for generating CLI output NIR network results. Args: json_data (:obj:`dict`): The data to process. Defaults to None. hr (:obj:`bool`): Enable human readable key translations. Defaults to True. show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. Returns: str: The generated output.
codesearchnet