Columns: code (string, lengths 20 to 4.93k) | docstring (string, lengths 33 to 1.27k) | source (string, 3 classes)
def wait(animation='elipses', text='', speed=0.2): def decorator(func): func.animation = animation func.speed = speed func.text = text @wraps(func) def wrapper(*args, **kwargs): animation = func.animation text = func.text if not isins...
Decorator for adding wait animation to long running functions. Args: animation (str, tuple): String reference to animation or tuple with custom animation. speed (float): Number of seconds each cycle of animation. Examples: >>> @animation.wait('bar') >>> def long_running_function(): >>> ... 5 seconds later ... >>>...
juraj-google-style
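A usage sketch reconstructed from the docstring example above (the `animation` import path and the 'bar' animation name are taken from that example and are assumptions, not verified here):

import time
import animation  # assumed package exposing the wait decorator, per the docstring example

@animation.wait('bar', text='working ', speed=0.2)
def long_running_function():
    # Simulated slow work; the 'bar' animation spins until this returns.
    time.sleep(5)

long_running_function()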
def run_inference(self, batch: Sequence[numpy.ndarray], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]: return self._inference_fn(batch, model, inference_args)
Runs inferences on a batch of 2d numpy arrays. Args: batch: A sequence of examples as 2d numpy arrays. Each row in an array is a single example. The dimensions must match the dimensions of the data used to train the model. model: XGBoost booster or XGBModel (sklearn interface). Must implement predict(X). Where the par...
github-repos
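For orientation, a sketch of what the default inference call reduces to outside of Beam (synthetic data; the handler itself batches inputs and wraps the outputs in PredictionResult objects):

import numpy as np
import xgboost

# Tiny sklearn-interface model trained on 3-feature rows.
model = xgboost.XGBRegressor(n_estimators=5)
model.fit(np.random.rand(20, 3), np.random.rand(20))

# A batch is a sequence of 2d arrays; each row is one example with 3 features.
batch = [np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])]
predictions = [model.predict(examples) for examples in batch]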
def _get_data(self) -> BaseFrameManager: def iloc(partition, row_internal_indices, col_internal_indices): return partition.iloc[(row_internal_indices, col_internal_indices)] masked_data = self.parent_data.apply_func_to_indices_both_axis(func=iloc, row_indices=self.index_map.values, col_indices=self.col...
Perform the map step Returns: A BaseFrameManager object.
codesearchnet
def _wrap_definition_section(source, width): index = (source.index('\n') + 1) (definitions, max_len) = _get_definitions(source[index:]) sep = ('\n' + (' ' * (max_len + 4))) lines = [source[:index].strip()] for (arg, desc) in six.iteritems(definitions): wrapped_desc = sep.join(textwrap.wrap(d...
Wrap the given definition section string to the current terminal size. Note: Auto-adjusts the spacing between terms and definitions. Args: source: The section string to wrap. Returns: The wrapped section string.
codesearchnet
def proportional_char(self, action): actions = {'off': 0, 'on': 1 } if action in actions: self.send(chr(27)+'p'+action) else: raise RuntimeError('Invalid action in function proportionalChar')
Specifies proportional characters. When turned on, the character spacing set with charSpacing is applied. Args: action: Turn proportional characters on or off. Returns: None Raises: RuntimeError: Invalid action.
juraj-google-style
def _delete_gridfs_data(self, data): if isinstance(data, ObjectId): if self._gridfs.exists({"_id": data}): self._gridfs.delete(data) else: raise DataStoreGridfsIdInvalid() elif isinstance(data, list): for item in data: ...
Delete all GridFS data that is linked by fields in the specified data. Args: data: The data that is parsed for MongoDB ObjectIDs. The linked GridFs object for any ObjectID is deleted.
juraj-google-style
def TerminateAFF4Flow(cls, flow_id, reason=None, status=None, token=None): flow_obj = aff4.FACTORY.Open(flow_id, aff4_type=GRRFlow, mode='rw', token=token) if (not flow_obj): raise FlowError(('Could not terminate flow %s' % flow_id)) with flow_obj: runner = flow_obj.GetRunner() if (n...
Terminate a flow. Args: flow_id: The flow session_id to terminate. reason: A reason to log. status: Status code used in the generated status message. token: The access token to be used for this request. Raises: FlowError: If the flow can not be found.
codesearchnet
def hflip(img): if (not _is_pil_image(img)): raise TypeError('img should be PIL Image. Got {}'.format(type(img))) return img.transpose(Image.FLIP_LEFT_RIGHT)
Horizontally flip the given PIL Image. Args: img (PIL Image): Image to be flipped. Returns: PIL Image: Horizontally flipped image.
codesearchnet
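For reference, the equivalent standalone PIL call (this is the single transpose the helper performs after its type check):

from PIL import Image

img = Image.new('RGB', (4, 2), 'white')
flipped = img.transpose(Image.FLIP_LEFT_RIGHT)  # mirror left-right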
def process_tree(self, root_directory, output_root_directory, copy_other_files): if output_root_directory == root_directory: return self.process_tree_inplace(root_directory) if output_root_directory and os.path.exists(output_root_directory): print('Output directory %r must not already exist.' % ...
Processes upgrades on an entire tree of python files in place. Note that only Python files are processed. If you have custom code in other languages, you will need to manually upgrade those. Args: root_directory: Directory to walk and process. output_root_directory: Directory to use as base. copy_other_files: Copy files that are n...
github-repos
def list_all_eq_to(list_, val, strict=True): if (util_type.HAVE_NUMPY and isinstance(val, np.ndarray)): return all([np.all((item == val)) for item in list_]) try: with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=FutureWarning) flags = [(item == v...
Checks to see if the list is equal everywhere to a value. Args: list_ (list): val : value to check against Returns: True if all items in the list are equal to val
codesearchnet
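Illustrative calls (behaviour per the docstring; the visible numpy branch covers array-valued items, the truncated tail covers plain objects):

list_all_eq_to([2, 2, 2], 2)   # True
list_all_eq_to([2, 3, 2], 2)   # False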
def read_tabular(filepath): (_, fn, ext) = splitext2(filepath) if (ext == '.h5'): return _read_tabular_h5(filepath) elif (ext == '.pkl'): return _read_tabular_pickle(filepath) else: raise NotImplementedError
Read tabular object in HDF5 or pickle format. Args: filepath (path-like): path to read from; must end in '.h5' or '.pkl'
codesearchnet
def _deserialize(self, entity, p, unused_depth=1): if p.meaning() == entity_pb.Property.EMPTY_LIST: self._store_value(entity, []) return val = self._db_get_value(p.value(), p) if val is not None: val = _BaseValue(val) if self._repeated: ...
Internal helper to deserialize this property from a protocol buffer. Subclasses may override this method. Args: entity: The entity, a Model (subclass) instance. p: A Property Message object (a protocol buffer). depth: Optional nesting depth, default 1 (unused here, but used by some subclasses that override this metho...
juraj-google-style
def save_parameters(self, path, grad_only=False): params = self.get_parameters(grad_only=grad_only) nn.save_parameters(path, params)
Save all parameters into a file with the specified format. Currently hdf5 and protobuf formats are supported. Args: path: path or file object grad_only (bool, optional): Only save parameters whose `need_grad` option is `True`.
juraj-google-style
def add_gene_panel(self, panel_obj): panel_name = panel_obj['panel_name'] panel_version = panel_obj['version'] display_name = panel_obj.get('display_name', panel_name) if self.gene_panel(panel_name, panel_version): raise IntegrityError("Panel {0} with version {1} al...
Add a gene panel to the database Args: panel_obj(dict)
juraj-google-style
def get_value_for_datastore(self, model_instance): value = super(JsonProperty, self).get_value_for_datastore(model_instance) if (not value): return None json_value = value if (not isinstance(value, dict)): json_value = value.to_json() if (not json_value): return None retu...
Gets value for datastore. Args: model_instance: instance of the model class. Returns: datastore-compatible value.
codesearchnet
def split_input(cls, job_config): reader_params = job_config.input_reader_params bucket = reader_params[cls.BUCKET_NAME_PARAM] filenames = reader_params[cls.OBJECT_NAMES_PARAM] delimiter = reader_params.get(cls.DELIMITER_PARAM) account_id = reader_params.get(cls._ACCOUNT_ID_PARAM) buffer_si...
Returns a list of input readers. An equal number of input files are assigned to each shard (+/- 1). If there are fewer files than shards, fewer than the requested number of shards will be used. Input files are currently never split (although for some formats could be and may be split in a future implementation). Args...
juraj-google-style
def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor: context = self.context_layer_norm(context) latents = self.latents_layer_norm(latents) batch_size, seq_length, embed_dim = context.shape[:3] q = self.q_proj(latents) k = self.k_proj(torch.cat([context, latents], dim=-2))...
Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension! Args: context (`torch.Tensor`): Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample. latents (`torch.Tensor`): Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latent...
github-repos
def _wait_after(provider, job_ids, poll_interval, stop_on_failure): job_ids_to_check = {j for j in job_ids if j != dsub_util.NO_JOB} error_messages = [] while job_ids_to_check and (not error_messages or not stop_on_failure): print('Waiting for: %s.' % (', '.join(job_ids_to_check))) ...
Print status info as we wait for those jobs. Blocks until either all of the listed jobs succeed, or one of them fails. Args: provider: job service provider job_ids: a set of job IDs (string) to wait for poll_interval: integer seconds to wait between iterations stop_on_failure: whether to stop waiting if one of the ta...
juraj-google-style
def clone(self, name=None): if name is None: name = self.module_name + "_clone" return MLP( name=name, output_sizes=self.output_sizes, activation=self.activation, activate_final=self.activate_final, initializers=self.initializers, partitioners=self.par...
Creates a new MLP with the same structure. Args: name: Optional string specifying the name of the new module. The default name is constructed by appending "_clone" to the original name. Returns: A cloned `MLP` module.
juraj-google-style
def dump_json(json_info, json_file, overwrite=True): if overwrite: mode = "w" else: mode = "w+" try: with open(json_file, mode) as f: f.write(json.dumps(json_info)) except BaseException as e: logging.error(e.message)
Dump a whole json record into the given file. Overwrite the file if the overwrite flag is set. Args: json_info (dict): Information dict to be dumped. json_file (str): File path to be dumped to. overwrite (boolean): Whether to overwrite an existing file.
juraj-google-style
def _dump_to_pages(dump): pos = 0 ret = [] start_tag = u"<page>\n" end_tag = u"</page>\n" while True: start_pos = dump.find(start_tag, pos) if start_pos == -1: break start_pos += len(start_tag) end_pos = dump.find(end_tag, start_pos) if end_pos == -1: break ret.append(du...
Extract pages from an xml dump. Args: dump: a unicode string Returns: a list of unicode strings
juraj-google-style
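A tiny worked example (the loop tail is truncated above, but the docstring pins down the behaviour):

dump = u"<mediawiki>\n<page>\nFirst page text\n</page>\n<page>\nSecond page text\n</page>\n</mediawiki>\n"
_dump_to_pages(dump)   # -> [u'First page text\n', u'Second page text\n']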
def generate_output_whois_nets(self, json_data=None, hr=True, show_name=False, colorize=True): if (json_data is None): json_data = {} output = generate_output(line='0', short=(HR_WHOIS['nets']['_short'] if hr else 'nets'), name=(HR_WHOIS['nets']['_name'] if (hr and show_name) else None), is_parent=True,...
The function for generating CLI output Legacy Whois networks results. Args: json_data (:obj:`dict`): The data to process. Defaults to None. hr (:obj:`bool`): Enable human readable key translations. Defaults to True. show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False. c...
codesearchnet
def set_wallpaper(image): desktop_env = system.get_name() if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']: uri = 'file: SCHEMA = 'org.gnome.desktop.background' KEY = 'picture-uri' if desktop_env == 'mate': uri = image SCHEMA = 'org.mate.background' KEY = 'picture-filename'...
Set the desktop wallpaper. Sets the desktop wallpaper to an image. Args: image (str): The path to the image to be set as wallpaper.
juraj-google-style
def midpoint(self): midpoints = [] for segment in self: if (len(segment) < 2): midpoints.append([]) else: midpoints.append(segment.midpoint()) return midpoints
Calculate the midpoint between locations in segments. Returns: list of Point: Groups of midpoints between points in segments
codesearchnet
def get_task_ops(task_type=TaskType.ALG_CTRL): try: return LearnToExecuteState.TASK_TYPE_OPS[task_type] except KeyError: raise KeyError(("Bad task_type '%s', check config." % task_type))
Returns an operations list based on the specified task index. Args: task_type: indicates the task type used. Returns: List of the eligible ops.
codesearchnet
def add_action_to(cls, parser, action, subactions, level): p = parser.add_parser(action.name, description=action.description, argument_default=argparse.SUPPRESS) for arg in action.args: arg.add_argument_to(p) if su...
Adds given action to given parser Args: parser: instance of devassistant_argparse.ArgumentParser action: devassistant.actions.Action subclass subactions: dict with subactions - {SubA: {SubB: {}}, SubC: {}}
juraj-google-style
def search(self, search_phrase, limit=None): query_parts = [ 'SELECT identifier, type, name, similarity(name, :word) AS sml', 'FROM identifier_index', 'WHERE name % :word', 'ORDER BY sml DESC, name'] query_params = { 'word': search_p...
Finds identifiers by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to return. None means without limit. Returns: list of IdentifierSearchResult instances.
juraj-google-style
def expo(base=2, factor=1, max_value=None): n = 0 while True: a = factor * base ** n if max_value is None or a < max_value: yield a n += 1 else: yield max_value
Generator for exponential decay. Args: base: The mathematical base of the exponentiation operation factor: Factor to multiply the exponentiation by. max_value: The maximum value to yield. Once the value in the true exponential sequence exceeds this, the value of max_value will forever after be yielded.
juraj-google-style
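A worked example of the yielded sequence, using the generator defined above:

gen = expo(base=2, factor=1, max_value=10)
[next(gen) for _ in range(6)]   # [1, 2, 4, 8, 10, 10] -- capped at max_value from then on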
async def async_fetch(url: str, **kwargs) -> Selector: kwargs.setdefault('headers', DEFAULT_HEADERS) async with aiohttp.ClientSession(**kwargs) as ses: async with ses.get(url, **kwargs) as res: html = (await res.text()) tree = Selector(text=html) return tree
Do the fetch in an async style. Args: url (str): The url of the site. Returns: Selector: allows you to select parts of HTML text using CSS or XPath expressions.
codesearchnet
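A minimal driver sketch (assumes the parsel-style Selector returned above; the URL is only a placeholder):

import asyncio

async def main():
    tree = await async_fetch('https://example.com')
    return tree.css('title::text').get()

print(asyncio.run(main()))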
def scan_servos(): servos = [] for servo_id in range(0x00, 0xFE): model = get_model(servo_id) if model: servos += [(servo_id, model)] return servos
Scan for the herkulex servos connected. This function will scan for all the herkulex servos connected to the bus. Args: none Returns: list: a list of tuples of the form [(id, model)]
juraj-google-style
def init_from_wave_file(wavpath): try: samplerate, data = SW.read(wavpath) nframes = data.shape[0] except: try: w = wave.open(wavpath) samplerate = w.getframerate() nframes = w.getnfr...
Init a sonic visualiser environment structure based on the analysis of the main audio file. The audio file has to be encoded in wave format. Args: wavpath(str): the full path to the wavfile
juraj-google-style
def _get_char_input_ids(self, input_ids, subwords_batch, char_count_per_id, pad_token_id=0, unk_token_id=1): if not hasattr(self.generation_config, 'char_to_id'): raise ValueError("This model generation config doesn't have a `char_to_id` key which maps\n characters to character ids. Make sure...
Returns the corresponding character input id for each character of `subwords_batch`. Args: input_ids (`torch.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. subwords_batch (`List[List[str]]` of shape `(batch_size, sequence_length)`): Corresponding text string for ...
github-repos
def _get_colors(n): import matplotlib.pyplot as plt from matplotlib.colors import rgb2hex as r2h from numpy import linspace cols = linspace(0.05, .95, n) cmap = plt.get_cmap('nipy_spectral') return [r2h(cmap(i)) for i in cols]
Returns n unique and "evenly" spaced colors for the backgrounds of the projects. Args: n (int): The number of unique colors wanted. Returns: colors (list of str): The colors in hex form.
juraj-google-style
def convert_ini(config_dict): config_lines = [] for env, configs in sorted(config_dict.items()): for resource, app_properties in sorted(configs.items()): try: for app_property, value in sorted(app_properties.items()): variable = '{env}_{resource}_{ap...
Convert _config_dict_ into a list of INI formatted strings. Args: config_dict (dict): Configuration dictionary to be flattened. Returns: (list) Lines to be written to a file in the format of KEY1_KEY2=value.
juraj-google-style
def AsDict(self, dt=True): data = {} if self.body: data['body'] = self.body if self.posted_at: data['posted_at'] = self.posted_at if self.user: data['user'] = self.user.AsDict() return data
A dict representation of this Comment instance. The return value uses the same key names as the JSON representation. Args: dt (bool): If True, return dates as python datetime objects. If False, return dates as ISO strings. Return: A dict representing this Comment instance
codesearchnet
def num_connected_components(self, unitary_only=False): reg_offset = 0 reg_map = {} if unitary_only: regs = self.qregs else: regs = (self.qregs + self.cregs) for reg in regs: reg_map[reg.name] = reg_offset reg_offset += reg.size sub_graphs = [[bit] for bit in rang...
How many non-entangled subcircuits can the circuit be factored to. Args: unitary_only (bool): Compute only unitary part of graph. Returns: int: Number of connected components in circuit.
codesearchnet
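An illustration with a plain circuit (assuming this method lives on Qiskit's QuantumCircuit, as its use of self.qregs and self.cregs suggests):

from qiskit import QuantumCircuit

qc = QuantumCircuit(3)
qc.h(0)
qc.cx(0, 1)   # qubits 0 and 1 interact; qubit 2 stays isolated
qc.num_connected_components(unitary_only=True)   # -> 2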
def _get_contrib_features(module): if isinstance(module, types.ModuleType): if hasattr(module, '__path__'): (yield from _get_contrib_features_from_package(module)) else: (yield _get_contrib_feature_from_module(module)) else: raise ValueError('Input is not a module...
Get contributed features from within the given module. Be very careful with untrusted code. The module/package will be walked, every submodule will be imported, and all the code therein will be executed. But why would you be trying to import from an untrusted package anyway? Args: contrib (module): module (standalone or p...
codesearchnet
async def create(self, coro: Coroutine) -> asyncio.Task: task = asyncio.get_event_loop().create_task(coro) self._tasks.add(task) return task
Starts execution of a coroutine. The created asyncio.Task is returned, and added to managed tasks. The scheduler guarantees that it is cancelled during application shutdown, regardless of whether it was already cancelled manually. Args: coro (Coroutine): The coroutine to be wrapped in a task, and executed. Returns: ...
codesearchnet
def set_all_tiers(key, value, django_cache_timeout=DEFAULT_TIMEOUT): DEFAULT_REQUEST_CACHE.set(key, value) django_cache.set(key, value, django_cache_timeout)
Caches the value for the provided key in both the request cache and the django cache. Args: key (string) value (object) django_cache_timeout (int): (Optional) Timeout used to determine if and for how long to cache in the django cache. A timeout of 0 will skip the django cache. If timeout is provided, use that timeout ...
codesearchnet
def validate(self, value): cast_callback = self.cast_callback if self.cast_callback else self.cast_type try: return value if isinstance(value, self.cast_type) else cast_callback(value) except Exception: raise NodeTypeError('Invalid value `{}` for {}.'.format(v...
Base validation method. Check if type is valid, or try brute casting. Args: value (object): A value for validation. Returns: Base_type instance. Raises: SchemaError, if validation or type casting fails.
juraj-google-style
def prepare_loss_weights(training_endpoints, loss_weights=None): if loss_weights is None: for e in training_endpoints: e.loss_weight = 1.0 elif isinstance(loss_weights, collections.abc.Mapping): generic_utils.check_for_unexpected_keys('loss_weights', loss_weights, [e.output_name for ...
Converts loss weights to a list of loss weights. The result loss weights will be populated on the training endpoint. Args: training_endpoints: List of model training endpoints. loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model ...
github-repos
def match(self, request): for test in self.filters: if not test(request, self): return False for mapper in self.mappers: request = mapper(request, self) if not request: raise ValueError('map function must ret...
Matches a given Request instance contract against the registered mocks. If a mock passes all the matchers, its response will be returned. Arguments: request (pook.Request): Request contract to match. Raises: pook.PookNoMatches: if networking is disabled and no mock matches with the given request contract. Returns: ...
juraj-google-style
def get_alignment_df_from_file(alignment_file, a_seq_id=None, b_seq_id=None): alignments = list(AlignIO.parse(alignment_file, "emboss")) alignment_df = pd.DataFrame(columns=['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']) for alignment in alignments: if not a_seq_id: ...
Get a Pandas DataFrame of the Needle alignment results. Contains all positions of the sequences. Args: alignment_file: a_seq_id: Optional specification of the ID of the reference sequence b_seq_id: Optional specification of the ID of the aligned sequence Returns: Pandas DataFrame: all positions in the alignment
juraj-google-style
def to_json(self): cursor = self._get_cursor() cursor_object = False if (cursor and isinstance(cursor, datastore_query.Cursor)): cursor = cursor.to_websafe_string() cursor_object = True return {'key_range': self._key_range.to_json(), 'query_spec': self._query_spec.to_json(), 'cursor': cu...
Serializes all states into json form. Returns: all states in json-compatible map.
codesearchnet
def __init__(self, minimum=-18446744073709551616, maximum=18446744073709551615, singleStep=1, parent=None): super(BigIntSpinboxDelegate, self).__init__(parent) self.minimum = minimum self.maximum = maximum self.singleStep = singleStep
Construct a new instance of a BigIntSpinboxDelegate. Args: minimum (int or long, optional): minimum allowed number in BigIntSpinbox. Defaults to -18446744073709551616. maximum (int or long, optional): maximum allowed number in BigIntSpinbox. Defaults to 18446744073709551615. singleStep (int, optional): amount of steps...
juraj-google-style
def images(self, **kwargs): path = self._get_series_id_season_number_path('images') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the images (posters) that we have stored for a TV season by season number. Args: language: (optional) ISO 639 code. include_image_language: (optional) Comma separated, a valid ISO 639-1. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def add_note(self, note): notes = self.cached_json if (not note.moderator): note.moderator = self.r.user.me().name try: mod_index = notes['constants']['users'].index(note.moderator) except ValueError: notes['constants']['users'].append(note.moderator) mod_index = notes['c...
Add a note to the usernotes wiki page. Arguments: note: the note to be added (Note) Returns the update message for the usernotes wiki Raises: ValueError when the warning type of the note can not be found in the stored list of warnings.
codesearchnet
def GetFormattedSources(self, event): event_formatter = self.GetEventFormatter(event) if (not event_formatter): return (None, None) return event_formatter.GetSources(event)
Retrieves the formatted sources related to the event. Args: event (EventObject): event. Returns: tuple: containing: str: full source string or None if no event formatter was found. str: short source string or None if no event formatter was found.
codesearchnet
def create_endpoints_csv_file(self, timeout=-1): uri = "{}/endpoints/".format(self.data["uri"]) return self._helper.do_post(uri, {}, timeout, None)
Creates an endpoints CSV file for a SAN. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: dict: Endpoint CSV File Response.
juraj-google-style
def compute_author_match_score(x_authors, y_authors): if not x_authors or not y_authors: return 0.0 matches = get_number_of_author_matches(x_authors, y_authors) max_length = max(len(x_authors), len(y_authors)) return matches / float(max_length)
Return the matching score of 2 given lists of authors. Args: x_authors (list(dict)): first schema-compliant list of authors. y_authors (list(dict)): second schema-compliant list of authors. Returns: float: matching score of authors.
juraj-google-style
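The returned score is simply matches over the longer author list; a numeric illustration (the match count is hypothetical, since get_number_of_author_matches is defined elsewhere):

matches = 2                           # pretend the helper found 2 shared authors
max_length = max(3, 4)                # x has 3 authors, y has 4
score = matches / float(max_length)  # 0.5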
def set_time(self, value: float): if (value < 0): value = 0 self.controller.row = (self.rps * value)
Set the current time jumping in the timeline. Args: value (float): The new time
codesearchnet
def run_ui(self, init_command=None, title=None, title_color=None, enable_mouse_on_start=True): raise NotImplementedError('run_ui() is not implemented in BaseUI')
Run the UI until user- or command- triggered exit. Args: init_command: (str) Optional command to run on CLI start up. title: (str) Optional title to display in the CLI. title_color: (str) Optional color of the title, e.g., "yellow". enable_mouse_on_start: (bool) Whether the mouse mode is to be enabled on start-up. Re...
github-repos
def _SetHashers(self, hasher_names_string): if ((not hasher_names_string) or (hasher_names_string == 'none')): return analyzer_object = analyzers_manager.AnalyzersManager.GetAnalyzerInstance('hashing') analyzer_object.SetHasherNames(hasher_names_string) self._analyzers.append(analyzer_object)
Sets the hasher names. Args: hasher_names_string (str): comma separated names of the hashers to enable, where 'none' disables the hashing analyzer.
codesearchnet
def GetHashers(cls, hasher_names): hashers = [] for hasher_name, hasher_class in iter(cls._hasher_classes.items()): if hasher_name in hasher_names: hashers.append(hasher_class()) return hashers
Retrieves instances for all the specified hashers. Args: hasher_names (list[str]): names of the hashers to retrieve. Returns: list[BaseHasher]: hashers.
juraj-google-style
def verify_dataset(X, y): (X_shape, y_shape) = (np.array(X).shape, np.array(y).shape) if (len(X_shape) != 2): raise exceptions.UserError('X must be 2-dimensional array') if (len(y_shape) != 1): raise exceptions.UserError('y must be 1-dimensional array') if (X_shape[0] != y_shape[0]): ...
Verifies if a dataset is valid for use, i.e. scikit-learn format. Used to verify a dataset by returning shape and basic statistics of returned data. This will also provide a quick and dirty check on the capability of the host machine to process the data. Args: X (array-like): Features array y (array-like): Label array Returns:...
codesearchnet
def call(self): (headers, data) = self.prepare() if _LOG.isEnabledFor(logging.DEBUG): _LOG.debug('Sending %s, %s', headers, prettify(data)) response = requests.post(self.endpoint, headers=headers, data=data.encode('utf-8'), **self.request_args) _LOG.debug('Received %s, %s', response.headers, res...
Call the SOAP method on the server. Returns: str: the decapsulated SOAP response from the server, still encoded as utf-8. Raises: SoapFault: if a SOAP error occurs. ~requests.exceptions.HTTPError: if an http error occurs.
codesearchnet
def search_orcid(orcid): url = 'https: r = requests.get(url, headers=headers) if (r.status_code != 200): r.raise_for_status() return r.json()
Search the ORCID public API. Specifically, return a dictionary with the personal details (name, etc.) of the person associated with the given ORCID. Args: orcid (`str`): The ORCID to be searched Returns: `dict`: Dictionary with the JSON response from the API Raises: `~requests.HTTPError`: If the given ORCID cannot be ...
codesearchnet
def write_grib2(self, path): if self.percentile is None: var_type = "mean" else: var_type = "p{0:02d}".format(self.percentile) lscale = 1e6 grib_id_start = [7, 0, 14, 14, 2] gdsinfo = np.array([0, np.product(self.data.shape[-2:]), 0, 0, 30], dtype...
Writes data to grib2 file. Currently, grib codes are set by hand to hail. Args: path: Path to directory containing grib2 files. Returns:
juraj-google-style
def pack(self, value=None): if (value is None): self.update_header_length() return super().pack() elif isinstance(value, type(self)): return value.pack() else: msg = '{} is not an instance of {}'.format(value, type(self).__name__) raise PackException(msg)
Pack the message into binary data. One of the basic operations on a Message is the pack operation. During the packing process, we convert all message attributes to binary format. Since this is usually used before sending the message to a switch, here we also call :meth:`update_header_length`. .. seealso:: Thi...
codesearchnet
def all_reduce_sum_gradients(grads_and_vars): grads_and_vars = list(grads_and_vars) filtered_grads_and_vars = filter_empty_gradients(grads_and_vars) if filtered_grads_and_vars: if strategy_supports_no_merge_call(): grads = [pair[0] for pair in filtered_grads_and_vars] reduced...
Returns all-reduced gradients aggregated via summation. Args: grads_and_vars: List of (gradient, variable) pairs. Returns: List of (gradient, variable) pairs where gradients have been all-reduced.
github-repos
def dataset_as_numpy(dataset): if not context.executing_eagerly(): raise ValueError('dataset_as_numpy must be run in eager mode outside tf.function') nested_ds = dataset del dataset flat_ds = nest.flatten(nested_ds) flat_np = [] for ds_el in flat_ds: if not isinstance(ds_el, (ten...
Converts a `tf.data.Dataset` to an iterable of ndarrays. `dataset_as_numpy` converts a possibly nested structure of `tf.data.Dataset`s and `tf.Tensor`s to iterables of ndarrays and ndarrays, respectively. This function must be run in eager mode outside tf.function. Args: dataset: a possibly nested structure of `tf.da...
github-repos
def track_change(self, tile, property_name, value, formatter=None): if (not self.tracking): return if ((len(self._whitelist) > 0) and ((tile, property_name) not in self._whitelist)): return if (formatter is None): formatter = str change = StateChange(monotonic(), tile, property_n...
Record that a change happened on a given tile's property. This will add a StateChange object to our list of changes if we are recording changes; otherwise, it will drop the change. Args: tile (int): The address of the tile that the change happened on. property_name (str): The name of the property that changed. value (...
codesearchnet
def _init_boto3_clients(self, profile, region): try: session = None if profile and region: session = boto3.session.Session(profile_name=profile, region_name=region) elif profile: session = boto3.session.Session(profile_name=profile) ...
The utility requires boto3 clients for CloudFormation. Args: None Returns: Good or Bad; True or False
juraj-google-style
def today(self, strict=False): return self.on(arrow.now(), strict=strict)
Iterates (in chronological order) over all events that occur today. Args: strict (bool): if True, events will be returned only if they are strictly *included* in `day`.
juraj-google-style
def ExamineEvent(self, mediator, event): if event.data_type != 'fs:stat': return filename = getattr(event, 'filename', None) if not filename: return if 'chrome' not in filename.lower(): return if not self._sep: self._sep = self._GetPathSegmentSeparator(filen...
Analyzes an event. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
juraj-google-style
def process_ems(self, doc: Document) -> List[Document]: new_docs = list() for a_em in self.em_lst: if a_em.document_selector(doc): self.log(" processing with " + str(type(a_em)) + ". Process", "info", doc.doc_id, doc.url) fresh_docs = a_em.process_do...
Factory method to wrap input JSON docs in an ETK Document object. Args: doc (Document): process on this document Returns: a Document object and a KnowledgeGraph object
juraj-google-style
def pager(__text: str, *, pager: Optional[str] = 'less'): if pager: run([pager, ], input=__text.encode()) else: print(__text)
Pass output through pager. See :manpage:`less(1)`, if you wish to configure the default pager. For example, you may wish to check ``FRSX`` options. Args: __text: Text to page pager: Pager to use
juraj-google-style
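Usage sketch (the default pipes the text through less(1); passing pager=None falls back to a plain print):

pager('\n'.join(str(i) for i in range(500)))   # paged through less
pager('short message', pager=None)             # printed directly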
def dr( self, atom1, atom2 ): return self.cell.dr( atom1.r, atom2.r )
Calculate the distance between two atoms. Args: atom1 (vasppy.Atom): Atom 1. atom2 (vasppy.Atom): Atom 2. Returns: (float): The distance between Atom 1 and Atom 2.
juraj-google-style
def from_hyperplane(basis, origin, point, internal=True): basis = np.array(basis) assert ((basis.shape[0] + 1) == basis.shape[1]) big_basis = np.zeros((basis.shape[1], basis.shape[1])) big_basis[:basis.shape[0], :basis.shape[1]] = basis (u, s, vh) = np.linalg.svd(big_basis) null_mask = (s <= 1...
Returns a Halfspace defined by a list of vectors parallel to the bounding hyperplane. Args: basis: basis for the hyperplane (array with vector rows) origin: point on the hyperplane point: point not on the hyperplane internal: whether point is inside the halfspace
codesearchnet
def read(self, path): with open(path, 'r') as f: for line in f: line = line.strip() match_obj_name = re.search('^([A-Z][A-Z/ \\d]+),', line) if (match_obj_name is not None): internal_name = match_obj_name.group(1) if (internal_name in self....
Read EPW weather data from path. Args: path (str): path to read weather data from
codesearchnet
def __init__(self, **kwds): self.code_objs = dict() self._codes = [] self._functions = [] self._executables = [] self.dry_run = None self.encoding = 'utf-8' self.newline = None if 'module' in kwds: self.import_module(kwds['module']) ...
Initialize MassEdit object. Args: - code (byte code object): code to execute on input file. - function (str or callable): function to call on input file. - module (str): module name where to find the function. - executable (str): executable file name to execute on input file. - dry_run (bool): skip actual modification...
juraj-google-style
def set_white(self, brightness, colourtemp): if not 25 <= brightness <= 255: raise ValueError("The brightness needs to be between 25 and 255.") if not 0 <= colourtemp <= 255: raise ValueError("The colour temperature needs to be between 0 and 255.") payload = sel...
Set white coloured theme of an rgb bulb. Args: brightness(int): Value for the brightness (25-255). colourtemp(int): Value for the colour temperature (0-255).
juraj-google-style
class MaxScore(ScoreAggregation): def __init__(self, **kwargs): super().__init__(agg_func=max, **kwargs)
Aggregates anomaly scores by selecting the maximum score. This `AggregationFn` selects the highest anomaly score from a collection of `AnomalyPrediction` objects as the aggregated score. Args: **kwargs: Additional keyword arguments to pass to the base `ScoreAggregation` class.
github-repos
def getShareInfo(item): key = f'_syn_sharinfo_{item.__class__.__module__}_{item.__class__.__qualname__}' info = getattr(item, key, None) if info is not None: return info meths = {} info = {'meths': meths} for name in dir(item): if name.startswith('_'): continu...
Get a dictionary of special annotations for a Telepath Proxy. Args: item: Item to inspect. Notes: This will set the ``_syn_telemeth`` attribute on the item and the items class, so this data is only computed once. Returns: dict: A dictionary of methods requiring special handling by the proxy.
juraj-google-style
def _parse_type_to_int(dtype, flag): if dtype not in mmi_constants.TFLITE_TYPES: raise ValueError("Unsupported value '{0}' for {1}. Only {2} are supported.".format(dtype, flag, mmi_constants.TFLITE_TYPES)) dtype_str = mmi_constants.TFLITE_TO_STR_TYPES[dtype] dtype_int = schema_fb.TensorType.__dict__...
Converts a tflite type to its integer representation. Args: dtype: tf.DType representing the inference type. flag: str representing the flag name. Returns: integer, a tflite TensorType enum value. Raises: ValueError: Unsupported tflite type.
github-repos
def _update_explicit_bucket_count(a_float, dist): buckets = dist.explicitBuckets if buckets is None: raise ValueError(_BAD_UNSET_BUCKETS % (u'explicit buckets')) bucket_counts = dist.bucketCounts bounds = buckets.bounds if len(bucket_counts) < len(bounds) + 1: raise ValueError(_...
Adds `a_float` to `dist`, updating its explicit buckets. Args: a_float (float): a new value dist (:class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution`): the Distribution being updated Raises: ValueError: if `dist` does not already have explicit buckets defined ValueError: if there are not enough b...
juraj-google-style
def infer_schema(stats_path, schema_path): print('Infering schema from statistics.') schema = tfdv.infer_schema(tfdv.load_statistics(stats_path), infer_feature_shape=False) print(text_format.MessageToString(schema)) print('Writing schema to output path.') file_io.write_string_to_file(schema_path, te...
Infers a schema from stats in stats_path. Args: stats_path: Location of the stats used to infer the schema. schema_path: Location where the inferred schema is materialized.
github-repos
def _execute(self, request): if self._rate_limiter: with self._rate_limiter: return request.execute(http=self.http, num_retries=self._num_retries) return request.execute(http=self.http, ...
Run execute with retries and rate limiting. Args: request (object): The HttpRequest object to execute. Returns: dict: The response from the API.
juraj-google-style
def max(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent': return cls._binary_op(x, y, tf.maximum, tf.float32)
Returns a TensorFluent for the maximum function. Args: x: The first operand. y: The second operand. Returns: A TensorFluent wrapping the maximum function.
codesearchnet
def read_int16(self, little_endian=True): if little_endian: endian = '<' else: endian = '>' return self.unpack(('%sh' % endian), 2)
Read 2 bytes as a signed integer value from the stream. Args: little_endian (bool): specify the endianness. (Default) Little endian. Returns: int:
codesearchnet
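The '%sh' format string above maps onto the standard struct codes; for reference:

import struct

struct.unpack('<h', b'\x01\x00')[0]   # 1    (little-endian signed 16-bit)
struct.unpack('>h', b'\x01\x00')[0]   # 256  (big-endian)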
def get(account): account = Account.get(account) if (not account): return None acct_type = AccountType.get(account.account_type_id).account_type account_class = get_plugin_by_name(PLUGIN_NAMESPACES['accounts'], acct_type) return account_class(account)
Returns the class object identified by `account_id` Args: account (`int`, `str`): Unique ID of the account to load from database Returns: `Account` object if found, else None
codesearchnet
def requires_genesis(self): genesis_file = os.path.join(self._data_dir, 'genesis.batch') has_genesis_batches = Path(genesis_file).is_file() LOGGER.debug('genesis_batch_file: %s', (genesis_file if has_genesis_batches else 'not found')) chain_head = self._block_store.chain_head has_chain_head = (chain...
Determines if the system should be put in genesis mode Returns: bool: return whether or not a genesis block is required to be generated. Raises: InvalidGenesisStateError: raises this error if there is invalid combination of the following: genesis.batch, existing chain head, and block chain id.
codesearchnet
def configure_ospf(self, cmd): config = self.get() cmds = ['router ospf {}'.format(config['ospf_process_id'])] cmds.extend(make_iterable(cmd)) return super(Ospf, self).configure(cmds)
Allows for a list of OSPF subcommands to be configured. Args: cmd (list or str): Subcommand to be entered Returns: bool: True if all the commands completed successfully
codesearchnet
def recipe_dcm_log(config, auth_read, auth_write, accounts, days, recipe_slug): dataset(config, {'description': 'The dataset will hold log table, Create it exists.', 'hour': [1], 'auth': auth_write, 'dataset': recipe_slug}) dcm_log(config, {'description': 'Will create tables with format CM_* to hold each endpoi...
Downloads Campaign manager logs and allows audits. Args: auth_read (authentication) - Credentials used for reading data. auth_write (authentication) - Credentials used for writing data. accounts (integer_list) - Comma separated CM account ids. days (integer) - Number of days to backfill the log, works on first run onl...
github-repos
def _get_db_fields(self, obj): for field in obj.indexes: (yield (field, self._zeo_key(field)))
Return list of database dictionaries, which are used as indexes for each attribute. Args: cached (bool, default True): Use cached connection to database. Returns: list: List of OOBTree's for each item in :attr:`.COMMON_FIELDS`.
codesearchnet
def include_revision(revision_num, skip_factor=1.1): if (skip_factor <= 1.0): return True return (int((math.log1p(revision_num) / math.log(skip_factor))) != int((math.log((revision_num + 2.0)) / math.log(skip_factor))))
Decide whether to include a revision. If the number of revisions is large, we exclude some revisions to avoid a quadratic blowup in runtime, since the article is likely also large. We make the ratio between consecutive included revision numbers approximately equal to "factor". Args: revision_num: an integer skip_fa...
codesearchnet
def releases(self, **kwargs): path = self._get_id_path('releases') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the release date and certification information by country for a specific movie id. Args: append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def _from_safe_path_param_name(safe_parameter): assert safe_parameter.startswith('_') safe_parameter_as_base32 = safe_parameter[1:] padding_length = ((- len(safe_parameter_as_base32)) % 8) padding = ('=' * padding_length) return base64.b32decode((safe_parameter_as_base32 + padding))
Takes a safe regex group name and converts it back to the original value. Only alphanumeric characters and underscore are allowed in variable name tokens, and numeric are not allowed as the first character. The safe_parameter is a base32 representation of the actual value. Args: safe_parameter: A string that was gen...
codesearchnet
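A round-trip sketch of the encoding this helper reverses (the value is hypothetical; the padding arithmetic mirrors the function above):

import base64

original = b'user-id'                               # hypothetical path parameter value
safe = '_' + base64.b32encode(original).decode().rstrip('=')

body = safe[1:]
padding = '=' * (-len(body) % 8)
assert base64.b32decode(body + padding) == original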
def initialized_value(self): raise NotImplementedError
Returns the value of the initialized variable. You should use this instead of the variable itself to initialize another variable with a value that depends on the value of this variable. ```python # Initialize 'v' with a random tensor. v = tf.Variable(tf.random.truncated_normal([10, 40])) # Use `initialized_value` to ...
github-repos
def trace_buffer_capacity(self): cmd = enums.JLinkTraceCommand.GET_CONF_CAPACITY data = ctypes.c_uint32(0) res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data)) if (res == 1): raise errors.JLinkException('Failed to get trace buffer size.') return da...
Retrieves the trace buffer's current capacity. Args: self (JLink): the ``JLink`` instance. Returns: The current capacity of the trace buffer. This is not necessarily the maximum possible size the buffer could be configured with.
juraj-google-style
def resize(self, images: 'torch.Tensor', size: SizeDict, keep_aspect_ratio: bool=False, ensure_multiple_of: int=1, interpolation: Optional['F.InterpolationMode']=None) -> 'torch.Tensor': if not size.height or not size.width: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. ...
Resize an image or batched images to target size `(size["height"], size["width"])`. If `keep_aspect_ratio` is `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is resized to a size that is a multiple of this value. Args: images (...
github-repos
def expint(x, name=None): with ops.name_scope(name, 'expint', [x]): return gen_special_math_ops.expint(x)
Computes the Exponential integral of `x` element-wise. The Exponential integral is defined as the integral of `exp(t) / t` from `-inf` to `x`, with the domain of definition all positive real numbers. >>> tf.math.special.expint([1., 1.1, 2.1, 4.1]).numpy() array([ 1.8951179, 2.1673784, 5.3332353, 21.048464], dtype=f...
github-repos
def approve(self, peer_jid): self.roster.approve(aioxmpp.JID.fromstr(peer_jid).bare())
Approve a subscription request from jid Args: peer_jid (str): the JID to approve
codesearchnet
def _parse(json_str: str, primitive_cls: Type[Instant]) -> Instant: datetime_str, timezone_str = _primitive_time_utils.split_timezone(json_str) try: dt = datetime.datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S') return _primitive_time_utils.build_date_like(dt, timezone_str, _primitive_time_u...
Parses the json_str into an Instant FHIR primitive. Args: json_str: The raw JSON string to parse. primitive_cls: The FHIR primitive to parse into. Returns: A FHIR primitive Instant. Raises: fhir_errors.InvalidFhirError: In the event that no FHIR primitive Instant format was able to properly parse the json_str.
github-repos
def ctc_loss(target, output, target_length, output_length, mask_index=0): if any_symbolic_tensors((target, output, target_length, output_length)): return CTCLoss(mask_index).symbolic_call(target, output, target_length, output_length) return backend.nn.ctc_loss(target, output, target_length, output_lengt...
CTC (Connectionist Temporal Classification) loss. Args: target: A tensor of shape `(batch_size, max_length)` containing the true labels in integer format. output: A tensor of shape `(batch_size, max_length, num_classes)` containing logits (the output of your model). target_length: A tensor of shape `(batch_size,)` con...
github-repos
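A shape-level sketch (assuming the Keras 3 keras.ops.ctc_loss export of the function above; the values are random, only the shapes matter):

import numpy as np
from keras import ops

batch, max_len, num_classes = 2, 5, 4
target = np.array([[1, 2, 1, 0, 0], [1, 3, 0, 0, 0]], dtype='int32')    # 0 acts as the mask index
output = np.random.rand(batch, max_len, num_classes).astype('float32')  # logits
target_length = np.array([3, 2], dtype='int32')
output_length = np.array([5, 5], dtype='int32')

loss = ops.ctc_loss(target, output, target_length, output_length, mask_index=0)
# loss: one value per batch element, shape (2,)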
def upsert_variant(self, variant_obj): LOG.debug('Upserting variant %s', variant_obj['_id']) try: result = self.variant_collection.insert_one(variant_obj) except DuplicateKeyError as err: LOG.debug('Variant %s already exists in database', variant_obj['_id']) result = self.variant_col...
Load a variant object, if the object already exists update compounds. Args: variant_obj(dict) Returns: result
codesearchnet
def _save_states(self, state, serialized_readers_entity): mr_id = state.key().id_or_name() fresh_state = model.MapreduceState.get_by_job_id(mr_id) if not self._check_mr_state(fresh_state, mr_id): return False if fresh_state.active_shards != 0: logging.warning( "Mapreduce %s al...
Run transaction to save state. Args: state: a model.MapreduceState entity. serialized_readers_entity: a model._HugeTaskPayload entity containing json serialized input readers. Returns: False if a fatal error is encountered and this task should be dropped immediately. True if transaction is successful. None if a previ...
juraj-google-style
def db_dp004(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `db_dp004`'.format(value)) self._db_dp004...
Corresponds to IDD Field `db_dp004` mean coincident dry-bulb temperature to Dew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `db_dp004` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missin...
juraj-google-style
def __init__(self, value, masks=None, name='X.509 Certificate'): super(X509Certificate, self).__init__( enums.CertificateType.X_509, value, masks, name) self._x509_certificate_identifier = None self._x509_certificate_subject = None ...
Create an X509Certificate. Args: value(bytes): The bytes representing the certificate. masks(list): A list of CryptographicUsageMask enumerations defining how the certificate will be used. name(string): The string name of the certificate.
juraj-google-style
def __DepthFirstSearch(node, hashes): if node.LeftChild is None: hashes.add(node.Hash) else: MerkleTree.__DepthFirstSearch(node.LeftChild, hashes) MerkleTree.__DepthFirstSearch(node.RightChild, hashes)
Internal helper method. Args: node (MerkleTreeNode): hashes (list): each item is a bytearray.
juraj-google-style