Columns: code (string, lengths 20–4.93k) · docstring (string, lengths 33–1.27k) · source (string, 3 classes)
def write_json(self, fh, pretty=True):
    sjson = json.JSONEncoder().encode(self.json())
    if pretty:
        json.dump(json.loads(sjson), fh, sort_keys=True, indent=4)
    else:
        json.dump(json.loads(sjson), fh)
    return
Write composite object to file handle in JSON format. Args: fh (file): File handle to write to. pretty (bool): Sort keys and indent in output.
codesearchnet
def summary_writer_function(name, tensor, function, family=None):
    name_scope = ops.get_name_scope()
    if name_scope:
        name_scope += '/'

    def record():
        with ops.name_scope(name_scope), summary_op_util.summary_scope(name, family, values=[tensor]) as (tag, scope):
            with ops.control_dependencies([function(tag, scope)]):
                return constant_op.constant(True)

    if _summary_state.writer is None:
        return control_flow_ops.no_op()
    with ops.device('cpu:0'):
        op = smart_cond.smart_cond(_legacy_contrib_should_record_summaries(), record, _nothing, name='')
        if not context.executing_eagerly():
            ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)
    return op
Helper function to write summaries. Args: name: name of the summary tensor: main tensor to form the summary function: function taking a tag and a scope which writes the summary family: optional, the summary's family Returns: The result of writing the summary.
github-repos
def transform_rest_response(self, response_body):
    body_json = json.loads(response_body)
    return json.dumps(body_json, indent=1, sort_keys=True)
Translates an apiserving REST response so it's ready to return. Currently, the only thing that needs to be fixed here is indentation, so it's consistent with what the live app will return. Args: response_body: A string containing the backend response. Returns: A reformatted version of the response JSON.
codesearchnet
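A minimal standalone sketch of the same reindentation step, using only the standard json module (the sample payload is hypothetical):

import json

raw = '{"b": 2, "a": 1}'
# Round-trip through json to normalize indentation and key ordering.
pretty = json.dumps(json.loads(raw), indent=1, sort_keys=True)
print(pretty)
# {
#  "a": 1,
#  "b": 2
# }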
def check_constraint(type_constraint, object_instance):
    if type_constraint is None and object_instance is None:
        return
    elif isinstance(type_constraint, TypeConstraint):
        type_constraint.type_check(object_instance)
    elif type_constraint is None:
        pass
    elif not isinstance(type_constraint, type):
        raise RuntimeError('bad type: %s' % (type_constraint,))
    elif not isinstance(object_instance, type_constraint):
        raise SimpleTypeHintError
Determine if the passed type instance satisfies the TypeConstraint. When examining a candidate type for constraint satisfaction in 'type_check', all CompositeTypeHint's eventually call this function. This function may end up being called recursively if the hinted type of a CompositeTypeHint is another CompositeTypeHint. Args: type_constraint: An instance of a TypeConstraint or a built-in Python type. object_instance: An object instance. Raises: SimpleTypeHintError: If 'type_constraint' is a one of the allowed primitive Python types and 'object_instance' isn't an instance of this type. CompositeTypeHintError: If 'type_constraint' is a TypeConstraint object and 'object_instance' does not satisfy its constraint.
github-repos
def _deconstruct_single_qubit_matrix_into_gate_turns(
        mat: np.ndarray) -> Tuple[float, float, float]:
    pre_phase, rotation, post_phase = (
        linalg.deconstruct_single_qubit_matrix_into_angles(mat))
    tau = 2 * np.pi
    xy_turn = rotation / tau
    xy_phase_turn = 0.25 - pre_phase / tau
    total_z_turn = (post_phase + pre_phase) / tau
    return (_signed_mod_1(xy_turn),
            _signed_mod_1(xy_phase_turn),
            _signed_mod_1(total_z_turn))
Breaks down a 2x2 unitary into gate parameters. Args: mat: The 2x2 unitary matrix to break down. Returns: A tuple containing the amount to rotate around an XY axis, the phase of that axis, and the amount to phase around Z. All results will be in fractions of a whole turn, with values canonicalized into the range [-0.5, 0.5).
juraj-google-style
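The _signed_mod_1 helper is not shown above; a plausible sketch, assuming it only canonicalizes a turn count into [-0.5, 0.5) as the docstring states:

def _signed_mod_1(x: float) -> float:
    # Wrap x into the half-open interval [-0.5, 0.5).
    return (x + 0.5) % 1.0 - 0.5

assert _signed_mod_1(0.75) == -0.25
assert _signed_mod_1(-0.5) == -0.5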
def parse_filepath(self, filepath=None):
    filepath = (filepath or self._default_filename)
    (path, filename) = os.path.split(filepath)
    if (not path):
        path = self.basedir
    elif (not os.path.isabs(path)):
        path = os.path.join(self.basedir, path)
    return (os.path.normpath(path), filename)
Parse given filepath to split a possible path directory from the filename. * If the path directory is empty, the ``basedir`` attribute is used as the base filepath; * If the path directory is absolute, the ``basedir`` attribute is ignored; * If the path directory is relative, it is joined to the ``basedir`` attribute. Keyword Arguments: filepath (str): Filepath to use to search for the settings file. Will use the value from the ``_default_filename`` class attribute if empty. If filepath contains a directory path, it will be split from the filename and used as the base directory (updating the object's ``basedir`` attribute). Returns: tuple: Separated path directory and filename.
codesearchnet
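The three path cases described in the docstring can be reproduced with plain os.path calls; a quick standalone sketch with a hypothetical basedir:

import os

basedir = '/etc/app'

def split_filepath(filepath):
    # Mirrors the logic of parse_filepath above.
    path, filename = os.path.split(filepath)
    if not path:
        path = basedir
    elif not os.path.isabs(path):
        path = os.path.join(basedir, path)
    return os.path.normpath(path), filename

print(split_filepath('settings.json'))       # ('/etc/app', 'settings.json')
print(split_filepath('conf/settings.json'))  # ('/etc/app/conf', 'settings.json')
print(split_filepath('/opt/settings.json'))  # ('/opt', 'settings.json')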
def get_conflicting_tools(self, request_only=False):
    from collections import defaultdict
    tool_sets = defaultdict(set)
    tools_dict = self.get_tools(request_only=request_only)
    for variant, tools in tools_dict.itervalues():
        for tool in tools:
            tool_sets[tool].add(variant)
    conflicts = dict((k, v) for k, v in tool_sets.iteritems() if len(v) > 1)
    return conflicts
Returns tools of the same name provided by more than one package. Args: request_only: If True, only return the key from resolved packages that were also present in the request. Returns: Dict of {tool-name: set([Variant])}.
juraj-google-style
def _list_samples(self, predicate=None):
    cursor = self.database[self.sample_collection].find(predicate, {'_id': 0, 'md5': 1})
    return [item['md5'] for item in cursor]
List all samples that meet the predicate or all if predicate is not specified. Args: predicate: Match samples against this predicate (or all if not specified) Returns: List of the md5s for the matching samples
codesearchnet
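The find() call relies on a MongoDB projection to return only the md5 field; a hedged standalone equivalent with pymongo (server, database, and collection names are placeholders):

from pymongo import MongoClient

client = MongoClient('localhost', 27017)   # hypothetical server
samples = client['workbench']['samples']   # hypothetical db/collection
# Project only md5, suppressing the default _id field.
md5s = [doc['md5'] for doc in samples.find(None, {'_id': 0, 'md5': 1})]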
def InitFromAff4Object(self, file_obj, stat_entry=None, hash_entry=None, with_details=False):
    self.name = file_obj.urn.Basename()
    self.path = '/'.join(file_obj.urn.Path().split('/')[2:])
    self.is_directory = ('Container' in file_obj.behaviours)
    self.stat = (stat_entry or file_obj.Get(file_obj.Schema.STAT))
    self.hash = (hash_entry or file_obj.Get(file_obj.Schema.HASH, None))
    if (not self.is_directory):
        try:
            self.last_collected = file_obj.GetContentAge()
        except AttributeError:
            logging.debug("File-like object %s doesn't have GetContentAge defined.",
                          file_obj.__class__.__name__)
        if self.last_collected:
            self.last_collected_size = file_obj.Get(file_obj.Schema.SIZE)
    type_obj = file_obj.Get(file_obj.Schema.TYPE)
    if (type_obj is not None):
        self.age = type_obj.age
    if with_details:
        self.details = ApiAff4ObjectRepresentation().InitFromAff4Object(file_obj)
    return self
Initializes the current instance from an Aff4Stream. Args: file_obj: An Aff4Stream representing a file. stat_entry: An optional stat entry object to be used. If none is provided, the one stored in the AFF4 data store is used. hash_entry: An optional hash entry object to be used. If none is provided, the one stored in the AFF4 data store is used. with_details: True if all details of the Aff4Object should be included, false otherwise. Returns: A reference to the current instance.
codesearchnet
def __init__(self, label, r):
    self.label = label
    self.r = r
Initialise an Atom instance Args: label (Str): a label for this atom r (numpy.array): the atom coordinates Returns: None
juraj-google-style
def __init__(self, optimizer, scope=None, summary_labels=(), **kwargs):
    self.tf_optimizer_type = optimizer
    self.tf_optimizer = TFOptimizer.tf_optimizers[optimizer](**kwargs)
    super(TFOptimizer, self).__init__(scope=(scope or optimizer), summary_labels=summary_labels)
Creates a new optimizer instance of a TensorFlow optimizer. Args: optimizer: The name of the optimizer. Must be one of the keys of the tf_optimizers dict. **kwargs: Arguments passed on to the TensorFlow optimizer constructor as **kwargs.
juraj-google-style
def choose(self, locator=None, allow_label_click=None, **kwargs):
    self._check_with_label(
        "radio_button", True,
        locator=locator, allow_label_click=allow_label_click, **kwargs)
Find a radio button and mark it as checked. The radio button can be found via name, id, or label text. :: page.choose("Male") Args: locator (str, optional): Which radio button to choose. allow_label_click (bool, optional): Attempt to click the label to toggle state if element is non-visible. Defaults to :data:`capybara.automatic_label_click`. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
juraj-google-style
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
                            already_has_special_tokens: bool = False) -> List[int]:
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
    if token_ids_1 is None:
        return [1] + [0] * len(token_ids_0)
    return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1)
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
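To make the mask layout concrete, here is what the two return branches produce for hypothetical sequences of lengths 3 and 2 (one leading special token, plus a two-token separator between pair segments):

token_ids_0 = [101, 102, 103]  # hypothetical IDs
token_ids_1 = [201, 202]

mask_single = [1] + [0] * len(token_ids_0)
mask_pair = [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1)
assert mask_single == [1, 0, 0, 0]
assert mask_pair == [1, 0, 0, 0, 1, 1, 0, 0]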
def forward(self, hidden_features):
    hidden_features = self.flatten(hidden_features)
    hidden_features = self.dropout_layer(hidden_features)
    forecast = self.base_forecast_block(hidden_features)
    if isinstance(forecast, tuple):
        forecast = tuple(z.transpose(-1, -2) for z in forecast)
    else:
        forecast = forecast.transpose(-1, -2)
    if self.prediction_channel_indices is not None:
        if isinstance(forecast, tuple):
            forecast = tuple(z[..., self.prediction_channel_indices] for z in forecast)
        else:
            forecast = forecast[..., self.prediction_channel_indices]
    return forecast
Args: hidden_features (`torch.Tensor` of shape `(batch_size, num_patch, d_model)` in `flatten` mode or `(batch_size, n_vars, num_patch, d_model)` in `common_channel`/`mix_channel` mode.): Input hidden features. Returns: `torch.Tensor` of shape `(batch_size, prediction_length, nvars)`.
github-repos
def parse(ifp, pb_cls, **kwargs):
    mode = 'rb'
    if isinstance(ifp, str):
        istream = open(ifp, mode=mode, **kwargs)
    else:
        istream = open(fileobj=ifp, mode=mode, **kwargs)
    with istream:
        for data in istream:
            pb_obj = pb_cls()
            pb_obj.ParseFromString(data)
            (yield pb_obj)
Parse a stream. Args: ifp (string or file-like object): input stream. pb_cls (protobuf.message.Message.__class__): The class object of the protobuf message type encoded in the stream.
codesearchnet
def _resolve_and_add(nodes1, s_val, final_s, nodes2, t_val, final_t):
    (s_val, t_val) = _intersection_helpers.newton_refine(s_val, nodes1, t_val, nodes2)
    (s_val, success_s) = _helpers.wiggle_interval(s_val)
    (t_val, success_t) = _helpers.wiggle_interval(t_val)
    if (not (success_s and success_t)):
        return
    final_s.append(s_val)
    final_t.append(t_val)
Resolve a computed intersection and add to lists. We perform one Newton step to deal with any residual issues of high-degree polynomial solves (one of which depends on the already approximate ``x_val, y_val``). Args: nodes1 (numpy.ndarray): The nodes in the first curve. s_val (float): The approximate intersection parameter along ``nodes1``. final_s (List[float]): The list of accepted intersection parameters ``s``. nodes2 (numpy.ndarray): The nodes in the second curve. t_val (float): The approximate intersection parameter along ``nodes2``. final_t (List[float]): The list of accepted intersection parameters ``t``.
codesearchnet
def _create_3d_attention_mask_from_input_mask(self, from_tensor, to_mask):
    batch_size, from_seq_length = (from_tensor.shape[0], from_tensor.shape[1])
    to_seq_length = to_mask.shape[1]
    to_mask = torch.reshape(to_mask, (batch_size, 1, to_seq_length)).float()
    broadcast_ones = torch.ones(size=(batch_size, from_seq_length, 1),
                                dtype=torch.float32, device=to_mask.device)
    mask = broadcast_ones * to_mask
    return mask
Create 3D attention mask from a 2D tensor mask. Args: from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...]. to_mask: int32 Tensor of shape [batch_size, to_seq_length]. Returns: float Tensor of shape [batch_size, from_seq_length, to_seq_length].
github-repos
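A minimal sketch of the broadcast at work, assuming plain torch tensors with batch_size=2, from_seq_length=3 and to_seq_length=4:

import torch

to_mask = torch.tensor([[1, 1, 0, 0], [1, 0, 0, 0]])  # (2, 4)
to_mask = torch.reshape(to_mask, (2, 1, 4)).float()   # (2, 1, 4)
broadcast_ones = torch.ones(size=(2, 3, 1), dtype=torch.float32)
mask = broadcast_ones * to_mask                       # (2, 3, 4)
assert mask.shape == (2, 3, 4)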
def register_from_fields(self, *args):
    names = []
    for field in args:
        widget = self.resolve_widget(field)
        self.register(widget.config_name)
        if (widget.config_name not in names):
            names.append(widget.config_name)
    return names
Register config name from field widgets Arguments: *args: Fields that contains widget :class:`djangocodemirror.widget.CodeMirrorWidget`. Returns: list: List of registered config names from fields.
codesearchnet
def AppendPathEntries(cls, path, path_separator, number_of_wildcards, skip_first):
    if (path[(- 1)] == path_separator):
        path = path[:(- 1)]
    if skip_first:
        path = ''.join([path, path_separator, '*'])
        number_of_wildcards -= 1
    paths = []
    for _ in range(0, number_of_wildcards):
        path = ''.join([path, path_separator, '*'])
        paths.append(path)
    return paths
Appends glob wildcards to a path. This function will append glob wildcards "*" to a path, returning paths with an additional glob wildcard up to the specified number. E.g. given the path "/tmp" and a number of 2 wildcards, this function will return "/tmp/*", "/tmp/*/*". When skip_first is true the path with the first wildcard is not returned as a result. Args: path (str): path to append glob wildcards to. path_separator (str): path segment separator. number_of_wildcards (int): number of glob wildcards to append. skip_first (bool): True if the first path with a glob wildcard should be skipped as a result. Returns: list[str]: paths with glob wildcards.
codesearchnet
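A self-contained sketch of the same wildcard expansion, with both skip_first cases checked:

def append_path_entries(path, sep, number_of_wildcards, skip_first):
    # Standalone version of the classmethod above.
    if path[-1] == sep:
        path = path[:-1]
    if skip_first:
        path = ''.join([path, sep, '*'])
        number_of_wildcards -= 1
    paths = []
    for _ in range(number_of_wildcards):
        path = ''.join([path, sep, '*'])
        paths.append(path)
    return paths

assert append_path_entries('/tmp', '/', 2, False) == ['/tmp/*', '/tmp/*/*']
assert append_path_entries('/tmp', '/', 2, True) == ['/tmp/*/*']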
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    local_stream = utils.BytearrayStream()
    if self._unique_identifier:
        self._unique_identifier.write(local_stream, kmip_version=kmip_version)
    if self._lease_time:
        self._lease_time.write(local_stream, kmip_version=kmip_version)
    if self._last_change_date:
        self._last_change_date.write(local_stream, kmip_version=kmip_version)
    self.length = local_stream.length()
    super(ObtainLeaseResponsePayload, self).write(output_stream, kmip_version=kmip_version)
    output_stream.write(local_stream.buffer)
Write the data encoding the ObtainLease response payload to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is not defined.
codesearchnet
def _fill_meta_graph_def(meta_graph_def: meta_graph_pb2.MetaGraphDef,
                         saveable_view: _SaveableView,
                         signature_functions: Dict[str, Callable[..., Any]],
                         namespace_whitelist: List[str],
                         save_custom_gradients: bool,
                         create_saver: bool,
                         enable_debug_stripper: bool,
                         defaults=None) -> Tuple[_AssetInfo, ops.Graph]:
    resource_initializers = saveable_view.get_concrete_resource_initializers()
    exported_graph = ops.Graph()
    resource_initializer_ops = []
    with exported_graph.as_default():
        object_map, tensor_map, asset_info = saveable_view.map_resources()
        signatures = _generate_signatures(signature_functions, object_map, defaults)
    if save_custom_gradients:
        _trace_gradient_functions(exported_graph, saveable_view)
    with exported_graph.as_default():
        for resource_initializer_function in resource_initializers:
            asset_dependencies = []
            for capture in resource_initializer_function.graph.external_captures:
                asset_initializer = asset_info.asset_initializers_by_resource.get(capture, None)
                if asset_initializer is not None:
                    asset_dependencies.append(asset_initializer)
            with ops.control_dependencies(asset_dependencies):
                mapped_initializer = object_map[resource_initializer_function]
                resource_initializer_ops.append(mapped_initializer())
        resource_initializer_ops.extend(asset_info.asset_initializers_by_resource.values())
        with ops.control_dependencies(resource_initializer_ops):
            init_op = control_flow_ops.no_op()
        meta_graph_def.collection_def[constants.MAIN_OP_KEY].node_list.value.append(init_op.name)
        meta_graph_def.signature_def[constants.INIT_OP_SIGNATURE_KEY].CopyFrom(
            signature_def_utils.op_signature_def(init_op, constants.INIT_OP_SIGNATURE_KEY))

    def call_with_mapped_captures(function, args):
        if function in object_map:
            return object_map[function](*args)
        return saved_model_exported_concrete.ExportedConcreteFunction(function, tensor_map)(*args)

    for obj in object_map.values():
        obj._maybe_initialize_trackable()
    named_saveable_objects, registered_savers = save_util_v1.frozen_saveables_and_savers(
        graph_view=saveable_view.augmented_graph_view, object_map=object_map,
        to_graph=exported_graph, call_with_mapped_captures=call_with_mapped_captures)
    if create_saver:
        saver = functional_saver.MultiDeviceSaver.from_saveables(
            named_saveable_objects, registered_savers, call_with_mapped_captures)
        with exported_graph.as_default():
            saver_def = saver.to_proto()
            meta_graph_def.saver_def.CopyFrom(saver_def)
    _dependency_sorted_node_ids(saveable_view)
    graph_def, _ = exported_graph._as_graph_def(add_shapes=True, use_pybind11_proto=False)
    graph_def.library.registered_gradients.extend(saveable_view.gradient_defs)
    _verify_ops(graph_def, namespace_whitelist)
    meta_graph_def.graph_def.CopyFrom(graph_def)
    meta_graph_def.meta_info_def.tags.append(tag_constants.SERVING)
    if saveable_view.options.extra_tags:
        for tag in saveable_view.options.extra_tags:
            meta_graph_def.meta_info_def.tags.append(tag)
    meta_graph_def.meta_info_def.tensorflow_version = versions.__version__
    meta_graph_def.meta_info_def.tensorflow_git_version = versions.__git_version__
    meta_graph_def.meta_info_def.stripped_default_attrs = True
    meta_graph_def.asset_file_def.extend(asset_info.asset_defs)
    for signature_key, signature in signatures.items():
        meta_graph_def.signature_def[signature_key].CopyFrom(signature)
    meta_graph.strip_graph_default_valued_attrs(meta_graph_def)
    if sys.byteorder == 'big':
        utils_impl.swap_function_tensor_content(meta_graph_def, 'big', 'little')
    if enable_debug_stripper:
        _strip_debug_nodes(meta_graph_def)
    meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
        meta_graph.stripped_op_list_for_graph(meta_graph_def.graph_def))
    return (asset_info, exported_graph)
Generates a MetaGraph which calls `signature_functions`. Args: meta_graph_def: The MetaGraphDef proto to fill. saveable_view: The _SaveableView being exported. signature_functions: A dictionary mapping signature keys to concrete functions containing signatures to add to the MetaGraph. namespace_whitelist: List of strings containing whitelisted op namespaces. save_custom_gradients: Whether to save custom gradients. create_saver: Whether to add SavedModel's native save and restore ops. enable_debug_stripper: Whether to strip the debug nodes from the graph. defaults: A dictionary mapping signature_key to dictionary of user_specified_name to Tensor representing default values. Returns: A tuple of (_AssetInfo, Graph) containing the captured assets and exported Graph generated from tracing the saveable_view.
github-repos
def get_run_short_description(run_call_count, fetches, feed_dict, is_callable_runner=False):
    if is_callable_runner:
        return 'runner from make_callable()'
    description = 'run #%d: ' % run_call_count
    if isinstance(fetches, (tensor_lib.Tensor, ops.Operation, variables.Variable)):
        description += '1 fetch (%s); ' % common.get_graph_element_name(fetches)
    else:
        num_fetches = len(common.get_flattened_names(fetches))
        if num_fetches > 1:
            description += '%d fetches; ' % num_fetches
        else:
            description += '%d fetch; ' % num_fetches
    if not feed_dict:
        description += '0 feeds'
    elif len(feed_dict) == 1:
        for key in feed_dict:
            description += '1 feed (%s)' % (
                key if isinstance(key, str) or not hasattr(key, 'name') else key.name)
    else:
        description += '%d feeds' % len(feed_dict)
    return description
Get a short description of the run() call. Args: run_call_count: (int) Run call counter. fetches: Fetches of the `Session.run()` call. See doc of `Session.run()` for more details. feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()` for more details. is_callable_runner: (bool) whether a runner returned by Session.make_callable is being run. Returns: (str) A short description of the run() call, including information about the fetch(es) and feed(s).
github-repos
def assert_equal_graph_def_v2(expected: graph_pb2.GraphDef, actual: graph_pb2.GraphDef) -> None:
    assert_equal_graph_def(actual, expected, checkpoint_v2=True, hash_table_shared_name=True)
Asserts that two `GraphDef`s are (mostly) the same. Compares two `GraphDef` protos for equality, ignoring versions and ordering of nodes, attrs, and control inputs. Node names are used to match up nodes between the graphs, so the naming of nodes must be consistent. This function ignores randomized attribute values that may appear in V2 checkpoints. Args: expected: The `GraphDef` we expected. actual: The `GraphDef` we have. Raises: AssertionError: If the `GraphDef`s do not match. TypeError: If either argument is not a `GraphDef`.
github-repos
def from_directory(input_dir, optional_files=None):
    sub_d = {}
    for fname, ftype in [("INCAR", Incar), ("KPOINTS", Kpoints), ("POSCAR", Poscar), ("POTCAR", Potcar)]:
        fullzpath = zpath(os.path.join(input_dir, fname))
        sub_d[fname.lower()] = ftype.from_file(fullzpath)
    sub_d["optional_files"] = {}
    if optional_files is not None:
        for fname, ftype in optional_files.items():
            sub_d["optional_files"][fname] = \
                ftype.from_file(os.path.join(input_dir, fname))
    return VaspInput(**sub_d)
Read in a set of VASP input from a directory. Note that only the standard INCAR, POSCAR, POTCAR and KPOINTS files are read unless optional_filenames is specified. Args: input_dir (str): Directory to read VASP input from. optional_files (dict): Optional files to read in as well as a dict of {filename: Object type}. Object type must have a static method from_file.
juraj-google-style
def is_user_enrolled(cls, user, course_id, course_mode):
    enrollment_client = EnrollmentApiClient()
    try:
        enrollments = enrollment_client.get_course_enrollment(user.username, course_id)
        if enrollments and course_mode == enrollments.get('mode'):
            return True
    except HttpClientError as exc:
        logging.error(
            'Error while checking enrollment status of user %(user)s: %(message)s',
            dict(user=user.username, message=str(exc))
        )
    except KeyError as exc:
        logging.warning(
            'Error while parsing enrollment data of user %(user)s: %(message)s',
            dict(user=user.username, message=str(exc))
        )
    return False
Query the enrollment API and determine if a learner is enrolled in a given course run track. Args: user: The user whose enrollment needs to be checked course_mode: The mode with which the enrollment should be checked course_id: course id of the course where enrollment should be checked. Returns: Boolean: Whether or not enrollment exists
juraj-google-style
def _error_messages(self, driver_id):
    assert isinstance(driver_id, ray.DriverID)
    message = self.redis_client.execute_command(
        'RAY.TABLE_LOOKUP', ray.gcs_utils.TablePrefix.ERROR_INFO, '', driver_id.binary())
    if (message is None):
        return []
    gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(message, 0)
    error_messages = []
    for i in range(gcs_entries.EntriesLength()):
        error_data = ray.gcs_utils.ErrorTableData.GetRootAsErrorTableData(gcs_entries.Entries(i), 0)
        assert (driver_id.binary() == error_data.DriverId())
        error_message = {
            'type': decode(error_data.Type()),
            'message': decode(error_data.ErrorMessage()),
            'timestamp': error_data.Timestamp(),
        }
        error_messages.append(error_message)
    return error_messages
Get the error messages for a specific driver. Args: driver_id: The ID of the driver to get the errors for. Returns: A list of the error messages for this driver.
codesearchnet
def _ReadN(self, n):
    ret = ''
    while True:
        chunk = self._read_file.read((n - len(ret)))
        ret += chunk
        if ((len(ret) == n) or (not chunk)):
            return ret
Reads n characters from the input stream, or until EOF. This is equivalent to the current CPython implementation of read(n), but not guaranteed by the docs. Args: n: int Returns: string
codesearchnet
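The read-until-n-or-EOF loop can be exercised against an in-memory stream; a small sketch (the method above accumulates into a str, so a text-mode stream is assumed):

import io

def read_n(stream, n):
    ret = ''
    while True:
        chunk = stream.read(n - len(ret))
        ret += chunk
        if len(ret) == n or not chunk:
            return ret

buf = io.StringIO('abcdef')
assert read_n(buf, 4) == 'abcd'
assert read_n(buf, 10) == 'ef'  # EOF reached before 10 chars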
def recompose(src: Path, target_file: Path):
    mission_folder, assets_folder = NewMiz._get_subfolders(src)
    base_info = ujson.loads(Path(mission_folder, 'base_info.json').read_text(encoding=ENCODING))
    version = base_info['__version__']
    with Miz(target_file) as miz:
        LOGGER.info('re-composing mission table from folder: "%s"', mission_folder)
        miz.mission.d = NewMiz._recreate_dict_from_folder(mission_folder, version)
        for item in assets_folder.iterdir():
            target = Path(miz.temp_dir, item.name).absolute()
            if item.is_dir():
                if target.exists():
                    shutil.rmtree(target)
                shutil.copytree(item.absolute(), target)
            elif item.is_file():
                shutil.copy(item.absolute(), target)
        miz.zip(target_file, encode=False)
Recompose a Miz from json object Args: src: folder containing the json structure target_file: target Miz file
juraj-google-style
def get_value_spec(self, name: str) -> Optional[class_schema.ValueSpec]:
    for arg in self.named_args:
        if arg.name == name:
            return arg.value_spec
    if self.varkw is not None:
        return self.varkw.value_spec.schema.dynamic_field.value
    return None
Returns Value spec for an argument name. Args: name: Argument name. Returns: ValueSpec for the requested argument. If name is not found, value spec of wildcard keyword argument will be used. None will be returned if name does not exist in signature and wildcard keyword is not accepted.
github-repos
def subtract_business_days(self, date_tensor, num_days,
                           roll_convention=constants.BusinessDayConvention.NONE):
    return self.add_business_days(date_tensor, -num_days, roll_convention)
Subtracts given number of business days from given dates. Note that this is different from calling `subtract_period_and_roll` with PeriodType.DAY. For example, subtracting 5 business days from Friday gives the previous Friday (unless there are holidays on this week or previous Friday). Subtracting 5 days and rolling means landing on Sunday and then rolling either to Monday or to Friday, depending on the roll convention. If any of the dates in `date_tensor` are not business days, they will be rolled to business days before doing the subtraction. If `roll_convention` is `NONE`, and any dates are not business days, an exception is raised. Args: date_tensor: DateTensor of dates to advance from. num_days: Tensor of int32 type broadcastable to `date_tensor`. roll_convention: BusinessDayConvention. Determines how to roll a date that falls on a holiday. Returns: The resulting DateTensor.
github-repos
def add_citations(voevent, event_ivorns):
    if (not voevent.xpath('Citations')):
        etree.SubElement(voevent, 'Citations')
    voevent.Citations.extend(_listify(event_ivorns))
Add citations to other voevents. The schema mandates that the 'Citations' section must either be entirely absent, or non-empty - hence we require this wrapper function for its creation prior to listing the first citation. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. event_ivorns (:class:`voeventparse.misc.EventIvorn`): List of EventIvorn elements to add to citation list.
codesearchnet
def _CopyDateFromString(self, date_string):
    date_string_length = len(date_string)
    if date_string_length < 10:
        raise ValueError('Date string too short.')
    if date_string[4] != '-' or date_string[7] != '-':
        raise ValueError('Invalid date string.')
    try:
        year = int(date_string[0:4], 10)
    except ValueError:
        raise ValueError('Unable to parse year.')
    try:
        month = int(date_string[5:7], 10)
    except ValueError:
        raise ValueError('Unable to parse month.')
    try:
        day_of_month = int(date_string[8:10], 10)
    except ValueError:
        raise ValueError('Unable to parse day of month.')
    days_per_month = self._GetDaysPerMonth(year, month)
    if day_of_month < 1 or day_of_month > days_per_month:
        raise ValueError('Day of month value out of bounds.')
    return year, month, day_of_month
Copies a date from a string. Args: date_string (str): date value formatted as: YYYY-MM-DD Returns: tuple[int, int, int]: year, month, day of month. Raises: ValueError: if the date string is invalid or not supported.
juraj-google-style
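The days-per-month bound (including leap years) that _GetDaysPerMonth must supply can be cross-checked with the standard calendar module:

import calendar

def days_per_month(year, month):
    # calendar.monthrange returns (weekday_of_first_day, number_of_days).
    return calendar.monthrange(year, month)[1]

assert days_per_month(2024, 2) == 29  # leap year
assert days_per_month(2023, 2) == 28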
def equal_distribution_folds(y, folds=2):
    (n, classes) = y.shape
    dist = y.sum(axis=0).astype('float')
    dist /= dist.sum()
    index_list = []
    fold_dist = np.zeros((folds, classes), dtype='float')
    for _ in range(folds):
        index_list.append([])
    for i in range(n):
        if (i < folds):
            target_fold = i
        else:
            normed_folds = (fold_dist.T / fold_dist.sum(axis=1))
            how_off = (normed_folds.T - dist)
            target_fold = np.argmin(np.dot((y[i] - 0.5).reshape(1, (- 1)), how_off.T))
        fold_dist[target_fold] += y[i]
        index_list[target_fold].append(i)
    logger.debug('Fold distributions:')
    logger.debug(fold_dist)
    return index_list
Creates `folds` number of indices that has roughly balanced multi-label distribution. Args: y: The multi-label outputs. folds: The number of folds to create. Returns: `folds` number of indices that have roughly equal multi-label distributions.
codesearchnet
def to_json(self, **kwargs):
    config = self.get_config()
    timeseries_generator_config = {'class_name': self.__class__.__name__, 'config': config}
    return json.dumps(timeseries_generator_config, **kwargs)
Returns a JSON string containing the generator's configuration. Args: **kwargs: Additional keyword arguments to be passed to `json.dumps()`. Returns: A JSON string containing the generator configuration.
github-repos
def _process_sum_prod(self, func, **kwargs):
    axis = kwargs.get('axis', 0)
    min_count = kwargs.get('min_count', 0)

    def sum_prod_builder(df, **kwargs):
        return func(df, **kwargs)

    if (min_count <= 1):
        return self._full_reduce(axis, sum_prod_builder)
    else:
        return self._full_axis_reduce(axis, sum_prod_builder)
Calculates the sum or product of the DataFrame. Args: func: Pandas func to apply to DataFrame. ignore_axis: Whether to ignore axis when raising TypeError Return: A new QueryCompiler object with sum or prod of the object.
codesearchnet
def start(logdir):
    if logdir.startswith('gs://'):
        datalab.storage._api.Api.verify_permitted_to_read(logdir)
    port = datalab.utils.pick_unused_port()
    args = ['tensorboard', ('--logdir=' + logdir), ('--port=' + str(port))]
    p = subprocess.Popen(args)
    retry = 10
    while (retry > 0):
        if datalab.utils.is_http_running_on(port):
            basepath = os.environ.get('DATALAB_ENDPOINT_URL', '')
            url = ('%s/_proxy/%d/' % (basepath.rstrip('/'), port))
            html = ('<p>TensorBoard was started successfully with pid %d. ' % p.pid)
            html += ('Click <a href="%s" target="_blank">here</a> to access it.</p>' % url)
            IPython.display.display_html(html, raw=True)
            return p.pid
        time.sleep(1)
        retry -= 1
    raise Exception('Cannot start TensorBoard.')
Start a TensorBoard instance. Args: logdir: the logdir to run TensorBoard on. Raises: Exception if the instance cannot be started.
codesearchnet
def _compile_function_expression(self,
                                 expr: Expression,
                                 scope: Dict[str, TensorFluent],
                                 batch_size: Optional[int] = None,
                                 noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
    etype = expr.etype
    args = expr.args
    if (len(args) == 1):
        etype2func = {
            'abs': TensorFluent.abs, 'exp': TensorFluent.exp, 'log': TensorFluent.log,
            'sqrt': TensorFluent.sqrt, 'cos': TensorFluent.cos, 'sin': TensorFluent.sin,
            'tan': TensorFluent.tan, 'acos': TensorFluent.acos, 'arccos': TensorFluent.acos,
            'asin': TensorFluent.asin, 'arcsin': TensorFluent.asin, 'atan': TensorFluent.atan,
            'arctan': TensorFluent.atan, 'round': TensorFluent.round, 'ceil': TensorFluent.ceil,
            'floor': TensorFluent.floor}
        if (etype[1] not in etype2func):
            raise ValueError('Invalid unary function expression:\n{}'.format(expr))
        op = etype2func[etype[1]]
        x = self._compile_expression(args[0], scope, batch_size, noise)
        fluent = op(x)
    else:
        etype2func = {'pow': TensorFluent.pow, 'max': TensorFluent.max, 'min': TensorFluent.min}
        if (etype[1] not in etype2func):
            raise ValueError('Invalid binary function expression:\n{}'.format(expr))
        op = etype2func[etype[1]]
        x = self._compile_expression(args[0], scope, batch_size, noise)
        y = self._compile_expression(args[1], scope, batch_size, noise)
        fluent = op(x, y)
    return fluent
Compile a function expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[int]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
codesearchnet
def fixminimized(self, alphabet):
    endstate = len(list(self.states))
    for state in self.states:
        for char in alphabet:
            found = 0
            for arc in state.arcs:
                if self.isyms.find(arc.ilabel) == char:
                    found = 1
                    break
            if found == 0:
                self.add_arc(state.stateid, endstate, char)
    self[endstate].final = TropicalWeight(float('inf'))
    for char in alphabet:
        self.add_arc(endstate, endstate, char)
After pyfst minimization, all unused arcs are removed, and all sink states are removed. However this may break compatibility. Args: alphabet (list): The input alphabet Returns: None
juraj-google-style
def users_lookupByEmail(self, *, email: str, **kwargs) -> SlackResponse:
    kwargs.update({"email": email})
    return self.api_call("users.lookupByEmail", http_verb="GET", params=kwargs)
Find a user with an email address. Args: email (str): An email address belonging to a user in the workspace. e.g. 'spengler@ghostbusters.example.com'
juraj-google-style
def sg_mean(tensor, opt):
    return tf.reduce_mean(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
r"""Computes the mean of elements across axis of a tensor. See `tf.reduce_mean()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis : A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`.
juraj-google-style
def safe_sum(x, alt_value=(- np.inf), name=None):
    with tf.compat.v1.name_scope(name, 'safe_sum', [x, alt_value]):
        if (not is_list_like(x)):
            raise TypeError('Expected list input.')
        if (not x):
            raise ValueError('Input should not be empty.')
        in_shape = x[0].shape
        x = tf.stack(x, axis=(- 1))
        x = tf.reduce_sum(input_tensor=x, axis=(- 1))
        alt_value = np.array(alt_value, x.dtype.as_numpy_dtype)
        alt_fill = tf.fill(tf.shape(input=x), value=alt_value)
        x = tf.where(tf.math.is_finite(x), x, alt_fill)
        x.set_shape(x.shape.merge_with(in_shape))
        return x
Elementwise adds list members, replacing non-finite results with alt_value. Typically the `alt_value` is chosen so the `MetropolisHastings` `TransitionKernel` always rejects the proposal. Args: x: Python `list` of `Tensors` to elementwise add. alt_value: Python scalar used to replace any elementwise sums which would otherwise be non-finite. name: Python `str` name prefixed to Ops created by this function. Default value: `None` (i.e., "safe_sum"). Returns: safe_sum: `Tensor` representing the elementwise sum of list of `Tensor`s `x` or `alt_value` where sums are non-finite. Raises: TypeError: if `x` is not list-like. ValueError: if `x` is empty.
codesearchnet
def _GetTaskStorageFilePath(self, task):
    filename = '{0:s}.plaso'.format(task.identifier)
    return os.path.join(self._task_storage_path, filename)
Retrieves the path of a task storage file in the temporary directory. Args: task (Task): task. Returns: str: path of a task storage file in the temporary directory.
juraj-google-style
def get_column_names(self, X):
    if isinstance(X, pd.DataFrame):
        return X.columns
    return range(X.shape[1])
Return iterable containing columns for the given array X. Args: X: `numpy.ndarray` or `pandas.DataFrame`. Returns: iterable: columns for the given matrix.
codesearchnet
def garbage_collect_exports(export_dir_base, exports_to_keep):
    if (exports_to_keep is None):
        return
    version_paths = []
    for filename in tf_v1.gfile.ListDirectory(export_dir_base):
        path = os.path.join(tf.compat.as_bytes(export_dir_base), tf.compat.as_bytes(filename))
        if ((len(filename) == 10) and filename.isdigit()):
            version_paths.append((int(filename), path))
    oldest_version_path = sorted(version_paths)[:(- exports_to_keep)]
    for (_, path) in oldest_version_path:
        try:
            tf_v1.gfile.DeleteRecursively(path)
        except tf.errors.NotFoundError as e:
            logging.warn('Can not delete %s recursively: %s', path, e)
Deletes older exports, retaining only a given number of the most recent. Export subdirectories are assumed to be named with monotonically increasing integers; the most recent are taken to be those with the largest values. Args: export_dir_base: the base directory under which each export is in a versioned subdirectory. exports_to_keep: Number of exports to keep. Older exports will be garbage collected. Set to None to disable.
codesearchnet
def exists(path):
    filesystem = FileSystems.get_filesystem(path)
    return filesystem.exists(path)
Check if the provided path exists on the FileSystem. Args: path: string path that needs to be checked. Returns: boolean flag indicating if path exists
github-repos
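Usage goes through the apache_beam FileSystems registry, which dispatches on the path scheme (the path below is a placeholder):

from apache_beam.io.filesystems import FileSystems

# Works for local paths as well as registered schemes such as gs:// or hdfs://.
print(FileSystems.exists('/tmp/some_file'))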
def limit(self, accountID, **kwargs):
    return self.create(accountID, order=LimitOrderRequest(**kwargs))
Shortcut to create a Limit Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a LimitOrderRequest Returns: v20.response.Response containing the results from submitting the request
codesearchnet
def Parse(text):
    precondition.AssertType(text, Text)
    if compatibility.PY2:
        text = text.encode("utf-8")
    return yaml.safe_load(text)
Parses a YAML source into a Python object. Args: text: A YAML source to parse. Returns: A Python data structure corresponding to the YAML source.
juraj-google-style
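A quick illustration of the safe_load behaviour the wrapper relies on:

import yaml

doc = yaml.safe_load('foo: [1, 2]\nbar: baz\n')
assert doc == {'foo': [1, 2], 'bar': 'baz'}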
def verify(self, obj):
    if self.encoding == 'none' and not isinstance(obj, (bytes, bytearray)):
        raise ValidationError('Byte object was not either bytes or a bytearray',
                              type=obj.__class__.__name__)
    elif self.encoding == 'base64':
        try:
            data = base64.b64decode(obj)
            return data
        except TypeError:
            raise ValidationError("Could not decode base64 encoded bytes", obj=obj)
    elif self.encoding == 'hex':
        try:
            data = binascii.unhexlify(obj)
            return data
        except TypeError:
            raise ValidationError("Could not decode hex encoded bytes", obj=obj)
    return obj
Verify that the object conforms to this verifier's schema Args: obj (object): A python object to verify Returns: bytes or byterray: The decoded byte buffer Raises: ValidationError: If there is a problem verifying the object, a ValidationError is thrown with at least the reason key set indicating the reason for the lack of validation.
juraj-google-style
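The two decode branches map directly onto the standard library; a minimal sketch:

import base64
import binascii

assert base64.b64decode('aGVsbG8=') == b'hello'
assert binascii.unhexlify('68656c6c6f') == b'hello'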
def extract_class(jar, name):
    with jar.open(name) as entry:
        return LinkableClass(javatools.unpack_class(entry))
Extracts a LinkableClass from a jar. Args: jar: An open ZipFile instance. name: A string containing the binary name of a class. Raises: KeyError: The class does not exist in the jar.
juraj-google-style
def save(self, path=None, complevel=1, complib='zlib'):
    if (path is None):
        path = (self.hexuid + '.hdf5')
    elif os.path.isdir(path):
        path += ((os.sep + self.hexuid) + '.hdf5')
    elif (not (path.endswith('.hdf5') or path.endswith('.hdf'))):
        raise ValueError('File path must have a ".hdf5" or ".hdf" extension.')
    with pd.HDFStore(path, 'w', complevel=complevel, complib=complib) as store:
        store['kwargs'] = pd.Series()
        store.get_storer('kwargs').attrs.metadata = self._rel()
        fc = 0
        for (name, data) in self._data().items():
            if hasattr(data, '_revert_categories'):
                data._revert_categories()
            name = (name[1:] if name.startswith('_') else name)
            if isinstance(data, Field):
                fname = (('FIELD{}_'.format(fc) + name) + '/')
                store[(fname + 'data')] = pd.DataFrame(data)
                for (i, field) in enumerate(data.field_values):
                    ffname = ((fname + 'values') + str(i))
                    if isinstance(field, pd.Series):
                        store[ffname] = pd.Series(field)
                    else:
                        store[ffname] = pd.DataFrame(field)
                fc += 1
            elif isinstance(data, Series):
                s = pd.Series(data)
                if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
                    s = s.astype('O')
                store[name] = s
            elif isinstance(data, DataFrame):
                store[name] = pd.DataFrame(data)
            elif isinstance(data, SparseSeries):
                s = pd.SparseSeries(data)
                if isinstance(data.dtype, pd.types.dtypes.CategoricalDtype):
                    s = s.astype('O')
                store[name] = s
            elif isinstance(data, SparseDataFrame):
                store[name] = pd.SparseDataFrame(data)
            else:
                if (hasattr(data, 'dtype') and isinstance(data.dtype, pd.types.dtypes.CategoricalDtype)):
                    data = data.astype('O')
                else:
                    for col in data:
                        if isinstance(data[col].dtype, pd.types.dtypes.CategoricalDtype):
                            data[col] = data[col].astype('O')
                store[name] = data
            if hasattr(data, '_set_categories'):
                data._set_categories()
Save the container as an HDF5 archive. Args: path (str): Path where to save the container
codesearchnet
def write_jsonl_file(fname, data):
    if (not isinstance(data, list)):
        print('warning: malformed json data for file', fname)
        return
    with open(fname, 'w') as of:
        for row in data:
            if row.strip():
                of.write(('%s\n' % row.strip()))
Writes a jsonl file. Args: fname: path of the output file. data: list of json encoded data
codesearchnet
def get_pointgroup(self, tolerance=0.3):
    PA = self._get_point_group_analyzer(tolerance=tolerance)
    return PointGroupOperations(PA.sch_symbol, PA.symmops)
Returns a PointGroup object for the molecule. Args: tolerance (float): Tolerance to generate the full set of symmetry operations. Returns: :class:`~PointGroupOperations`
codesearchnet
def __init__(self, idx):
    self.idx = idx
    self.in_edges = []
    self.out_edges = []
Initialize the Vertex. Args: idx: The index of the vertex.
juraj-google-style
def bounding_box_from(points, i, i1, thr):
    pi = points[i]
    pi1 = points[i1]
    min_lat = min(pi.lat, pi1.lat)
    min_lon = min(pi.lon, pi1.lon)
    max_lat = max(pi.lat, pi1.lat)
    max_lon = max(pi.lon, pi1.lon)
    return min_lat - thr, min_lon - thr, max_lat + thr, max_lon + thr
Creates bounding box for a line segment Args: points (:obj:`list` of :obj:`Point`) i (int): Line segment start, index in points array i1 (int): Line segment end, index in points array thr (float): padding applied to every side of the box Returns: (float, float, float, float): bounding box as min lat, min lon, max lat and max lon
juraj-google-style
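A self-contained check of the padded box, using a stand-in Point with lat/lon attributes:

from collections import namedtuple

Point = namedtuple('Point', ['lat', 'lon'])
points = [Point(10.0, 20.0), Point(11.0, 19.0)]

def bounding_box_from(points, i, i1, thr):
    pi, pi1 = points[i], points[i1]
    return (min(pi.lat, pi1.lat) - thr, min(pi.lon, pi1.lon) - thr,
            max(pi.lat, pi1.lat) + thr, max(pi.lon, pi1.lon) + thr)

assert bounding_box_from(points, 0, 1, 0.5) == (9.5, 18.5, 11.5, 20.5)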
def start_worker(node_ip_address, object_store_name, raylet_name, redis_address,
                 worker_path, temp_dir, stdout_file=None, stderr_file=None):
    command = [sys.executable, '-u', worker_path,
               ('--node-ip-address=' + node_ip_address),
               ('--object-store-name=' + object_store_name),
               ('--raylet-name=' + raylet_name),
               ('--redis-address=' + str(redis_address)),
               ('--temp-dir=' + temp_dir)]
    process_info = start_ray_process(command, ray_constants.PROCESS_TYPE_WORKER,
                                     stdout_file=stdout_file, stderr_file=stderr_file)
    return process_info
This method starts a worker process. Args: node_ip_address (str): The IP address of the node that this worker is running on. object_store_name (str): The socket name of the object store. raylet_name (str): The socket name of the raylet server. redis_address (str): The address that the Redis server is listening on. worker_path (str): The path of the source code which the worker process will run. temp_dir (str): The path of the temp dir. stdout_file: A file handle opened for writing to redirect stdout to. If no redirection should happen, then this should be None. stderr_file: A file handle opened for writing to redirect stderr to. If no redirection should happen, then this should be None. Returns: ProcessInfo for the process that was started.
codesearchnet
def run(command, num_retries=1, timeout=(- 1), **kwargs):
    last_error = None
    for _ in range(num_retries):
        try:
            process = Subprocess(command, **kwargs)
            return process.run(timeout)
        except Exception as err:
            last_error = err
    raise last_error
Run a command with optional timeout and retries. Provides a convenience method for executing a subprocess with additional error handling. Arguments: command (list of str): The command to execute. num_retries (int, optional): If the subprocess fails, the number of attempts to execute it before failing. timeout (float, optional): If positive, the number of seconds to wait for subprocess completion before failing. **kwargs: Additional args to pass to Subprocess.__init__() Returns: Tuple of (int, str, str): Where the variables represent (exit status, stdout, stderr). Raises: SubprocessError: If the command fails after the given number of retries.
codesearchnet
def monitoring(line, cell=None):
    parser = datalab.utils.commands.CommandParser(
        prog='monitoring',
        description='Execute various Monitoring-related operations. Use "%monitoring <command> -h" for help on a specific command.')
    list_parser = parser.subcommand('list', 'List the metrics or resource types in a monitored project.')
    list_metric_parser = list_parser.subcommand('metrics', 'List the metrics that are available through the Monitoring API.')
    list_metric_parser.add_argument('-t', '--type', help='The type of metric(s) to list; can include wildchars.')
    list_metric_parser.add_argument('-p', '--project', help='The project on which to execute the request.')
    list_metric_parser.set_defaults(func=_list_metric_descriptors)
    list_resource_parser = list_parser.subcommand('resource_types', 'List the monitored resource types that are available through the Monitoring API.')
    list_resource_parser.add_argument('-p', '--project', help='The project on which to execute the request.')
    list_resource_parser.add_argument('-t', '--type', help='The resource type(s) to list; can include wildchars.')
    list_resource_parser.set_defaults(func=_list_resource_descriptors)
    list_group_parser = list_parser.subcommand('groups', 'List the Stackdriver groups in this project.')
    list_group_parser.add_argument('-p', '--project', help='The project on which to execute the request.')
    list_group_parser.add_argument('-n', '--name', help='The name of the group(s) to list; can include wildchars.')
    list_group_parser.set_defaults(func=_list_groups)
    return datalab.utils.commands.handle_magic_line(line, cell, parser)
Implements the monitoring cell magic for ipython notebooks. Args: line: the contents of the monitoring line. Returns: The results of executing the cell.
codesearchnet
def int64_gauge(urn, metric, ptransform=None) -> metrics_pb2.MonitoringInfo:
    labels = create_labels(ptransform=ptransform)
    if isinstance(metric, int):
        value = metric
        time_ms = int(time.time()) * 1000
    else:
        raise TypeError('Expected int metric type but received %s with value %s' %
                        (type(metric), metric))
    coder = coders.VarIntCoder()
    payload = coder.encode(time_ms) + coder.encode(value)
    return create_monitoring_info(urn, LATEST_INT64_TYPE, payload, labels)
Return the gauge monitoring info for the URN, metric and labels. Args: urn: The URN of the monitoring info/metric. metric: An int representing the value. The current time will be used for the timestamp. ptransform: The ptransform id used as a label.
github-repos
def _call_concrete_function(function, inputs):
    expected_structure = function.graph.structured_input_signature
    flatten_inputs = nest.flatten_up_to(expected_structure, inputs, expand_composites=True)
    flatten_expected = nest.flatten(expected_structure, expand_composites=True)
    tensor_inputs = []
    for arg, expected in zip(flatten_inputs, flatten_expected):
        if isinstance(expected, tensor.TensorSpec):
            tensor_inputs.append(ops.convert_to_tensor(arg, dtype_hint=expected.dtype))
        elif isinstance(expected, resource_variable_ops.VariableSpec):
            tensor_inputs.append(arg.handle)
    result = function._call_flat(tensor_inputs, function.captured_inputs)
    if isinstance(result, ops.Operation):
        return None
    return result
Calls a restored Function with structured inputs. This differs from `function.__call__` in that inputs and outputs are structured and that it casts inputs to tensors if needed. Note: this does not check that non-tensor inputs match. That should be done before via `_concrete_function_callable_with`. Args: function: ConcreteFunction to call. inputs: Structured inputs compatible with `function.graph.structured_input_signature`. Returns: The structured function output.
github-repos
def method_not_allowed(cls, errors=None):
    if cls.expose_status:
        cls.response.content_type = 'application/json'
        cls.response._status_line = '405 Method Not Allowed'
    return cls(405, None, errors).to_json
Shortcut API for HTTP 405 `Method not allowed` response. Args: errors (list): Response key/value data. Returns: WSResponse Instance.
juraj-google-style
def most_frequent(self, k):
    word_count = {w: self.word_count[w] for w in self.words[:k]}
    return CountedVocabulary(word_count=word_count)
Returns a vocabulary with the most frequent `k` words. Args: k (integer): specifies the top k most frequent words to be returned.
juraj-google-style
def get_cached_filename(self, filename, extention, settings_list=None):
    cached_name = "_".join([filename, self.get_hash()])
    return ".".join([cached_name, extention])
Creates a filename with an md5 cache string based on the settings list Args: filename (str): the filename without extention extention (str): the file extention without dot. (i.e. 'pkl') settings_list (dict|list): the settings list as list (optional) NB! The dictionaries have to be sorted or the hash id will change arbitrarily.
juraj-google-style
def _log_band_gap_information(bs):
    bg_data = bs.get_band_gap()
    if (not bg_data['direct']):
        logging.info('Indirect band gap: {:.3f} eV'.format(bg_data['energy']))
    direct_data = bs.get_direct_band_gap_dict()
    if bs.is_spin_polarized:
        direct_bg = min((spin_data['value'] for spin_data in direct_data.values()))
        logging.info('Direct band gap: {:.3f} eV'.format(direct_bg))
        for (spin, spin_data) in direct_data.items():
            direct_kindex = spin_data['kpoint_index']
            direct_kpoint = bs.kpoints[direct_kindex].frac_coords
            direct_kpoint = kpt_str.format(k=direct_kpoint)
            eq_kpoints = bs.get_equivalent_kpoints(direct_kindex)
            k_indices = ', '.join(map(str, eq_kpoints))
            b_indices = ', '.join([str((i + 1)) for i in spin_data['band_indices']])
            logging.info('  {}:'.format(spin.name.capitalize()))
            logging.info('    k-point: {}'.format(direct_kpoint))
            logging.info('    k-point indices: {}'.format(k_indices))
            logging.info('    Band indices: {}'.format(b_indices))
    else:
        direct_bg = direct_data[Spin.up]['value']
        logging.info('Direct band gap: {:.3f} eV'.format(direct_bg))
        direct_kindex = direct_data[Spin.up]['kpoint_index']
        direct_kpoint = kpt_str.format(k=bs.kpoints[direct_kindex].frac_coords)
        k_indices = ', '.join(map(str, bs.get_equivalent_kpoints(direct_kindex)))
        b_indices = ', '.join([str((i + 1)) for i in direct_data[Spin.up]['band_indices']])
        logging.info('  k-point: {}'.format(direct_kpoint))
        logging.info('  k-point indices: {}'.format(k_indices))
        logging.info('  Band indices: {}'.format(b_indices))
Log data about the direct and indirect band gaps. Args: bs (:obj:`~pymatgen.electronic_structure.bandstructure.BandStructureSymmLine`):
codesearchnet
def install(self, connection, partition, table_name=None, columns=None, materialize=False, logger=None):
    partition.localize()
    self._add_partition(connection, partition)
    fdw_table = partition.vid
    view_table = '{}_v'.format(fdw_table)
    if materialize:
        with connection.cursor() as cursor:
            view_exists = self._relation_exists(connection, view_table)
            if view_exists:
                logger.debug(
                    'Materialized view of the partition already exists.\n partition: {}, view: {}'
                    .format(partition.name, view_table))
            else:
                query = 'CREATE MATERIALIZED VIEW {} AS SELECT * FROM {};'.format(view_table, fdw_table)
                logger.debug(
                    'Creating new materialized view of the partition.\n partition: {}, view: {}, query: {}'
                    .format(partition.name, view_table, query))
                cursor.execute(query)
                cursor.execute('COMMIT;')
    final_table = (view_table if materialize else fdw_table)
    with connection.cursor() as cursor:
        view_q = 'CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {} '.format(partition.vid, final_table)
        cursor.execute(view_q)
        cursor.execute('COMMIT;')
    return partition.vid
Creates FDW or materialize view for given partition. Args: connection: connection to postgresql partition (orm.Partition): materialize (boolean): if True, create read-only table. If False create virtual table. Returns: str: name of the created table.
codesearchnet
def linear_interpolate(tensor1, tensor2, coeffs):
    interp_tensors = []
    for coeff in coeffs:
        interp_tensor = tensor1 + coeff * (tensor2 - tensor1)
        interp_tensors.append(interp_tensor)
    return tf.concat(interp_tensors, axis=0)
Linearly interpolate between two tensors at coeff. Args: tensor1: 4-D Tensor, shape=(NHWC) tensor2: 4-D Tensor, shape=(NHWC) coeffs: list of floats. Returns: interp_latents: 5-D Tensor, with interp_latents[i] representing interpolations at coeffs[i]. shape=(len(coeffs), NHWC)
juraj-google-style
def CheckAltTokens(filename, clean_lines, linenum, error):
    line = clean_lines.elided[linenum]
    if Match(r'^\s*#', line):
        return
    if ((line.find('/*') >= 0) or (line.find('*/') >= 0)):
        return
    for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
        error(filename, linenum, 'readability/alt_tokens', 2,
              ('Use operator %s instead of %s' % (
                  _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))))
Check alternative keywords being used in boolean expressions. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
codesearchnet
def remove(path):
    if os.path.isdir(path):
        return __rmtree(path)
    else:
        return __rmfile(path)
Delete a file or directory. Args: path (str): Path to the file or directory that needs to be deleted. Returns: bool: True if the operation is successful, False otherwise.
juraj-google-style
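The private __rmtree/__rmfile helpers are not shown; a standalone sketch with the standard-library calls they presumably wrap:

import os
import shutil

def remove(path):
    # Returns True on success, False otherwise.
    try:
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)
        return True
    except OSError:
        return False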
def parse(self, **global_args):
    if (self.build_file not in ParseContext._parsed):
        butcher_context = {}
        for str_to_exec in self._strs_to_exec:
            ast = compile(str_to_exec, '<string>', 'exec')
            exec_function(ast, butcher_context)
        with ParseContext.activate(self):
            startdir = os.path.abspath(os.curdir)
            try:
                os.chdir(self.build_file.path_on_disk)
                if (self.build_file not in ParseContext._parsed):
                    ParseContext._parsed.add(self.build_file)
                    eval_globals = copy.copy(butcher_context)
                    eval_globals.update({'ROOT_DIR': self.build_file.path_on_disk,
                                         '__file__': 'bogus please fix this'})
                    eval_globals.update(global_args)
                    exec_function(self.build_file.code, eval_globals)
            finally:
                os.chdir(startdir)
Entry point to parsing a BUILD file. Args: **global_args: Variables to include in the parsing environment.
codesearchnet
def variable_type(self, variable):
    var_type = 'String'
    if (variable is not None):
        variable = variable.strip()
        if re.match(self._variable_match, variable):
            var_type = re.search(self._variable_parse, variable).group(4)
    return var_type
Get the Type from the variable string or default to String type. The default type is "String" for those cases when the input variable contains no "DB variable" reference and is just a plain String. **Example Variable**:: #App:1234:output!StringArray returns **StringArray** **Example String**:: "My Data" returns **String** Args: variable (string): The variable to be parsed Returns: (string): The variable type.
codesearchnet
def FillDepressions(dem, epsilon=False, in_place=False, topology='D8'):
    if (type(dem) is not rdarray):
        raise Exception('A richdem.rdarray or numpy.ndarray is required!')
    if (topology not in ['D8', 'D4']):
        raise Exception('Unknown topology!')
    if (not in_place):
        dem = dem.copy()
    _AddAnalysis(dem, 'FillDepressions(dem, epsilon={0})'.format(epsilon))
    demw = dem.wrap()
    if epsilon:
        if (topology == 'D8'):
            _richdem.rdPFepsilonD8(demw)
        elif (topology == 'D4'):
            _richdem.rdPFepsilonD4(demw)
    elif (topology == 'D8'):
        _richdem.rdFillDepressionsD8(demw)
    elif (topology == 'D4'):
        _richdem.rdFillDepressionsD4(demw)
    dem.copyFromWrapped(demw)
    if (not in_place):
        return dem
Fills all depressions in a DEM. Args: dem (rdarray): An elevation model epsilon (float): If True, an epsilon gradient is imposed to all flat regions. This ensures that there is always a local gradient. in_place (bool): If True, the DEM is modified in place and there is no return; otherwise, a new, altered DEM is returned. topology (string): A topology indicator Returns: DEM without depressions.
codesearchnet
def getHeaders(self):
    headers = self._impl.getHeaders()
    return tuple((headers.getIndex(i) for i in range(self._impl.getNumCols())))
Get the headers of this DataFrame. Returns: The headers of this DataFrame.
codesearchnet
def PyParseRangeCheck(lower_bound, upper_bound):
    def CheckRange(string, location, tokens):
        try:
            check_number = tokens[0]
        except IndexError:
            check_number = -1
        if check_number < lower_bound:
            raise pyparsing.ParseException(
                'Value: {0:d} precedes lower bound: {1:d}'.format(
                    check_number, lower_bound))
        if check_number > upper_bound:
            raise pyparsing.ParseException(
                'Value: {0:d} exceeds upper bound: {1:d}'.format(
                    check_number, upper_bound))
    return CheckRange
Verify that a number is within a defined range. This is a callback method for pyparsing setParseAction that verifies that a read number is within a certain range. To use this method it needs to be defined as a callback method in setParseAction with the upper and lower bound set as parameters. Args: lower_bound (int): lower bound of the range. upper_bound (int): upper bound of the range. Returns: Function: callback method that can be used by pyparsing setParseAction.
juraj-google-style
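A sketch of wiring the callback into a grammar; the hour field below is purely illustrative.

import pyparsing

hour = pyparsing.Word(pyparsing.nums)
hour.setParseAction(lambda toks: [int(toks[0])])  # CheckRange expects a number in tokens[0]
hour.addParseAction(PyParseRangeCheck(lower_bound=0, upper_bound=23))

hour.parseString('17')  # passes
hour.parseString('99')  # raises pyparsing.ParseException: exceeds upper bound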
def _rolling_window(self, window_length, func1d, step=1, return_rolled=False):
    if window_length % 2 == 0:
        window_length += 1
    shape = self.shape[:-1] + (self.shape[-1], window_length)
    strides = self.strides + (step*self.strides[-1],)
    data = np.nan_to_num(self)
    # Pad width and mode reconstructed (the call was truncated in the source);
    # half a window of edge-padding on each side is assumed.
    data = np.pad(data, int(step*window_length
    rolled = np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides)
    result = np.apply_along_axis(func1d, -1, rolled)
    result[np.isnan(self)] = np.nan
    if return_rolled:
        return result, rolled
    else:
        return result
Private function. Smoother for other smoothing/conditioning functions. Args: window_length (int): the window length. func1d (function): a function that takes a 1D array and returns a scalar. step (int): if you want to skip samples in the shifted versions. Don't use this for smoothing, you will get strange results. return_rolled (bool): if True, also return the array of rolled windows. Returns: ndarray: the resulting array.
juraj-google-style
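A sketch of a call from an ndarray subclass that carries this method (e.g. a well-log curve); `curve` is assumed.

import numpy as np

smoothed = curve._rolling_window(window_length=11, func1d=np.median)
smoothed, windows = curve._rolling_window(5, np.mean, return_rolled=True)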
def project_group_token(self, group_tokens: tf.Tensor) -> tf.Tensor: projected_group_tokens = self.mlp_inter(group_tokens) projected_group_tokens = self.norm_post_tokens(projected_group_tokens) return projected_group_tokens
Args: group_tokens (tf.Tensor): group tokens, [batch_size, num_group_tokens, channels] Returns: projected_group_tokens (tf.Tensor): [batch_size, num_output_groups, channels]
github-repos
def parseSemver(text): txt = text.strip().lstrip('vV') ret = {} m = semver_re.match(txt) if (not m): return None d = m.groupdict() ret['major'] = int(d.get('maj')) ret['minor'] = int(d.get('min')) ret['patch'] = int(d.get('pat')) pre = d.get('pre') bld = d.get('bld') if pre: parts = pre.split('.') for part in parts: if (not part): return None try: int(part) except ValueError: continue else: if ((part[0] == '0') and (len(part) > 1)): return None ret['pre'] = pre if bld: parts = bld.split('.') for part in parts: if (not part): return None ret['build'] = bld return ret
Parse a Semantic Version string into its component parts. Args: text (str): A text string to parse into semver components. This string has whitespace and leading 'v' characters stripped off of it. Examples: Parse a string into its semver parts:: parts = parseSemver('v1.2.3') Returns: dict: The dictionary will contain the keys 'major', 'minor' and 'patch' pointing to integer values. The dictionary may also contain keys for 'build' and 'pre' information if that data is parsed out of a semver string. None is returned if the string is not a valid Semver string.
codesearchnet
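A usage sketch, assuming the module-level semver_re follows the standard SemVer grammar.

parts = parseSemver('v1.2.3-alpha.1+build.99')
# parts == {'major': 1, 'minor': 2, 'patch': 3, 'pre': 'alpha.1', 'build': 'build.99'}
assert parseSemver('not-a-version') is None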
def get_execution_role(sagemaker_session=None): if not sagemaker_session: sagemaker_session = Session() arn = sagemaker_session.get_caller_identity_arn() if ':role/' in arn: return arn message = 'The current AWS identity is not a role: {}, therefore it cannot be used as a SageMaker execution role' raise ValueError(message.format(arn))
Return the role ARN whose credentials are used to call the API. Throws an exception if the current AWS identity is not a role. Args: sagemaker_session (Session): Current sagemaker session Returns: (str): The role ARN
juraj-google-style
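Typical use inside a SageMaker notebook, where the notebook instance's identity is already a role; the ARN shown is made up.

from sagemaker import get_execution_role

role = get_execution_role()  # e.g. 'arn:aws:iam::123456789012:role/SageMakerRole'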
def createEditor(self, parent, option, index): editor = QtGui.QLineEdit(parent) return editor
Returns the widget used to edit the item specified by index for editing. The parent widget and style option are used to control how the editor widget appears. Args: parent (QWidget): parent widget. option (QStyleOptionViewItem): controls how editor widget appears. index (QModelIndex): model data index.
juraj-google-style
def validator(sample: rd.RepresentativeSample) -> rd.RepresentativeSample: if not isinstance(sample, Mapping): raise ValueError(f'Invalid representative sample type. Provide a mapping (usually a dict) of {{input_key: input_value}}. Got type: {type(sample)} instead.') if set(sample.keys()) != expected_input_keys: raise KeyError(f'Invalid input keys for representative sample. The function expects input keys of: {set(expected_input_keys)}. Got: {set(sample.keys())}. Please provide correct input keys for representative samples.') return sample
Validates a single instance of representative sample. This provides a simple check for `sample` that this is a mapping of {input_key: input_value}. Args: sample: A `RepresentativeSample` to validate. Returns: `sample` iff it is valid. Raises: ValueError: iff the sample isn't an instance of `Mapping`. KeyError: iff the sample does not have the set of input keys that match the input keys of the function.
github-repos
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True) if token_ids_1 is None: return [1] + [0] * len(token_ids_0) + [1] return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
github-repos
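A sketch of the masks this produces; the token ids are arbitrary, and which special tokens surround them depends on the tokenizer.

mask = tokenizer.get_special_tokens_mask([5, 6, 7])
# -> [1, 0, 0, 0, 1]             (special, seq A, special)
mask = tokenizer.get_special_tokens_mask([5, 6], [8, 9])
# -> [1, 0, 0, 1, 1, 0, 0, 1]    (special, seq A, special, special, seq B, special)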
def threshold(x, threshold, default_value): return ops.threshold(x, threshold, default_value)
Threshold activation function. It is defined as: `threshold(x) = x` if `x > threshold`, `threshold(x) = default_value` otherwise. Args: x: Input tensor. threshold: The value that decides when to retain or replace x. default_value: Value to assign when `x <= threshold`.
github-repos
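A quick numeric sketch of the piecewise behavior.

import numpy as np

threshold(np.array([-1.0, 0.5, 2.0]), 1.0, 0.0)
# -> [0.0, 0.0, 2.0]: values at or below 1.0 become 0.0, and 2.0 > 1.0 passes through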
def get_repo(task, source_env_prefix): repo = _extract_from_env_in_payload(task, source_env_prefix + '_HEAD_REPOSITORY') if repo is not None: repo = repo.rstrip('/') return repo
Get the repo for a task. Args: task (ChainOfTrust or LinkOfTrust): the trust object to inspect source_env_prefix (str): The environment variable prefix that is used to get repository information. Returns: str: the source url. None: if not defined for this task.
juraj-google-style
def preprocessing_fn(inputs): outputs = {} for key in taxi.DENSE_FLOAT_FEATURE_KEYS: outputs[taxi.transformed_name(key)] = transform.scale_to_z_score(_fill_in_missing(inputs[key])) for key in taxi.VOCAB_FEATURE_KEYS: outputs[taxi.transformed_name(key)] = transform.compute_and_apply_vocabulary(_fill_in_missing(inputs[key]), top_k=taxi.VOCAB_SIZE, num_oov_buckets=taxi.OOV_SIZE) for key in taxi.BUCKET_FEATURE_KEYS: outputs[taxi.transformed_name(key)] = transform.bucketize(_fill_in_missing(inputs[key]), taxi.FEATURE_BUCKET_COUNT) for key in taxi.CATEGORICAL_FEATURE_KEYS: outputs[taxi.transformed_name(key)] = _fill_in_missing(inputs[key]) taxi_fare = _fill_in_missing(inputs[taxi.FARE_KEY]) tips = _fill_in_missing(inputs[taxi.LABEL_KEY]) outputs[taxi.transformed_name(taxi.LABEL_KEY)] = tf.where(tf.is_nan(taxi_fare), tf.cast(tf.zeros_like(taxi_fare), tf.int64), tf.cast(tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64)) return outputs
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
github-repos
def __init__(self, resolver_context, file_object=None): super(DataRange, self).__init__(resolver_context) self._current_offset = 0 self._file_object = file_object if file_object: self._file_object_set_in_init = True self._range_offset = 0 self._range_size = file_object.get_size() else: self._file_object_set_in_init = False self._range_offset = -1 self._range_size = -1
Initializes a file-like object. If the file-like object is chained, do not use the parent file-like object separately. Args: resolver_context (Context): resolver context. file_object (Optional[file]): parent file-like object.
juraj-google-style
def fetch(self, addon_id, data={}, **kwargs): return super(Addon, self).fetch(addon_id, data, **kwargs)
Fetch addon for given Id Args: addon_id : Id for which addon object has to be retrieved Returns: addon dict for the given addon Id
juraj-google-style
def get_context(self, max_frames=None, missing_entities=[]): if ((not max_frames) or (max_frames > len(self.frame_stack))): max_frames = len(self.frame_stack) missing_entities = list(missing_entities) context = [] for i in range(max_frames): frame_entities = [entity.copy() for entity in self.frame_stack[i].entities] for entity in frame_entities: entity['confidence'] = (entity.get('confidence', 1.0) / (2.0 + i)) context += frame_entities result = [] if (len(missing_entities) > 0): for entity in context: if (entity.get('data') in missing_entities): result.append(entity) missing_entities.remove(entity.get('data')) else: result = context return result
Constructs a list of entities from the context. Args: max_frames(int): maximum number of frames to look back missing_entities(list of str): a list or set of tag names, as strings Returns: list: a list of entities
codesearchnet
def mean_pooling(self, model_output, attention_mask): token_embeddings = model_output[0] input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-09)
The function calculates the mean of token embeddings Args: model_output: The output of the model. attention_mask: This is a tensor that contains 1s for all input tokens and 0s for all padding tokens. Returns: The mean of the token embeddings.
github-repos
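A sketch pairing this with a Hugging Face encoder; the checkpoint name is an assumption, and the method is called as a free function for brevity.

import torch
from transformers import AutoModel, AutoTokenizer

name = 'sentence-transformers/all-MiniLM-L6-v2'  # hypothetical choice of encoder
tok = AutoTokenizer.from_pretrained(name)
model = AutoModel.from_pretrained(name)

batch = tok(['a sentence to embed'], padding=True, return_tensors='pt')
with torch.no_grad():
    out = model(**batch)
emb = mean_pooling(out, batch['attention_mask'])  # shape: (1, hidden_size)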
def by_location(self, location, cc=None): header, content = self._http_request(self.BASE_URL, location=location, cc=cc) return json.loads(content)
Perform a Yelp Neighborhood API Search based on a location specifier. Args: location - textual location specifier of form: "address, city, state or zip, optional country" cc - ISO 3166-1 alpha-2 country code. (Optional)
juraj-google-style
def show_backref(target, max_depth=3): if objgraph is None: raise NotImplementedError('objgraph is not installed.') string_io = io.StringIO() objgraph.show_backrefs(target, max_depth=max_depth, output=string_io) graph = string_io.getvalue() string_io.close() return graph
Returns a dot graph of all the objects that are referencing the target. An object reference graph is useful for debugging memory leaks such as circular references. objgraph provides a better visualization of the memory graph than most Python built-in utilities like gc.get_referrers(), which are not always human-readable. The dot graph is written to a string IO object and can be rendered with graphviz on the operating system. E.g. dot -Tpng {$dot_graph} -o output.png Args: target: The target object for the memory graph. max_depth: The maximum depth of the graph. By default 3 layers of references are used. Increasing this a lot may result in the graph growing too big. Returns: A string that contains the object reference graph. Raises: NotImplementedError: if objgraph is not installed.
github-repos
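A usage sketch; rendering the emitted dot graph is left to the caller.

leaked = [object()]
leaked.append(leaked)  # a trivial circular reference

dot_graph = show_backref(leaked, max_depth=4)
with open('refs.dot', 'w') as f:
    f.write(dot_graph)
# then render it: dot -Tpng refs.dot -o refs.png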
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): mru_values_dict = {} for subkey in registry_key.GetSubkeys(): username_value = subkey.GetValueByName('UsernameHint') if (username_value and username_value.data and username_value.DataIsString()): username = username_value.GetDataAsObject() else: username = 'N/A' mru_values_dict[subkey.name] = username event_data = windows_events.WindowsRegistryEventData() event_data.key_path = subkey.path event_data.offset = subkey.offset event_data.regvalue = {'Username hint': username} event_data.source_append = self._SOURCE_APPEND event = time_events.DateTimeValuesEvent( subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data) event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = mru_values_dict event_data.source_append = self._SOURCE_APPEND event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a Terminal Server Client Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
def get_victim_web_asset(self, main_type, sub_type, unique_id, asset_id, params=None): params = params or {} return self.victim_web_asset(main_type, sub_type, unique_id, asset_id, params=params)
Args: main_type: sub_type: unique_id: asset_id: params: Returns:
juraj-google-style
def forward(self, g_values: torch.Tensor) -> torch.Tensor: p_one_unique_token, p_two_unique_tokens = self._compute_latents(g_values) return 0.5 * ((g_values + 0.5) * p_two_unique_tokens + p_one_unique_token)
Computes the likelihoods P(g_values|watermarked). Args: g_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth)`): g-values (values 0 or 1) Returns: p(g_values|watermarked) of shape [batch_size, seq_len, watermarking_depth].
github-repos
def _bitResponseToValue(bytestring): _checkString(bytestring, description='bytestring', minlength=1, maxlength=1) RESPONSE_ON = '\x01' RESPONSE_OFF = '\x00' if (bytestring == RESPONSE_ON): return 1 elif (bytestring == RESPONSE_OFF): return 0 else: raise ValueError('Could not convert bit response to a value. Input: {0!r}'.format(bytestring))
Convert a response string to a numerical value. Args: bytestring (str): A string of length 1. Can be for example ``\\x01``. Returns: The converted value (int). Raises: TypeError, ValueError
codesearchnet
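A small sketch of the mapping (the function expects a string of length 1).

assert _bitResponseToValue('\x01') == 1
assert _bitResponseToValue('\x00') == 0
_bitResponseToValue('\x02')  # raises ValueError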
def tseries_between(self, tstart=None, tend=None):
    if self.tseries is None:
        return None
    ndat = self.tseries.shape[0]
    if tstart is None:
        istart = 0
    else:
        igm = 0
        igp = ndat - 1
        while igp - igm > 1:
            istart = igm + (igp - igm)
            if self.tseries.iloc[istart]['t'] >= tstart:
                igp = istart
            else:
                igm = istart
        istart = igp
    if tend is None:
        iend = None
    else:
        igm = 0
        igp = ndat - 1
        while igp - igm > 1:
            iend = igm + (igp - igm)
            if self.tseries.iloc[iend]['t'] > tend:
                igp = iend
            else:
                igm = iend
        iend = igm + 1
    return self.tseries.iloc[istart:iend]
Return time series data between requested times. Args: tstart (float): starting time. Set to None to start at the beginning of available data. tend (float): ending time. Set to None to stop at the end of available data. Returns: :class:`pandas.DataFrame`: slice of :attr:`tseries`.
juraj-google-style
def count(cls, cur, table: str, where_keys: list = None):
    if where_keys:
        where_clause, values = cls._get_where_clause_with_values(where_keys)
        query = cls._count_query_where.format(table, where_clause)
        q, t = query, values
    else:
        query = cls._count_query.format(table)
        q, t = query, ()
    yield from cur.execute(q, t)
    result = yield from cur.fetchone()
    return int(result[0])
gives the number of records in the table Args: table: a string indicating the name of the table where_keys: optional list of filter conditions used to build the WHERE clause Returns: an integer indicating the number of records in the table (matching the filters, if given)
juraj-google-style
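A sketch of driving this yield-from style coroutine (pre-async/await aiopg idiom); the model class, table, and filter shape are assumptions.

import asyncio

@asyncio.coroutine
def how_many(cur):
    total = yield from SomeModel.count(cur, 'users')
    active = yield from SomeModel.count(cur, 'users',
                                        where_keys=[{'active': True}])  # hypothetical filter shape
    return total, active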
def _postprocess_flat_outputs(outputs: Any, need_spmd_partitioning: bool) -> Tuple[List[Optional[core_types.Tensor]], List[ops.Operation], List[Any]]: if outputs is None: outputs = tuple() pack_template = nest.flatten(outputs, expand_composites=False) outputs = nest.flatten(outputs, expand_composites=True) outputs += (control_flow_ops.no_op(),) maybe_convert = lambda x: None if x is None else ops.convert_to_tensor(x) try: if need_spmd_partitioning: outputs = [o if isinstance(o, ops.Operation) else maybe_convert(o) for o in outputs] else: with ops.device(core(0)): outputs = [o if isinstance(o, ops.Operation) else maybe_convert(o) for o in outputs] except Exception as e: raise ValueError(f'TPU function return values must all either be Operations or convertible to Tensors. Got error: {e}') output_operations = [o for o in outputs if isinstance(o, ops.Operation)] output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)] if outputs != output_tensors + output_operations: raise ValueError('TPU functions must return zero-or more Tensor values followed by zero or more Operations.') if len(output_operations) > 1: pack_template = pack_template[:1 - len(output_operations)] new_output_tensors = [] for t in output_tensors: if t is None: new_output_tensors.append(None) elif need_spmd_partitioning: o = array_ops.identity(t) o.op._set_attr('_tpu_output_identity', attr_value_pb2.AttrValue(b=True)) new_output_tensors.append(o) else: with ops.device(t.device if t.device else core(0)): o = array_ops.identity(t) o.op._set_attr('_tpu_output_identity', attr_value_pb2.AttrValue(b=True)) new_output_tensors.append(o) return (new_output_tensors, output_operations, pack_template)
Validates non-flat outputs, add backs device assignments and other attrs. Args: outputs: Output from `computation` inside `tpu.rewrite`. need_spmd_partitioning: Whether XLA SPMD partitioning is needed. Returns: - Tensors extracted from outputs. - Operations extracted from outputs. - A pack template for use with nest.pack_sequence_as to pack the tensors.
github-repos
def HandleMessageBundles(self, request_comms, response_comms): (messages, source, timestamp) = self._communicator.DecodeMessages(request_comms) now = time.time() if messages: self.ReceiveMessages(source, messages) required_count = max(0, (self.max_queue_size - request_comms.queue_size)) tasks = [] message_list = rdf_flows.MessageList() if ((time.time() - now) < 10): tasks = self.DrainTaskSchedulerQueueForClient(source, required_count) message_list.job = tasks self._communicator.EncodeMessages(message_list, response_comms, destination=source, timestamp=timestamp, api_version=request_comms.api_version) return (source, len(messages))
Processes a queue of messages as passed from the client. We basically dispatch all the GrrMessages in the queue to the task scheduler for backend processing. We then retrieve from the TS the messages destined for this client. Args: request_comms: A ClientCommunication rdfvalue with messages sent by the client. source should be set to the client CN. response_comms: A ClientCommunication rdfvalue of jobs destined to this client. Returns: tuple of (source, message_count) where message_count is the number of messages received from the client with common name source.
codesearchnet
def scatterplot_matrix(df, features, downsample_frac=None, figsize=(15, 15)): if downsample_frac: df = df.sample(frac=downsample_frac) plt.figure(figsize=figsize) sns.pairplot(df[features], hue='target') plt.show()
Plot a scatterplot matrix for a list of features, colored by target value. Example: `scatterplot_matrix(X, X.columns.tolist(), downsample_frac=0.01)` Args: df: Pandas dataframe containing the target column (named 'target'). features: The list of features to include in the correlation plot. downsample_frac: Dataframe downsampling rate (0.1 to include 10% of the dataset). figsize: The size of the plot.
juraj-google-style
def get_module(module_abs_import): logger.debug("starting") logger.debug(f"loading module {module_abs_import}") try: imported_module = importlib.import_module(module_abs_import) logger.debug("done") return imported_module except ModuleNotFoundError as err: msg = ("The module doesn't exist. Looking for a file like this: " f"{module_abs_import}") extended_msg = (f"{module_abs_import}.py should be in your working " "dir or it should be installed to the python path." "\nIf you have 'package.sub.mod' your current working " "dir should contain ./package/sub/mod.py\n" "If you specified 'mymodulename', your current " "working dir should contain ./mymodulename.py\n" "If the module is not in your current working dir, it " "must exist in your current python path - so you " "should have run pip install or setup.py") logger.error(msg) raise PyModuleNotFoundError(extended_msg) from err
Use importlib to get the module dynamically. Get instance of the module specified by the module_abs_import. This means that module_abs_import must be resolvable from this package. Args: module_abs_import: string. Absolute name of module to import. Raises: PyModuleNotFoundError: if module not found.
juraj-google-style
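A usage sketch; the module path and attribute are assumptions about the caller's layout.

mod = get_module('mypackage.steps.fetch')  # expects ./mypackage/steps/fetch.py on the path
mod.run_step()  # hypothetical attribute; use whatever the module defines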
def get_qos_aggregated_configuration(self): uri = '{}{}'.format(self.data['uri'], self.QOS_AGGREGATED_CONFIGURATION) return self._helper.do_get(uri)
Gets the QoS aggregated configuration for the logical interconnect. Returns: dict: QoS Configuration.
codesearchnet
def _length(self): self._build_chunk_headers() length = 0 if self._data: for field in self._data: length += len(self._chunk_headers[field]) length += len(self._data[field]) length += 2 if self._files: for field in self._files: length += len(self._chunk_headers[field]) length += self._file_size(field) length += 2 length += len(self.boundary) length += 6 return length
Returns total length for this request. Returns: int. Length
codesearchnet