code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def _timestamp_query_param_from_json(value, field):
    """Coerce ``value`` to a UTC datetime, if set or not nullable.

    Args:
        value (str): The timestamp string.
        field (.SchemaField): The field corresponding to the value.

    Returns:
        Optional[datetime.datetime]: The parsed datetime object from
        ``value`` if the ``field`` is not null (otherwise it is None).
    """
    if _not_null(value, field):
        # Normalize to the strptime formats below: first space becomes the
        # RFC 3339 'T' separator; trailing 'Z' or '+00:00' UTC designators
        # are stripped (the result is tagged UTC explicitly afterwards).
        value = value.replace(" ", "T", 1)
        value = value.replace("Z", "")
        value = value.replace("+00:00", "")
        if "." in value:
            # Fractional seconds present -> microsecond format.
            return datetime.datetime.strptime(value, _RFC3339_MICROS_NO_ZULU).replace(
                tzinfo=UTC
            )
        else:
            return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION).replace(
                tzinfo=UTC
            )
    else:
        return None
Coerce 'value' to a datetime, if set or not nullable. Args: value (str): The timestamp. field (.SchemaField): The field corresponding to the value. Returns: Optional[datetime.datetime]: The parsed datetime object from ``value`` if the ``field`` is not null (otherwise it is :data:`None`).
juraj-google-style
def get_feature_variable_integer(self, feature_key, variable_key, user_id, attributes=None):
    """Returns value for a certain integer variable attached to a feature flag.

    Args:
        feature_key: Key of the feature whose variable's value is being accessed.
        variable_key: Key of the variable whose value is to be accessed.
        user_id: ID for user.
        attributes: Dict representing user attributes.

    Returns:
        Integer value of the variable. None if:
        - Feature key is invalid.
        - Variable key is invalid.
        - Mismatch with type of variable.
    """
    # Delegate to the shared typed-variable accessor with INTEGER as the
    # expected variable type.
    variable_type = entities.Variable.Type.INTEGER
    return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes)
Returns value for a certain integer variable attached to a feature flag. Args: feature_key: Key of the feature whose variable's value is being accessed. variable_key: Key of the variable whose value is to be accessed. user_id: ID for user. attributes: Dict representing user attributes. Returns: Integer value of the variable. None if: - Feature key is invalid. - Variable key is invalid. - Mismatch with type of variable.
codesearchnet
def __init__(self, server, sock, makefile=MakeFile):
    """Initialize HTTPConnection instance.

    Args:
        server (HTTPServer): web server object receiving this request
        sock (socket._socketobject): the raw socket object (usually TCP)
            for this connection
        makefile (file): a fileobject class for reading from the socket
    """
    self.server = server
    self.socket = sock
    # Buffered reader/writer wrappers over the raw socket.
    # NOTE(review): rbufsize/wbufsize are class-level attributes — assumed
    # defined on the class elsewhere in this file; confirm.
    self.rfile = makefile(sock, 'rb', self.rbufsize)
    self.wfile = makefile(sock, 'wb', self.wbufsize)
    self.requests_seen = 0
    self.peercreds_enabled = self.server.peercreds_enabled
    self.peercreds_resolve_enabled = self.server.peercreds_resolve_enabled
    # Memoize peer-credential lookups per connection: the result is stable
    # for the lifetime of the socket. Wrapping the *bound* methods here
    # (instead of decorating at class level) keeps the cache per-instance.
    self.resolve_peer_creds = (
        lru_cache(maxsize=1)(self.resolve_peer_creds)
    )
    self.get_peer_creds = (
        lru_cache(maxsize=1)(self.get_peer_creds)
    )
Initialize HTTPConnection instance. Args: server (HTTPServer): web server object receiving this request socket (socket._socketobject): the raw socket object (usually TCP) for this connection makefile (file): a fileobject class for reading from the socket
juraj-google-style
def rot_vec_mul(r: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    """Apply a rotation to a vector.

    The matrix-vector product is written out component-wise (rather than
    using a matmul) to avoid AMP downcasting.

    Args:
        r: [*, 3, 3] rotation matrices
        t: [*, 3] coordinate tensors

    Returns:
        [*, 3] rotated coordinates
    """
    components = torch.unbind(t, dim=-1)
    rotated_rows = [
        r[..., row, 0] * components[0]
        + r[..., row, 1] * components[1]
        + r[..., row, 2] * components[2]
        for row in range(3)
    ]
    return torch.stack(rotated_rows, dim=-1)
Applies a rotation to a vector. Written out by hand to avoid transfer to avoid AMP downcasting. Args: r: [*, 3, 3] rotation matrices t: [*, 3] coordinate tensors Returns: [*, 3] rotated coordinates
github-repos
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Create a schedule with a learning rate that decreases linearly from the
    initial lr set in the optimizer to 0, after a warmup period during which it
    increases linearly from 0 to the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    # Bind the step-count parameters up front; LambdaLR calls the resulting
    # single-argument callable with the current step.
    schedule_fn = partial(
        _get_linear_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
    )
    return LambdaLR(optimizer, schedule_fn, last_epoch)
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer ([`~torch.optim.Optimizer`]): The optimizer for which to schedule the learning rate. num_warmup_steps (`int`): The number of steps for the warmup phase. num_training_steps (`int`): The total number of training steps. last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. Return: `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
github-repos
def render_unregistered(error=None):
    """Render template file for the unregistered user.

    Args:
        error (str, default None): Optional error message.

    Returns:
        str: Template filled with data.
    """
    # An unregistered user has no seeder data or URL id, so those template
    # slots are explicitly blanked out.
    return template(
        read_index_template(),
        registered=False,
        error=error,
        seeder_data=None,
        url_id=None,
    )
Render template file for the unregistered user. Args: error (str, default None): Optional error message. Returns: str: Template filled with data.
codesearchnet
def func_dump(func):
    """Serializes a user defined function.

    Args:
        func: the function to serialize.

    Returns:
        A tuple `(code, defaults, closure)` where `code` is the base64-encoded
        marshalled code object, `defaults` is the function's default-argument
        tuple (or None) and `closure` is a tuple of closure cell contents
        (or None).
    """
    raw_code = marshal.dumps(func.__code__)
    if os.name == 'nt':
        # On Windows, normalize backslashes in the marshalled payload to
        # forward slashes (the only behavior difference between the two
        # branches of the original code; the base64 encoding is shared).
        raw_code = raw_code.replace(b'\\', b'/')
    code = codecs.encode(raw_code, 'base64').decode('ascii')
    defaults = func.__defaults__
    if func.__closure__:
        closure = tuple(c.cell_contents for c in func.__closure__)
    else:
        closure = None
    return (code, defaults, closure)
Serializes a user defined function. Args: func: the function to serialize. Returns: A tuple `(code, defaults, closure)`.
github-repos
def BasenamePath(self, path):
    """Determines the basename of the path.

    Args:
        path (str): path.

    Returns:
        str: basename of the path.
    """
    separator = self.PATH_SEPARATOR
    # A trailing separator would yield an empty basename; drop it first.
    if path.endswith(separator):
        path = path[:-1]
    return path.rpartition(separator)[-1]
Determines the basename of the path. Args: path (str): path. Returns: str: basename of the path.
codesearchnet
def output_shapes(self):
    """Returns the shape of each component of an element of this iterator.

    Returns:
        A nested structure of `tf.TensorShape` objects corresponding to each
        component of an element of this dataset.
    """
    # Convert each component's type spec to its legacy output-shape form,
    # preserving the nesting of the element spec.
    return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), self._element_spec)
Returns the shape of each component of an element of this iterator. Returns: A nested structure of `tf.TensorShape` objects corresponding to each component of an element of this dataset.
github-repos
def setup_data_stream(
        self,
        connection_factory: Callable[[tuple], Connection],
        data_stream_factory: Callable[[Connection], DataStream]=DataStream) -> \
        DataStream:
    """Create and setup a data stream.

    This function will set up passive and binary mode and handle
    connecting to the data connection.

    Args:
        connection_factory: A coroutine callback that returns a connection
        data_stream_factory: A callback that returns a data stream

    Coroutine.

    Returns:
        DataStream
    """
    # Request binary (image) transfer mode before opening the data channel.
    yield from self._control_stream.write_command(Command('TYPE', 'I'))
    reply = yield from self._control_stream.read_reply()
    self.raise_if_not_match('Binary mode', ReplyCodes.command_okay, reply)
    # Passive mode: the server tells us where to connect.
    address = yield from self.passive_mode()
    connection = yield from connection_factory(address)
    # Clear any state on a possibly reused connection before connecting.
    connection.reset()
    yield from connection.connect()
    data_stream = data_stream_factory(connection)
    return data_stream
Create and setup a data stream. This function will set up passive and binary mode and handle connecting to the data connection. Args: connection_factory: A coroutine callback that returns a connection data_stream_factory: A callback that returns a data stream Coroutine. Returns: DataStream
juraj-google-style
def render(self, program: moderngl.Program, mode=None, vertices=-1, first=0, instances=1):
    """Render the VAO.

    Args:
        program: The ``moderngl.Program``

    Keyword Args:
        mode: Override the draw mode (``TRIANGLES`` etc)
        vertices (int): The number of vertices to transform
        first (int): The index of the first vertex to start with
        instances (int): The number of instances
    """
    vao = self.instance(program)
    # Fall back to the VAO's own draw mode unless the caller overrides it.
    draw_mode = self.mode if mode is None else mode
    vao.render(draw_mode, vertices=vertices, first=first, instances=instances)
Render the VAO. Args: program: The ``moderngl.Program`` Keyword Args: mode: Override the draw mode (``TRIANGLES`` etc) vertices (int): The number of vertices to transform first (int): The index of the first vertex to start with instances (int): The number of instances
juraj-google-style
def getText(page, output='text'):
    """Extract a document page's text.

    Args:
        output: (str) text, html, dict, json, rawdict, xhtml or xml.

    Returns:
        The output of TextPage methods extractText, extractHTML, extractDICT,
        extractJSON, extractRAWDICT, extractXHTML or extractXML respectively.
        Default and misspelling choice is "text".
    """
    CheckParent(page)
    dl = page.getDisplayList()
    formats = ('text', 'html', 'json', 'xml', 'xhtml', 'dict', 'rawdict')
    # Parallel flags: 1 where the format keeps images.
    images = (0, 1, 1, 0, 1, 1, 1)
    try:
        f = formats.index(output.lower())
    except ValueError:
        # BUG FIX: was a bare `except:` that swallowed every error. An
        # unknown/misspelled format (ValueError from .index) falls back to
        # plain text; any other error now propagates.
        f = 0
    flags = (TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE)
    if images[f]:
        flags |= TEXT_PRESERVE_IMAGES
    tp = dl.getTextPage(flags)
    t = tp._extractText(f)
    # Release the display list and text page promptly.
    del dl
    del tp
    return t
Extract a document page's text. Args: output: (str) text, html, dict, json, rawdict, xhtml or xml. Returns: the output of TextPage methods extractText, extractHTML, extractDICT, extractJSON, extractRAWDICT, extractXHTML or extractXML respectively. Default and misspelling choice is "text".
codesearchnet
def price(self, valuation_date, market, model=None, pricing_context=None, name=None):
    """Returns the present value of the Cap/Floor on the valuation date.

    Args:
        valuation_date: A scalar `DateTensor` specifying the date on which
            valuation is being desired.
        market: A namedtuple of type `InterestRateMarket` which contains the
            necessary information for pricing the Cap/Floor.
        model: An optional input of type `InterestRateModelType` to specify
            which model to use for pricing.
            Default value: `None` in which case `LOGNORMAL_RATE` model is used.
        pricing_context: An optional input to provide additional parameters
            (such as model parameters) relevant for pricing.
        name: Python str. The name to give to the ops created by this
            function. Default value: `None` which maps to `"<name>_price"`.

    Returns:
        A Rank 1 `Tensor` of real type containing the modeled price of each
        cap (or floor) based on the input market data.

    Raises:
        ValueError: If an unsupported model is supplied to the function.
    """
    model = model or rc.InterestRateModelType.LOGNORMAL_RATE
    name = name or self._name + '_price'
    with tf.name_scope(name):
        valuation_date = dates.convert_to_date_tensor(valuation_date)
        if model == rc.InterestRateModelType.LOGNORMAL_RATE:
            caplet_prices = self._price_lognormal_rate(valuation_date, market, pricing_context)
        else:
            raise ValueError(f'Unsupported model {model}.')
        # Sum the caplet prices belonging to each contract to obtain the
        # cap/floor price per contract.
        return tf.math.segment_sum(caplet_prices, self._contract_index)
Returns the present value of the Cap/Floor on the valuation date. Args: valuation_date: A scalar `DateTensor` specifying the date on which valuation is being desired. market: A namedtuple of type `InterestRateMarket` which contains the necessary information for pricing the Cap/Floor. model: An optional input of type `InterestRateModelType` to specify which model to use for pricing. Default value: `None` in which case `LOGNORMAL_RATE` model is used. pricing_context: An optional input to provide additional parameters (such as model parameters) relevant for pricing. name: Python str. The name to give to the ops created by this function. Default value: `None` which maps to `"price"`. Returns: A Rank 1 `Tensor` of real type containing the modeled price of each cap (or floor) based on the input market data. Raises: ValueError: If an unsupported model is supplied to the function.
github-repos
def _ReadBooleanDataTypeDefinition(
    self, definitions_registry, definition_values, definition_name, is_member=False):
    """Reads a boolean data type definition.

    Args:
        definitions_registry (DataTypeDefinitionsRegistry): data type
            definitions registry.
        definition_values (dict[str, object]): definition values.
        definition_name (str): name of the definition.
        is_member (Optional[bool]): True if the data type definition is a
            member data type definition.

    Returns:
        BooleanDataTypeDefinition: boolean data type definition.
    """
    # Booleans are fixed-size types restricted to 1, 2 or 4 byte sizes.
    return self._ReadFixedSizeDataTypeDefinition(
        definitions_registry, definition_values, data_types.BooleanDefinition,
        definition_name, self._SUPPORTED_ATTRIBUTES_BOOLEAN,
        is_member=is_member, supported_size_values=(1, 2, 4))
Reads a boolean data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. definition_name (str): name of the definition. is_member (Optional[bool]): True if the data type definition is a member data type definition. Returns: BooleanDataTypeDefinition: boolean data type definition.
juraj-google-style
def list_inputs(self, args, screen_info=None):
    """Command handler for inputs.

    Show inputs to a given node.

    Args:
        args: Command-line arguments, excluding the command prefix, as a list
            of str.
        screen_info: Optional dict input containing screen information such
            as cols. (Unused here.)

    Returns:
        Output text lines as a RichTextLines object.
    """
    _ = screen_info
    parsed = self._arg_parsers['list_inputs'].parse_args(args)
    # Shared helper handles both inputs and outputs; do_outputs=False selects
    # the inputs direction.
    output = self._list_inputs_or_outputs(parsed.recursive, parsed.node_name, parsed.depth, parsed.control, parsed.op_type, do_outputs=False)
    node_name = debug_graphs.get_node_name(parsed.node_name)
    # Disable the "list_inputs" menu item since that is the current view.
    _add_main_menu(output, node_name=node_name, enable_list_inputs=False)
    return output
Command handler for inputs. Show inputs to a given node. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object.
github-repos
def get_assigned_value(self, name):
    """Get the assigned value of an attribute.

    Get the underlying value of an attribute. If value has not been set, will
    not return the default for the field.

    Args:
        name: Name of attribute to get.

    Returns:
        Value of attribute, None if it has not been set.
    """
    message_type = type(self)
    try:
        field = message_type.field_by_name(name)
    except KeyError:
        raise AttributeError('Message %s has no field %s' % (
            message_type.__name__, name))
    # __tags maps field numbers to explicitly assigned values; .get returns
    # None (not the field default) for unset fields.
    return self.__tags.get(field.number)
Get the assigned value of an attribute. Get the underlying value of an attribute. If value has not been set, will not return the default for the field. Args: name: Name of attribute to get. Returns: Value of attribute, None if it has not been set.
juraj-google-style
def image_data_format():
    """Returns the default image data format convention.

    Returns:
        A string, either `'channels_first'` or `'channels_last'`

    Example:
        >>> tf.keras.backend.image_data_format()
        'channels_last'
    """
    return _IMAGE_DATA_FORMAT
Returns the default image data format convention. Returns: A string, either `'channels_first'` or `'channels_last'` Example: >>> tf.keras.backend.image_data_format() 'channels_last'
github-repos
def FormatTime(fmt, stime=None):
    """A compatibility wrapper for the `strftime` function.

    It is guaranteed to always take unicode string as an argument and return
    an unicode string as a result.

    Args:
        fmt: A format string specifying formatting of the output.
        stime: A time representation as returned by `gmtime` or `localtime`.

    Returns:
        A human-readable representation of `stime`.
    """
    precondition.AssertType(fmt, str)
    precondition.AssertOptionalType(stime, time.struct_time)
    # Without an explicit struct_time, strftime formats the current time.
    if stime is None:
        strftime = time.strftime
    else:
        strftime = lambda fmt: time.strftime(fmt, stime)
    if PY2:
        # Python 2's strftime works on byte strings; round-trip via ASCII to
        # honor the unicode-in/unicode-out contract.
        return strftime(fmt.encode("ascii")).decode("ascii")
    else:
        return strftime(fmt)
A compatibility wrapper for the `strftime` function. It is guaranteed to always take unicode string as an argument and return an unicode string as a result. Args: fmt: A format string specifying formatting of the output. stime: A time representation as returned by `gmtime` or `localtime`. Returns: A human-readable representation of `stime`.
juraj-google-style
def insert_bytes(fobj, size, offset, BUFFER_SIZE=(2 ** 16)):
    """Insert size bytes of empty space starting at offset.

    fobj must be an open file object, open rb+ or equivalent. Mutagen tries
    to use mmap to resize the file, but falls back to a significantly slower
    method if mmap fails.

    Args:
        fobj (fileobj)
        size (int): The amount of space to insert
        offset (int): The offset at which to insert the space

    Raises:
        IOError
        ValueError: if size or offset is negative, or offset is past EOF.
    """
    if ((size < 0) or (offset < 0)):
        raise ValueError
    # Determine the current file size by seeking to EOF.
    fobj.seek(0, 2)
    filesize = fobj.tell()
    # Number of bytes that must shift right to make room at offset.
    movesize = (filesize - offset)
    if (movesize < 0):
        raise ValueError
    # Grow the file first, then move the tail into the new space.
    resize_file(fobj, size, BUFFER_SIZE)
    if (mmap is not None):
        try:
            mmap_move(fobj, (offset + size), offset, movesize)
        except mmap.error:
            # mmap can fail (e.g. on some filesystems); use the slow path.
            fallback_move(fobj, (offset + size), offset, movesize, BUFFER_SIZE)
    else:
        fallback_move(fobj, (offset + size), offset, movesize, BUFFER_SIZE)
Insert size bytes of empty space starting at offset. fobj must be an open file object, open rb+ or equivalent. Mutagen tries to use mmap to resize the file, but falls back to a significantly slower method if mmap fails. Args: fobj (fileobj) size (int): The amount of space to insert offset (int): The offset at which to insert the space Raises: IOError
codesearchnet
def get_jobs(self, name=None):
    """Retrieves jobs running on this resource in its instance.

    Args:
        name (str, optional): Only return jobs containing property **name**
            that matches `name`. `name` can be a regular expression. If
            `name` is not supplied, then all jobs are returned.

    Returns:
        list(Job): A list of jobs matching the given `name`.

    .. note:: If ``applicationResource`` is `False` an empty list is returned.
    """
    # Guard clause: resources without an application resource have no jobs.
    if not self.applicationResource:
        return []
    return self._get_elements(self.jobs, 'jobs', Job, None, name)
Retrieves jobs running on this resource in its instance. Args: name (str, optional): Only return jobs containing property **name** that matches `name`. `name` can be a regular expression. If `name` is not supplied, then all jobs are returned. Returns: list(Job): A list of jobs matching the given `name`. .. note:: If ``applicationResource`` is `False` an empty list is returned. .. versionadded:: 1.9
codesearchnet
def _load_variables_impl(config: Text, hosts: List[Tuple[int, Text]], variables: Dict[Text, Dict[Text, tf_variables.Variable]], table_config: tpu_embedding_v2_utils.TableConfig):
    """Load embedding tables onto TPU for each table and host.

    Args:
        config: A serialized TPUEmbeddingConfiguration proto.
        hosts: A list of CPU devices, one per host.
        variables: A dictionary of dictionaries of TPUEmbeddingVariables.
            First key is the table name, second key is 'parameters' or the
            optimizer slot name.
        table_config: A list of tf.tpu.experimental.embedding.TableConfig
            objects.
    """
    def select_fn(host_id):
        # Build a selector that picks this host's shard of each variable,
        # or zeros when a variable has fewer shards than there are hosts.
        def select_or_zeros(x):
            if host_id >= len(x.variables):
                return array_ops.zeros_like(x.variables[0])
            return x.variables[host_id]
        return select_or_zeros

    for host_id, host in enumerate(hosts):
        with ops.device(host):
            host_variables = nest.map_structure(select_fn(host_id), variables)
            for table in table_config:
                table.optimizer._load()(table_name=table.name, num_shards=len(hosts), shard_id=host_id, config=config, **host_variables[table.name])
                # Only attach the config to the first load op; subsequent
                # loads pass None so the config is not duplicated.
                config = None
Load embedding tables to onto TPU for each table and host. Args: config: A serialized TPUEmbeddingConfiguration proto. hosts: A list of CPU devices, on per host. variables: A dictionary of dictionaries of TPUEmbeddingVariables. First key is the table name, second key is 'parameters' or the optimizer slot name. table_config: A list of tf.tpu.experimental.embedding.TableConfig objects.
github-repos
def CopyFromDateTimeString(self, time_string):
    """Copies a Delphi TDateTime timestamp from a string.

    Args:
        time_string (str): date and time value formatted as:
            YYYY-MM-DD hh:mm:ss.######[+-]##:##
            Where # are numeric digits ranging from 0 to 9 and the seconds
            fraction can be either 3 or 6 digits. The time of day, seconds
            fraction and time zone offset are optional. The default time
            zone is UTC.

    Raises:
        ValueError: if the time string is invalid or not supported.
    """
    date_time_values = self._CopyDateTimeFromString(time_string)
    year = date_time_values.get('year', 0)
    month = date_time_values.get('month', 0)
    day_of_month = date_time_values.get('day_of_month', 0)
    hours = date_time_values.get('hours', 0)
    minutes = date_time_values.get('minutes', 0)
    seconds = date_time_values.get('seconds', 0)
    microseconds = date_time_values.get('microseconds', None)
    if year > 9999:
        # Delphi TDateTime cannot represent years beyond 9999.
        raise ValueError('Unsupported year value: {0:d}.'.format(year))
    timestamp = self._GetNumberOfSecondsFromElements(
        year, month, day_of_month, hours, minutes, seconds)
    # TDateTime stores the date as a fractional number of days since the
    # Delphi epoch; convert seconds to days then shift from the POSIX epoch.
    timestamp = float(timestamp) / definitions.SECONDS_PER_DAY
    timestamp += self._DELPHI_TO_POSIX_BASE
    if microseconds is not None:
        timestamp += float(microseconds) / definitions.MICROSECONDS_PER_DAY
    self._normalized_timestamp = None
    self._timestamp = timestamp
    self.is_local_time = False
Copies a Delphi TDateTime timestamp from a string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC. Raises: ValueError: if the time string is invalid or not supported.
juraj-google-style
def _NewMatchSection(self, val): section = {'criterion': val, 'config': {}} self.matches.append(section) self.section = section['config'] self.processor = self._ParseMatchGrp
Create a new configuration section for each match clause. Each match clause is added to the main config, and the criterion that will trigger the match is recorded, as is the configuration. Args: val: The value following the 'match' keyword.
codesearchnet
def _GetTimestamps(self, olecf_item):
    """Retrieves the timestamps from an OLECF item.

    Args:
        olecf_item (pyolecf.item): OLECF item.

    Returns:
        tuple[int, int]: creation and modification FILETIME timestamp, or
        (None, None) if unavailable.
    """
    if not olecf_item:
        return None, None
    # pyolecf raises OverflowError for timestamps outside the supported
    # range; treat those as 0 (not set) and log a warning.
    try:
        creation_time = olecf_item.get_creation_time_as_integer()
    except OverflowError as exception:
        logger.warning(
            'Unable to read the creation time with error: {0!s}'.format(
                exception))
        creation_time = 0
    try:
        modification_time = olecf_item.get_modification_time_as_integer()
    except OverflowError as exception:
        logger.warning(
            'Unable to read the modification time with error: {0!s}'.format(
                exception))
        modification_time = 0
    if not creation_time and not modification_time:
        return None, None
    # A creation time of all-ones marks "not set" in OLECF; normalize to 0.
    if creation_time == 0xffffffffffffffff:
        creation_time = 0
    return creation_time, modification_time
Retrieves the timestamps from an OLECF item. Args: olecf_item (pyolecf.item): OLECF item. Returns: tuple[int, int]: creation and modification FILETIME timestamp.
juraj-google-style
def maybe_broadcast_structure(from_structure: Any, to_structure: Any) -> Any:
    """Maybe broadcasts `from_structure` to `to_structure`.

    If `from_structure` is a singleton, it is tiled to match the structure of
    `to_structure`. Note that the elements in `from_structure` are not copied
    if this tiling occurs.

    Args:
        from_structure: A structure.
        to_structure: A structure.

    Returns:
        new_from_structure: Same structure as `to_structure`.
    """
    flat_source = tf.nest.flatten(from_structure)
    flat_target = tf.nest.flatten(to_structure)
    # A single-element source is tiled (by reference) across the target.
    if len(flat_source) == 1:
        flat_source = flat_source * len(flat_target)
    return tf.nest.pack_sequence_as(to_structure, flat_source)
Maybe broadcasts `from_structure` to `to_structure`. If `from_structure` is a singleton, it is tiled to match the structure of `to_structure`. Note that the elements in `from_structure` are not copied if this tiling occurs. Args: from_structure: A structure. to_structure: A structure. Returns: new_from_structure: Same structure as `to_structure`.
juraj-google-style
def _on_disconnect(self, result):
    """Callback invoked when a disconnection command finishes.

    Args:
        result (dict): result returned from the disconnection command.
    """
    success, _, context = self._parse_return(result)
    # Notify the caller first, then drop the connection bookkeeping.
    notify = context['callback']
    notify(context['connection_id'], self.id, success, "No reason given")
    self._remove_connection(context['handle'])
Callback called when disconnection command finishes Args: result (dict): result returned from diconnection command
juraj-google-style
def get_vertex(self, key):
    """Returns or creates a Vertex mapped by key.

    Args:
        key: A string reference for a vertex. May refer to a new Vertex in
            which case it will be created.

    Returns:
        The Vertex mapped to by key.
    """
    # EAFP: look the key up directly; create and register on a miss.
    try:
        return self.vertex_map[key]
    except KeyError:
        vertex = self.new_vertex()
        self.vertex_map[key] = vertex
        return vertex
Returns or Creates a Vertex mapped by key. Args: key: A string reference for a vertex. May refer to a new Vertex in which case it will be created. Returns: A the Vertex mapped to by key.
juraj-google-style
def tab(tab_name, element_list=None, section_list=None):
    """Returns a dictionary representing a new tab to display elements.

    This can be thought of as a simple container for displaying multiple
    types of information.

    Args:
        tab_name: The title to display.
        element_list: The list of elements to display. If a single element
            is given it will be wrapped in a list.
        section_list: A list of sections to display. If a single section is
            given it will be wrapped in a list.

    Returns:
        A dictionary with metadata specifying that it is to be rendered as a
        page containing multiple elements and/or sections.
    """
    result = {'Type': 'Tab', 'Title': tab_name}
    if element_list is not None:
        result['Elements'] = (
            element_list if isinstance(element_list, list) else [element_list])
    if section_list is not None:
        # BUG FIX: a single (non-list) section was previously mis-handled by
        # mutating 'Elements' instead of storing it under 'Sections'; wrap it
        # in a list, mirroring the element_list handling above.
        result['Sections'] = (
            section_list if isinstance(section_list, list) else [section_list])
    return result
Returns a dictionary representing a new tab to display elements. This can be thought of as a simple container for displaying multiple types of information. Args: tab_name: The title to display element_list: The list of elements to display. If a single element is given it will be wrapped in a list. section_list: A list of sections to display. Returns: A dictionary with metadata specifying that it is to be rendered as a page containing multiple elements and/or tab.
codesearchnet
def rotate_capture_handler_log(self, name):
    """Force a rotation of a handler's log file.

    Args:
        name: The name of the handler whose log file should be rotated.
    """
    # NOTE(review): dict.iteritems() is Python 2 only — this module appears
    # to target Python 2; confirm before running under Python 3.
    for (sc_key, sc) in self._stream_capturers.iteritems():
        # Each capturer entry is a tuple whose first item is the capturer;
        # scan its handlers for the matching name and rotate that log.
        for h in sc[0].capture_handlers:
            if (h['name'] == name):
                sc[0]._rotate_log(h)
Force a rotation of a handler's log file Args: name: The name of the handler who's log file should be rotated.
codesearchnet
def _CreateBudget(client):
    """Creates the budget.

    Args:
        client: an AdWordsClient instance.

    Returns:
        a suds.sudsobject.Object representation of the created budget.
    """
    # Local import keeps this fix self-contained in case uuid is not
    # imported at module level.
    import uuid

    budget_service = client.GetService('BudgetService', version='v201809')
    operation = {
        'operand': {
            # NOTE(review): the name literal was truncated in this copy of
            # the source; the unique uuid suffix matches the upstream Google
            # AdWords example — confirm against the original file.
            'name': 'Interplanetary Cruise Budget #%s' % uuid.uuid4(),
            'deliveryMethod': 'STANDARD',
            'amount': {
                'microAmount': 500000
            }
        },
        'operator': 'ADD'
    }
    budget = budget_service.mutate([operation])['value'][0]
    # Parenthesized print works under both Python 2 and 3 for a single arg.
    print('Budget with ID "%d" and name "%s" was created.' % (
        budget['budgetId'], budget['name']))
    return budget
Creates the budget. Args: client: an AdWordsClient instance. Returns: a suds.sudsobject.Object representation of the created budget.
juraj-google-style
def gene_panel(self, panel_id, version=None):
    """Fetch a gene panel.

    Args:
        panel_id (str): unique id for the panel
        version (str): version of the panel. If 'None' latest version will
            be returned

    Returns:
        gene_panel: gene panel object, or None if no panel was found.
    """
    query = {'panel_name': panel_id}
    if version:
        LOG.info('Fetch gene panel {0}, version {1} from database'.format(panel_id, version))
        query['version'] = version
        return self.panel_collection.find_one(query)
    else:
        LOG.info('Fetching gene panels %s from database', panel_id)
        # Sort descending on version so the first result is the latest.
        res = self.panel_collection.find(query).sort('version', (- 1))
        # NOTE(review): Cursor.count() is removed in newer PyMongo releases —
        # confirm the pinned driver version still supports it.
        if (res.count() > 0):
            return res[0]
        else:
            LOG.info('No gene panel found')
            return None
Fetch a gene panel. If no panel is sent return all panels Args: panel_id (str): unique id for the panel version (str): version of the panel. If 'None' latest version will be returned Returns: gene_panel: gene panel object
codesearchnet
def equal_to_current(cls, json, fields_to_ignore=('id', 'change_date', 'changed_by')):
    """Compares for equality this instance to a model instance constructed
    from the supplied JSON.

    This will ignore any fields in `fields_to_ignore`.

    Note that this method cannot handle fields with many-to-many
    associations, as those can only be set on a saved model instance (and
    saving the model instance will create a new entry). All many-to-many
    field entries will be removed before the equality comparison is done.

    Args:
        json: json representing an entry to compare
        fields_to_ignore: List of fields that should not be compared for
            equality. By default includes `id`, `change_date`, and
            `changed_by`.

    Returns:
        True if the checked fields are all equivalent, else False
    """
    # Strip many-to-many fields: they cannot be set on an unsaved instance.
    info = model_meta.get_field_info(cls)
    for (field_name, relation_info) in info.relations.items():
        if (relation_info.to_many and (field_name in json)):
            json.pop(field_name)
    new_instance = cls(**json)
    # Look up the current saved entry via the model's key fields.
    key_field_args = tuple((getattr(new_instance, key) for key in cls.KEY_FIELDS))
    current = cls.current(*key_field_args)
    if (current.id is not None):
        return current.fields_equal(new_instance, fields_to_ignore)
    # No current entry exists, so nothing can be equal to the candidate.
    return False
Compares for equality this instance to a model instance constructed from the supplied JSON. This will ignore any fields in `fields_to_ignore`. Note that this method cannot handle fields with many-to-many associations, as those can only be set on a saved model instance (and saving the model instance will create a new entry). All many-to-many field entries will be removed before the equality comparison is done. Args: json: json representing an entry to compare fields_to_ignore: List of fields that should not be compared for equality. By default includes `id`, `change_date`, and `changed_by`. Returns: True if the checked fields are all equivalent, else False
codesearchnet
class PerceiverMultimodalPreprocessor(AbstractPreprocessor):
    """Multimodal preprocessing for Perceiver Encoder.

    Inputs for each modality are preprocessed, then padded with trainable
    position embeddings to have the same number of channels.

    Args:
        modalities (`Mapping[str, PreprocessorType]`):
            Dict mapping modality name to preprocessor.
        mask_probs (`Dict[str, float]`):
            Dict mapping modality name to masking probability of that
            modality.
        min_padding_size (`int`, *optional*, defaults to 2):
            The minimum padding size for all modalities. The final output
            will have num_channels equal to the maximum channels across all
            modalities plus min_padding_size.
    """

    def __init__(self, modalities: Mapping[str, PreprocessorType], mask_probs: Optional[Mapping[str, float]]=None, min_padding_size: int=2):
        super().__init__()
        self.modalities = nn.ModuleDict(modalities)
        self.min_padding_size = min_padding_size
        self.mask_probs = mask_probs if mask_probs is not None else {}
        # Trainable per-modality padding that brings each modality's channel
        # count up to the common size (self.num_channels).
        self.padding = nn.ParameterDict({modality: nn.Parameter(torch.randn(1, self.num_channels - preprocessor.num_channels)) for modality, preprocessor in modalities.items()})
        # Trainable mask token per maskable modality.
        self.mask = nn.ParameterDict({modality: nn.Parameter(torch.randn(1, self.num_channels)) for modality, _ in self.mask_probs.items()})

    @property
    def num_channels(self) -> int:
        # Common channel size: maximum across modalities plus the minimum
        # padding, so every modality receives at least some padding.
        max_channel_size = max((processor.num_channels for _, processor in self.modalities.items()))
        common_channel_size = max_channel_size + self.min_padding_size
        return common_channel_size

    def forward(self, inputs: Mapping[str, torch.Tensor], pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True, interpolate_pos_encoding: bool=False) -> PreprocessorOutputType:
        """Preprocess each modality, pad to the common channel size,
        optionally mask samples, then concatenate along the sample axis.
        """
        padded = {}
        modality_sizes = {}
        inputs_without_pos = {}
        for modality, preprocessor in self.modalities.items():
            # Per-modality preprocessing; position encodings are applied by
            # each sub-preprocessor.
            output, _, inputs_without_pos[modality] = preprocessor(inputs[modality], pos=pos, network_input_is_1d=network_input_is_1d)
            batch_size, num_samples, num_channels = output.shape
            # Pad channels up to the common size with the trainable padding.
            pos_enc = self.padding[modality].expand(batch_size, -1, -1)
            padding = torch.broadcast_to(pos_enc, [batch_size, num_samples, self.num_channels - num_channels])
            output_padded = torch.cat([output, padding], dim=2)
            if modality in self.mask_probs:
                # Randomly replace samples with this modality's mask token,
                # with the configured Bernoulli probability.
                mask_token = self.mask[modality].expand(batch_size, -1, -1)
                mask_prob = self.mask_probs[modality]
                mask = torch.bernoulli(torch.full([batch_size, num_samples], mask_prob))
                mask = torch.unsqueeze(mask, dim=2).to(mask_token.device)
                output_padded = (1 - mask) * output_padded + mask * mask_token
            padded[modality] = output_padded
            modality_sizes[modality] = output_padded.shape[1]
        # Concatenate modalities in sorted-name order for determinism.
        padded_ls = [padded[k] for k in sorted(padded.keys())]
        final_inputs = torch.cat(padded_ls, dim=1)
        return (final_inputs, modality_sizes, inputs_without_pos)
Multimodal preprocessing for Perceiver Encoder. Inputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number of channels. Args: modalities (`Mapping[str, PreprocessorType]`): Dict mapping modality name to preprocessor. mask_probs (`Dict[str, float]`): Dict mapping modality name to masking probability of that modality. min_padding_size (`int`, *optional*, defaults to 2): The minimum padding size for all modalities. The final output will have num_channels equal to the maximum channels across all modalities plus min_padding_size.
github-repos
def _project_observable(self, input_key: str, input_observable: Any, get_hist_args: Dict[(str, Any)]=None, projection_name_args: Dict[(str, Any)]=None, **kwargs) -> Hist:
    """Perform a projection for a single observable.

    Note:
        All cuts on the original histograms will be reset when this function
        is completed.

    Args:
        input_key: Key to describe the input observable.
        input_observable: Observable to project from.
        get_hist_args: Arguments to pass to ``get_hist(...)``. Made available
            so the args can be cached to avoid a ``deepcopy`` when looping.
            Default: None, in which case they are retrieved automatically.
        projection_name_args: Arguments to pass to ``projection_name(...)``.
            Same caching rationale. Default: None.
        kwargs: Additional named args passed to projection_name(...) and
            output_key_name(...).

    Returns:
        tuple: (projected histogram, projection name, projection name args).
    """
    if (get_hist_args is None):
        get_hist_args = copy.deepcopy(kwargs)
    if (projection_name_args is None):
        projection_name_args = copy.deepcopy(kwargs)
    get_hist_args.update({'observable': input_observable})
    hist = self.get_hist(**get_hist_args)
    # Assemble the naming context: projection info, caller kwargs, then the
    # per-observable values (later updates win on key collisions).
    projection_name_args.update(self.projection_information)
    projection_name_args.update(kwargs)
    projection_name_args.update({'input_key': input_key, 'input_observable': input_observable, 'input_hist': hist})
    projection_name = self.projection_name(**projection_name_args)
    logger.debug(f'hist: {hist}')
    # Apply cuts that hold for every projection of this histogram.
    for axis in self.additional_axis_cuts:
        logger.debug(f'Apply additional axis hist range: {axis.name}')
        axis.apply_range_set(hist)
    # Ensure at least one (possibly empty) PDCA group so the projection loop
    # below runs at least once.
    if (self.projection_dependent_cut_axes == []):
        self.projection_dependent_cut_axes.append([])
    # A PDCA sharing an axis type with a projection axis would have its
    # range silently overwritten by the projection — reject that config.
    duplicated_axes = [PDCA for PA in self.projection_axes for PDCA_group in self.projection_dependent_cut_axes for PDCA in PDCA_group if (PDCA.axis_type == PA.axis_type)]
    if duplicated_axes:
        raise ValueError(f'Axis {duplicated_axes} is in the projection axes and the projection dependent cut axes. This configuration is not allowed, as the range in the PDCA will be overwritten by the projection axes! Please revise your configuration.')
    hists = []
    for (i, axes) in enumerate(self.projection_dependent_cut_axes):
        # Apply this group's cuts, project, then undo the cuts so the next
        # group starts from the same baseline.
        for axis in axes:
            logger.debug(f'Apply projection dependent hist range: {axis.name}')
            axis.apply_range_set(hist)
        projected_hist = self.call_projection_function(hist)
        projected_hist.SetName(f'{projection_name}_{i}')
        hists.append(projected_hist)
        self.cleanup_cuts(hist, cut_axes=axes)
    # Also reset the always-applied cuts before returning.
    self.cleanup_cuts(hist, cut_axes=self.additional_axis_cuts)
    # Sum the per-group projections into a single output histogram.
    output_hist = hists[0]
    for temp_hist in hists[1:]:
        output_hist.Add(temp_hist)
    output_hist.SetName(projection_name)
    # Detach from the current ROOT directory so the hist survives file closes.
    output_hist.SetDirectory(0)
    return (output_hist, projection_name, projection_name_args)
Perform a projection for a single observable. Note: All cuts on the original histograms will be reset when this function is completed. Args: input_key: Key to describe the input observable. input_observable: Observable to project from. get_hist_args: Arguments to pass to ``get_hist(...)``. Made available so the args can be cached to avoid a ``deepcopy`` when looping. Default: None. In this case, they will be retrieved automatically. projection_name_args: Arguments to pass to ``projection_name(...)``. Made available so the args can be cached to avoid a ``deepcopy`` when looping. Default: None. In this case, they will be retrieved automatically. kwargs: Additional named args to be passed to projection_name(...) and output_key_name(...). Returns: The projected histogram.
codesearchnet
def get(cls, user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):
    """Get full user agent string from parameters if supplied, falling back
    on the global user agent if set.

    Args:
        user_agent (Optional[str]): User agent string.
            HDXPythonLibrary/X.X.X- is prefixed.
        user_agent_config_yaml (Optional[str]): Path to YAML user agent
            configuration. Ignored if user_agent supplied. Defaults to
            ~/.useragent.yml.
        user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if
            user_agent supplied.

    Returns:
        str: Full user agent string

    Raises:
        UserAgentError: if no parameters are supplied and no global user
            agent has been set.
    """
    # Explicit parameters (or an environment-variable override) take
    # precedence over the class-level global.
    if (user_agent or user_agent_config_yaml or ('user_agent' in UserAgent._environment_variables(**kwargs))):
        return UserAgent._create(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)
    if cls.user_agent:
        return cls.user_agent
    else:
        raise UserAgentError('You must either set the global user agent: UserAgent.set_global(...) or pass in user agent parameters!')
Get full user agent string from parameters if supplied falling back on global user agent if set. Args: user_agent (Optional[str]): User agent string. HDXPythonLibrary/X.X.X- is prefixed. user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml. user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied. Returns: str: Full user agent string
codesearchnet
def bin_to_mac(bin, size=6):
    """Convert raw bytes into a MAC string.

    Args:
        bin (bytes): raw bytes of length `size`.
        size (int): expected length of `bin` (defaults to 6 for a MAC).

    Returns:
        str: String representation of the MAC address in lower case.

    Raises:
        Exception: if ``len(bin)`` is not ``size``.
    """
    if len(bin) != size:
        raise Exception("Invalid MAC address: %s" % (bin))
    # BUG FIX: the original iterated the buffer and hexlified each element,
    # which only works on Python 2 (iterating bytes yields ints on Python 3,
    # and hexlify returns bytes that cannot be joined with a str separator).
    # Hexlify the whole buffer once and split into two-character octets.
    hexed = binascii.hexlify(bin).decode('ascii')
    return ':'.join(hexed[i:i + 2] for i in range(0, len(hexed), 2))
Convert 6 bytes into a MAC string. Args: bin (str): hex string of lenth 6. Returns: str: String representation of the MAC address in lower case. Raises: Exception: if ``len(bin)`` is not 6.
juraj-google-style
def _get_prop_from_modelclass(modelclass, name):
    """Helper for FQL parsing to turn a property name into a property object.

    Args:
        modelclass: The model class specified in the query.
        name: The property name. This may contain dots which indicate
            sub-properties of structured properties.

    Returns:
        A Property object.

    Raises:
        KeyError: if the property doesn't exist and the model class doesn't
            derive from Expando.
    """
    # '__key__' is a pseudo-property referring to the entity key itself.
    if (name == '__key__'):
        return modelclass._key
    # Dotted names traverse into structured properties.
    parts = name.split('.')
    (part, more) = (parts[0], parts[1:])
    prop = modelclass._properties.get(part)
    if (prop is None):
        if issubclass(modelclass, model.Expando):
            # Expando models accept arbitrary property names.
            prop = model.GenericProperty(part)
        else:
            raise TypeError(('Model %s has no property named %r' % (modelclass._get_kind(), part)))
    while more:
        part = more.pop(0)
        # Only structured properties can be traversed further.
        if (not isinstance(prop, model.StructuredProperty)):
            raise TypeError(('Model %s has no property named %r' % (modelclass._get_kind(), part)))
        # Prefer a sub-property exposed directly on the structured property.
        maybe = getattr(prop, part, None)
        if (isinstance(maybe, model.Property) and (maybe._name == part)):
            prop = maybe
        else:
            # Fall back to the sub-model's property map.
            maybe = prop._modelclass._properties.get(part)
            if (maybe is not None):
                prop = getattr(prop, maybe._code_name)
            elif (issubclass(prop._modelclass, model.Expando) and (not more)):
                # Expando sub-model: synthesize a generic property for the
                # final, unknown path component.
                prop = model.GenericProperty()
                prop._name = name
            else:
                raise KeyError(('Model %s has no property named %r' % (prop._modelclass._get_kind(), part)))
    return prop
Helper for FQL parsing to turn a property name into a property object. Args: modelclass: The model class specified in the query. name: The property name. This may contain dots which indicate sub-properties of structured properties. Returns: A Property object. Raises: KeyError if the property doesn't exist and the model class doesn't derive from Expando.
codesearchnet
def _retrieve_problem(self, id_):
    """Resume polling for a problem previously submitted.

    Args:
        id_: Identification of the query.

    Returns:
        :obj:`Future`
    """
    # Construct a Future for the existing remote problem and hand it to the
    # client's polling machinery; the Future resolves asynchronously.
    future = Future(self, id_, self.return_matrix, None)
    self.client._poll(future)
    return future
Resume polling for a problem previously submitted. Args: id_: Identification of the query. Returns: :obj: `Future`
juraj-google-style
def to_CAG_agraph(self):
    """Returns a variable-only view of the GrFN in the form of an AGraph.

    Returns:
        type: A CAG constructed via variable influence in the GrFN object.
    """
    CAG = self.to_CAG()
    A = nx.nx_agraph.to_agraph(CAG)
    A.graph_attr.update({'dpi': 227, 'fontsize': 20, 'fontname': 'Menlo'})
    # NOTE(review): the color string literals were truncated in this copy of
    # the source; '#650021' is a placeholder reconstruction — confirm the
    # exact attribute dicts against the upstream repository.
    A.node_attr.update({'shape': 'rectangle', 'color': '#650021'})
    A.edge_attr.update({'color': '#650021'})
    return A
Returns a variable-only view of the GrFN in the form of an AGraph. Returns: type: A CAG constructed via variable influence in the GrFN object.
codesearchnet
def _blocking_poll(self, timeout=None):
    """Poll and wait for the Future to be resolved.

    Args:
        timeout (int): How long (in seconds) to wait for the operation to
            complete. If None, wait indefinitely.
    """
    # Already resolved: nothing to wait for.
    if self._result_set:
        return
    poll_with_deadline = self._retry.with_deadline(timeout)
    try:
        poll_with_deadline(self._done_or_raise)()
    except exceptions.RetryError:
        raise concurrent.futures.TimeoutError('Operation did not complete within the designated timeout.')
Poll and wait for the Future to be resolved. Args: timeout (int): How long (in seconds) to wait for the operation to complete. If None, wait indefinitely.
codesearchnet
def insert_data(self, data, include_index=False, index_name=None):
    """Stream the contents of a DataFrame or list of dicts into the table.

    Rows are pushed with the BigQuery streaming API in batches of at most
    500 rows per POST, with a pause between POSTs to respect streaming
    rate limits.  After all rows are pushed, blocks until the streaming
    buffer reports at least one row.

    Args:
        data: the pandas.DataFrame or list of dicts to insert.
        include_index: whether to include the DataFrame/list index as a
            column in the BigQuery table.
        index_name: for a list, if include_index is True, the column name
            for the index; defaults to 'Index'.

    Returns:
        The table (self), to allow chaining.

    Raises:
        Exception: if the table doesn't exist, the data schema doesn't
            match the table schema, or an insert fails.
    """
    max_rows_per_post = 500
    # NOTE(review): 0.05s between POSTs allows ~20 POSTs/second, not the
    # 10/second mentioned in the original docs -- confirm intended rate.
    post_interval = 0.05
    if (not self.exists()):
        raise Exception(('Table %s does not exist.' % self._full_name))
    data_schema = _schema.Schema.from_data(data)
    if isinstance(data, list):
        if include_index:
            if (not index_name):
                index_name = 'Index'
            data_schema._add_field(index_name, 'INTEGER')
    # Validate every data field against the table schema before pushing.
    table_schema = self.schema
    for data_field in data_schema:
        name = data_field.name
        table_field = table_schema[name]
        if (table_field is None):
            raise Exception(('Table does not contain field %s' % name))
        data_type = data_field.data_type
        table_type = table_field.data_type
        if (table_type != data_type):
            raise Exception(('Field %s in data has type %s but in table has type %s' % (name, data_type, table_type)))
    total_rows = len(data)
    total_pushed = 0
    # Unique job id used to build per-row insertIds for deduplication.
    job_id = uuid.uuid4().hex
    rows = []
    column_name_map = {}
    is_dataframe = isinstance(data, pandas.DataFrame)
    if is_dataframe:
        gen = data.reset_index(drop=(not include_index)).iterrows()
    else:
        gen = enumerate(data)
    for (index, row) in gen:
        if is_dataframe:
            row = row.to_dict()
        elif include_index:
            row[index_name] = index
        rows.append({'json': self._encode_dict_as_row(row, column_name_map), 'insertId': (job_id + str(index))})
        total_pushed += 1
        # Flush a batch when full, or when this was the final row.
        if ((total_pushed == total_rows) or (len(rows) == max_rows_per_post)):
            try:
                response = self._api.tabledata_insert_all(self._name_parts, rows)
            except Exception as e:
                raise e
            if ('insertErrors' in response):
                raise Exception(('insertAll failed: %s' % response['insertErrors']))
            time.sleep(post_interval)
            rows = []
    # Wait until the streaming buffer reports data (or disappears).
    while True:
        self._info = self._api.tables_get(self._name_parts)
        if (('streamingBuffer' not in self._info) or ('estimatedRows' not in self._info['streamingBuffer']) or (int(self._info['streamingBuffer']['estimatedRows']) > 0)):
            break
        time.sleep(2)
    return self
Insert the contents of a Pandas DataFrame or a list of dictionaries into the table. The insertion will be performed using at most 500 rows per POST, and at most 10 POSTs per second, as BigQuery has some limits on streaming rates. Args: data: the DataFrame or list to insert. include_index: whether to include the DataFrame or list index as a column in the BQ table. index_name: for a list, if include_index is True, this should be the name for the index. If not specified, 'Index' will be used. Returns: The table. Raises: Exception if the table doesn't exist, the table's schema differs from the data's schema, or the insert failed.
codesearchnet
def create_file_writer_v2(logdir, max_queue=None, flush_millis=None, filename_suffix=None, name=None, experimental_trackable=False, experimental_mesh=None):
    """Creates a summary file writer for the given log directory.

    Args:
        logdir: a string specifying the directory in which to write an
            event file.
        max_queue: the largest number of summaries to keep in a queue;
            flushes once the queue exceeds this.  Defaults to 10.
        flush_millis: the largest interval between flushes.  Defaults to
            120,000 ms.
        filename_suffix: optional suffix for the event file name.
            Defaults to `.v2`.
        name: a name for the op that creates the writer.
        experimental_trackable: if True, return a `TrackableResource`
            writer compatible with SavedModel.
        experimental_mesh: a `tf.experimental.dtensor.Mesh` instance used
            to route DTensor logging through the CPU host mesh.

    Returns:
        A SummaryWriter object.

    Raises:
        ValueError: if `logdir` is None, or (eagerly) if graph-tensor
            arguments are passed from inside a tf.function.
    """
    if logdir is None:
        raise ValueError('Argument `logdir` cannot be None')
    inside_function = ops.inside_function()
    with ops.name_scope(name, 'create_file_writer') as scope, ops.device('cpu:0'):
        # Run initialization inside init_scope so the writer resource is
        # created eagerly / lifted out of tf.function graphs.
        with ops.init_scope():
            if context.executing_eagerly():
                _check_create_file_writer_args(inside_function, logdir=logdir, max_queue=max_queue, flush_millis=flush_millis, filename_suffix=filename_suffix)
            logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
            if max_queue is None:
                max_queue = constant_op.constant(10)
            if flush_millis is None:
                flush_millis = constant_op.constant(2 * 60 * 1000)
            if filename_suffix is None:
                filename_suffix = constant_op.constant('.v2')

            def create_fn():
                # Use a unique shared_name eagerly; in graph mode derive
                # it from the surrounding name scope.
                if context.executing_eagerly():
                    shared_name = context.anonymous_name()
                else:
                    shared_name = ops.name_from_scope_name(scope)
                return gen_summary_ops.summary_writer(shared_name=shared_name, name=name)
            init_op_fn = functools.partial(gen_summary_ops.create_summary_file_writer, logdir=logdir, max_queue=max_queue, flush_millis=flush_millis, filename_suffix=filename_suffix)
            if experimental_trackable:
                return _TrackableResourceSummaryWriter(create_fn=create_fn, init_op_fn=init_op_fn, mesh=experimental_mesh)
            else:
                return _ResourceSummaryWriter(create_fn=create_fn, init_op_fn=init_op_fn, mesh=experimental_mesh)
Creates a summary file writer for the given log directory. Args: logdir: a string specifying the directory in which to write an event file. max_queue: the largest number of summaries to keep in a queue; will flush once the queue gets bigger than this. Defaults to 10. flush_millis: the largest interval between flushes. Defaults to 120,000. filename_suffix: optional suffix for the event file name. Defaults to `.v2`. name: a name for the op that creates the writer. experimental_trackable: a boolean that controls whether the returned writer will be a `TrackableResource`, which makes it compatible with SavedModel when used as a `tf.Module` property. experimental_mesh: a `tf.experimental.dtensor.Mesh` instance. When running with DTensor, the mesh (experimental_mesh.host_mesh()) will be used for bringing all the DTensor logging from accelerator to CPU mesh. Returns: A SummaryWriter object.
github-repos
def _extract_attrs(op, keys):
    """Extract constructor kwargs needed to reconstruct `op`.

    For each key, looks for (in order): an attr `k`, an attr `_k`, or an
    entry `op.parameters[k]`.

    Args:
        op: A `LinearOperator` instance.
        keys: A Python `tuple` of strings naming the constructor kwargs to
            extract from `op`.

    Returns:
        kwargs: A Python `dict` of kwargs for `op`'s constructor, keyed by
            `keys`.

    Raises:
        ValueError: if none of the three sources provides a value for a key.
    """
    kwargs = {}
    # Sentinel distinguishing "missing" from a legitimate None value.
    not_found = object()
    for k in keys:
        srcs = [getattr(op, k, not_found), getattr(op, '_' + k, not_found), getattr(op, 'parameters', {}).get(k, not_found)]
        if any((v is not not_found for v in srcs)):
            # First source that produced a value wins.
            kwargs[k] = [v for v in srcs if v is not not_found][0]
        else:
            raise ValueError(f"Could not determine an appropriate value for field `{k}` in object `{op}`. Looked for \n 1. an attr called `{k}`,\n 2. an attr called `_{k}`,\n 3. an entry in `op.parameters` with key '{k}'.")
        if k in op._composite_tensor_prefer_static_fields and kwargs[k] is not None:
            # Prefer a static (constant-folded) value when available.
            if tensor_util.is_tensor(kwargs[k]):
                static_val = tensor_util.constant_value(kwargs[k])
                if static_val is not None:
                    kwargs[k] = static_val
        # Numpy values are converted to plain Python containers/scalars.
        if isinstance(kwargs[k], (np.ndarray, np.generic)):
            kwargs[k] = kwargs[k].tolist()
    return kwargs
Extract constructor kwargs to reconstruct `op`. Args: op: A `LinearOperator` instance. keys: A Python `tuple` of strings indicating the names of the constructor kwargs to extract from `op`. Returns: kwargs: A Python `dict` of kwargs to `op`'s constructor, keyed by `keys`.
github-repos
def _normalize_edge(self, edge: EDGE) -> EDGE:
    """Return the canonical orientation of an undirected edge.

    Two edges connecting the same pair of nodes normalize to the same
    tuple, so normalized edges can be compared with the equality
    operator.

    Args:
        edge: Edge to normalize.

    Returns:
        The edge with the lexicographically smaller node (by row, then
        column) in the first position.
    """
    first, second = edge
    if (first.row, first.col) < (second.row, second.col):
        return (first, second)
    return (second, first)
Gives unique representative of the edge. Two edges are equivalent if they form an edge between the same nodes. This method returns representative of this edge which can be compared using equality operator later. Args: edge: Edge to normalize. Returns: Normalized edge with lexicographically lower node on the first position.
juraj-google-style
def run(
    self,
    inputs: Dict[str, Union[float, Iterable]],
    covers: Dict[str, Union[float, Iterable]],
    torch_size: Optional[int] = None,
) -> Union[float, Iterable]:
    """Execute the FIB over the given inputs with cover values installed.

    Args:
        inputs: Mapping from input node names to input value(s).
        covers: Mapping from cover node names to the value(s) to install
            on those nodes before execution; must supply exactly one
            entry per cover node.
        torch_size: Optional batch size for torch execution.

    Returns:
        The output(s) of executing the underlying GrFN.

    Raises:
        ValueError: if ``covers`` does not match the cover nodes in size.
    """
    if len(covers) != len(self.cover_nodes):
        raise ValueError("Incorrect number of cover values.")
    # Install each cover value on its node before delegating to the GrFN.
    for name in covers:
        self.nodes[name]["value"] = covers[name]
    return super().run(inputs, torch_size)
Executes the FIB over a particular set of inputs and returns the result. Args: inputs: Input set where keys are the names of input nodes in the GrFN and each key points to a set of input values (or just one). Returns: A set of outputs from executing the GrFN, one for every set of inputs.
juraj-google-style
def add_work_item(self, work_item):
    """Insert a WorkItem row into the database inside a transaction.

    Args:
        work_item: A WorkItem.

    NOTE(review): the SQL statement string is missing from this copy of
    the source (the first argument to execute() is empty, likely a
    triple-quoted INSERT statement lost during extraction); it must be
    restored before this can run.
    """
    with self._conn:
        self._conn.execute(
            , _work_item_to_row(work_item))
Add a WorkItems. Args: work_item: A WorkItem.
juraj-google-style
def from_http_status(status_code, message, **kwargs):
    """Create a :class:`GoogleAPICallError` from an HTTP status code.

    Args:
        status_code (int): The HTTP status code.
        message (str): The exception message.
        kwargs: Additional arguments passed to the
            :class:`GoogleAPICallError` constructor.

    Returns:
        GoogleAPICallError: An instance of the appropriate subclass of
        :class:`GoogleAPICallError` for the status code.
    """
    klass = exception_class_for_http_status(status_code)
    exc = klass(message, **kwargs)
    # Subclasses that don't pin a code fall back to the raw status code.
    if exc.code is None:
        exc.code = status_code
    return exc
Create a :class:`GoogleAPICallError` from an HTTP status code. Args: status_code (int): The HTTP status code. message (str): The exception message. kwargs: Additional arguments passed to the :class:`GoogleAPICallError` constructor. Returns: GoogleAPICallError: An instance of the appropriate subclass of :class:`GoogleAPICallError`.
juraj-google-style
def get_accounts(self, provider='aws'):
    """Get Accounts added to Spinnaker.

    Args:
        provider (str): What provider to find accounts for.

    Returns:
        list: list of dicts of Spinnaker credentials matching *provider*.

    Raises:
        AssertionError: Failure getting accounts from Spinnaker.
        ForemastError: No accounts match *provider*.
    """
    url = '{gate}/credentials'.format(gate=API_URL)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    # NOTE(review): asserts are stripped under `python -O`; callers rely
    # on AssertionError here, so this is kept -- confirm before changing.
    assert response.ok, 'Failed to get accounts: {0}'.format(response.text)
    all_accounts = response.json()
    self.log.debug('Accounts in Spinnaker:\n%s', all_accounts)
    filtered_accounts = [account for account in all_accounts
                         if account['type'] == provider]
    if not filtered_accounts:
        raise ForemastError('No Accounts matching {0}.'.format(provider))
    return filtered_accounts
Get Accounts added to Spinnaker. Args: provider (str): What provider to find accounts for. Returns: list: list of dicts of Spinnaker credentials matching _provider_. Raises: AssertionError: Failure getting accounts from Spinnaker.
codesearchnet
def _send(self, **req_kwargs):
    """Send an authenticated request to a Google API.

    Args:
        **req_kwargs: Arbitrary keyword arguments to pass to Requests.

    Returns:
        requests.Response: The raw response.

    Raises:
        LoginException: If :py:meth:`login` has not been called.
    """
    token = self._auth.getAuthToken()
    if token is None:
        raise exception.LoginException('Not logged in')
    # NOTE(review): if the caller already supplied 'headers', no
    # Authorization header is added -- confirm this is intended.
    req_kwargs.setdefault('headers', {'Authorization': ('OAuth ' + token)})
    return self._session.request(**req_kwargs)
Send an authenticated request to a Google API. Args: **req_kwargs: Arbitrary keyword arguments to pass to Requests. Return: requests.Response: The raw response. Raises: LoginException: If :py:meth:`login` has not been called.
codesearchnet
def predict(self, x_test):
    """Return the model's predictions for the given test data.

    Args:
        x_test: array-like, shape = (n_samples, sent_length).
            Test samples.

    Returns:
        y_pred: array-like, shape = (n_samples, sent_length).
            Prediction labels for x_test.

    Raises:
        OSError: if no model has been loaded yet.
    """
    if not self.model:
        raise OSError('Could not find a model. Call load(dir_path).')
    # Sentence lengths are captured lazily before any transformation.
    lengths = map(len, x_test)
    transformed = self.p.transform(x_test)
    raw_pred = self.model.predict(transformed)
    return self.p.inverse_transform(raw_pred, lengths)
Returns the prediction of the model on the given test data. Args: x_test : array-like, shape = (n_samples, sent_length) Test samples. Returns: y_pred : array-like, shape = (n_smaples, sent_length) Prediction labels for x.
codesearchnet
def _save_work_results(self, run_stats, scores, num_processed_images, filename):
    """Save per-submission statistics to a CSV file.

    Saved statistics include score; number of completed and failed
    batches; number of processed images; and min/max/median/mean batch
    evaluation times.

    Args:
        run_stats: dict with runtime statistics for submissions, as
            produced by WorkPiecesBase.compute_work_statistics.
        scores: dict mapping submission ids to scores.
        num_processed_images: dict with the number of successfully
            processed images per submission.
        filename: output CSV filename.
    """
    with open(filename, 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['SubmissionID', 'ExternalSubmissionId', 'Score', 'CompletedBatches', 'BatchesWithError', 'ProcessedImages', 'MinEvalTime', 'MaxEvalTime', 'MedianEvalTime', 'MeanEvalTime', 'ErrorMsg'])
        for submission_id in sorted(iterkeys(run_stats)):
            # Missing stats default to NaN for every statistic.
            stat = run_stats.get(submission_id, collections.defaultdict((lambda : float('NaN'))))
            external_id = self.submissions.get_external_id(submission_id)
            error_msg = ''
            # Pop error messages until a reportable one is found, skipping
            # local-copy failures which are not the submission's fault.
            while ((not error_msg) and stat['error_messages']):
                error_msg = stat['error_messages'].pop()
                if error_msg.startswith('Cant copy adversarial batch locally'):
                    error_msg = ''
            writer.writerow([submission_id, external_id, scores.get(submission_id, None), stat['completed'], stat['num_errors'], num_processed_images.get(submission_id, None), stat['min_eval_time'], stat['max_eval_time'], stat['median_eval_time'], stat['mean_eval_time'], error_msg])
Saves statistics about each submission. Saved statistics include score; number of completed and failed batches; min, max, average and median time needed to run one batch. Args: run_stats: dictionary with runtime statistics for submissions, can be generated by WorkPiecesBase.compute_work_statistics scores: dictionary mapping submission ids to scores num_processed_images: dictionary with number of successfully processed images by each submission, one of the outputs of ClassificationBatches.compute_classification_results filename: output filename
codesearchnet
def __init__(self, live_api_processor: live_model.LiveProcessor, chattiness: float=1.0, unsafe_string_list: list[str] | None=None):
    """Initializes the processor.

    Args:
        live_api_processor: The live API processor to use.
        chattiness: Probability of triggering a comment; set to 0 to
            disable commenting.
        unsafe_string_list: Strings treated as unsafe content.  If None,
            no unsafe-content matching is installed.  If set, a
            MatchProcessor is chained after the live processor to flag
            occurrences in the output transcription.
    """
    self._processor = live_api_processor
    self._chattiness = chattiness
    self._commentator = CommentatorStateMachine()
    # Rolling window of the last 50 time-to-first-token measurements.
    self.ttfts = collections.deque(maxlen=50)
    self._unsafe_string_list = unsafe_string_list
    if unsafe_string_list is not None:
        # Escape each string so it matches literally inside the regex.
        pattern = '|'.join((re.escape(s) for s in unsafe_string_list))
        # Flush the matcher whenever the model signals any end-of-output
        # condition, so partial matches don't linger across turns.
        self._processor += text.MatchProcessor(pattern=pattern, substream_input='output_transcription', substream_output='unsafe_regex', remove_from_input_stream=False, flush_fn=lambda x: x.get_metadata('generation_complete') or x.get_metadata('interrupted') or x.get_metadata('interrupt_request') or x.get_metadata('turn_complete') or x.get_metadata('go_away'))
Initializes the processor. Args: live_api_processor: The live API processor to use. chattiness: Probability of triggering a comment when the model has finished talking or every 3 seconds. Set to 0 to disable commenting. unsafe_string_list: The strings to use for unsafe content. If None, the processor will not block unsafe content. If set, the processor will interrupt itself when it sees the string in the output.
github-repos
def Create(path, password, generate_default_key=True):
    """Create a new user wallet.

    Args:
        path (str): where to create or open the wallet,
            e.g. "/Wallets/mywallet".
        password (str): the password to secure the wallet with
            (minimum 10 characters).
        generate_default_key (bool): whether to create an initial key.

    Returns:
        UserWallet: a UserWallet instance.
    """
    new_wallet = UserWallet(path=path, passwordKey=password, create=True)
    if generate_default_key:
        new_wallet.CreateKey()
    return new_wallet
Create a new user wallet. Args: path (str): A path indicating where to create or open the wallet e.g. "/Wallets/mywallet". password (str): a 10 characters minimum password to secure the wallet with. Returns: UserWallet: a UserWallet instance.
juraj-google-style
def CheckForHeaderGuard(filename, clean_lines, error): raw_lines = clean_lines.lines_without_raw_strings for i in raw_lines: if Search(' return for i in raw_lines: if Search('^\\s* return cppvar = GetHeaderGuardCPPVariable(filename) ifndef = '' ifndef_linenum = 0 define = '' endif = '' endif_linenum = 0 for (linenum, line) in enumerate(raw_lines): linesplit = line.split() if (len(linesplit) >= 2): if ((not ifndef) and (linesplit[0] == ' ifndef = linesplit[1] ifndef_linenum = linenum if ((not define) and (linesplit[0] == ' define = linesplit[1] if line.startswith(' endif = line endif_linenum = linenum if ((not ifndef) or (not define) or (ifndef != define)): error(filename, 0, 'build/header_guard', 5, ('No return if (ifndef != cppvar): error_level = 0 if (ifndef != (cppvar + '_')): error_level = 5 ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum, error) error(filename, ifndef_linenum, 'build/header_guard', error_level, (' ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum, error) match = Match(((' if match: if (match.group(1) == '_'): error(filename, endif_linenum, 'build/header_guard', 0, (' return no_single_line_comments = True for i in xrange(1, (len(raw_lines) - 1)): line = raw_lines[i] if Match('^(?:(?:\\\'(?:\\.|[^\\\'])*\\\')|(?:"(?:\\.|[^"])*")|[^\\\'"])* no_single_line_comments = False break if no_single_line_comments: match = Match(((' if match: if (match.group(1) == '_'): error(filename, endif_linenum, 'build/header_guard', 0, (' return error(filename, endif_linenum, 'build/header_guard', 5, ('
Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. clean_lines: A CleansedLines instance containing the file. error: The function to call with any errors found.
codesearchnet
def indent_css(f, output):
    """Indent un-indented CSS and save the result to a new file.

    Each non-empty line ending in ';' (a declaration) is written with a
    four-space indent; other non-empty lines (selectors, braces) are
    written unchanged.  Empty lines are dropped.  The output file is
    created if it does not exist and truncated if it does (the previous
    'r+' mode failed when the output file was missing -- contradicting
    this docstring -- and left stale trailing bytes when the new content
    was shorter).

    Args:
        f: string, path to the input CSS file.
        output: string, path/name of the output file
            (e.g. /directory/output.css).

    Returns:
        None.
    """
    # Context managers guarantee both handles are closed on any exit
    # path; iterating the file directly removes the need for a separate
    # line-count pass.
    with open(f) as src, open(output, 'w') as dst:
        for raw_line in src:
            line = raw_line.rstrip()
            if not line:
                continue
            if line.endswith(';'):
                dst.write('    ' + line + '\n')
            else:
                dst.write(line + '\n')
Indents CSS that has not been indented and saves it to a new file. A new file is created if the output destination does not already exist. Args: f: string, path to file. output: string, path/name of the output file (e.g. /directory/output.css). Returns: None.
codesearchnet
def compact(self, accumulator, *args, **kwargs):
    """Optionally return a more compact representation of the accumulator.

    Called before an accumulator is sent across the wire; useful when
    values are buffered or otherwise kept lazily unprocessed in the
    accumulator.  Must return an equivalent (though possibly modified)
    accumulator.  The base implementation returns it unmodified.

    Args:
        accumulator: the current accumulator.
        *args: Additional arguments and side inputs.
        **kwargs: Additional arguments and side inputs.
    """
    return accumulator
Optionally returns a more compact representation of the accumulator. This is called before an accumulator is sent across the wire, and can be useful in cases where values are buffered or otherwise lazily kept unprocessed when added to the accumulator. Should return an equivalent, though possibly modified, accumulator. By default returns the accumulator unmodified. Args: accumulator: the current accumulator *args: Additional arguments and side inputs. **kwargs: Additional arguments and side inputs.
github-repos
def __init__(self, attention_logit_mod, name="attention"):
    """Initialize AttentiveRead module.

    Args:
        attention_logit_mod: Module producing the compatibility logit for
            a memory slot.  Must map a
            [batch_size * memory_size, memory_word_size + query_word_size]
            shaped Tensor to a [batch_size * memory_size, 1] shaped
            Tensor.
        name: string.  Name for the module.
    """
    super(AttentiveRead, self).__init__(name=name)
    self._attention_logit_mod = attention_logit_mod
Initialize AttentiveRead module. Args: attention_logit_mod: Module that produces logit corresponding to a memory slot's compatibility. Must map a [batch_size * memory_size, memory_word_size + query_word_size]-shaped Tensor to a [batch_size * memory_size, 1] shape Tensor. name: string. Name for module.
juraj-google-style
def __init__(self, path):
    """Initializes a tagging file.

    Args:
        path (str): path to a file that contains one or more event
            tagging rules.
    """
    super(TaggingFile, self).__init__()
    # The path is stored as-is; rules are read from it on demand.
    self._path = path
Initializes a tagging file. Args: path (str): path to a file that contains one or more event tagging rules.
juraj-google-style
def register_index(self, index):
    """Register a given index with this instance.

    Creates and opens the underlying index (if it doesn't exist yet) via
    :meth:`create_index`.

    Args:
        index (PonyWhoosh.Index): the index to register.

    Returns:
        The registered index, to allow fluent use.
    """
    self._indexes[index._name] = index
    self.create_index(index)
    return index
Registers a given index: * Creates and opens an index for it (if it doesn't exist yet) * Sets some default values on it (unless they're already set) Args: index (PonyWhoosh.Index): An instance of PonyWhoosh.Index class
juraj-google-style
def from_json(self, js, groups: Iterable[Group]):
    """Load this functional channel from its JSON representation.

    Args:
        js (dict): the json object for this channel.
        groups (Iterable[Group]): known groups; entries referenced by id
            in ``js["groups"]`` are attached to this channel, preserving
            the order given in the json.  Ids with no matching group are
            skipped.
    """
    self.index = js["index"]
    self.groupIndex = js["groupIndex"]
    self.label = js["label"]
    self.functionalChannelType = FunctionalChannelType.from_str(
        js["functionalChannelType"], js["functionalChannelType"]
    )
    # Resolve group ids through a lookup table instead of a nested scan
    # (was O(len(js["groups"]) * len(groups))).  setdefault keeps the
    # FIRST group per id, matching the original first-match behavior,
    # and `group_id` avoids shadowing the builtin `id`.
    groups_by_id = {}
    for g in groups:
        groups_by_id.setdefault(g.id, g)
    self.groups = [
        groups_by_id[group_id]
        for group_id in js["groups"]
        if group_id in groups_by_id
    ]
this function will load the functional channel object from a json object and the given groups Args: js(dict): the json object groups(Iterable[Group]): the groups for referencing
juraj-google-style
def iter_geno_marker(self, markers, return_index=False):
    """Iterate over genotypes for one or more markers.

    Args:
        markers (list or str): the marker name(s) to iterate over.
        return_index (bool): whether to also yield the marker's seek
            index.

    Yields:
        tuple: the marker name (str) and its genotypes as a
        :py:class:`numpy.ndarray` (additive format), plus the seek index
        when ``return_index`` is True.

    Raises:
        UnsupportedOperation: if the file is open in 'w' mode.
    """
    if self._mode != 'r':
        raise UnsupportedOperation("not available in 'w' mode")
    # A single marker name is promoted to a one-element list.
    marker_list = [markers] if isinstance(markers, str) else markers
    for marker in marker_list:
        if return_index:
            geno, seek = self.get_geno_marker(marker, return_index=True)
            yield marker, geno, seek
        else:
            yield marker, self.get_geno_marker(marker)
Iterates over genotypes for a list of markers. Args: markers (list): The list of markers to iterate onto. return_index (bool): Wether to return the marker's index or not. Returns: tuple: The name of the marker as a string, and its genotypes as a :py:class:`numpy.ndarray` (additive format).
codesearchnet
def assemble_schedules(schedules, qobj_id=None, qobj_header=None, run_config=None): qobj_config = QasmQobjConfig() if run_config: qobj_config = QasmQobjConfig(**run_config.to_dict()) instruction_converter = PulseQobjConverter instruction_converter = instruction_converter(PulseQobjInstruction, **run_config.to_dict()) lo_converter = LoConfigConverter(PulseQobjExperimentConfig, run_config.qubit_lo_freq, run_config.meas_lo_freq, **run_config.to_dict()) qobj_schedules = [] user_pulselib = set() for idx, schedule in enumerate(schedules): qobj_instructions = [] for shift, instruction in schedule.instructions: qobj_instructions.append(instruction_converter(shift, instruction)) if isinstance(instruction, PulseInstruction): user_pulselib.add(instruction.command) qobj_experiment_header = QobjExperimentHeader( name=schedule.name or 'Experiment-%d' % idx ) qobj_schedules.append({ 'header': qobj_experiment_header, 'instructions': qobj_instructions }) run_config.pulse_library = [QobjPulseLibrary(name=pulse.name, samples=pulse.samples) for pulse in user_pulselib] experiments = [] if len(run_config.schedule_los) == 1: lo_dict = run_config.schedule_los.pop() q_los = lo_converter.get_qubit_los(lo_dict) if q_los: run_config.qubit_lo_freq = q_los m_los = lo_converter.get_meas_los(lo_dict) if m_los: run_config.meas_lo_freq = m_los if run_config.schedule_los: if len(qobj_schedules) == 1: for lo_dict in run_config.schedule_los: experiments.append(PulseQobjExperiment( instructions=qobj_schedules[0]['instructions'], experimentheader=qobj_schedules[0]['header'], experimentconfig=lo_converter(lo_dict) )) elif len(qobj_schedules) == len(run_config.schedule_los): for lo_dict, schedule in zip(run_config.schedule_los, qobj_schedules): experiments.append(PulseQobjExperiment( instructions=schedule['instructions'], experimentheader=schedule['header'], experimentconfig=lo_converter(lo_dict) )) else: raise QiskitError('Invalid LO setting is specified. 
' 'The LO should be configured for each schedule, or ' 'single setup for all schedules (unique), or ' 'multiple setups for a single schedule (frequency sweep),' 'or no LO configured at all.') else: for schedule in qobj_schedules: experiments.append(PulseQobjExperiment( instructions=schedule['instructions'], experimentheader=schedule['header'], )) qobj_config = PulseQobjConfig(**run_config.to_dict()) return PulseQobj(qobj_id=qobj_id, config=qobj_config, experiments=experiments, header=qobj_header)
Assembles a list of schedules into a qobj which can be run on the backend. Args: schedules (list[Schedule]): schedules to assemble qobj_id (int): identifier for the generated qobj qobj_header (QobjHeader): header to pass to the results run_config (RunConfig): configuration of the runtime environment Returns: PulseQobj: the Qobj to be run on the backends Raises: QiskitError: when invalid schedules or configs are provided
juraj-google-style
def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator):
    """Extracts site-specific content-settings events.

    Args:
        exceptions_dict (dict): permission exceptions data from the
            Preferences file.
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
    """
    for permission in exceptions_dict:
        # Only known permission keys are processed.
        if permission not in self._EXCEPTIONS_KEYS:
            continue
        exception_dict = exceptions_dict.get(permission, {})
        for urls, url_dict in exception_dict.items():
            last_used = url_dict.get('last_used', None)
            if not last_used:
                continue
            # The key is 'primary_url,secondary_url'.
            primary_url, secondary_url = urls.split(',')
            event_data = ChromeContentSettingsExceptionsEventData()
            event_data.permission = permission
            event_data.primary_url = primary_url
            event_data.secondary_url = secondary_url
            # last_used is stored in seconds; convert to microseconds.
            timestamp = int(last_used * 1000000)
            date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
                timestamp=timestamp)
            event = time_events.DateTimeValuesEvent(
                date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
            parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts site specific events. Args: exceptions_dict (dict): Permission exceptions data from Preferences file. parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs.
juraj-google-style
def visualize_qualitative_analysis(inputs, model, samples=1, batch_size=3, length=8):
    """Visualizes a qualitative analysis of a given model.

    Emits reconstruction summaries (plain, with static/dynamic priors,
    and with swapped static/dynamic latents) and generation summaries
    (with fixed static or dynamic latents).

    Args:
        inputs: A tensor of the original inputs, of shape
            [batch, timesteps, h, w, c].
        model: A DisentangledSequentialVAE model.
        samples: Number of samples to draw from the latent distributions.
        batch_size: Number of sequences to generate.
        length: Number of timesteps to generate for each sequence.
    """
    # Average each distribution's mean over the sample dimension.
    average = (lambda dist: tf.reduce_mean(input_tensor=dist.mean(), axis=0))
    with tf.compat.v1.name_scope('val_reconstruction'):
        reconstruct = functools.partial(model.reconstruct, inputs=inputs, samples=samples)
        visualize_reconstruction(inputs, average(reconstruct()))
        visualize_reconstruction(inputs, average(reconstruct(sample_static=True)), name='static_prior')
        visualize_reconstruction(inputs, average(reconstruct(sample_dynamic=True)), name='dynamic_prior')
        visualize_reconstruction(inputs, average(reconstruct(swap_static=True)), name='swap_static')
        visualize_reconstruction(inputs, average(reconstruct(swap_dynamic=True)), name='swap_dynamic')
    with tf.compat.v1.name_scope('generation'):
        generate = functools.partial(model.generate, batch_size=batch_size, length=length, samples=samples)
        image_summary(average(generate(fix_static=True)), 'fix_static')
        image_summary(average(generate(fix_dynamic=True)), 'fix_dynamic')
Visualizes a qualitative analysis of a given model. Args: inputs: A tensor of the original inputs, of shape [batch, timesteps, h, w, c]. model: A DisentangledSequentialVAE model. samples: Number of samples to draw from the latent distributions. batch_size: Number of sequences to generate. length: Number of timesteps to generate for each sequence.
codesearchnet
def participants(self, **kwargs):
    """List the participants.

    Args:
        **kwargs: Extra options forwarded to the server (e.g. ``sudo``,
            pagination options such as ``per_page`` / ``page``).

    Returns:
        The decoded server response listing the participants.

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabListError: If the list could not be retrieved.
    """
    path = ('%s/%s/participants' % (self.manager.path, self.get_id()))
    return self.manager.gitlab.http_get(path, **kwargs)
List the participants. Args: all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabListError: If the list could not be retrieved Returns: RESTObjectList: The list of participants
codesearchnet
def get_stdout(self, workflow_id, task_id):
    """Get stdout for a particular task.

    Args:
        workflow_id (str): Workflow id.
        task_id (str): Task id.

    Returns:
        str: stdout of the task.

    Raises:
        requests.HTTPError: if the server returns an error status.
    """
    substitutions = {
        'wf_url': self.workflows_url,
        'wf_id': workflow_id,
        'task_id': task_id,
    }
    url = '%(wf_url)s/%(wf_id)s/tasks/%(task_id)s/stdout' % substitutions
    response = self.gbdx_connection.get(url)
    response.raise_for_status()
    return response.text
Get stdout for a particular task. Args: workflow_id (str): Workflow id. task_id (str): Task id. Returns: Stdout of the task (string).
juraj-google-style
def _query_response_to_snapshot(response_pb, collection, expected_prefix):
    """Parse a RunQuery response protobuf into a document snapshot.

    Args:
        response_pb (google.cloud.proto.firestore.v1beta1.firestore_pb2.RunQueryResponse):
            A single streamed query response.
        collection (~.firestore_v1beta1.collection.CollectionReference):
            The collection that initiated the query.
        expected_prefix (str): The expected prefix for fully-qualified
            document names in the results (see ``_parent_info``).

    Returns:
        Optional[~.firestore.document.DocumentSnapshot]: A snapshot of the
        returned document, or None if ``response_pb.document`` is unset
        (e.g. a progress-only response).
    """
    if (not response_pb.HasField('document')):
        return None
    document_id = _helpers.get_doc_id(response_pb.document, expected_prefix)
    reference = collection.document(document_id)
    data = _helpers.decode_dict(response_pb.document.fields, collection._client)
    snapshot = document.DocumentSnapshot(reference, data, exists=True, read_time=response_pb.read_time, create_time=response_pb.document.create_time, update_time=response_pb.document.update_time)
    return snapshot
Parse a query response protobuf to a document snapshot. Args: response_pb (google.cloud.proto.firestore.v1beta1.\ firestore_pb2.RunQueryResponse): A collection (~.firestore_v1beta1.collection.CollectionReference): A reference to the collection that initiated the query. expected_prefix (str): The expected prefix for fully-qualified document names returned in the query results. This can be computed directly from ``collection`` via :meth:`_parent_info`. Returns: Optional[~.firestore.document.DocumentSnapshot]: A snapshot of the data returned in the query. If ``response_pb.document`` is not set, the snapshot will be :data:`None`.
codesearchnet
def group_device_names(devices, group_size): num_devices = len(devices) if (group_size > num_devices): raise ValueError(('only %d devices, but group_size=%d' % (num_devices, group_size))) num_groups = ((num_devices groups = [[] for i in range(num_groups)] for i in range(0, (num_groups * group_size)): groups[(i % num_groups)].append(devices[(i % num_devices)]) return groups
Group device names into groups of group_size. Args: devices: list of strings naming devices. group_size: int >= 1 Returns: list of lists of devices, where each inner list is group_size long, and each device appears at least once in an inner list. If len(devices) % group_size = 0 then each device will appear exactly once. Raises: ValueError: group_size > len(devices)
codesearchnet
def Normal(cls, mean: 'TensorFluent', variance: 'TensorFluent', batch_size: Optional[int]=None) -> Tuple[(Distribution, 'TensorFluent')]:
    """Return a Normal sampling op for the given mean and variance fluents.

    Args:
        mean: The mean parameter of the Normal distribution.
        variance: The variance parameter of the Normal distribution.
        batch_size: The size of the batch (optional); only used when
            neither parameter is already batched.

    Returns:
        The Normal distribution and a TensorFluent sample drawn from it.

    Raises:
        ValueError: If the parameters do not have the same scope.
    """
    if mean.scope != variance.scope:
        raise ValueError('Normal distribution: parameters must have same scope!')
    # Normal is parameterized by standard deviation, not variance.
    dist = tf.distributions.Normal(mean.tensor, tf.sqrt(variance.tensor))
    batch = mean.batch or variance.batch
    if batch or batch_size is None:
        sample = dist.sample()
    else:
        # Neither input is batched: draw batch_size samples explicitly.
        sample = dist.sample(batch_size)
        batch = True
    return (dist, TensorFluent(sample, mean.scope.as_list(), batch=batch))
Returns a TensorFluent for the Normal sampling op with given mean and variance. Args: mean: The mean parameter of the Normal distribution. variance: The variance parameter of the Normal distribution. batch_size: The size of the batch (optional). Returns: The Normal distribution and a TensorFluent sample drawn from the distribution. Raises: ValueError: If parameters do not have the same scope.
codesearchnet
def __init__(self, format_str=None, color=None, attrs=None):
    """Defines a set of attributes for a piece of text.

    Args:
        format_str: (str), string that will be used to format the text
            with.  For example '[{}]', to enclose text in brackets.
        color: (Colors), the color the text should be formatted with.
        attrs: (Attrs), the attributes to apply to text; defaults to [].
    """
    self._format_str = format_str
    self._color = color
    # `attrs or []` also replaces a passed-in empty list with a fresh
    # one, which is equivalent here.
    self._attrs = attrs or []
Defines a set of attributes for a piece of text. Args: format_str: (str), string that will be used to format the text with. For example '[{}]', to enclose text in brackets. color: (Colors), the color the text should be formatted with. attrs: (Attrs), the attributes to apply to text.
github-repos
def getAll(self, event_name):
    """Gets all events of a given name received so far (non-blocking).

    Args:
        event_name: string, the name of the event to get.

    Returns:
        A list of SnippetEvent, each representing an event posted from
        the Java side.
    """
    raw_events = self._callEventGetAll(self._id, event_name)
    return [snippet_event.from_dict(msg) for msg in raw_events]
Gets all the events of a certain name that have been received so far. This is a non-blocking call. Args: callback_id: The id of the callback. event_name: string, the name of the event to get. Returns: A list of SnippetEvent, each representing an event from the Java side.
github-repos
def make_list_of_audio(audio: Union[list[AudioInput], AudioInput]) -> AudioInput:
    """Ensure that the output is a list of audio.

    Args:
        audio (`Union[List[AudioInput], AudioInput]`): The input audio.

    Returns:
        list: A list of audio.

    Raises:
        ValueError: if the input is neither a valid audio nor a valid
            list/tuple of audio.
    """
    is_sequence = isinstance(audio, (list, tuple))
    if is_sequence and is_valid_list_of_audio(audio):
        return audio
    if is_valid_audio(audio):
        # A single audio is wrapped into a one-element list.
        return [audio]
    raise ValueError('Invalid input type. Must be a single audio or a list of audio')
Ensure that the output is a list of audio. Args: audio (`Union[List[AudioInput], AudioInput]`): The input audio. Returns: list: A list of audio.
github-repos
def datasets_delete(self, dataset_name, delete_contents):
    """Issues a request to delete a dataset.

    Args:
        dataset_name: the name of the dataset to delete.
        delete_contents: if True, any tables in the dataset will be
            deleted.  If False and the dataset is non-empty an exception
            will be raised.

    Returns:
        A parsed result object.

    Raises:
        Exception if there is an error performing the operation.
    """
    url = Api._ENDPOINT + (Api._DATASETS_PATH % dataset_name)
    args = {'deleteContents': True} if delete_contents else {}
    return datalab.utils.Http.request(url, method='DELETE', args=args,
                                      credentials=self._credentials,
                                      raw_response=True)
Issues a request to delete a dataset. Args: dataset_name: the name of the dataset to delete. delete_contents: if True, any tables in the dataset will be deleted. If False and the dataset is non-empty an exception will be raised. Returns: A parsed result object. Raises: Exception if there is an error performing the operation.
juraj-google-style
def _CheckSignature(self, value_data):
    """Parses and validates the signature of AppCompatCache value data.

    Args:
        value_data (bytes): value data.

    Returns:
        int: format type or None if the format could not be determined.

    Raises:
        ParseError: if the value data could not be parsed.
    """
    signature_map = self._GetDataTypeMap('uint32le')
    try:
        signature = self._ReadStructureFromByteStream(value_data, 0, signature_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError('Unable to parse signature value with error: {0!s}'.format(exception))
    format_type = self._HEADER_SIGNATURES.get(signature, None)
    if (format_type == self._FORMAT_TYPE_2003):
        return self._FORMAT_TYPE_2003
    # For the 8 and 10 formats the header signature doubles as the offset
    # of the first cached entry; its own signature disambiguates.
    if (format_type == self._FORMAT_TYPE_8):
        cached_entry_signature = value_data[signature:(signature + 4)]
        if (cached_entry_signature in (self._CACHED_ENTRY_SIGNATURE_8_0, self._CACHED_ENTRY_SIGNATURE_8_1)):
            return self._FORMAT_TYPE_8
    elif (format_type == self._FORMAT_TYPE_10):
        cached_entry_signature = value_data[signature:(signature + 4)]
        if (cached_entry_signature == self._CACHED_ENTRY_SIGNATURE_8_1):
            return self._FORMAT_TYPE_10
    return format_type
Parses and validates the signature. Args: value_data (bytes): value data. Returns: int: format type or None if format could not be determined. Raises: ParseError: if the value data could not be parsed.
codesearchnet
def return_main_dataset(self):
    """Extract and return the main data set via the user-supplied source.

    Imports `extract_main_dataset` from the stored source code, runs it,
    and converts the results to numpy arrays.

    Returns:
        tuple: features ``X`` (numpy.ndarray) and labels ``y``
        (numpy.ndarray).

    Raises:
        exceptions.UserError: if the source is empty or the user code
            raises.
    """
    source_code = self.main_dataset['source']
    if not source_code:
        raise exceptions.UserError('Source is empty')
    extract = functions.import_object_from_string_code(source_code, 'extract_main_dataset')
    try:
        (X, y) = extract()
    except Exception as e:
        raise exceptions.UserError('User code exception', exception_message=str(e))
    return (np.array(X), np.array(y))
Returns main data set from self Returns: X (numpy.ndarray): Features y (numpy.ndarray): Labels
codesearchnet
def set_date_range(self, start=None, end=None):
    """Update the date range of stats, charts, etc.

    If a bound is None the original bound is used, so calling with no
    arguments resets to the original range.

    Args:
        start (date): start date.
        end (date): end date.
    """
    begin = self._start if start is None else pd.to_datetime(start)
    finish = self._end if end is None else pd.to_datetime(end)
    self._update(self._prices.loc[begin:finish])
Update date range of stats, charts, etc. If None then the original date range is used. So to reset to the original range, just call with no args. Args: * start (date): start date * end (date): end date
juraj-google-style
def _PrintWarningsDetails(self, storage):
    """Writes one table per stored warning to the output writer.

    Args:
        storage (BaseStore): storage to read the warnings from.
    """
    if not storage.HasWarnings():
        self._output_writer.Write('No warnings stored.\n\n')
        return
    for index, warning in enumerate(storage.GetWarnings()):
        table_view = views.ViewsFactory.GetTableView(
            self._views_format_type, title='Warning: {0:d}'.format(index))
        table_view.AddRow(['Message', warning.message])
        table_view.AddRow(['Parser chain', warning.parser_chain])
        # The path specification is multi-line; only the first line carries
        # the row label, continuation lines get an empty label.
        for line_index, line in enumerate(warning.path_spec.comparable.split('\n')):
            if not line:
                continue
            label = 'Path specification' if line_index == 0 else ''
            table_view.AddRow([label, line])
        table_view.Write(self._output_writer)
Prints the details of the warnings. Args: storage (BaseStore): storage.
codesearchnet
def read_index(fn):
    """Reads an index from a file.

    The file starts with a magic byte string (``_CHECK_STRING``) followed by
    a zlib-compressed, UTF-8 encoded CSV payload.

    Args:
        fn (str): the name of the file containing the index.

    Returns:
        pandas.DataFrame: the index of the file.

    Raises:
        ValueError: if the leading magic bytes do not match ``_CHECK_STRING``.
    """
    with open(fn, "rb") as handle:
        magic = handle.read(len(_CHECK_STRING))
        if magic != _CHECK_STRING:
            raise ValueError("{}: not a valid index file".format(fn))
        payload = zlib.decompress(handle.read()).decode(encoding="utf-8")
    return pd.read_csv(io.StringIO(payload))
Reads index from file. Args: fn (str): the name of the file containing the index. Returns: pandas.DataFrame: the index of the file. Before reading the index, we check the first couple of bytes to see if it is a valid index file.
juraj-google-style
def __params_order_descriptor(self, message_type, path, is_params_class=False):
    """Describe the order of path parameters.

    Path parameters come first (in field-number order); required query
    parameters follow, sorted by name.

    Args:
      message_type: messages.Message class, message with parameters to
          describe.
      path: string, HTTP path to the method.
      is_params_class: boolean, whether the message represents URL
          parameters.

    Returns:
      Descriptor list for the parameter order.
    """
    path_params = []
    query_params = []
    path_parameter_dict = self.__get_path_parameters(path)
    # Iterate fields in protocol field-number order so path parameters keep
    # a deterministic ordering.
    for field in sorted(message_type.all_fields(), key=lambda f: f.number):
        matched_path_parameters = path_parameter_dict.get(field.name, [])
        if not isinstance(field, messages.MessageField):
            name = field.name
            if name in matched_path_parameters:
                path_params.append(name)
            elif is_params_class and field.required:
                query_params.append(name)
        else:
            # Message-valued fields are flattened into dotted sub-field names
            # before matching against the path parameters.
            for subfield_list in self.__field_to_subfields(field):
                name = '.'.join(subfield.name for subfield in subfield_list)
                if name in matched_path_parameters:
                    path_params.append(name)
                elif is_params_class and field.required:
                    query_params.append(name)
    return path_params + sorted(query_params)
Describe the order of path parameters. Args: message_type: messages.Message class, Message with parameters to describe. path: string, HTTP path to method. is_params_class: boolean, Whether the message represents URL parameters. Returns: Descriptor list for the parameter order.
juraj-google-style
def readfrom(fpath, aslines=False, errors='replace', verbose=None):
    """Reads (utf8) text from a file.

    Args:
        fpath (PathLike): file path
        aslines (bool): if True, return a list of lines instead of one string
        errors (str): codec error handling mode passed to ``bytes.decode``
        verbose (bool): verbosity flag

    Returns:
        str | list: unicode text from fpath (lines keep their newline)

    Raises:
        IOError: if ``fpath`` does not exist.
    """
    if verbose:
        print('Reading text file: %r ' % (fpath,))
    if not exists(fpath):
        raise IOError('File %r does not exist' % (fpath,))
    with open(fpath, 'rb') as stream:
        if not aslines:
            return stream.read().decode('utf8', errors=errors)
        lines = [raw.decode('utf8', errors=errors)
                 for raw in stream.readlines()]
    if sys.platform.startswith('win32'):
        # Binary-mode reads keep CRLF endings on Windows; normalize to LF.
        lines = [ln[:-2] + '\n' if ln.endswith('\r\n') else ln
                 for ln in lines]
    return lines
Reads (utf8) text from a file. Args: fpath (PathLike): file path aslines (bool): if True returns list of lines verbose (bool): verbosity flag Returns: str: text from fpath (this is unicode)
juraj-google-style
def Deserialize(self, reader):
    """Deserialize the full object from a binary reader.

    Args:
        reader (neo.IO.BinaryReader): reader positioned at the start of the
            serialized output.

    Raises:
        Exception: if the deserialized script hash is None.
    """
    # Read the three fields in wire order and assign each as it is read.
    for attribute, read in (('AssetId', reader.ReadUInt256),
                            ('Value', reader.ReadFixed8),
                            ('ScriptHash', reader.ReadUInt160)):
        setattr(self, attribute, read())
    if self.ScriptHash is None:
        raise Exception("Script hash is required from deserialize!!!!!!!!")
Deserialize full object. Args: reader (neo.IO.BinaryReader):
juraj-google-style
def omega(self, structure, n, u):
    """Finds directional frequency contribution to the heat capacity
    from direction and polarization.

    Args:
        structure (Structure): structure to be used in directional heat
            capacity determination.
        n (3x1 array-like): direction for Cv determination.
        u (3x1 array-like): polarization direction; note that no attempt
            to verify eigenvectors is made.

    Returns:
        float: directional frequency contribution.
    """
    # Propagation length: projection of the summed lattice vectors onto n.
    l0 = np.dot(np.sum(structure.lattice.matrix, axis=0), n)
    l0 *= 1e-10  # presumably angstrom -> m; TODO confirm lattice units
    weight = float(structure.composition.weight) * 1.66054e-27  # amu -> kg
    vol = structure.volume * 1e-30  # presumably angstrom^3 -> m^3
    # Sound velocity from the elastic tensor contraction n.u.n.u over the
    # mass density; the 1e9 factor is a unit conversion — TODO confirm
    # against the tensor's stored units.
    vel = (1e9 * self[0].einsum_sequence([n, u, n, u]) / (weight / vol)) ** 0.5
    return vel / l0
Finds directional frequency contribution to the heat capacity from direction and polarization Args: structure (Structure): Structure to be used in directional heat capacity determination n (3x1 array-like): direction for Cv determination u (3x1 array-like): polarization direction, note that no attempt for verification of eigenvectors is made
juraj-google-style
def get_slot_names(self, *args, **kwargs):
    """Return a list of the names of slots created by the `Optimizer`.

    Thin delegation to the wrapped optimizer's ``get_slot_names``.

    Args:
        *args: Arguments forwarded verbatim.
        **kwargs: Keyword arguments forwarded verbatim.

    Returns:
        A list of strings.
    """
    delegate = self._opt.get_slot_names
    return delegate(*args, **kwargs)
Return a list of the names of slots created by the `Optimizer`. This simply wraps the get_slot_names() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: A list of strings.
github-repos
def array(self, size_chunk, start, bytesize):
    """Read part of the binary file.

    Args:
        size_chunk (int): number of values to read.
        start (int): index of the first value, in ``self.bytesize``-sized
            elements after ``self.start_byte``.
        bytesize (int): element size in bytes. NOTE(review): this parameter
            is unused — ``self.bytesize`` is used for both seek and read;
            kept for backward compatibility.

    Returns:
        np.array: array of the corresponding values, scaled by
        ``self.SCALING_FACTOR`` when the grid is 'LOLA'.
    """
    with open(self.img, 'rb') as f1:
        f1.seek(self.start_byte + start * self.bytesize)
        data = f1.read(size_chunk * self.bytesize)
    # np.fromstring is deprecated for binary input; frombuffer is the
    # documented replacement. Copy so the result stays writable (frombuffer
    # returns a read-only view of the bytes).
    Z = np.frombuffer(data, dtype=self.dtype, count=size_chunk).copy()
    if self.grid == 'LOLA':
        return Z * float(self.SCALING_FACTOR)
    return Z
Read part of the binary file Args: size_chunk (int) : Size of the chunk to read start (int): Starting byte bytesize (int): Ending byte Returns: (np.array): array of the corresponding values
juraj-google-style
def gramschmidt(vin, uin):
    """Return the component of ``vin`` orthogonal to ``uin``.

    One classic Gram-Schmidt projection step; the output vector is not
    normalized.

    Args:
        vin (numpy array): first input vector (vector to orthogonalize).
        uin (numpy array): second input vector (direction to project out).

    Returns:
        numpy array: ``vin`` minus its projection onto ``uin``.

    Raises:
        ValueError: if ``uin`` has zero or negative squared norm.
    """
    overlap = np.inner(vin, uin)
    norm_sq = np.inner(uin, uin)
    if norm_sq <= 0.0:
        raise ValueError('Zero or negative inner product!')
    return vin - (overlap / norm_sq) * uin
Returns that part of the first input vector that is orthogonal to the second input vector. The output vector is not normalized. Args: vin (numpy array): first input vector uin (numpy array): second input vector
codesearchnet
def build_markdown_table(headers, rows, row_keys=None):
    """Build a lined-up markdown table.

    Args:
        headers (dict): A key -> display-value pairing for the header row.
        rows (list): Dictionaries containing every key listed in ``headers``.
        row_keys (list): Optional ordered list of keys to display.

    Returns:
        str: A valid Markdown table, newline-terminated.
    """
    row_maxes = _find_row_maxes(headers, rows)
    keys = row_keys if row_keys else list(headers)
    lines = [
        _build_row(headers, row_maxes, keys),
        _build_separator(row_maxes, keys),
    ]
    lines.extend(_build_row(row, row_maxes, keys) for row in rows)
    return '\n'.join(lines) + '\n'
Build a lined up markdown table. Args: headers (dict): A key -> value pairing fo the headers. rows (list): List of dictionaries that contain all the keys listed in the headers. row_keys (list): A sorted list of keys to display Returns: A valid Markdown Table as a string.
juraj-google-style
def _prune_traverse_using_omitted_locations(match_traversal, omitted_locations, complex_optional_roots, location_to_optional_roots):
    """Return a prefix of the given traverse, excluding blocks after an omitted optional.

    Given a subset (omitted_locations) of complex_optional_roots, return a new
    match traversal removing all MatchStep objects that are within any omitted
    location.

    Args:
        match_traversal: list of MatchStep objects to be pruned
        omitted_locations: subset of complex_optional_roots to be omitted
        complex_optional_roots: list of all @optional locations (location
            immediately preceding an @optional traverse) that expand vertex
            fields
        location_to_optional_roots: dict mapping from location -> optional_roots
            where location is within some number of @optionals and
            optional_roots is a list of optional root locations preceding the
            successive @optional scopes within which the location resides

    Returns:
        list of MatchStep objects as a copy of the given match traversal with
        all steps within any omitted location removed.
    """
    new_match_traversal = []
    for step in match_traversal:
        new_step = step
        if (isinstance(step.root_block, Traverse) and step.root_block.optional):
            current_location = step.as_block.location
            # The innermost optional root governs whether this step is kept.
            # NOTE(review): if current_location is missing from the dict this
            # raises TypeError on the [-1] below, not the AssertionError —
            # confirm that lookup can never miss.
            optional_root_locations_stack = location_to_optional_roots.get(current_location, None)
            optional_root_location = optional_root_locations_stack[(- 1)]
            if (optional_root_location is None):
                raise AssertionError(u'Found optional Traverse location {} that was not present in location_to_optional_roots dict: {}'.format(current_location, location_to_optional_roots))
            elif (optional_root_location in omitted_locations):
                # The step is inside an omitted optional: replace the traverse
                # with a filter on the previous step asserting the edge field
                # does not exist, then stop copying further steps.
                field_name = step.root_block.get_field_name()
                new_predicate = filter_edge_field_non_existence(LocalField(field_name))
                old_filter = new_match_traversal[(- 1)].where_block
                if (old_filter is not None):
                    # Preserve any existing filter by AND-ing with the new one.
                    new_predicate = BinaryComposition(u'&&', old_filter.predicate, new_predicate)
                new_match_step = new_match_traversal[(- 1)]._replace(where_block=Filter(new_predicate))
                new_match_traversal[(- 1)] = new_match_step
                new_step = None
            elif (optional_root_location in complex_optional_roots):
                # Kept complex optional: strip the `optional` marker by
                # rebuilding the Traverse without it.
                new_root_block = Traverse(step.root_block.direction, step.root_block.edge_name)
                new_step = step._replace(root_block=new_root_block)
            else:
                # Optional root not omitted and not complex: keep step as-is.
                pass
        if (new_step is None):
            # Everything after an omitted optional is dropped.
            break
        else:
            new_match_traversal.append(new_step)
    return new_match_traversal
Return a prefix of the given traverse, excluding any blocks after an omitted optional. Given a subset (omitted_locations) of complex_optional_roots, return a new match traversal removing all MatchStep objects that are within any omitted location. Args: match_traversal: list of MatchStep objects to be pruned omitted_locations: subset of complex_optional_roots to be omitted complex_optional_roots: list of all @optional locations (location immmediately preceding an @optional traverse) that expand vertex fields location_to_optional_roots: dict mapping from location -> optional_roots where location is within some number of @optionals and optional_roots is a list of optional root locations preceding the successive @optional scopes within which the location resides Returns: list of MatchStep objects as a copy of the given match traversal with all steps within any omitted location removed.
codesearchnet
def _time_to_datetime(value): if not isinstance(value, datetime.time): raise TypeError('Cannot convert to datetime expected time value; ' 'received %s' % value) return datetime.datetime(1970, 1, 1, value.hour, value.minute, value.second, value.microsecond)
Convert a time to a datetime for Cloud Datastore storage. Args: value: A datetime.time object. Returns: A datetime object with date set to 1970-01-01.
juraj-google-style
def _StructPackDecoder(wire_type, format):
    """Return a constructor for a decoder for a fixed-width field.

    Args:
        wire_type: The field's wire type.
        format: The format string to pass to struct.unpack().
    """
    size = struct.calcsize(format)
    unpack = struct.unpack  # bind once; InnerDecode runs on a hot path

    def InnerDecode(buffer, pos):
        # Decode exactly one fixed-width value starting at pos and return
        # the (value, new_position) pair expected by _SimpleDecoder.
        end = pos + size
        (value,) = unpack(format, buffer[pos:end])
        return (value, end)
    return _SimpleDecoder(wire_type, InnerDecode)
Return a constructor for a decoder for a fixed-width field. Args: wire_type: The field's wire type. format: The format string to pass to struct.unpack().
juraj-google-style
def __init__(self, text_encoder_config=None, target_language=None, **kwargs):
    """BuilderConfig for ParaCrawl.

    Args:
        text_encoder_config: `tfds.features.text.TextEncoderConfig`,
            configuration for the `tfds.features.text.TextEncoder` used for
            the features feature.
        target_language: Target language to translate to from English, which
            is always the source language. Must be a 2-letter language code,
            for example: "se", "hu".
        **kwargs: Keyword arguments forwarded to super.
    """
    if target_language not in _TARGET_LANGUAGES:
        raise ValueError("Invalid target language: %s " % target_language)
    if text_encoder_config:
        encoder_name = text_encoder_config.name
    else:
        encoder_name = "plain_text"
    super(ParaCrawlConfig, self).__init__(
        name="en%s_%s" % (target_language, encoder_name),
        description=("Translation dataset from English to %s, uses encoder %s."
                    ) % (target_language, encoder_name),
        **kwargs)
    self.text_encoder_config = (
        text_encoder_config or tfds.features.text.TextEncoderConfig())
    self.target_language = target_language
    self.data_url = _BASE_DATA_URL_FORMAT_STR.format(
        target_lang=target_language)
BuilderConfig for ParaCrawl. Args: text_encoder_config: `tfds.features.text.TextEncoderConfig`, configuration for the `tfds.features.text.TextEncoder` used for the features feature. target_language: Target language that will be used to translate to from English which is always the source language. It has to contain 2-letter coded strings. For example: "se", "hu". **kwargs: Keyword arguments forwarded to super.
juraj-google-style
def VisitNamedType(self, t):
    """Try to look up a NamedType.

    Args:
      t: An instance of pytd.NamedType

    Returns:
      The resolved type, or the same node t when no resolution is needed.

    Raises:
      KeyError: If we can't find a module, or an identifier in a module, or
          if an identifier in a module isn't a class.
      SymbolLookupError: If the looked-up item is not a type.
    """
    if t.name in self._module_map:
        # The name refers to a whole module.
        if self._alias_name and '.' in self._alias_name:
            return pytd.Module(name=self._alias_name, module_name=t.name)
        else:
            return t
    module_name, dot, name = t.name.rpartition('.')
    if not dot or self._IsLocalName(module_name):
        # Unqualified or locally-defined name: nothing to resolve here.
        return t
    if module_name in self._module_alias_map:
        module_name = self._module_alias_map[module_name]
    try:
        module, cls_prefix = self._LookupModuleRecursive(module_name)
    except KeyError:
        # The "module" may actually be a name nested inside the current unit.
        if self._unit and f'{self.name}.{module_name}' in self._unit:
            return t
        raise
    module_name = module.name
    if module_name == self.name:
        # Reference back into the module being processed; leave unresolved.
        return t
    if cls_prefix:
        # The prefix may itself be an alias to another module; if so, switch
        # the lookup over to that module.
        try:
            maybe_alias = pytd.LookupItemRecursive(module, cls_prefix[:-1])
        except KeyError:
            pass
        else:
            if isinstance(maybe_alias, pytd.Alias) and isinstance(maybe_alias.type, pytd.Module):
                if maybe_alias.type.module_name not in self._module_map:
                    raise KeyError(f'{t.name} refers to unknown module {maybe_alias.name}')
                module = self._module_map[maybe_alias.type.module_name]
                cls_prefix = ''
    name = cls_prefix + name
    try:
        if name == '*':
            # Star import: record it and keep the node unchanged.
            self._star_imports.add(module_name)
            item = t
        else:
            item = pytd.LookupItemRecursive(module, name)
    except KeyError as e:
        # Fall back to module-level __getattr__, then to star imports.
        item = self._ResolveUsingGetattr(module_name, module)
        if item is None:
            item = self._ResolveUsingStarImport(module, name)
        if item is None:
            raise KeyError(f'No {name} in module {module_name}') from e
    if isinstance(item, pytd.Alias):
        # Resolve local names inside the alias against its defining module;
        # only keep the rewritten alias when something actually resolved.
        lookup_local = LookupLocalTypes()
        lookup_local.unit = module
        new_item = item.Visit(lookup_local)
        if lookup_local.local_names:
            item = new_item
    if not self._in_generic_type and isinstance(item, pytd.Alias):
        item = MaybeSubstituteParameters(item.type) or item
    if isinstance(item, pytd.Constant) and item.name == 'typing_extensions.TypedDict':
        # Normalize typing_extensions.TypedDict to its typing equivalent.
        return self.to_type(pytd.NamedType('typing.TypedDict'))
    try:
        return self.to_type(item)
    except NotImplementedError as e:
        raise SymbolLookupError(f'{item} is not a type') from e
Try to look up a NamedType. Args: t: An instance of pytd.NamedType Returns: The same node t. Raises: KeyError: If we can't find a module, or an identifier in a module, or if an identifier in a module isn't a class.
github-repos
def on_hello(self, message):
    """Handle a hello event from the Discord websocket connection.

    Identifies with the gateway and starts the heartbeat thread at the
    interval the server requested.

    Args:
        message (dict): full message from the Discord websocket connection;
            ``d.heartbeat_interval`` carries the requested heartbeat period.
    """
    logger.info('Got a hello')
    self.identify(self.token)
    interval = message['d']['heartbeat_interval']
    self.heartbeat_thread = Heartbeat(self.ws, interval)
    self.heartbeat_thread.start()
Runs on a hello event from websocket connection

Args:
    message (dict): Full message from Discord websocket connection
codesearchnet
def Log(self, format_str, *args):
    """Logs the message using the hunt's standard logging.

    The formatted status is logged locally, stored on the flow context and
    appended to the hunt's logs collection.

    Args:
        format_str: Format string.
        *args: Arguments to the format string.
    """
    format_str = utils.SmartUnicode(format_str)
    status = format_str
    if args:
        try:
            status = format_str % args
        except TypeError:
            # A mismatched format string must not abort the hunt; fall back
            # to logging the raw format string as the status.
            logging.error(
                "Tried to log a format string with the wrong number "
                "of arguments: %s", format_str)
    logging.info("%s: %s", self.session_id, status)
    self.context.status = utils.SmartUnicode(status)
    log_entry = rdf_flows.FlowLog(
        client_id=None,
        urn=self.session_id,
        flow_name=self.hunt_obj.__class__.__name__,
        log_message=status)
    with data_store.DB.GetMutationPool() as pool:
        grr_collections.LogCollection.StaticAdd(
            self.hunt_obj.logs_collection_urn, log_entry, mutation_pool=pool)
Logs the message using the hunt's standard logging. Args: format_str: Format string *args: arguments to the format string Raises: RuntimeError: on parent missing logs_collection
juraj-google-style
def from_celery(cls, name, worker_dict, queues):
    """Create a WorkerStats object from the dictionary returned by celery.

    Args:
        name (str): The name of the worker.
        worker_dict (dict): The dictionary as returned by celery.
        queues (list): QueueStats objects for the queues this worker is
            listening on.

    Returns:
        WorkerStats: A fully initialized WorkerStats object.
    """
    pool_info = worker_dict['pool']
    return WorkerStats(
        name=name,
        broker=BrokerStats.from_celery(worker_dict['broker']),
        pid=worker_dict['pid'],
        process_pids=pool_info['processes'],
        concurrency=pool_info['max-concurrency'],
        job_count=pool_info['writes']['total'],
        queues=queues)
Create a WorkerStats object from the dictionary returned by celery. Args: name (str): The name of the worker. worker_dict (dict): The dictionary as returned by celery. queues (list): A list of QueueStats objects that represent the queues this worker is listening on. Returns: WorkerStats: A fully initialized WorkerStats object.
codesearchnet
def assert_reentrant_reads_succeed(source_info):
    """Tests if a given source can be read in a reentrant manner.

    Assume that the given source produces the set of values
    ``{v1, v2, ... vn}``. For each ``i`` in ``[1, n-1]`` this performs a
    reentrant read after reading ``i`` elements and verifies that both the
    original and reentrant read produce the expected values.

    Args:
        source_info (Tuple[~apache_beam.io.iobase.BoundedSource, int, int]):
            three-tuple of the source, the position to start reading at, and
            the position to stop reading at.

    Raises:
        ValueError: if the source is too trivial or a reentrant read
            produces an incorrect result.
    """
    source, start_position, stop_position = source_info
    assert isinstance(source, iobase.BoundedSource)
    expected_values = [val for val in source.read(source.get_range_tracker(start_position, stop_position))]
    if len(expected_values) < 2:
        raise ValueError('Source is too trivial since it produces only %d values. Please give a source that reads at least 2 values.' % len(expected_values))
    for i in range(1, len(expected_values) - 1):
        # Read the first i values, then perform a full second (reentrant)
        # read, then finish the first read.
        read_iter = source.read(source.get_range_tracker(start_position, stop_position))
        original_read = []
        for _ in range(i):
            original_read.append(next(read_iter))
        reentrant_read = [val for val in source.read(source.get_range_tracker(start_position, stop_position))]
        for val in read_iter:
            original_read.append(val)
        # NOTE(review): equal_to's matcher (apache_beam.testing.util) raises
        # on mismatch rather than returning a truthy value, so these explicit
        # raises may be unreachable belt-and-braces — confirm the intended
        # semantics of a truthy return here.
        if equal_to(original_read)(expected_values):
            raise ValueError('Source did not produce expected values when performing a reentrant read after reading %d values. Expected %r received %r.' % (i, expected_values, original_read))
        if equal_to(reentrant_read)(expected_values):
            raise ValueError('A reentrant read of source after reading %d values did not produce expected values. Expected %r received %r.' % (i, expected_values, reentrant_read))
Tests if a given source can be read in a reentrant manner. Assume that given source produces the set of values ``{v1, v2, v3, ... vn}``. For ``i`` in range ``[1, n-1]`` this method performs a reentrant read after reading ``i`` elements and verifies that both the original and reentrant read produce the expected set of values. Args: source_info (Tuple[~apache_beam.io.iobase.BoundedSource, int, int]): a three-tuple that gives the reference :class:`~apache_beam.io.iobase.BoundedSource`, position to start reading at, and a position to stop reading at. Raises: ValueError: if source is too trivial or reentrant read result in an incorrect read.
github-repos
def set_marked(self, name: str, marked: bool=False, unmarked: bool=False) -> None:
    """Add or remove the ``\\Marked`` and ``\\Unmarked`` mailbox attributes.

    ``marked`` takes precedence over ``unmarked``; when neither flag is set
    the stored entry for the mailbox is cleared.

    Args:
        name: The name of the mailbox.
        marked: True if the ``\\Marked`` attribute should be added.
        unmarked: True if the ``\\Unmarked`` attribute should be added.
    """
    if not (marked or unmarked):
        self._marked.pop(name, None)
    else:
        self._marked[name] = bool(marked)
Add or remove the ``\\Marked`` and ``\\Unmarked`` mailbox attributes. Args: name: The name of the mailbox. marked: True if the ``\\Marked`` attribute should be added. unmarked: True if the ``\\Unmarked`` attribute should be added.
codesearchnet
def create_reader_of_type(type_name):
    """Create an instance of the reader with the given name.

    Args:
        type_name: The name of a reader.

    Returns:
        A new instance of the reader registered under ``type_name``.

    Raises:
        exceptions.UnknownReaderException: if no reader has that name.
    """
    registry = available_readers()
    try:
        reader_cls = registry[type_name]
    except KeyError:
        raise exceptions.UnknownReaderException(('Unknown reader: %s' % (type_name,)))
    return reader_cls()
Create an instance of the reader with the given name. Args: type_name: The name of a reader. Returns: An instance of the reader with the given type.
codesearchnet
def HumanReadableStartType(self):
    """Return a human readable string describing the start type value.

    Returns:
        str: the start type itself when it is already a string, otherwise
        the enum label for the numeric value, falling back to the number
        formatted as a decimal string.
    """
    start_type = self.start_type
    if isinstance(start_type, py2to3.STRING_TYPES):
        return start_type
    enum_labels = human_readable_service_enums.SERVICE_ENUMS['Start']
    return enum_labels.get(start_type, '{0:d}'.format(start_type))
Return a human readable string describing the start type value. Returns: str: human readable description of the start type value.
codesearchnet
def delete(self, url, **kwargs):
    """Sends a DELETE request.

    Args:
        url(basestring): The URL of the API endpoint.
        **kwargs:
            erc(int): The expected (success) response code for the request;
                defaults to the standard DELETE success code.
            others: Passed on to the requests package.

    Raises:
        ApiError: If anything other than the expected response code is
            returned by the Webex Teams API endpoint.
    """
    check_type(url, basestring, may_be_none=False)
    expected_code = kwargs.pop('erc', EXPECTED_RESPONSE_CODE['DELETE'])
    self.request('DELETE', url, expected_code, **kwargs)
Sends a DELETE request. Args: url(basestring): The URL of the API endpoint. **kwargs: erc(int): The expected (success) response code for the request. others: Passed on to the requests package. Raises: ApiError: If anything other than the expected response code is returned by the Webex Teams API endpoint.
juraj-google-style
def _compute_hparam_infos(self):
    """Computes a list of api_pb2.HParamInfo from the current run, tag info.

    Finds all the SessionStartInfo messages and collects the hparam values
    appearing in each one. For each hparam, attempts to deduce a type that
    fits all its values.

    Returns:
      A list of api_pb2.HParamInfo messages.
    """
    run_to_tag_to_content = self.multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
    # Gather every observed value per hparam name across all runs.
    hparams = collections.defaultdict(list)
    for tag_to_content in run_to_tag_to_content.values():
        if (metadata.SESSION_START_INFO_TAG not in tag_to_content):
            continue
        start_info = metadata.parse_session_start_info_plugin_data(tag_to_content[metadata.SESSION_START_INFO_TAG])
        for (name, value) in six.iteritems(start_info.hparams):
            hparams[name].append(value)
    result = []
    for (name, values) in six.iteritems(hparams):
        # May return None when no consistent type can be deduced; skip those.
        hparam_info = self._compute_hparam_info_from_values(name, values)
        if (hparam_info is not None):
            result.append(hparam_info)
    return result
Computes a list of api_pb2.HParamInfo from the current run, tag info. Finds all the SessionStartInfo messages and collects the hparams values appearing in each one. For each hparam attempts to deduce a type that fits all its values. Finally, sets the 'domain' of the resulting HParamInfo to be discrete if the type is string and the number of distinct values is small enough. Returns: A list of api_pb2.HParamInfo messages.
codesearchnet