Dataset columns:
  code       string (lengths 20 – 4.93k)
  docstring  string (lengths 33 – 1.27k)
  source     string (3 classes)
def update_additional_charge(self, *, recurring_billing_id, description, plan_value,
                             plan_tax, plan_tax_return_base, currency):
    payload = {
        'description': description,
        'additionalValues': [
            {'name': 'ITEM_VALUE', 'value': plan_value, 'currency': currency},
            {'name': 'ITEM_TAX', 'value': plan_tax, 'currency': currency},
            {'name': 'ITEM_TAX_RETURN_BASE', 'value': plan_tax_return_base, 'currency': currency},
        ],
    }
    fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
    return self.client._put(self.url + fmt, payload=payload, headers=self.get_headers())

Updates the information of an additional charge in an invoice.

Args:
    recurring_billing_id: Identifier of the additional charge.
    description:
    plan_value:
    plan_tax:
    plan_tax_return_base:
    currency:

Returns:
codesearchnet
def filter_publication(publication, cmp_authors=True):
    query = None
    isbn_query = False
    if publication.optionals and publication.optionals.ISBN:
        query = aleph.ISBNQuery(publication.optionals.ISBN)
        isbn_query = True
    else:
        query = aleph.TitleQuery(publication.title)

    result = aleph.reactToAMQPMessage(aleph.SearchRequest(query), "")
    if not result.records:
        return publication

    if isbn_query:
        for record in result.records:
            epub = record.epublication
            if compare_names(epub.nazev, publication.title) >= 80:
                return None
        return publication

    for record in result.records:
        epub = record.epublication
        if not compare_names(epub.nazev, publication.title) >= 80:
            continue
        if not cmp_authors:
            return None
        for author in epub.autori:
            author_str = "%s %s %s" % (author.firstName, author.lastName, author.title)
            # materialize the names into a list (map() is lazy in Python 3, so the
            # original type() membership check could never match list/tuple/set)
            pub_authors = [x.name for x in publication.authors]
            for pub_author in pub_authors:
                if compare_names(author_str, pub_author) >= 50:
                    return None
    return publication

Filter publications based on data from Aleph.

Args:
    publication (obj): :class:`.Publication` instance.

Returns:
    obj/None: None if the publication was found in Aleph or `publication` if not.
juraj-google-style
def approximate_density(dist, xloc, parameters=None, cache=None, eps=1e-07):
    if parameters is None:
        parameters = dist.prm.copy()
    if cache is None:
        cache = {}
    xloc = numpy.asfarray(xloc)
    lo, up = numpy.min(xloc), numpy.max(xloc)
    mu = 0.5 * (lo + up)
    eps = numpy.where(xloc < mu, eps, -eps) * xloc
    floc = evaluation.evaluate_forward(
        dist, xloc, parameters=parameters.copy(), cache=cache.copy())
    for d in range(len(dist)):
        xloc[d] += eps[d]
        tmp = evaluation.evaluate_forward(
            dist, xloc, parameters=parameters.copy(), cache=cache.copy())
        floc[d] -= tmp[d]
        xloc[d] -= eps[d]
    floc = numpy.abs(floc / eps)
    return floc

Approximate the probability density function.

Args:
    dist : Dist
        Distribution in question. May not be an advanced variable.
    xloc : numpy.ndarray
        Location coordinates. Requires that xloc.shape=(len(dist), K).
    eps : float
        Acceptable error level for the approximations.

Returns:
    numpy.ndarray: Local probability density function with
    ``out.shape == xloc.shape``. To calculate actual density function,
    evaluate ``numpy.prod(out, 0)``.

Example:
    >>> distribution = chaospy.Normal(1000, 10)
    >>> xloc = numpy.array([[990, 1000, 1010]])
    >>> print(numpy.around(approximate_density(distribution, xloc), 4))
    [[0.0242 0.0399 0.0242]]
    >>> print(numpy.around(distribution.pdf(xloc), 4))
    [[0.0242 0.0399 0.0242]]
codesearchnet
def _compile_property_ast(schema, current_schema_type, ast, location, context,
                          unique_local_directives):
    validate_property_directives(unique_local_directives)

    if location.field == COUNT_META_FIELD_NAME:
        if not is_in_fold_scope(context):
            raise GraphQLCompilationError(
                u'Cannot use the "{}" meta field when not within a @fold vertex '
                u'field, as counting elements only makes sense in a fold. '
                u'Location: {}'.format(COUNT_META_FIELD_NAME, location))

    tag_directive = unique_local_directives.get('tag', None)
    if tag_directive:
        if is_in_fold_scope(context):
            raise GraphQLCompilationError(
                u'Tagging values within a @fold vertex field is not allowed! '
                u'Location: {}'.format(location))
        if location.field == COUNT_META_FIELD_NAME:
            raise AssertionError(
                u'Tags are prohibited within @fold, but unexpectedly found use of '
                u'a tag on the {} meta field that is only allowed within a @fold! '
                u'Location: {}'.format(COUNT_META_FIELD_NAME, location))
        tag_name = tag_directive.arguments[0].value.value
        if tag_name in context['tags']:
            raise GraphQLCompilationError(u'Cannot reuse tag name: {}'.format(tag_name))
        validate_safe_string(tag_name)
        context['tags'][tag_name] = {
            'location': location,
            'optional': is_in_optional_scope(context),
            'type': strip_non_null_from_type(current_schema_type),
        }
        context['metadata'].record_tag_info(tag_name, TagInfo(location=location))

    output_directive = unique_local_directives.get('output', None)
    if output_directive:
        output_name = output_directive.arguments[0].value.value
        if output_name in context['outputs']:
            raise GraphQLCompilationError(
                u'Cannot reuse output name: {}, {}'.format(output_name, context))
        validate_safe_string(output_name)
        validate_output_name(output_name)
        graphql_type = strip_non_null_from_type(current_schema_type)
        if is_in_fold_scope(context):
            set_fold_innermost_scope(context)
            if location.field != COUNT_META_FIELD_NAME:
                graphql_type = GraphQLList(graphql_type)
        context['outputs'][output_name] = {
            'location': location,
            'optional': is_in_optional_scope(context),
            'type': graphql_type,
            'fold': context.get('fold', None),
        }

Process property directives at this AST node, updating the query context as appropriate.

Args:
    schema: GraphQL schema object, obtained from the graphql library
    current_schema_type: GraphQLType, the schema type at the current location
    ast: GraphQL AST node, obtained from the graphql library. Only for function
        signature uniformity at the moment -- it is currently not used.
    location: Location object representing the current location in the query
    context: dict, various per-compilation data (e.g. declared tags, whether the
        current block is optional, etc.). May be mutated in-place in this function!
    unique_local_directives: dict, directive name string -> directive object,
        containing unique directives present on the current AST node *only*
codesearchnet
def range(self, start_date=None, stop_date=None, field=lambda x: x.xfer):
    # only assert the ordering when both bounds were given; either may be None,
    # and comparing None values would raise a TypeError in Python 3
    if start_date is not None and stop_date is not None:
        assert start_date <= stop_date, 'Start date must be earlier than end date.'
    out = Transactions()
    for t in self.trans:
        date = field(t)
        if start_date is not None and not date >= start_date:
            continue
        if stop_date is not None and not date <= stop_date:
            continue
        out.append(t)
    return out

Return a ``Transactions`` object in an inclusive date range.

Args:
    start_date: A ``datetime.Date`` object that marks the inclusive start date
        for the range.
    stop_date: A ``datetime.Date`` object that marks the inclusive end date
        for the range.
    field: The field to compare start and end dates to. Default is the
        ``xfer`` field.

Returns:
    A ``Transactions`` object.
codesearchnet
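A minimal usage sketch for the `range` method above, assuming a `Transactions` container whose entries carry `xfer` (and here a hypothetical `posted`) date attribute; the container itself is not shown in this dataset:

import datetime

# `ledger` is a hypothetical, already-populated Transactions instance
january = ledger.range(start_date=datetime.date(2024, 1, 1),
                       stop_date=datetime.date(2024, 1, 31))
# filter on a different date field by overriding `field`
posted = ledger.range(start_date=datetime.date(2024, 1, 1),
                      field=lambda t: t.posted)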
def __init__(self, path_segment_index):
    super(PathFilterScanTreeNode, self).__init__()
    self._path_segments = {}
    self.default_value = None
    self.parent = None
    self.path_segment_index = path_segment_index

Initializes a path filter scan tree node.

Args:
    path_segment_index: an integer containing the path segment index.
juraj-google-style
def create_projection(self, fov: float = 75.0, near: float = 1.0, far: float = 100.0,
                      aspect_ratio: float = None):
    return matrix44.create_perspective_projection_matrix(
        fov,
        aspect_ratio or self.window.aspect_ratio,
        near,
        far,
        dtype='f4',
    )

Create a projection matrix with the following parameters. When ``aspect_ratio``
is not provided the configured aspect ratio for the window will be used.

Args:
    fov (float): Field of view (float)
    near (float): Camera near value
    far (float): Camera far value

Keyword Args:
    aspect_ratio (float): Aspect ratio of the viewport

Returns:
    The projection matrix as a float32 :py:class:`numpy.array`
codesearchnet
def retrieve_metar(station_icao) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
    url = _BASE_METAR_URL.format(station=station_icao)
    with requests.get(url) as resp:
        if not resp.ok:
            return (f'unable to obtain METAR for station {station_icao}\n'
                    f'Got to "http:'  # NOTE: station-list URL truncated in the source data
                    f'for a list of valid stations'), None
        return None, resp.content.decode().split('\n')[1]

Retrieves a METAR string from an online database.

Args:
    station_icao: ICAO of the station

Returns:
    tuple of error, metar_str
juraj-google-style
def validate_config(config, required_keys, optional_keys=None):
    if optional_keys is None:
        optional_keys = []
    if not isinstance(config, dict):
        raise Exception('config is not dict type')
    invalid_keys = set(config) - set(required_keys + optional_keys)
    if len(invalid_keys) > 0:
        raise Exception('Invalid config with unexpected keys "%s"'
                        % ', '.join(e for e in invalid_keys))
    missing_keys = set(required_keys) - set(config)
    if len(missing_keys) > 0:
        raise Exception('Invalid config with missing keys "%s"'
                        % ', '.join(missing_keys))

Validate a config dictionary to make sure it includes all required keys and
does not include any unexpected keys.

Args:
    config: the config to validate.
    required_keys: the names of the keys that the config must have.
    optional_keys: the names of the keys that the config can have.

Raises:
    Exception if the config is not a dict or invalid.
codesearchnet
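A brief usage sketch for `validate_config`; the key names are chosen purely for illustration:

config = {'host': 'localhost', 'port': 8080}
validate_config(config, required_keys=['host'], optional_keys=['port'])  # passes silently
validate_config(config, required_keys=['host'])  # raises: unexpected key "port"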
def Embed(variables, verbose=False):
    print(_AvailableString(variables, verbose))
    try:
        _EmbedIPython(variables)
    except ImportError:
        _EmbedCode(variables)

Drops into a Python REPL with variables available as local variables.

Args:
    variables: A dict of variables to make available. Keys are variable names.
        Values are variable values.
    verbose: Whether to include 'hidden' members, those keys starting with _.
github-repos
def __init__(self, callbacks=None, add_history=False, add_progbar=False,
             model=None, **params):
    self.callbacks = nest.flatten(callbacks) if callbacks else []
    self._add_default_callbacks(add_history, add_progbar)
    if model:
        self.set_model(model)
    if params:
        self.set_params(params)

    self._supports_tf_logs = all(
        getattr(cb, '_supports_tf_logs', False) for cb in self.callbacks)
    self._batch_hooks_support_tf_logs = all(
        getattr(cb, '_supports_tf_logs', False)
        for cb in self.callbacks
        if cb._implements_train_batch_hooks()
        or cb._implements_test_batch_hooks()
        or cb._implements_predict_batch_hooks())
    self._should_call_train_batch_hooks = any(
        cb._implements_train_batch_hooks() for cb in self.callbacks)
    self._should_call_test_batch_hooks = any(
        cb._implements_test_batch_hooks() for cb in self.callbacks)
    self._should_call_predict_batch_hooks = any(
        cb._implements_predict_batch_hooks() for cb in self.callbacks)

    self._disallow_batch_hooks_in_ps_strategy()

    self._check_timing = any(
        cbk.__class__.__name__ not in globals() for cbk in self.callbacks)
    self._num_batches_for_timing_check = 5
    self._hook_times = {}
    self._batch_start_time = None
    self._batch_times = []

Container for `Callback` instances.

This object wraps a list of `Callback` instances, making it possible to call
them all at once via a single endpoint (e.g. `callback_list.on_epoch_end(...)`).

Args:
    callbacks: List of `Callback` instances.
    add_history: Whether a `History` callback should be added, if one does not
        already exist in the `callbacks` list.
    add_progbar: Whether a `ProgbarLogger` callback should be added, if one
        does not already exist in the `callbacks` list.
    model: The `Model` these callbacks are used with.
    **params: If provided, parameters will be passed to each `Callback` via
        `Callback.set_params`.
github-repos
async def change_votes(self, player1_votes: int = None, player2_votes: int = None,
                       add: bool = False):
    assert_or_raise(player1_votes is not None or player2_votes is not None,
                    ValueError, 'One of the votes must not be None')
    if add:
        res = await self.connection(
            'GET', 'tournaments/{}/matches/{}'.format(self._tournament_id, self._id))
        self._refresh_from_json(res)
        if player1_votes is not None:
            player1_votes += self._player1_votes or 0
        if player2_votes is not None:
            player2_votes += self._player2_votes or 0
    params = {}
    if player1_votes is not None:
        params.update({'player1_votes': player1_votes})
    if player2_votes is not None:
        params.update({'player2_votes': player2_votes})
    res = await self.connection(
        'PUT', 'tournaments/{}/matches/{}'.format(self._tournament_id, self._id),
        'match', **params)
    self._refresh_from_json(res)

Change the votes for either player |methcoro|

The votes will be overridden by default. If `add` is set to True, another API
request call will be made to ensure the local data is up to date with the
Challonge server. Then the votes given in argument will be added to those on
the server.

Args:
    player1_votes: if set, the player 1 votes will be changed to this value,
        or added to the current value if `add` is set
    player2_votes: if set, the player 2 votes will be changed to this value,
        or added to the current value if `add` is set
    add: if set, votes in parameters will be added instead of overridden

Raises:
    ValueError: one of the votes arguments must not be None
    APIException
codesearchnet
def snow_depth(self, value=999.0):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `snow_depth`'.format(value))
    self._snow_depth = value

Corresponds to IDD Field `snow_depth`

Args:
    value (float): value for IDD Field `snow_depth`
        Unit: cm
        Missing value: 999.0
        if `value` is None it will not be checked against the specification
        and is assumed to be a missing value

Raises:
    ValueError: if `value` is not a valid value
juraj-google-style
def format_tensor(tensor, tensor_label, include_metadata=False,
                  auxiliary_message=None, include_numeric_summary=False,
                  np_printoptions=None, highlight_options=None):
    lines = []
    font_attr_segs = {}
    if tensor_label is not None:
        lines.append('Tensor "%s":' % tensor_label)
        suffix = tensor_label.split(':')[-1]
        if suffix.isdigit():
            font_attr_segs[0] = [(8, 8 + len(tensor_label), 'bold')]
        else:
            debug_op_len = len(suffix)
            proper_len = len(tensor_label) - debug_op_len - 1
            font_attr_segs[0] = [
                (8, 8 + proper_len, 'bold'),
                (8 + proper_len + 1, 8 + proper_len + 1 + debug_op_len, 'yellow'),
            ]
    if isinstance(tensor, debug_data.InconvertibleTensorProto):
        if lines:
            lines.append('')
        lines.extend(str(tensor).split('\n'))
        return debugger_cli_common.RichTextLines(lines)
    elif not isinstance(tensor, np.ndarray):
        if lines:
            lines.append('')
        lines.extend(repr(tensor).split('\n'))
        return debugger_cli_common.RichTextLines(lines)
    if include_metadata:
        lines.append('  dtype: %s' % str(tensor.dtype))
        lines.append('  shape: %s' % str(tensor.shape).replace('L', ''))
    if lines:
        lines.append('')
    formatted = debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)
    if auxiliary_message:
        formatted.extend(auxiliary_message)
    if include_numeric_summary:
        formatted.append('Numeric summary:')
        formatted.extend(numeric_summary(tensor))
        formatted.append('')
    if np_printoptions is not None:
        np.set_printoptions(**np_printoptions)
    array_lines = repr(tensor).split('\n')
    if tensor.dtype.type is not np.bytes_:
        annotations = _annotate_ndarray_lines(array_lines, tensor,
                                              np_printoptions=np_printoptions)
    else:
        annotations = None
    formatted_array = debugger_cli_common.RichTextLines(array_lines,
                                                        annotations=annotations)
    formatted.extend(formatted_array)
    if highlight_options is not None:
        indices_list = list(np.argwhere(highlight_options.criterion(tensor)))
        total_elements = np.size(tensor)
        highlight_summary = 'Highlighted%s: %d of %d element(s) (%.2f%%)' % (
            '(%s)' % highlight_options.description if highlight_options.description else '',
            len(indices_list), total_elements,
            len(indices_list) / float(total_elements) * 100.0)
        formatted.lines[0] += ' ' + highlight_summary
        if indices_list:
            indices_list = [list(indices) for indices in indices_list]
            are_omitted, rows, start_cols, end_cols = locate_tensor_element(
                formatted, indices_list)
            for is_omitted, row, start_col, end_col in zip(are_omitted, rows,
                                                           start_cols, end_cols):
                if is_omitted or start_col is None or end_col is None:
                    continue
                if row in formatted.font_attr_segs:
                    formatted.font_attr_segs[row].append(
                        (start_col, end_col, highlight_options.font_attr))
                else:
                    formatted.font_attr_segs[row] = [
                        (start_col, end_col, highlight_options.font_attr)]
    return formatted

Generate a RichTextLines object showing a tensor in formatted style.

Args:
    tensor: The tensor to be displayed, as a numpy ndarray or other appropriate
        format (e.g., None representing uninitialized tensors).
    tensor_label: A label for the tensor, as a string. If set to None, will
        suppress the tensor name line in the return value.
    include_metadata: Whether metadata such as dtype and shape are to be
        included in the formatted text.
    auxiliary_message: An auxiliary message to display under the tensor label,
        dtype and shape information lines.
    include_numeric_summary: Whether a text summary of the numeric values (if
        applicable) will be included.
    np_printoptions: A dictionary of keyword arguments that are passed to a
        call of np.set_printoptions() to set the text format for display numpy
        ndarrays.
    highlight_options: (HighlightOptions) options for highlighting elements of
        the tensor.

Returns:
    A RichTextLines object. Its annotation field has line-by-line markups to
    indicate which indices in the array the first element of each line
    corresponds to.
github-repos
def load_library(lib, name=None, lib_cls=None):
    try:
        if lib_cls:
            return lib_cls(lib)
        else:
            return ctypes.CDLL(lib)
    except Exception:
        if name:
            lib_msg = '%s (%s)' % (name, lib)
        else:
            lib_msg = lib
        lib_msg += ' could not be loaded'
        if sys.platform == 'cygwin':
            lib_msg += ' in cygwin'
        _LOGGER.error(lib_msg, exc_info=True)
        return None

Loads a library. Catches and logs exceptions.

Returns:
    the loaded library or None

arguments:
* lib -- path to/name of the library to be loaded
* name -- the library's identifier (for logging). Defaults to None.
* lib_cls -- library class. Defaults to None (-> ctypes.CDLL).
juraj-google-style
def single_conv_dist(name, x, output_channels=None):
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        x_shape = common_layers.shape_list(x)
        if output_channels is None:
            output_channels = x_shape[-1]
        mean_log_scale = conv('conv2d', x, output_channels=2 * output_channels,
                              conv_init='zeros', apply_actnorm=False)
        mean = mean_log_scale[:, :, :, 0::2]
        log_scale = mean_log_scale[:, :, :, 1::2]
        return tf.distributions.Normal(mean, tf.exp(log_scale))

A 3x3 convolution mapping x to a standard normal distribution at init.

Args:
    name: variable scope.
    x: 4-D Tensor.
    output_channels: number of channels of the mean and std.
juraj-google-style
def make_persister(self, to_persist):
    if not self.meta_data:
        raise Exception("Root not set. Can't create persister.")

    def persister(c, broker):
        if c in to_persist:
            self.dehydrate(c, broker)

    return persister

Returns a function that persists (dehydrates) components as they are evaluated.
The function should be registered as an observer on a Broker just before
execution.

Args:
    to_persist (set): Set of components to persist. Skip everything else.
juraj-google-style
def _compute_gradients_wrt_embedding_table(self, gradient_wrt_activation,
                                           embedding_table, feature_indices,
                                           feature_values, combiner):
    if combiner not in ('mean', 'sum'):
        raise ValueError('`combiner` must be mean or sum; got {}.'.format(combiner))
    grads_shape = gradient_wrt_activation.shape[:-1] + embedding_table.shape
    grads = np.zeros(shape=grads_shape)
    count = np.zeros(shape=grads_shape)
    for feature_index, vocabulary_id in zip(feature_indices, feature_values):
        batch_index = tuple(feature_index[:-1])
        grads[batch_index][vocabulary_id] += gradient_wrt_activation[batch_index]
        count[batch_index] += 1
    count[count == 0] = 1
    if combiner == 'mean':
        grads = grads / count
    return np.reshape(grads, (-1, *embedding_table.shape))

Compute gradients wrt embedding_table.

Args:
    gradient_wrt_activation: `np.array` with shape `batch_size` by embedding
        `dimension`.
    embedding_table: `np.array` with shape `vocabulary_size` by embedding
        `dimension`.
    feature_indices: `indices` as used to construct `SparseTensor`.
    feature_values: `values` as used to construct `SparseTensor`.
    combiner: `String`, 'mean' or 'sum'.

Returns:
    Gradients wrt `embedding_table`, an `np.array`s with shape `batch_size` by
    `vocabulary_size` by embedding `dimension`.

Raises:
    ValueError: if `combiner` is not one of 'mean' or 'sum'.
github-repos
def _normalize_batch_coordinates(self, inputs, original_sizes, is_bounding_box=False):
    if len(original_sizes) != len(inputs):
        return [self._normalize_coordinates(self.target_size, item, original_sizes[0],
                                            is_bounding_box=is_bounding_box)
                for item in inputs]
    else:
        return [self._normalize_coordinates(self.target_size, item, size,
                                            is_bounding_box=is_bounding_box)
                for item, size in zip(inputs, original_sizes)]

Normalize coordinates based on original sizes.

Args:
    inputs: List of coordinate arrays
    original_sizes: Original sizes of the images
    is_bounding_box: Whether inputs are bounding boxes

Returns:
    Normalized coordinates as list
github-repos
def trace_min_buffer_capacity(self):
    cmd = enums.JLinkTraceCommand.GET_MIN_CAPACITY
    data = ctypes.c_uint32(0)
    res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))
    if res == 1:
        raise errors.JLinkException('Failed to get min trace buffer size.')
    return data.value

Retrieves the minimum capacity the trace buffer can be configured with.

Args:
    self (JLink): the ``JLink`` instance.

Returns:
    The minimum configurable capacity for the trace buffer.
codesearchnet
def _get_type(points, soma_class):
    assert soma_class in (SOMA_CONTOUR, SOMA_CYLINDER)
    npoints = len(points)
    if soma_class == SOMA_CONTOUR:
        return {0: None,
                1: SomaSinglePoint,
                2: None}.get(npoints, SomaSimpleContour)
    if (npoints == 3 and
            points[0][COLS.P] == -1 and
            points[1][COLS.P] == 1 and
            points[2][COLS.P] == 1):
        L.warning('Using neuromorpho 3-Point soma')
        return SomaNeuromorphoThreePointCylinders
    return {0: None,
            1: SomaSinglePoint}.get(npoints, SomaCylinders)

Get the type of the soma.

Args:
    points: Soma points
    soma_class(str): one of 'contour' or 'cylinder' to specify the type
codesearchnet
def on_session_init(self, request):
    # abstract callback stripped of its body in the source; concrete
    # wrappers must override this and return an OnSessionInitResponse
    raise NotImplementedError

Callback invoked during construction of the debug-wrapper session.

This is a blocking callback. The invocation happens right before the
constructor ends.

Args:
    request: (`OnSessionInitRequest`) callback request carrying information
        such as the session being wrapped.

Returns:
    An instance of `OnSessionInitResponse`.
github-repos
def serialize_object_graph(self, saveables_cache=None):
    named_saveable_objects, object_graph_proto, feed_additions, _ = (
        save_util_v1.serialize_object_graph_with_registered_savers(
            self, saveables_cache))
    return named_saveable_objects, object_graph_proto, feed_additions

Determine checkpoint keys for variables and build a serialized graph.

Non-slot variables are keyed based on a shortest path from the root saveable
to the object which owns the variable (i.e. the one which called
`Trackable._add_variable` to create it). Slot variables are keyed based on a
shortest path to the variable being slotted for, a shortest path to their
optimizer, and the slot name.

Args:
    saveables_cache: An optional cache storing previously created
        SaveableObjects created for each Trackable. Maps Trackables to a
        dictionary of attribute names to Trackable.

Returns:
    A tuple of (named_variables, object_graph_proto, feed_additions):
    named_variables: A dictionary mapping names to variable objects.
    object_graph_proto: A TrackableObjectGraph protocol buffer containing the
        serialized object graph and variable references.
    feed_additions: A dictionary mapping from Tensors to values which should
        be fed when saving.

Raises:
    ValueError: If there are invalid characters in an optimizer's slot names.
github-repos
def no_gradient(op_type: str) -> None:
    if not isinstance(op_type, str):
        raise TypeError('op_type must be a string')
    gradient_registry.register(None, op_type)

Specifies that ops of type `op_type` is not differentiable.

This function should *not* be used for operations that have a well-defined
gradient that is not yet implemented.

This function is only used when defining a new op type. It may be used for
ops such as `tf.size()` that are not differentiable. For example:

```python
tf.no_gradient("Size")
```

The gradient computed for 'op_type' will then propagate zeros.

For ops that have a well-defined gradient but are not yet implemented, no
declaration should be made, and an error *must* be thrown if an attempt to
request its gradient is made.

Args:
    op_type: The string type of an operation. This corresponds to the
        `OpDef.name` field for the proto that defines the operation.

Raises:
    TypeError: If `op_type` is not a string.
github-repos
def _process_book(html_chunk):
    title, book_url = _parse_title_url(html_chunk)

    data = DOWNER.download(book_url)
    dom = dhtmlparser.parseString(handle_encodnig(data))
    details = dom.find("div", {"id": "kniha_detail"})[0]

    pub = Publication(
        title=title,
        authors=_parse_authors(html_chunk),
        price=_parse_price(details),
        publisher="CPress",
    )

    pub.optionals.URL = book_url
    pub.optionals.EAN = _parse_ean(details)
    pub.optionals.format = _parse_format(details)
    pub.optionals.pub_date = _parse_date(details)
    pub.optionals.description = _parse_description(details)

    return pub

Parse available information about a book from the book details page.

Args:
    html_chunk (obj): HTMLElement containing slice of the page with details.

Returns:
    obj: :class:`structures.Publication` instance with book details.
juraj-google-style
def _check_docstring_quotes(self, quote_record):
    _, triple, row, col = quote_record
    if triple != TRIPLE_QUOTE_OPTS.get(self.config.docstring_quote):
        self._invalid_docstring_quote(triple, row, col)

Check if the docstring quote from tokenization is valid.

Args:
    quote_record: a tuple containing the info about the string from
        tokenization, giving the (token, quote, row, column).
codesearchnet
def wait_till_change_set_complete(cfn_client, change_set_id, try_count=25,
                                  sleep_time=0.5, max_sleep=3):
    complete = False
    response = None
    for i in range(try_count):
        response = cfn_client.describe_change_set(ChangeSetName=change_set_id)
        complete = response['Status'] in ('FAILED', 'CREATE_COMPLETE')
        if complete:
            break
        if sleep_time == max_sleep:
            logger.debug('Still waiting on changeset for another %s seconds', sleep_time)
        time.sleep(sleep_time)
        sleep_time = min(sleep_time * 2, max_sleep)
    if not complete:
        raise exceptions.ChangesetDidNotStabilize(change_set_id)
    return response

Checks state of a changeset, returning when it is in a complete state.

Since changesets can take a little bit of time to get into a complete state,
we need to poll it until it does so. This will try to get the state
`try_count` times, doubling the `sleep_time` wait between each try up to the
`max_sleep` number of seconds. If, after that time, the changeset is not in a
complete state it fails. These default settings will wait a little over one
minute.

Args:
    cfn_client (:class:`botocore.client.CloudFormation`): Used to query
        cloudformation.
    change_set_id (str): The unique changeset id to wait for.
    try_count (int): Number of times to try the call.
    sleep_time (int): Time to sleep between attempts.
    max_sleep (int): Max time to sleep during backoff

Return:
    dict: The response from cloudformation for the describe_change_set call.
codesearchnet
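A quick arithmetic check of the default backoff schedule described above (0.5 s doubling, capped at 3 s, 25 tries); this standalone sketch is not part of the library:

sleep_time, max_sleep, total = 0.5, 3, 0.0
for _ in range(25):
    total += sleep_time
    sleep_time = min(sleep_time * 2, max_sleep)
print(total)  # 69.5 seconds -> "a little over one minute"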
def unlock(self, passphrase, encrypted_seed=None):
    wallet = self.resource
    if not encrypted_seed:
        encrypted_seed = wallet.primary_private_seed
    try:
        if encrypted_seed['nonce']:
            primary_seed = NaclPassphraseBox.decrypt(passphrase, encrypted_seed)
        else:
            primary_seed = PassphraseBox.decrypt(passphrase, encrypted_seed)
    except Exception:  # narrowed from a bare `except:` clause
        raise InvalidPassphraseError()
    self.multi_wallet = MultiWallet(
        private_seeds={'primary': primary_seed},
        public={'cosigner': wallet.cosigner_public_seed,
                'backup': wallet.backup_public_seed})
    return self

Unlock the Wallet by decrypting the primary_private_seed with the supplied
passphrase. Once unlocked, the private seed is accessible in memory and calls
to `account.pay` will succeed. This is a necessary step for creating
transactions.

Args:
    passphrase (str): The passphrase the User used to encrypt this wallet.
    encrypted_seed (dict): A dictionary of the form
        {'ciphertext': longhexvalue,
         'iterations': integer of pbkdf2 derivations,
         'nonce': 24-byte hex value,
         'salt': 16-byte hex value}
        this dict represents a private seed (not a master key) encrypted with
        the `passphrase` using pbkdf2. You can obtain this value with
        wallet.generate. If this value is supplied, it overwrites (locally
        only) the encrypted primary_private_seed value, allowing you to load
        in a primary key that you didn't store with Gem. Note that the key
        MUST match the pubkey that this wallet was created with.

Returns:
    self
codesearchnet
def parse_flux_bounds(entry):
    lower_bound = None
    upper_bound = None
    for parameter in entry.kinetic_law_reaction_parameters:
        pid, name, value, units = parameter
        if pid == 'UPPER_BOUND' or name == 'UPPER_BOUND':
            upper_bound = value
        elif pid == 'LOWER_BOUND' or name == 'LOWER_BOUND':
            lower_bound = value
    return lower_bound, upper_bound

Return flux bounds for reaction entry.

Detect flux bounds that are specified using the non-standardized kinetic law
parameters which are used by many pre-FBC SBML models. The flux bounds are
returned as a pair of lower, upper bounds. The returned bound is None if
undefined.

Args:
    entry: :class:`SBMLReactionEntry`.
codesearchnet
def __init__(self, resolver_context):
    super(OSFile, self).__init__(resolver_context)
    self._file_object = None
    self._size = 0

Initializes a file-like object.

Args:
    resolver_context (Context): resolver context.
juraj-google-style
class PatchTSMixerChannelFeatureMixerBlock(nn.Module):

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()
        self.norm = PatchTSMixerNormLayer(config)
        self.gated_attn = config.gated_attn
        self.mlp = PatchTSMixerMLP(
            in_features=config.num_input_channels,
            out_features=config.num_input_channels,
            config=config,
        )
        if config.gated_attn:
            self.gating_block = PatchTSMixerGatedAttention(
                in_size=config.num_input_channels,
                out_size=config.num_input_channels,
            )

    def forward(self, inputs: torch.Tensor):
        residual = inputs
        inputs = self.norm(inputs)
        inputs = inputs.permute(0, 3, 2, 1)
        if self.gated_attn:
            inputs = self.gating_block(inputs)
        inputs = self.mlp(inputs)
        inputs = inputs.permute(0, 3, 2, 1)
        out = inputs + residual
        return out

This module mixes the features in the channel dimension.

Args:
    config (`PatchTSMixerConfig`): Configuration.
github-repos
def flatten(input_layer, preserve_batch=True):
    if preserve_batch:
        return reshape(input_layer, [DIM_SAME, -1])
    else:
        return reshape(input_layer, [-1])

Flattens this.

If preserve_batch is True, the result is rank 2 and the first dim (batch) is
unchanged. Otherwise the result is rank 1.

Args:
    input_layer: The Pretty Tensor object, supplied.
    preserve_batch: If True (the default), then preserve the first dimension.

Returns:
    A LayerWrapper with the flattened tensor.
codesearchnet
def to_diff_dict(self) -> Dict[str, Any]:
    config_dict = self.to_dict()
    default_config_dict = HqqConfig().to_dict()
    serializable_config_dict = {}
    for key, value in config_dict.items():
        if value != default_config_dict[key]:
            serializable_config_dict[key] = value
    return serializable_config_dict

Removes all attributes from config which correspond to the default config
attributes for better readability and serializes to a Python dictionary.

Returns:
    `Dict[str, Any]`: Dictionary of all the attributes that make up this
    configuration instance,
github-repos
def gaussian_square(times: np.ndarray, amp: complex, center: float, width: float,
                    sigma: float,
                    zeroed_width: Union[None, float] = None) -> np.ndarray:
    square_start = center - width / 2
    square_stop = center + width / 2
    if zeroed_width:
        zeroed_width = min(width, zeroed_width)
        gauss_zeroed_width = zeroed_width - width
    else:
        gauss_zeroed_width = None

    funclist = [
        functools.partial(gaussian, amp=amp, center=square_start, sigma=sigma,
                          zeroed_width=gauss_zeroed_width, rescale_amp=True),
        functools.partial(gaussian, amp=amp, center=square_stop, sigma=sigma,
                          zeroed_width=gauss_zeroed_width, rescale_amp=True),
        functools.partial(constant, amp=amp),
    ]
    condlist = [times <= square_start, times >= square_stop]
    return np.piecewise(times.astype(np.complex_), condlist, funclist)

r"""Continuous gaussian square pulse.

Args:
    times: Times to output pulse for.
    amp: Pulse amplitude.
    center: Center of the square pulse component.
    width: Width of the square pulse component.
    sigma: Width (standard deviation) of gaussian rise/fall portion of the pulse.
    zeroed_width: Subtract baseline of gaussian square pulse to enforce
        $\Omega_{square}(center \pm zeroed\_width/2) = 0$.
codesearchnet
def get_all_nn_info(self, structure):
    return [self.get_nn_info(structure, n) for n in range(len(structure))]

Get a listing of all neighbors for all sites in a structure.

Args:
    structure (Structure): Input structure

Return:
    List of NN site information for each site in the structure. Each entry has
    the same format as `get_nn_info`.
codesearchnet
def peak_signal_to_noise_ratio(true, pred):
    return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0)

Image quality metric based on maximal signal power vs. power of the noise.

Args:
    true: the ground truth image.
    pred: the predicted image.

Returns:
    peak signal to noise ratio (PSNR)
codesearchnet
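As a sanity check on the formula above — PSNR = 10 * log10(1 / MSE) for signals normalized to [0, 1] — here is a plain-numpy sketch (numpy stands in for the TF ops, so this is illustrative only):

import numpy as np

mse = 0.001
psnr = 10.0 * np.log10(1.0 / mse)
print(psnr)  # 30.0 dB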
def verify_directory(directory_name, directory_location, directory_create=False):
    if not directory_create:
        return __os.path.exists(__os.path.join(directory_location, directory_name))
    elif directory_create:
        good = __os.path.exists(__os.path.join(directory_location, directory_name))
        if not good:
            __os.mkdir(__os.path.join(directory_location, directory_name))

Function to verify if a directory exists.

Args:
    directory_name: The name of the directory to check
    directory_location: The location of the directory, derived from the os module
    directory_create: If you want to create the directory

Returns:
    returns boolean True or False, but if you set directory_create to True it
    will create the directory
juraj-google-style
def point_dist2(p1, p2):
    v = vector(p1, p2)
    return np.dot(v, v)

Compute the square of the Euclidean distance between two 3D points.

Args:
    p1, p2: indexable objects with indices 0, 1, 2 corresponding to 3D
        cartesian coordinates.

Returns:
    The square of the Euclidean distance between the points.
codesearchnet
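An illustrative call of `point_dist2`, assuming the unshown `vector(p1, p2)` helper simply returns the componentwise difference:

import numpy as np

def vector(p1, p2):  # hypothetical stand-in for the real helper
    return np.subtract(p2, p1)

print(point_dist2((0, 0, 0), (1, 2, 2)))  # 9 = 1^2 + 2^2 + 2^2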
def get_var(environ_cp, var_name, query_item, enabled_by_default, question=None,
            yes_reply=None, no_reply=None):
    if not question:
        question = 'Do you wish to build TensorFlow with {} support?'.format(query_item)
    if not yes_reply:
        yes_reply = '{} support will be enabled for TensorFlow.'.format(query_item)
    if not no_reply:
        no_reply = 'No {}'.format(yes_reply)
    yes_reply += '\n'
    no_reply += '\n'
    if enabled_by_default:
        question += ' [Y/n]: '
    else:
        question += ' [y/N]: '

    var = environ_cp.get(var_name)
    if var is not None:
        var_content = var.strip().lower()
        true_strings = ('1', 't', 'true', 'y', 'yes')
        false_strings = ('0', 'f', 'false', 'n', 'no')
        if var_content in true_strings:
            var = True
        elif var_content in false_strings:
            var = False
        else:
            raise UserInputError(
                'Environment variable %s must be set as a boolean indicator.\n'
                'The following are accepted as TRUE : %s.\n'
                'The following are accepted as FALSE: %s.\n'
                'Current value is %s.'
                % (var_name, ', '.join(true_strings), ', '.join(false_strings), var))

    while var is None:
        user_input_origin = get_input(question)
        user_input = user_input_origin.strip().lower()
        if user_input == 'y':
            print(yes_reply)
            var = True
        elif user_input == 'n':
            print(no_reply)
            var = False
        elif not user_input:
            if enabled_by_default:
                print(yes_reply)
                var = True
            else:
                print(no_reply)
                var = False
        else:
            print('Invalid selection: {}'.format(user_input_origin))
    return var

Get boolean input from user.

If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.

Args:
    environ_cp: copy of the os.environ.
    var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
    query_item: string for feature related to the variable, e.g. "CUDA for
        Nvidia GPUs".
    enabled_by_default: boolean for default behavior.
    question: optional string for how to ask for user input.
    yes_reply: optional string for reply when feature is enabled.
    no_reply: optional string for reply when feature is disabled.

Returns:
    boolean value of the variable.

Raises:
    UserInputError: if an environment variable is set, but it cannot be
        interpreted as a boolean indicator, assume that the user has made a
        scripting error, and will continue to provide invalid input. Raise
        the error to avoid infinitely looping.
github-repos
def get_variant_genotypes(self, variant):
    try:
        plink_chrom = CHROM_STR_TO_INT[variant.chrom.name]
    except KeyError:
        raise ValueError(
            "Invalid chromosome ('{}') for Plink.".format(variant.chrom)
        )

    info = self.bim.loc[
        (self.bim.chrom == plink_chrom) & (self.bim.pos == variant.pos), :
    ]

    if info.shape[0] == 0:
        logging.variant_not_found(variant)
        return []
    elif info.shape[0] == 1:
        return self._get_biallelic_variant(variant, info)
    else:
        return self._get_multialleic_variant(variant, info)

Get the genotypes from a well formed variant instance.

Args:
    variant (Variant): A Variant instance.

Returns:
    A list of Genotypes instance containing a pointer to the variant as well
    as a vector of encoded genotypes.

Note
====
    If the sample IDs are not unique, the index is changed to be the sample
    family ID and individual ID (i.e. fid_iid).
juraj-google-style
def _ParseRecord(self, parser_mediator, file_object):
    header_record_offset = file_object.tell()

    token_type = self._ParseTokenType(file_object, header_record_offset)
    if token_type not in self._HEADER_TOKEN_TYPES:
        raise errors.ParseError(
            'Unsupported header token type: 0x{0:02x}'.format(token_type))

    token_type, token_data = self._ParseToken(file_object, header_record_offset)
    if token_data.format_version != 11:
        raise errors.ParseError('Unsupported format version type: {0:d}'.format(
            token_data.format_version))

    timestamp = token_data.microseconds + (
        token_data.timestamp * definitions.MICROSECONDS_PER_SECOND)

    event_type = token_data.event_type
    header_record_size = token_data.record_size
    record_end_offset = header_record_offset + header_record_size

    event_tokens = []
    return_token_values = None

    file_offset = file_object.tell()
    while file_offset < record_end_offset:
        token_type, token_data = self._ParseToken(file_object, file_offset)
        if not token_data:
            raise errors.ParseError('Unsupported token type: 0x{0:02x}'.format(
                token_type))

        file_offset = file_object.tell()

        if token_type == self._TOKEN_TYPE_AUT_TRAILER:
            break

        token_type_string = self._TOKEN_TYPES.get(token_type, 'UNKNOWN')
        token_values = self._FormatTokenData(token_type, token_data)
        event_tokens.append({token_type_string: token_values})

        if token_type in (
                self._TOKEN_TYPE_AUT_RETURN32, self._TOKEN_TYPE_AUT_RETURN64):
            return_token_values = token_values

    if token_data.signature != self._TRAILER_TOKEN_SIGNATURE:
        raise errors.ParseError('Unsupported signature in trailer token.')

    if token_data.record_size != header_record_size:
        raise errors.ParseError(
            'Mismatch of event record size between header and trailer token.')

    event_data = BSMEventData()
    event_data.event_type = event_type
    event_data.extra_tokens = event_tokens
    event_data.offset = header_record_offset
    event_data.record_length = header_record_size
    event_data.return_value = return_token_values

    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

Parses an event record.

Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    file_object (dfvfs.FileIO): file-like object.

Raises:
    ParseError: if the event record cannot be read.
juraj-google-style
def __init__(self, timestamp=None):
    super(CocoaTime, self).__init__()
    self._precision = definitions.PRECISION_1_SECOND
    self._timestamp = timestamp

Initializes a Cocoa timestamp.

Args:
    timestamp (Optional[float]): Cocoa timestamp.
juraj-google-style
def animate(func: types.AnyFunction = None, *,
            animation: types.AnimationGenerator = _default_animation(),
            step: float = 0.1) -> types.AnyFunction:
    if callable(func):
        return _animate_no_kwargs(func, animation, step)
    elif func is None:
        return _animate_with_kwargs(animation_gen=animation, step=step)
    else:
        raise TypeError("argument 'func' must either be None or callable")

Wrapper function for the _Animate wrapper class.

Args:
    func: A function to run while animation is showing.
    animation: An AnimationGenerator that yields animation frames.
    step: Approximate timestep (in seconds) between frames.

Returns:
    An animated version of func if func is not None. Otherwise, a function
    that takes a function and returns an animated version of that.
juraj-google-style
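A short usage sketch for the `animate` decorator above, in both its bare and keyword forms; `long_task` and `other_task` are hypothetical functions:

import time

@animate
def long_task():
    time.sleep(3)  # stand-in for real work

@animate(step=0.2)  # keyword form: func defaults to None, so this returns a decorator
def other_task():
    time.sleep(1)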
def num_memory_zones(self):
    count = self._dll.JLINK_GetMemZones(0, 0)
    if count < 0:
        raise errors.JLinkException(count)
    return count

Returns the number of memory zones supported by the target.

Args:
    self (JLink): the ``JLink`` instance

Returns:
    An integer count of the number of memory zones supported by the target.

Raises:
    JLinkException: on error.
juraj-google-style
def run_census(flags_obj, ctx):
    train_file = os.path.join(flags_obj.data_dir, census_dataset.TRAINING_FILE)
    test_file = os.path.join(flags_obj.data_dir, census_dataset.EVAL_FILE)

    def train_input_fn():
        return census_dataset.input_fn(
            train_file, flags_obj.epochs_between_evals, True, flags_obj.batch_size)

    def eval_input_fn():
        return census_dataset.input_fn(test_file, 1, False, flags_obj.batch_size)

    tensors_to_log = {
        'average_loss': '{loss_prefix}head/truediv',
        'loss': '{loss_prefix}head/weighted_loss/Sum',
    }

    model_helpers.apply_clean(flags.FLAGS)
    model = build_estimator(
        model_dir=flags_obj.model_dir,
        model_type=flags_obj.model_type,
        model_column_fn=census_dataset.build_model_columns,
        inter_op=flags_obj.inter_op_parallelism_threads,
        intra_op=flags_obj.intra_op_parallelism_threads,
        ctx=ctx)

    loss_prefix = LOSS_PREFIX.get(flags_obj.model_type, '')
    tensors_to_log = {k: v.format(loss_prefix=loss_prefix)
                      for k, v in tensors_to_log.items()}
    train_hooks = hooks_helper.get_train_hooks(
        flags_obj.hooks,
        model_dir=flags_obj.model_dir,
        batch_size=flags_obj.batch_size,
        tensors_to_log=tensors_to_log)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, hooks=train_hooks)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
    tf.estimator.train_and_evaluate(model, train_spec, eval_spec)

Construct all necessary functions and call run_loop.

Args:
    flags_obj: Object containing user specified flags.
codesearchnet
def sort_recursive(data):
    newdict = {}
    for key, value in data.items():
        if type(value) is dict:
            newdict[key] = sort_recursive(value)
        else:
            newdict[key] = value
    return OrderedDict(sorted(newdict.items(),
                              key=lambda item: (compare_type(type(item[1])), item[0])))

Recursively sorts all elements in a dictionary.

Args:
    data (dict): The dictionary to sort

Returns:
    sorted_dict (OrderedDict): The sorted data dict
juraj-google-style
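A minimal demonstration of `sort_recursive`, assuming a `compare_type` helper that ranks value types (the real helper is not shown in this dataset):

from collections import OrderedDict

def compare_type(t):  # hypothetical stand-in: sort scalars before nested dicts
    return 1 if t in (dict, OrderedDict) else 0

print(sort_recursive({'b': 1, 'a': {'d': 2, 'c': 3}}))
# OrderedDict([('b', 1), ('a', OrderedDict([('c', 3), ('d', 2)]))])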
def _process_string_token(self, token, start_row, start_col):
    for i, char in enumerate(token):
        if char in QUOTES:
            break
    norm_quote = token[i:]

    if len(norm_quote) >= 3 and norm_quote[:3] in TRIPLE_QUOTE_OPTS.values():
        self._tokenized_triple_quotes[start_row] = (
            token, norm_quote[:3], start_row, start_col)
        return

    preferred_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)
    if self.config.string_quote in SMART_CONFIG_OPTS:
        other_quote = next(q for q in QUOTES if q != preferred_quote)
        if preferred_quote in token[i + 1:-1] and other_quote not in token[i + 1:-1]:
            preferred_quote = other_quote

    if norm_quote[0] != preferred_quote:
        self._invalid_string_quote(
            quote=norm_quote[0],
            row=start_row,
            correct_quote=preferred_quote,
            col=start_col,
        )

Internal method for identifying and checking string tokens from the token
stream.

Args:
    token: the token to check.
    start_row: the line on which the token was found.
    start_col: the column on which the token was found.
juraj-google-style
def before_starting_server(self):
    self._validate_snippet_app_on_device()
    self._disable_hidden_api_blocklist()

Performs the preparation steps before starting the remote server.

This function performs the following preparation steps:
* Validate that the Mobly Snippet app is available on the device.
* Disable hidden api blocklist if necessary and possible.

Raises:
    errors.ServerStartPreCheckError: if the server app is not installed for
        the current user.
github-repos
def get(self, blob):
    return self._send(
        url=self._base_url + blob.parent.server_id + '/' + blob.server_id + '?s=0',
        method='GET',
        allow_redirects=False
    ).headers.get('Location')

Get the canonical link to a media blob.

Args:
    blob (gkeepapi.node.Blob): The blob.

Returns:
    str: A link to the media.
juraj-google-style
def _read_variable_op(self, no_copy=False):
    variable_accessed(self)
    self._variable_read = True

    def read_and_set_handle(no_copy):
        if no_copy and forward_compat.forward_compatible(2022, 5, 3):
            gen_resource_variable_ops.disable_copy_on_read(self.handle)
        result = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype)
        _maybe_set_handle_data(self._dtype, self.handle, result)
        return result

    if getattr(self, '_caching_device', None) is not None:
        with ops.colocate_with(None, ignore_existing=True):
            with ops.device(self._caching_device):
                result = read_and_set_handle(no_copy)
    else:
        result = read_and_set_handle(no_copy)

    if not context.executing_eagerly():
        record.record_operation(
            'ReadVariableOp', [result], [self.handle],
            backward_function=lambda x: [x],
            forward_function=lambda x: [x])

    if (context.xla_sharding_for_resource_variables_enabled()
            and not context.executing_eagerly()
            and self._xla_sharding is not None):
        sharding_string = self._xla_sharding.SerializeToString()
        with ops.colocate_with(result):
            result = gen_xla_ops.xla_sharding(result, sharding=sharding_string)
            result.op._set_attr(
                '_XlaSharding', attr_value_pb2.AttrValue(s=sharding_string))
    return result

Reads the value of the variable.

If the variable is in copy-on-read mode and `no_copy` is True, the variable
is converted to copy-on-write mode before it is read.

Args:
    no_copy: Whether to prevent a copy of the variable.

Returns:
    The value of the variable.
github-repos
def __init__(self, context=None, queue=None):
    self._context = context or TelemetryContext()
    self._queue = queue or SynchronousQueue(SynchronousSender())

Initializes a new instance of the class.

Args:
    context (:class:`TelemetryContext`): the telemetry context to use when
        sending telemetry data.
    queue (:class:`QueueBase`): the queue to enqueue the resulting
        :class:`contracts.Envelope` to.
juraj-google-style
def enable_logging(main):
    @functools.wraps(main)
    def wrapper(*args, **kwargs):
        import argparse
        parser = argparse.ArgumentParser()
        parser.add_argument(
            '--loglevel', default="ERROR", type=str,
            help="Set the loglevel. Possible values: CRITICAL, ERROR (default), "
                 "WARNING, INFO, DEBUG")
        options = parser.parse_args()

        numeric_level = getattr(logging, options.loglevel.upper(), None)
        if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % options.loglevel)
        logging.basicConfig(level=numeric_level)

        retcode = main(*args, **kwargs)
        return retcode

    return wrapper

This decorator is used to decorate main functions. It adds the initialization
of the logger and an argument parser that allows one to select the loglevel.

Useful if we are writing simple main functions that call libraries where the
logging module is used.

Args:
    main: main function.
juraj-google-style
def normalize_in_place(sysmeta_pyxb, reset_timestamps=False):
    if sysmeta_pyxb.accessPolicy is not None:
        sysmeta_pyxb.accessPolicy = d1_common.wrap.access_policy.get_normalized_pyxb(
            sysmeta_pyxb.accessPolicy)
    if getattr(sysmeta_pyxb, 'mediaType', False):
        d1_common.xml.sort_value_list_pyxb(sysmeta_pyxb.mediaType.property_)
    if getattr(sysmeta_pyxb, 'replicationPolicy', False):
        d1_common.xml.sort_value_list_pyxb(
            sysmeta_pyxb.replicationPolicy.preferredMemberNode)
        d1_common.xml.sort_value_list_pyxb(
            sysmeta_pyxb.replicationPolicy.blockedMemberNode)
    d1_common.xml.sort_elements_by_child_values(
        sysmeta_pyxb.replica,
        ['replicaVerified', 'replicaMemberNode', 'replicationStatus'])
    sysmeta_pyxb.archived = bool(sysmeta_pyxb.archived)
    if reset_timestamps:
        epoch_dt = datetime.datetime(1970, 1, 1, tzinfo=d1_common.date_time.UTC())
        sysmeta_pyxb.dateUploaded = epoch_dt
        sysmeta_pyxb.dateSysMetadataModified = epoch_dt
        for replica_pyxb in getattr(sysmeta_pyxb, 'replica', []):
            replica_pyxb.replicaVerified = epoch_dt
    else:
        sysmeta_pyxb.dateUploaded = d1_common.date_time.round_to_nearest(
            sysmeta_pyxb.dateUploaded)
        sysmeta_pyxb.dateSysMetadataModified = d1_common.date_time.round_to_nearest(
            sysmeta_pyxb.dateSysMetadataModified)
        for replica_pyxb in getattr(sysmeta_pyxb, 'replica', []):
            replica_pyxb.replicaVerified = d1_common.date_time.round_to_nearest(
                replica_pyxb.replicaVerified)

Normalize SystemMetadata PyXB object in-place.

Args:
    sysmeta_pyxb: SystemMetadata PyXB object to normalize.
    reset_timestamps: bool
        ``True``: Timestamps in the SystemMetadata are set to a standard value
        so that objects that are compared after normalization register as
        equivalent if only their timestamps differ.

Notes:
    The SystemMetadata is normalized by removing any redundant information and
    ordering all sections where there are no semantics associated with the
    order. The normalized SystemMetadata is intended to be semantically
    equivalent to the un-normalized one.
codesearchnet
def SubtractFromBalance(self, assetId, fixed8_val):
    found = False
    for key, balance in self.Balances.items():
        if key == assetId:
            self.Balances[assetId] = self.Balances[assetId] - fixed8_val
            found = True
    if not found:
        self.Balances[assetId] = fixed8_val * Fixed8(-1)

Subtract an amount from the specified balance.

Args:
    assetId (UInt256):
    fixed8_val (Fixed8): amount to subtract.
juraj-google-style
def to_struct(self, from_api: dict = None, from_json: dict = None,
              indent: int = 2) -> str:
    if from_api:
        from_json = self.to_json(from_api=from_api)

    fields = []
    spaces = ' ' * indent
    for key, value in from_json.items():
        if not isinstance(value, dict):
            continue
        if value.get('type', 'record') == 'record':
            fields.append('%sSTRUCT(\n%s\n%s) AS %s' % (
                spaces,
                self.to_struct(from_json=value, indent=indent + 2),
                spaces, key))
        elif value['type'] == 'array':
            if 'enum' in value['items']:
                fields.append('%s[%s\n%s] AS %s' % (spaces, 'STRING', spaces, key))
            else:
                fields.append('%s[STRUCT(\n%s\n%s)] AS %s' % (
                    spaces,
                    self.to_struct(from_json=value['items'], indent=indent + 2),
                    spaces, key))
        else:
            fields.append('%s%s AS %s' % (spaces, value['type'].upper(), key))

    return ',\n'.join(fields)

Translates a Discovery API Document schema to a BigQuery STRUCT.

Recursively crawls the discovery document reference tree to build the struct.
Leverages recursion depth passed in constructor to stop if necessary.

Args:
    from_api: the api schema to extrapolate
    from_json: new object with references replaced, not passed by caller
    indent: indentation depth for nested fields

Returns:
    A BigQuery STRUCT object that can be pasted into a query.
github-repos
def export_run_metadata():
    return context().export_run_metadata()

Returns a RunMetadata proto with accumulated information.

The returned protocol buffer contains information since the most recent call
to either enable_run_metadata or export_run_metadata.

Returns:
    A RunMetadata protocol buffer.
github-repos
def correct_segmentation(segments, clusters, min_time):
    result_segments = []
    prev_segment = None
    for i, segment in enumerate(segments):
        # skip empty segments; the flattened source read `len(segment) >= 1`,
        # which would have skipped every non-empty segment instead
        if len(segment) < 1:
            continue
        cluster = clusters[i]
        if prev_segment is None:
            prev_segment = segment
        else:
            cluster_dt = 0
            if len(cluster) > 0:
                cluster_dt = abs(cluster[0].time_difference(cluster[-1]))
            if cluster_dt <= min_time:
                prev_segment.extend(segment)
            else:
                prev_segment.append(segment[0])
                result_segments.append(prev_segment)
                prev_segment = segment
    if prev_segment is not None:
        result_segments.append(prev_segment)
    return result_segments

Corrects the predicted segmentation. This process prevents over-segmentation.

Args:
    segments (:obj:`list` of :obj:`list` of :obj:`Point`): segments to correct
    clusters (:obj:`list` of :obj:`list` of :obj:`Point`): point clusters
        matching each segment
    min_time (int): minimum required time for segmentation
codesearchnet
def convert_http_request(request, referrer_host=None):
    new_request = urllib.request.Request(request.url_info.url,
                                         origin_req_host=referrer_host)
    for name, value in request.fields.get_all():
        new_request.add_header(name, value)
    return new_request

Convert a HTTP request.

Args:
    request: An instance of :class:`.http.request.Request`.
    referrer_host (str): The referring hostname or IP address.

Returns:
    Request: An instance of :class:`urllib.request.Request`
codesearchnet
def _find_all_line_split(self, begin_line: int, end_line: int) -> list[int]:
    curr_line = 0
    curr_idx = 0
    point_idx = []
    while curr_line < begin_line:
        curr_idx = self._src.find('\n', curr_idx) + 1
        curr_line += 1
    point_idx.append(curr_idx)
    while curr_line < end_line:
        curr_idx = self._src.find('\n', curr_idx) + 1
        point_idx.append(curr_idx)
        curr_line += 1
    curr_idx = self._src.find('\n', curr_idx)
    curr_idx = len(self._src) if curr_idx == -1 else curr_idx
    point_idx.append(curr_idx)
    return point_idx

Finds all indices of line boundaries between begin_line and end_line.

Because the end line of the error message may be the last line of the source
code, the final entry is the last index itself rather than one past it.

Args:
    begin_line: The begin line of which we want to find the index of.
    end_line: The end line of which we want to find the index of.

Returns:
    A list of indices for the line boundaries.
github-repos
def multi_log_probs_from_logits_and_actions(policy_logits, actions):
    log_probs = []
    for i in range(len(policy_logits)):
        log_probs.append(
            -tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=policy_logits[i], labels=actions[i]))
    return log_probs

Computes action log-probs from policy logits and actions.

In the notation used throughout documentation and comments, T refers to the
time dimension ranging from 0 to T-1. B refers to the batch size and
ACTION_SPACE refers to the list of numbers each representing a number of
actions.

Args:
    policy_logits: A list with length of ACTION_SPACE of float32 tensors of
        shapes [T, B, ACTION_SPACE[0]], ..., [T, B, ACTION_SPACE[-1]] with
        un-normalized log-probabilities parameterizing a softmax policy.
    actions: A list with length of ACTION_SPACE of int32 tensors of shapes
        [T, B], ..., [T, B] with actions.

Returns:
    A list with length of ACTION_SPACE of float32 tensors of shapes [T, B],
    ..., [T, B] corresponding to the sampling log probability of the chosen
    action w.r.t. the policy.
codesearchnet
def wiki_request(self, params):
    params['format'] = 'json'
    if 'action' not in params:
        params['action'] = 'query'

    limit = self._rate_limit
    last_call = self._rate_limit_last_call
    if limit and last_call and last_call + self._min_wait > datetime.now():
        wait_time = (last_call + self._min_wait) - datetime.now()
        time.sleep(int(wait_time.total_seconds()))

    req = self._get_response(params)

    if self._rate_limit:
        self._rate_limit_last_call = datetime.now()

    return req

Make a request to the MediaWiki API using the given search parameters.

Args:
    params (dict): Request parameters

Returns:
    A parsed dict of the JSON response

Note:
    Useful when wanting to query the MediaWiki site for some value that is
    not part of the wrapper API
codesearchnet
def fit_transform(self, data):
    if data:
        assert isinstance(data, dict), (
            'Step {}, "data" argument in the "fit_transform()" method must be dict, '
            'got {} instead.'.format(self.name, type(data)))
    logger.info('Step {}, working in "{}" mode'.format(self.name, self._mode))
    if self._mode == 'inference':
        # the flattened source created this ValueError without raising it
        raise ValueError(
            'Step {}, you are in "{}" mode, where you cannot run "fit". '
            'Please change mode to "train" to enable fitting. '
            'Use: "step.set_mode_train()" then "step.fit_transform()"'.format(
                self.name, self._mode))

    if self.output_is_cached and not self.force_fitting:
        logger.info('Step {} using cached output'.format(self.name))
        step_output_data = self.output
    elif (self.output_is_persisted and self.load_persisted_output
          and not self.force_fitting):
        logger.info('Step {} loading persisted output from {}'.format(
            self.name, self.experiment_directory_output_step))
        step_output_data = self._load_output(self.experiment_directory_output_step)
    else:
        step_inputs = {}
        if self.input_data is not None:
            for input_data_part in self.input_data:
                step_inputs[input_data_part] = data[input_data_part]
        for input_step in self.input_steps:
            step_inputs[input_step.name] = input_step.fit_transform(data)
        if self.adapter:
            step_inputs = self._adapt(step_inputs)
        else:
            step_inputs = self._unpack(step_inputs)
        step_output_data = self._fit_transform_operation(step_inputs)
    logger.info('Step {}, fit and transform completed'.format(self.name))
    return step_output_data

Fit the model and transform data or load already processed data.

Loads cached or persisted output or adapts data for the current transformer
and executes ``transformer.fit_transform``.

Args:
    data (dict): data dictionary with keys as input names and values as
        dictionaries of key-value pairs that can be passed to the
        ``self.transformer.fit_transform`` method.

        Example:

            .. code-block:: python

                data = {'input_1': {'X': X, 'y': y},
                        'input_2': {'X': X, 'y': y}}

Returns:
    dict: Step output from the ``self.transformer.fit_transform`` method
codesearchnet
def __init__(self, max_sza=95.0, **kwargs): self.max_sza = max_sza self.max_sza_cos = np.cos(np.deg2rad(max_sza)) if max_sza is not None else None super(SunZenithCorrectorBase, self).__init__(**kwargs)
Collect custom configuration values. Args: max_sza (float): Maximum solar zenith angle in degrees that is considered valid and correctable. Default 95.0.
juraj-google-style
def _NeedsClassParam(self, sig): if self.class_name and self.function_name and sig.params: safe_class_name = pytd_utils.Print(pytd.NamedType(self.class_name)) return pytd_utils.Print(sig.return_type) == safe_class_name and pytd_utils.Print(sig.params[0].type) in (f'type[{safe_class_name}]', f'Type[{safe_class_name}]', safe_class_name) return False
Whether the signature needs a bounded type param for the class. We detect the signatures (cls: Type[X][, ...]) -> X and (self: X[, ...]) -> X so that we can replace X with a bounded TypeVar. This heuristic isn't perfect; for example, in this naive copy method: class X: def copy(self): return X() we should have left X alone. But it prevents a number of false positives by enabling us to infer correct types for common implementations of __new__ and __enter__. Args: sig: A pytd.Signature. Returns: True if the signature needs a class param, False otherwise.
github-repos
def _relation_exists(self, connection, relation): query = 'SELECT 1 FROM sqlite_master WHERE (type=\'table\' OR type=\'view\') AND name=?;' cursor = connection.cursor() cursor.execute(query, [relation]) result = cursor.fetchall() return result == [(1,)]
Returns True if relation (table or view) exists in the sqlite db, otherwise False. Args: connection (apsw.Connection): connection to the sqlite database that stores mpr data. relation (str): name of the table or view to check. Returns: boolean: True if relation exists, False otherwise.
juraj-google-style
def unshuffle_from_sc_to_cpu(t: tensor.Tensor, num_sparse_cores: int, offset_in_shard: int, size_in_shard: int, shard_rotation: int=0) -> tensor.Tensor:
    old_shape = t.shape
    if t.shape[0] % num_sparse_cores != 0:
        raise ValueError('The dim of table ({}) should be multiple of number of sparse cores ({})'.format(t.shape[0], num_sparse_cores))
    # Reconstructed reshape: the source line was truncated mid-expression.
    shards_t = array_ops.reshape(t, (num_sparse_cores, t.shape[0] // num_sparse_cores, t.shape[1]))
    shards = shards_t[:, offset_in_shard:offset_in_shard + size_in_shard, :]
    if shard_rotation:
        shards = manip_ops.roll(shards, -shard_rotation, axis=0)
    intermediate_tensor = array_ops.transpose(shards, (1, 0, 2))
    new_shape = (size_in_shard * num_sparse_cores, old_shape[1])
    return array_ops.reshape(intermediate_tensor, new_shape)
Unshuffles the sparse core sharded embedding tables to unsharded. This converts an input tensor representing a stacked and sharded embedding table into a specific embedding table variable, using the provided metadata about that table within the stacked, sharded embedding table. Args: t: The input stacked and sharded embedding table from sparsecore. num_sparse_cores: The number of sparsecores; this determines the number of shards present in the input t. offset_in_shard: Offset within a shard where the queried table starts. size_in_shard: size (number of rows) of this queried table within each shard of the input t. shard_rotation: The rotation of this table's shards. Returns: An embedding table which is part of the stacked embedding table t.
github-repos
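A shape-level sketch of calling unshuffle_from_sc_to_cpu above; the table sizes are hypothetical:

import tensorflow as tf

# 8 rows stacked across 4 sparse cores, embedding dim 2 (illustrative).
stacked = tf.reshape(tf.range(16, dtype=tf.float32), (8, 2))
# Recover the table occupying rows 0..1 of each shard, with no rotation.
table = unshuffle_from_sc_to_cpu(stacked, num_sparse_cores=4,
                                 offset_in_shard=0, size_in_shard=2)
# table.shape == (8, 2), i.e. size_in_shard * num_sparse_cores rows.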
def generate_branches(scales=None, angles=None, shift_angle=0): branches = [] for pos, scale in enumerate(scales): angle = -sum(angles)/2 + sum(angles[:pos]) + shift_angle branches.append([scale, angle]) return branches
Generates branches with an alternative system. Args: scales (tuple/array): Indicating how the branch lengths develop from age to age. angles (tuple/array): Holding the branch and shift angles in radians. shift_angle (float): Holding the rotation angle for all branches. Returns: branches (2d-array): An array consisting of arrays holding scale and angle for every branch.
juraj-google-style
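A small worked example for generate_branches above; the scale and angle values are illustrative:

import math

# Two branches, 80% and 60% of the parent length, spread over two
# 30-degree angle steps with no overall rotation.
branches = generate_branches(scales=(0.8, 0.6),
                             angles=(math.radians(30), math.radians(30)),
                             shift_angle=0.0)
# -> [[0.8, -0.5235...], [0.6, 0.0]]  (scale, angle in radians)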
def summary(self): with tf.name_scope((self._name + '/summary')): mean_summary = tf.cond((self._count > 0), (lambda : self._summary('mean', self._mean)), str) std_summary = tf.cond((self._count > 1), (lambda : self._summary('stddev', self._std())), str) return tf.summary.merge([mean_summary, std_summary])
Summary string of mean and standard deviation. Returns: Summary tensor.
codesearchnet
def _get_channel(host, timeout): connection = create_blocking_connection(host) if timeout >= 0: connection.add_timeout( timeout, lambda: sys.stderr.write("Timeouted!\n") or sys.exit(1) ) return connection.channel()
Create communication channel for given `host`. Args: host (str): Specified --host. timeout (int): Set `timeout` for returned `channel`. Returns: Object: Pika channel object.
juraj-google-style
def get_item(dictionary, tuple_key, default_value): (u, v) = tuple_key tuple1 = dictionary.get((u, v), None) tuple2 = dictionary.get((v, u), None) return (tuple1 or tuple2 or default_value)
Grab values from a dictionary using an unordered tuple as a key. The dictionary should not contain None, 0, or False as values: the lookup chains candidates with `or`, so falsy stored values would be mistaken for misses. Args: dictionary: Dictionary that uses a two-element tuple as keys tuple_key: Unordered tuple of two elements default_value: Value that is returned when the tuple_key is not found in the dictionary
codesearchnet
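A short usage sketch for get_item above, with illustrative edge-weight data:

# Edge weights keyed by unordered node pairs.
edge_weights = {('a', 'b'): 3, ('b', 'c'): 5}
get_item(edge_weights, ('b', 'a'), 0)  # -> 3, key order does not matter
get_item(edge_weights, ('a', 'c'), 0)  # -> 0, falls back to the default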
def dict_to_xml(spec, full_document=False): middle = xmltodict.unparse(spec, full_document=full_document, pretty=True) return lxml.etree.fromstring(middle)
Convert dict to XML Args: spec(dict): dict to convert full_document(bool): whether to add XML headers Returns: lxml.etree.Element: XML tree
codesearchnet
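A usage sketch for dict_to_xml above, assuming xmltodict and lxml are installed:

import lxml.etree

element = dict_to_xml({'root': {'child': 'value'}})
print(lxml.etree.tostring(element, pretty_print=True).decode())
# <root>
#   <child>value</child>
# </root>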
def traced(func=None, *, span_name=None, standalone=False, additional_attributes: Optional[List[Tuple[str, str, Union[Any, Callable[[Any], Any]]]]]=None): def decorator(func): if not _has_opentelemetry: return func import functools @functools.wraps(func) def wrapper(*args, **kwargs): instance = args[0] if args and (hasattr(func, '__self__') and func.__self__ is not None) else None is_method = instance is not None if is_method and hasattr(instance, 'tracer'): tracer = instance.tracer else: tracer = get_tracer(f'transformers.{func.__module__}.{func.__name__}') name = span_name or func.__name__ span_fn = tracer.start_span if standalone else tracer.start_as_current_span with span_fn(name) as span: span.set_attribute('function.name', func.__name__) span.set_attribute('function.module', func.__module__) span.set_attribute('function.is_method', is_method) if args: for i, arg in enumerate(args): if isinstance(arg, (str, int, float, bool)) or arg is None: span.set_attribute(f'args.{i}', str(arg)) else: span.set_attribute(f'args.{i}', str(type(arg))) if kwargs: for key, value in kwargs.items(): if isinstance(value, (str, int, float, bool)) or value is None: span.set_attribute(f'kwargs.{key}', str(value)) else: span.set_attribute(f'kwargs.{key}', str(type(value))) if additional_attributes and is_method: for attr_config in additional_attributes: instance_attribute_name, span_attribute_key, value_or_transform_function = attr_config if hasattr(instance, instance_attribute_name): attribute_value = getattr(instance, instance_attribute_name) if callable(value_or_transform_function): transformed_value = value_or_transform_function(attribute_value) else: transformed_value = value_or_transform_function span.set_attribute(span_attribute_key, transformed_value) try: result = func(*args, **kwargs) return result except Exception as e: span.set_status(Status(StatusCode.ERROR)) span.record_exception(e) raise return wrapper if func is None: return decorator return decorator(func)
Decorator to trace function calls with OpenTelemetry. Can be used as @traced or @traced(span_name="custom_name") Args: func: The function to trace span_name: Optional custom name for the span (defaults to function name) standalone: If True, creates a parentless span additional_attributes: Optional list of additional attributes to set on the span. Each item is a tuple of (instance_attribute_name, span_attribute_key, value_or_transform_function) where: - instance_attribute_name: Name of the attribute to get from the class instance - span_attribute_key: Key to use when setting the attribute on the span - value_or_transform_function: Either a raw value to use directly, or a function to transform the attribute value before setting it on the span Returns: Decorated function with tracing
github-repos
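A usage sketch for the traced decorator above; the function names are hypothetical. When opentelemetry is not installed the decorator returns the function unchanged, so it is safe to apply unconditionally:

@traced
def load_weights(path):
    return path

@traced(span_name="tokenize", standalone=True)
def tokenize(text):
    return text.split()

tokenize("hello world")  # records a parentless "tokenize" span when tracing is on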
def FileEntryExistsByPathSpec(self, path_spec): fsntfs_file_entry = None location = getattr(path_spec, 'location', None) mft_attribute = getattr(path_spec, 'mft_attribute', None) mft_entry = getattr(path_spec, 'mft_entry', None) try: if mft_attribute is not None and mft_entry is not None: fsntfs_file_entry = self._fsntfs_volume.get_file_entry(mft_entry) elif location is not None: fsntfs_file_entry = self._fsntfs_volume.get_file_entry_by_path(location) except IOError as exception: raise errors.BackEndError(exception) return fsntfs_file_entry is not None
Determines if a file entry for a path specification exists. Args: path_spec (PathSpec): path specification. Returns: bool: True if the file entry exists. Raises: BackEndError: if the file entry cannot be opened.
juraj-google-style
def discriminator(self, x, is_training, reuse=False): hparams = self.hparams with tf.variable_scope( "discriminator", reuse=reuse, initializer=tf.random_normal_initializer(stddev=0.02)): batch_size, height, width = common_layers.shape_list(x)[:3] net = tf.layers.conv2d(x, 64, (4, 4), strides=(2, 2), padding="SAME", name="d_conv1") net = lrelu(net) net = tf.layers.conv2d(net, 128, (4, 4), strides=(2, 2), padding="SAME", name="d_conv2") if hparams.discriminator_batchnorm: net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="d_bn2") net = lrelu(net) size = height * width net = tf.reshape(net, [batch_size, size * 8]) net = tf.layers.dense(net, 1024, name="d_fc3") if hparams.discriminator_batchnorm: net = tf.layers.batch_normalization(net, training=is_training, momentum=0.999, name="d_bn3") net = lrelu(net) return net
Discriminator architecture based on InfoGAN. Args: x: input images, shape [bs, h, w, channels] is_training: boolean, are we in train or eval mode. reuse: boolean, should params be re-used. Returns: out_logit: the output logits (before sigmoid).
juraj-google-style
def build(self): if self.exists: self._build('requirements', self.requirements_last_modified, ('pip install -U -r %s' % self.requirements_file)) try: self._build('requirements (dev)', self.dev_requirements_last_modified, ('pip install -U -r %s' % self.dev_requirements_file)) except Exception as e: if ('No such file' not in str(e)): raise e self.stdout.write(style.yellow('Could not find dev requirements')) try: self._build('requirements (local)', self.local_requirements_last_modified, ('pip install -U -r %s' % self.local_requirements_file)) except Exception as e: if ('No such file' not in str(e)): raise e self.stdout.write(style.yellow('Could not find local requirements')) self._build('application', self.setup_last_modified, ('python %s develop' % self.setup_file))
Builds the app in the app's environment. Only builds if the build is out-of-date and is non-empty. Builds in 3 stages: requirements, dev requirements, and app. pip is used to install requirements, and setup.py is used to install the app itself. Raises: ValidationError if the app fails to build.
codesearchnet
def get_field_value(self, key, default=MISSING): meta_value = self.metadata.get(key) context_value = self.context.get(key) if (context_value is not None): return context_value elif (meta_value is not None): return meta_value return default
Method to fetch a value from either the schema's ``context`` or the field's ``metadata``, in that order. Args: key (str): The name of the key to grab the value for. Keyword Args: default (object, optional): If the value doesn't exist in the schema's ``context`` or the field's ``metadata``, this value will be returned. By default this will be ``MISSING``. Returns: object: This will be the correct value to use given the parameters.
codesearchnet
def add(self, val): return cache.lpush(self.key, json.dumps(val) if self.serialize else val)
Add the given value to the item (a list). Args: val: A JSON serializable object. Returns: Cache backend response.
juraj-google-style
def autodiscover(self, message): if (message['version'] in self.allowed_versions): logger.debug(('<%s> Client version matches server version.' % message['cuuid'])) response = serialize_data({'method': 'OHAI Client', 'version': self.version, 'server_name': self.server_name}, self.compression, encryption=False) else: logger.warning(('<%s> Client version %s does not match allowed server versions %s' % (message['cuuid'], message['version'], self.version))) response = serialize_data({'method': 'BYE REGISTER'}, self.compression, encryption=False) return response
This function simply returns the server version number as a response to the client. Args: message (dict): A dictionary of the autodiscover message from the client. Returns: A JSON string of the "OHAI Client" server response with the server's version number. Examples: >>> response '{"method": "OHAI Client", "version": "1.0"}'
codesearchnet
def join(self, timeout_s=None): if not self.thread: return False self.thread.join(timeout_s) return self.running
Joins, blocking until the interval ends or until the timeout is reached. Args: timeout_s: The time in seconds to wait; defaults to forever. Returns: True if the interval is still running and we reached the timeout.
juraj-google-style
def detokenize_numbers(text: str) -> str: for reg, sub in DETOKENIZE_NUMBERS: text = re.sub(reg, sub, text) return text
Inverts the operation of *tokenize_numbers*. This is replacing ' @,@ ' and ' @.@' by ',' and '.'. Args: text: A string where the number should be detokenized. Returns: A detokenized string. Example: ```python >>> detokenize_numbers("$ 5 @,@ 000 1 @.@ 73 m") '$ 5,000 1.73 m' ```
github-repos
def transform_action(self, obs, func_call, skip_available=False): func_id = func_call.function try: func = actions.FUNCTIONS[func_id] except KeyError: raise ValueError(('Invalid function id: %s.' % func_id)) if (not (skip_available or (func_id in self.available_actions(obs)))): raise ValueError(('Function %s/%s is currently not available' % (func_id, func.name))) if (len(func_call.arguments) != len(func.args)): raise ValueError(('Wrong number of arguments for function: %s, got: %s' % (func, func_call.arguments))) aif = self._agent_interface_format for (t, arg) in zip(func.args, func_call.arguments): if (t.name in ('screen', 'screen2')): sizes = aif.action_dimensions.screen elif (t.name == 'minimap'): sizes = aif.action_dimensions.minimap else: sizes = t.sizes if (len(sizes) != len(arg)): raise ValueError(('Wrong number of values for argument of %s, got: %s' % (func, func_call.arguments))) for (s, a) in zip(sizes, arg): if (not (0 <= a < s)): raise ValueError(('Argument is out of range for %s, got: %s' % (func, func_call.arguments))) kwargs = {type_.name: type_.fn(a) for (type_, a) in zip(func.args, func_call.arguments)} sc2_action = sc_pb.Action() kwargs['action'] = sc2_action kwargs['action_space'] = aif.action_space if func.ability_id: kwargs['ability_id'] = func.ability_id actions.FUNCTIONS[func_id].function_type(**kwargs) return sc2_action
Transform an agent-style action to one that SC2 can consume. Args: obs: a `sc_pb.Observation` from the previous frame. func_call: a `FunctionCall` to be turned into a `sc_pb.Action`. skip_available: If True, assume the action is available. This should only be used for testing or if you expect to make actions that weren't valid at the last observation. Returns: a corresponding `sc_pb.Action`. Raises: ValueError: if the action doesn't pass validation.
codesearchnet
def sg_restore(sess, save_path, category=''):
    # The stray raw-string prefix that leaked from the docstring was removed.
    if not isinstance(category, (tuple, list)):
        category = [category]
    var_list = {}
    for cat in category:
        for t in tf.global_variables():
            if t.name.startswith(cat):
                var_list[t.name[:-2]] = t
    saver = tf.train.Saver(var_list)
    saver.restore(sess, save_path)
Restores previously saved variables. Args: sess: A `Session` to use to restore the parameters. save_path: Path where parameters were previously saved. category: A `String` used to filter variables whose names start with the given category. Returns: Nothing; variables are restored in place.
juraj-google-style
def _mouseUp(x, y, button): if button == 'left': try: _sendMouseEvent(MOUSEEVENTF_LEFTUP, x, y) except (PermissionError, OSError): pass elif button == 'middle': try: _sendMouseEvent(MOUSEEVENTF_MIDDLEUP, x, y) except (PermissionError, OSError): pass elif button == 'right': try: _sendMouseEvent(MOUSEEVENTF_RIGHTUP, x, y) except (PermissionError, OSError): pass else: assert False, "button argument not in ('left', 'middle', 'right')"
Send the mouse up event to Windows by calling the mouse_event() win32 function. Args: x (int): The x position of the mouse event. y (int): The y position of the mouse event. button (str): The mouse button, either 'left', 'middle', or 'right' Returns: None
juraj-google-style
def perform_check(self, env: env_tools.PreparedEnv, verbose: bool) -> Tuple[bool, str]:
    # Abstract hook with no body in the source; concrete checks must override it.
    raise NotImplementedError()
Evaluates the status check and returns a pass/fail with message. Args: env: Describes a prepared python 3 environment in which to run. verbose: When set, more progress output is produced. Returns: A tuple containing a pass/fail boolean and then a details message.
juraj-google-style
def get_reduced_symbols(symbols): reduced_symbols = [] for ss in symbols: if not (ss in reduced_symbols): reduced_symbols.append(ss) return reduced_symbols
Reduces an expanded list of symbols. Args: symbols: list containing chemical symbols, each appearing as often as the atom appears in the structure Returns: reduced_symbols: each symbol appears only once, in order of first occurrence
juraj-google-style
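A one-line illustration of get_reduced_symbols above:

# Duplicates collapse to their first occurrence; order is preserved.
get_reduced_symbols(['Fe', 'O', 'Fe', 'O', 'O'])  # -> ['Fe', 'O']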
def compute_memory_contents_under_schedule(self, schedule): out_degree = self._compute_initial_out_degree() curr_memory_contents = set() memory_contents_for_each_operation = [] for operation_id in schedule: operation_name = self._operations[operation_id].name for output_name in self.get_operation_output_names(operation_name): curr_memory_contents.add(output_name) memory_contents_for_each_operation.append(frozenset(curr_memory_contents)) for output_name in self.get_operation_output_names(operation_name): if (out_degree[output_name] == 0): curr_memory_contents.remove(output_name) for input_name in self.get_operation_input_names(operation_name): out_degree[input_name] -= 1 if (out_degree[input_name] == 0): curr_memory_contents.remove(input_name) return memory_contents_for_each_operation
The in-memory tensors present when executing each operation in schedule. Simulates running operations in the order given by a schedule. Keeps track of the tensors in memory at every point in time, and outputs a list (one entry for each point in time) of all sets of all memory contents (i.e. a frozenset of strings) ever seen in this execution. It is assumed (but not checked) that schedule is a valid topological sort of the operations in this graph. Args: schedule: A list of integer ids; the order to run operations in. Returns: a list of frozenset of strings, where the ith entry describes the tensors in memory when executing operation i (where schedule[i] is an index into get_all_operation_names()).
codesearchnet
def placeholder_value(self, placeholder_context): if placeholder_context.unnest_only: return self component_placeholders = nest.map_structure(lambda x: x.placeholder_value(placeholder_context), self._component_specs) return self._from_components(component_placeholders)
Value used for tracing a function signature with this TraceType. WARNING: Do not override. Args: placeholder_context: A class container for context information when creating a placeholder value. Returns: A `CompositeTensor` placeholder whose components are recursively composed of placeholders themselves.
github-repos
def roll(x, shift, axis=None): if any_symbolic_tensors((x,)): return Roll(shift, axis=axis).symbolic_call(x) return backend.numpy.roll(x, shift, axis=axis)
Roll tensor elements along a given axis. Elements that roll beyond the last position are re-introduced at the first. Args: x: Input tensor. shift: The number of places by which elements are shifted. axis: The axis along which elements are shifted. By default, the array is flattened before shifting, after which the original shape is restored. Returns: Output tensor.
github-repos
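A usage sketch for roll above, assuming a NumPy-compatible backend:

import numpy as np

x = np.array([1, 2, 3, 4, 5])
roll(x, shift=2)   # -> [4, 5, 1, 2, 3]
roll(x, shift=-1)  # -> [2, 3, 4, 5, 1]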
def get_dialect_name(mixed: Union[SQLCompiler, Engine, Dialect]) -> str: dialect = get_dialect(mixed) return dialect.name
Finds the name of the SQLAlchemy dialect in use. Args: mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or :class:`Dialect` object Returns: the SQLAlchemy dialect name being used
juraj-google-style
def send_worker_queue_message(self, *, batch_id, job_name, entry_point, worker_args, retry_count=0): try: job_id = str(uuid4()) self.job_queue.send_message(MessageBody=json.dumps({'batch_id': batch_id, 'job_id': job_id, 'job_name': job_name, 'entry_point': entry_point, 'worker_args': worker_args}), MessageDeduplicationId=job_id, MessageGroupId=batch_id, MessageAttributes={'RetryCount': {'StringValue': str(retry_count), 'DataType': 'Number'}}) if (retry_count == 0): job = SchedulerJob() job.job_id = job_id job.batch_id = batch_id job.status = SchedulerStatus.PENDING job.data = worker_args db.session.add(job) db.session.commit() except: self.log.exception('Error when processing worker task')
Send a message to the `worker_queue` for a worker to execute the requested job Args: batch_id (`str`): Unique ID of the batch the job belongs to job_name (`str`): Non-unique ID of the job. This is used to ensure that the same job is only scheduled a single time per batch entry_point (`dict`): A dictionary providing the entry point information for the worker to load the class worker_args (`dict`): A dictionary with the arguments required by the worker class (if any, can be an empty dictionary) retry_count (`int`): The number of times this one job has been attempted to be executed. If a job fails to execute after 3 retries it will be marked as failed Returns: `None`
codesearchnet
def __init__(self, maximum_number_of_items=50000): super(_EventSourceHeap, self).__init__() self._heap = [] self._maximum_number_of_items = maximum_number_of_items
Initializes an event source heap. Args: maximum_number_of_items (Optional[int]): maximum number of items in the heap.
juraj-google-style
def _make_tensor_slice_spec(slice_spec, use_constant=True): def make_piece_scalar(piece): if isinstance(piece, int): scalar = constant_op.constant(piece) if use_constant: return scalar else: return array_ops.placeholder_with_default(scalar, []) elif isinstance(piece, slice): return slice(make_piece_scalar(piece.start), make_piece_scalar(piece.stop), make_piece_scalar(piece.step)) else: return piece if isinstance(slice_spec, tuple): return tuple((make_piece_scalar(piece) for piece in slice_spec)) else: return make_piece_scalar(slice_spec)
Wraps all integers in an extended slice spec w/ a tensor. This function is used to help test slicing when the slice spec contains tensors, rather than integers. Args: slice_spec: The extended slice spec. use_constant: If true, then wrap each integer with a tf.constant. If false, then wrap each integer with a tf.placeholder. Returns: A copy of slice_spec, but with each integer i replaced with tf.constant(i).
github-repos
def kron(*matrices: np.ndarray) -> np.ndarray: product = np.eye(1) for m in matrices: product = np.kron(product, m) return np.array(product)
Computes the kronecker product of a sequence of matrices. A *args version of lambda args: functools.reduce(np.kron, args). Args: *matrices: The matrices and controls to combine with the kronecker product. Returns: The resulting matrix.
codesearchnet
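A quick check of kron above using two 2x2 matrices:

import numpy as np

I = np.eye(2)
X = np.array([[0, 1], [1, 0]])
kron(I, X).shape  # -> (4, 4)
kron()            # no arguments: the 1x1 identity [[1.]]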
def _send_join_group_request(self): if self.coordinator_unknown(): e = Errors.GroupCoordinatorNotAvailableError(self.coordinator_id) return Future().failure(e) elif (not self._client.ready(self.coordinator_id, metadata_priority=False)): e = Errors.NodeNotReadyError(self.coordinator_id) return Future().failure(e) log.info('(Re-)joining group %s', self.group_id) member_metadata = [(protocol, (metadata if isinstance(metadata, bytes) else metadata.encode())) for (protocol, metadata) in self.group_protocols()] if (self.config['api_version'] < (0, 9)): raise Errors.KafkaError('JoinGroupRequest api requires 0.9+ brokers') elif ((0, 9) <= self.config['api_version'] < (0, 10, 1)): request = JoinGroupRequest[0](self.group_id, self.config['session_timeout_ms'], self._generation.member_id, self.protocol_type(), member_metadata) elif ((0, 10, 1) <= self.config['api_version'] < (0, 11, 0)): request = JoinGroupRequest[1](self.group_id, self.config['session_timeout_ms'], self.config['max_poll_interval_ms'], self._generation.member_id, self.protocol_type(), member_metadata) else: request = JoinGroupRequest[2](self.group_id, self.config['session_timeout_ms'], self.config['max_poll_interval_ms'], self._generation.member_id, self.protocol_type(), member_metadata) log.debug('Sending JoinGroup (%s) to coordinator %s', request, self.coordinator_id) future = Future() _f = self._client.send(self.coordinator_id, request) _f.add_callback(self._handle_join_group_response, future, time.time()) _f.add_errback(self._failed_request, self.coordinator_id, request, future) return future
Join the group and return the assignment for the next generation. This function handles both JoinGroup and SyncGroup, delegating to :meth:`._perform_assignment` if elected leader by the coordinator. Returns: Future: resolves to the encoded-bytes assignment returned from the group leader
codesearchnet
def HashFilePath(self, path, byte_count): with open(path, 'rb') as fd: self.HashFile(fd, byte_count)
Updates underlying hashers with file on a given path. Args: path: A path to the file that is going to be fed to the hashers. byte_count: The maximum number of bytes that are going to be processed.
codesearchnet
def on_options(self, req, resp, **kwargs): resp.set_header('Allow', ', '.join(self.allowed_methods())) resp.body = json.dumps(self.describe(req, resp)) resp.content_type = 'application/json'
Respond with JSON formatted resource description on OPTIONS request. Args: req (falcon.Request): Request object. resp (falcon.Response): Response object. kwargs (dict): Dictionary of values created by falcon from resource uri template. Returns: None .. versionchanged:: 0.2.0 Default ``OPTIONS`` responses include ``Allow`` header with list of allowed HTTP methods.
codesearchnet
def generate_sigproc_header(f): header_string = b'' header_string += to_sigproc_keyword(b'HEADER_START') for keyword in f.header.keys(): if (keyword == b'src_raj'): header_string += (to_sigproc_keyword(b'src_raj') + to_sigproc_angle(f.header[b'src_raj'])) elif (keyword == b'src_dej'): header_string += (to_sigproc_keyword(b'src_dej') + to_sigproc_angle(f.header[b'src_dej'])) elif ((keyword == b'az_start') or (keyword == b'za_start')): header_string += (to_sigproc_keyword(keyword) + np.float64(f.header[keyword]).tostring()) elif (keyword not in header_keyword_types.keys()): pass else: header_string += to_sigproc_keyword(keyword, f.header[keyword]) header_string += to_sigproc_keyword(b'HEADER_END') return header_string
Generate a serialized sigproc header which can be written to disk. Args: f (Filterbank object): Filterbank object for which to generate header Returns: header_str (str): Serialized string corresponding to header
codesearchnet
def get_unfrozen_copy(values): if isinstance(values, (frozendict, dict)): return {key: get_unfrozen_copy(value) for key, value in values.items()} elif isinstance(values, (list, tuple)): return [get_unfrozen_copy(value) for value in values] return values
Recursively convert tuples inside `values` into lists, and frozendicts into dicts. Args: values (frozendict/tuple): the frozendict/tuple. Returns: values (dict/list): the unfrozen copy.
juraj-google-style
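A usage sketch for get_unfrozen_copy above; this assumes the frozendict type matches the one the module imports:

from frozendict import frozendict  # assumed to match the module's import

frozen = frozendict({'a': (1, 2), 'b': frozendict({'c': 3})})
get_unfrozen_copy(frozen)  # -> {'a': [1, 2], 'b': {'c': 3}}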
def plot_seebeck_mu(self, temp=600, output='eig', xlim=None): import matplotlib.pyplot as plt plt.figure(figsize=(9, 7)) seebeck = self._bz.get_seebeck(output=output, doping_levels=False)[temp] plt.plot(self._bz.mu_steps, seebeck, linewidth=3.0) self._plot_bg_limits() self._plot_doping(temp) if (output == 'eig'): plt.legend(['S$_1$', 'S$_2$', 'S$_3$']) if (xlim is None): plt.xlim((- 0.5), (self._bz.gap + 0.5)) else: plt.xlim(xlim[0], xlim[1]) plt.ylabel('Seebeck \n coefficient ($\\mu$V/K)', fontsize=30.0) plt.xlabel('E-E$_f$ (eV)', fontsize=30) plt.xticks(fontsize=25) plt.yticks(fontsize=25) plt.tight_layout() return plt
Plot the Seebeck coefficient as a function of Fermi level Args: temp: the temperature output: output mode passed through to get_seebeck; 'eig' plots the three eigenvalues of the Seebeck tensor xlim: a list of min and max fermi energy; by default (0, and band gap) Returns: a matplotlib object
codesearchnet
def _environment_variables(**kwargs): user_agent = os.getenv('USER_AGENT') if user_agent is not None: kwargs['user_agent'] = user_agent preprefix = os.getenv('PREPREFIX') if preprefix is not None: kwargs['preprefix'] = preprefix return kwargs
Overwrite keyword arguments with environment variables Args: **kwargs: See below user_agent (str): User agent string. Returns: kwargs: Changed keyword arguments
juraj-google-style
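A usage sketch for _environment_variables above; the environment value is hypothetical:

import os

os.environ['USER_AGENT'] = 'my-agent/1.0'  # hypothetical value
_environment_variables(timeout=30)
# -> {'timeout': 30, 'user_agent': 'my-agent/1.0'}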