diff --git "a/test.jsonl" "b/test.jsonl" new file mode 100644--- /dev/null +++ "b/test.jsonl" @@ -0,0 +1,1000 @@ +{"repo": "temporian", "function": "def milliseconds(value: Union[int, float]) -> Duration:\n return float(value / 1000)", "docstring": "Converts input value from milliseconds to a `Duration` in seconds.\n\nExample:\n ```python\n >>> duration = tp.duration.milliseconds(250)\n >>> duration\n 0.25\n\n >>> # Usage in a window operation\n >>> a = tp.event_set(\n ... timestamps=[0.5, 1.0, 1.2],\n ... features={\"f1\": [1, 5, -5]}\n ... )\n >>> a.moving_sum(window_length=duration)\n indexes: ...\n timestamps: [0.5 1. 1.2]\n 'f1': [1 5 0]\n ...\n\n ```\n\nArgs:\n value: Number of milliseconds.\n\nReturns:\n Equivalent number of seconds."} +{"repo": "tensorflow", "function": "def _flip(image, flip_index, scope_name):\n with ops.name_scope(None, scope_name, [image]):\n image = ops.convert_to_tensor(image, name='image')\n image = _AssertAtLeast3DImage(image)\n shape = image.get_shape()\n\n def f_rank3():\n return fix_image_flip_shape(image, array_ops.reverse(image, [flip_index]))\n\n def f_rank4():\n return array_ops.reverse(image, [flip_index + 1])\n if shape.ndims is None:\n rank = array_ops.rank(image)\n return tf_cond.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)\n elif shape.ndims == 3:\n return f_rank3()\n elif shape.ndims == 4:\n return f_rank4()\n else:\n raise ValueError(\"'image' (shape %s)must have either 3 or 4 dimensions.\" % shape)", "docstring": "Flip an image either horizontally or vertically.\n\nOutputs the contents of `image` flipped along the dimension `flip_index`.\n\nSee also `reverse()`.\n\nArgs:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n flip_index: 0 For vertical, 1 for horizontal.\n scope_name: string, scope name.\n\nReturns:\n A `Tensor` of the same type and shape as `image`.\n\nRaises:\n ValueError: if the shape of `image` not supported."} +{"repo": "tensorflow", "function": "def ZerosLikeForExit(self, val):\n val_shape = val.get_shape()\n forward_ctxt = val.op._get_control_flow_context()\n outer_forward_ctxt = forward_ctxt.outer_context\n if outer_forward_ctxt:\n outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()\n outer_grad_state = None\n if outer_forward_ctxt:\n outer_grad_state = self._map.get(outer_forward_ctxt)\n if outer_grad_state:\n if val_shape.is_fully_defined():\n outer_grad_state.grad_context.Enter()\n result = array_ops.zeros(val_shape.dims, val.dtype)\n outer_grad_state.grad_context.Exit()\n else:\n forward_ctxt.outer_context.Enter()\n shape = array_ops.shape_internal(val, optimize=False)\n forward_ctxt.outer_context.Exit()\n history_shape = outer_grad_state.AddForwardAccumulator(shape)\n outer_grad_ctxt = outer_grad_state.grad_context\n outer_grad_ctxt.Enter()\n real_shape = outer_grad_state.AddBackpropAccumulatedValue(history_shape, shape)\n result = array_ops.zeros(real_shape, val.dtype)\n outer_grad_ctxt.Exit()\n elif val_shape.is_fully_defined():\n result = array_ops.zeros(val_shape.dims, val.dtype)\n else:\n result = array_ops.zeros_like(val, optimize=False)\n return result", "docstring": "Create zeros_like gradient for a loop exit.\n\nIf the result of a loop variable is not used but is involved in\ncomputing the result of some needed loop variable, we create a\nzero-valued tensor that is fed as gradient for the Exit node of that\nloop variable. 
Note that val.op is an Exit, and this method must be\ncalled in the control flow context where gradients() is called.\n\nArgs:\n val: The output tensor of an Exit op.\n\nReturns:\n A zero tensor of the same shape of val."} +{"repo": "transformers", "function": "def pad_image(self, image: np.ndarray, size_divisibility: int=32, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n height, width = get_image_size(image, input_data_format)\n pad_height = 0 if height % size_divisibility == 0 else size_divisibility - height % size_divisibility\n pad_width = 0 if width % size_divisibility == 0 else size_divisibility - width % size_divisibility\n if pad_width + pad_height > 0:\n padding = ((0, pad_height), (0, pad_width))\n image = pad(image, padding=padding, data_format=data_format, input_data_format=input_data_format)\n if data_format is not None:\n image = to_channel_dimension_format(image, data_format, input_data_format)\n return image", "docstring": "Args:\n image (`np.ndarray`):\n Image to pad.\n size_divisibility (`int`, *optional*, defaults to 32):\n The width and height of the image will be padded to be divisible by this number.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format."} +{"repo": "tensorflow", "function": "def deprecated_internal_set_learning_phase(value):\n global _GRAPH_LEARNING_PHASES\n if value not in {0, 1}:\n raise ValueError('Expected learning phase to be 0 or 1.')\n with ops.init_scope():\n if context.executing_eagerly():\n _DUMMY_EAGER_GRAPH.learning_phase_is_set = True\n _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value\n _GRAPH_LEARNING_PHASES[get_graph()] = value", "docstring": "A deprecated internal implementation of set_learning_phase.\n\nThis method is an internal-only version of `set_learning_phase` that\ndoes not raise a deprecation error. It is required because\nsaved_model needs to keep working with user code that uses the deprecated\nlearning phase methods until those APIs are fully removed from the public API.\n\nSpecifically SavedModel saving needs to make sure the learning phase is 0\nduring tracing even if users overwrote it to a different value.\n\nBut, we don't want to raise deprecation warnings for users when savedmodel\nsets learning phase just for compatibility with code that relied on\nexplicitly setting the learning phase for other values.\n\nArgs:\n value: Learning phase value, either 0 or 1 (integers). 
0 = test, 1 = train\n\nRaises:\n ValueError: if `value` is neither `0` nor `1`."} +{"repo": "tensorflow", "function": "def _calculate_acceptance_probs_with_mixing(initial_probs, target_probs):\n ratio_l = _get_target_to_initial_ratio(initial_probs, target_probs)\n max_ratio = math_ops.reduce_max(ratio_l)\n min_ratio = math_ops.reduce_min(ratio_l)\n m = min_ratio\n a_i = (ratio_l - m) / (max_ratio - m)\n return (a_i, m)", "docstring": "Calculates the acceptance probabilities and mixing ratio.\n\nIn this case, we assume that we can *either* sample from the original data\ndistribution with probability `m`, or sample from a reshaped distribution\nthat comes from rejection sampling on the original distribution. This\nrejection sampling is done on a per-class basis, with `a_i` representing the\nprobability of accepting data from class `i`.\n\nThis method is based on solving the following analysis for the reshaped\ndistribution:\n\nLet F be the probability of a rejection (on any example).\nLet p_i be the proportion of examples in the data in class i (init_probs)\nLet a_i is the rate the rejection sampler should *accept* class i\nLet t_i is the target proportion in the minibatches for class i (target_probs)\n\n```\nF = sum_i(p_i * (1-a_i))\n = 1 - sum_i(p_i * a_i) using sum_i(p_i) = 1\n```\n\nAn example with class `i` will be accepted if `k` rejections occur, then an\nexample with class `i` is seen by the rejector, and it is accepted. This can\nbe written as follows:\n\n```\nt_i = sum_k=0^inf(F^k * p_i * a_i)\n = p_i * a_j / (1 - F) using geometric series identity, since 0 <= F < 1\n = p_i * a_i / sum_j(p_j * a_j) using F from above\n```\n\nNote that the following constraints hold:\n```\n0 <= p_i <= 1, sum_i(p_i) = 1\n0 <= a_i <= 1\n0 <= t_i <= 1, sum_i(t_i) = 1\n```\n\nA solution for a_i in terms of the other variables is the following:\n ```a_i = (t_i / p_i) / max_i[t_i / p_i]```\n\nIf we try to minimize the amount of data rejected, we get the following:\n\nM_max = max_i [ t_i / p_i ]\nM_min = min_i [ t_i / p_i ]\n\nThe desired probability of accepting data if it comes from class `i`:\n\na_i = (t_i/p_i - m) / (M_max - m)\n\nThe desired probability of pulling a data element from the original dataset,\nrather than the filtered one:\n\nm = M_min\n\nArgs:\n initial_probs: A Tensor of the initial probability distribution, given or\n estimated.\n target_probs: A Tensor of the corresponding classes.\n\nReturns:\n (A 1D Tensor with the per-class acceptance probabilities, the desired\n probability of pull from the original distribution.)"} +{"repo": "keras", "function": "def remat(f):\n return backend.core.remat(f)", "docstring": "Applies rematerialization to a function or layer for memory optimization.\n\nRematerialization is a memory optimization technique that trades off\ncomputation for memory. Instead of storing intermediate results\n(e.g. activations) for backpropagation, they are recomputed during the\nbackward pass. This reduces peak memory usage at the cost of increased\ncomputation time, allowing the training of larger models or using larger\nbatch sizes within the same memory constraints.\n\nArgs:\n f: A callable function, to which rematerialization is\n applied. This is typically a computationally expensive operation\n where intermediate states can be recomputed instead of stored.\n\nReturns:\n A wrapped function that applies rematerialization. 
The returned\n function defines a custom gradient, ensuring that during the backward\n pass, the forward computation is recomputed as needed.\n\nExample:\n\n```python\nfrom keras import Model\nclass CustomRematLayer(layers.Layer):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.remat_function = remat(self.intermediate_function)\n\n def intermediate_function(self, x):\n for _ in range(2):\n x = x + x * 0.1 # Simple scaled transformation\n return x\n\n def call(self, inputs):\n return self.remat_function(inputs)\n\n# Define a simple model using the custom layer\ninputs = layers.Input(shape=(4,))\nx = layers.Dense(4, activation=\"relu\")(inputs)\nx = CustomRematLayer()(x) # Custom layer with rematerialization\noutputs = layers.Dense(1)(x)\n\n# Create and compile the model\nmodel = Model(inputs=inputs, outputs=outputs)\nmodel.compile(optimizer=\"sgd\", loss=\"mse\")\n```"} +{"repo": "tensorflow", "function": "def __init__(self, reflection_axis, is_non_singular=None, is_self_adjoint=None, is_positive_definite=None, is_square=None, name='LinearOperatorHouseholder'):\n parameters = dict(reflection_axis=reflection_axis, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, name=name)\n with ops.name_scope(name, values=[reflection_axis]):\n self._reflection_axis = linear_operator_util.convert_nonref_to_tensor(reflection_axis, name='reflection_axis')\n self._check_reflection_axis(self._reflection_axis)\n if is_self_adjoint is False:\n raise ValueError('A Householder operator is always self adjoint.')\n else:\n is_self_adjoint = True\n if is_positive_definite is True:\n raise ValueError('A Householder operator is always non-positive definite.')\n else:\n is_positive_definite = False\n if is_square is False:\n raise ValueError('A Householder operator is always square.')\n is_square = True\n super(LinearOperatorHouseholder, self).__init__(dtype=self._reflection_axis.dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)", "docstring": "Initialize a `LinearOperatorHouseholder`.\n\nArgs:\n reflection_axis: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.\n The vector defining the hyperplane to reflect about.\n Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,\n `complex128`.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose. This is autoset to true\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. 
See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n This is autoset to false.\n is_square: Expect that this operator acts like square [batch] matrices.\n This is autoset to true.\n name: A name for this `LinearOperator`.\n\nRaises:\n ValueError: `is_self_adjoint` is not `True`, `is_positive_definite` is\n not `False` or `is_square` is not `True`."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A LUKE sequence has the following format:\n\n- single sequence: ` X `\n- pair of sequences: ` A B `\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "transformers", "function": "class GenerateBeamEncoderDecoderOutput(ModelOutput):\n sequences: torch.LongTensor\n sequences_scores: Optional[torch.FloatTensor] = None\n scores: Optional[Tuple[torch.FloatTensor]] = None\n logits: Optional[Tuple[torch.FloatTensor]] = None\n beam_indices: Optional[torch.LongTensor] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None", "docstring": "Outputs of encoder-decoder generation models, when using beam methods.\n\nArgs:\n sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`):\n The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter\n if all batches finished early due to the `eos_token_id`.\n sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True`):\n Final beam scores of the generated `sequences`.\n scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True`):\n Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting\n of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam.\n Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token),\n with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.\n logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True`):\n Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)\n at each generation step. 
Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for\n each generated token), with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.\n beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True`):\n Beam indices of generated token id at each generation step. `torch.LongTensor` of shape\n `(batch_size*num_return_sequences, sequence_length)`.\n encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads,\n sequence_length, sequence_length)`.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.\n decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):\n Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of\n `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length,\n sequence_length)`.\n cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True`):\n Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of\n `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.\n decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True`):\n Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of\n `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.\n past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True`):\n Returns the model cache, used to speed up decoding. Different models have a different cache format, check\n the model's documentation. 
Usually, a [`~cache_utils.Cache`] instance."} +{"repo": "tensorflow", "function": "def __rmod__(self, other):\n other = as_dimension(other)\n return other % self", "docstring": "Returns `other` modulo `self`.\n\nArgs:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\n A Dimension whose value is `other` modulo `self`."} +{"repo": "fhir-py", "function": "def materialize_value_set_expansion(self, urls: Iterable[str], expander: Union[terminology_service_client.TerminologyServiceClient, value_sets.ValueSetResolver], terminology_service_url: Optional[str]=None, batch_size: int=500) -> None:\n if terminology_service_url is not None and (not isinstance(expander, terminology_service_client.TerminologyServiceClient)):\n raise TypeError('`terminology_service_url` can only be given if `expander` is a TerminologyServiceClient')\n if terminology_service_url is not None and isinstance(expander, terminology_service_client.TerminologyServiceClient):\n expanded_value_sets = (expander.expand_value_set_url_using_service(url, terminology_service_url) for url in urls)\n else:\n expanded_value_sets = (expander.expand_value_set_url(url) for url in urls)\n self.materialize_value_sets(expanded_value_sets, batch_size=batch_size)", "docstring": "Expands a sequence of value set and materializes their expanded codes.\n\nExpands the given value set URLs to obtain the set of codes they describe.\nThen writes these expanded codes into the database\nnamed after the `value_set_codes_table` provided at class initialization.\nBuilds a valueset_codes table as described by\nhttps://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md#valueset-support\n\nThe table will be created if it does not already exist.\n\nThe function will avoid inserting duplicate rows if some of the codes are\nalready present in the given table. It will not attempt to perform an\n'upsert' or modify any existing rows.\n\nProvided as a utility function for user convenience. If `urls` is a large\nset of URLs, callers may prefer to use multi-processing and/or\nmulti-threading to perform expansion and table insertion of the URLs\nconcurrently. This function performs all expansions and table insertions\nserially.\n\nArgs:\n urls: The urls for value sets to expand and materialize.\n expander: The ValueSetResolver or TerminologyServiceClient to perform\n value set expansion. A ValueSetResolver may be used to attempt to avoid\n some network requests by expanding value sets locally. A\n TerminologyServiceClient will use external terminology services to\n perform all value set expansions.\n terminology_service_url: If `expander` is a TerminologyServiceClient, the\n URL of the terminology service to use when expanding value set URLs. 
If\n not given, the client will attempt to infer the correct terminology\n service to use for each value set URL based on its domain.\n batch_size: The maximum number of rows to insert in a single query.\n\nRaises:\n TypeError: If a `terminology_service_url` is given but `expander` is not a\n TerminologyServiceClient."} +{"repo": "tensorflow", "function": "def take_while(self, predicate, name=None) -> 'DatasetV2':\n from tensorflow.python.data.ops import take_while_op\n return take_while_op._take_while(self, predicate, name=name)", "docstring": "A transformation that stops dataset iteration based on a `predicate`.\n\n>>> dataset = tf.data.Dataset.range(10)\n>>> dataset = dataset.take_while(lambda x: x < 5)\n>>> [a.item() for a in dataset.as_numpy_iterator()]\n[0, 1, 2, 3, 4]\n\nArgs:\n predicate: A function that maps a nested structure of tensors (having\n shapes and types defined by `self.output_shapes` and\n `self.output_types`) to a scalar `tf.bool` tensor.\n name: (Optional.) A name for the tf.data operation.\n\nReturns:\n A new `Dataset` with the transformation applied as described above."} +{"repo": "tensorflow", "function": "def less_equal(x, y):\n return math_ops.less_equal(x, y)", "docstring": "Element-wise truth value of (x <= y).\n\nArgs:\n x: Tensor or variable.\n y: Tensor or variable.\n\nReturns:\n A bool tensor."} +{"repo": "qhbm-library", "function": "def __init__(self, pqc: cirq.Circuit, initializer: tf.keras.initializers.Initializer=tf.keras.initializers.RandomUniform(0, 2), name: Union[None, str]=None):\n raw_symbol_names = list(sorted(tfq.util.get_circuit_symbols(pqc)))\n symbol_names = tf.constant([str(x) for x in raw_symbol_names], dtype=tf.string)\n values = [tf.Variable(initializer(shape=[len(raw_symbol_names)]))]\n value_layers = [[]]\n super().__init__(tfq.convert_to_tensor([pqc]), pqc.all_qubits(), symbol_names, values, value_layers)", "docstring": "Initializes a DirectQuantumCircuit.\n\nArgs:\n pqc: Representation of a parameterized quantum circuit.\n initializer: A `tf.keras.initializers.Initializer` which specifies how to\n initialize the values of the parameters in `circuit`. The default\n initializer assumes parameters of gates are exponents, so that one full\n period is covered by the parameter range 0 to 2.\n name: Optional name for the model."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. 
A ConvBERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "tensorflow", "function": "def reduce(self, initial_state, reduce_func):", "docstring": "Reduces this iterable object to a single element.\n\nThe transformation calls `reduce_func` successively on each element.\nThe `initial_state` argument is used for the initial state and the final\nstate is returned as the result.\n\nArgs:\n initial_state: An element representing the initial state of the\n reduction.\n reduce_func: A function that maps `(old_state, input_element)` to\n `new_state`. The structure of `new_state` must match the structure of\n `old_state`. For the first element, `old_state` is `initial_state`.\n\nReturns:\n The final state of the transformation."} +{"repo": "transformers", "function": "class LlamaTokenizerFast(PreTrainedTokenizerFast):\n vocab_files_names = VOCAB_FILES_NAMES\n slow_tokenizer_class = LlamaTokenizer\n padding_side = 'left'\n model_input_names = ['input_ids', 'attention_mask']\n\n def __init__(self, vocab_file=None, tokenizer_file=None, clean_up_tokenization_spaces=False, unk_token='', bos_token='', eos_token='', add_bos_token=True, add_eos_token=False, use_default_system_prompt=False, legacy=None, add_prefix_space=None, **kwargs):\n if legacy is None:\n logger.warning_once(f'You are using the default legacy behaviour of the {self.__class__}. This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. 
This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file you can ignore this message.')\n legacy = True\n self.legacy = legacy\n if add_prefix_space is not None:\n kwargs['from_slow'] = True\n super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, clean_up_tokenization_spaces=clean_up_tokenization_spaces, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_bos_token=add_bos_token, add_eos_token=add_eos_token, use_default_system_prompt=use_default_system_prompt, add_prefix_space=add_prefix_space, legacy=legacy, **kwargs)\n self._add_bos_token = add_bos_token\n self._add_eos_token = add_eos_token\n self.update_post_processor()\n self.use_default_system_prompt = use_default_system_prompt\n self.vocab_file = vocab_file\n\n def update_post_processor(self):\n \"\"\"\n Updates the underlying post processor with the current `bos_token` and `eos_token`.\n \"\"\"\n bos = self.bos_token\n bos_token_id = self.bos_token_id\n if bos is None and self.add_bos_token:\n raise ValueError('add_bos_token = True but bos_token = None')\n eos = self.eos_token\n eos_token_id = self.eos_token_id\n if eos is None and self.add_eos_token:\n raise ValueError('add_eos_token = True but eos_token = None')\n single = f'{(bos + ':0 ' if self.add_bos_token else '')}$A:0{(' ' + eos + ':0' if self.add_eos_token else '')}'\n pair = f'{single}{(' ' + bos + ':1' if self.add_bos_token else '')} $B:1{(' ' + eos + ':1' if self.add_eos_token else '')}'\n special_tokens = []\n if self.add_bos_token:\n special_tokens.append((bos, bos_token_id))\n if self.add_eos_token:\n special_tokens.append((eos, eos_token_id))\n self._tokenizer.post_processor = processors.TemplateProcessing(single=single, pair=pair, special_tokens=special_tokens)\n\n @property\n def add_eos_token(self):\n return self._add_eos_token\n\n @property\n def add_bos_token(self):\n return self._add_bos_token\n\n @add_eos_token.setter\n def add_eos_token(self, value):\n self._add_eos_token = value\n self.update_post_processor()\n\n @add_bos_token.setter\n def add_bos_token(self, value):\n self._add_bos_token = value\n self.update_post_processor()\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n if not self.can_save_slow_tokenizer:\n raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')\n if not os.path.isdir(save_directory):\n logger.error(f'Vocabulary path ({save_directory}) should be a directory')\n return\n out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n return (out_vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n output = bos_token_id + token_ids_0 + eos_token_id\n if token_ids_1 is not None:\n output = output + bos_token_id + token_ids_1 + eos_token_id\n return output", "docstring": "Construct a Llama tokenizer. 
Based on byte-level Byte-Pair-Encoding.\n\nThis uses notably ByteFallback and no normalization.\n\n```python\n>>> from transformers import LlamaTokenizerFast\n\n>>> tokenizer = LlamaTokenizerFast.from_pretrained(\"hf-internal-testing/llama-tokenizer\")\n>>> tokenizer.encode(\"Hello this is a test\")\n[1, 15043, 445, 338, 263, 1243]\n```\n\nIf you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or\ncall `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the\nvalues of the first token and final token of an encoded sequence will not be correct). For more details, checkout\n[post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation.\n\n\nThis tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\nrefer to this superclass for more information regarding those methods.\n\nArgs:\n vocab_file (`str`, *optional*):\n [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that\n contains the vocabulary necessary to instantiate a tokenizer.\n tokenizer_file (`str`, *optional*):\n [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that\n contains everything needed to load the tokenizer.\n clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):\n Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like\n extra spaces.\n unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"\"`):\n The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.\n eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `\"\"`):\n The end of sequence token.\n add_bos_token (`bool`, *optional*, defaults to `True`):\n Whether or not to add an `bos_token` at the start of sequences.\n add_eos_token (`bool`, *optional*, defaults to `False`):\n Whether or not to add an `eos_token` at the end of sequences.\n use_default_system_prompt (`bool`, *optional*, defaults to `False`):\n Whether or not the default system prompt for Llama should be used\n legacy (`bool`, *optional*):\n Whether or not the `legacy` behavior of the tokenizer should be used. 
Legacy is before the merge of #24622\n and #25224 which includes fixes to properly handle tokens that appear after special tokens.\n Make sure to also set `from_slow` to `True`.\n A simple example:\n\n - `legacy=True`:\n ```python\n >>> from transformers import LlamaTokenizerFast\n\n >>> tokenizer = LlamaTokenizerFast.from_pretrained(\"huggyllama/llama-7b\", legacy=True, from_slow=True)\n >>> tokenizer.encode(\"Hello .\") # 869 is '\u2581.'\n [1, 15043, 29871, 1, 869]\n ```\n - `legacy=False`:\n ```python\n >>> from transformers import LlamaTokenizerFast\n\n >>> tokenizer = LlamaTokenizerFast.from_pretrained(\"huggyllama/llama-7b\", legacy=False, from_slow=True)\n >>> tokenizer.encode(\"Hello .\") # 29889 is '.'\n [1, 15043, 29871, 1, 29889]\n ```\n Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.\n add_prefix_space (`bool`, *optional*):\n Whether or not the tokenizer should automatically add a prefix space"} +{"repo": "beam", "function": "def _verify_pipeline_uuid(self, pipeline_uuid):\n try:\n uuid.UUID(pipeline_uuid)\n except ValueError as ve:\n raise ValueError(f\"Incorrect pipeline uuid: '{pipeline_uuid}'\") from ve", "docstring": "Verify the received pipeline_uuid format\n\nArgs:\n pipeline_uuid: uuid of the pipeline\n\nReturns:\n If pipeline ID is not verified, will raise an exception"} +{"repo": "tensorflow", "function": "def _in_multi_worker_mode(self):\n strategy = self._distribution_strategy\n if not strategy and distribute_lib.has_strategy():\n strategy = distribute_lib.get_strategy()\n return strategy and strategy.extended._in_multi_worker_mode()", "docstring": "Method to infer if this `Model` is working in multi-worker settings.\n\nMulti-worker training refers to the setup where the training is\ndistributed across multiple workers, as opposed to the case where\nonly a local process performs the training. This function is\nused to infer for example whether or not a distribute coordinator\nshould be run, and thus TensorFlow servers should be started for\ncommunication with other servers in the cluster, or whether or not\nsaving/restoring checkpoints is relevant for preemption fault tolerance.\n\nExperimental. 
Signature and implementation are subject to change.\n\nReturns:\n Whether this model indicates it's working in multi-worker settings."} +{"repo": "tensorflow", "function": "def get_conversion_metadata(model_buffer):\n model_object = flatbuffer_utils.convert_bytearray_to_object(model_buffer)\n if not model_object or not model_object.metadata:\n return None\n for meta in model_object.metadata:\n if meta.name.decode('utf-8') == CONVERSION_METADATA_FIELD_NAME:\n metadata_buf = model_object.buffers[meta.buffer].data.tobytes()\n return conversion_metadata_fb.ConversionMetadataT.InitFromObj(conversion_metadata_fb.ConversionMetadata.GetRootAsConversionMetadata(metadata_buf, 0))\n return None", "docstring": "Read conversion metadata from a tflite model.\n\nArgs:\n model_buffer: A tflite model.\n\nReturns:\n The conversion metadata or None if it is not populated."} +{"repo": "transformers", "function": "def call(self, input_ids: TFModelInputType=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[TFBaseModelOutput]=None, past_key_values: Optional[Tuple[Tuple[tf.Tensor]]]=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:\n if labels is not None:\n labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels)\n use_cache = False\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)\n outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)\n lm_logits = self.bias_layer(lm_logits)\n masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return (masked_lm_loss,) + output if masked_lm_loss is not None else output\n return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", "docstring": "labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. 
Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nReturns:"} +{"repo": "tensorflow", "function": "def constant_value_as_shape(tensor):\n if isinstance(tensor, core.Value):\n return tensor_shape.TensorShape([dim if dim != -1 else None for dim in tensor.numpy()])\n if tensor.get_shape().ndims == 0:\n value = constant_value(tensor)\n if value is None:\n raise ValueError(\"Received a scalar with unknown value as shape; require a statically known scalar with value '-1' to describe an unknown shape.\")\n if value != -1:\n raise ValueError(f\"Received a scalar value '{value}' as shape; require a statically known scalar with value '-1' to describe an unknown shape.\")\n return tensor_shape.unknown_shape()\n shape = tensor.get_shape().with_rank(1)\n if shape == [0]:\n return tensor_shape.TensorShape([])\n elif tensor.op.type == 'Cast':\n pre_cast = constant_value_as_shape(tensor.op.inputs[0])\n if pre_cast.dims is None:\n return pre_cast\n cast_dtype = dtypes.as_dtype(tensor.op.get_attr('DstT'))\n if cast_dtype not in (dtypes.int32, dtypes.int64):\n return tensor_shape.unknown_shape(shape.dims[0].value)\n dest_dtype_shape_array = np.array([x if x is not None else -1 for x in pre_cast.as_list()]).astype(cast_dtype.as_numpy_dtype)\n return tensor_shape.TensorShape([x if x >= 0 else None for x in dest_dtype_shape_array])\n elif tensor.op.type == 'Shape':\n return tensor.op.inputs[0].get_shape()\n elif tensor.op.type == 'Pack':\n ret = tensor_shape.TensorShape([])\n assert tensor.op.get_attr('axis') == 0\n for pack_input in tensor.op.inputs:\n pack_input_val = constant_value(pack_input)\n if pack_input_val is None or pack_input_val < 0:\n new_dim = tensor_shape.Dimension(None)\n else:\n new_dim = tensor_shape.Dimension(pack_input_val)\n ret = ret.concatenate([new_dim])\n return ret\n elif tensor.op.type == 'Concat':\n ret = tensor_shape.TensorShape([])\n for concat_input in tensor.op.inputs[1:]:\n ret = ret.concatenate(constant_value_as_shape(concat_input))\n return ret\n elif tensor.op.type == 'ConcatV2':\n ret = tensor_shape.TensorShape([])\n for concat_input in tensor.op.inputs[:-1]:\n ret = ret.concatenate(constant_value_as_shape(concat_input))\n return ret\n elif tensor.op.type == 'StridedSlice':\n try:\n begin = constant_value(tensor.op.inputs[1])\n end = constant_value(tensor.op.inputs[2])\n strides = constant_value(tensor.op.inputs[3])\n if begin is not None and end is not None and (strides is not None):\n begin = begin[0]\n end = end[0]\n strides = strides[0]\n begin_mask = tensor.op.get_attr('begin_mask')\n if begin_mask == 1:\n begin = None\n end_mask = tensor.op.get_attr('end_mask')\n if end_mask == 1:\n end = None\n ellipsis_mask = tensor.op.get_attr('ellipsis_mask')\n new_axis_mask = tensor.op.get_attr('new_axis_mask')\n shrink_axis_mask = tensor.op.get_attr('shrink_axis_mask')\n valid_attributes = not ellipsis_mask and (not new_axis_mask) and (not shrink_axis_mask) and (not begin_mask or begin_mask == 1) and (not end_mask or end_mask == 1)\n if valid_attributes:\n prev = constant_value_as_shape(tensor.op.inputs[0])\n prev = prev[begin:end:strides]\n ret = tensor_shape.TensorShape(prev)\n return ret\n except ValueError:\n pass\n except TypeError:\n pass\n elif tensor.op.type == 'Placeholder' and tensor.op.graph.building_function and hasattr(tensor.op.graph, 'internal_captures'):\n for i, capture 
in enumerate(tensor.op.graph.internal_captures):\n if capture is tensor:\n external_capture = tensor.op.graph.external_captures[i]\n return constant_value_as_shape(external_capture)\n ret = tensor_shape.unknown_shape(shape.dims[0].value)\n value = constant_value(tensor)\n if value is not None:\n ret = ret.merge_with(tensor_shape.TensorShape([d if d >= 0 else None for d in value]))\n return ret", "docstring": "A version of `constant_value()` that returns a `TensorShape`.\n\nThis version should be used when a constant tensor value is\ninterpreted as a (possibly partial) shape, e.g. in the shape\nfunction for `tf.reshape()`. By explicitly requesting a\n`TensorShape` as the return value, it is possible to represent\nunknown dimensions; by contrast, `constant_value()` is\nall-or-nothing.\n\nArgs:\n tensor: The rank-0 or rank-1 Tensor to be evaluated.\n\nReturns:\n A `TensorShape` based on the constant value of the given `tensor`.\n\nRaises:\n ValueError: If the shape is rank-0 and is not statically known to be -1."} +{"repo": "transformers", "function": "class TimmBackboneConfig(PretrainedConfig):\n model_type = 'timm_backbone'\n\n def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, freeze_batch_norm_2d=False, **kwargs):\n super().__init__(**kwargs)\n self.backbone = backbone\n self.num_channels = num_channels\n self.features_only = features_only\n self.use_pretrained_backbone = use_pretrained_backbone\n self.use_timm_backbone = True\n self.out_indices = out_indices if out_indices is not None else [-1]\n self.freeze_batch_norm_2d = freeze_batch_norm_2d", "docstring": "This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`].\n\nIt is used to instantiate a timm backbone model according to the specified arguments, defining the model.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n backbone (`str`, *optional*):\n The timm checkpoint to load.\n num_channels (`int`, *optional*, defaults to 3):\n The number of input channels.\n features_only (`bool`, *optional*, defaults to `True`):\n Whether to output only the features or also the logits.\n use_pretrained_backbone (`bool`, *optional*, defaults to `True`):\n Whether to use a pretrained backbone.\n out_indices (`List[int]`, *optional*):\n If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how\n many stages the model has). 
Will default to the last stage if unset.\n freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`):\n Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`.\n\nExample:\n```python\n>>> from transformers import TimmBackboneConfig, TimmBackbone\n\n>>> # Initializing a timm backbone\n>>> configuration = TimmBackboneConfig(\"resnet50\")\n\n>>> # Initializing a model from the configuration\n>>> model = TimmBackbone(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def print_layer_summary(layer):\n try:\n output_shape = layer.output_shape\n except AttributeError:\n output_shape = 'multiple'\n except RuntimeError:\n output_shape = '?'\n name = layer.name\n cls_name = layer.__class__.__name__\n if not layer.built and (not getattr(layer, '_is_graph_network', False)):\n params = '0 (unused)'\n else:\n params = layer.count_params()\n fields = [name + ' (' + cls_name + ')', output_shape, params]\n print_row(fields, positions)", "docstring": "Prints a summary for a single layer.\n\nArgs:\n layer: target layer."} +{"repo": "tensorflow", "function": "def __init__(self, logits=None, probs=None, dtype=dtypes.int32, validate_args=False, allow_nan_stats=True, name='Categorical'):\n parameters = dict(locals())\n with ops.name_scope(name, values=[logits, probs]) as name:\n self._logits, self._probs = distribution_util.get_logits_and_probs(logits=logits, probs=probs, validate_args=validate_args, multidimensional=True, name=name)\n if validate_args:\n self._logits = distribution_util.embed_check_categorical_event_shape(self._logits)\n logits_shape_static = self._logits.get_shape().with_rank_at_least(1)\n if logits_shape_static.ndims is not None:\n self._batch_rank = ops.convert_to_tensor(logits_shape_static.ndims - 1, dtype=dtypes.int32, name='batch_rank')\n else:\n with ops.name_scope(name='batch_rank'):\n self._batch_rank = array_ops.rank(self._logits) - 1\n logits_shape = array_ops.shape(self._logits, name='logits_shape')\n if tensor_shape.dimension_value(logits_shape_static[-1]) is not None:\n self._event_size = ops.convert_to_tensor(logits_shape_static.dims[-1].value, dtype=dtypes.int32, name='event_size')\n else:\n with ops.name_scope(name='event_size'):\n self._event_size = logits_shape[self._batch_rank]\n if logits_shape_static[:-1].is_fully_defined():\n self._batch_shape_val = constant_op.constant(logits_shape_static[:-1].as_list(), dtype=dtypes.int32, name='batch_shape')\n else:\n with ops.name_scope(name='batch_shape'):\n self._batch_shape_val = logits_shape[:-1]\n super(Categorical, self).__init__(dtype=dtype, reparameterization_type=distribution.NOT_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._logits, self._probs], name=name)", "docstring": "Initialize Categorical distributions using class log-probabilities.\n\nArgs:\n logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities\n of a set of Categorical distributions. The first `N - 1` dimensions\n index into a batch of independent distributions and the last dimension\n represents a vector of logits for each class. Only one of `logits` or\n `probs` should be passed in.\n probs: An N-D `Tensor`, `N >= 1`, representing the probabilities\n of a set of Categorical distributions. The first `N - 1` dimensions\n index into a batch of independent distributions and the last dimension\n represents a vector of probabilities for each class. 
Only one of\n `logits` or `probs` should be passed in.\n dtype: The type of the event samples (default: int32).\n validate_args: Python `bool`, default `False`. When `True` distribution\n parameters are checked for validity despite possibly degrading runtime\n performance. When `False` invalid inputs may silently render incorrect\n outputs.\n allow_nan_stats: Python `bool`, default `True`. When `True`, statistics\n (e.g., mean, mode, variance) use the value \"`NaN`\" to indicate the\n result is undefined. When `False`, an exception is raised if one or\n more of the statistic's batch members are undefined.\n name: Python `str` name prefixed to Ops created by this class."} +{"repo": "keras", "function": "def traverse(func, structure, top_down=True):\n return tree_impl.traverse(func, structure, top_down=top_down)", "docstring": "Traverses the given nested structure, applying the given function.\n\nThe traversal is depth-first. If `top_down` is True (default), parents\nare returned before their children (giving the option to avoid traversing\ninto a sub-tree).\n\nExamples:\n\n>>> v = []\n>>> keras.tree.traverse(v.append, [(1, 2), [3], {\"a\": 4}], top_down=True)\n[(1, 2), [3], {'a': 4}]\n>>> v\n[[(1, 2), [3], {'a': 4}], (1, 2), 1, 2, [3], 3, {'a': 4}, 4]\n\n>>> v = []\n>>> keras.tree.traverse(v.append, [(1, 2), [3], {\"a\": 4}], top_down=False)\n[(1, 2), [3], {'a': 4}]\n>>> v\n[1, 2, (1, 2), 3, [3], 4, {'a': 4}, [(1, 2), [3], {'a': 4}]]\n\nArgs:\n func: The function to be applied to each sub-nest of the structure.\n\n When traversing top-down:\n If `func(subtree) is None` the traversal continues into the\n sub-tree.\n If `func(subtree) is not None` the traversal does not continue\n into the sub-tree. The sub-tree will be replaced by `func(subtree)`\n in the returned structure (to replace the sub-tree with `None`, use\n the special value `MAP_TO_NONE`).\n\n When traversing bottom-up:\n If `func(subtree) is None` the traversed sub-tree is returned\n unaltered.\n If `func(subtree) is not None` the sub-tree will be replaced by\n `func(subtree)` in the returned structure (to replace the sub-tree\n with None, use the special value `MAP_TO_NONE`).\n\n structure: The structure to traverse.\n top_down: If True, parent structures will be visited before their\n children.\n\nReturns:\n The structured output from the traversal.\n\nRaises:\n TypeError: If `func` is not callable."} +{"repo": "tensorflow", "function": "def cosine_similarity(y_true, y_pred, axis=-1):\n y_true = nn.l2_normalize(y_true, axis=axis)\n y_pred = nn.l2_normalize(y_pred, axis=axis)\n return -math_ops.reduce_sum(y_true * y_pred, axis=axis)", "docstring": "Computes the cosine similarity between labels and predictions.\n\nNote that it is a number between -1 and 1. When it is a negative number\nbetween -1 and 0, 0 indicates orthogonality and values closer to -1\nindicate greater similarity. The values closer to 1 indicate greater\ndissimilarity. This makes it usable as a loss function in a setting\nwhere you try to maximize the proximity between predictions and\ntargets. 
If either `y_true` or `y_pred` is a zero vector, cosine\nsimilarity will be 0 regardless of the proximity between predictions\nand targets.\n\n`loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`\n\nStandalone usage:\n\n>>> y_true = [[0., 1.], [1., 1.], [1., 1.]]\n>>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]\n>>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1)\n>>> loss.numpy()\narray([-0., -0.999, 0.999], dtype=float32)\n\nArgs:\n y_true: Tensor of true targets.\n y_pred: Tensor of predicted targets.\n axis: Axis along which to determine similarity.\n\nReturns:\n Cosine similarity tensor."} +{"repo": "beam", "function": "def __init__(self, runner: Optional[Union[str, PipelineRunner]]=None, options: Optional[PipelineOptions]=None, argv: Optional[List[str]]=None, display_data: Optional[Dict[str, Any]]=None):\n logging.basicConfig()\n if options is not None:\n if isinstance(options, PipelineOptions):\n saved_runner = options.view_as(StandardOptions).runner\n options.view_as(StandardOptions).runner = None\n self._options = copy.deepcopy(options)\n self._options.view_as(StandardOptions).runner = saved_runner\n options.view_as(StandardOptions).runner = saved_runner\n else:\n raise ValueError('Parameter options, if specified, must be of type PipelineOptions. Received : %r' % options)\n elif argv is not None:\n if isinstance(argv, list):\n self._options = PipelineOptions(argv)\n else:\n raise ValueError('Parameter argv, if specified, must be a list. Received : %r' % argv)\n else:\n self._options = PipelineOptions([])\n FileSystems.set_options(self._options)\n if runner is None:\n runner = self._options.view_as(StandardOptions).runner\n if runner is None:\n runner = StandardOptions.DEFAULT_RUNNER\n logging.info('Missing pipeline option (runner). Executing pipeline using the default runner: %s.', runner)\n if isinstance(runner, str):\n runner = create_runner(runner)\n elif not isinstance(runner, PipelineRunner):\n raise TypeError('Runner %s is not a PipelineRunner object or the name of a registered runner.' % runner)\n if self._options.view_as(SetupOptions).pickle_library == 'default' and runner.default_pickle_library_override():\n logging.info('Runner defaulting to pickling library: %s.', runner.default_pickle_library_override())\n self._options.view_as(SetupOptions).pickle_library = runner.default_pickle_library_override()\n pickler.set_library(self._options.view_as(SetupOptions).pickle_library)\n errors = PipelineOptionsValidator(self._options, runner).validate()\n if errors:\n raise ValueError('Pipeline has validations errors: \\n' + '\\n'.join(errors))\n if runner.is_fnapi_compatible():\n experiments = self._options.view_as(DebugOptions).experiments or []\n if not 'beam_fn_api' in experiments:\n experiments.append('beam_fn_api')\n self._options.view_as(DebugOptions).experiments = experiments\n self.local_tempdir = tempfile.mkdtemp(prefix='beam-pipeline-temp')\n self.runner = runner\n self.transforms_stack = [AppliedPTransform(None, None, '', None, None, None)]\n self.applied_labels = set()\n self._root_transform().resource_hints = resource_hints_from_options(options)\n self.component_id_map = ComponentIdMap()\n self.contains_external_transforms = False\n self._display_data = display_data or {}\n self._error_handlers = []\n self._annotations_stack = [{}]", "docstring": "Initialize a pipeline object.\n\nArgs:\n runner (~apache_beam.runners.runner.PipelineRunner): An object of\n type :class:`~apache_beam.runners.runner.PipelineRunner` that will be\n used to execute the pipeline. 
For registered runners, the runner name\n can be specified, otherwise a runner object must be supplied.\n options (~apache_beam.options.pipeline_options.PipelineOptions):\n A configured\n :class:`~apache_beam.options.pipeline_options.PipelineOptions` object\n containing arguments that should be used for running the Beam job.\n argv (List[str]): a list of arguments (such as :data:`sys.argv`)\n to be used for building a\n :class:`~apache_beam.options.pipeline_options.PipelineOptions` object.\n This will only be used if argument **options** is :data:`None`.\n display_data (Dict[str, Any]): a dictionary of static data associated\n with this pipeline that can be displayed when it runs.\n\nRaises:\n ValueError: if either the runner or options argument is not\n of the expected type."} +{"repo": "beam", "function": "def __init__(self, namespace: str):\n self.requests = Metrics.counter(namespace, 'requests')\n self.responses = Metrics.counter(namespace, 'responses')\n self.failures = Metrics.counter(namespace, 'failures')\n self.throttled_requests = Metrics.counter(namespace, 'throttled_requests')\n self.throttled_secs = Metrics.counter(namespace, 'cumulativeThrottlingSeconds')\n self.timeout_requests = Metrics.counter(namespace, 'requests_timed_out')\n self.call_counter = Metrics.counter(namespace, 'call_invocations')\n self.setup_counter = Metrics.counter(namespace, 'setup_counter')\n self.teardown_counter = Metrics.counter(namespace, 'teardown_counter')\n self.backoff_counter = Metrics.counter(namespace, 'backoff_counter')\n self.sleeper_counter = Metrics.counter(namespace, 'sleeper_counter')\n self.should_backoff_counter = Metrics.counter(namespace, 'should_backoff_counter')", "docstring": "Args:\n namespace: Namespace for the metrics."} +{"repo": "transformers", "function": "def find_all_dependencies(dependency_mapping: Dict[str, set], start_entity: Optional[str]=None, initial_dependencies: Optional[set]=None, initial_checked_dependencies: Optional[set]=None, return_parent: bool=False) -> Union[list, set]:\n if initial_dependencies is None and start_entity is not None:\n initial_dependencies = dependency_mapping[start_entity]\n if initial_checked_dependencies is None:\n initial_checked_dependencies = set()\n dependency_queue = deque(initial_dependencies)\n all_dependencies = set()\n all_dependencies_with_parent = []\n checked_dependencies = set(initial_checked_dependencies)\n parents = dict.fromkeys(initial_dependencies, start_entity)\n while len(dependency_queue) > 0:\n current = dependency_queue.popleft()\n if current not in checked_dependencies:\n all_dependencies.add(current)\n all_dependencies_with_parent += [(current, parents[current])]\n if current in dependency_mapping.keys():\n dependency_queue.extend(dependency_mapping[current])\n parents.update(dict.fromkeys(dependency_mapping[current], current))\n checked_dependencies.add(current)\n if not return_parent:\n return all_dependencies\n return all_dependencies_with_parent", "docstring": "Return all the dependencies of the given `start_entity` or `initial_dependencies`. This is basically some kind of\nBFS traversal algorithm. It can either start from `start_entity`, or `initial_dependencies`.\n\nArgs:\n dependency_mapping (`Dict[str, set]`):\n A mapping from entities (usually function/assignment names), to immediate dependencies. 
That is, for function names,\n a mapping {\"foo\": {\"bar\", \"test\"}} would indicate that functions `bar` and `test` are immediately called\n in `foo`'s definition.\n start_entity (str | None, *optional*):\n A key of `dependency_mapping`, indicating from which entity to start the search.\n initial_dependencies (set | None, *optional*):\n If `start_entity` is not provided, this can be used as an alternative. In this case, the search will continue\n from all the entities in `initial_dependencies`, if they are in `dependency_mapping`.\n initial_checked_dependencies (set | None, *optional*):\n If provided, entities already present in `initial_checked_dependencies` will not be part of the returned dependencies.\n return_parent (bool, *optional*):\n If `True`, will return a list consisting of tuples (dependency, parent) instead of a simple set of dependencies. Note\n that the order of the items in the list reflects the traversal order. Thus, no parent can ever appear before its children.\nReturns:\n A set of all the dependencies, or a list of tuples `(dependency, parent)` if `return_parent=True`.\n\nExample:\nGiven the following structure in the `modular_xxx.py` file:\n```\ndef foo1():\n pass\n\ndef foo2():\n pass\n\ndef bar():\n foo1()\n\ndef foobar():\n bar()\n foo2()\n\nclass MyLayer(SomeOtherModelLayer):\n def forward(...):\n foobar()\n```\nand the `dependency_mapping` created when visiting the `modular_xxx.py` file, we get:\n```\ndependency_mapping = {'bar': {'foo1'}, 'foobar': {'bar', 'foo2'}}\nfind_all_dependencies(dependency_mapping, start_entity='foobar', return_parent=True)\n>>> [('bar', 'foobar'), ('foo2', 'foobar'), ('foo1', 'bar')]\n```\nThat is, all the functions needed (and potentially their immediate parent) so that the function to be added\nin MyLayer (`foobar`) can work correctly."} +{"repo": "transformers", "function": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if not self.add_bos_token:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False)\n if token_ids_1 is None:\n return [1] + [0] * len(token_ids_0)\n return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)", "docstring": "Retrieves sequence ids from a token list that has no special tokens added.
This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token."} +{"repo": "tensorflow", "function": "def _is_user_included_op(self, op):\n for opname_re in self._parameters.included_opname_re_list:\n if opname_re.match(op.name):\n return True\n for optype_re in self._parameters.included_optype_re_list:\n if optype_re.match(op.type):\n return True\n return False", "docstring": "Checks whether the op is included in the tensor tracer flags.\n\nArgs:\n op: tf Operation\nReturns:\n True, if the op is included.\n An op is included if:\n - Its op name is given in included_opnames\n - Its op type is given in included_optypes\n - The op is at most _trace_ops_before_included hops before an included op\n - The op is at most _trace_ops_after_included hops after an included op"} +{"repo": "tf-quant-finance", "function": "class SvenssonParameters:\n beta_0: types.RealTensor\n beta_1: types.RealTensor\n beta_2: types.RealTensor\n beta_3: types.RealTensor\n tau_1: types.RealTensor\n tau_2: types.RealTensor", "docstring": "Nelson Seigel Svensson interpolation parameters.\n\nAttributes:\n beta_0: A real `Tensor` of arbitrary shape `batch_shape`.\n beta_1: A real `Tensor` of arbitrary shape `batch_shape`.\n beta_2: A real `Tensor` of arbitrary shape `batch_shape`.\n beta_3: A real `Tensor` of arbitrary shape `batch_shape`.\n tau_1: A real `Tensor` of arbitrary shape `batch_shape`.\n tau_2: A real `Tensor` of arbitrary shape `batch_shape`."} +{"repo": "tensorflow", "function": "def add_collection_def(meta_graph_def, key, graph=None, export_scope=None, exclude_nodes=None, override_contents=None):\n if graph and (not isinstance(graph, ops.Graph)):\n raise TypeError(f'graph must be of type Graph. Received type: {type(graph)}.')\n if not isinstance(key, str) and (not isinstance(key, bytes)):\n logging.warning('Only collections with string type keys will be serialized. 
This key has %s', type(key))\n return\n graph = graph or ops.get_default_graph()\n if override_contents:\n collection_list = override_contents\n else:\n collection_list = graph.get_collection(key)\n collection_list = [x for x in collection_list if _should_include_node(x, export_scope, exclude_nodes)]\n if not collection_list:\n return\n try:\n col_def = meta_graph_def.collection_def[key]\n to_proto = ops.get_to_proto_function(key)\n proto_type = ops.get_collection_proto_type(key)\n if to_proto:\n kind = 'bytes_list'\n for x in collection_list:\n proto = to_proto(x, export_scope=export_scope)\n if proto:\n assert isinstance(proto, proto_type)\n getattr(col_def, kind).value.append(proto.SerializeToString())\n else:\n kind = _get_kind_name(collection_list[0])\n if kind == 'node_list':\n for x in collection_list:\n if not export_scope or x.name.startswith(export_scope):\n getattr(col_def, kind).value.append(ops.strip_name_scope(x.name, export_scope))\n elif kind == 'bytes_list':\n getattr(col_def, kind).value.extend([compat.as_bytes(x) for x in collection_list])\n else:\n getattr(col_def, kind).value.extend([x for x in collection_list])\n except Exception as e:\n logging.warning(\"Issue encountered when serializing %s.\\nType is unsupported, or the types of the items don't match field type in CollectionDef. Note this is a warning and probably safe to ignore.\\n%s\", key, str(e))\n if key in meta_graph_def.collection_def:\n del meta_graph_def.collection_def[key]\n return", "docstring": "Adds a collection to MetaGraphDef protocol buffer.\n\nArgs:\n meta_graph_def: MetaGraphDef protocol buffer.\n key: One of the GraphKeys or user-defined string.\n graph: The `Graph` from which to get collections.\n export_scope: Optional `string`. Name scope to remove.\n exclude_nodes: An iterable of nodes or `string` node names to omit from the\n collection, or None.\n override_contents: An iterable of values to place in the collection,\n ignoring the current values (if set)."} +{"repo": "transformers", "function": "def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n if cache_kwargs is None:\n cache_kwargs = {}\n key_states = key_states.to(self.key_cache[layer_idx].dtype)\n value_states = value_states.to(self.value_cache[layer_idx].dtype)\n return _static_cache_update(self.key_cache[layer_idx], self.value_cache[layer_idx], key_states, value_states, cache_kwargs.get('cache_position'))", "docstring": "Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.\nIt is VERY important to index using a tensor, otherwise you introduce a copy to the device.\n\nParameters:\n key_states (`torch.Tensor`):\n The new key states to cache.\n value_states (`torch.Tensor`):\n The new value states to cache.\n layer_idx (`int`):\n The index of the layer to cache the states for.\n cache_kwargs (`Dict[str, Any]`, `optional`):\n Additional arguments for the cache subclass. 
The `StaticCache` needs the `cache_position` input\n to know where to write in the cache.\n\nReturn:\n A tuple containing the updated key and value states."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, original_hidden_states: torch.Tensor, layer_idx: int, attention_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Zamba2HybridDynamicCache]=None, output_attentions: Optional[bool]=False, position_embeddings: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1)\n hidden_states = self.input_layernorm(hidden_states)\n hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, layer_idx=layer_idx, attention_mask=attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, position_embeddings=position_embeddings, **kwargs)\n hidden_states = self.pre_ff_layernorm(hidden_states)\n hidden_states = self.feed_forward(hidden_states, layer_idx)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`\n original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.\n This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The\n concatenated tensor is then used as input of the pre-attention RMSNorm\n (see fig. 2 in https://huggingface.co/papers/2405.16712).\n attention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n `(batch, sequence_length)` where padding elements are indicated by 0.\n past_key_value (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers.
See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):\n Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,\n with `head_dim` being the embedding dimension of each attention head."} +{"repo": "python-fire", "function": "def _AvailableString(variables, verbose=False):\n modules = []\n other = []\n for name, value in variables.items():\n if not verbose and name.startswith('_'):\n continue\n if '-' in name or '/' in name:\n continue\n if inspect.ismodule(value):\n modules.append(name)\n else:\n other.append(name)\n lists = [('Modules', modules), ('Objects', other)]\n list_strs = []\n for name, varlist in lists:\n if varlist:\n items_str = ', '.join(sorted(varlist))\n list_strs.append(f'{name}: {items_str}')\n lists_str = '\\n'.join(list_strs)\n return f'Fire is starting a Python REPL with the following objects:\\n{lists_str}\\n'", "docstring": "Returns a string describing what objects are available in the Python REPL.\n\nArgs:\n variables: A dict of the object to be available in the REPL.\n verbose: Whether to include 'hidden' members, those keys starting with _.\nReturns:\n A string fit for printing at the start of the REPL, indicating what objects\n are available for the user to use."} +{"repo": "transformers", "function": "def _prepare_4d_attention_mask(attention_mask: Optional[torch.Tensor], sequence_length: int, dtype: torch.dtype, device: torch.device, is_causal: bool=True) -> Optional[torch.Tensor]:\n min_value = torch.finfo(dtype).min if dtype.is_floating_point else torch.iinfo(dtype).min\n if attention_mask is not None:\n attention_mask = attention_mask.view(attention_mask.shape[0], 1, 1, -1)\n attention_mask = attention_mask * min_value\n if is_causal:\n causal_mask = torch.triu(torch.ones((sequence_length, sequence_length), dtype=dtype, device=device) * min_value, diagonal=1)\n causal_mask = causal_mask.view(1, 1, sequence_length, sequence_length)\n if attention_mask is not None:\n attention_mask = torch.minimum(attention_mask, causal_mask)\n else:\n attention_mask = causal_mask\n return attention_mask", "docstring": "Creates 4D attention mask and combines causal and padding masks if needed.\n\nArgs:\n attention_mask: Optional tensor of shape (batch_size, seq_length) containing padding mask\n sequence_length: Length of the sequence\n dtype: Data type of the mask\n device: Device of the mask\n is_causal: Whether to apply causal masking\n\nReturns:\n 4D attention mask of shape (batch_size, 1, seq_length, seq_length)"} +{"repo": "tensorflow", "function": "def find(self, key, dynamic_default_value=None, name=None):\n with tf.name_scope(name or '%s_lookup_table_find' % self._name):\n key = tf.convert_to_tensor(key, dtype=self._key_dtype, name='key')\n if dynamic_default_value is not None:\n dynamic_default_value = tf.convert_to_tensor(dynamic_default_value, dtype=self._value_dtype, name='default_value')\n value = gen_simple_hash_table_op.examples_simple_hash_table_find(self.resource_handle, key, dynamic_default_value if dynamic_default_value is not None else self._default_value)\n return value", "docstring": "Looks up `key` in a table, outputs the corresponding value.\n\nThe `default_value` is used if key not present in the table.\n\nArgs:\n key: Key to look up. 
Must match the table's key_dtype.\n dynamic_default_value: The value to use if the key is missing in the\n table. If None (by default), the `table.default_value` will be used.\n name: A name for the operation (optional).\n\nReturns:\n A tensor containing the value in the same shape as `key` using the\n table's value type.\n\nRaises:\n TypeError: when `key` do not match the table data types."} +{"repo": "beam", "function": "class RunEnsembleDetector(beam.PTransform[beam.PCollection[NestedKeyedInputT], beam.PCollection[NestedKeyedOutputT]]):\n\n def __init__(self, ensemble_detector: EnsembleAnomalyDetector):\n self._ensemble_detector = ensemble_detector\n\n def expand(self, input: beam.PCollection[NestedKeyedInputT]) -> beam.PCollection[NestedKeyedOutputT]:\n model_uuid = f'{self._ensemble_detector._model_id}:{uuid.uuid4().hex[:6]}'\n assert self._ensemble_detector._sub_detectors is not None\n if not self._ensemble_detector._sub_detectors:\n raise ValueError(f'No detectors found at {model_uuid}')\n results = []\n for idx, detector in enumerate(self._ensemble_detector._sub_detectors):\n if isinstance(detector, EnsembleAnomalyDetector):\n results.append(input | f'Run Ensemble Detector at index {idx} ({model_uuid})' >> RunEnsembleDetector(detector))\n elif isinstance(detector, OfflineDetector):\n results.append(input | f'Run Offline Detector at index {idx} ({model_uuid})' >> RunOfflineDetector(detector))\n else:\n results.append(input | f'Run One Detector at index {idx} ({model_uuid})' >> RunOneDetector(detector))\n if self._ensemble_detector._aggregation_strategy is None:\n aggregation_type = 'Simple'\n else:\n aggregation_type = 'Custom'\n ret = results | beam.Flatten() | f'Run {aggregation_type} Aggregation Strategy ({model_uuid})' >> RunAggregationStrategy(self._ensemble_detector._aggregation_strategy, self._ensemble_detector._model_id)\n if self._ensemble_detector._threshold_criterion:\n ret = ret | f'Run Threshold Criterion ({model_uuid})' >> RunThresholdCriterion(self._ensemble_detector._threshold_criterion)\n return ret", "docstring": "Runs an ensemble of anomaly detectors on a PCollection of data.\n\nThis PTransform applies an `EnsembleAnomalyDetector` to the input data,\nrunning each sub-detector and aggregating the results.\n\nArgs:\n ensemble_detector: The `EnsembleAnomalyDetector` to run."} +{"repo": "tensorflow", "function": "def unique_fn_name(scope, name):\n return ('%s%s_%s' % (scope, name, ops.uid())).replace('/', '_')", "docstring": "Returns a unique name to use for a control flow function.\n\nArgs:\n scope: A name scope string.\n name: An identifier for this function (e.g. \"true\", \"body\").\n\nReturns:\n A string, the name to use for the function."} +{"repo": "keras", "function": "def pop(self, rebuild=True):\n layer = self._layers.pop()\n self.built = False\n self._functional = None\n if rebuild:\n self._maybe_rebuild()\n return layer", "docstring": "Removes the last layer in the model.\n\nArgs:\n rebuild: `bool`. Whether to rebuild the model after removing\n the layer. 
Defaults to `True`.\n\nReturns:\n layer: layer instance."} +{"repo": "transformers", "function": "def get_number_of_image_tokens(self, height: int, width: int, images_kwargs=None):\n min_patches = images_kwargs.get('min_patches', None) or self.min_patches\n max_patches = images_kwargs.get('max_patches', None) or self.max_patches\n patch_size = images_kwargs.get('size', None) or self.size\n crop_to_patches = images_kwargs.get('crop_to_patches', None) or self.crop_to_patches\n num_patches = 1\n if crop_to_patches and max_patches > 1:\n num_columns, num_rows = get_optimal_tiled_canvas((height, width), (patch_size['height'], patch_size['width']), min_patches, max_patches)\n num_patches += num_columns * num_rows\n return num_patches", "docstring": "A utility that returns the number of patches for a given image size.\n\nArgs:\n height (`int`):\n Height of the input image.\n width (`int`):\n Width of the input image.\n images_kwargs (`dict`, *optional*):\n Any kwargs to override defaults of the image processor.\nReturns:\n `int`: Number of patches per image."} +{"repo": "tensorflow", "function": "def _crossed_column(keys, hash_bucket_size, hash_key=None):\n if not hash_bucket_size or hash_bucket_size < 1:\n raise ValueError('hash_bucket_size must be > 1. hash_bucket_size: {}'.format(hash_bucket_size))\n if not keys or len(keys) < 2:\n raise ValueError('keys must be a list with length > 1. Given: {}'.format(keys))\n for key in keys:\n if not isinstance(key, six.string_types) and (not isinstance(key, _CategoricalColumn)):\n raise ValueError('Unsupported key type. All keys must be either string, or categorical column except _HashedCategoricalColumn. Given: {}'.format(key))\n if isinstance(key, _HashedCategoricalColumn):\n raise ValueError('categorical_column_with_hash_bucket is not supported for crossing. Hashing before crossing will increase probability of collision. Instead, use the feature name as a string. Given: {}'.format(key))\n return _CrossedColumn(keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key)", "docstring": "Returns a column for performing crosses of categorical features.\n\nCrossed features are hashed according to `hash_bucket_size`.
Conceptually,\nthe transformation can be thought of as:\n Hash(cartesian product of features) % `hash_bucket_size`\n\nFor example, if the input features are:\n\n* SparseTensor referred by first key:\n\n ```python\n shape = [2, 2]\n {\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n }\n ```\n\n* SparseTensor referred by second key:\n\n ```python\n shape = [2, 1]\n {\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n }\n ```\n\nthen crossed feature will look like:\n\n```python\n shape = [2, 2]\n{\n [0, 0]: Hash64(\"d\", Hash64(\"a\")) % hash_bucket_size\n [1, 0]: Hash64(\"e\", Hash64(\"b\")) % hash_bucket_size\n [1, 1]: Hash64(\"e\", Hash64(\"c\")) % hash_bucket_size\n}\n```\n\nHere is an example to create a linear model with crosses of string features:\n\n```python\nkeywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)\ncolumns = [keywords_x_doc_terms, ...]\nfeatures = tf.io.parse_example(..., features=make_parse_example_spec(columns))\nlinear_prediction = linear_model(features, columns)\n```\n\nYou could also use vocabulary lookup before crossing:\n\n```python\nkeywords = categorical_column_with_vocabulary_file(\n 'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)\nkeywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)\ncolumns = [keywords_x_doc_terms, ...]\nfeatures = tf.io.parse_example(..., features=make_parse_example_spec(columns))\nlinear_prediction = linear_model(features, columns)\n```\n\nIf an input feature is of numeric type, you can use\n`categorical_column_with_identity`, or `bucketized_column`, as in the example:\n\n```python\n# vertical_id is an integer categorical feature.\nvertical_id = categorical_column_with_identity('vertical_id', 10K)\nprice = numeric_column('price')\n# bucketized_column converts numerical feature to a categorical one.\nbucketized_price = bucketized_column(price, boundaries=[...])\nvertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)\ncolumns = [vertical_id_x_price, ...]\nfeatures = tf.io.parse_example(..., features=make_parse_example_spec(columns))\nlinear_prediction = linear_model(features, columns)\n```\n\nTo use crossed column in DNN model, you need to add it in an embedding column\nas in this example:\n\n```python\nvertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)\nvertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)\ndense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])\n```\n\nArgs:\n keys: An iterable identifying the features to be crossed. Each element can\n be either:\n * string: Uses the corresponding feature which must be of string type.\n * `_CategoricalColumn`: Uses the transformed tensor produced by this\n column. Does not support hashed categorical column.\n hash_bucket_size: An int > 1. 
The number of buckets.\n hash_key: Specify the hash_key that will be used by the `FingerprintCat64`\n function to combine the crosses fingerprints on SparseCrossOp (optional).\n\nReturns:\n A `_CrossedColumn`.\n\nRaises:\n ValueError: If `len(keys) < 2`.\n ValueError: If any of the keys is neither a string nor `_CategoricalColumn`.\n ValueError: If any of the keys is `_HashedCategoricalColumn`.\n ValueError: If `hash_bucket_size < 1`."} +{"repo": "genai-processors", "function": "def __init__(self, *, pattern: str, word_start: str | None=None, substream_input: str='', substream_output: str='', flush_fn: Callable[[content_api.ProcessorPart], bool] | None=None, remove_from_input_stream: bool=True):\n self._word_start = word_start\n self._pattern = re.compile(pattern, re.DOTALL)\n self._substream_input = substream_input\n self._substream_output = substream_output\n self._flush_fn = flush_fn or (lambda _: False)\n self._remove_from_input_stream = remove_from_input_stream", "docstring": "Extracts text parts from the input stream that match the pattern.\n\nOnly considers text parts from the input stream `substream_input`.\n\nSee class docstring for more details.\n\nThis processor buffers input parts until one of the following happens:\n\n1. the `pattern` is found, then it outputs all parts in the buffer up to the\n end of the match.\n2. when the `flush_fn` returns True, then it outputs all parts in the\n buffer.\n\nWhen `word_start` is set, the following happens:\n\n3. the word_start is found, then it outputs all the parts before the\n `word_start`.\n4. `word_start` is not found in buffer_text[-len(word_start):], where\n buffer_text is the concatenation of all the text parts in the buffer. All\n parts whose text is in buffer_text[-len(word_start):] are returned.\n\nWhile in the buffer, the parts are not output which means they can lead to\ndelays in the output stream.\n\nTo avoid such delays, set the `remove_from_input_stream` to False and/or\ndefine a `flush_fn` that returns True often to discard the parts in the\nbuffer frequently.\n\nArgs:\n pattern: pattern to match a text to extract into a part. When\n `remove_from_input_stream` is True, the matched text will be removed\n from the stream and will be replaced by a single extracted part. The\n parts before and after this match will be returned as is. Note that\n re.DOTALL is used to match newlines.\n word_start: text to match the start of the text that needs to be captured.\n `word_start` is not a regular expression but a plain string that will be\n matched exactly. `word_start` should be a substring of the pattern and\n should indicate that the pattern is about to be matched. Whenever\n `word_start` is found, the parts after it will be buffered (not\n returned) until the pattern is found, the `flush_fn` returns True, or\n the input stream is exhausted. When set to None (default), this logic is\n not applied.\n substream_input: name of the substream to use for the input part.\n substream_output: name of the substream to use for the extracted part.\n flush_fn: function to check when to reset the buffer and yield all the\n parts in the buffer. The part where `flush_fn` returns True will be\n returned as is and will not be matched against the pattern.\n remove_from_input_stream: if True, the processor will remove the matched\n parts from the input stream. If False, the input stream will be\n preserved and the parts will be returned as is quickly. 
The processor\n will output into its `substream_output` substream once a match is found."} +{"repo": "beam", "function": "def _delete_minibatch(self, bucket, keys):\n request = messages.DeleteBatchRequest(bucket, keys)\n results = {}\n try:\n response = self.client.delete_batch(request)\n for key in response.deleted:\n results[bucket, key] = None\n for key, error in zip(response.failed, response.errors):\n results[bucket, key] = error\n except messages.S3ClientError as e:\n for key in keys:\n results[bucket, key] = e\n return results", "docstring": "A helper method. Boto3 allows batch deletions\nfor files within the same bucket.\n\nArgs:\n bucket: String bucket name\n keys: List of keys to be deleted in the bucket\n\nReturns: dict of the form {(bucket, key): error}, where error is None if the\noperation succeeded"} +{"repo": "tensorflow", "function": "def __init__(self, axis=-1, **kwargs):\n super(Concatenate, self).__init__(**kwargs)\n self.axis = axis\n self.supports_masking = True\n self._reshape_required = False", "docstring": "Instantiates a Concatenate layer.\n\n>>> x = np.arange(20).reshape(2, 2, 5)\n>>> print(x)\n[[[ 0 1 2 3 4]\n [ 5 6 7 8 9]]\n [[10 11 12 13 14]\n [15 16 17 18 19]]]\n>>> y = np.arange(20, 30).reshape(2, 1, 5)\n>>> print(y)\n[[[20 21 22 23 24]]\n [[25 26 27 28 29]]]\n>>> tf.keras.layers.Concatenate(axis=1)([x, y])\n\n\nArgs:\n axis: Axis along which to concatenate.\n **kwargs: standard layer keyword arguments."} +{"repo": "temporian", "function": "def cumprod(self: EventSetOrNode, sampling: Optional[EventSetOrNode]=None) -> EventSetOrNode:\n from temporian.core.operators.window.moving_product import cumprod\n return cumprod(self, sampling=sampling)", "docstring": "Computes the cumulative product of values over each feature in an\n[`EventSet`][temporian.EventSet].\n\nThis operation only supports floating-point features.\n\nMissing (NaN) values are not accounted for. The output will be NaN until\nthe input contains at least one numeric value.\n\nWarning: The `cumprod` function leverages an infinite window length for\nits calculations, which may lead to considerable computational overhead\nwith increasing dataset sizes.\n\nExample:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[0, 1, 2, 3],\n ... features={\"value\": [1.0, 2.0, 10.0, 12.0]},\n ... )\n\n >>> b = a.cumprod()\n >>> b\n indexes: ...\n (4 events):\n timestamps: [0. 1. 2. 3.]\n 'value': [ 1. 2. 20. 240.]\n ...\n\n ```\n\nExamples with sampling:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[0, 1, 2, 5, 6, 7],\n ... features={\"value\": [1, 2, 10, 12, np.nan, 2]},\n ... )\n\n >>> # Cumulative product at 5 and 10\n >>> b = tp.event_set(timestamps=[5, 10])\n >>> c = a.cumprod(sampling=b)\n >>> c\n indexes: ...\n (2 events):\n timestamps: [ 5. 10.]\n 'value': [240. 480.]\n ...\n\n >>> # Product all values in the EventSet\n >>> c = a.cumprod(sampling=a.end())\n >>> c\n indexes: ...\n (1 events):\n timestamps: [7.]\n 'value': [480.]\n ...\n\n ```\n\nArgs:\n sampling: Timestamps to sample the sliding window's value at. 
If not\n provided, timestamps in the input are used.\n\nReturns:\n Cumulative product of each feature."} +{"repo": "transformers", "function": "class MoshiDepthDecoder(MoshiPreTrainedModel, GenerationMixin):\n config_class = MoshiDepthConfig\n\n def __init__(self, config: MoshiDepthConfig):\n super().__init__(config)\n self.text_embed_tokens = nn.Embedding(config.vocab_size + 1, config.hidden_size)\n self.embed_tokens = nn.ModuleList([nn.Embedding(config.audio_vocab_size + 1, config.hidden_size) for _ in range(config.num_codebooks - 1)])\n self.input_projections = MoshiFlexibleLinear(config.input_size, config.hidden_size, config.num_codebooks)\n self.layers = nn.ModuleList([MoshiDecoderLayer(config, layer_idx, use_flexible_linear=True, use_rope=False) for layer_idx in range(config.num_hidden_layers)])\n self.lm_heads = MoshiFlexibleLinear(config.hidden_size, config.audio_vocab_size, config.num_codebooks)\n self._attn_implementation = config._attn_implementation\n self.gradient_checkpointing = False\n self.config = config\n\n def forward(self, input_ids: Optional[torch.LongTensor]=None, last_hidden_state: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPast]:\n \"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens. The first element of the sequence must the text token associated to the audio codebooks.\n The rest of the elements must be flatten audio codebooks. The `cache_position` argument can be used to indicate to which index is associated each token.\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the main decoder. Used to contextualize `input_ids`\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n If `past_key_values` is used, optionally only the last `input_ids` have to be input (see\n `past_key_values`).\n\n If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more\n information on the default strategy.\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):\n Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used to speed up sequential decoding. 
This typically consists in the `past_key_values`\n returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.\n\n Two formats are allowed:\n - a [`~cache_utils.Cache`] instance;\n - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy\n cache format.\n\n The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the\n legacy cache format will be returned.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't\n have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`\n of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert the inputs into associated vectors than the\n model's internal embedding lookup matrix.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if self.gradient_checkpointing and self.training and use_cache:\n logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`.')\n use_cache = False\n if use_cache and past_key_values is None and (not self.training):\n past_key_values = DynamicCache.from_legacy_cache(past_key_values)\n past_seen_tokens = 0 if past_key_values is None else past_key_values.get_seq_length()\n if cache_position is None:\n cache_position = torch.arange(past_seen_tokens, past_seen_tokens + input_ids.shape[1], device=input_ids.device)\n if position_ids is None:\n position_ids = cache_position.unsqueeze(0)\n if inputs_embeds is None:\n inputs_embeds = []\n for position_idx in cache_position:\n position_idx = position_idx.item()\n if position_idx == 0:\n inputs_embeds.append(self.text_embed_tokens(input_ids[:, [position_idx]]))\n else:\n inputs_embeds.append(self.embed_tokens[position_idx - 1](input_ids[:, [position_idx - past_seen_tokens]]))\n inputs_embeds = torch.cat(inputs_embeds, dim=1)\n inputs_embeds += self.input_projections(last_hidden_state, cache_position)\n causal_mask = None\n if attention_mask is not None:\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n next_decoder_cache = None\n hidden_states = inputs_embeds\n for decoder_layer in self.layers:\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, causal_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position)\n else:\n layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache = layer_outputs[2 if output_attentions else 1]\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n next_cache = next_decoder_cache if use_cache else None\n logits = self.lm_heads(hidden_states, cache_position)\n loss = None\n if labels is not None:\n logits = logits.float()\n loss_fct = CrossEntropyLoss()\n labels = labels.masked_fill(labels == self.config.audio_vocab_size, -100).reshape(-1)\n labels = labels.to(logits.device)\n loss = loss_fct(logits.reshape(-1, self.config.audio_vocab_size), labels)\n if not return_dict:\n return tuple((v for v in [loss, logits, next_cache, all_hidden_states, all_self_attns] if v is not None))\n return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns)\n\n def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):\n if self.config._attn_implementation == 'flash_attention_2':\n if attention_mask is not None and past_key_values is not None:\n is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]\n if is_padding_right:\n raise ValueError(\"You are attempting to perform batched generation with padding_side='right' this may lead to unexpected behaviour for Flash Attention version of Moshi. Make sure to call `tokenizer.padding_side = 'left'` before tokenizing the input. 
\")\n if attention_mask is not None and 0.0 in attention_mask:\n return attention_mask\n return None\n if self.config._attn_implementation == 'flex_attention':\n if isinstance(attention_mask, torch.Tensor):\n attention_mask = make_flex_block_causal_mask(attention_mask)\n return attention_mask\n past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0\n using_static_cache = isinstance(past_key_values, StaticCache)\n using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)\n if self.config._attn_implementation == 'sdpa' and (not (using_static_cache or using_sliding_window_cache)) and (not output_attentions):\n if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, sliding_window=self.config.sliding_window, is_training=self.training):\n return None\n dtype = input_tensor.dtype\n min_dtype = torch.finfo(dtype).min\n sequence_length = input_tensor.shape[1]\n if using_sliding_window_cache or using_static_cache:\n target_length = past_key_values.get_max_cache_shape()\n else:\n target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1\n causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0], config=self.config, past_key_values=past_key_values)\n if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):\n causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)\n return causal_mask\n\n @staticmethod\n def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, config: MoshiDepthConfig, past_key_values: Cache):\n \"\"\"\n Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\n Args:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`torch.Tensor`):\n Batch size.\n config (`MoshiDepthConfig`):\n The model's configuration class\n past_key_values (`Cache`):\n The cache class that is being used currently to generate\n \"\"\"\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n text_config = 
config.get_text_config()\n if getattr(text_config, 'use_sliding_window', True) and text_config.sliding_window is not None:\n if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:\n sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= cache_position.reshape(-1, 1) - text_config.sliding_window\n diagonal_attend_mask.bitwise_or_(sliding_attend_mask)\n causal_mask *= diagonal_attend_mask\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n if attention_mask.shape[-1] > target_length:\n attention_mask = attention_mask[:, :target_length]\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Transformer depth decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MoshiTransformerLayer`]\n\nArgs:\n config: MoshiConfig"} +{"repo": "tensorflow", "function": "def complex(real, imag, name=None):\n real = ops.convert_to_tensor(real, name='real')\n imag = ops.convert_to_tensor(imag, name='imag')\n with ops.name_scope(name, 'Complex', [real, imag]) as name:\n input_types = (real.dtype, imag.dtype)\n if input_types == (dtypes.float64, dtypes.float64):\n Tout = dtypes.complex128\n elif input_types == (dtypes.float32, dtypes.float32):\n Tout = dtypes.complex64\n else:\n raise TypeError(f'The `real` and `imag` components have incorrect types: {real.dtype.name} {imag.dtype.name}. They must be consistent, and one of {[dtypes.float32, dtypes.float64]}')\n return gen_math_ops._complex(real, imag, Tout=Tout, name=name)", "docstring": "Converts two real numbers to a complex number.\n\nGiven a tensor `real` representing the real part of a complex number, and a\ntensor `imag` representing the imaginary part of a complex number, this\noperation returns complex numbers elementwise of the form \\\\(a + bj\\\\), where\n*a* represents the `real` part and *b* represents the `imag` part.\n\nThe input tensors `real` and `imag` must have the same shape.\n\nFor example:\n\n```python\nreal = tf.constant([2.25, 3.25])\nimag = tf.constant([4.75, 5.75])\ntf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]\n```\n\nArgs:\n real: A `Tensor`. Must be one of the following types: `float32`, `float64`.\n imag: A `Tensor`. 
Must have the same type as `real`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` of type `complex64` or `complex128`.\n\nRaises:\n TypeError: Real and imag must be correct types"} +{"repo": "transformers", "function": "class ZoeDepthNeck(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n if config.backbone_config is not None and config.backbone_config.model_type in ['swinv2']:\n self.reassemble_stage = None\n else:\n self.reassemble_stage = ZoeDepthReassembleStage(config)\n self.convs = nn.ModuleList()\n for channel in config.neck_hidden_sizes:\n self.convs.append(nn.Conv2d(channel, config.fusion_hidden_size, kernel_size=3, padding=1, bias=False))\n self.fusion_stage = ZoeDepthFeatureFusionStage(config)\n\n def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) -> List[torch.Tensor]:\n \"\"\"\n Args:\n hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):\n List of hidden states from the backbone.\n \"\"\"\n if not isinstance(hidden_states, (tuple, list)):\n raise TypeError('hidden_states should be a tuple or list of tensors')\n if len(hidden_states) != len(self.config.neck_hidden_sizes):\n raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')\n if self.reassemble_stage is not None:\n hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)\n features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]\n output = self.fusion_stage(features)\n return (output, features[-1])", "docstring": "ZoeDepthNeck. A neck is a module that is normally used between the backbone and the head. It takes a list of tensors as\ninput and produces another list of tensors as output. 
For ZoeDepth, it includes 2 stages:\n\n* ZoeDepthReassembleStage\n* ZoeDepthFeatureFusionStage.\n\nArgs:\n config (dict): config dict."} +{"repo": "transformers", "function": "class BigBirdConfig(PretrainedConfig):\n model_type = 'big_bird'\n\n def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu_new', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type='block_sparse', use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs):\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.type_vocab_size = type_vocab_size\n self.layer_norm_eps = layer_norm_eps\n self.use_cache = use_cache\n self.rescale_embeddings = rescale_embeddings\n self.attention_type = attention_type\n self.use_bias = use_bias\n self.block_size = block_size\n self.num_random_blocks = num_random_blocks\n self.classifier_dropout = classifier_dropout", "docstring": "This is the configuration class to store the configuration of a [`BigBirdModel`]. It is used to instantiate an\nBigBird model according to the specified arguments, defining the model architecture. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the BigBird\n[google/bigbird-roberta-base](https://huggingface.co/google/bigbird-roberta-base) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 50358):\n Vocabulary size of the BigBird model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`BigBirdModel`].\n hidden_size (`int`, *optional*, defaults to 768):\n Dimension of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimension of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu_new\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n hidden_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n max_position_embeddings (`int`, *optional*, defaults to 4096):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 1024 or 2048 or 4096).\n type_vocab_size (`int`, *optional*, defaults to 2):\n The vocabulary size of the `token_type_ids` passed when calling [`BigBirdModel`].\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n is_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n attention_type (`str`, *optional*, defaults to `\"block_sparse\"`)\n Whether to use block sparse attention (with n complexity) as introduced in paper or original attention\n layer (with n^2 complexity). Possible values are `\"original_full\"` and `\"block_sparse\"`.\n use_bias (`bool`, *optional*, defaults to `True`)\n Whether to use bias in query, key, value.\n rescale_embeddings (`bool`, *optional*, defaults to `False`)\n Whether to rescale embeddings with (hidden_size ** 0.5).\n block_size (`int`, *optional*, defaults to 64)\n Size of each block. Useful only when `attention_type == \"block_sparse\"`.\n num_random_blocks (`int`, *optional*, defaults to 3)\n Each query is going to attend these many number of random blocks. 
Useful only when `attention_type ==\n \"block_sparse\"`.\n classifier_dropout (`float`, *optional*):\n The dropout ratio for the classification head.\n\nExample:\n\n```python\n>>> from transformers import BigBirdConfig, BigBirdModel\n\n>>> # Initializing a BigBird google/bigbird-roberta-base style configuration\n>>> configuration = BigBirdConfig()\n\n>>> # Initializing a model (with random weights) from the google/bigbird-roberta-base style configuration\n>>> model = BigBirdModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "class IdeficsVisionModelOutput(ModelOutput):\n image_embeds: Optional[torch.FloatTensor] = None\n last_hidden_state: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.\n\nArgs:\n image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):\n The image embeddings obtained by applying the projection layer to the pooler_output.\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "tensorflow", "function": "def _forward_and_backward_functions(self, inference_args, input_tangents):\n outputs = self._func_graph.outputs[:self._num_inference_outputs]\n return self._build_functions_for_outputs(outputs, inference_args, input_tangents)", "docstring": "Shortcut for when only first-order gradients are required.\n\nThe returned backward function does not accept gradients with respect to\nside output of forward_function. This is fine as long as the user can't\npossibly request second order tape gradients, as when they've used a single\nnon-persistent GradientTape. 
Since we don't need the backward function to\ntake gradients with respect to side outputs, we can skip some potentially\nslow graph building.\n\nArgs:\n inference_args: A flat list of Tensors, arguments to the inference\n function.\n input_tangents: A flat list of Tensors, jvps associated with\n `inference_args`.\n\nReturns:\n A tuple of (forward_function, backward_function):\n forward_function: Takes the same inputs as the inference function, but\n returns side outputs used by backward_function in addition to the\n inference function's outputs.\n backward_function: Takes side outputs from forward_function and\n gradients with respect to the \"real\" outputs of forward_function and\n returns gradients with respect to the inputs."} +{"repo": "transformers", "function": "class PerceiverMultimodalPreprocessor(AbstractPreprocessor):\n\n def __init__(self, modalities: Mapping[str, PreprocessorType], mask_probs: Optional[Mapping[str, float]]=None, min_padding_size: int=2):\n super().__init__()\n self.modalities = nn.ModuleDict(modalities)\n self.min_padding_size = min_padding_size\n self.mask_probs = mask_probs if mask_probs is not None else {}\n self.padding = nn.ParameterDict({modality: nn.Parameter(torch.randn(1, self.num_channels - preprocessor.num_channels)) for modality, preprocessor in modalities.items()})\n self.mask = nn.ParameterDict({modality: nn.Parameter(torch.randn(1, self.num_channels)) for modality, _ in self.mask_probs.items()})\n\n @property\n def num_channels(self) -> int:\n max_channel_size = max((processor.num_channels for _, processor in self.modalities.items()))\n common_channel_size = max_channel_size + self.min_padding_size\n return common_channel_size\n\n def forward(self, inputs: Mapping[str, torch.Tensor], pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True, interpolate_pos_encoding: bool=False) -> PreprocessorOutputType:\n padded = {}\n modality_sizes = {}\n inputs_without_pos = {}\n for modality, preprocessor in self.modalities.items():\n output, _, inputs_without_pos[modality] = preprocessor(inputs[modality], pos=pos, network_input_is_1d=network_input_is_1d)\n batch_size, num_samples, num_channels = output.shape\n pos_enc = self.padding[modality].expand(batch_size, -1, -1)\n padding = torch.broadcast_to(pos_enc, [batch_size, num_samples, self.num_channels - num_channels])\n output_padded = torch.cat([output, padding], dim=2)\n if modality in self.mask_probs:\n mask_token = self.mask[modality].expand(batch_size, -1, -1)\n mask_prob = self.mask_probs[modality]\n mask = torch.bernoulli(torch.full([batch_size, num_samples], mask_prob))\n mask = torch.unsqueeze(mask, dim=2).to(mask_token.device)\n output_padded = (1 - mask) * output_padded + mask * mask_token\n padded[modality] = output_padded\n modality_sizes[modality] = output_padded.shape[1]\n padded_ls = [padded[k] for k in sorted(padded.keys())]\n final_inputs = torch.cat(padded_ls, dim=1)\n return (final_inputs, modality_sizes, inputs_without_pos)", "docstring": "Multimodal preprocessing for Perceiver Encoder.\n\nInputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number\nof channels.\n\nArgs:\n modalities (`Mapping[str, PreprocessorType]`):\n Dict mapping modality name to preprocessor.\n mask_probs (`Dict[str, float]`):\n Dict mapping modality name to masking probability of that modality.\n min_padding_size (`int`, *optional*, defaults to 2):\n The minimum padding size for all modalities. 
The final output will have num_channels equal to the maximum\n channels across all modalities plus min_padding_size."} +{"repo": "fhir-py", "function": "def expand_value_set_url(self, value_set_url: str) -> value_set_pb2.ValueSet:\n value_set_url, value_set_version = url_utils.parse_url_version(value_set_url)\n base_url, terminology_service_url = _expansion_request_url_for_value_set_url(value_set_url)\n auth = self.auth_per_terminology_server.get(base_url)\n return self._expand_value_set_url_using_service(value_set_url=value_set_url, value_set_version=value_set_version, terminology_service_url=terminology_service_url, auth=auth)", "docstring": "Expands the value set using a terminology server.\n\nRequests an expansion of the value set from the appropriate terminology\nserver for the given URL and version if present on the URL. The terminology\nservice is chosen based on the domain of `value_set_url`.\n\nRetrieves the current definition of the value set from the terminology\nservice as well as its expansion.\n\nArgs:\n value_set_url: The url of the value set to expand.\n\nRaises:\n ValueError: If a terminology service can not be found for `value_set_url`.\n\nReturns:\n The current definition of the value set from the server with its expanded\n codes present."} +{"repo": "tensorflow", "function": "def __init__(self, loss=None, predictions=None, metrics=None):\n if loss is not None:\n loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME)\n self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME)\n if predictions is not None:\n pred_dict = self._wrap_and_check_outputs(predictions, self.PREDICTIONS_NAME)\n self._predictions = self._prefix_output_keys(pred_dict, self.PREDICTIONS_NAME)\n if metrics is not None:\n self._metrics = self._wrap_and_check_metrics(metrics)", "docstring": "Constructor for SupervisedOutput (ie, Train or Eval output).\n\nArgs:\n loss: dict of Tensors or single Tensor representing calculated loss.\n predictions: dict of Tensors or single Tensor representing model\n predictions.\n metrics: Dict of metric results keyed by name.\n The values of the dict can be one of the following:\n (1) instance of `Metric` class.\n (2) (metric_value, update_op) tuples, or a single tuple.\n metric_value must be a Tensor, and update_op must be a Tensor or Op.\n\nRaises:\n ValueError: if any of the outputs' dict keys are not strings or tuples of\n strings or the values are not Tensors (or Operations in the case of\n update_op)."} +{"repo": "mobly", "function": "def find_field(item_list, cond, comparator, target_field):\n for item in item_list:\n if comparator(item, cond) and target_field in item:\n return item[target_field]\n return None", "docstring": "Finds the value of a field in a dict object that satisfies certain\nconditions.\n\nArgs:\n item_list: A list of dict objects.\n cond: A param that defines the condition.\n comparator: A function that checks if an dict satisfies the condition.\n target_field: Name of the field whose value to be returned if an item\n satisfies the condition.\n\nReturns:\n Target value or None if no item satisfies the condition."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, doc_scores: 
Optional[torch.FloatTensor]=None, context_input_ids: Optional[torch.LongTensor]=None, context_attention_mask: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_retrieved: Optional[bool]=None, n_docs: Optional[int]=None) -> Union[Tuple[torch.Tensor], RetrievAugLMOutput]:\n n_docs = n_docs if n_docs is not None else self.config.n_docs\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved\n has_to_retrieve = self.retriever is not None and (context_input_ids is None or context_attention_mask is None or doc_scores is None) and (encoder_outputs is None)\n if encoder_outputs is None:\n if has_to_retrieve:\n question_enc_outputs = self.question_encoder(input_ids, attention_mask=attention_mask, return_dict=True)\n question_encoder_last_hidden_state = question_enc_outputs[0]\n retriever_outputs = self.retriever(input_ids, question_encoder_last_hidden_state.detach().to(device='cpu', dtype=torch.float32).numpy(), prefix=self.generator.config.prefix, n_docs=n_docs, return_tensors='pt')\n if self.context_encoder_training:\n context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_input_ids, retrieved_doc_attention_mask, retrieved_doc_ids = (retriever_outputs['context_input_ids'], retriever_outputs['context_attention_mask'], retriever_outputs['retrieved_doc_embeds'], retriever_outputs['tokenized_doc_ids'], retriever_outputs['tokenized_doc_attention_mask'], retriever_outputs['doc_ids'])\n context_input_ids = context_input_ids.to(input_ids)\n context_attention_mask = context_attention_mask.to(input_ids)\n retrieved_doc_input_ids = retrieved_doc_input_ids.to(input_ids)\n retrieved_doc_attention_mask = retrieved_doc_attention_mask.to(input_ids)\n retrieved_doc_embeds = self.ctx_encoder(retrieved_doc_input_ids, attention_mask=retrieved_doc_attention_mask, return_dict=True).pooler_output\n retrieved_doc_embeds = retrieved_doc_embeds.view(-1, n_docs, question_encoder_last_hidden_state.shape[1])\n doc_scores = torch.bmm(question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(1)\n else:\n context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (retriever_outputs['context_input_ids'], retriever_outputs['context_attention_mask'], retriever_outputs['retrieved_doc_embeds'], retriever_outputs['doc_ids'])\n retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state)\n context_input_ids = context_input_ids.to(input_ids)\n context_attention_mask = context_attention_mask.to(input_ids)\n doc_scores = torch.bmm(question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(1)\n else:\n assert context_input_ids is not None, 'Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.'\n assert context_attention_mask is not None, 'Make sure that `context_attention_mask` are passed, if no `retriever` is set. 
Alternatively, you can set a retriever using the `set_retriever(...)` function.'\n assert doc_scores is not None, 'Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a retriever using the `set_retriever(...)` function.'\n assert doc_scores is not None, 'Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function.'\n assert doc_scores.shape[1] % n_docs == 0, f' The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is {context_input_ids.shape[0]}.'\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)\n if decoder_attention_mask is not None:\n decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0)\n gen_outputs = self.generator(input_ids=context_input_ids, attention_mask=context_attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, return_dict=True)\n if not has_to_retrieve:\n question_encoder_last_hidden_state = None\n question_enc_hidden_states = None\n question_enc_attentions = None\n retrieved_doc_embeds = None\n retrieved_doc_ids = None\n else:\n question_enc_hidden_states = question_enc_outputs.hidden_states\n question_enc_attentions = question_enc_outputs.attentions\n if not has_to_retrieve or not output_retrieved:\n context_input_ids = (None,)\n context_attention_mask = None\n retrieved_doc_embeds = None\n retrieved_doc_ids = None\n return RetrievAugLMOutput(logits=gen_outputs.logits, doc_scores=doc_scores, past_key_values=gen_outputs.past_key_values, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, retrieved_doc_embeds=retrieved_doc_embeds, retrieved_doc_ids=retrieved_doc_ids, question_encoder_last_hidden_state=question_encoder_last_hidden_state, question_enc_hidden_states=question_enc_hidden_states, question_enc_attentions=question_enc_attentions, generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state, generator_enc_hidden_states=gen_outputs.encoder_hidden_states, generator_enc_attentions=gen_outputs.encoder_attentions, generator_dec_hidden_states=gen_outputs.decoder_hidden_states, generator_dec_attentions=gen_outputs.decoder_attentions, generator_cross_attentions=gen_outputs.cross_attentions)", "docstring": "input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies\n which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to\n obtain the indices.\n\n [What are input IDs?](../glossary#input-ids)\nencoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*)\n Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,\n *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *\n sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the\n generator's encoder.\n\n Used by the ([`RagModel`]) model during decoding.\ndecoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Provide for generation tasks. 
`None` by default, construct as per instructions for the generator model\n you're using with your RAG instance.\ndecoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\ndoc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):\n Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and\n `question_encoder_last_hidden_state`. If the model has is not initialized with a `retriever` `doc_scores`\n has to be provided to the forward pass. `doc_scores` can be computed via\n `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.\ncontext_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):\n Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the\n retriever. If the model was not initialized with a `retriever` ``context_input_ids` has to be provided to\n the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].\ncontext_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`,*optional*, returned when *output_retrieved=True*):\n Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the\n retriever. If the model has is not initialized with a `retriever` `context_attention_mask` has to be\n provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].\noutput_retrieved (`bool`, *optional*):\n Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask`. See returned tensors for more detail.\nn_docs (`int`, *optional*):\n The number of documents to retrieve.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, RagRetriever, RagModel\n>>> import torch\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/rag-token-base\")\n>>> retriever = RagRetriever.from_pretrained(\n... \"facebook/rag-token-base\", index_name=\"exact\", use_dummy_dataset=True\n... )\n>>> # initialize with RagRetriever to do everything in one forward call\n>>> model = RagModel.from_pretrained(\"facebook/rag-token-base\", retriever=retriever)\n\n>>> inputs = tokenizer(\"How many people live in Paris?\", return_tensors=\"pt\")\n>>> outputs = model(input_ids=inputs[\"input_ids\"])\n```"} +{"repo": "etils", "function": "def reverse_fstring(pattern: str, string: str) -> dict[str, str] | None:\n pattern = _pattern_cache(pattern)\n if (m := pattern.fullmatch(string)):\n return m.groupdict()\n else:\n return None", "docstring": "Reverse f-string.\n\nExample:\n\n```python\nepy.reverse_fstring(\n '/home/{user}/projects/{project}',\n '/home/conchylicultor/projects/menhir'\n) == {\n 'user': 'conchylicultor',\n 'project': 'menhir',\n}\n```\n\nArgs:\n pattern: The f-string pattern (can only contained named group)\n string: The string to search\n\nReturns:\n The extracted info"} +{"repo": "pytype", "function": "class Class(Node):\n name: str\n keywords: tuple[tuple[str, TypeU], ...]\n bases: tuple[Class | TypeU, ...]\n methods: tuple[Function, ...]\n constants: tuple[Constant, ...]\n classes: tuple[Class, ...]\n decorators: tuple[Alias, ...]\n slots: tuple[str, ...] 
| None\n template: tuple[TemplateItem, ...]\n _name2item: dict[str, Any] = {}\n\n def _InitCache(self):\n for x in (self.methods, self.constants, self.classes):\n for item in x:\n self._name2item[item.name] = item\n\n def Lookup(self, name):\n \"\"\"Convenience function: Look up a given name in the class namespace.\n\n Tries to find a method or constant by this name in the class.\n\n Args:\n name: Name to look up.\n\n Returns:\n A Constant or Function instance.\n\n Raises:\n KeyError: if this identifier doesn't exist in this class.\n \"\"\"\n if not self._name2item:\n self._InitCache()\n return self._name2item[name]\n\n def Get(self, name):\n \"\"\"Version of Lookup that returns None instead of raising.\"\"\"\n if not self._name2item:\n self._InitCache()\n return self._name2item.get(name)\n\n def __contains__(self, name):\n return bool(self.Get(name))\n\n def __hash__(self):\n nohash = self.Replace(_name2item=None)\n return super(Class, nohash).__hash__()\n\n def IterChildren(self) -> Generator[tuple[str, Any | None], None, None]:\n for name, child in super().IterChildren():\n if name == '_name2item':\n continue\n yield (name, child)\n\n def Replace(self, **kwargs):\n if '_name2item' not in kwargs:\n kwargs['_name2item'] = {}\n return super().Replace(**kwargs)\n\n @property\n def metaclass(self):\n for key, val in self.keywords:\n if key == 'metaclass':\n return val\n return None", "docstring": "Represents a class declaration.\n\nUsed as dict/set key, so all components must be hashable.\n\nAttributes:\n name: Class name (string)\n bases: The super classes of this class (instances of pytd.Type).\n methods: Tuple of methods, classmethods, staticmethods (instances of\n pytd.Function).\n constants: Tuple of constant class attributes (instances of pytd.Constant).\n classes: Tuple of nested classes.\n slots: A.k.a. __slots__, declaring which instance attributes are writable.\n template: Tuple of pytd.TemplateItem instances."} +{"repo": "keras", "function": "def get(identifier):\n from keras.src.dtype_policies.dtype_policy import _get_quantized_dtype_policy_by_str\n if identifier is None:\n return dtype_policy.dtype_policy()\n if isinstance(identifier, DTypePolicy):\n return identifier\n if isinstance(identifier, dict):\n return deserialize(identifier)\n if isinstance(identifier, str):\n if identifier.startswith(QUANTIZATION_MODES):\n return _get_quantized_dtype_policy_by_str(identifier)\n else:\n return DTypePolicy(identifier)\n try:\n return DTypePolicy(backend.standardize_dtype(identifier))\n except:\n raise ValueError(f'Cannot interpret `dtype` argument. Expected a string or an instance of DTypePolicy. Received: dtype={identifier}')", "docstring": "Retrieves a Keras `DTypePolicy` instance.\n\nThe `identifier` may be the string name of a `DTypePolicy` class.\n\n>>> policy = dtype_policies.get(\"mixed_bfloat16\")\n>>> type(policy)\n\n\nYou can also specify `config` of the dtype policy to this function by\npassing dict containing `class_name` and `config` as an identifier. Also\nnote that the `class_name` must map to a `DTypePolicy` class\n\n>>> identifier = {\"class_name\": \"DTypePolicy\",\n... \"config\": {\"name\": \"float32\"}}\n>>> policy = dtype_policies.get(identifier)\n>>> type(policy)\n\n\nArgs:\n identifier: A dtype policy identifier. 
One of `None` or string name of a\n `DTypePolicy` or `DTypePolicy` configuration dictionary or a\n `DTypePolicy` instance.\n\nReturns:\n A Keras `DTypePolicy` instance."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None):\n return token_ids_0 + [self.eos_token_id]", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A Blenderbot sequence has the following format:\n- single sequence: ` X `\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added\n token_ids_1 (`List[int]`, *optional*):\n Will be ignored\nReturns:\n `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, aspect_ratio_mask: Optional[torch.Tensor]=None, aspect_ratio_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, cross_attention_mask: Optional[torch.Tensor]=None, cross_attention_states: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[KwargsForCausalLM]) -> Union[Tuple, CausalLMOutputWithPast]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, aspect_ratio_mask=aspect_ratio_mask, aspect_ratio_ids=aspect_ratio_ids, cross_attention_mask=cross_attention_mask, cross_attention_states=cross_attention_states, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs)\n hidden_states = outputs[0]\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n loss = None\n if labels is not None:\n loss = self.loss_function(logits, labels, self.config.text_config.vocab_size, **kwargs)\n return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "aspect_ratio_mask (`torch.Tensor` of shape `(batch_size, max_num_images, max_num_tiles)`, *optional*):\n Mask to avoid performing attention on padding tiles. 
Mask values selected in `[0, 1]`:\n\n - 1 for tiles that are **not masked**,\n - 0 for tiles that are **masked**.\naspect_ratio_ids (`torch.Tensor` of shape `(batch_size, max_num_images)`, *optional*):\n Aspect ratio ids used to select the appropriate precomputed tile embeddings based on the aspect ratio of each input image.\n These ids correspond to indices in the model's list of supported aspect ratios, offset by 1.\n\n For example, if the model supports aspect ratios [[1, 1], [1, 2], [2, 1]]:\n - An image with aspect ratio [1, 1] would have ID 1\n - An image with aspect ratio [1, 2] would have ID 2\n - An image with aspect ratio [2, 1] would have ID 3\n\n The id 0 is reserved for padding (i.e., no image).\n\n If an image has aspect ratio [1, 2], that means it was split into 2 tiles horizontally, and its `aspect_ratio_id` would be 2.\ncross_attention_mask (`torch.Tensor` of shape `(batch_size, seq_length, max_num_images, max_num_tiles)`, *optional*):\n Cross-attention mask to control the interaction between text tokens and image tiles.\n This 4D tensor defines which image tiles each text token should attend to.\n\n For each text token (in seq_length):\n - 1 indicates the token **should attend** to the corresponding image tile\n - 0 indicates the token **should not attend** to the corresponding image tile\ncross_attention_states (`torch.FloatTensor`, *optional*):\n Output of the vision model, used for cross-attention. This tensor contains the processed image features that\n the language model will attend to.\nlabels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n\n```python\n>>> from PIL import Image\n>>> import requests\n>>> from transformers import AutoProcessor, MllamaForConditionalGeneration\n\n>>> checkpoint = \"meta-llama/Llama-3.2-11B-Vision\"\n>>> model = MllamaForConditionalGeneration.from_pretrained(checkpoint)\n>>> processor = AutoProcessor.from_pretrained(checkpoint)\n\n>>> prompt = \"<|image|>If I had to write a haiku for this one\"\n>>> url = \"https://www.ilankelman.org/stopsigns/australia.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> inputs = processor(text=prompt, images=image, return_tensors=\"pt\")\n\n>>> # Generate\n>>> output = model.generate(**inputs, max_new_tokens=15)\n\n>>> prompt_len = inputs.input_ids.shape[-1]\n>>> generated_ids = output[:, prompt_len:]\n>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)\n>>> print(generated_text)\n[', it would be:.\\\\nA stop sign in Chinatown.\\\\n']\n```"} +{"repo": "fhir-py", "function": "def field_is_required(field_descriptor: descriptor.FieldDescriptor) -> bool:\n return get_value_for_annotation_extension(field_descriptor, annotations_pb2.validation_requirement) == annotations_pb2.Requirement.REQUIRED_BY_FHIR", "docstring": "Returns true if field_desriptor is marked as 'required' by FHIR.\n\nArgs:\n field_descriptor: A FieldDescriptor to examine.\n\nReturns:\n A Boolean indicating whether or not field_descriptor is required.\n\nRaises:\n ValueError: Unable to retrieve options for type: ."} +{"repo": "tensorflow", "function": "def get_data(self, how_many, offset, model_settings, 
background_frequency, background_volume_range, time_shift, mode, sess):\n candidates = self.data_index[mode]\n if how_many == -1:\n sample_count = len(candidates)\n else:\n sample_count = max(0, min(how_many, len(candidates) - offset))\n data = np.zeros((sample_count, model_settings['fingerprint_size']))\n labels = np.zeros(sample_count)\n desired_samples = model_settings['desired_samples']\n use_background = self.background_data and mode == 'training'\n pick_deterministically = mode != 'training'\n for i in range(offset, offset + sample_count):\n if how_many == -1 or pick_deterministically:\n sample_index = i\n else:\n sample_index = np.random.randint(len(candidates))\n sample = candidates[sample_index]\n if time_shift > 0:\n time_shift_amount = np.random.randint(-time_shift, time_shift)\n else:\n time_shift_amount = 0\n if time_shift_amount > 0:\n time_shift_padding = [[time_shift_amount, 0], [0, 0]]\n time_shift_offset = [0, 0]\n else:\n time_shift_padding = [[0, -time_shift_amount], [0, 0]]\n time_shift_offset = [-time_shift_amount, 0]\n input_dict = {self.wav_filename_placeholder_: sample['file'], self.time_shift_padding_placeholder_: time_shift_padding, self.time_shift_offset_placeholder_: time_shift_offset}\n if use_background or sample['label'] == SILENCE_LABEL:\n background_index = np.random.randint(len(self.background_data))\n background_samples = self.background_data[background_index]\n if len(background_samples) <= model_settings['desired_samples']:\n raise ValueError('Background sample is too short! Need more than %d samples but only %d were found' % (model_settings['desired_samples'], len(background_samples)))\n background_offset = np.random.randint(0, len(background_samples) - model_settings['desired_samples'])\n background_clipped = background_samples[background_offset:background_offset + desired_samples]\n background_reshaped = background_clipped.reshape([desired_samples, 1])\n if sample['label'] == SILENCE_LABEL:\n background_volume = np.random.uniform(0, 1)\n elif np.random.uniform(0, 1) < background_frequency:\n background_volume = np.random.uniform(0, background_volume_range)\n else:\n background_volume = 0\n else:\n background_reshaped = np.zeros([desired_samples, 1])\n background_volume = 0\n input_dict[self.background_data_placeholder_] = background_reshaped\n input_dict[self.background_volume_placeholder_] = background_volume\n if sample['label'] == SILENCE_LABEL:\n input_dict[self.foreground_volume_placeholder_] = 0\n else:\n input_dict[self.foreground_volume_placeholder_] = 1\n summary, data_tensor = sess.run([self.merged_summaries_, self.output_], feed_dict=input_dict)\n self.summary_writer_.add_summary(summary)\n data[i - offset, :] = data_tensor.flatten()\n label_index = self.word_to_index[sample['label']]\n labels[i - offset] = label_index\n return (data, labels)", "docstring": "Gather samples from the data set, applying transformations as needed.\n\nWhen the mode is 'training', a random selection of samples will be returned,\notherwise the first N clips in the partition will be used. This ensures that\nvalidation always uses the same samples, reducing noise in the metrics.\n\nArgs:\n how_many: Desired number of samples to return. 
-1 means the entire\n contents of this partition.\n offset: Where to start when fetching deterministically.\n model_settings: Information about the current model being trained.\n background_frequency: How many clips will have background noise, 0.0 to\n 1.0.\n background_volume_range: How loud the background noise will be.\n time_shift: How much to randomly shift the clips by in time.\n mode: Which partition to use, must be 'training', 'validation', or\n 'testing'.\n sess: TensorFlow session that was active when processor was created.\n\nReturns:\n List of sample data for the transformed samples, and list of label indexes\n\nRaises:\n ValueError: If background samples are too short."} +{"repo": "transformers", "function": "def replace_batch_norm(model):\n for name, module in model.named_children():\n if isinstance(module, nn.BatchNorm2d):\n new_module = ConditionalDetrFrozenBatchNorm2d(module.num_features)\n if not module.weight.device == torch.device('meta'):\n new_module.weight.data.copy_(module.weight)\n new_module.bias.data.copy_(module.bias)\n new_module.running_mean.data.copy_(module.running_mean)\n new_module.running_var.data.copy_(module.running_var)\n model._modules[name] = new_module\n if len(list(module.children())) > 0:\n replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `ConditionalDetrFrozenBatchNorm2d`.\n\nArgs:\n model (torch.nn.Module):\n input model"} +{"repo": "nsscache", "function": "def Verify(self):\n return getattr(self, self._KEY) is not None", "docstring": "We can properly index this instance into a Map.\n\nReturns:\n True if the value in the attribute named by self._KEY for this class\n is not None. False otherwise."} +{"repo": "tensorflow", "function": "def _read_variable_op(self, no_copy=False):\n variable_accessed(self)\n self._variable_read = True\n\n def read_and_set_handle(no_copy):\n if no_copy and forward_compat.forward_compatible(2022, 5, 3):\n gen_resource_variable_ops.disable_copy_on_read(self.handle)\n result = gen_resource_variable_ops.read_variable_op(self.handle, self._dtype)\n _maybe_set_handle_data(self._dtype, self.handle, result)\n return result\n if getattr(self, '_caching_device', None) is not None:\n with ops.colocate_with(None, ignore_existing=True):\n with ops.device(self._caching_device):\n result = read_and_set_handle(no_copy)\n else:\n result = read_and_set_handle(no_copy)\n if not context.executing_eagerly():\n record.record_operation('ReadVariableOp', [result], [self.handle], backward_function=lambda x: [x], forward_function=lambda x: [x])\n if context.xla_sharding_for_resource_variables_enabled() and (not context.executing_eagerly()) and (self._xla_sharding is not None):\n sharding_string = self._xla_sharding.SerializeToString()\n with ops.colocate_with(result):\n result = gen_xla_ops.xla_sharding(result, sharding=sharding_string)\n result.op._set_attr('_XlaSharding', attr_value_pb2.AttrValue(s=sharding_string))\n return result", "docstring": "Reads the value of the variable.\n\nIf the variable is in copy-on-read mode and `no_copy` is True, the variable\nis converted to copy-on-write mode before it is read.\n\nArgs:\n no_copy: Whether to prevent a copy of the variable.\n\nReturns:\n The value of the variable."} +{"repo": "keras", "function": "def is_remote_path(filepath):\n if re.match('^(/cns|/cfs|/gcs|/hdfs|/readahead|/placer|/tfhub|.*://).*$', str(filepath)):\n return True\n return False", "docstring": "Determines if a given filepath indicates a remote location.\n\nThis function checks if 
the filepath represents a known remote pattern\nsuch as GCS (`/gcs`), CNS (`/cns`), CFS (`/cfs`), HDFS (`/hdfs`), Placer\n(`/placer`), TFHub (`/tfhub`), or a URL (`.*://`).\n\nArgs:\n filepath (str): The path to be checked.\n\nReturns:\n bool: True if the filepath is a recognized remote path, otherwise False"} +{"repo": "beam", "function": "def with_id_spec(self, column_name: str='id', python_type: Type=str, convert_fn: Optional[Callable[[str], Any]]=None, sql_typecast: Optional[str]=None) -> 'ColumnSpecsBuilder':\n\n def value_fn(chunk: Chunk) -> Any:\n value = chunk.id\n return convert_fn(value) if convert_fn else value\n self._specs.append(ColumnSpec(column_name=column_name, python_type=python_type, value_fn=value_fn, sql_typecast=sql_typecast))\n return self", "docstring": "Add ID :class:`.ColumnSpec` with optional type and conversion.\n\nArgs:\n column_name: Name for the ID column (defaults to \"id\")\n python_type: Python type for the column (defaults to str)\n convert_fn: Optional function to convert the chunk ID\n If None, uses ID as-is\n sql_typecast: Optional SQL type cast\n\nReturns:\n Self for method chaining\n\nExample:\n >>> builder.with_id_spec(\n ... column_name=\"doc_id\",\n ... python_type=int,\n ... convert_fn=lambda id: int(id.split('_')[1])\n ... )"} +{"repo": "pyglove", "function": "def format_candidate(self, index: int, display_format: str='choice_and_literal') -> Union[str, int, float]:\n if display_format not in ['choice', 'literal', 'choice_and_literal']:\n raise ValueError(f\"`display_format` must be either 'choice', 'literal', or 'choice_and_literal'. Encountered: {display_format!r}.\")\n if self.literal_values:\n if display_format == 'literal':\n return self.literal_values[index]\n elif display_format == 'choice_and_literal':\n return f'{index}/{len(self.candidates)} ({self.literal_values[index]})'\n return f'{index}/{len(self.candidates)}'", "docstring": "Get a formatted candidate value by index.\n\nArgs:\n index: The index of the candidate to format.\n display_format: One of 'choice', 'literal' and 'choice_and_literal' as\n the output format for human consumption.\n\nReturns:\n A int, float or string that represent the candidate based on the\n display format."} +{"repo": "pyglove", "function": "def trivial_reward(example):\n return example", "docstring": "Reward for the trivial search space.\n\nThe reward (i.e. fitness) is the value itself. The goal of the search,\ntherefore, is to find the value 1.\n\nArgs:\n example: a materialized value.\n\nReturns:\n The corresponding reward."} +{"repo": "genai-processors", "function": "def __init__(self, api_key: str, model_name: str, realtime_config: Optional[genai_types.LiveConnectConfigOrDict]=None, debug_config: client.DebugConfig | None=None, http_options: genai_types.HttpOptions | genai_types.HttpOptionsDict | None=None):\n self._client = client.Client(api_key=api_key, debug_config=debug_config, http_options=http_options)\n self._model_name = model_name\n self._realtime_config = realtime_config", "docstring": "Initializes the Live Processor.\n\nArgs:\n api_key: The [API key](https://ai.google.dev/gemini-api/docs/api-key) to\n use for authentication. Applies to the Gemini Developer API only.\n model_name: The name of the model to use. See\n https://ai.google.dev/gemini-api/docs/models for a list of available\n models. 
Only use models with a `-live-` suffix.\n realtime_config: The configuration for generating realtime content.\n debug_config: Config settings that control network behavior of the client.\n This is typically used when running test code.\n http_options: Http options to use for the client. These options will be\n applied to all requests made by the client. Example usage: `client =\n genai.Client(http_options=types.HttpOptions(api_version='v1'))`.\n\nReturns:\n A `Processor` that calls the Genai API in a realtime (aka live) fashion."} +{"repo": "transformers", "function": "def fn(x: str, y: Optional[list[Union[str, int]]], z: tuple[Union[str, int], str]=(42, 'hello')) -> tuple[int, str]:\n pass", "docstring": "Test function with multiple args, and docstring args that we have to strip out.\n\nArgs:\n x: The first input. It's got a big multiline\n description and also contains\n (choices: [\"a\", \"b\", \"c\"])\n\n y: The second input. It's a big list with a single-line description.\n\n z: The third input. It's some kind of tuple with a default arg.\n\nReturns:\n The output. The return description is also a big multiline\n description that spans multiple lines."} +{"repo": "tensorflow", "function": "def prefer_static_value(x):\n static_x = tensor_util.constant_value(x)\n if static_x is not None:\n return static_x\n return x", "docstring": "Return static value of tensor `x` if available, else `x`.\n\nArgs:\n x: `Tensor` (already converted).\n\nReturns:\n Numpy array (if static value is obtainable), else `Tensor`."} +{"repo": "tensorflow", "function": "def _GetGradReduced(output_grad, output_subs, input_subs, input_shape, reduced_label_set):\n reduced_subs, reduced_dims, reduced_axes = _GetReducedSubscripts(reduced_label_set, input_shape, input_subs)\n has_repeated_labels = len(set(input_subs)) + len(set(output_subs)) < len(input_subs) + len(output_subs)\n input_subs_without_reduced_labels = ''.join([s for s in input_subs if s not in reduced_label_set])\n if not has_repeated_labels and input_subs_without_reduced_labels == output_subs:\n reduced_shape = math_ops.reduced_shape(input_shape, ops.convert_to_tensor(reduced_axes))\n return array_ops.broadcast_to(array_ops.reshape(output_grad, reduced_shape), input_shape)\n grad_shape_with_reduced_labels = array_ops.concat([reduced_dims, array_ops.shape(output_grad)], axis=0)\n reduced_shape = array_ops.concat([array_ops.ones(len(reduced_label_set), dtype=dtypes.int32), array_ops.shape(output_grad)], axis=0)\n broadcasted_grad = array_ops.broadcast_to(array_ops.reshape(output_grad, reduced_shape), grad_shape_with_reduced_labels)\n return gen_linalg_ops.einsum([broadcasted_grad], '{}->{}'.format(reduced_subs + output_subs, input_subs))", "docstring": "Returns the gradient wrt input for a unary einsum with reductions.\n\nArgs:\n output_grad: The gradient wrt the output of a unary einsum operation.\n output_subs: The output subscript. (E.g. `ac` for equation `abc->ac`).\n input_subs: The input subscript. (E.g. `abc` for equation `abc->ac`).\n input_shape: A `Tensor` representing the shape of the input operand.\n reduced_label_set: The set of axis labels appearing in `input_subs` but\n not in `output_subs`."} +{"repo": "keras", "function": "def set_floatx(value):\n global _FLOATX\n accepted_dtypes = {'bfloat16', 'float16', 'float32', 'float64'}\n if value not in accepted_dtypes:\n raise ValueError(f'Unknown `floatx` value: {value}. 
Expected one of {accepted_dtypes}')\n _FLOATX = str(value)", "docstring": "Set the default float dtype.\n\nNote: It is not recommended to set this to `\"float16\"` for training,\nas this will likely cause numeric stability issues.\nInstead, mixed precision, which leverages\na mix of `float16` and `float32`. It can be configured by calling\n`keras.mixed_precision.set_dtype_policy('mixed_float16')`.\n\nArgs:\n value: String; `'bfloat16'`, `'float16'`, `'float32'`, or `'float64'`.\n\nExamples:\n>>> keras.config.floatx()\n'float32'\n\n>>> keras.config.set_floatx('float64')\n>>> keras.config.floatx()\n'float64'\n\n>>> # Set it back to float32\n>>> keras.config.set_floatx('float32')\n\nRaises:\n ValueError: In case of invalid value."} +{"repo": "tensorflow", "function": "def _ssim_helper(x, y, reducer, max_val, compensation=1.0, k1=0.01, k2=0.03):\n c1 = (k1 * max_val) ** 2\n c2 = (k2 * max_val) ** 2\n mean0 = reducer(x)\n mean1 = reducer(y)\n num0 = mean0 * mean1 * 2.0\n den0 = math_ops.square(mean0) + math_ops.square(mean1)\n luminance = (num0 + c1) / (den0 + c1)\n num1 = reducer(x * y) * 2.0\n den1 = reducer(math_ops.square(x) + math_ops.square(y))\n c2 *= compensation\n cs = (num1 - num0 + c2) / (den1 - den0 + c2)\n return (luminance, cs)", "docstring": "Helper function for computing SSIM.\n\nSSIM estimates covariances with weighted sums. The default parameters\nuse a biased estimate of the covariance:\nSuppose `reducer` is a weighted sum, then the mean estimators are\n \\mu_x = \\sum_i w_i x_i,\n \\mu_y = \\sum_i w_i y_i,\nwhere w_i's are the weighted-sum weights, and covariance estimator is\n cov_{xy} = \\sum_i w_i (x_i - \\mu_x) (y_i - \\mu_y)\nwith assumption \\sum_i w_i = 1. This covariance estimator is biased, since\n E[cov_{xy}] = (1 - \\sum_i w_i ^ 2) Cov(X, Y).\nFor SSIM measure with unbiased covariance estimators, pass as `compensation`\nargument (1 - \\sum_i w_i ^ 2).\n\nArgs:\n x: First set of images.\n y: Second set of images.\n reducer: Function that computes 'local' averages from the set of images. For\n non-convolutional version, this is usually tf.reduce_mean(x, [1, 2]), and\n for convolutional version, this is usually tf.nn.avg_pool2d or\n tf.nn.conv2d with weighted-sum kernel.\n max_val: The dynamic range (i.e., the difference between the maximum\n possible allowed value and the minimum allowed value).\n compensation: Compensation factor. 
See above.\n k1: Default value 0.01\n k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so\n it would be better if we took the values in the range of 0 < K2 < 0.4).\n\nReturns:\n A pair containing the luminance measure, and the contrast-structure measure."} +{"repo": "mobly", "function": "def get_timezone_olson_id():\n tzoffset = int(time.timezone / 3600)\n if tzoffset <= 0:\n gmt = f'GMT+{-tzoffset}'\n else:\n gmt = f'GMT-{tzoffset}'\n return GMT_to_olson[gmt]", "docstring": "Return the Olson ID of the local (non-DST) timezone.\n\nReturns:\n A string representing one of the Olson IDs of the local (non-DST)\n timezone."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, candidate_input_ids: Optional[torch.LongTensor]=None, candidate_attention_mask: Optional[torch.FloatTensor]=None, candidate_token_type_ids: Optional[torch.LongTensor]=None, candidate_inputs_embeds: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, RealmScorerOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if input_ids is None and inputs_embeds is None:\n raise ValueError('You have to specify either input_ids or input_embeds.')\n if candidate_input_ids is None and candidate_inputs_embeds is None:\n raise ValueError('You have to specify either candidate_input_ids or candidate_inputs_embeds.')\n query_outputs = self.query_embedder(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n flattened_input_ids, flattened_attention_mask, flattened_token_type_ids = self._flatten_inputs(candidate_input_ids, candidate_attention_mask, candidate_token_type_ids)\n candidate_outputs = self.embedder(flattened_input_ids, attention_mask=flattened_attention_mask, token_type_ids=flattened_token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=candidate_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n query_score = query_outputs[0]\n candidate_score = candidate_outputs[0]\n candidate_score = candidate_score.view(-1, self.config.num_candidates, self.config.retriever_proj_size)\n relevance_score = torch.einsum('bd,bnd->bn', query_score, candidate_score)\n if not return_dict:\n return (relevance_score, query_score, candidate_score)\n return RealmScorerOutput(relevance_score=relevance_score, query_score=query_score, candidate_score=candidate_score)", "docstring": "candidate_input_ids (`torch.LongTensor` of shape `(batch_size, num_candidates, sequence_length)`):\n Indices of candidate input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\ncandidate_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_candidates, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\ncandidate_token_type_ids (`torch.LongTensor` of shape `(batch_size, num_candidates, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\ncandidate_inputs_embeds (`torch.FloatTensor` of shape `(batch_size * num_candidates, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `candidate_input_ids` you can choose to directly pass an embedded\n representation. This is useful if you want more control over how to convert *candidate_input_ids* indices\n into associated vectors than the model's internal embedding lookup matrix.\n\nReturns:\n\nExample:\n\n```python\n>>> import torch\n>>> from transformers import AutoTokenizer, RealmScorer\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/realm-cc-news-pretrained-scorer\")\n>>> model = RealmScorer.from_pretrained(\"google/realm-cc-news-pretrained-scorer\", num_candidates=2)\n\n>>> # batch_size = 2, num_candidates = 2\n>>> input_texts = [\"How are you?\", \"What is the item in the picture?\"]\n>>> candidates_texts = [[\"Hello world!\", \"Nice to meet you!\"], [\"A cute cat.\", \"An adorable dog.\"]]\n\n>>> inputs = tokenizer(input_texts, return_tensors=\"pt\")\n>>> candidates_inputs = tokenizer.batch_encode_candidates(candidates_texts, max_length=10, return_tensors=\"pt\")\n\n>>> outputs = model(\n... **inputs,\n... candidate_input_ids=candidates_inputs.input_ids,\n... candidate_attention_mask=candidates_inputs.attention_mask,\n... candidate_token_type_ids=candidates_inputs.token_type_ids,\n... )\n>>> relevance_score = outputs.relevance_score\n```"} +{"repo": "transformers", "function": "def check_onnxruntime_requirements(minimum_version: Version):\n try:\n import onnxruntime\n ort_version = parse(onnxruntime.__version__)\n if ort_version < ORT_QUANTIZE_MINIMUM_VERSION:\n raise ImportError(f'We found an older version of onnxruntime ({onnxruntime.__version__}) but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\\nPlease update onnxruntime by running `pip install --upgrade onnxruntime`')\n except ImportError:\n raise ImportError(\"onnxruntime doesn't seem to be currently installed. Please install the onnxruntime by running `pip install onnxruntime` and relaunch the conversion.\")", "docstring": "Check onnxruntime is installed and if the installed version match is recent enough\n\nRaises:\n ImportError: If onnxruntime is not installed or too old version is found"} +{"repo": "tf-quant-finance", "function": "def _apply_op(self, op_fn):\n raise NotImplementedError()", "docstring": "Applies given tensor-to-tensor op.\n\nThis method is used for implementing ops that take a tensor and return a new\ntensor, such as tf.expand_dims or tf.transpose. 
Implementing wrappers\nshould apply `op_fn` to the backing tensor(s) and return an new wrapper\ninstance with the updated backing tensor.\n\nArgs:\n op_fn: Callable that applies tensor-to-tensor op to the given Tensor.\n E.g. applies tf.expand_dims.\n\nReturns:\n A TensorWrapper instance with updated backing tensor(s)."} +{"repo": "transformers", "function": "def __init__(self, batch_size: int, ngram_len: int, context_history_size: int, device: torch.device):\n self.context = torch.zeros((batch_size, ngram_len - 1), dtype=torch.int64, device=device)\n self.context_history = torch.zeros((batch_size, context_history_size), dtype=torch.int64, device=device)\n self.num_calls = 0", "docstring": "Initializes the state.\n\nArgs:\n batch_size (`int`): Batch size.\n ngram_len (`int`): Ngram length.\n context_history_size (`int`): Size of the tensor to keep track of seen contexts.\n device (`int`): Device to use."} +{"repo": "fhir-py", "function": "class TerminologyServiceClient:\n\n def __init__(self, auth_per_terminology_server: Dict[str, Union[Tuple[str, str], str]]) -> None:\n self.auth_per_terminology_server = auth_per_terminology_server\n\n def expand_value_set_url(self, value_set_url: str) -> value_set_pb2.ValueSet:\n \"\"\"Expands the value set using a terminology server.\n\n Requests an expansion of the value set from the appropriate terminology\n server for the given URL and version if present on the URL. The terminology\n service is chosen based on the domain of `value_set_url`.\n\n Retrieves the current definition of the value set from the terminology\n service as well as its expansion.\n\n Args:\n value_set_url: The url of the value set to expand.\n\n Raises:\n ValueError: If a terminology service can not be found for `value_set_url`.\n\n Returns:\n The current definition of the value set from the server with its expanded\n codes present.\n \"\"\"\n value_set_url, value_set_version = url_utils.parse_url_version(value_set_url)\n base_url, terminology_service_url = _expansion_request_url_for_value_set_url(value_set_url)\n auth = self.auth_per_terminology_server.get(base_url)\n return self._expand_value_set_url_using_service(value_set_url=value_set_url, value_set_version=value_set_version, terminology_service_url=terminology_service_url, auth=auth)\n\n def expand_value_set_url_using_service(self, value_set_url: str, terminology_service_url: str) -> value_set_pb2.ValueSet:\n \"\"\"Expands the value set using the requested terminology service.\n\n Requests an expansion of the value set from the terminology\n server at `terminology_service_url` for the given URL and version if present\n on the URL.\n\n If the terminology service requires credentials to access,\n `terminology_service_url` must have an entry in the\n `auth_per_terminology_server` given to this class' constructor.\n\n Retrieves the current definition of the value set from the terminology\n service as well as its expansion.\n\n Args:\n value_set_url: The url of the value set to expand.\n terminology_service_url: The url of the terminology service to use when\n expanding `value_set_url`.\n\n Returns:\n The current definition of the value set from the server with its expanded\n codes present.\n \"\"\"\n value_set_url, value_set_version = url_utils.parse_url_version(value_set_url)\n auth = self.auth_per_terminology_server.get(terminology_service_url)\n return self._expand_value_set_url_using_service(value_set_url=value_set_url, value_set_version=value_set_version, terminology_service_url=terminology_service_url, auth=auth)\n\n def 
expand_value_set_definition(self, value_set: value_set_pb2.ValueSet) -> value_set_pb2.ValueSet:\n \"\"\"Expands the value set definition using a terminology server.\n\n Requests an expansion of the given value set from the appropriate\n terminology server. Attempts to expand arbitrary value sets by passing their\n entire definition to the terminology service for expansion.\n\n If possible, requests expansion from the domain associated with the value\n set's URL. If the value set URL is not associated with a known terminology\n service, uses the tx.fhir.org service as it is able to expand value sets\n defined outside its own specifications.\n\n Retrieves the current definition of the value set from the terminology\n service as well as its expansion.\n\n Args:\n value_set: The value set to expand.\n\n Returns:\n The current definition of the value set from the server with its expanded\n codes present.\n \"\"\"\n base_url, request_url = _expansion_request_url_for_value_set_url(value_set.url.value)\n request_json = json_format.print_fhir_to_json_string(value_set).encode('utf-8')\n session_ = self.create_session()\n session_.headers.update({'Accept': 'application/json', 'Content-Type': 'application/json'})\n auth = self.auth_per_terminology_server.get(base_url)\n if auth is not None:\n if isinstance(auth, tuple) and len(auth) == 2:\n logging.debug('Using Basic auth for auth')\n session_.auth = auth\n else:\n logging.debug('Using Bearer token for auth')\n session_.headers['Authorization'] = auth\n logging.info('Expanding value set url: %s version: %s using terminology service: %s', value_set.url.value, value_set.version.value, base_url)\n with session_ as session:\n\n def request_func(offset: int) -> requests.Response:\n return session.post(request_url, data=request_json, params={'offset': offset})\n expanded_value_set = _paginate_expand_value_set_request(request_func, value_set.url.value, value_set.version.value)\n logging.info('Retrieved %d codes for value set url: %s version: %s using terminology service: %s', len(expanded_value_set.expansion.contains), value_set.url.value, value_set.version.value, base_url)\n return expanded_value_set\n\n @classmethod\n def create_session(cls) -> requests.Session:\n \"\"\"Builds a request session with exponential back-off retries.\"\"\"\n session = requests.Session()\n retry_policy = requests.packages.urllib3.util.Retry(backoff_factor=2)\n adapter = requests.adapters.HTTPAdapter(max_retries=retry_policy)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n return session\n\n def _expand_value_set_url_using_service(self, value_set_url: str, value_set_version: Optional[str], terminology_service_url: str, auth: Optional[Union[Tuple[str, str], str]]) -> value_set_pb2.ValueSet:\n \"\"\"Expands the value set using the requested terminology service.\n\n Requests an expansion of the value set from the terminology\n server at `terminology_service_url` for the given URL and version.\n\n Args:\n value_set_url: The url of the value set to expand.\n value_set_version: The version of the value set to retrieve or None for\n the latest version.\n terminology_service_url: The url of the terminology service to use when\n expanding `value_set_url`.\n auth: A tuple of (user_name, password) to use when performing basic auth\n with the terminology service or a singular token added to the\n Authorization header or None if no authentication is required.\n\n Returns:\n The current definition of the value set from the server with its expanded\n codes present.\n 
\"\"\"\n params = {'url': value_set_url}\n if value_set_version is not None:\n params['valueSetVersion'] = value_set_version\n session_ = self.create_session()\n session_.headers.update({'Accept': 'application/json'})\n if auth is not None:\n if isinstance(auth, tuple) and len(auth) == 2:\n logging.debug('Using Basic auth for auth')\n session_.auth = auth\n else:\n logging.debug('Using Bearer token for auth')\n session_.headers['Authorization'] = auth\n logging.info('Expanding value set url: %s version: %s using terminology service: %s', value_set_url, value_set_version, terminology_service_url)\n with session_ as session:\n\n def request_func(offset: int) -> requests.Response:\n return session.get(terminology_service_url, params={'offset': offset, **params})\n expanded_value_set = _paginate_expand_value_set_request(request_func, value_set_url, value_set_version)\n logging.info('Retrieved %d codes for value set url: %s version: %s using terminology service: %s', len(expanded_value_set.expansion.contains), value_set_url, value_set_version, terminology_service_url)\n return expanded_value_set", "docstring": "Client for interacting with terminology servers.\n\nAttributes:\n auth_per_terminology_server: The basic auth values to use when communicating\n with each terminology server. The keys of this dictionary should be root\n URLs of terminology services. The values can a be tuple of (username,\n password) strings for use in basic auth or a singular value that is sent\n as a Bearer auth. If the terminology server does not require an\n authorization to access, the entry for that server may be omitted from\n api_keys_per_terminology_server or given a value of None."} +{"repo": "sprockets", "function": "class StateResolved(stl.base.ParameterizedObject):\n\n def __init__(self, name, state):\n stl.base.ParameterizedObject.__init__(self, name)\n self.state = state\n self.resolved_params = []\n\n def __str__(self):\n return 'STATE %s(%s)' % (self.name, stl.base.GetCSV(self.resolved_params))\n\n def __eq__(self, other):\n return stl.base.ParameterizedObject.__eq__(self, other) and self.state == other.state and (self.resolved_params == other.resolved_params)\n\n def InitialValue(self):\n \"\"\"Returns the first state value which is defined as the initial value.\"\"\"\n return StateValue(self, self.state.values[0])", "docstring": "State specified in trasition spec and resolved.\n\nAttributes:\n state: Original parameterized state.\n resolved_params: List of parameter values resolved. 
The order of values is\n same to that of parameters."} +{"repo": "transformers", "function": "class TrOCRProcessor(ProcessorMixin):\n attributes = ['image_processor', 'tokenizer']\n image_processor_class = 'AutoImageProcessor'\n tokenizer_class = 'AutoTokenizer'\n\n def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n feature_extractor = None\n if 'feature_extractor' in kwargs:\n warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)\n feature_extractor = kwargs.pop('feature_extractor')\n image_processor = image_processor if image_processor is not None else feature_extractor\n if image_processor is None:\n raise ValueError('You need to specify an `image_processor`.')\n if tokenizer is None:\n raise ValueError('You need to specify a `tokenizer`.')\n super().__init__(image_processor, tokenizer)\n self.current_processor = self.image_processor\n self._in_target_context_manager = False\n\n def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[TrOCRProcessorKwargs]) -> BatchFeature:\n \"\"\"\n When used in normal mode, this method forwards all its arguments to AutoImageProcessor's\n [`~AutoImageProcessor.__call__`] and returns its output. If used in the context\n [`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's\n [`~TrOCRTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.\n \"\"\"\n if self._in_target_context_manager:\n return self.current_processor(images, **kwargs)\n if images is None and text is None:\n raise ValueError('You need to specify either an `images` or `text` input to process.')\n output_kwargs = self._merge_kwargs(TrOCRProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n if images is not None:\n inputs = self.image_processor(images, **output_kwargs['images_kwargs'])\n if text is not None:\n encodings = self.tokenizer(text, **output_kwargs['text_kwargs'])\n if text is None:\n return inputs\n elif images is None:\n return encodings\n else:\n inputs['labels'] = encodings['input_ids']\n return inputs\n\n def batch_decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer\n to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, **kwargs)\n\n def decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the\n docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, **kwargs)\n\n @contextmanager\n def as_target_processor(self):\n \"\"\"\n Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR.\n \"\"\"\n warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your images inputs, or in a separate call.')\n self._in_target_context_manager = True\n self.current_processor = self.tokenizer\n yield\n self.current_processor = self.image_processor\n self._in_target_context_manager = False\n\n @property\n def feature_extractor_class(self):\n warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)\n return self.image_processor_class\n\n @property\n def feature_extractor(self):\n warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)\n return self.image_processor", "docstring": "Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor.\n\n[`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and\n[`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`] for\nmore information.\n\nArgs:\n image_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*):\n An instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input.\n tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*):\n An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input."} +{"repo": "transformers", "function": "class Emu3VQVAEConfig(PretrainedConfig):\n model_type = 'emu3_vqgan'\n base_config_key = 'vq_config'\n\n def __init__(self, codebook_size: int=32768, embed_dim: int=4, latent_channels: int=4, double_latent: bool=False, in_channels: int=3, out_channels: int=3, temporal_downsample_factor: int=4, base_channels: int=256, channel_multiplier: List[int]=[1, 2, 2, 4], num_res_blocks: int=2, attn_resolutions: List[int]=[3], hidden_size: int=1024, num_attention_heads: int=1, attention_dropout: float=0.0, **kwargs):\n super().__init__(**kwargs)\n self.codebook_size = codebook_size\n self.embed_dim = embed_dim\n self.latent_channels = latent_channels\n self.double_latent = double_latent\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.temporal_downsample_factor = temporal_downsample_factor\n self.base_channels = base_channels\n self.channel_multiplier = channel_multiplier\n self.num_res_blocks = num_res_blocks\n self.attn_resolutions = attn_resolutions\n self.hidden_size = hidden_size\n self.num_attention_heads = num_attention_heads\n self.attention_dropout = attention_dropout", "docstring": "This is the configuration class to store the configuration of a [`Emu3VQVAE`]. It is used to instantiate an VQ-VAE\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a configuration to the VQ model presented in Emu3 paper.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. 
Read the\ndocumentation from [`PretrainedConfig`] for more information.\nArgs:\n codebook_size (`int`, *optional*, defaults to 32768):\n Codebook size of the VQ model.\n embed_dim (`int`, *optional*, defaults to 4):\n Dimension of the quantized vector in codebook.\n latent_channels (`int`, *optional*, defaults to 4):\n Dimension of the output channel of encoder and the input channel of decoder\n double_latent (`bool`, *optional*, defaults to `False`):\n Whether double the output dim of the encoder.\n in_channels (`int`, *optional*, defaults to 3):\n Input channel of encoder.\n out_channels (`int`, *optional*, defaults to 3):\n Output channel of decoder.\n temporal_downsample_factor (`int`, *optional*, defaults to 4):\n Temporal downsample factor.\n base_channels (`int`, *optional*, defaults to 256):\n Basic channel number of the intermediate blocks.\n channel_multiplier (`List[int]`, *optional*, defaults to `[1, 2, 2, 4]`):\n Channel scaling factor of the intermediate blocks.\n num_res_blocks (`int`, *optional*, defaults to 2):\n Residual block number in each stage.\n attn_resolutions (`List[int]`, *optional*, defaults to `[3]`):\n Stage indices to apply attention.\n hidden_size (`int`, *optional*, defaults to 1024):\n Dimension of the hidden representations in the attention layer.\n num_attention_heads (`int`, *optional*, defaults to 1):\n Number of attention heads for each attention layer.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n\n```python\n>>> from transformers import Emu3VQVAE, Emu3VQVAEConfig\n\n>>> # Initializing a video VQ model of Emu3 configuration\n>>> configuration = Emu3VQVAEConfig()\n\n>>> # Initializing a model from the Emu3 VQ model style configuration\n>>> model = Emu3VQVAE(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "def get_text_config(self, decoder=False) -> 'PretrainedConfig':\n return self.thinker_config.get_text_config()", "docstring": "Returns the config that is meant to be used with text IO. On most models, it is the original config instance\nitself. 
On specific composite models, it is under a set of valid names.\n\nArgs:\n decoder (`Optional[bool]`, *optional*, defaults to `False`):\n If set to `True`, then only search for decoder config names."} +{"repo": "transformers", "function": "def with_past(cls, config: 'PretrainedConfig', task: str='default') -> 'OnnxConfigWithPast':\n return cls(config, task=task, use_past=True)", "docstring": "Instantiate a OnnxConfig with `use_past` attribute set to True\n\nArgs:\n config: The underlying model's config to use when exporting to ONNX\n\nReturns:\n OnnxConfig with `.use_past = True`"} +{"repo": "tensorflow", "function": "def do_test(create_module_fn, exported_names=None, show_debug_info=False):\n if exported_names is None:\n exported_names = []\n logging.set_stderrthreshold('error')\n tf.enable_v2_behavior()\n\n def app_main(argv):\n \"\"\"Function passed to absl.app.run.\"\"\"\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n if FLAGS.save_model_path:\n save_model_path = FLAGS.save_model_path\n else:\n save_model_path = tempfile.mkdtemp(suffix='.saved_model')\n save_options = tf.saved_model.SaveOptions(save_debug_info=show_debug_info)\n tf.saved_model.save(create_module_fn(), save_model_path, options=save_options)\n logging.info('Saved model to: %s', save_model_path)\n mlir = pywrap_mlir.experimental_convert_saved_model_to_mlir(save_model_path, ','.join(exported_names), show_debug_info)\n mlir = pywrap_mlir.experimental_run_pass_pipeline(mlir, 'canonicalize', show_debug_info)\n print(mlir)\n filename = '%s/result.mlirbc' % save_model_path\n pywrap_mlir.experimental_write_bytecode(filename, mlir)\n if not file_io.file_exists(filename):\n raise app.UsageError('Failed to create bytecode output.')\n app.run(app_main)", "docstring": "Runs test.\n\n1. Performs absl and tf \"main\"-like initialization that must run before almost\n anything else.\n2. Converts `tf.Module` to SavedModel\n3. Converts SavedModel to MLIR\n4. Prints the textual MLIR to stdout (it is expected that the caller will have\n FileCheck checks in its file to check this output).\n\nThis is only for use by the MLIR SavedModel importer tests.\n\nArgs:\n create_module_fn: A callable taking no arguments, which returns the\n `tf.Module` to be converted and printed.\n exported_names: A set of exported names for the MLIR converter (default is\n \"export all\").\n show_debug_info: If true, shows debug locations in the resulting MLIR."} +{"repo": "tensorflow", "function": "def SyncSleep(delay, name=None):\n return examples_sync_sleep(delay=delay, name=name)", "docstring": "Pause for `delay` seconds (which need not be an integer).\n\nThis is a synchronous (blocking) version of a sleep op. 
It's purpose is\nto be contrasted with Examples>AsyncSleep.\n\nArgs:\n delay: tf.Tensor which is a scalar of type float.\n name: An optional name for the op.\n\nReturns:\n The `delay` value."} +{"repo": "transformers", "function": "def get_audio_features(self, input_features: Optional[torch.Tensor]=None, is_longer: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n audio_outputs = self.audio_model(input_features=input_features, is_longer=is_longer, return_dict=return_dict)\n pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output\n audio_features = self.audio_projection(pooled_output)\n audio_features = F.normalize(audio_features, dim=-1)\n return audio_features", "docstring": "input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Input audio features. This should be returned by the [`ClapFeatureExtractor`] class that you can also\n retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details.\nis_longer (`torch.FloatTensor`, of shape `(batch_size, 1)`, *optional*):\n Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance\n the features.\n\nReturns:\n audio_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The audio embeddings obtained by\n applying the projection layer to the pooled output of [`ClapAudioModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoFeatureExtractor, ClapModel\n>>> import torch\n\n>>> model = ClapModel.from_pretrained(\"laion/clap-htsat-unfused\")\n>>> feature_extractor = AutoFeatureExtractor.from_pretrained(\"laion/clap-htsat-unfused\")\n>>> random_audio = torch.rand((16_000))\n>>> inputs = feature_extractor(random_audio, return_tensors=\"pt\")\n>>> audio_features = model.get_audio_features(**inputs)\n```"} +{"repo": "tensorflow", "function": "def on_test_batch_end(self, batch, logs=None):", "docstring": "Called at the end of a batch in `evaluate` methods.\n\nAlso called at the end of a validation batch in the `fit`\nmethods, if validation data is provided.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`tf.keras.Model` is set to `N`, this method will only be called every `N`\nbatches.\n\nArgs:\n batch: Integer, index of batch within the current epoch.\n logs: Dict. 
Aggregated metric results up until this batch."} +{"repo": "tensorflow", "function": "def distribute(processing_mode, service, job_name=None, consumer_index=None, num_consumers=None, max_outstanding_requests=None, data_transfer_protocol=None, compression='AUTO', cross_trainer_cache=None, target_workers='AUTO') -> Callable[dataset_ops.Dataset, dataset_ops.Dataset]:\n _validate_job_name(job_name)\n return _distribute(processing_mode=processing_mode, service=service, job_name=job_name, consumer_index=consumer_index, num_consumers=num_consumers, max_outstanding_requests=max_outstanding_requests, data_transfer_protocol=data_transfer_protocol, compression=compression, cross_trainer_cache=cross_trainer_cache, target_workers=target_workers)", "docstring": "A transformation that moves dataset processing to the tf.data service.\n\nWhen you iterate over a dataset containing the `distribute` transformation,\nthe tf.data service creates a \"job\" which produces data for the dataset\niteration.\n\nThe tf.data service uses a cluster of workers to prepare data for training\nyour model.\nThe `processing_mode` argument to `tf.data.experimental.service.distribute`\ndescribes how to leverage multiple workers to process the input dataset.\nCurrently, there are two processing modes to choose from: \"distributed_epoch\"\nand \"parallel_epochs\".\n\n\"distributed_epoch\" means that the dataset will be split across all tf.data\nservice workers.\nThe dispatcher produces \"splits\" for the dataset and sends them to workers for\nfurther processing. For example, if a dataset begins with a list of filenames,\nthe dispatcher will iterate through the filenames and send the filenames to\ntf.data workers, which will perform the rest of the dataset transformations on\nthose files. \"distributed_epoch\" is useful when your model needs to see each\nelement of the dataset exactly once, or if it needs to see the data in a\ngenerally-sequential order. \"distributed_epoch\" only works for datasets with\nsplittable sources, such as `Dataset.from_tensor_slices`,\n`Dataset.list_files`, or `Dataset.range`.\n\n\"parallel_epochs\" means that the entire input dataset will be processed\nindependently by each of the tf.data service workers.\nFor this reason, it is important to shuffle data (e.g. filenames)\nnon-deterministically, so that each worker will process the elements of the\ndataset in a different order. \"parallel_epochs\" can be used to distribute\ndatasets that aren't splittable.\n\nWith two workers, \"parallel_epochs\" will produce every element of the dataset\ntwice:\n\n>>> dispatcher = tf.data.experimental.service.DispatchServer()\n>>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n>>> # Start two workers\n>>> workers = [\n... tf.data.experimental.service.WorkerServer(\n... tf.data.experimental.service.WorkerConfig(\n... dispatcher_address=dispatcher_address)) for _ in range(2)\n... ]\n>>> dataset = tf.data.Dataset.range(10)\n>>> dataset = dataset.apply(tf.data.experimental.service.distribute(\n... processing_mode=\"parallel_epochs\", service=dispatcher.target))\n>>> sorted([a.item() for a in dataset.as_numpy_iterator()])\n[0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9]\n\n\"distributed_epoch\", on the other hand, will still produce each element once:\n\n>>> dispatcher = tf.data.experimental.service.DispatchServer()\n>>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n>>> workers = [\n... tf.data.experimental.service.WorkerServer(\n... tf.data.experimental.service.WorkerConfig(\n... 
dispatcher_address=dispatcher_address)) for _ in range(2)\n... ]\n>>> dataset = tf.data.Dataset.range(10)\n>>> dataset = dataset.apply(tf.data.experimental.service.distribute(\n... processing_mode=\"distributed_epoch\", service=dispatcher.target))\n>>> sorted([a.item() for a in dataset.as_numpy_iterator()])\n[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\nWhen using `apply(tf.data.experimental.service.distribute(...))`, the dataset\nbefore the `apply` transformation executes within the tf.data service, while\nthe operations after `apply` happen within the local process.\n\n>>> dispatcher = tf.data.experimental.service.DispatchServer()\n>>> dispatcher_address = dispatcher.target.split(\"://\")[1]\n>>> workers = [\n... tf.data.experimental.service.WorkerServer(\n... tf.data.experimental.service.WorkerConfig(\n... dispatcher_address=dispatcher_address)) for _ in range(2)\n... ]\n>>> dataset = tf.data.Dataset.range(5)\n>>> dataset = dataset.map(lambda x: x*x)\n>>> dataset = dataset.apply(\n... tf.data.experimental.service.distribute(\"parallel_epochs\",\n... dispatcher.target))\n>>> dataset = dataset.map(lambda x: x+1)\n>>> sorted([a.item() for a in dataset.as_numpy_iterator()])\n[1, 1, 2, 2, 5, 5, 10, 10, 17, 17]\n\nIn the above example, the dataset operations (before applying the `distribute`\nfunction on the elements) will be executed on the tf.data workers,\nand the elements are provided over RPC. The remaining transformations\n(after the call to `distribute`) will be executed locally. The dispatcher\nand the workers will bind to usused free ports (which are chosen at random),\nin order to communicate with each other. However, to bind them to specific\nports, the `port` parameter can be passed.\n\nThe `job_name` argument allows jobs to be shared across multiple\ndatasets. Instead of each dataset creating its own job, all\ndatasets with the same `job_name` will consume from the same job. A new job\nwill be created for each iteration of the dataset (with each repetition of\n`Dataset.repeat` counting as a new iteration). Suppose the `DispatchServer`\nis serving on `localhost:5000` and two training workers (in either a single\nclient or multi-client setup) iterate over the below dataset, and there is a\nsingle tf.data worker:\n\n```\nrange5_dataset = tf.data.Dataset.range(5)\ndataset = range5_dataset.apply(tf.data.experimental.service.distribute(\n \"parallel_epochs\", \"localhost:5000\", job_name=\"my_job_name\"))\nfor iteration in range(3):\n print(list(dataset))\n```\n\nThe elements of each job will be split between the two processes, with\nelements being consumed by the processes on a first-come first-served basis.\nOne possible result is that process 1 prints\n\n```\n[0, 2, 4]\n[0, 1, 3]\n[1]\n```\n\nand process 2 prints\n\n```\n[1, 3]\n[2, 4]\n[0, 2, 3, 4]\n```\n\nJob names must not be re-used across different training jobs within the\nlifetime of the tf.data service. In general, the tf.data service is expected\nto live for the duration of a single training job.\nTo use the tf.data service with multiple training jobs, make sure to use\ndifferent job names to avoid conflicts. For example, suppose a training job\ncalls `distribute` with `job_name=\"job\"` and reads until end of input. If\nanother independent job connects to the same tf.data service and tries to read\nfrom `job_name=\"job\"`, it will immediately receive end of input, without\ngetting any data.\n\n**Coordinated data read**\n\nBy default, when multiple consumers read from the same job, they receive data\non a first-come first-served basis. 
In some use cases, it is advantageous to\ncoordinate the consumers. At each step, consumers read data from the same\nworker.\n\nFor example, the tf.data service can be used to coordinate example sizes\nacross a cluster during synchronous training, so that during each step all\nreplicas train on similar-sized elements. To achieve this, define a dataset\nwhich generates rounds of `num_consumers` consecutive similar-sized batches,\nthen enable coordinated reads by setting `consumer_index` and `num_consumers`.\n\nNOTE: To keep consumers in sync, round robin data consumption requires that\nthe dataset have infinite cardinality. You can get this by adding `.repeat()`\nat the end of the dataset definition.\n\n**Keras and Distribution Strategies**\n\nThe dataset produced by the `distribute` transformation can be passed to\nKeras' `Model.fit` or Distribution Strategy's\n`tf.distribute.Strategy.experimental_distribute_dataset` like any other\n`tf.data.Dataset`. We recommend setting a `job_name` on the call to\n`distribute` so that if there are multiple workers, they read data from the\nsame job. Note that the autosharding normally performed by\n`experimental_distribute_dataset` will be disabled when setting a `job_name`,\nsince sharing the job already results in splitting data across the workers.\nWhen using a shared job, data will be dynamically balanced across workers, so\nthat they reach end of input about the same time. This results in better\nworker utilization than with autosharding, where each worker processes an\nindependent set of files, and some workers may run out of data earlier than\nothers.\n\nArgs:\n processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying\n how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[://]
`, where `
` identifies the dispatcher\n address and `` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string. This argument makes it possible for multiple datasets to\n share the same job. The default behavior is that the dataset creates\n anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from `0`\n to `num_consumers`. Must be specified alongside `num_consumers`. When\n specified, consumers will read from the job in a strict round-robin order,\n instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead of\n the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out of\n sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the amount\n of memory used, since `distribute` won't use more than `element_size` *\n `max_outstanding_requests` of memory.\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. If not provided, a protocol is determined\n at runtime.\n compression: How to compress the dataset's elements before transferring them\n over the network. \"AUTO\" leaves the decision of how to compress up to the\n tf.data service runtime. `None` indicates not to compress. \"SNAPPY\" forces\n the use of snappy compression.\n cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is\n provided, dataset iteration will be shared across concurrently running\n trainers. See\n https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers\n for details.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`, tf.data\n runtime decides which workers to read from. If `\"ANY\"`, reads from any\n tf.data service workers. If `\"LOCAL\"`, only reads from local in-process\n tf.data service workers. `\"AUTO\"` works well for most cases, while users\n can specify other targets. For example, `\"LOCAL\"` helps avoid RPCs and\n data copy if every TF worker colocates with a tf.data service worker.\n Consumers of a shared job must use the same `target_workers`. Defaults to\n `\"AUTO\"`.\n\nReturns:\n Dataset: A `Dataset` of the elements produced by the data service."} +{"repo": "tensorflow", "function": "def run_op_benchmark(self, op, iters=1, warmup=True, session_config=None):\n if context.executing_eagerly():\n return self._run_eager_benchmark(iterable=op, iters=iters, warmup=warmup)\n return self._run_graph_benchmark(iterable=op, iters=iters, warmup=warmup, session_config=session_config)", "docstring": "Benchmarks the op.\n\nRuns the op `iters` times. In each iteration, the benchmark measures\nthe time it takes to go execute the op.\n\nArgs:\n op: The tf op to benchmark.\n iters: Number of times to repeat the timing.\n warmup: If true, warms up the session caches by running an untimed run.\n session_config: A ConfigProto protocol buffer with configuration options\n for the session. 
Applicable only for benchmarking in graph mode.\n\nReturns:\n A float, representing the per-execution wall time of the op in seconds.\n This is the median time (with respect to `iters`) it takes for the op\n to be executed `iters` num of times."} +{"repo": "transformers", "function": "def __getitem__(self, index: Any) -> Rigid:\n if type(index) is not tuple:\n index = (index,)\n return Rigid(self._rots[index], self._trans[index + (slice(None),)])", "docstring": "Indexes the affine transformation with PyTorch-style indices. The index is applied to the shared dimensions of\nboth the rotation and the translation.\n\nE.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None) t = Rigid(r, torch.rand(10, 10, 3)) indexed =\n t[3, 4:6] assert(indexed.shape == (2,)) assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\nArgs:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\nReturns:\n The indexed tensor"} +{"repo": "fhir-py", "function": "def get_resource(self, uri: str) -> Optional[message.Message]:\n for collection in (self.structure_definitions, self.search_parameters, self.code_systems, self.value_sets):\n resource = collection.get(uri)\n if resource is not None:\n return resource\n return None", "docstring": "Retrieves a protocol buffer representation of the given resource.\n\nArgs:\n uri: The URI of the resource to retrieve.\n\nReturns:\n Protocol buffer for the resource or `None` if the `uri` can not be found."} +{"repo": "tensorflow", "function": "def __init__(self, num_workers, dispatcher_port=0, work_dir=TMP_WORK_DIR, fault_tolerant_mode=True, job_gc_check_interval_ms=TEST_JOB_GC_CHECK_INTERNAL_MS, job_gc_timeout_ms=None, worker_timeout_ms=TEST_WORKER_TIMEOUT_MS, worker_shutdown_quiet_period_ms=0, snapshot_max_chunk_size_bytes=TEST_SNAPSHOT_MAX_CHUNK_SIZE_BYTES, worker_max_concurrent_snapshots=0, start=True, protocol=PROTOCOL, data_transfer_protocol=None):\n if work_dir == TMP_WORK_DIR:\n work_dir = tempfile.mkdtemp(dir=googletest.GetTempDir())\n self._worker_shutdown_quiet_period_ms = worker_shutdown_quiet_period_ms\n self._snapshot_max_chunk_size_bytes = snapshot_max_chunk_size_bytes\n self._protocol = protocol\n self._data_transfer_protocol = data_transfer_protocol\n self._job_gc_check_interval_ms = job_gc_check_interval_ms\n self._job_gc_timeout_ms = job_gc_timeout_ms\n self._worker_timeout_ms = worker_timeout_ms\n self._worker_max_concurrent_snapshots = worker_max_concurrent_snapshots\n self.dispatcher = server_lib.DispatchServer(server_lib.DispatcherConfig(port=dispatcher_port, work_dir=work_dir, protocol=protocol, fault_tolerant_mode=fault_tolerant_mode, job_gc_check_interval_ms=job_gc_check_interval_ms, job_gc_timeout_ms=job_gc_timeout_ms, worker_timeout_ms=worker_timeout_ms, worker_max_concurrent_snapshots=worker_max_concurrent_snapshots), start=start)\n self.workers = []\n for _ in range(num_workers):\n self.add_worker(start=start)", "docstring": "Creates a tf.data service test cluster.\n\nArgs:\n num_workers: The number of workers to initially add to the cluster.\n dispatcher_port: The port to use for the dispatcher.\n work_dir: The work directory to use for the dispatcher. If set to\n `TMP_WORK_DIR`, the cluster will create a new temporary directory to use\n as the work directory. 
If set to `NO_WORK_DIR`, no work directory will\n be used.\n fault_tolerant_mode: Whether the dispatcher should write its state to a\n journal so that it can recover from restarts.\n job_gc_check_interval_ms: How often the dispatcher should scan through to\n delete old and unused jobs, in milliseconds.\n job_gc_timeout_ms: How long a job needs to be unused before it becomes a\n candidate for garbage collection, in milliseconds.\n worker_timeout_ms: How long to wait for a worker to heartbeat before\n considering it missing, in milliseconds.\n worker_shutdown_quiet_period_ms: When shutting down a worker, how long to\n wait for the gRPC server to process the final requests.\n snapshot_max_chunk_size_bytes: The maximum size of a distributed snapshot\n chunk file.\n worker_max_concurrent_snapshots: The maximum number of snapshots a worker\n can concurrently process.\n start: Whether to immediately start the servers in the cluster. If\n `False`, the servers can be started later by calling\n `start_dispatcher()` and `start_workers()`.\n protocol: The protocol to use for communicating with the tf.data service,\n e.g. \"grpc\".\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service."} +{"repo": "beam", "function": "def open(self, url, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO) -> BinaryIO:\n _, path = self._parse_url(url)\n return self._open(path, mime_type, compression_type)", "docstring": "Returns:\n A Python File-like object."} +{"repo": "tensorflow", "function": "def resize_tensor_input(self, input_index, tensor_size, strict=False):\n self._ensure_safe()\n tensor_size = np.array(tensor_size, dtype=np.int32)\n self._interpreter.ResizeInputTensor(input_index, tensor_size, strict)", "docstring": "Resizes an input tensor.\n\nArgs:\n input_index: Tensor index of input to set. This value can be gotten from\n the 'index' field in get_input_details.\n tensor_size: The tensor_shape to resize the input to.\n strict: Only unknown dimensions can be resized when `strict` is True.\n Unknown dimensions are indicated as `-1` in the `shape_signature`\n attribute of a given tensor. (default False)\n\nRaises:\n ValueError: If the interpreter could not resize the input tensor.\n\nUsage:\n```\ninterpreter = Interpreter(model_content=tflite_model)\ninterpreter.resize_tensor_input(0, [num_test_images, 224, 224, 3])\ninterpreter.allocate_tensors()\ninterpreter.set_tensor(0, test_images)\ninterpreter.invoke()\n```"} +{"repo": "tensorflow", "function": "def ldu(load_v, name):\n try:\n return load_v()\n except (KeyError, AttributeError, NameError):\n return Undefined(name)", "docstring": "Load variable operator that returns Undefined when failing to evaluate.\n\nNote: the name (\"load or return undefined\") is abbreviated to minimize\nthe amount of clutter in generated code.\n\nThis variant of `ld` is useful when loading symbols that may be undefined at\nruntime, such as composite symbols, and whether they are defined or not cannot\nbe determined statically. 
For example `d['a']` is undefined when `d` is an\nempty dict.\n\nArgs:\n load_v: Lambda that executes the actual read.\n name: Human-readable name of the symbol being read.\nReturns:\n Either the value of the symbol, or Undefined, if the symbol is not fully\n defined."} +{"repo": "beam", "function": "def _parse_test_option_args(self, argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('--test-pipeline-options', type=str, action='store', help='only run tests providing service options')\n parser.add_argument('--not-use-test-runner-api', action='store_true', default=False, help='whether not to use test-runner-api')\n known, unused_argv = parser.parse_known_args(argv)\n test_pipeline_options = known.test_pipeline_options or TestPipeline.pytest_test_pipeline_options\n if self.is_integration_test and (not test_pipeline_options):\n raise SkipTest('IT is skipped because --test-pipeline-options is not specified')\n self.not_use_test_runner_api = known.not_use_test_runner_api\n return shlex.split(test_pipeline_options) if test_pipeline_options else []", "docstring": "Parse value of command line argument: --test-pipeline-options to get\npipeline options.\n\nArgs:\n argv: An iterable of command line arguments to be used. If not specified\n then sys.argv will be used as input for parsing arguments.\n\nReturns:\n An argument list of options that can be parsed by argparser or directly\n build a pipeline option."} +{"repo": "transformers", "function": "def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False)\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size)\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb\n images = make_list_of_images(images)\n if not valid_images(images):\n raise ValueError('Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)\n if do_convert_rgb:\n images = [convert_to_rgb(image) for image in images]\n images = [to_numpy_array(image) for image in images]\n if do_rescale and is_scaled_image(images[0]):\n logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(images[0])\n all_images = []\n for image in images:\n if do_resize:\n image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n if do_center_crop:\n image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)\n if do_rescale:\n image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n if do_normalize:\n image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n all_images.append(image)\n images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images]\n data = {'pixel_values': images}\n return BatchFeature(data=data, tensor_type=return_tensors)", "docstring": "Preprocess an image or batch of images.\n\nArgs:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing. Shortest edge of the image is resized to size[\"shortest_edge\"], with\n the longest edge resized to keep the input aspect ratio.\n resample (`int`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only\n has an effect if `do_resize` is set to `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use for normalization. 
Only has an effect if `do_normalize` is set to\n `True`.\n do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):\n Whether to convert the image to RGB.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format."} +{"repo": "sprockets", "function": "def distance(a, b):\n a = a.lower()\n b = b.lower()\n if len(a) == 0:\n return len(b)\n if len(b) == 0:\n return len(a)\n dist = [[0 for _ in range(len(b) + 1)] for _ in range(len(a) + 1)]\n for i in range(len(a) + 1):\n dist[i][0] = i\n for j in range(len(b) + 1):\n dist[0][j] = j\n for i in range(1, len(a) + 1):\n for j in range(1, len(b) + 1):\n cost = 0 if a[i - 1] == b[j - 1] else 1\n dist[i][j] = min(dist[i - 1][j] + 1, dist[i][j - 1] + 1, dist[i - 1][j - 1] + cost)\n return dist[-1][-1]", "docstring": "Returns the case-insensitive Levenshtein edit distance between |a| and |b|.\n\nThe Levenshtein distance is a metric for measuring the difference between\ntwo strings. If |a| == |b|, the distance is 0. 
It is roughly the number\nof insertions, deletions, and substitutions needed to convert |a| -> |b|.\n\nThis distance is at most the length of the longer string.\nThis distance is 0 iff the strings are equal.\n\nExamples:\n levenshtein_distance(\"cow\", \"bow\") == 1\n levenshtein_distance(\"cow\", \"bowl\") == 2\n levenshtein_distance(\"cow\", \"blrp\") == 4\n\nSee https://en.wikipedia.org/wiki/Levenshtein_distance for more background.\n\nArgs:\n a: A string\n b: A string\n\nReturns:\n The Levenshtein distance between the inputs."} +{"repo": "transformers", "function": "def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, max_entity_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:\n entities_provided = bool('entity_ids' in encoded_inputs)\n if return_attention_mask is None:\n return_attention_mask = 'attention_mask' in self.model_input_names\n if padding_strategy == PaddingStrategy.LONGEST:\n max_length = len(encoded_inputs['input_ids'])\n if entities_provided:\n max_entity_length = len(encoded_inputs['entity_ids'])\n if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of\n if entities_provided and max_entity_length is not None and (pad_to_multiple_of is not None) and (max_entity_length % pad_to_multiple_of != 0):\n max_entity_length = (max_entity_length // pad_to_multiple_of + 1) * pad_to_multiple_of\n needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and (len(encoded_inputs['input_ids']) != max_length or (entities_provided and len(encoded_inputs['entity_ids']) != max_entity_length))\n if return_attention_mask and 'attention_mask' not in encoded_inputs:\n encoded_inputs['attention_mask'] = [1] * len(encoded_inputs['input_ids'])\n if entities_provided and return_attention_mask and ('entity_attention_mask' not in encoded_inputs):\n encoded_inputs['entity_attention_mask'] = [1] * len(encoded_inputs['entity_ids'])\n if needs_to_be_padded:\n difference = max_length - len(encoded_inputs['input_ids'])\n padding_side = padding_side if padding_side is not None else self.padding_side\n if entities_provided:\n entity_difference = max_entity_length - len(encoded_inputs['entity_ids'])\n if padding_side == 'right':\n if return_attention_mask:\n encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference\n if entities_provided:\n encoded_inputs['entity_attention_mask'] = encoded_inputs['entity_attention_mask'] + [0] * entity_difference\n if 'token_type_ids' in encoded_inputs:\n encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [0] * difference\n if entities_provided:\n encoded_inputs['entity_token_type_ids'] = encoded_inputs['entity_token_type_ids'] + [0] * entity_difference\n if 'special_tokens_mask' in encoded_inputs:\n encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference\n encoded_inputs['input_ids'] = encoded_inputs['input_ids'] + [self.pad_token_id] * difference\n if entities_provided:\n encoded_inputs['entity_ids'] = encoded_inputs['entity_ids'] + [self.entity_pad_token_id] * entity_difference\n encoded_inputs['entity_position_ids'] = encoded_inputs['entity_position_ids'] + [[-1] * self.max_mention_length] * entity_difference\n if self.task == 
'entity_span_classification':\n encoded_inputs['entity_start_positions'] = encoded_inputs['entity_start_positions'] + [0] * entity_difference\n encoded_inputs['entity_end_positions'] = encoded_inputs['entity_end_positions'] + [0] * entity_difference\n elif padding_side == 'left':\n if return_attention_mask:\n encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']\n if entities_provided:\n encoded_inputs['entity_attention_mask'] = [0] * entity_difference + encoded_inputs['entity_attention_mask']\n if 'token_type_ids' in encoded_inputs:\n encoded_inputs['token_type_ids'] = [0] * difference + encoded_inputs['token_type_ids']\n if entities_provided:\n encoded_inputs['entity_token_type_ids'] = [0] * entity_difference + encoded_inputs['entity_token_type_ids']\n if 'special_tokens_mask' in encoded_inputs:\n encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']\n encoded_inputs['input_ids'] = [self.pad_token_id] * difference + encoded_inputs['input_ids']\n if entities_provided:\n encoded_inputs['entity_ids'] = [self.entity_pad_token_id] * entity_difference + encoded_inputs['entity_ids']\n encoded_inputs['entity_position_ids'] = [[-1] * self.max_mention_length] * entity_difference + encoded_inputs['entity_position_ids']\n if self.task == 'entity_span_classification':\n encoded_inputs['entity_start_positions'] = [0] * entity_difference + encoded_inputs['entity_start_positions']\n encoded_inputs['entity_end_positions'] = [0] * entity_difference + encoded_inputs['entity_end_positions']\n else:\n raise ValueError('Invalid padding strategy:' + str(padding_side))\n return encoded_inputs", "docstring": "Pad encoded inputs (on left/right and up to predefined length or max length in the batch)\n\n\nArgs:\n encoded_inputs:\n Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).\n max_length: maximum length of the returned list and optionally padding length (see below).\n Will truncate by taking into account the special tokens.\n max_entity_length: The maximum length of the entity sequence.\n padding_strategy: PaddingStrategy to use for padding.\n\n\n - PaddingStrategy.LONGEST Pad to the longest sequence in the batch\n - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)\n - PaddingStrategy.DO_NOT_PAD: Do not pad\n The tokenizer padding sides are defined in self.padding_side:\n\n\n - 'left': pads on the left of the sequences\n - 'right': pads on the right of the sequences\n pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.\n This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability\n `>= 7.5` (Volta).\n padding_side:\n The side on which the model should have padding applied. 
Should be selected between ['right', 'left'].\n Default value is picked from the class attribute of the same name.\n return_attention_mask:\n (optional) Set to False to avoid returning attention mask (default: set to model specifics)"} +{"repo": "tensorflow", "function": "def get_module(dir_path: str, relative_to_dir: str) -> str:\n dir_path = dir_path[len(relative_to_dir):]\n dir_path = dir_path.replace(os.sep, '/')\n return dir_path.replace('/', '.').strip('.')", "docstring": "Get module that corresponds to path relative to relative_to_dir.\n\nArgs:\n dir_path: Path to directory.\n relative_to_dir: Get module relative to this directory.\n\nReturns:\n Name of module that corresponds to the given directory."} +{"repo": "transformers", "function": "def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n cos = cos.unsqueeze(unsqueeze_dim)\n sin = sin.unsqueeze(unsqueeze_dim)\n q_embed = q * cos + rotate_half(q) * sin\n k_embed = k * cos + rotate_half(k) * sin\n return (q_embed, k_embed)", "docstring": "Applies Rotary Position Embedding to the query and key tensors.\n\nArgs:\n q (`torch.Tensor`): The query tensor.\n k (`torch.Tensor`): The key tensor.\n cos (`torch.Tensor`): The cosine part of the rotary embedding.\n sin (`torch.Tensor`): The sine part of the rotary embedding.\n position_ids (`torch.Tensor`, *optional*):\n Deprecated and unused.\n unsqueeze_dim (`int`, *optional*, defaults to 1):\n The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. 
Similarly, if q and k have\n the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\nReturns:\n `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding."} +{"repo": "tensorflow", "function": "def move_test_classes_into_scope(wrapped_test_module):\n for name, obj in wrapped_test_module.__dict__.items():\n if _is_test_class(obj):\n module_variables['tpu_test_imported_%s' % name] = obj", "docstring": "Add all test classes defined in wrapped module to our module.\n\nThe test runner works by inspecting the main module for TestCase classes, so\nby adding a module-level reference to the TestCase we cause it to execute the\nwrapped TestCase.\n\nArgs:\n wrapped_test_module: The user-provided test code to run."} +{"repo": "tensorflow", "function": "def reinterpret(value, new_type):\n if not isinstance(value, ExtensionType):\n raise ValueError(f'reinterpret expects `value` to be a tf.ExtensionType instance; got {value!r}')\n if not (isinstance(new_type, type) and issubclass(new_type, ExtensionType)):\n raise ValueError(f'reinterpret expects `new_type` to be a subclass of tf.ExtensionType; got {new_type!r}')\n fields = [item for item in value.__dict__.items() if not extension_type_field.ExtensionTypeField.is_reserved_name(item[0])]\n new_value = _create_object_from_type_and_dict(new_type, fields)\n new_value._tf_extension_type_convert_fields()\n new_value.__validate__()\n return new_value", "docstring": "Converts a given `ExtensionType` to a new type with compatible fields.\n\nIn particular, this can be used to convert a concrete subclass of\n`ExtensionType` to an `AnonymousExtensionType`, or vice versa. When\nconverting to a non-anonymous ExtensionType, field values are type-checked to\nensure they are consistent with `new_type`'s type annotations, and validated\nwith `new_type.__validate__`.\n\nArgs:\n value: An instance of a subclass of `tf.ExtensionType`\n new_type: A subclass of `tf.ExtensionType`\n\nReturns:\n An instance of `new_type`, whose fields are copied from `value`."} +{"repo": "transformers", "function": "class TvltDecoderOutput(ModelOutput):\n logits: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Class for TvltDecoder's outputs, with potential hidden states and attentions.\n\nArgs:\n logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):\n Pixel reconstruction logits.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer\n plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads."} +{"repo": "transformers", "function": "class TFXLNetForMultipleChoiceOutput(ModelOutput):\n loss: tf.Tensor | None = None\n logits: Optional[tf.Tensor] = None\n mems: List[tf.Tensor] | None = None\n hidden_states: Tuple[tf.Tensor, ...] | None = None\n attentions: Tuple[tf.Tensor, ...] | None = None", "docstring": "Output type of [`TFXLNetForMultipleChoice`].\n\nArgs:\n loss (`tf.Tensor` of shape *(1,)*, *optional*, returned when `labels` is provided):\n Classification loss.\n logits (`tf.Tensor` of shape `(batch_size, num_choices)`):\n *num_choices* is the second dimension of the input tensors. (see *input_ids* above).\n\n Classification scores (before SoftMax).\n mems (`List[tf.Tensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states. Can be used (see `mems` input) to speed up sequential decoding. The\n token ids which have their past given to this model should not be passed as `input_ids` as they have\n already been computed.\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "tensorflow", "function": "def _patch_compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1):\n del filename\n del mode\n source_ast = ast.parse(source)\n final = source_ast.body[-1]\n if isinstance(final, ast.Expr):\n print_it = ast.Expr(lineno=-1, col_offset=-1, value=ast.Call(func=ast.Name(id='_print_if_not_none', ctx=ast.Load(), lineno=-1, col_offset=-1), lineno=-1, col_offset=-1, args=[final], keywords=[]))\n source_ast.body[-1] = print_it\n source = astor.to_source(source_ast)\n return compile(source, filename='dummy.py', mode='exec', flags=flags, dont_inherit=dont_inherit, optimize=optimize)", "docstring": "Patch `doctest.compile` to make doctest to behave like a notebook.\n\nDefault settings for doctest are configured to run like a repl: one statement\nat a time. The doctest source uses `compile(..., mode=\"single\")`\n\nSo to let doctest act like a notebook:\n\n1. We need `mode=\"exec\"` (easy)\n2. We need the last expression to be printed (harder).\n\nTo print the last expression, just wrap the last expression in\n`_print_if_not_none(expr)`. 
To detect the last expression use `AST`.\nIf the last node is an expression modify the ast to call\n`_print_if_not_none` on it, convert the ast back to source and compile that.\n\nhttps://docs.python.org/3/library/functions.html#compile\n\nArgs:\n source: Can either be a normal string, a byte string, or an AST object.\n filename: Argument should give the file from which the code was read; pass\n some recognizable value if it wasn\u2019t read from a file ('' is\n commonly used).\n mode: [Ignored] always use exec.\n flags: Compiler options.\n dont_inherit: Compiler options.\n optimize: Compiler options.\n\nReturns:\n The resulting code object."} +{"repo": "transformers", "function": "def make_transform_from_reference(n_xyz: torch.Tensor, ca_xyz: torch.Tensor, c_xyz: torch.Tensor, eps: float=1e-20) -> Rigid:\n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n rots = rot_matmul(n_rots, c_rots)\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n rot_obj = Rotation(rot_mats=rots, quats=None)\n return Rigid(rot_obj, translation)", "docstring": "Returns a transformation object from reference coordinates.\n\nNote that this method does not take care of symmetries. If you provide the atom positions in the non-standard\nway, the N atom will end up not at [-0.527250, 1.359329, 0.0] but instead at [-0.527250, -1.359329, 0.0]. You\nneed to take care of such cases in your code.\n\nArgs:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\nReturns:\n A transformation object. 
After applying the translation and rotation to the reference backbone, the\n coordinates will approximately equal to the input coordinates."} +{"repo": "tensorflow", "function": "def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1):\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n if isinstance(strides, int):\n strides = (strides,)\n if isinstance(dilation_rate, int):\n dilation_rate = (dilation_rate,)\n x, tf_data_format = _preprocess_conv1d_input(x, data_format)\n padding = _preprocess_padding(padding)\n if not isinstance(strides, tuple):\n strides = tuple(strides)\n if tf_data_format == 'NWC':\n spatial_start_dim = 1\n strides = (1,) + strides * 2 + (1,)\n else:\n spatial_start_dim = 2\n strides = (1, 1) + strides * 2\n x = array_ops.expand_dims(x, spatial_start_dim)\n depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)\n pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)\n dilation_rate = (1,) + dilation_rate\n x = nn.separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format)\n x = array_ops.squeeze(x, [spatial_start_dim])\n if data_format == 'channels_first' and tf_data_format == 'NWC':\n x = array_ops.transpose(x, (0, 2, 1))\n return x", "docstring": "1D convolution with separable filters.\n\nArgs:\n x: input tensor\n depthwise_kernel: convolution kernel for the depthwise convolution.\n pointwise_kernel: kernel for the 1x1 convolution.\n strides: stride integer.\n padding: string, `\"same\"` or `\"valid\"`.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n dilation_rate: integer dilation rate.\n\nReturns:\n Output tensor.\n\nRaises:\n ValueError: if `data_format` is neither `channels_last` or\n `channels_first`."} +{"repo": "beam", "function": "def __init__(self, name: str, coder: Optional[Coder]=None, combine_fn: Any=None) -> None:\n from apache_beam.transforms.core import CombineFn\n if combine_fn is None:\n if coder is None:\n raise ValueError('combine_fn must be provided')\n else:\n coder, combine_fn = (None, coder)\n self.combine_fn = CombineFn.maybe_from_callable(combine_fn)\n if coder is None:\n coder = self.combine_fn.get_accumulator_coder()\n super().__init__(name, coder)", "docstring": "Initialize the specification for CombiningValue state.\n\nCombiningValueStateSpec(name, combine_fn) -> Coder-inferred combining value\n state spec.\nCombiningValueStateSpec(name, coder, combine_fn) -> Combining value state\n spec with coder and combine_fn specified.\n\nArgs:\n name (str): The name by which the state is identified.\n coder (Coder): Coder specifying how to encode the values to be combined.\n May be inferred.\n combine_fn (``CombineFn`` or ``callable``): Function specifying how to\n combine the values passed to state."} +{"repo": "fhir-py", "function": "def _print_primitive_field(self, field_name: str, field: descriptor.FieldDescriptor, value: Any) -> None:\n if proto_utils.field_is_repeated(field):\n string_values = []\n elements = []\n extensions_found = False\n nonnull_values_found = False\n for primitive in value:\n wrapper = self.primitive_handler.primitive_wrapper_from_primitive(primitive)\n string_values.append(wrapper.json_value())\n elements.append(wrapper.get_element())\n nonnull_values_found = nonnull_values_found or wrapper.has_value()\n 
extensions_found = extensions_found or wrapper.has_element()\n if nonnull_values_found:\n self.generator.add_field(field_name)\n self._print_list(string_values, self.generator.push)\n if extensions_found:\n if nonnull_values_found:\n self.generator.push(',')\n self.generator.add_newline()\n self.generator.add_field(f'_{field_name}')\n self._print_list(elements, self._print)\n elif self.json_format == _FhirJsonFormat.ANALYTIC and field.message_type.name == 'ReferenceId':\n str_value = proto_utils.get_value_at_field(value, 'value')\n self.generator.add_field(field_name, f'\"{str_value}\"')\n else:\n wrapper = self.primitive_handler.primitive_wrapper_from_primitive(value)\n if wrapper.has_value():\n self.generator.add_field(field_name, wrapper.json_value())\n if wrapper.has_element() and self.json_format == _FhirJsonFormat.PURE:\n if wrapper.has_value():\n self.generator.push(',')\n self.generator.add_newline()\n self.generator.add_field(f'_{field_name}')\n self._print(wrapper.get_element())", "docstring": "Prints the primitive field.\n\nArgs:\n field_name: The name of the field.\n field: The FieldDescriptor whose contents to print.\n value: The value present at field to print."} +{"repo": "fhir-py", "function": "def get_enum_value_original_code(enum_value_descriptor: descriptor.EnumValueDescriptor) -> Optional[str]:\n return get_value_for_annotation_extension(enum_value_descriptor, annotations_pb2.fhir_original_code)", "docstring": "Returns the original name if the provided enum value had to be renamed.\n\nArgs:\n enum_value_descriptor: The EnumValueDescriptor to examine.\n\nReturns:\n If the code had to be renamed to make a valid enum identifier, this function\n returns the original name. Otherwise returns None.\n\nRaises:\n ValueError: Unable to retrieve options for type: ."} +{"repo": "transformers", "function": "def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n old_height, old_width = get_image_size(image, input_data_format)\n pad_height = (old_height // size + 1) * size - old_height\n pad_width = (old_width // size + 1) * size - old_width\n return pad(image, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=data_format, input_data_format=input_data_format)", "docstring": "Pad an image to make the height and width divisible by `size`.\n\nArgs:\n image (`np.ndarray`):\n Image to pad.\n size (`int`):\n The size to make the height and width divisible by.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image.
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n\nReturns:\n `np.ndarray`: The padded image."} +{"repo": "tensorflow", "function": "def build_as_function_and_v1_graph(func: Callable[..., Any]) -> Callable[..., None]:\n if tf_inspect.isclass(func):\n raise ValueError('`run_in_graph_mode_and_function` only supports test methods.')\n\n @parameterized.named_parameters(('_v1_graph', 'v1_graph'), ('_function', 'function'))\n @functools.wraps(func)\n def decorated(self: 'TensorFlowTestCase', run_mode: str, *args, **kwargs) -> None:\n if run_mode == 'v1_graph':\n with ops.Graph().as_default():\n func(self, *args, **kwargs)\n elif run_mode == 'function':\n\n @def_function.function\n def function_in_eager():\n func(self, *args, **kwargs)\n graph_for_eager_test = ops.Graph()\n with graph_for_eager_test.as_default(), context.eager_mode():\n function_in_eager()\n ops.dismantle_graph(graph_for_eager_test)\n else:\n raise ValueError('Unknown run mode %s' % run_mode)\n return decorated", "docstring": "Run a test case in v1 graph mode and inside tf.function in eager mode.\n\nWARNING: This decorator can only be used in test cases that statically checks\ngenerated graph. Attempting to evaluate graph or function results via.\nsession.run() or self.evaluate() will fail.\n\nWARNING: This decorator can only be used for test cases that inherit from\nabsl.testing.parameterized.TestCase.\n\nArgs:\n func: Test case function to be decorated.\n\nReturns:\n Decorated test case function."} +{"repo": "transformers", "function": "class TFBaseModelOutputWithCLSToken(ModelOutput):\n last_hidden_state: Optional[tf.Tensor] = None\n cls_token_value: Optional[tf.Tensor] = None\n hidden_states: Tuple[tf.Tensor, ...] | None = None", "docstring": "Base class for model's outputs.\n\nArgs:\n last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n cls_token_value (`tf.Tensor` of shape `(batch_size, 1, hidden_size)`):\n Classification token at the output of the last layer of the model.\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`. 
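The `pad` method whose docstring closes above rounds each side up to the next multiple of `size` and pads symmetrically. A NumPy-only sketch of the same arithmetic; note the upstream formula adds a full extra block even when a side is already divisible, which the sketch preserves:

```python
import numpy as np

def pad_to_multiple(image, size):
    # image is (height, width); mirrors the record's arithmetic.
    old_h, old_w = image.shape[:2]
    pad_h = (old_h // size + 1) * size - old_h
    pad_w = (old_w // size + 1) * size - old_w
    return np.pad(image, ((0, pad_h), (0, pad_w)), mode='symmetric')

img = np.arange(5 * 7).reshape(5, 7)
print(pad_to_multiple(img, 4).shape)  # (8, 8)
```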
Hidden-states of the model at the output of each layer plus\n the initial embedding outputs."} +{"repo": "transformers", "function": "def get_imports(filename: Union[str, os.PathLike]) -> list[str]:\n with open(filename, encoding='utf-8') as f:\n content = f.read()\n imported_modules = set()\n import transformers.utils\n\n def recursive_look_for_imports(node):\n if isinstance(node, ast.Try):\n return\n elif isinstance(node, ast.If):\n test = node.test\n for condition_node in ast.walk(test):\n if isinstance(condition_node, ast.Call):\n check_function = getattr(condition_node.func, 'id', '')\n if check_function.endswith('available') and check_function.startswith('is_flash_attn') or hasattr(transformers.utils.import_utils, check_function):\n return\n elif isinstance(node, ast.Import):\n for alias in node.names:\n top_module = alias.name.split('.')[0]\n if top_module:\n imported_modules.add(top_module)\n elif isinstance(node, ast.ImportFrom):\n if node.level == 0 and node.module:\n top_module = node.module.split('.')[0]\n if top_module:\n imported_modules.add(top_module)\n for child in ast.iter_child_nodes(node):\n recursive_look_for_imports(child)\n tree = ast.parse(content)\n recursive_look_for_imports(tree)\n return sorted(imported_modules)", "docstring": "Extracts all the libraries (not relative imports this time) that are imported in a file.\n\nArgs:\n filename (`str` or `os.PathLike`): The module file to inspect.\n\nReturns:\n `list[str]`: The list of all packages required to use the input module."} +{"repo": "transformers", "function": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of zeros."} +{"repo": "beam", "function": "def _create_extra_packages(extra_packages, temp_dir) -> List[beam_runner_api_pb2.ArtifactInformation]:\n resources: List[beam_runner_api_pb2.ArtifactInformation] = []\n staging_temp_dir = tempfile.mkdtemp(dir=temp_dir)\n local_packages: List[str] = []\n for package in extra_packages:\n if not (os.path.basename(package).endswith('.tar') or os.path.basename(package).endswith('.tar.gz') or os.path.basename(package).endswith('.whl') or os.path.basename(package).endswith('.zip')):\n raise RuntimeError('The --extra_package option expects a full path ending with \".tar\", \".tar.gz\", \".whl\" or \".zip\" instead of %s' % package)\n if os.path.basename(package).endswith('.whl'):\n _LOGGER.warning('The .whl package \"%s\" provided in --extra_package must be binary-compatible with the worker runtime environment.' % package)\n if not os.path.isfile(package):\n if Stager._is_remote_path(package):\n _LOGGER.info('Downloading extra package: %s locally before staging', package)\n _, last_component = FileSystems.split(package)\n local_file_path = FileSystems.join(staging_temp_dir, last_component)\n Stager._download_file(package, local_file_path)\n else:\n raise RuntimeError('The file %s cannot be found. 
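`get_imports` above collects top-level package names from a module's AST while skipping `try:` blocks and availability-guarded branches. A stripped-down, stdlib-only sketch of just the collection step (the guard skipping is omitted here):

```python
import ast

def top_level_imports(source):
    mods = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Import):
            mods.update(alias.name.split('.')[0] for alias in node.names)
        elif isinstance(node, ast.ImportFrom) and node.level == 0 and node.module:
            mods.add(node.module.split('.')[0])
    return sorted(mods)

print(top_level_imports('import os.path\nfrom json import loads\nfrom . import sibling'))
# ['json', 'os']  (relative imports are skipped via node.level)
```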
It was specified in the --extra_packages command line option.' % package)\n else:\n local_packages.append(package)\n local_packages.extend([FileSystems.join(staging_temp_dir, f) for f in os.listdir(staging_temp_dir)])\n for package in local_packages:\n basename = os.path.basename(package)\n resources.append(Stager._create_file_stage_to_artifact(package, basename))\n with open(os.path.join(temp_dir, EXTRA_PACKAGES_FILE), 'wt') as f:\n for package in local_packages:\n f.write('%s\\n' % os.path.basename(package))\n resources.append(Stager._create_file_stage_to_artifact(os.path.join(temp_dir, EXTRA_PACKAGES_FILE), EXTRA_PACKAGES_FILE))\n return resources", "docstring": "Creates a list of local extra packages.\n\nArgs:\n extra_packages: Ordered list of local paths to extra packages to be\n staged. Only packages on localfile system and GCS are supported.\n temp_dir: Temporary folder where the resource building can happen.\n Caller is responsible for cleaning up this folder after this function\n returns.\n\nReturns:\n A list of ArtifactInformation of local file paths and file names\n (no paths) for the resources staged. All the files are assumed to be\n staged in staging_location.\n\nRaises:\n RuntimeError: If files specified are not found or do not have expected\n name patterns."} +{"repo": "keras", "function": "class DTypePolicyMap(DTypePolicy, MutableMapping):\n\n def __init__(self, default_policy=None, policy_map=None):\n if isinstance(default_policy, DTypePolicyMap):\n raise ValueError('`default_policy` cannot be a `DTypePolicyMap`.')\n if policy_map is not None and (not isinstance(policy_map, dict)):\n raise TypeError(f'If specified, `policy_map` must be a dict. Received: policy_map={policy_map} of type {type(policy_map)}')\n self._default_policy_arg = default_policy\n self._default_policy = dtype_policies.get(default_policy)\n self._policy_map = policy_map or dict()\n\n @property\n def name(self):\n return 'map_' + self.default_policy._name\n\n @property\n def default_policy(self):\n \"\"\"The default dtype policy.\n\n If `default_policy` is not specified in the constructor, this property\n will be `keras.config.dtype_policy()`.\n \"\"\"\n return dtype_policies.get(self._default_policy)\n\n @property\n def variable_dtype(self):\n return self.default_policy.variable_dtype\n\n @property\n def compute_dtype(self):\n return self.default_policy.compute_dtype\n\n @property\n def quantization_mode(self):\n return self.default_policy.quantization_mode\n\n def __getitem__(self, key):\n \"\"\"Retrieves the corresponding `DTypePolicy` by the string key.\n\n When there isn't an exact match, all the existing keys in the map\n will be treated as a regex and map against the input key again. When\n there are multiple matches for the regex, an `ValueError` will be\n raised. Returns `self.default_policy` if there isn't any match found.\n\n Args:\n key: String key to query a `DTypePolicy`.\n\n Returns:\n Corresponding `DTypePolicy` based on the query.\n \"\"\"\n if key in self._policy_map:\n return self._policy_map[key]\n matching_keys = []\n for k in self._policy_map:\n if re.search(k, key):\n matching_keys.append(k)\n if len(matching_keys) > 1:\n raise ValueError(f\"Path '{key}' matches multiple dtype policy specification keys: {matching_keys}. 
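`_create_extra_packages` above rejects anything that is not a `.tar`, `.tar.gz`, `.whl`, or `.zip` archive before staging. A tiny stdlib sketch of that filename gate (error text shortened, not Beam's exact message):

```python
import os

ALLOWED = ('.tar', '.tar.gz', '.whl', '.zip')

def check_extra_package(path):
    base = os.path.basename(path)
    # str.endswith accepts a tuple of suffixes, so one call covers all four.
    if not base.endswith(ALLOWED):
        raise RuntimeError(
            'expected a path ending with ".tar", ".tar.gz", ".whl" or ".zip", got %s' % path)
    return base

print(check_extra_package('/tmp/dist/mypkg-1.0-py3-none-any.whl'))
```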
Please make sure each path only matches at most one dtype policy specification key in the DTypePolicyMap.\")\n elif len(matching_keys) == 1:\n return self._policy_map[matching_keys[0]]\n return self.default_policy\n\n def __setitem__(self, key, policy):\n \"\"\"Insert `DTypePolicy` to the `DTypePolicyMap`.\n\n Args:\n key: String key for the `DTypePolicy`.\n policy: The `DTypePolicy`.\n \"\"\"\n if key in self._policy_map:\n raise ValueError(f'{key} already exist in the DTypePolicyMap with value {self._policy_map[key]}. Please make sure to not use duplicated keys.')\n try:\n policy = dtype_policies.get(policy)\n except Exception:\n raise ValueError(f'Cannot interpret the assigned value by `keras.dtype_policies.get`. Received: {policy} of type {type(policy)}')\n self._policy_map[key] = policy\n\n def __delitem__(self, key):\n return self._policy_map.pop(key)\n\n def __contains__(self, key):\n return key in self._policy_map\n\n def get_config(self):\n from keras.src.saving import serialization_lib\n policy_map = self._policy_map\n if self._default_policy_arg is None:\n for policy in policy_map.values():\n if isinstance(policy, dtype_policies.QuantizedDTypePolicy):\n policy._name = None\n policy._source_name = None\n elif isinstance(policy, dtype_policies.DTypePolicy):\n policy._name = None\n return {'default_policy': self._default_policy_arg, 'policy_map': serialization_lib.serialize_keras_object(policy_map)}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n from keras.src.saving import serialization_lib\n config = config.copy()\n config['policy_map'] = serialization_lib.deserialize_keras_object(config['policy_map'], custom_objects=custom_objects)\n return cls(**config)\n\n def __len__(self):\n return len(self._policy_map)\n\n def __iter__(self):\n return iter(self._policy_map)\n\n def __repr__(self):\n default_policy = self._default_policy.name if self._default_policy is not None else None\n mapping = []\n for k, v in self._policy_map.items():\n mapping.append((k, v.name))\n return f''", "docstring": "Dict-like object mapping layer paths to `DTypePolicy` instances.\n\n`DTypePolicyMap` can be used in `get_config` in layers and subclasses to\nsupport a complex configurations of dtype policies.\n\nFor example, we can modify `get_config` in `layers.MultiHeadAttention` as\nfollows to support the mixing of dtype policies, such as quantization.\n\n```python\n@keras.saving.register_keras_serializable(\"MyPackage\")\nclass MyMultiHeadAttention(keras.layers.MultiHeadAttention):\n def get_config(self):\n config = super().get_config()\n dtype_policy_map = dtype_policies.DTypePolicyMap()\n for layer in self._flatten_layers():\n if layer.dtype_policy.quantization_mode is not None:\n dtype_policy_map[layer.path] = layer.dtype_policy\n if len(dtype_policy_map) > 0:\n config.update({\"dtype\": dtype_policy_map})\n return config\n```\n\nInternally, `DTypePolicyMap` uses a string as a key and a `DTypePolicy`\nas the value. Typically, the key used for querying is the `Layer.path`.\nHowever, it is also possible to set a regex as the key. See the docstring of\n`get` for more details.\n\nSee below for a usage example. 
You can define the naming schema\nof the `DTypePolicy`, and then retrieve the corresponding `DTypePolicy`\ninstance.\n\n```python\ndtype_policy_map = DTypePolicyMap()\ndtype_policy_map[\"layer/dense_0\"] = DTypePolicy(\"bfloat16\")\ndtype_policy_map[\"layer/dense_1\"] = QuantizedDTypePolicy(\"int8\", \"bfloat16\")\n\npolicy_0 = dtype_policy_map[\"layer/dense_0\"]\npolicy_1 = dtype_policy_map[\"layer/dense_1\"]\npolicy_2 = dtype_policy_map[\"layer/dense_2\"] # No hit\nassert policy_0 == DTypePolicy(\"bfloat16\")\nassert policy_1 == QuantizedDTypePolicy(\"int8\", \"bfloat16\")\nassert policy_2 == keras.config.dtype_policy()\n```\n\nArgs:\n default_policy: An optional `DTypePolicy` instance specifying the\n default dtype policy. If not specified, the value will default to\n `keras.config.dtype_policy()`.\n policy_map: An optional dict that maps string to `DTypePolicy`\n instances. Defaults to `None`"} +{"repo": "tensorflow", "function": "def copy_assets_to_destination_dir(asset_filename_map, destination_dir, saved_files=None):\n if saved_files is None:\n saved_files = set()\n assets_destination_dir = path_helpers.get_or_create_assets_dir(destination_dir)\n for asset_basename, asset_source_filepath in asset_filename_map.items():\n asset_destination_filepath = file_io.join(compat.as_bytes(assets_destination_dir), compat.as_bytes(asset_basename))\n if file_io.file_exists(asset_source_filepath) and asset_source_filepath != asset_destination_filepath and (asset_destination_filepath not in saved_files):\n file_io.copy(asset_source_filepath, asset_destination_filepath, overwrite=True)\n saved_files.add(asset_destination_filepath)\n tf_logging.info('Assets written to: %s', compat.as_text(assets_destination_dir))", "docstring": "Copy all assets from source path to destination path.\n\nArgs:\n asset_filename_map: a dict of filenames used for saving the asset in\n the SavedModel to full paths from which the filenames were derived.\n destination_dir: the destination directory that assets are stored in.\n saved_files: a set of destination filepaths that have already been copied\n and will be skipped"} +{"repo": "keras", "function": "def conv_block(x, growth_rate, name):\n bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n x1 = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_0_bn')(x)\n x1 = layers.Activation('relu', name=name + '_0_relu')(x1)\n x1 = layers.Conv2D(4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')(x1)\n x1 = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name=name + '_1_bn')(x1)\n x1 = layers.Activation('relu', name=name + '_1_relu')(x1)\n x1 = layers.Conv2D(growth_rate, 3, padding='same', use_bias=False, name=name + '_2_conv')(x1)\n x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])\n return x", "docstring": "A building block for a dense block.\n\nArgs:\n x: input tensor.\n growth_rate: float, growth rate at dense layers.\n name: string, block label.\n\nReturns:\n Output tensor for the block."} +{"repo": "tf-quant-finance", "function": "def sample_paths(self, initial_forward: types.RealTensor, initial_volatility: types.RealTensor, times: types.RealTensor, time_step: types.RealTensor, num_samples: types.RealTensor=1, random_type: Optional[random.RandomType]=None, seed: Optional[types.IntTensor]=None, skip: types.IntTensor=0, validate_args: bool=False, precompute_normal_draws: bool=True, name: Optional[str]=None):\n name = name or self._name + '_sample_path'\n with tf.name_scope(name):\n initial_forward = 
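`DTypePolicyMap.__getitem__` above first tries an exact key, then treats every stored key as a regex, erroring when more than one matches and returning the default when none do. A framework-free sketch of that lookup order (the policy values here are plain strings for illustration):

```python
import re

def lookup(policy_map, key, default):
    if key in policy_map:
        return policy_map[key]
    hits = [k for k in policy_map if re.search(k, key)]
    if len(hits) > 1:
        raise ValueError(f'{key!r} matches multiple keys: {hits}')
    return policy_map[hits[0]] if hits else default

policies = {'dense_[01]': 'bfloat16', 'output': 'float32'}
print(lookup(policies, 'layer/dense_0', 'default'))  # bfloat16 (regex hit)
print(lookup(policies, 'layer/conv_0', 'default'))   # default (no hit)
```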
tf.convert_to_tensor(initial_forward, self._dtype, name='initial_forward')\n initial_volatility = tf.convert_to_tensor(initial_volatility, self._dtype, name='initial_volatility')\n times = tf.convert_to_tensor(times, self._dtype, name='times')\n time_step = tf.convert_to_tensor(time_step, self._dtype, name='time_step')\n if validate_args:\n self.control_dependencies.append(tf.compat.v1.debugging.Assert(tf.compat.v1.debugging.is_strictly_increasing(times), [times]))\n with tf.compat.v1.control_dependencies(self.control_dependencies):\n initial_forward += self._shift\n if self._enable_unbiased_sampling and (not (_is_callable(self._beta) or _is_callable(self._volvol) or _is_callable(self._rho) or (self._beta == 1))):\n paths = self._sabr_sample_paths(initial_forward, initial_volatility, times, time_step, num_samples, random_type, seed, precompute_normal_draws, skip=skip)\n else:\n paths = super(SabrModel, self).sample_paths(times, num_samples, [initial_forward, initial_volatility], random_type, seed, skip=skip, time_step=time_step, precompute_normal_draws=precompute_normal_draws)\n forwards = tf.expand_dims(paths[:, :, 0] - self._shift, axis=-1)\n volatilities = tf.expand_dims(paths[:, :, 1], axis=-1)\n return tf.concat([forwards, volatilities], axis=-1)", "docstring": "Returns a sample of paths from the process.\n\nGenerates samples of paths from the process at the specified time points.\n\nCurrently only supports absorbing boundary conditions.\n\nArgs:\n initial_forward: Initial value of the forward. A scalar real tensor.\n initial_volatility: Initial value of the volatilities. A scalar real\n tensor.\n times: The times at which the path points are to be evaluated. Rank 1\n `Tensor` of positive real values. This `Tensor` should be sorted in\n ascending order.\n time_step: Positive Python float or a scalar `Tensor `to denote time\n discretization parameter.\n num_samples: Positive scalar `int`. The number of paths to draw.\n random_type: Enum value of `RandomType`. The type of (quasi)-random number\n generator to use to generate the paths.\n Default value: None which maps to the standard pseudo-random numbers.\n seed: Seed for the random number generator. The seed is\n only relevant if `random_type` is one of\n `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,\n STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and\n `HALTON_RANDOMIZED` the seed should be an Python integer. For\n `STATELESS` and `STATELESS_ANTITHETIC` must be supplied as an integer\n `Tensor` of shape `[2]`.\n Default value: `None` which means no seed is set.\n skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or\n Halton sequence to skip. Used only when `random_type` is 'SOBOL',\n 'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.\n Default value: `0`.\n validate_args: Python `bool`. When `True`, input `Tensor's` are checked\n for validity despite possibly degrading runtime performance. The checks\n verify that `times` is strictly increasing. When `False` invalid inputs\n may silently render incorrect outputs. Default value: False.\n precompute_normal_draws: Python bool. Indicates whether the noise\n increments are precomputed upfront (see `models.euler_sampling.sample`).\n For `HALTON` and `SOBOL` random types the increments are always\n precomputed. While the resulting graph consumes more memory, the\n performance gains might be significant. Default value: `True`.\n name: str. The name to give this op. 
If not supplied, default name of\n `sample_paths` is used.\n\nReturns:\n A `Tensor`s of shape [num_samples, k, 2] where `k` is the size of the\n `times`. The first values in `Tensor` are the simulated forward `F(t)`,\n whereas the second values in `Tensor` are the simulated volatility\n trajectories `V(t)`."} +{"repo": "transformers", "function": "class XLMRobertaConfig(PretrainedConfig):\n model_type = 'xlm-roberta'\n\n def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.position_embedding_type = position_embedding_type\n self.use_cache = use_cache\n self.classifier_dropout = classifier_dropout", "docstring": "This is the configuration class to store the configuration of a [`XLMRobertaModel`] or a [`TFXLMRobertaModel`]. It\nis used to instantiate a XLM-RoBERTa model according to the specified arguments, defining the model architecture.\nInstantiating a configuration with the defaults will yield a similar configuration to that of the XLMRoBERTa\n[FacebookAI/xlm-roberta-base](https://huggingface.co/FacebookAI/xlm-roberta-base) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 30522):\n Vocabulary size of the XLM-RoBERTa model. Defines the number of different tokens that can be represented by\n the `inputs_ids` passed when calling [`XLMRobertaModel`] or [`TFXLMRobertaModel`].\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `Callable`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
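The SABR `sample_paths` docstring that closes above draws joint paths of the forward and its stochastic volatility. The record's unbiased scheme is specialized; the sketch below is only a plain Euler discretization of the same dynamics in NumPy (an illustrative assumption, not tf-quant-finance's method), with the absorbing boundary the docstring mentions and made-up parameter values:

```python
import numpy as np

def sabr_euler_paths(f0, v0, beta, volvol, rho, horizon, dt, n_paths, seed=0):
    # Euler scheme for dF = v * max(F, 0)**beta dW1, dv = volvol * v dW2,
    # corr(dW1, dW2) = rho, with an absorbing boundary at F = 0.
    rng = np.random.default_rng(seed)
    n_steps = int(round(horizon / dt))
    f = np.full(n_paths, float(f0))
    v = np.full(n_paths, float(v0))
    for _ in range(n_steps):
        z1 = rng.standard_normal(n_paths)
        z2 = rho * z1 + np.sqrt(1.0 - rho ** 2) * rng.standard_normal(n_paths)
        f = np.maximum(f + v * np.maximum(f, 0.0) ** beta * np.sqrt(dt) * z1, 0.0)
        v = v * np.exp(volvol * np.sqrt(dt) * z2 - 0.5 * volvol ** 2 * dt)
    return f, v

f_T, v_T = sabr_euler_paths(f0=0.03, v0=0.2, beta=0.5, volvol=0.4, rho=-0.3,
                            horizon=1.0, dt=1 / 52, n_paths=4)
print(f_T, v_T)  # terminal forwards and volatilities, one entry per path
```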
If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n hidden_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n max_position_embeddings (`int`, *optional*, defaults to 512):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n type_vocab_size (`int`, *optional*, defaults to 2):\n The vocabulary size of the `token_type_ids` passed when calling [`XLMRobertaModel`] or\n [`TFXLMRobertaModel`].\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n position_embedding_type (`str`, *optional*, defaults to `\"absolute\"`):\n Type of position embedding. Choose one of `\"absolute\"`, `\"relative_key\"`, `\"relative_key_query\"`. For\n positional embeddings use `\"absolute\"`. For more information on `\"relative_key\"`, please refer to\n [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).\n For more information on `\"relative_key_query\"`, please refer to *Method 4* in [Improve Transformer Models\n with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).\n is_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n classifier_dropout (`float`, *optional*):\n The dropout ratio for the classification head.\n\nExamples:\n\n```python\n>>> from transformers import XLMRobertaConfig, XLMRobertaModel\n\n>>> # Initializing a XLM-RoBERTa FacebookAI/xlm-roberta-base style configuration\n>>> configuration = XLMRobertaConfig()\n\n>>> # Initializing a model (with random weights) from the FacebookAI/xlm-roberta-base style configuration\n>>> model = XLMRobertaModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "pytype", "function": "def _compare_traceback_strings(left, right):\n if left == right:\n return 0\n left = left[len(TRACEBACK_MARKER):] if left else ''\n right = right[len(TRACEBACK_MARKER):] if right else ''\n if left.endswith(right):\n return 1\n elif right.endswith(left):\n return -1\n else:\n return None", "docstring": "Try to compare two traceback strings.\n\nTwo traceback strings are comparable if they are equal, or if one ends with\nthe other. 
For example, these two tracebacks are comparable:\n Traceback:\n line 1, in \n line 2, in foo\n Traceback:\n line 2, in foo\nand the first is greater than the second.\n\nArgs:\n left: A string or None.\n right: A string or None.\n\nReturns:\n None if the inputs aren't comparable, else an integer."} +{"repo": "tensorflow", "function": "def _sort_dump_data_by(self, data, sort_by, reverse):\n if sort_by == SORT_TENSORS_BY_TIMESTAMP:\n return sorted(data, reverse=reverse, key=lambda x: x.timestamp)\n elif sort_by == SORT_TENSORS_BY_DUMP_SIZE:\n return sorted(data, reverse=reverse, key=lambda x: x.dump_size_bytes)\n elif sort_by == SORT_TENSORS_BY_OP_TYPE:\n return sorted(data, reverse=reverse, key=lambda x: self._debug_dump.node_op_type(x.node_name))\n elif sort_by == SORT_TENSORS_BY_TENSOR_NAME:\n return sorted(data, reverse=reverse, key=lambda x: '%s:%d' % (x.node_name, x.output_slot))\n else:\n raise ValueError('Unsupported key to sort tensors by: %s' % sort_by)", "docstring": "Sort a list of DebugTensorDatum in specified order.\n\nArgs:\n data: (list of DebugTensorDatum) the data to be sorted.\n sort_by: The field to sort data by.\n reverse: (bool) Whether to use reversed (descending) order.\n\nReturns:\n (list of DebugTensorDatum) in sorted order.\n\nRaises:\n ValueError: given an invalid value of sort_by."} +{"repo": "pytype", "function": "class Module:\n path: str\n target: str\n name: str\n kind: str = 'Local'\n\n @property\n def full_path(self):\n return path_utils.join(self.path, self.target)", "docstring": "Inferred information about a module.\n\nAttributes:\n path: The path to the module, e.g., foo/.\n target: The filename relative to the path, e.g., bar/baz.py.\n name: The module name, e.g., bar.baz.\n kind: The module kind: Builtin, Direct, Local, or System. 
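`_compare_traceback_strings` above defines a partial order on tracebacks: equal strings compare as 0, a string that ends with the other compares as 1 or -1, and anything else is incomparable. A simplified, runnable restatement of that contract (the `TRACEBACK_MARKER` stripping is elided here):

```python
def compare_suffix(left, right):
    # Simplified: no marker stripping; None is treated as the empty string.
    left, right = left or '', right or ''
    if left == right:
        return 0
    if left.endswith(right):
        return 1
    if right.endswith(left):
        return -1
    return None

outer = 'line 1, in <module>\nline 2, in foo'
inner = 'line 2, in foo'
assert compare_suffix(outer, inner) == 1   # the longer traceback is "greater"
assert compare_suffix(inner, outer) == -1
assert compare_suffix('a', 'b') is None    # incomparable
```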
See\n https://github.com/google/importlab/blob/main/importlab/resolve.py.\n full_path: The full path to the module (path + target)."} +{"repo": "transformers", "function": "class LayoutLMv3TokenizerFast(PreTrainedTokenizerFast):\n vocab_files_names = VOCAB_FILES_NAMES\n model_input_names = ['input_ids', 'attention_mask']\n slow_tokenizer_class = LayoutLMv3Tokenizer\n\n def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='', eos_token='', sep_token='', cls_token='', unk_token='', pad_token='', mask_token='', add_prefix_space=True, trim_offsets=True, cls_token_box=[0, 0, 0, 0], sep_token_box=[0, 0, 0, 0], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, **kwargs):\n super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs)\n tokenizer_component = 'post_processor'\n tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)\n if tokenizer_component_instance:\n state = json.loads(tokenizer_component_instance.__getstate__())\n if 'sep' in state:\n state['sep'] = tuple(state['sep'])\n if 'cls' in state:\n state['cls'] = tuple(state['cls'])\n changes_to_apply = False\n if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:\n state['add_prefix_space'] = add_prefix_space\n changes_to_apply = True\n if state.get('trim_offsets', trim_offsets) != trim_offsets:\n state['trim_offsets'] = trim_offsets\n changes_to_apply = True\n if changes_to_apply:\n component_class = getattr(processors, state.pop('type'))\n new_value = component_class(**state)\n setattr(self.backend_tokenizer, tokenizer_component, new_value)\n self.cls_token_box = cls_token_box\n self.sep_token_box = sep_token_box\n self.pad_token_box = pad_token_box\n self.pad_token_label = pad_token_label\n self.only_label_first_subword = only_label_first_subword\n\n @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)\n def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]]=None, boxes: Optional[Union[List[List[int]], List[List[List[int]]]]]=None, word_labels: Optional[Union[List[int], List[List[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n \"\"\"\n Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of\n sequences with word-level normalized bounding boxes and optional labels.\n\n Args:\n text (`str`, `List[str]`, `List[List[str]]`):\n 
The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings\n (words of a single example or questions of a batch of examples) or a list of list of strings (batch of\n words).\n text_pair (`List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence should be a list of strings\n (pretokenized string).\n boxes (`List[List[int]]`, `List[List[List[int]]]`):\n Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.\n word_labels (`List[int]`, `List[List[int]]`, *optional*):\n Word-level integer labels (for token classification tasks such as FUNSD, CORD).\n \"\"\"\n\n def _is_valid_text_input(t):\n if isinstance(t, str):\n return True\n elif isinstance(t, (list, tuple)):\n if len(t) == 0:\n return True\n elif isinstance(t[0], str):\n return True\n elif isinstance(t[0], (list, tuple)):\n return len(t[0]) == 0 or isinstance(t[0][0], str)\n else:\n return False\n else:\n return False\n if text_pair is not None:\n if not _is_valid_text_input(text):\n raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). ')\n if not isinstance(text_pair, (list, tuple)):\n raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n elif not isinstance(text, (list, tuple)):\n raise ValueError('Words must be of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')\n if text_pair is not None:\n is_batched = isinstance(text, (list, tuple))\n else:\n is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))\n words = text if text_pair is None else text_pair\n if boxes is None:\n raise ValueError('You must provide corresponding bounding boxes')\n if is_batched:\n if len(words) != len(boxes):\n raise ValueError('You must provide words and boxes for an equal amount of examples')\n for words_example, boxes_example in zip(words, boxes):\n if len(words_example) != len(boxes_example):\n raise ValueError('You must provide as many words as there are bounding boxes')\n elif len(words) != len(boxes):\n raise ValueError('You must provide as many words as there are bounding boxes')\n if is_batched:\n if text_pair is not None and len(text) != len(text_pair):\n raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')\n batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text\n is_pair = bool(text_pair is not None)\n return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n else:\n return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, 
padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n\n @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)\n def batch_encode_plus(self, batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair], List[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[List[List[List[int]]]]=None, word_labels: Optional[Union[List[int], List[List[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)\n return self._batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n\n def tokenize(self, text: str, pair: Optional[str]=None, add_special_tokens: bool=False, **kwargs) -> List[str]:\n batched_input = [(text, pair)] if pair else [text]\n encodings = self._tokenizer.encode_batch(batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs)\n return encodings[0].tokens\n\n @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)\n def encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[int]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n \"\"\"\n Tokenize and prepare for the model a sequence or a pair of sequences. .. 
warning:: This method is deprecated,\n `__call__` should be used instead.\n\n Args:\n text (`str`, `List[str]`, `List[List[str]]`):\n The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.\n text_pair (`List[str]` or `List[int]`, *optional*):\n Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a\n list of list of strings (words of a batch of examples).\n \"\"\"\n padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)\n return self._encode_plus(text=text, boxes=boxes, text_pair=text_pair, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n\n def _batch_encode_plus(self, batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair], List[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[List[List[List[int]]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:\n if not isinstance(batch_text_or_text_pairs, list):\n raise TypeError(f'batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})')\n self.set_truncation_and_padding(padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side)\n if is_pair:\n batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]\n encodings = self._tokenizer.encode_batch(batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True)\n tokens_and_encodings = [self._convert_encoding(encoding=encoding, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, return_length=return_length, verbose=verbose) for encoding in encodings]\n sanitized_tokens = {}\n for key in tokens_and_encodings[0][0].keys():\n stack = [e for item, _ in tokens_and_encodings for e in item[key]]\n sanitized_tokens[key] = stack\n sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]\n if return_overflowing_tokens:\n overflow_to_sample_mapping = []\n for i, 
(toks, _) in enumerate(tokens_and_encodings):\n overflow_to_sample_mapping += [i] * len(toks['input_ids'])\n sanitized_tokens['overflow_to_sample_mapping'] = overflow_to_sample_mapping\n for input_ids in sanitized_tokens['input_ids']:\n self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)\n token_boxes = []\n for batch_index in range(len(sanitized_tokens['input_ids'])):\n if return_overflowing_tokens:\n original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]\n else:\n original_index = batch_index\n token_boxes_example = []\n for id, sequence_id, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids):\n if word_id is not None:\n if is_pair and sequence_id == 0:\n token_boxes_example.append(self.pad_token_box)\n else:\n token_boxes_example.append(boxes[original_index][word_id])\n elif id == self.cls_token_id:\n token_boxes_example.append(self.cls_token_box)\n elif id == self.sep_token_id:\n token_boxes_example.append(self.sep_token_box)\n elif id == self.pad_token_id:\n token_boxes_example.append(self.pad_token_box)\n else:\n raise ValueError('Id not recognized')\n token_boxes.append(token_boxes_example)\n sanitized_tokens['bbox'] = token_boxes\n if word_labels is not None:\n labels = []\n for batch_index in range(len(sanitized_tokens['input_ids'])):\n if return_overflowing_tokens:\n original_index = sanitized_tokens['overflow_to_sample_mapping'][batch_index]\n else:\n original_index = batch_index\n labels_example = []\n previous_token_empty = False\n for id, offset, word_id in zip(sanitized_tokens['input_ids'][batch_index], sanitized_tokens['offset_mapping'][batch_index], sanitized_encodings[batch_index].word_ids):\n if word_id is not None:\n if self.only_label_first_subword:\n if offset[0] == 0 and (not previous_token_empty):\n labels_example.append(word_labels[original_index][word_id])\n else:\n labels_example.append(self.pad_token_label)\n if offset == (0, 0):\n previous_token_empty = True\n else:\n previous_token_empty = False\n else:\n labels_example.append(word_labels[original_index][word_id])\n else:\n labels_example.append(self.pad_token_label)\n labels.append(labels_example)\n sanitized_tokens['labels'] = labels\n if not return_offsets_mapping:\n del sanitized_tokens['offset_mapping']\n return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)\n\n def _encode_plus(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[int]]=None, add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[bool]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n batched_input = [(text, text_pair)] if text_pair else [text]\n batched_boxes = [boxes]\n batched_word_labels = [word_labels] if word_labels is not None else None\n batched_output = self._batch_encode_plus(batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, 
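The loop that finishes above gives every token a bounding box: tokens with a `word_id` inherit their source word's box, and the CLS/SEP/PAD ids fall back to the configured special boxes. A self-contained sketch of that alignment; the token ids and boxes below are invented for illustration:

```python
CLS_BOX = SEP_BOX = PAD_BOX = [0, 0, 0, 0]
CLS_ID, SEP_ID, PAD_ID = 101, 102, 0

def align_boxes(input_ids, word_ids, word_boxes):
    token_boxes = []
    for tok_id, word_id in zip(input_ids, word_ids):
        if word_id is not None:
            token_boxes.append(word_boxes[word_id])  # real word piece
        elif tok_id == CLS_ID:
            token_boxes.append(CLS_BOX)
        elif tok_id == SEP_ID:
            token_boxes.append(SEP_BOX)
        elif tok_id == PAD_ID:
            token_boxes.append(PAD_BOX)
        else:
            raise ValueError('Id not recognized')
    return token_boxes

boxes = [[10, 10, 50, 20], [55, 10, 90, 20]]
print(align_boxes([101, 7, 7, 8, 102, 0], [None, 0, 0, 1, None, None], boxes))
```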
word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n if return_tensors is None and (not return_overflowing_tokens):\n batched_output = BatchEncoding({key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items()}, batched_output.encodings)\n self._eventual_warn_about_too_long_sequence(batched_output['input_ids'], max_length, verbose)\n return batched_output\n\n def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:\n \"\"\"\n Pad encoded inputs (on left/right and up to predefined length or max length in the batch)\n\n Args:\n encoded_inputs:\n Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).\n max_length: maximum length of the returned list and optionally padding length (see below).\n Will truncate by taking into account the special tokens.\n padding_strategy: PaddingStrategy to use for padding.\n\n - PaddingStrategy.LONGEST Pad to the longest sequence in the batch\n - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)\n - PaddingStrategy.DO_NOT_PAD: Do not pad\n The tokenizer padding sides are defined in self.padding_side:\n\n - 'left': pads on the left of the sequences\n - 'right': pads on the right of the sequences\n pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.\n This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability\n `>= 7.5` (Volta).\n padding_side:\n The side on which the model should have padding applied. 
Should be selected between ['right', 'left'].\n Default value is picked from the class attribute of the same name.\n return_attention_mask:\n (optional) Set to False to avoid returning attention mask (default: set to model specifics)\n \"\"\"\n if return_attention_mask is None:\n return_attention_mask = 'attention_mask' in self.model_input_names\n required_input = encoded_inputs[self.model_input_names[0]]\n if padding_strategy == PaddingStrategy.LONGEST:\n max_length = len(required_input)\n if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of\n needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length\n if return_attention_mask and 'attention_mask' not in encoded_inputs:\n encoded_inputs['attention_mask'] = [1] * len(required_input)\n if needs_to_be_padded:\n difference = max_length - len(required_input)\n padding_side = padding_side if padding_side is not None else self.padding_side\n if padding_side == 'right':\n if return_attention_mask:\n encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference\n if 'token_type_ids' in encoded_inputs:\n encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference\n if 'bbox' in encoded_inputs:\n encoded_inputs['bbox'] = encoded_inputs['bbox'] + [self.pad_token_box] * difference\n if 'labels' in encoded_inputs:\n encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference\n if 'special_tokens_mask' in encoded_inputs:\n encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference\n encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference\n elif padding_side == 'left':\n if return_attention_mask:\n encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']\n if 'token_type_ids' in encoded_inputs:\n encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids']\n if 'bbox' in encoded_inputs:\n encoded_inputs['bbox'] = [self.pad_token_box] * difference + encoded_inputs['bbox']\n if 'labels' in encoded_inputs:\n encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels']\n if 'special_tokens_mask' in encoded_inputs:\n encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']\n encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input\n else:\n raise ValueError('Invalid padding strategy:' + str(padding_side))\n return encoded_inputs\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n files = self._tokenizer.model.save(save_directory, name=filename_prefix)\n return tuple(files)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]\n if token_ids_1 is None:\n return output\n return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]\n\n def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n \"\"\"\n Args:\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
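`_pad` above first rounds the target length up to `pad_to_multiple_of`, then extends the ids and the attention mask on the chosen side. A list-only sketch of the right-padding arithmetic (the pad id and inputs are made up):

```python
def pad_right(ids, pad_id=1, multiple=8):
    max_len = len(ids)
    if max_len % multiple != 0:
        # Round up to the next multiple, as in the record above.
        max_len = (max_len // multiple + 1) * multiple
    diff = max_len - len(ids)
    mask = [1] * len(ids) + [0] * diff
    return ids + [pad_id] * diff, mask

ids, mask = pad_right([5, 6, 7, 8, 9])
print(ids)   # [5, 6, 7, 8, 9, 1, 1, 1]
print(mask)  # [1, 1, 1, 1, 1, 0, 0, 0]
```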
RoBERTa does not:\n make use of token type ids, therefore a list of zeros is returned.\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n Returns:\n `List[int]`: List of zeros.\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Construct a \"fast\" LayoutLMv3 tokenizer (backed by HuggingFace's *tokenizers* library). Based on BPE.\n\nThis tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\nrefer to this superclass for more information regarding those methods.\n\nArgs:\n vocab_file (`str`):\n Path to the vocabulary file.\n merges_file (`str`):\n Path to the merges file.\n errors (`str`, *optional*, defaults to `\"replace\"`):\n Paradigm to follow when decoding bytes to UTF-8. See\n [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.\n bos_token (`str`, *optional*, defaults to `\"\"`):\n The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.\n\n \n\n When building a sequence using special tokens, this is not the token that is used for the beginning of\n sequence. The token used is the `cls_token`.\n\n \n\n eos_token (`str`, *optional*, defaults to `\"\"`):\n The end of sequence token.\n\n \n\n When building a sequence using special tokens, this is not the token that is used for the end of sequence.\n The token used is the `sep_token`.\n\n \n\n sep_token (`str`, *optional*, defaults to `\"\"`):\n The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for\n sequence classification or for a text and a question for question answering. It is also used as the last\n token of a sequence built with special tokens.\n cls_token (`str`, *optional*, defaults to `\"\"`):\n The classifier token which is used when doing sequence classification (classification of the whole sequence\n instead of per-token classification). It is the first token of the sequence when built with special tokens.\n unk_token (`str`, *optional*, defaults to `\"\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n pad_token (`str`, *optional*, defaults to `\"\"`):\n The token used for padding, for example when batching sequences of different lengths.\n mask_token (`str`, *optional*, defaults to `\"\"`):\n The token used for masking values. This is the token used when training this model with masked language\n modeling. This is the token which the model will try to predict.\n add_prefix_space (`bool`, *optional*, defaults to `False`):\n Whether or not to add an initial space to the input. This allows to treat the leading word just as any\n other word. 
(RoBERTa tokenizer detect beginning of words by the preceding space).\n trim_offsets (`bool`, *optional*, defaults to `True`):\n Whether the post processing step should trim offsets to avoid including whitespaces.\n cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):\n The bounding box to use for the special [CLS] token.\n sep_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):\n The bounding box to use for the special [SEP] token.\n pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):\n The bounding box to use for the special [PAD] token.\n pad_token_label (`int`, *optional*, defaults to -100):\n The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's\n CrossEntropyLoss.\n only_label_first_subword (`bool`, *optional*, defaults to `True`):\n Whether or not to only label the first subword, in case word labels are provided."} +{"repo": "transformers", "function": "def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):\n return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`AlignConfig`] (or a derived class) from align text model configuration and align vision model\nconfiguration.\n\nReturns:\n [`AlignConfig`]: An instance of a configuration object"} +{"repo": "tensorflow", "function": "def from_lengths(cls, lengths: Sequence[Union[Sequence[int], int]], num_row_partitions=None, dtype=dtypes.int64):\n if not isinstance(lengths, list):\n raise ValueError('lengths should be a list')\n for x in lengths:\n if not _is_int_or_tuple_of_ints(x):\n raise ValueError('element of lengths should be int or tuple of ints: instead %r' % (x,))\n if num_row_partitions is None:\n is_list = [not isinstance(x, int) for x in lengths]\n if any(is_list):\n num_row_partitions = len(is_list) - is_list[-1::-1].index(True) - 1\n else:\n num_row_partitions = 0\n if not isinstance(num_row_partitions, int):\n raise ValueError('num_row_partitions should be an int or None')\n if not lengths:\n if num_row_partitions > 0:\n raise ValueError('num_row_partitions==0 for a scalar shape')\n return DynamicRaggedShape([], [], dtype=dtype)\n if not num_row_partitions < len(lengths):\n raise ValueError('num_row_partitions should be less than `len(lengths)` if shape is not scalar.')\n if num_row_partitions > 0:\n row_partitions, nvals = _to_row_partitions_and_nvals_from_lengths(lengths[:num_row_partitions + 1])\n inner_shape = [nvals] + lengths[num_row_partitions + 1:]\n return DynamicRaggedShape(row_partitions, inner_shape, dtype=dtype)\n else:\n return DynamicRaggedShape([], lengths, dtype=dtype)", "docstring": "Creates a shape with the given lengths and num_row_partitions.\n\nThe lengths can either be a nonnegative int or a list of nonnegative ints.\n\nIf num_row_partitions is None, then the minimal num_row_partitions is used.\n\nFor example, [2, (3, 2)] is the shape of [[0, 0, 0], [0, 0]], and\n[2, 2] is the shape of [[0, 0], [0, 0]]\n\nThis chooses the minimal num_row_partitions required (including zero).\n\nThe following table gives a few examples (where `RP(lengths)` is short\nfor `RowPartition.from_lengths(lengths)`):\n\nFor example:\nfrom_lengths | row_partitions | inner_shape\n---------------------- | --------------------------| -------------\n[] | [] | []\n[2, (3, 2)] | [RP([3, 2])] | [5]\n[2, 2] | [] | [2, 2]\n[2, (3, 2), 7] | [RP([3, 2])] | [5, 7]\n[2, (2, 2), 3] | [RP([2, 2])] | [4, 3]\n[2, 2, 3] | [] | [2, 2, 
3]\n[2, (2, 1), (2, 0, 3)] | [RP(2, 1), RP([2, 0, 3])] | [5]\n\nIf we want the row partitions to end with uniform row partitions, then\nwe can set num_row_partitions.\n\nFor example,\nbelow URP(3, 12) is RowPartition.from_uniform_row_length(3, 12)\n\nfrom_lengths | num_row_partitions | row_partitions | inner_shape\n---------------| -------------------|--------------------------|------------\n[2, (3, 2), 2] | 2 | [RP([3, 2]), URP(2, 10)] | [10]\n[2, 2] | 1 | [URP(2, 4)] | [4]\n[2, 2, 3] | 0 | [] | [2, 2, 3]\n[2, 2, 3] | 1 | [URP(2, 4)] | [4, 3]\n[2, 2, 3] | 2 | [URP(2, 4), URP(3, 12)] | [12]\n\n\n\nRepresenting the shapes from init():\n\nfrom_lengths | Tensor Example\n------------------------ | ------------------------------\n`[2, 3]` | `[[1, 2, 3], [4, 5, 6]]`\n`[3, (2, 0, 3)]` | `[[1, 2], [], [3, 4, 5]]`\n`[2, (2, 1), 2]` | `[[[1, 2], [3, 4]], [[5, 6]]]`\n`[2, (2, 1), (2, 1, 2)]` | `[[[1, 2], [3]], [[4, 5]]]`\n\nArgs:\n lengths: the lengths of sublists along each axis.\n num_row_partitions: the num_row_partitions of the result or None\n indicating the minimum number of row_partitions.\n dtype: the dtype of the shape (tf.int32 or tf.int64).\n\nReturns:\n a new DynamicRaggedShape"} +{"repo": "tf-quant-finance", "function": "def update(lower: types.FloatTensor, upper: types.FloatTensor, estimate: types.FloatTensor, error: types.FloatTensor, tolerance: float, dtype: Optional[tf.DType]=None, name: Optional[str]=None) -> (types.FloatTensor, types.FloatTensor, types.FloatTensor):\n with tf.name_scope(name=name or 'adaptive_update'):\n lower = tf.convert_to_tensor(lower, dtype=dtype, name='lower')\n dtype = lower.dtype\n upper = tf.convert_to_tensor(upper, dtype=dtype, name='upper')\n relative_error = error / estimate\n condition = relative_error > tolerance\n num_bad_sub_intervals = tf.reduce_max(tf.math.count_nonzero(condition, axis=1, dtype=tf.int32), axis=0)\n indices = tf.math.top_k(relative_error, k=num_bad_sub_intervals, sorted=False).indices\n sum_all = tf.reduce_sum(estimate, axis=-1)\n sum_bad = tf.reduce_sum(tf.gather(estimate, indices, batch_dims=-1), axis=-1)\n sum_goods = sum_all - sum_bad\n filtered_lower = tf.gather(lower, indices, batch_dims=-1)\n filtered_upper = tf.gather(upper, indices, batch_dims=-1)\n mid_points = (filtered_lower + filtered_upper) / 2\n new_lower = tf.concat([filtered_lower, mid_points], axis=-1)\n new_upper = tf.concat([mid_points, filtered_upper], axis=-1)\n return (new_lower, new_upper, sum_goods)", "docstring": "Calculates new values for the limits for any adaptive quadrature.\n\nChecks which intervals have estimated results that are within the provided\ntolerance. The values for these intervals are added to the sum of good\nestimations. The other intervals get divided in half.\n\n#### Example\n```python\n l = tf.constant([[[0.0], [1.0]]])\n u = tf.constant([[[1.0], [2.0]]])\n estimate = tf.constant([[[3.0], [4.0]]])\n err = tf.constant([[[0.01], [0.02]]])\n tol = 0.004\n update(l, u, estimate, err, tol)\n # tf.constant([[1.0, 1.5]]), tf.constant([[1.5, 2.0]]), tf.constant([3.0])\n```\n\nArgs:\n lower: Represents the lower limits of integration. 
Must be a 2-dimensional\n tensor of shape `[batch_dim, n]` (where `n` is defined by the algorithm\n and represents the number of subintervals).\n upper: Same shape and dtype as `lower` representing the upper limits of\n intergation.\n estimate: Same shape and dtype as `lower` representing the integration\n results calculated with some quadrature method for the corresponding\n limits.\n error: Same shape and dtype as `lower` representing the estimated\n integration error for corresponding `estimate` values.\n tolerance: Represents the tolerance for the estimated error of the integral\n estimation, at which to stop further dividing the intervals.\n dtype: If supplied, the dtype for the `lower` and `upper`. Result will have\n the same dtype. Default value: None which maps to dtype of `lower`.\n name: The name to give to the ops created by this function. Default value:\n None which maps to 'adaptive_update'.\n\nReturns:\n A tuple:\n * `Tensor` of shape `[batch_dim, new_n]`, containing values of the new\n lower limits,\n * `Tensor` of shape `[batch_dim, new_n]`, containing values of the new\n upper limits,\n * `Tensor` of shape `[batch_dim]`, containing sum values of the quadrature\n method results of the good intervals."} +{"repo": "keras", "function": "class Normalization(TFDataLayer):\n\n def __init__(self, axis=-1, mean=None, variance=None, invert=False, **kwargs):\n super().__init__(**kwargs)\n if axis is None:\n axis = ()\n elif isinstance(axis, int):\n axis = (axis,)\n else:\n axis = tuple(axis)\n self.axis = axis\n self.input_mean = mean\n self.input_variance = variance\n self.invert = invert\n self.supports_masking = True\n self._build_input_shape = None\n self.mean = None\n if (mean is not None) != (variance is not None):\n raise ValueError(f'When setting values directly, both `mean` and `variance` must be set. Received: mean={mean} and variance={variance}')\n\n def build(self, input_shape):\n if input_shape is None:\n return\n ndim = len(input_shape)\n self._build_input_shape = input_shape\n if any((a < -ndim or a >= ndim for a in self.axis)):\n raise ValueError(f'All `axis` values must be in the range [-ndim, ndim). Received inputs with ndim={ndim}, while axis={self.axis}')\n self._keep_axis = tuple(sorted([d if d >= 0 else d + ndim for d in self.axis]))\n for d in self._keep_axis:\n if input_shape[d] is None:\n raise ValueError(f'All `axis` values to be kept must have a known shape. 
Received axis={self.axis}, inputs.shape={input_shape}, with unknown axis at index {d}')\n self._reduce_axis = tuple((d for d in range(ndim) if d not in self._keep_axis))\n self._reduce_axis_mask = [0 if d in self._keep_axis else 1 for d in range(ndim)]\n self._broadcast_shape = [input_shape[d] if d in self._keep_axis else 1 for d in range(ndim)]\n mean_and_var_shape = tuple((input_shape[d] for d in self._keep_axis))\n self._mean_and_var_shape = mean_and_var_shape\n if self.input_mean is None:\n self.adapt_mean = self.add_weight(name='mean', shape=mean_and_var_shape, initializer='zeros', trainable=False)\n self.adapt_variance = self.add_weight(name='variance', shape=mean_and_var_shape, initializer='ones', trainable=False)\n self.count = self.add_weight(name='count', shape=(), dtype='int', initializer='zeros', trainable=False)\n self.built = True\n self.finalize_state()\n else:\n mean = ops.convert_to_tensor(self.input_mean)\n variance = ops.convert_to_tensor(self.input_variance)\n mean = ops.broadcast_to(mean, self._broadcast_shape)\n variance = ops.broadcast_to(variance, self._broadcast_shape)\n self.mean = ops.cast(mean, dtype=self.compute_dtype)\n self.variance = ops.cast(variance, dtype=self.compute_dtype)\n\n def adapt(self, data):\n \"\"\"Computes the mean and variance of values in a dataset.\n\n Calling `adapt()` on a `Normalization` layer is an alternative to\n passing in `mean` and `variance` arguments during layer construction. A\n `Normalization` layer should always either be adapted over a dataset or\n passed `mean` and `variance`.\n\n During `adapt()`, the layer will compute a `mean` and `variance`\n separately for each position in each axis specified by the `axis`\n argument. To calculate a single `mean` and `variance` over the input\n data, simply pass `axis=None` to the layer.\n\n Arg:\n data: The data to train on. It can be passed either as a\n `tf.data.Dataset`, as a NumPy array, or as a backend-native\n eager tensor.\n If a dataset, *it must be batched*. 
Keras will assume that the\n data is batched, and if that assumption doesn't hold, the mean\n and variance may be incorrectly computed.\n \"\"\"\n if isinstance(data, np.ndarray) or backend.is_tensor(data):\n input_shape = data.shape\n elif isinstance(data, tf.data.Dataset):\n input_shape = tuple(data.element_spec.shape)\n if len(input_shape) == 1:\n data = data.batch(128)\n input_shape = tuple(data.element_spec.shape)\n if not self.built:\n self.build(input_shape)\n else:\n for d in self._keep_axis:\n if input_shape[d] != self._build_input_shape[d]:\n raise ValueError(f'The layer was built with input_shape={self._build_input_shape}, but adapt() is being called with data with an incompatible shape, data.shape={input_shape}')\n if isinstance(data, np.ndarray):\n total_mean = np.mean(data, axis=self._reduce_axis)\n total_var = np.var(data, axis=self._reduce_axis)\n elif backend.is_tensor(data):\n total_mean = ops.mean(data, axis=self._reduce_axis)\n total_var = ops.var(data, axis=self._reduce_axis)\n elif isinstance(data, tf.data.Dataset):\n total_mean = ops.zeros(self._mean_and_var_shape)\n total_var = ops.zeros(self._mean_and_var_shape)\n total_count = 0\n for batch in data:\n batch = backend.convert_to_tensor(batch, dtype=self.compute_dtype)\n batch_mean = ops.mean(batch, axis=self._reduce_axis)\n batch_var = ops.var(batch, axis=self._reduce_axis)\n if self._reduce_axis:\n batch_reduce_shape = (batch.shape[d] for d in self._reduce_axis)\n batch_count = math.prod(batch_reduce_shape)\n else:\n batch_count = 1\n total_count += batch_count\n batch_weight = float(batch_count) / total_count\n existing_weight = 1.0 - batch_weight\n new_total_mean = total_mean * existing_weight + batch_mean * batch_weight\n total_var = (total_var + (total_mean - new_total_mean) ** 2) * existing_weight + (batch_var + (batch_mean - new_total_mean) ** 2) * batch_weight\n total_mean = new_total_mean\n else:\n raise NotImplementedError(f'Unsupported data type: {type(data)}')\n self.adapt_mean.assign(total_mean)\n self.adapt_variance.assign(total_var)\n self.finalize_state()\n\n def finalize_state(self):\n if self.input_mean is not None or not self.built:\n return\n self.mean = ops.reshape(self.adapt_mean, self._broadcast_shape)\n self.mean = ops.cast(self.mean, self.compute_dtype)\n self.variance = ops.reshape(self.adapt_variance, self._broadcast_shape)\n self.variance = ops.cast(self.variance, self.compute_dtype)\n\n def call(self, inputs):\n if self.mean is None:\n raise ValueError('You must call `.build(input_shape)` on the layer before using it.')\n inputs = self.backend.core.convert_to_tensor(inputs, dtype=self.compute_dtype)\n mean = self.convert_weight(self.mean)\n variance = self.convert_weight(self.variance)\n if self.invert:\n return self.backend.numpy.add(mean, self.backend.numpy.multiply(inputs, self.backend.numpy.maximum(self.backend.numpy.sqrt(variance), backend.epsilon())))\n else:\n return self.backend.numpy.divide(self.backend.numpy.subtract(inputs, mean), self.backend.numpy.maximum(self.backend.numpy.sqrt(variance), backend.epsilon()))\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = super().get_config()\n config.update({'axis': self.axis, 'invert': self.invert, 'mean': np.array(self.input_mean).tolist(), 'variance': np.array(self.input_variance).tolist()})\n return config\n\n def load_own_variables(self, store):\n super().load_own_variables(store)\n self.finalize_state()\n\n def get_build_config(self):\n if self._build_input_shape:\n return 
{'input_shape': self._build_input_shape}\n\n def build_from_config(self, config):\n if config:\n self.build(config['input_shape'])", "docstring": "A preprocessing layer that normalizes continuous features.\n\nThis layer will shift and scale inputs into a distribution centered around\n0 with standard deviation 1. It accomplishes this by precomputing the mean\nand variance of the data, and calling `(input - mean) / sqrt(var)` at\nruntime.\n\nThe mean and variance values for the layer must be either supplied on\nconstruction or learned via `adapt()`. `adapt()` will compute the mean and\nvariance of the data and store them as the layer's weights. `adapt()` should\nbe called before `fit()`, `evaluate()`, or `predict()`.\n\nArgs:\n axis: Integer, tuple of integers, or None. The axis or axes that should\n have a separate mean and variance for each index in the shape.\n For example, if shape is `(None, 5)` and `axis=1`, the layer will\n track 5 separate mean and variance values for the last axis.\n If `axis` is set to `None`, the layer will normalize\n all elements in the input by a scalar mean and variance.\n When `-1`, the last axis of the input is assumed to be a\n feature dimension and is normalized per index.\n Note that in the specific case of batched scalar inputs where\n the only axis is the batch axis, the default will normalize\n each index in the batch separately.\n In this case, consider passing `axis=None`. Defaults to `-1`.\n mean: The mean value(s) to use during normalization. The passed value(s)\n will be broadcast to the shape of the kept axes above;\n if the value(s) cannot be broadcast, an error will be raised when\n this layer's `build()` method is called.\n variance: The variance value(s) to use during normalization. The passed\n value(s) will be broadcast to the shape of the kept axes above;\n if the value(s) cannot be broadcast, an error will be raised when\n this layer's `build()` method is called.\n invert: If `True`, this layer will apply the inverse transformation\n to its inputs: it would turn a normalized input back into its\n original form.\n\nExamples:\n\nCalculate a global mean and variance by analyzing the dataset in `adapt()`.\n\n>>> adapt_data = np.array([1., 2., 3., 4., 5.], dtype='float32')\n>>> input_data = np.array([1., 2., 3.], dtype='float32')\n>>> layer = keras.layers.Normalization(axis=None)\n>>> layer.adapt(adapt_data)\n>>> layer(input_data)\narray([-1.4142135, -0.70710677, 0.], dtype=float32)\n\nCalculate a mean and variance for each index on the last axis.\n\n>>> adapt_data = np.array([[0., 7., 4.],\n... [2., 9., 6.],\n... [0., 7., 4.],\n... [2., 9., 6.]], dtype='float32')\n>>> input_data = np.array([[0., 7., 4.]], dtype='float32')\n>>> layer = keras.layers.Normalization(axis=-1)\n>>> layer.adapt(adapt_data)\n>>> layer(input_data)\narray([-1., -1., -1.], dtype=float32)\n\nPass the mean and variance directly.\n\n>>> input_data = np.array([[1.], [2.], [3.]], dtype='float32')\n>>> layer = keras.layers.Normalization(mean=3., variance=2.)\n>>> layer(input_data)\narray([[-1.4142135 ],\n [-0.70710677],\n [ 0. ]], dtype=float32)\n\nUse the layer to de-normalize inputs (after adapting the layer).\n\n>>> adapt_data = np.array([[0., 7., 4.],\n... [2., 9., 6.],\n... [0., 7., 4.],\n... 
[2., 9., 6.]], dtype='float32')\n>>> input_data = np.array([[1., 2., 3.]], dtype='float32')\n>>> layer = keras.layers.Normalization(axis=-1, invert=True)\n>>> layer.adapt(adapt_data)\n>>> layer(input_data)\narray([2., 10., 8.], dtype=float32)"} +{"repo": "tensorflow", "function": "def all(x, axis=None, keepdims=False):\n x = math_ops.cast(x, dtypes_module.bool)\n return math_ops.reduce_all(x, axis, keepdims)", "docstring": "Bitwise reduction (logical AND).\n\nArgs:\n x: Tensor or variable.\n axis: axis along which to perform the reduction.\n keepdims: whether the drop or broadcast the reduction axes.\n\nReturns:\n A uint8 tensor (0s and 1s)."} +{"repo": "tensorflow", "function": "def read_graphs_event(self, offset):\n return debug_event_pb2.DebugEvent.FromString(self._get_reader(self._graphs_path).read(offset)[0])", "docstring": "Read a DebugEvent proto at a given offset from the .graphs file.\n\nArgs:\n offset: Offset to read the DebugEvent proto from.\n\nReturns:\n A DebugEventProto.\n\nRaises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file."} +{"repo": "tensorflow", "function": "def get_profiles(self, cmd):\n if cmd not in self._views:\n raise ValueError('No autoprofiler for command: {}, was run'.format(cmd))\n return self._views[cmd]", "docstring": "Returns profiling results for each step at which `cmd` was run.\n\nArgs:\n cmd: string, profiling command used in an `add_auto_profiling` call.\n\nReturns:\n dict[int: (MultiGraphNodeProto | GraphNodeProto)]. Keys are steps at which\n the profiling command was run. Values are the outputs of profiling.\n For \"code\" and \"op\" commands this will be a `MultiGraphNodeProto`, for\n \"scope\" and \"graph\" commands this will be a `GraphNodeProto.\n\nRaises:\n ValueError: if `cmd` was never run (either because no session.run call was\n made or because there was no `add_auto_profiling` call with the specified\n `cmd`."} +{"repo": "beam", "function": "def _AssignTimestamps(pcoll, timestamp: Union[str, dict[str, str]], language: Optional[str]=None):\n timestamp_fn = _as_callable_for_pcoll(pcoll, timestamp, 'timestamp', language)\n T = TypeVar('T')\n return pcoll | beam.Map(lambda x: TimestampedValue(x, timestamp_fn(x))).with_input_types(T).with_output_types(T)", "docstring": "Assigns a new timestamp each element of its input.\n\nThis can be useful when reading records that have the timestamp embedded\nin them, for example with various file types or other sources that by default\nset all timestamps to the infinite past.\n\nNote that the timestamp should only be set forward, as setting it backwards\nmay not cause it to hold back an already advanced watermark and the data\ncould become droppably late.\n\nArgs:\n timestamp: A field, callable, or expression giving the new timestamp.\n language: The language of the timestamp expression.\n error_handling: Whether and how to handle errors during timestamp\n evaluation."} +{"repo": "transformers", "function": "class FlaubertSQuADHead(nn.Module):\n\n def __init__(self, config: FlaubertConfig):\n super().__init__()\n self.start_n_top = config.start_n_top\n self.end_n_top = config.end_n_top\n self.start_logits = FlaubertPoolerStartLogits(config)\n self.end_logits = FlaubertPoolerEndLogits(config)\n self.answer_class = FlaubertPoolerAnswerClass(config)\n\n @auto_docstring\n def forward(self, hidden_states: torch.FloatTensor, start_positions: Optional[torch.LongTensor]=None, end_positions: Optional[torch.LongTensor]=None, cls_index: 
Optional[torch.LongTensor]=None, is_impossible: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None, return_dict: bool=False) -> Union[FlaubertSquadHeadOutput, Tuple[torch.FloatTensor]]:\n \"\"\"\n hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):\n Final hidden states of the model on the sequence tokens.\n start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Positions of the first token for the labeled span.\n end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Positions of the last token for the labeled span.\n cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Position of the CLS token for each sentence in the batch. If `None`, takes the last token.\n is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Whether the question has a possible answer in the paragraph or not.\n p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):\n Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token\n should be masked.\n \"\"\"\n start_logits = self.start_logits(hidden_states, p_mask=p_mask)\n if start_positions is not None and end_positions is not None:\n for x in (start_positions, end_positions, cls_index, is_impossible):\n if x is not None and x.dim() > 1:\n x.squeeze_(-1)\n end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)\n loss_fct = CrossEntropyLoss()\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n if cls_index is not None and is_impossible is not None:\n cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)\n loss_fct_cls = nn.BCEWithLogitsLoss()\n cls_loss = loss_fct_cls(cls_logits, is_impossible)\n total_loss += cls_loss * 0.5\n return FlaubertSquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)\n else:\n bsz, slen, hsz = hidden_states.size()\n start_log_probs = nn.functional.softmax(start_logits, dim=-1)\n start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1)\n start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)\n start_states = torch.gather(hidden_states, -2, start_top_index_exp)\n start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)\n hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states)\n p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None\n end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)\n end_log_probs = nn.functional.softmax(end_logits, dim=1)\n end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1)\n end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)\n end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)\n start_states = torch.einsum('blh,bl->bh', hidden_states, start_log_probs)\n cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)\n if not return_dict:\n return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)\n else:\n return FlaubertSquadHeadOutput(start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits)", "docstring": "A SQuAD head inspired by XLNet.\n\nArgs:\n config 
([`FlaubertConfig`]):\n The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`\n to use."} +{"repo": "tensorflow", "function": "def compute_dtype(self):\n return self._compute_dtype", "docstring": "The compute dtype of this policy.\n\nThis is the dtype layers will do their computations in. Typically layers\noutput tensors with the compute dtype as well.\n\nNote that even if the compute dtype is float16 or bfloat16, hardware devices\nmay not do individual adds, multiplies, and other fundamental operations in\nfloat16 or bfloat16, but instead may do some of them in float32 for numeric\nstability. The compute dtype is the dtype of the inputs and outputs of the\nTensorFlow ops that the layer executes. Internally, many TensorFlow ops will\ndo certain internal calculations in float32 or some other device-internal\nintermediate format with higher precision than float16/bfloat16, to increase\nnumeric stability.\n\nFor example, a `tf.keras.layers.Dense` layer, when run on a GPU with a\nfloat16 compute dtype, will pass float16 inputs to `tf.linalg.matmul`. But,\n`tf.linalg.matmul` will do use float32 intermediate math. The performance\nbenefit of float16 is still apparent, due to increased memory bandwidth and\nthe fact modern GPUs have specialized hardware for computing matmuls on\nfloat16 inputs while still keeping intermediate computations in float32.\n\nReturns:\n The compute dtype of this policy, as a string."} +{"repo": "tensorflow", "function": "def incomplete_size(self, name=None):\n if name is None:\n name = '%s_BarrierIncompleteSize' % self._name\n return gen_data_flow_ops.barrier_incomplete_size(self._barrier_ref, name=name)", "docstring": "Compute the number of incomplete elements in the given barrier.\n\nArgs:\n name: A name for the operation (optional).\n\nReturns:\n A single-element tensor containing the number of incomplete elements in\n the given barrier."} +{"repo": "pyglove", "function": "def trace(fun: Callable[[], Any], *, where: Optional[Callable[[base.HyperPrimitive], bool]]=None, require_hyper_name: bool=False, per_thread: bool=True) -> DynamicEvaluationContext:\n context = DynamicEvaluationContext(where=where, require_hyper_name=require_hyper_name, per_thread=per_thread)\n with context.collect():\n fun()\n return context", "docstring": "Trace the hyper primitives called within a function by executing it.\n\nSee examples in :class:`pyglove.hyper.DynamicEvaluationContext`.\n\nArgs:\n fun: Function in which the search space is defined.\n where: A callable object that decide whether a hyper primitive should be\n included when being instantiated under `collect`.\n If None, all hyper primitives under `collect` will be included.\n require_hyper_name: If True, all hyper primitives defined in this scope\n will need to carry their names, which is usually a good idea when the\n function that instantiates the hyper primtives need to be called multiple\n times.\n per_thread: If True, the context manager will be applied to current thread\n only. 
Otherwise, it will be applied on current process.\n\nReturns:\n An DynamicEvaluationContext that can be passed to `pg.sample`."} +{"repo": "transformers", "function": "def _scale_boxes(boxes, target_sizes):\n if isinstance(target_sizes, (list, tuple)):\n image_height = torch.tensor([i[0] for i in target_sizes])\n image_width = torch.tensor([i[1] for i in target_sizes])\n elif isinstance(target_sizes, torch.Tensor):\n image_height, image_width = target_sizes.unbind(1)\n else:\n raise ValueError('`target_sizes` must be a list, tuple or torch.Tensor')\n scale_factor = torch.stack([image_width, image_height, image_width, image_height], dim=1)\n scale_factor = scale_factor.unsqueeze(1).to(boxes.device)\n boxes = boxes * scale_factor\n return boxes", "docstring": "Scale batch of bounding boxes to the target sizes.\n\nArgs:\n boxes (`torch.Tensor` of shape `(batch_size, num_boxes, 4)`):\n Bounding boxes to scale. Each box is expected to be in (x1, y1, x2, y2) format.\n target_sizes (`List[Tuple[int, int]]` or `torch.Tensor` of shape `(batch_size, 2)`):\n Target sizes to scale the boxes to. Each target size is expected to be in (height, width) format.\n\nReturns:\n `torch.Tensor` of shape `(batch_size, num_boxes, 4)`: Scaled bounding boxes."} +{"repo": "tensorflow", "function": "def set_callback(self, property_name, callback):\n if property_name not in self._config:\n raise KeyError('%s is not a valid property name.' % property_name)\n if not callable(callback):\n raise TypeError('The callback object provided is not callable.')\n self._set_callbacks[property_name] = callback", "docstring": "Set a set-callback for given property.\n\nArgs:\n property_name: Name of the property.\n callback: The callback as a `callable` of signature:\n def cbk(config):\n where config is the config after it is set to the new value.\n The callback is invoked each time the set() method is called with the\n matching property_name.\n\nRaises:\n KeyError: If property_name does not exist.\n TypeError: If `callback` is not callable."} +{"repo": "tensorflow", "function": "def configure_collective_ops(self, collective_leader='', scoped_allocator_enabled_ops=('CollectiveReduce',), use_nccl_communication=False, device_filters=None):\n if self._collective_leader is not None:\n if self._collective_leader != collective_leader or self._collective_scoped_allocator_enabled_ops != scoped_allocator_enabled_ops or self._collective_use_nccl_communication != use_nccl_communication or (self._collective_device_filters != device_filters):\n raise ValueError('Collective ops are already configured.')\n else:\n return\n if self._context_handle is not None:\n raise RuntimeError('Collective ops must be configured at program startup')\n self._collective_leader = collective_leader\n self._collective_scoped_allocator_enabled_ops = scoped_allocator_enabled_ops\n self._collective_use_nccl_communication = use_nccl_communication\n self._collective_device_filters = device_filters", "docstring": "Configure collective ops.\n\n Collective group leader is necessary for collective ops to run, other\n configurations are mainly for the purpose of performance.\n\nArgs:\n collective_leader: a device string for collective leader, e.g.\n \"/job:worker/replica:0/task:0\"; empty string means local execution of\n collective ops.\n scoped_allocator_enabled_ops: a tuple or a list of op names for scoped\n allocator to run with.\n use_nccl_communication: whether to use nccl communication for collective\n ops.\n device_filters: a tuple or a list of device strings. 
If set, corresponding\n task can only see the devices filtered by these device filters.\n\nRaises:\n RuntimeError: if this method is not called at program startup."} +{"repo": "etils", "function": "def dataclass(cls=None, *, kw_only=False, replace=True, repr=True, auto_cast=True, contextvars=True, allow_unfrozen=False):\n if cls is None:\n return functools.partial(dataclass, kw_only=kw_only, replace=replace, repr=repr, auto_cast=auto_cast, allow_unfrozen=allow_unfrozen)\n if kw_only:\n cls = _make_kw_only(cls)\n if repr:\n cls = add_repr(cls)\n if replace:\n cls = _add_replace(cls)\n if allow_unfrozen:\n cls = frozen_utils.add_unfrozen(cls)\n descriptor_fns = []\n if auto_cast:\n descriptor_fns.append(helpers.DescriptorInfo(annotation=cast_utils.AutoCast, descriptor_fn=cast_utils.make_auto_cast_descriptor))\n if contextvars:\n descriptor_fns.append(helpers.DescriptorInfo(annotation=context.ContextVar, descriptor_fn=context.make_contextvar_descriptor))\n cls = helpers.wrap_new(cls, descriptor_fns)\n return cls", "docstring": "Augment a dataclass with additional features.\n\n`auto_cast`: Auto-convert init assignements to the annotated class.\n\n```python\n@edc.dataclass\nclass A:\n path: edc.AutoCast[epath.Path]\n some_enum: edc.AutoCast[MyEnum]\n x: edc.AutoCast[str]\n\na = A(\n path='/some/path',\n some_enum='A',\n x=123\n)\n# Fields annotated with `AutoCast` are automatically casted to their type\nassert a.path == epath.Path('/some/path')\nassert a.some_enum is MyEnum.A\nassert a.x == '123'\n```\n\n`allow_unfrozen`: allow nested dataclass to be updated. This add two methods:\n\n * `.unfrozen()`: Create a lazy deep-copy of the current dataclass. Updates\n to nested attributes will be propagated to the top-level dataclass.\n * `.frozen()`: Returns the frozen dataclass, after it was mutated.\n\nExample:\n\n```python\nold_x = X(y=Y(z=123))\n\nx = old_x.unfrozen()\nx.y.z = 456\nx = x.frozen()\n\nassert x == X(y=Y(z=123)) # Only new x is mutated\nassert old_x == X(y=Y(z=456)) # Old x is not mutated\n```\n\nNote:\n\n* Only the last `.frozen()` call resolve the dataclass by calling `.replace`\n recursivelly.\n* Dataclass returned by `.unfrozen()` and nested attributes are not the\n original dataclass but proxy objects which track the mutations. As such,\n those object are not compatible with `isinstance()`, `jax.tree.map`,...\n* Only the top-level dataclass need to be `allow_unfrozen=True`\n* Avoid using `unfrozen` if 2 attributes of the dataclass point to the\n same nested dataclass. Updates on one attribute might not be reflected on\n the other.\n\n ```python\n y = Y(y=123)\n x = X(x0=y, x1=y) # Same instance assigned twice in `x0` and `x1`\n x = x.unfrozen()\n x.x0.y = 456 # Changes in `x0` not reflected in `x1`\n x = x.frozen()\n\n assert x == X(x0=Y(y=456), x1=Y(y=123))\n ```\n\n This is because only attributes which are accessed are tracked, so `etils`\n do not know the object exist somewhere else in the attribute tree.\n\n* After `.frozen()` has been called, any of the temporary sub-attribute\n become invalid:\n\n ```python\n a = a.unfrozen()\n y = a.y\n a = a.frozen()\n\n y.x # Raise error (created between the unfrozen/frozen call)\n a.y.x # Work\n ```\n\n`contextvars`: Fields annotated as `edc.ContextVar` are wrapped in\na `contextvars.ContextVar`. 
Afterward each thread / asyncio coroutine will\nhave its own version of the fields (similarly to `threading.local`).\n\nThe contextvars are lazily initialized at first usage.\n\nExample:\n\n```python\n@edc.dataclass\n@dataclasses.dataclass\nclass Context:\n thread_id: edc.ContextVar[int] = dataclasses.field(\n default_factory=threading.get_native_id\n )\n stack: edc.ContextVar[list[str]] = dataclasses.field(default_factory=list)\n\n# Global context object\ncontext = Context(thread_id=0)\n\ndef worker():\n # Inside each thread, the worker use its own context\n assert context.thread_id != 0\n context.stack.append(1)\n\nwith concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\n for _ in range(10):\n executor.submit(worker)\n```\n\nArgs:\n cls: The dataclass to decorate\n kw_only: If True, make the dataclass `__init__` keyword-only.\n replace: If `True`, add a `.replace(` alias of `dataclasses.replace`.\n repr: If `True`, the class `__repr__` will return a pretty-printed `str`\n (one attribute per line)\n auto_cast: If `True`, fields annotated as `x: edc.AutoCast[Cls]` will be\n converted to `x: Cls = edc.field(validator=Cls)`.\n contextvars: It `True`, fields annotated as `x: edc.AutoCast[T]` are\n converted to `contextvars`. This allow to have a `threading.local`-like\n API for contextvars.\n allow_unfrozen: If `True`, add `.frozen`, `.unfrozen` methods.\n\nReturns:\n Decorated class"} +{"repo": "tensorflow", "function": "def show_backref(target, max_depth=3):\n if objgraph is None:\n raise NotImplementedError('objgraph is not installed.')\n string_io = io.StringIO()\n objgraph.show_backrefs(target, max_depth=max_depth, output=string_io)\n graph = string_io.getvalue()\n string_io.close()\n return graph", "docstring": "Returns a dot graph of all the objects that are referencing the target.\n\nA object referencing graph is useful to debug memory leak like circular\nreference. objgraph provides a good visualization of the memory graph than\nmost python built-in utilities like gc.get_referrers(), which are not\nhuman-readable sometimes.\n\nThe dot graph will be written to a string IO object, and can be rendered with\ngraphviz in operating system.\nE.g. dot -Tpng {$dot_graph} -o output.png\nArgs:\n target: The target object for the memory graph.\n max_depth: The maximum depth of the graph. By default 3 layers of references\n are used. 
Increases this a lot may result in the graph growing too big.\n\nReturns:\n A string that contains the object reference graph.\nRaises:\n NotImplementedError: if objgraph is not installed."} +{"repo": "tensorflow", "function": "def load_model_from_hdf5(filepath, custom_objects=None, compile=True):\n if h5py is None:\n raise ImportError('`load_model` requires h5py.')\n if not custom_objects:\n custom_objects = {}\n opened_new_file = not isinstance(filepath, h5py.File)\n if opened_new_file:\n f = h5py.File(filepath, mode='r')\n else:\n f = filepath\n model = None\n try:\n model_config = f.attrs.get('model_config')\n if model_config is None:\n raise ValueError('No model found in config file.')\n if hasattr(model_config, 'decode'):\n model_config = model_config.decode('utf-8')\n model_config = json_utils.decode(model_config)\n model = model_config_lib.model_from_config(model_config, custom_objects=custom_objects)\n load_weights_from_hdf5_group(f['model_weights'], model.layers)\n if compile:\n training_config = f.attrs.get('training_config')\n if hasattr(training_config, 'decode'):\n training_config = training_config.decode('utf-8')\n if training_config is None:\n logging.warning('No training configuration found in the save file, so the model was *not* compiled. Compile it manually.')\n return model\n training_config = json_utils.decode(training_config)\n model.compile(**saving_utils.compile_args_from_training_config(training_config, custom_objects), from_serialized=True)\n saving_utils.try_build_compiled_arguments(model)\n if 'optimizer_weights' in f:\n try:\n model.optimizer._create_all_weights(model.trainable_variables)\n except (NotImplementedError, AttributeError):\n logging.warning('Error when creating the weights of optimizer {}, making it impossible to restore the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.')\n optimizer_weight_values = load_optimizer_weights_from_hdf5_group(f)\n try:\n model.optimizer.set_weights(optimizer_weight_values)\n except ValueError:\n logging.warning('Error in loading the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.')\n finally:\n if opened_new_file:\n f.close()\n return model", "docstring": "Loads a model saved via `save_model_to_hdf5`.\n\nArgs:\n filepath: One of the following:\n - String, path to the saved model\n - `h5py.File` object from which to load the model\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n compile: Boolean, whether to compile the model\n after loading.\n\nReturns:\n A Keras model instance. If an optimizer was found\n as part of the saved model, the model is already\n compiled. Otherwise, the model is uncompiled and\n a warning will be displayed. 
When `compile` is set\n to False, the compilation is omitted without any\n warning.\n\nRaises:\n ImportError: if h5py is not available.\n ValueError: In case of an invalid savefile."} +{"repo": "transformers", "function": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is not None:\n return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n return [1] + [0] * len(token_ids_0) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token."} +{"repo": "keras", "function": "def take_along_axis(x, indices, axis=None):\n if any_symbolic_tensors((x, indices)):\n return TakeAlongAxis(axis=axis).symbolic_call(x, indices)\n return backend.numpy.take_along_axis(x, indices, axis=axis)", "docstring": "Select values from `x` at the 1-D `indices` along the given axis.\n\nArgs:\n x: Source tensor.\n indices: The indices of the values to extract.\n axis: The axis over which to select values. By default, the flattened\n input tensor is used.\n\nReturns:\n The corresponding tensor of values."} +{"repo": "tensorflow", "function": "def compute_gradient(x, x_shape, y, y_shape, x_init_value=None, delta=0.001, init_targets=None, extra_feed_dict=None):\n if extra_feed_dict is None:\n extra_feed_dict = {}\n if isinstance(x, list):\n return _compute_gradient_list(x, x_shape, y, y_shape, x_init_value, delta, init_targets, extra_feed_dict=extra_feed_dict)\n else:\n if init_targets is not None:\n assert isinstance(init_targets, (list, tuple))\n for init in init_targets:\n init.run()\n dx, dy = _compute_dx_and_dy(x, y, y_shape)\n ret = _compute_gradient(x, x_shape, dx, y, y_shape, dy, x_init_value, delta, extra_feed_dict=extra_feed_dict)\n return ret", "docstring": "Computes and returns the theoretical and numerical Jacobian.\n\nIf `x` or `y` is complex, the Jacobian will still be real but the\ncorresponding Jacobian dimension(s) will be twice as large. This is required\neven if both input and output is complex since TensorFlow graphs are not\nnecessarily holomorphic, and may have gradients not expressible as complex\nnumbers. For example, if `x` is complex with shape `[m]` and `y` is complex\nwith shape `[n]`, each Jacobian `J` will have shape `[m * 2, n * 2]` with\n\n J[:m, :n] = d(Re y)/d(Re x)\n J[:m, n:] = d(Im y)/d(Re x)\n J[m:, :n] = d(Re y)/d(Im x)\n J[m:, n:] = d(Im y)/d(Im x)\n\nArgs:\n x: a tensor or list of tensors\n x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,\n then this is the list of shapes.\n y: a tensor\n y_shape: the dimensions of y as a tuple or an array of ints.\n x_init_value: (optional) a numpy array of the same shape as \"x\"\n representing the initial value of x. If x is a list, this should be a list\n of numpy arrays. 
If this is none, the function will pick a random tensor\n as the initial value.\n delta: (optional) the amount of perturbation.\n init_targets: list of targets to run to initialize model params.\n extra_feed_dict: dict that allows fixing specified tensor values\n during the Jacobian calculation.\n\nReturns:\n Two 2-d numpy arrays representing the theoretical and numerical\n Jacobian for dy/dx. Each has \"x_size\" rows and \"y_size\" columns\n where \"x_size\" is the number of elements in x and \"y_size\" is the\n number of elements in y. If x is a list, returns a list of two numpy arrays."} +{"repo": "transformers", "function": "def post_process_panoptic_sample(out_logits: np.ndarray, masks: np.ndarray, boxes: np.ndarray, processed_size: Tuple[int, int], target_size: Tuple[int, int], is_thing_map: Dict, threshold=0.85) -> Dict:\n scores, labels = score_labels_from_class_probabilities(out_logits)\n keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)\n cur_scores = scores[keep]\n cur_classes = labels[keep]\n cur_boxes = center_to_corners_format(boxes[keep])\n if len(cur_boxes) != len(cur_classes):\n raise ValueError('Not as many boxes as there are classes')\n cur_masks = masks[keep]\n cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)\n cur_masks = safe_squeeze(cur_masks, 1)\n b, h, w = cur_masks.shape\n cur_masks = cur_masks.reshape(b, -1)\n stuff_equiv_classes = defaultdict(list)\n for k, label in enumerate(cur_classes):\n if not is_thing_map[label]:\n stuff_equiv_classes[label].append(k)\n seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)\n area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))\n if cur_classes.size() > 0:\n filtered_small = np.array([a <= 4 for a in area], dtype=bool)\n while filtered_small.any():\n cur_masks = cur_masks[~filtered_small]\n cur_scores = cur_scores[~filtered_small]\n cur_classes = cur_classes[~filtered_small]\n seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)\n area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))\n filtered_small = np.array([a <= 4 for a in area], dtype=bool)\n else:\n cur_classes = np.ones((1, 1), dtype=np.int64)\n segments_info = [{'id': i, 'isthing': is_thing_map[cat], 'category_id': int(cat), 'area': a} for i, (cat, a) in enumerate(zip(cur_classes, area))]\n del cur_classes\n with io.BytesIO() as out:\n PIL.Image.fromarray(seg_img).save(out, format='PNG')\n predictions = {'png_string': out.getvalue(), 'segments_info': segments_info}\n return predictions", "docstring": "Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample.\n\nArgs:\n out_logits (`torch.Tensor`):\n The logits for this sample.\n masks (`torch.Tensor`):\n The predicted segmentation masks for this sample.\n boxes (`torch.Tensor`):\n The prediced bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y,\n width, height)` and values between `[0, 1]`, relative to the size the image (disregarding padding).\n processed_size (`Tuple[int, int]`):\n The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. 
the size\n after data augmentation but before batching.\n target_size (`Tuple[int, int]`):\n The target size of the image, `(height, width)` corresponding to the requested final size of the\n prediction.\n is_thing_map (`Dict`):\n A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.\n threshold (`float`, *optional*, defaults to 0.85):\n The threshold used to binarize the segmentation masks."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n return encoder_outputs", "docstring": "input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. UMT5 is a model with relative position embeddings so you\n should be able to pad the inputs on both the right and the left.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for detail.\n\n To know more on how to prepare `input_ids` for pretraining take a look a [UMT5 Training](./umt5#training).\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, UMT5EncoderModel\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/umt5-small\")\n>>> model = UMT5EncoderModel.from_pretrained(\"google/umt5-small\")\n>>> input_ids = tokenizer(\n... \"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\"\n... ).input_ids # Batch size 1\n>>> outputs = model(input_ids=input_ids)\n>>> last_hidden_states = outputs.last_hidden_state\n```"} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.sep_token_id] + token_ids_0\n sep = [self.sep_token_id]\n return sep + token_ids_0 + sep + sep + token_ids_1", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An XLM-RoBERTa sequence has the following format:\n\n- single sequence: ` X `\n- pair of sequences: ` A B `\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "tensorflow", "function": "def streaming_restore(status, session=None):\n if context.executing_eagerly():\n return\n if session is None:\n session = get_session()\n if isinstance(status, NameBasedSaverStatus):\n raise NotImplementedError('Streaming restore not supported from name-based checkpoints when graph building. File a feature request if this limitation bothers you. 
As a workaround, consider either using tf.train.Checkpoint to load name-based checkpoints or enabling eager execution.')\n status.run_restore_ops(session=session)\n status._checkpoint.new_restore_ops_callback = lambda ops: session.run(ops, feed_dict=status._feed_dict)", "docstring": "When graph building, runs restore ops as soon as they come in.\n\nArgs:\n status: A _LoadStatus objects from an object-based saver's restore().\n Streaming restore from name-based checkpoints is not currently supported.\n session: A session to run new restore ops in."} +{"repo": "temporian", "function": "def arccos(self: EventSetOrNode) -> EventSetOrNode:\n from temporian.core.operators.unary import arccos\n return arccos(self)", "docstring": "Calculates the inverse cosine of an [`EventSet`][temporian.EventSet]'s features.\n\nCan only be used on floating point features.\n\nExample:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2, 3],\n ... features={\"M\": [1.0, 0, -1.0]},\n ... )\n >>> a.arccos()\n indexes: ...\n timestamps: [1. 2. 3.]\n 'M': [0. 1.5708 3.1416]\n ...\n\n ```\n\nReturns:\n EventSetOrNode with inverse cosine of input features."} +{"repo": "transformers", "function": "class PegasusXEncoder(PegasusXPreTrainedModel):\n\n def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding]=None):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.encoder_layerdrop\n embed_dim = config.d_model\n padding_idx = config.pad_token_id\n self.max_source_positions = config.max_position_embeddings\n embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = PegasusXScaledWordEmbedding(config.vocab_size, embed_dim, padding_idx, embed_scale=embed_scale)\n self.embed_global = nn.Embedding(config.num_global_tokens, embed_dim)\n self.embed_positions = PegasusXSinusoidalPositionalEmbedding(embed_dim)\n self.layers = nn.ModuleList([PegasusXEncoderLayer(stagger_blocks_this_layer=i % 2 == 1 and config.stagger_local_blocks, config=config) for i in range(config.encoder_layers)])\n self.layer_norm = nn.LayerNorm(config.d_model)\n self.gradient_checkpointing = False\n self.post_init()\n\n def resize_position_embeddings(self, new_num_position_embeddings: int):\n \"\"\"\n Resizes position embeddings matrix of the model if `new_num_position_embeddings !=\n config.max_position_embeddings`.\n\n Arguments:\n new_num_position_embeddings (`int`):\n The number of new position embeddings. If position embeddings are learned, increasing the size will add\n newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. 
If\n position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will\n add correct vectors at the end following the position encoding algorithm, whereas reducing the size\n will remove vectors from the end.\n \"\"\"\n logger.info(f'Setting `config.max_position_embeddings={new_num_position_embeddings}`...')\n self.config.max_position_embeddings = new_num_position_embeddings\n self.embed_positions = PegasusXSinusoidalPositionalEmbedding(self.config.d_model)\n self.embed_positions.to(self.device)\n\n def get_position_embeddings(self) -> nn.Embedding:\n \"\"\"\n Returns the position embeddings matrix\n \"\"\"\n return self.embed_positions\n\n def forward(self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n \"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')\n elif input_ids is not None:\n self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError('You have to specify either input_ids or inputs_embeds')\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids)\n embed_pos = self.embed_positions(inputs_embeds)\n hidden_states = inputs_embeds + embed_pos\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n batch_size, seq_len, _ = hidden_states.shape\n if attention_mask is None:\n attention_mask = torch.ones(*input_shape, dtype=inputs_embeds.dtype, device=inputs_embeds.device)\n attention_mask = attention_mask.to(dtype=hidden_states.dtype)\n mask_min_value = torch.finfo(hidden_states.dtype).min\n inverted_mask = 1.0 - attention_mask\n attention_mask = inverted_mask.masked_fill(inverted_mask.to(torch.bool), mask_min_value)\n if seq_len % self.config.block_size != 0:\n pad_len = self.config.block_size - seq_len % self.config.block_size\n hidden_states = nn.functional.pad(hidden_states, pad=(0, 0, 0, pad_len), value=0)\n attention_mask = nn.functional.pad(attention_mask, pad=(0, pad_len), value=mask_min_value)\n global_hidden_states = self.embed_global(torch.arange(self.config.num_global_tokens, device=hidden_states.device)[None].expand(batch_size, -1))\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n to_drop = False\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n to_drop = True\n if to_drop:\n layer_outputs = (None, None)\n else:\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, global_hidden_states, attention_mask, output_attentions)\n else:\n layer_outputs = encoder_layer(hidden_states, global_hidden_states, attention_mask, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n global_hidden_states = layer_outputs[1]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[2],)\n hidden_states = hidden_states[:, :seq_len]\n hidden_states = self.layer_norm(hidden_states)\n if output_hidden_states:\n encoder_states = encoder_states + ((hidden_states, global_hidden_states),)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of *config.encoder_layers* self 
attention layers. Each layer is a\n[`PegasusXEncoderLayer`].\n\nArgs:\n config: PegasusXConfig\n embed_tokens (nn.Embedding): output embedding"}
+{"repo": "transformers", "function": "def masks_to_boxes(masks: np.ndarray) -> np.ndarray:\n if masks.size == 0:\n return np.zeros((0, 4))\n h, w = masks.shape[-2:]\n y = np.arange(0, h, dtype=np.float32)\n x = np.arange(0, w, dtype=np.float32)\n y, x = np.meshgrid(y, x, indexing='ij')\n x_mask = masks * np.expand_dims(x, axis=0)\n x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1)\n x = np.ma.array(x_mask, mask=~np.array(masks, dtype=bool))\n x_min = x.filled(fill_value=100000000.0)\n x_min = x_min.reshape(x_min.shape[0], -1).min(-1)\n y_mask = masks * np.expand_dims(y, axis=0)\n y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1)\n y = np.ma.array(y_mask, mask=~np.array(masks, dtype=bool))\n y_min = y.filled(fill_value=100000000.0)\n y_min = y_min.reshape(y_min.shape[0], -1).min(-1)\n return np.stack([x_min, y_min, x_max, y_max], 1)", "docstring": "Compute the bounding boxes around the provided panoptic segmentation masks.\n\nArgs:\n masks: masks in format `[number_masks, height, width]` where `number_masks` is the number of masks\n\nReturns:\n boxes: bounding boxes in format `[number_masks, 4]` in xyxy format"}
+{"repo": "tensorflow", "function": "def batch_sizes_for_worker(global_batch_size, num_workers, num_replicas_per_worker, worker_index):\n num_subbatches = num_workers * num_replicas_per_worker\n offset = worker_index * num_replicas_per_worker\n const_value = tensor_util.constant_value(global_batch_size)\n if const_value is not None:\n global_batch_size = const_value\n floor = global_batch_size // num_subbatches\n num_ceil = global_batch_size - num_subbatches * floor\n if const_value is not None:\n worker_0 = [floor + 1] * num_ceil + [floor] * (num_subbatches - num_ceil)\n return ops.convert_to_tensor(worker_0[offset:] + worker_0[:offset], dtype=dtypes.int64, name='batch_sizes')\n worker_0 = array_ops.ones(num_subbatches, dtype=dtypes.int64)\n worker_0 = floor * worker_0 + array_ops.concat([array_ops.ones(num_ceil, dtype=dtypes.int64), array_ops.zeros(num_subbatches - num_ceil, dtype=dtypes.int64)], axis=0)\n return array_ops.concat([worker_0[offset:], worker_0[:offset]], axis=0)", "docstring": "Determines how to rebatch a dataset for the given worker.\n\nGiven the global batch size, number of workers, number of replicas per worker,\nand worker index, returns the correct batch sizes for rebatching a dataset\non worker `worker_index` of `num_workers`, such that each global step (across\nall workers and replicas) will consume global_batch_size elements. The\nreturned value should be passed as the `batch_sizes` input parameter to\n`tf.data.experimental.rebatch()`. The returned batch sizes meet the following\nconstraints:\n\nLet G = global_batch_size, W = num_workers, R = num_replicas_per_worker\n(A) for any worker, len(batch_sizes) = W * R\n(B) for any worker, sum(batch_sizes) == G\n(C) for any global step (i.e.
R iterations on each worker), the sum of batches\n consumed by replicas across all workers is G.\n(D) any two batch sizes of any two replicas differ by at most one.\n\nFor example, suppose we have G = 7, W = 2, R = 2, and suppose we have two\nfiles which each contain 7 elements:\n\n```python\n# WORKER 0\nbatch_sizes_0 = batch_sizes_for_worker(global_batch_size=global_batch_size,\n num_workers=2,\n num_replicas_per_worker=2,\n worker_index=0)\nprint(batch_sizes_0)\n>> [2, 2, 2, 1]\n\ndataset_0 = tf.data.Dataset.from_tensor_slices([\"file_a\", \"file_b\"])\ndataset_0 = dataset_0.shard(num_shards, index=0)\ndataset_0 = dataset_0.batch(7)\ndataset_0 = dataset_0.apply(tf.data.experimental.rebatch(batch_sizes_0))\nfor elem in dataset_0:\n print(elem)\n>> [[A0, A1], [A2, A3], [A4, A5], [A6]]\n\n# WORKER 1\nbatch_sizes_1 = batch_sizes_for_worker(global_batch_size=global_batch_size,\n num_workers=2,\n num_replicas_per_worker=2,\n worker_index=1)\nprint(batch_sizes_1)\n>> [2, 1, 2, 2]\n\ndataset_1 = tf.data.Dataset.from_tensor_slices([\"file_a\", \"file_b\"])\ndataset_1 = dataset_1.shard(num_shards, index=1)\ndataset_1 = dataset_1.batch(7)\ndataset_1 = dataset_1.apply(tf.data.experimental.rebatch(batch_sizes_1))\nfor elem in dataset_1:\n print(elem)\n>> [[B0, B1], [B2], [B3, B4], [B5, B6]]\n```\n\nThe above example will produce the following elements:\n\nStep 1:\n Worker 0 Replica 0: [A0, A1]\n Worker 0 Replica 1: [A2, A3]\n Worker 1 Replica 0: [B0, B1]\n Worker 1 Replica 1: [B2]\nTotal batch size = 7\n\nStep 2:\n Worker 0 Replica 0: [A4, A5]\n Worker 0 Replica 1: [A6]\n Worker 1 Replica 0: [B3, B4]\n Worker 1 Replica 1: [B5, B6]\nTotal batch size = 7\n\nArgs:\n global_batch_size: A `tf.int64` scalar, representing the global batch size.\n num_workers: An integer representing the number of workers the dataset will\n be distributed across.\n num_replicas_per_worker: An integer representing the number of replicas per\n worker. All workers are assumed to have the same number of replicas.\n worker_index: An integer index of the worker to be rebatched.\n\nReturns:\n A `tf.int64` vector, representing the batch sizes to rebatch the dataset\n into."}
+{"repo": "transformers", "function": "def sigmoid_focal_loss(inputs: torch.Tensor, targets: torch.Tensor, num_boxes: int, alpha: float=0.25, gamma: float=2):\n prob = inputs.sigmoid()\n ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction='none')\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * (1 - p_t) ** gamma\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n return loss.sum() / num_boxes", "docstring": "Loss used in RetinaNet for dense detection: https://huggingface.co/papers/1708.02002.\n\nArgs:\n inputs (`torch.FloatTensor` of arbitrary shape):\n The predictions for each example.\n targets (`torch.FloatTensor` with the same shape as `inputs`)\n A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class\n and 1 for the positive class).\n num_boxes (`int`):\n The total number of boxes in the batch.\n alpha (`float`, *optional*, defaults to 0.25):\n Optional weighting factor in the range (0,1) to balance positive vs.
negative examples.\n gamma (`int`, *optional*, defaults to 2):\n Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples.\n\nReturns:\n Loss tensor"}
+{"repo": "keras", "function": "def pad(boxes, top, left, height=None, width=None, bounding_box_format='xyxy'):\n if bounding_box_format != 'xyxy':\n raise NotImplementedError\n box_utils = BoundingBox()\n if backend_utils.in_tf_graph():\n box_utils.backend.set_backend('tensorflow')\n outputs = box_utils.pad(boxes, top, left)\n box_utils.backend.reset()\n return outputs", "docstring": "Pads bounding boxes by adding top and left offsets.\n\nThis function adds padding to the bounding boxes by increasing the 'top'\nand 'left' coordinates by the specified amounts. The method assumes the\ninput bounding_box_format is `xyxy`.\n\nArgs:\n boxes: Bounding boxes to pad. Shape `(N, 4)` or `(batch, N, 4)`.\n top: Vertical padding to add.\n left: Horizontal padding to add.\n height: Image height. Defaults to None.\n width: Image width. Defaults to None.\n bounding_box_format: The format of the input bounding boxes. Defaults to\n `\"xyxy\"`.\n\nReturns:\n Padded bounding boxes in the original format."}
+{"repo": "tensorflow", "function": "def _create_variables(self, num_clusters):\n init_value = array_ops.placeholder_with_default([], shape=None)\n cluster_centers = variable_v1.VariableV1(init_value, name=CLUSTERS_VAR_NAME, validate_shape=False)\n cluster_centers_initialized = variable_v1.VariableV1(False, dtype=dtypes.bool, name='initialized')\n if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:\n cluster_centers_updated = variable_v1.VariableV1(init_value, name='clusters_updated', validate_shape=False)\n update_in_steps = variable_v1.VariableV1(self._mini_batch_steps_per_iteration, dtype=dtypes.int64, name='update_in_steps')\n cluster_counts = variable_v1.VariableV1(array_ops.zeros([num_clusters], dtype=dtypes.int64))\n else:\n cluster_centers_updated = cluster_centers\n update_in_steps = None\n cluster_counts = variable_v1.VariableV1(array_ops.ones([num_clusters], dtype=dtypes.int64)) if self._use_mini_batch else None\n return (cluster_centers, cluster_centers_initialized, cluster_counts, cluster_centers_updated, update_in_steps)", "docstring": "Creates variables.\n\nArgs:\n num_clusters: an integer Tensor providing the number of clusters.\n\nReturns:\n Tuple with following elements:\n - cluster_centers: a Tensor for storing cluster centers\n - cluster_centers_initialized: bool Variable indicating whether clusters\n are initialized.\n - cluster_counts: a Tensor for storing counts of points assigned to this\n cluster.
This is used by mini-batch training.\n - cluster_centers_updated: Tensor representing copy of cluster centers\n that are updated every step.\n - update_in_steps: numbers of steps left before we sync\n cluster_centers_updated back to cluster_centers."}
+{"repo": "transformers", "function": "def _pad_image(self, images: 'torch.tensor', size_divisibility: int=32) -> 'torch.tensor':\n height, width = get_image_size(images, channel_dim=ChannelDimension.FIRST)\n pad_height = 0 if height % size_divisibility == 0 else size_divisibility - height % size_divisibility\n pad_width = 0 if width % size_divisibility == 0 else size_divisibility - width % size_divisibility\n if pad_width + pad_height > 0:\n padding = (0, 0, pad_width, pad_height)\n images = F.pad(images, padding)\n return images", "docstring": "Pads an image or batched images constantly so that width and height are divisible by size_divisibility\n\nArgs:\n image (`torch.tensor`):\n Image to pad.\n size_divisibility (`int`, *optional*, defaults to 32):\n The width and height of the image will be padded to be divisible by this number."}
+{"repo": "tensorflow", "function": "def _remove_ancillary_layers(model, layer_map, layers):\n ancillary_layers = []\n if not model._is_graph_network:\n return (layers, ancillary_layers)\n depths = [depth for depth in model._nodes_by_depth.keys() if depth < 0]\n depths.sort(reverse=True)\n for depth in depths:\n for node in model._nodes_by_depth[depth]:\n ancillary_layers.append(layer_map[node.outbound_layer])\n return ([l for l in layers if l not in ancillary_layers], ancillary_layers)", "docstring": "Removes and returns any ancillary layers from `layers` based on `model`.\n\nAncillary layers are part of the model topology but not used to compute the\nmodel outputs, e.g., layers from `add_loss` and `add_metric`.\n\nArgs:\n model: A Keras Model.\n layer_map: A map from layers in the `model` to those in `layers`.\n layers: A list of all layers.\n\nReturns:\n Two lists of layers: (1) `layers` with the ancillary layers removed, and (2)\n the ancillary layers."}
+{"repo": "tensorflow", "function": "def set_number_of_shards(self, number_of_shards):\n for policy in self._sharding_policies:\n policy.set_number_of_shards(number_of_shards)\n policy.set_number_of_partitions(self._number_of_partitions)\n self._validate()", "docstring": "Sets the number of shards to use for the InfeedQueue.\n\nArgs:\n number_of_shards: number of ways to shard the InfeedQueue.\n\nRaises:\n ValueError: if number_of_shards is not > 0; or the policies have\n been frozen and number_of_shards was already set to something\n else."}
+{"repo": "tensorflow", "function": "def _save_and_write_assets(self, assets_collection_to_add=None):\n asset_filename_map = _maybe_save_assets(_add_asset_to_collection, assets_collection_to_add)\n if not asset_filename_map:\n tf_logging.info('No assets to write.')\n return\n copy_assets_to_destination_dir(asset_filename_map, self._export_dir, self._saved_asset_files)", "docstring": "Saves asset to the meta graph and writes asset files to disk.\n\nArgs:\n assets_collection_to_add: The collection where the asset paths are setup."}
+{"repo": "pyglove", "function": "def oneof(candidates: Iterable[Any], *, name: Optional[str]=None, hints: Optional[Any]=None) -> Any:\n return OneOf(candidates=list(candidates), name=name, hints=hints)", "docstring": "N choose 1.\n\nExample::\n\n @pg.members([\n ('x', pg.typing.Int())\n ])\n class A(pg.Object):\n pass\n\n # A single categorical choice:\n v = pg.oneof([1, 2, 3])\n\n # A
complex type as candidate.\n v1 = pg.oneof(['a', {'x': 1}, A(1)])\n\n # A hierarchical categorical choice:\n v2 = pg.oneof([\n 'foo',\n 'bar',\n A(pg.oneof([1, 2, 3]))\n ])\n\nSee also:\n\n * :class:`pyglove.hyper.OneOf`\n * :func:`pyglove.manyof`\n * :func:`pyglove.floatv`\n * :func:`pyglove.permutate`\n * :func:`pyglove.evolve`\n\n.. note::\n\n Under symbolic mode (by default), `pg.oneof` returns a ``pg.hyper.OneOf``\n object. Under dynamic evaluation mode, which is called under the context of\n :meth:`pyglove.hyper.DynamicEvaluationContext.collect` or\n :meth:`pyglove.hyper.DynamicEvaluationContext.apply`, it evaluates to\n a concrete candidate value.\n\n To use conditional search space in dynamic evaluation mode, the candidate\n should be wrapped with a `lambda` function, which is not necessary under\n symbolic mode. For example::\n\n pg.oneof([lambda: pg.oneof([0, 1], name='sub'), 2], name='root')\n\nArgs:\n candidates: Candidates to select from. Items of candidate can be any type,\n therefore it can have nested hyper primitives, which forms a hierarchical\n search space.\n name: A name that can be used to identify a decision point in the search\n space. This is needed when the code to instantiate the same hyper\n primitive may be called multiple times under a\n `pg.DynamicEvaluationContext.collect` context or under a\n `pg.DynamicEvaluationContext.apply` context.\n hints: An optional value which acts as a hint for the controller.\n\nReturns:\n In symbolic mode, this function returns a `ChoiceValue`.\n In dynamic evaluation mode, this function returns one of the items in\n `candidates`.\n If evaluated under a `pg.DynamicEvaluationContext.apply` scope,\n this function will return the selected candidate.\n If evaluated under a `pg.DynamicEvaluationContext.collect`\n scope, it will return the first candidate."} +{"repo": "tensorflow", "function": "def _validate_snapshot(path: str, metadata: snapshot_pb2.DistributedSnapshotMetadata, element_spec: Any, compression: str) -> None:\n error_file = _pywrap_snapshot_utils.TF_DATA_SnapshotErrorFilePath(path)\n if gfile.Exists(error_file):\n with gfile.GFile(error_file, 'r') as f:\n raise ValueError(f'Failed to load tf.data snapshot at {path}. The save job failed to write it. Status: {f.read()}')\n snapshot_element_spec = _parse_element_spec(metadata.element_spec)\n if element_spec and element_spec != snapshot_element_spec:\n raise ValueError(f'Failed to load tf.data snapshot at {path}. User specified element_spec {element_spec}, but the actual element_spec is {snapshot_element_spec}.')\n if compression and compression != metadata.compression:\n raise ValueError(f'Failed to load tf.data snapshot at {path}. User specified compression {compression}, but the actual compression is {metadata.compression}.')", "docstring": "Validates a tf.data distributed snapshot.\n\nArgs:\n path: Root path of the distributed snapshot.\n metadata: The DistributedSnapshotMetadata of the snapshot.\n element_spec: Dataset element_spec.\n compression: Compression method used for saving.\n\nRaises:\n ValueError if the snapshot is invalid."} +{"repo": "keras", "function": "class TorchModuleWrapper(Layer):\n\n def __init__(self, module, name=None, output_shape=None, **kwargs):\n super().__init__(name=name, **kwargs)\n import torch.nn as nn\n from keras.src.backend.torch.core import get_device\n if isinstance(module, nn.modules.lazy.LazyModuleMixin) and module.has_uninitialized_params():\n raise ValueError(f'LazyModules are not supported unless they are already initialized. 
Received uninitialized LazyModule: module={module}')\n self.module = module.to(get_device())\n self._track_module_parameters()\n self.output_shape = output_shape\n\n def parameters(self, recurse=True):\n return self.module.parameters(recurse=recurse)\n\n def _track_module_parameters(self):\n for param in self.module.parameters():\n variable = backend.Variable(initializer=param, trainable=param.requires_grad)\n self._track_variable(variable)\n self.built = True\n\n def call(self, *args, training=None, **kwargs):\n if training is False:\n self.eval()\n else:\n self.train()\n return self.module(*args, **kwargs)\n\n def save_own_variables(self, store):\n \"\"\"Saves model's state from `state_dict`.\n `model.parameters` excludes some of model's state like\n `BatchNorm` mean and variance. So, use `state_dict` to obtain\n all of model's state.\n \"\"\"\n state_dict = self.module.state_dict()\n for key in state_dict.keys():\n store[key] = convert_to_numpy(state_dict[key])\n\n def load_own_variables(self, store):\n \"\"\"Loads model's state via `state_dict`.\"\"\"\n state_dict = {}\n for key in store.keys():\n if isinstance(key, bytes):\n key = key.decode()\n state_dict[key] = convert_to_tensor(store[key])\n self.module.load_state_dict(state_dict)\n\n def compute_output_shape(self, input_shape):\n if self.output_shape is None:\n return super().compute_output_shape(input_shape)\n return self.output_shape\n\n def get_config(self):\n base_config = super().get_config()\n import torch\n buffer = io.BytesIO()\n torch.save(self.module, buffer)\n config = {'module': buffer.getvalue(), 'output_shape': self.output_shape}\n return {**base_config, **config}\n\n @classmethod\n def from_config(cls, config):\n import torch\n if 'module' in config:\n buffer = io.BytesIO(config['module'])\n config['module'] = torch.load(buffer, weights_only=False)\n return cls(**config)", "docstring": "Torch module wrapper layer.\n\n`TorchModuleWrapper` is a wrapper class that can turn any\n`torch.nn.Module` into a Keras layer, in particular by making its\nparameters trackable by Keras.\n\n`TorchModuleWrapper` is only compatible with the PyTorch backend and\ncannot be used with the TensorFlow or JAX backends.\n\nArgs:\n module: `torch.nn.Module` instance. If it's a `LazyModule`\n instance, then its parameters must be initialized before\n passing the instance to `TorchModuleWrapper` (e.g. by calling\n it once).\n output_shape: The shape of the output of this layer.
It helps Keras\n perform automatic shape inference.\n name: The name of the layer (string).\n\nExample:\n\nHere's an example of how the `TorchModuleWrapper` can be used with vanilla\nPyTorch modules.\n\n```python\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport keras\nfrom keras.layers import TorchModuleWrapper\n\nclass Classifier(keras.Model):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n # Wrap `torch.nn.Module`s with `TorchModuleWrapper`\n # if they contain parameters\n self.conv1 = TorchModuleWrapper(\n nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3))\n )\n self.conv2 = TorchModuleWrapper(\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3))\n )\n self.pool = nn.MaxPool2d(kernel_size=(2, 2))\n self.flatten = nn.Flatten()\n self.dropout = nn.Dropout(p=0.5)\n self.fc = TorchModuleWrapper(nn.Linear(1600, 10))\n\n def call(self, inputs):\n x = F.relu(self.conv1(inputs))\n x = self.pool(x)\n x = F.relu(self.conv2(x))\n x = self.pool(x)\n x = self.flatten(x)\n x = self.dropout(x)\n x = self.fc(x)\n return F.softmax(x, dim=1)\n\n\nmodel = Classifier()\nmodel.build((1, 28, 28))\nprint(\"Output shape:\", model(torch.ones(1, 1, 28, 28).to(\"cuda\")).shape)\n\nmodel.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=[\"accuracy\"]\n)\nmodel.fit(train_loader, epochs=5)\n```"} +{"repo": "transformers", "function": "class TFBaseModelOutputWithCrossAttentions(ModelOutput):\n last_hidden_state: Optional[tf.Tensor] = None\n hidden_states: Tuple[tf.Tensor] | None = None\n attentions: Tuple[tf.Tensor] | None = None\n cross_attentions: Tuple[tf.Tensor] | None = None", "docstring": "Base class for model's outputs, with potential hidden states and attentions.\n\nArgs:\n last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n hidden_states (`tuple(tf.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads."} +{"repo": "transformers", "function": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two 
sequences passed to be used in a sequence-pair classification task. LUKE does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of zeros."} +{"repo": "tensorflow", "function": "def write(tag, tensor, step=None, metadata=None, name=None):\n with ops.name_scope(name, 'write_summary') as scope:\n if _summary_state.writer is None:\n return constant_op.constant(False)\n if step is None:\n step = get_step()\n if metadata is None:\n serialized_metadata = b''\n elif hasattr(metadata, 'SerializeToString'):\n serialized_metadata = metadata.SerializeToString()\n else:\n serialized_metadata = metadata\n\n def record():\n \"\"\"Record the actual summary and return True.\"\"\"\n if step is None:\n raise ValueError('No step set. Please specify one either through the `step` argument or through tf.summary.experimental.set_step()')\n with ops.device('cpu:0'):\n summary_tensor = tensor() if callable(tensor) else array_ops.identity(tensor)\n writer = _summary_state.writer\n summary_value = _maybe_convert_tensor_to_dtensor(writer, summary_tensor)\n step_value = _maybe_convert_tensor_to_dtensor(writer, step)\n write_summary_op = gen_summary_ops.write_summary(writer._resource, step_value, summary_value, tag, serialized_metadata, name=scope)\n with ops.control_dependencies([write_summary_op]):\n return constant_op.constant(True)\n op = smart_cond.smart_cond(should_record_summaries(), record, _nothing, name='summary_cond')\n if not context.executing_eagerly():\n ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op)\n return op", "docstring": "Writes a generic summary to the default SummaryWriter if one exists.\n\nThis exists primarily to support the definition of type-specific summary ops\nlike scalar() and image(), and is not intended for direct use unless defining\na new type-specific summary op.\n\nArgs:\n tag: string tag used to identify the summary (e.g. in TensorBoard), usually\n generated with `tf.summary.summary_scope`\n tensor: the Tensor holding the summary data to write or a callable that\n returns this Tensor. If a callable is passed, it will only be called when\n a default SummaryWriter exists and the recording condition specified by\n `record_if()` is met.\n step: Explicit `int64`-castable monotonic step value for this summary. If\n omitted, this defaults to `tf.summary.experimental.get_step()`, which must\n not be None.\n metadata: Optional SummaryMetadata, as a proto or serialized bytes\n name: Optional string name for this op.\n\nReturns:\n True on success, or false if no summary was written because no default\n summary writer was available.\n\nRaises:\n ValueError: if a default writer exists, but no step was provided and\n `tf.summary.experimental.get_step()` is None."} +{"repo": "beam", "function": "def create(path, mime_type='application/octet-stream', compression_type=CompressionTypes.AUTO):\n filesystem = FileSystems.get_filesystem(path)\n return filesystem.create(path, mime_type, compression_type)", "docstring": "Returns a write channel for the given file path.\n\nArgs:\n path: string path of the file object to be written to the system\n mime_type: MIME type to specify the type of content in the file object\n compression_type: Type of compression to be used for this object. 
See\n ``CompressionTypes`` for possible values.\n\nReturns: file handle with a ``close`` function for the user to use."} +{"repo": "tensorflow", "function": "def _load_saved_model(self, saved_model_dir, saved_model_tags):\n graph = _ops.Graph()\n saved_model = _loader_impl.SavedModelLoader(saved_model_dir)\n saved_model.load_graph(graph, tags=saved_model_tags)\n meta_graph = saved_model.get_meta_graph_def_from_tags(saved_model_tags)\n graph_def = meta_graph.graph_def\n signature_def = meta_graph.signature_def[_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n input_tensors = [graph.get_tensor_by_name(signature_def.inputs[key].name) for key in signature_def.inputs]\n output_tensors = [graph.get_tensor_by_name(signature_def.outputs[key].name) for key in signature_def.outputs]\n return (graph_def, input_tensors, output_tensors)", "docstring": "Load graph_def from saved model with the default serving signature key.\n\nArgs:\n saved_model_dir: Directory of the SavedModel.\n saved_model_tags: Set of tags identifying the MetaGraphDef within the\n SavedModel to analyze.\n\nReturns:\n graph_def: The loaded GraphDef.\n input_tensors: List of input tensors.\n output_tensors: List of output tensors."} +{"repo": "tensorflow", "function": "def _save_model_and_copy_assets(exported_model: exported_model_pb2.ExportedModel, src_saved_model_path: str, dst_saved_model_path: str, signature_def_map: Mapping[str, meta_graph_pb2.SignatureDef], tags: Collection[str]) -> bool:\n save_model.save_model_v1(exported_model.graph_def, dst_saved_model_path, signature_def_map, tags, init_op_name=exported_model.init_node_name, saver_def=_get_saver_def_or_none(exported_model), checkpoint_dir=exported_model.checkpoint_dir, function_aliases=exported_model.function_aliases, asset_file_defs=exported_model.asset_file_defs)\n _copy_assets(src_saved_model_path, dst_saved_model_path)\n return True", "docstring": "Saves the model and copies the assets from the source model.\n\nArgs:\n exported_model: ExportedModel to save.\n src_saved_model_path: Path to the source SavedModel. 
This will be used to\n copy the asset files to `dst_saved_model_path`.\n dst_saved_model_path: Destination path to save the exported model.\n signature_def_map: Signature key -> SignatureDef mapping.\n tags: Tags to attach to the saved MetaGraphDef.\n\nReturns:\n `True` upon successfully saving the model."} +{"repo": "tf-quant-finance", "function": "def implied_vol(*, prices, strikes, expiries, spots=None, forwards=None, discount_factors=None, is_call_options=None, validate_args=False, polya_factor=2 / np.pi, dtype=None, name=None):\n if (spots is None) == (forwards is None):\n raise ValueError('Either spots or forwards must be supplied but not both.')\n name = name or 'implied_vol'\n with tf.name_scope(name):\n prices = tf.convert_to_tensor(prices, dtype=dtype, name='prices')\n dtype = prices.dtype\n strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')\n expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')\n if discount_factors is None:\n discount_factors = tf.convert_to_tensor(1.0, dtype=dtype, name='discount_factors')\n else:\n discount_factors = tf.convert_to_tensor(discount_factors, dtype=dtype, name='discount_factors')\n if forwards is not None:\n forwards = tf.convert_to_tensor(forwards, dtype=dtype, name='forwards')\n else:\n spots = tf.convert_to_tensor(spots, dtype=dtype, name='spots')\n forwards = spots / discount_factors\n control_inputs = None\n if validate_args:\n control_inputs = _validate_args_control_deps(prices, forwards, strikes, expiries, discount_factors, is_call_options)\n with tf.compat.v1.control_dependencies(control_inputs):\n adjusted_strikes = strikes * discount_factors\n normalized_prices = prices / adjusted_strikes\n normalized_forwards = forwards / strikes\n return _approx_implied_vol_polya(normalized_prices, normalized_forwards, expiries, is_call_options, polya_factor)", "docstring": "Approximates the implied vol using the Stefanica-Radiocic algorithm.\n\nFinds an approximation to the implied vol using the Polya approximation for\nthe Normal CDF. This algorithm was described by Stefanica and Radiocic in\nref [1]. They show that if the Normal CDFs appearing in the Black Scholes\nformula for the option price are replaced with Polya's approximation, the\nimplied vol can be solved for analytically. The Polya approximation produces\nabsolute errors of less than 0.003 and the resulting implied vol is fairly\nclose to the true value. For practical purposes, this may not be accurate\nenough so this result should be used as a starting point for some method with\ncontrollable tolerance (e.g. a root finder).\n\n#### References:\n[1]: Dan Stefanica and Rados Radoicic. An explicit implied volatility formula.\n International Journal of Theoretical and Applied Finance,\n Vol. 20, no. 7, 2017.\n https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2908494\n[2]: Omar Eidous, Samar Al-Salman. One-term approximation for Normal\n distribution function. Mathematics and Statistics 4(1), 2016.\n http://www.hrpub.org/download/20160229/MS2-13405192.pdf\n\nArgs:\n prices: A real `Tensor` of any shape. The prices of the options whose\n implied vol is to be calculated.\n strikes: A real `Tensor` of the same dtype as `prices` and a shape that\n broadcasts with `prices`. The strikes of the options.\n expiries: A real `Tensor` of the same dtype as `prices` and a shape that\n broadcasts with `prices`. The expiry for each option. 
The units should\n be such that `expiry * volatility**2` is dimensionless.\n spots: A real `Tensor` of any shape that broadcasts to the shape\n of the `prices`. The current spot price of the underlying. Either this\n argument or the `forwards` (but not both) must be supplied.\n forwards: A real `Tensor` of any shape that broadcasts to the shape of\n `prices`. The forwards to maturity. Either this argument or the `spots`\n must be supplied but both must not be supplied.\n discount_factors: An optional real `Tensor` of same dtype as the `prices`.\n If not None, these are the discount factors to expiry (i.e. e^(-rT)).\n If None, no discounting is applied (i.e. it is assumed that the\n undiscounted option prices are provided ). If `spots` is supplied and\n `discount_factors` is not None then this is also used to compute the\n forwards to expiry.\n Default value: None, equivalent to discount factors = 1.\n is_call_options: A boolean `Tensor` of a shape compatible with `prices`.\n Indicates whether the option is a call (if True) or a put (if False).\n If not supplied, call options are assumed.\n validate_args: A Python bool. If True, indicates that arguments should be\n checked for correctness before performing the computation. The checks\n performed are: (1) Forwards/spots and strikes are positive. (2) The prices\n satisfy the arbitrage bounds (i.e. for call options, checks the\n inequality `max(F-K, 0) <= Price <= F` and for put options, checks that\n `max(K-F, 0) <= Price <= K`.). (3) Checks that the prices are not too\n close to the bounds. It is numerically unstable to compute the implied\n vols from options too far in the money or out of the money.\n Default value: False\n polya_factor: A real scalar. The coefficient to use in the\n approximation for the Normal CDF. The approximation is: `N(x) ~ 0.5 + 0.5\n * sign(x) * sqrt[ 1 - exp(-k * x**2) ]` where `k` is the coefficient\n supplied with `polya_factor`. The original Polya approximation has the\n value `2 / pi` and this is approximation used in Ref [1]. However, as\n described in Ref [2], a slightly more accurate approximation is achieved\n if we use the value of `k=5/8`).\n dtype: `tf.Dtype` to use when converting arguments to `Tensor`s. If not\n supplied, the default TensorFlow conversion will take place. Note that\n this argument does not do any casting for `Tensor`s or numpy arrays.\n Default value: None.\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. If not supplied, the default name 'implied_vol' is\n used.\n Default value: None\n\nReturns:\n implied_vols: A `Tensor` of the same dtype as `prices` and shape as the\n common broadcasted shape of `(prices, spots/forwards, strikes, expiries)`.\n The approximate implied total volatilities computed using the Polya\n approximation method.\n\nRaises:\n ValueError: If both `forwards` and `spots` are supplied or if neither is\n supplied."} +{"repo": "transformers", "function": "class DonutFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):\n do_thumbnail: Optional[bool]\n do_align_long_axis: Optional[bool]\n do_pad: Optional[bool]", "docstring": "Args:\n do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):\n Whether to resize the image using thumbnail method.\n do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):\n Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.\n do_pad (`bool`, *optional*, defaults to `self.do_pad`):\n Whether to pad the image. 
If `random_padding` is set to `True`, each image is padded with a random\n amount of padding on each size, up to the largest image size in the batch. Otherwise, all images are\n padded to the largest image size in the batch."} +{"repo": "mobly", "function": "class TestResult:\n\n def __init__(self):\n self.requested = []\n self.failed = []\n self.executed = []\n self.passed = []\n self.skipped = []\n self.error = []\n self.controller_info = []\n\n def __add__(self, r):\n \"\"\"Overrides '+' operator for TestResult class.\n\n The add operator merges two TestResult objects by concatenating all of\n their lists together.\n\n Args:\n r: another instance of TestResult to be added\n\n Returns:\n A TestResult instance that's the sum of two TestResult instances.\n \"\"\"\n if not isinstance(r, TestResult):\n raise TypeError('Operand %s of type %s is not a TestResult.' % (r, type(r)))\n sum_result = TestResult()\n for name in sum_result.__dict__:\n r_value = getattr(r, name)\n l_value = getattr(self, name)\n if isinstance(r_value, list):\n setattr(sum_result, name, l_value + r_value)\n return sum_result\n\n def add_record(self, record):\n \"\"\"Adds a test record to test result.\n\n A record is considered executed once it's added to the test result.\n\n Adding the record finalizes the content of a record, so no change\n should be made to the record afterwards.\n\n Args:\n record: A test record object to add.\n \"\"\"\n record.update_record()\n if record.result == TestResultEnums.TEST_RESULT_SKIP:\n self.skipped.append(record)\n return\n self.executed.append(record)\n if record.result == TestResultEnums.TEST_RESULT_FAIL:\n self.failed.append(record)\n elif record.result == TestResultEnums.TEST_RESULT_PASS:\n self.passed.append(record)\n else:\n self.error.append(record)\n\n def add_controller_info_record(self, controller_info_record):\n \"\"\"Adds a controller info record to results.\n\n This can be called multiple times for each test class.\n\n Args:\n controller_info_record: ControllerInfoRecord object to be added to\n the result.\n \"\"\"\n self.controller_info.append(controller_info_record)\n\n def add_class_error(self, test_record):\n \"\"\"Add a record to indicate a test class has failed before any test\n could execute.\n\n This is only called before any test is actually executed. 
So it only\n adds an error entry that describes why the class failed to the tally\n and does not affect the total number of tests requrested or exedcuted.\n\n Args:\n test_record: A TestResultRecord object for the test class.\n \"\"\"\n test_record.update_record()\n self.error.append(test_record)\n\n def is_test_executed(self, test_name):\n \"\"\"Checks if a specific test has been executed.\n\n Args:\n test_name: string, the name of the test to check.\n\n Returns:\n True if the test has been executed according to the test result,\n False otherwise.\n \"\"\"\n for record in self.executed:\n if record.test_name == test_name:\n return True\n return False\n\n def _count_eventually_passing_retries(self):\n \"\"\"Counts the number of retry iterations that eventually passed.\n\n If a test is retried and eventually passed, all the associated non-passing\n iterations should not be considered when devising the final state of the\n test run.\n\n Returns:\n Int, the number that should be subtracted from the result altering error\n counts.\n \"\"\"\n count = 0\n for record in self.passed:\n r = record\n while r.parent is not None and r.parent[1] == TestParentType.RETRY:\n count += 1\n r = r.parent[0]\n return count\n\n @property\n def is_all_pass(self):\n \"\"\"True if no tests failed or threw errors, False otherwise.\"\"\"\n num_of_result_altering_errors = len(self.failed) + len(self.error) - self._count_eventually_passing_retries()\n if num_of_result_altering_errors == 0:\n return True\n return False\n\n def requested_test_names_dict(self):\n \"\"\"Gets the requested test names of a test run in a dict format.\n\n Note a test can be requested multiple times, so there can be duplicated\n values\n\n Returns:\n A dict with a key and the list of strings.\n \"\"\"\n return {'Requested Tests': copy.deepcopy(self.requested)}\n\n def summary_str(self):\n \"\"\"Gets a string that summarizes the stats of this test result.\n\n The summary provides the counts of how many tests fall into each\n category, like 'Passed', 'Failed' etc.\n\n Format of the string is:\n Requested , Executed , ...\n\n Returns:\n A summary string of this test result.\n \"\"\"\n kv_pairs = ['%s %d' % (k, v) for k, v in self.summary_dict().items()]\n msg = ', '.join(sorted(kv_pairs))\n return msg\n\n def summary_dict(self):\n \"\"\"Gets a dictionary that summarizes the stats of this test result.\n\n The summary provides the counts of how many tests fall into each\n category, like 'Passed', 'Failed' etc.\n\n Returns:\n A dictionary with the stats of this test result.\n \"\"\"\n d = {}\n d['Requested'] = len(self.requested)\n d['Executed'] = len(self.executed)\n d['Passed'] = len(self.passed)\n d['Failed'] = len(self.failed)\n d['Skipped'] = len(self.skipped)\n d['Error'] = len(self.error)\n return d", "docstring": "A class that contains metrics of a test run.\n\nThis class is essentially a container of TestResultRecord objects.\n\nAttributes:\n requested: A list of strings, each is the name of a test requested\n by user.\n failed: A list of records for tests failed.\n executed: A list of records for tests that were actually executed.\n passed: A list of records for tests passed.\n skipped: A list of records for tests skipped.\n error: A list of records for tests with error result token.\n controller_info: list of ControllerInfoRecord."} +{"repo": "pytype", "function": "def _get_traces(self, lineno, ops, symbol, maxmatch=-1, num_lines=1):\n if not isinstance(symbol, _SymbolMatcher):\n symbol = _SymbolMatcher.from_one_match(symbol)\n for tr in 
itertools.chain.from_iterable((self.source.traces[line] for line in range(lineno, lineno + num_lines))):\n if maxmatch == 0:\n break\n m_matched = self._matched\n assert m_matched is not None\n if id(tr) not in m_matched and tr.op in ops and symbol.match(tr.symbol):\n maxmatch -= 1\n m_matched.add(id(tr))\n yield tr", "docstring": "Yields matching traces.\n\nArgs:\n lineno: A starting line number.\n ops: A list of opcode names to match on.\n symbol: A symbol or _SymbolMatcher instance to match on.\n maxmatch: The maximum number of traces to yield. -1 for no maximum.\n num_lines: The number of consecutive lines to search."}
+{"repo": "transformers", "function": "def forward(self, input_ids: torch.Tensor, cache_position: torch.Tensor):\n _, seqlen = input_ids.shape\n position_ids = cache_position.unsqueeze(0)\n past_key_values = self.static_cache\n outs = self.model(input_ids=input_ids, attention_mask=None, position_ids=position_ids, cache_position=cache_position, past_key_values=past_key_values, use_cache=True)\n return outs.logits", "docstring": "Forward pass of the module, which is compatible with the ExecuTorch runtime.\n\nArgs:\n input_ids (`torch.Tensor`): Tensor representing current input token id to the module.\n cache_position (`torch.Tensor`): Tensor representing current input position in the cache.\n\nReturns:\n torch.Tensor: Logits output from the model.\n\nThis forward adapter serves two primary purposes:\n\n1. **Making the Model `torch.export`-Compatible**:\n The adapter hides unsupported objects, such as the `Cache`, from the graph inputs and outputs,\n enabling the model to be exportable using `torch.export` without encountering issues.\n\n2. **Ensuring Compatibility with `ExecuTorch` runtime**:\n The adapter matches the model's forward signature with that in `executorch/extension/llm/runner`,\n ensuring that the exported model can be executed in `ExecuTorch` out-of-the-box."}
+{"repo": "tensorflow", "function": "def get_arg_value(node, arg_name, arg_pos=None):\n if arg_name is not None:\n for kw in node.keywords:\n if kw.arg == arg_name:\n return (True, kw.value)\n if arg_pos is not None:\n idx = 0\n for arg in node.args:\n if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):\n continue\n if idx == arg_pos:\n return (True, arg)\n idx += 1\n return (False, None)", "docstring": "Get the value of an argument from an ast.Call node.\n\nThis function goes through the positional and keyword arguments to check\nwhether a given argument was used, and if so, returns its value (the node\nrepresenting its value).\n\nThis cannot introspect *args or **args, but it safely handles *args in\nPython3.5+.\n\nArgs:\n node: The ast.Call node to extract arg values from.\n arg_name: The name of the argument to extract.\n arg_pos: The position of the argument (in case it's passed as a positional\n argument).\n\nReturns:\n A tuple (arg_present, arg_value) containing a boolean indicating whether\n the argument is present, and its value in case it is."}
+{"repo": "pyglove", "function": "def get_metadata(self, key: str, per_trial: bool=True) -> Optional[Any]:", "docstring": "Gets metadata for current trial or current sampling.\n\nArgs:\n key: A string as key to metadata.\n per_trial: If True, the key is retrieved per current trial.
Otherwise, it\n is retrieved per current sampling.\n\nReturns:\n A value that can be deserialized by `pg.from_json_str`."} +{"repo": "transformers", "function": "def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)", "docstring": "Rescale the image by the given factor. image = image * rescale_factor.\n\nArgs:\n image (`np.ndarray`):\n Image to rescale.\n rescale_factor (`float`):\n The value to use for rescaling.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. If unset, is inferred from the input image. Can be\n one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format."} +{"repo": "transformers", "function": "def _batch_prepare_for_model(self, batch_ids_pairs: List[Tuple[List[int], None]], batch_entity_ids_pairs: List[Tuple[Optional[List[int]], Optional[List[int]]]], batch_entity_token_spans_pairs: List[Tuple[Optional[List[Tuple[int, int]]], Optional[List[Tuple[int, int]]]]], add_special_tokens: bool=True, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy=TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[str]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_length: bool=False, verbose: bool=True) -> BatchEncoding:\n batch_outputs = {}\n for input_ids, entity_ids, entity_token_span_pairs in zip(batch_ids_pairs, batch_entity_ids_pairs, batch_entity_token_spans_pairs):\n first_ids, second_ids = input_ids\n first_entity_ids, second_entity_ids = entity_ids\n first_entity_token_spans, second_entity_token_spans = entity_token_span_pairs\n outputs = self.prepare_for_model(first_ids, second_ids, entity_ids=first_entity_ids, pair_entity_ids=second_entity_ids, entity_token_spans=first_entity_token_spans, pair_entity_token_spans=second_entity_token_spans, add_special_tokens=add_special_tokens, padding=PaddingStrategy.DO_NOT_PAD.value, truncation=truncation_strategy.value, max_length=max_length, max_entity_length=max_entity_length, stride=stride, pad_to_multiple_of=None, padding_side=None, return_attention_mask=False, return_token_type_ids=return_token_type_ids, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_length=return_length, return_tensors=None, prepend_batch_axis=False, verbose=verbose)\n for key, value in outputs.items():\n if key not in batch_outputs:\n batch_outputs[key] = []\n batch_outputs[key].append(value)\n batch_outputs = 
self.pad(batch_outputs, padding=padding_strategy.value, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)\n return batch_outputs", "docstring": "Prepares a sequence of input ids, or a pair of sequences of input ids so that it can be used by the model. It\nadds special tokens, truncates sequences if overflowing while taking into account the special tokens and\nmanages a moving window (with user defined stride) for overflowing tokens\n\n\nArgs:\n batch_ids_pairs: list of tokenized input ids or input ids pairs\n batch_entity_ids_pairs: list of entity ids or entity ids pairs\n batch_entity_token_spans_pairs: list of entity spans or entity spans pairs\n max_entity_length: The maximum length of the entity sequence."}
+{"repo": "tensorflow", "function": "def sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=0, validate_indices=True, name=None):\n return gen_sparse_ops.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value=default_value, validate_indices=validate_indices, name=name)", "docstring": "Converts a sparse representation into a dense tensor.\n\nBuilds an array `dense` with shape `output_shape` such that\n\n```python\n# If sparse_indices is scalar\ndense[i] = (i == sparse_indices ? sparse_values : default_value)\n\n# If sparse_indices is a vector, then for each i\ndense[sparse_indices[i]] = sparse_values[i]\n\n# If sparse_indices is an n by d matrix, then for each i in [0, n)\ndense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]\n```\n\nAll other values in `dense` are set to `default_value`. If `sparse_values`\nis a scalar, all sparse indices are set to this single value.\n\nIndices should be sorted in lexicographic order, and indices must not\ncontain any repeats. If `validate_indices` is True, these properties\nare checked during execution.\n\nArgs:\n sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.\n `sparse_indices[i]` contains the complete index where `sparse_values[i]`\n will be placed.\n output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape\n of the dense output tensor.\n sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of\n `sparse_indices`, or a scalar value to be used for all sparse indices.\n default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value\n to set for indices not specified in `sparse_indices`. Defaults to zero.\n validate_indices: A boolean value. If True, indices are checked to make\n sure they are sorted in lexicographic order and that there are no repeats.\n name: A name for the operation (optional).\n\nReturns:\n Dense `Tensor` of shape `output_shape`.
Has the same type as\n `sparse_values`."} +{"repo": "tensorflow", "function": "def _slice_length(value_length, slice_key):\n zeros = array_ops.zeros(value_length, dtype=dtypes.bool)\n return array_ops.size(zeros[slice_key], out_type=value_length.dtype)", "docstring": "Computes the number of elements in a slice of a value with a given length.\n\nReturns the equivalent of: `len(range(value_length)[slice_key])`\n\nArgs:\n value_length: Scalar int `Tensor`: the length of the value being sliced.\n slice_key: A `slice` object used to slice elements from the value.\n\nReturns:\n The number of elements in the sliced value."} +{"repo": "tensorflow", "function": "def _get_compile_args(self, user_metrics=True):\n self._assert_compile_was_called()\n saved_metrics = self.compiled_metrics._user_metrics\n saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics\n if not user_metrics:\n if saved_metrics is not None:\n saved_metrics = self.compiled_metrics._metrics\n if saved_weighted_metrics is not None:\n saved_weighted_metrics = self.compiled_metrics._weighted_metrics\n compile_args = {'optimizer': self.optimizer, 'loss': self.compiled_loss._user_losses, 'metrics': saved_metrics, 'weighted_metrics': saved_weighted_metrics, 'loss_weights': self.compiled_loss._user_loss_weights}\n return compile_args", "docstring": "Used for saving or cloning a Model.\n\nArgs:\n user_metrics: Whether to return user-supplied metrics or `Metric` objects.\n Defaults to returning the user-supplied metrics.\n\nReturns:\n Dictionary of arguments that were used when compiling the model."} +{"repo": "tensorflow", "function": "class InputSpec(object):\n\n def __init__(self, dtype=None, shape=None, ndim=None, max_ndim=None, min_ndim=None, axes=None, allow_last_axis_squeeze=False, name=None):\n self.dtype = dtypes.as_dtype(dtype).name if dtype is not None else None\n shape = tensor_shape.TensorShape(shape)\n if shape.rank is None:\n shape = None\n else:\n shape = tuple(shape.as_list())\n if shape is not None:\n self.ndim = len(shape)\n self.shape = shape\n else:\n self.ndim = ndim\n self.shape = None\n self.max_ndim = max_ndim\n self.min_ndim = min_ndim\n self.name = name\n self.allow_last_axis_squeeze = allow_last_axis_squeeze\n try:\n axes = axes or {}\n self.axes = {int(k): axes[k] for k in axes}\n except (ValueError, TypeError):\n raise TypeError('The keys in axes must be integers.')\n if self.axes and (self.ndim is not None or self.max_ndim is not None):\n max_dim = (self.ndim if self.ndim else self.max_ndim) - 1\n max_axis = max(self.axes)\n if max_axis > max_dim:\n raise ValueError('Axis {} is greater than the maximum allowed value: {}'.format(max_axis, max_dim))\n\n def __repr__(self):\n spec = ['dtype=' + str(self.dtype) if self.dtype else '', 'shape=' + str(self.shape) if self.shape else '', 'ndim=' + str(self.ndim) if self.ndim else '', 'max_ndim=' + str(self.max_ndim) if self.max_ndim else '', 'min_ndim=' + str(self.min_ndim) if self.min_ndim else '', 'axes=' + str(self.axes) if self.axes else '']\n return 'InputSpec(%s)' % ', '.join((x for x in spec if x))\n\n def get_config(self):\n return {'dtype': self.dtype, 'shape': self.shape, 'ndim': self.ndim, 'max_ndim': self.max_ndim, 'min_ndim': self.min_ndim, 'axes': self.axes}\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)", "docstring": "Specifies the rank, dtype and shape of every input to a layer.\n\nLayers can expose (if appropriate) an `input_spec` attribute:\nan instance of `InputSpec`, or a nested structure of `InputSpec` 
instances\n(one per input tensor). These objects enable the layer to run input\ncompatibility checks for input structure, input rank, input shape, and\ninput dtype.\n\nA None entry in a shape is compatible with any dimension,\na None shape is compatible with any shape.\n\nArgs:\n dtype: Expected DataType of the input.\n shape: Shape tuple, expected shape of the input\n (may include None for unchecked axes). Includes the batch size.\n ndim: Integer, expected rank of the input.\n max_ndim: Integer, maximum rank of the input.\n min_ndim: Integer, minimum rank of the input.\n axes: Dictionary mapping integer axes to\n a specific dimension value.\n allow_last_axis_squeeze: If True, then allow inputs of rank N+1 as long\n as the last axis of the input is 1, as well as inputs of rank N-1\n as long as the last axis of the spec is 1.\n name: Expected key corresponding to this input when passing data as\n a dictionary.\n\nExample:\n\n```python\nclass MyLayer(Layer):\n def __init__(self):\n super(MyLayer, self).__init__()\n # The layer will accept inputs with shape (?, 28, 28) & (?, 28, 28, 1)\n # and raise an appropriate error message otherwise.\n self.input_spec = InputSpec(\n shape=(None, 28, 28, 1),\n allow_last_axis_squeeze=True)\n```"} +{"repo": "keras", "function": "def fake_quant_with_min_max_vars(inputs, min_vals, max_vals, num_bits=8, narrow_range=False, axis=None):\n if any_symbolic_tensors((inputs,)):\n return FakeQuantWithMinMaxVars().symbolic_call(inputs, min_vals, max_vals)\n inputs = ops.convert_to_tensor(inputs)\n min_vals = ops.convert_to_tensor(min_vals)\n max_vals = ops.convert_to_tensor(max_vals)\n num_bits = int(num_bits)\n if axis is not None:\n axis = canonicalize_axis(axis, inputs.ndim)\n if backend.backend() == 'tensorflow':\n import tensorflow as tf\n dtype = backend.standardize_dtype(inputs.dtype)\n if axis is None:\n outputs = tf.quantization.fake_quant_with_min_max_vars(ops.cast(inputs, 'float32'), ops.cast(ops.reshape(min_vals, ()), 'float32'), ops.cast(ops.reshape(max_vals, ()), 'float32'), num_bits=num_bits, narrow_range=narrow_range)\n return ops.cast(outputs, dtype=dtype)\n else:\n last_axis = inputs.ndim - 1\n inputs = ops.swapaxes(inputs, axis, last_axis)\n outputs = tf.quantization.fake_quant_with_min_max_vars_per_channel(ops.cast(inputs, 'float32'), ops.cast(min_vals, 'float32'), ops.cast(max_vals, 'float32'), num_bits=num_bits, narrow_range=narrow_range)\n outputs = ops.cast(outputs, dtype=dtype)\n return ops.swapaxes(outputs, last_axis, axis)\n\n @ops.custom_gradient\n def _fake_quant_with_min_max_vars_per_channel(x, min_val, max_val):\n dtype = backend.standardize_dtype(x.dtype)\n nudged_min, nudged_max, scale, inv_scale = adjust_and_nudge(min_val, max_val, num_bits, narrow_range)\n quant_zero = ops.floor(ops.add(ops.multiply(-nudged_min, inv_scale), 0.5))\n x_clamped = ops.clip(x, ops.cast(nudged_min, x.dtype), ops.cast(nudged_max, x.dtype))\n x_clamped_shifted = ops.subtract(x_clamped, nudged_min)\n result = ops.multiply(ops.floor(ops.add(ops.subtract(ops.multiply(x_clamped_shifted, inv_scale), quant_zero), 0.5)), scale)\n result = ops.cast(result, dtype=dtype)\n masks = ops.logical_and(ops.greater_equal(x, nudged_min), ops.less_equal(x, nudged_max))\n\n def grad(*args, upstream=None):\n if upstream is None:\n upstream, = args\n dx = ops.where(masks, upstream, 0.0)\n axes = [i for i in range(len(dx.shape)) if i != axis]\n min_mask = ops.less_equal(x, nudged_min)\n grad_min = ops.where(min_mask, upstream, 0.0)\n if axis is not None:\n grad_min = 
ops.sum(grad_min, axis=axes)\n else:\n grad_min = ops.sum(grad_min)\n max_mask = ops.greater_equal(x, nudged_max)\n grad_max = ops.where(max_mask, upstream, 0.0)\n if axis is not None:\n grad_max = ops.sum(grad_max, axis=axes)\n else:\n grad_max = ops.sum(grad_max)\n return (dx, grad_min, grad_max)\n return (result, grad)\n return _fake_quant_with_min_max_vars_per_channel(inputs, min_vals, max_vals)", "docstring": "Perform per-tensor or per-channel fake quantization.\n\n`[min_vals, max_vals]` define the clamping range for the `inputs`.\n\nThe `inputs` are quantized into the quantization range:\n- `[0, 2^num_bits - 1]` when `narrow_range=False`\n- `[1, 2^num_bits - 1]` when `narrow_range=True`\n\nAfter quantization, the values are dequantized and output as floats within\nthe `[min_vals, max_vals]` interval.\n\nThis operation supports gradient computation, allowing `min_vals` and\n`max_vals` to be trained.\n\nArgs:\n inputs: Input Keras tensor of float dtype.\n min_vals: A global minimum scalar or a per-channel minimum tensor.\n max_vals: A global maximum scalar or a per-channel maximum tensor.\n num_bits: Quantization bit width (e.g., `8` for int8). Defaults to `8`.\n narrow_range: Whether to use narrow quantization range. Defaults to\n `False`.\n axis: Axis along which to perform per-channel quantization. If `None`,\n per-tensor quantization is performed. Defaults to `None`.\n\n\nReturns:\n Tensor: A Keras tensor with fake quantization applied."} +{"repo": "transformers", "function": "class ModernBertConfig(PretrainedConfig):\n model_type = 'modernbert'\n keys_to_ignore_at_inference = ['past_key_values']\n\n def __init__(self, vocab_size=50368, hidden_size=768, intermediate_size=1152, num_hidden_layers=22, num_attention_heads=12, hidden_activation='gelu', max_position_embeddings=8192, initializer_range=0.02, initializer_cutoff_factor=2.0, norm_eps=1e-05, norm_bias=False, pad_token_id=50283, eos_token_id=50282, bos_token_id=50281, cls_token_id=50281, sep_token_id=50282, global_rope_theta=160000.0, attention_bias=False, attention_dropout=0.0, global_attn_every_n_layers=3, local_attention=128, local_rope_theta=10000.0, embedding_dropout=0.0, mlp_bias=False, mlp_dropout=0.0, decoder_bias=True, classifier_pooling: Literal['cls', 'mean']='cls', classifier_dropout=0.0, classifier_bias=False, classifier_activation='gelu', deterministic_flash_attn=False, sparse_prediction=False, sparse_pred_ignore_index=-100, reference_compile=None, repad_logits_with_grad=False, **kwargs):\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, cls_token_id=cls_token_id, sep_token_id=sep_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.initializer_range = initializer_range\n self.initializer_cutoff_factor = initializer_cutoff_factor\n self.norm_eps = norm_eps\n self.norm_bias = norm_bias\n self.global_rope_theta = global_rope_theta\n self.attention_bias = attention_bias\n self.attention_dropout = attention_dropout\n self.hidden_activation = hidden_activation\n self.global_attn_every_n_layers = global_attn_every_n_layers\n self.local_attention = local_attention\n self.local_rope_theta = local_rope_theta\n self.embedding_dropout = embedding_dropout\n self.mlp_bias = mlp_bias\n self.mlp_dropout = mlp_dropout\n self.decoder_bias = 
decoder_bias\n self.classifier_pooling = classifier_pooling\n self.classifier_dropout = classifier_dropout\n self.classifier_bias = classifier_bias\n self.classifier_activation = classifier_activation\n self.deterministic_flash_attn = deterministic_flash_attn\n self.sparse_prediction = sparse_prediction\n self.sparse_pred_ignore_index = sparse_pred_ignore_index\n self.reference_compile = reference_compile\n self.repad_logits_with_grad = repad_logits_with_grad\n if self.classifier_pooling not in ['cls', 'mean']:\n raise ValueError(f'Invalid value for `classifier_pooling`, should be either \"cls\" or \"mean\", but is {self.classifier_pooling}.')\n\n def to_dict(self):\n output = super().to_dict()\n output.pop('reference_compile', None)\n return output", "docstring": "This is the configuration class to store the configuration of a [`ModernBertModel`]. It is used to instantiate an ModernBert\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the ModernBERT-base.\ne.g. [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base)\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 50368):\n Vocabulary size of the ModernBert model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`ModernBertModel`]\n hidden_size (`int`, *optional*, defaults to 768):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 1152):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 22):\n Number of hidden layers in the Transformer decoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer decoder.\n hidden_activation (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the decoder. 
Will default to `\"gelu\"`\n if not specified.\n max_position_embeddings (`int`, *optional*, defaults to 8192):\n The maximum sequence length that this model might ever be used with.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_cutoff_factor (`float`, *optional*, defaults to 2.0):\n The cutoff factor for the truncated_normal_initializer for initializing all weight matrices.\n norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the rms normalization layers.\n norm_bias (`bool`, *optional*, defaults to `False`):\n Whether to use bias in the normalization layers.\n pad_token_id (`int`, *optional*, defaults to 50283):\n Padding token id.\n eos_token_id (`int`, *optional*, defaults to 50282):\n End of stream token id.\n bos_token_id (`int`, *optional*, defaults to 50281):\n Beginning of stream token id.\n cls_token_id (`int`, *optional*, defaults to 50281):\n Classification token id.\n sep_token_id (`int`, *optional*, defaults to 50282):\n Separation token id.\n global_rope_theta (`float`, *optional*, defaults to 160000.0):\n The base period of the global RoPE embeddings.\n attention_bias (`bool`, *optional*, defaults to `False`):\n Whether to use a bias in the query, key, value and output projection layers during self-attention.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n global_attn_every_n_layers (`int`, *optional*, defaults to 3):\n The number of layers between global attention layers.\n local_attention (`int`, *optional*, defaults to 128):\n The window size for local attention.\n local_rope_theta (`float`, *optional*, defaults to 10000.0):\n The base period of the local RoPE embeddings.\n embedding_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the embeddings.\n mlp_bias (`bool`, *optional*, defaults to `False`):\n Whether to use bias in the MLP layers.\n mlp_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the MLP layers.\n decoder_bias (`bool`, *optional*, defaults to `True`):\n Whether to use bias in the decoder layers.\n classifier_pooling (`str`, *optional*, defaults to `\"cls\"`):\n The pooling method for the classifier. Should be either `\"cls\"` or `\"mean\"`. In local attention layers, the\n CLS token doesn't attend to all tokens on long sequences.\n classifier_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the classifier.\n classifier_bias (`bool`, *optional*, defaults to `False`):\n Whether to use bias in the classifier.\n classifier_activation (`str`, *optional*, defaults to `\"gelu\"`):\n The activation function for the classifier.\n deterministic_flash_attn (`bool`, *optional*, defaults to `False`):\n Whether to use deterministic flash attention. If `False`, inference will be faster but not deterministic.\n sparse_prediction (`bool`, *optional*, defaults to `False`):\n Whether to use sparse prediction for the masked language model instead of returning the full dense logits.\n sparse_pred_ignore_index (`int`, *optional*, defaults to -100):\n The index to ignore for the sparse prediction.\n reference_compile (`bool`, *optional*):\n Whether to compile the layers of the model which were compiled during pretraining. 
If `None`, then parts of\n the model will be compiled if 1) `triton` is installed, 2) the model is not on MPS, 3) the model is not\n shared between devices, and 4) the model is not resized after initialization. If `True`, then the model may\n be faster in some scenarios.\n repad_logits_with_grad (`bool`, *optional*, defaults to `False`):\n When True, ModernBertForMaskedLM keeps track of the logits' gradient when repadding for output. This only\n applies when using Flash Attention 2 with passed labels. Otherwise output logits always have a gradient.\n\nExamples:\n\n```python\n>>> from transformers import ModernBertModel, ModernBertConfig\n\n>>> # Initializing a ModernBert style configuration\n>>> configuration = ModernBertConfig()\n\n>>> # Initializing a model from the modernbert-base style configuration\n>>> model = ModernBertModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache,\n to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`torch.Tensor`):\n Batch size."} +{"repo": "transformers", "function": "class EncoderNoRepeatNGramLogitsProcessor(LogitsProcessor):\n\n def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor):\n if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0:\n raise ValueError(f'`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}')\n self.ngram_size = encoder_ngram_size\n if len(encoder_input_ids.shape) == 1:\n encoder_input_ids = encoder_input_ids.unsqueeze(0)\n self.batch_size = 
encoder_input_ids.shape[0]\n self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size)\n\n @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n num_hypos = scores.shape[0]\n num_beams = num_hypos // self.batch_size\n cur_len = input_ids.shape[-1]\n scores_processed = scores.clone()\n banned_batch_tokens = [_get_generated_ngrams(self.generated_ngrams[hypo_idx // num_beams], input_ids[hypo_idx], self.ngram_size, cur_len) for hypo_idx in range(num_hypos)]\n for i, banned_tokens in enumerate(banned_batch_tokens):\n scores_processed[i, banned_tokens] = -float('inf')\n return scores_processed", "docstring": "[`LogitsProcessor`] that works similarly to [`NoRepeatNGramLogitsProcessor`], but applied exclusively to prevent\nthe repetition of n-grams present in the prompt.\n\nIt was designed to promote chattiness in a language model, by preventing the generation of n-grams present in\nprevious conversation rounds.\n\nArgs:\n encoder_ngram_size (`int`):\n All ngrams of size `ngram_size` can only occur within the encoder input ids.\n encoder_input_ids (`int`):\n The encoder_input_ids that should not be repeated within the decoder ids.\n\nExamples:\n\n```py\n>>> from transformers import AutoTokenizer, AutoModelForCausalLM\n\n>>> model = AutoModelForCausalLM.from_pretrained(\"bigscience/bloomz-560m\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"bigscience/bloomz-560m\")\n\n>>> inputs = tokenizer(\"Alice: I love cats. What do you love?\\nBob:\", return_tensors=\"pt\")\n\n>>> # With greedy decoding, we see Bob repeating Alice's opinion. If Bob was a chatbot, it would be a poor one.\n>>> outputs = model.generate(**inputs)\n>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])\nAlice: I love cats. What do you love?\nBob: I love cats. What do you\n\n>>> # With this logits processor, we can prevent Bob from repeating Alice's opinion.\n>>> outputs = model.generate(**inputs, encoder_no_repeat_ngram_size=2)\n>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])\nAlice: I love cats. What do you love?\nBob: My cats are very cute.\n```"} +{"repo": "tensorflow", "function": "def softmax_v2(logits, axis=None, name=None):\n if axis is None:\n axis = -1\n return _wrap_2d_function(logits, gen_nn_ops.softmax, axis, name)", "docstring": "Computes softmax activations.\n\nUsed for multi-class predictions. The sum of all outputs generated by softmax\nis 1.\n\nThis function performs the equivalent of\n\n```python\nsoftmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis, keepdims=True)\n```\nExample usage:\n\n>>> softmax = tf.nn.softmax([-1, 0., 1.])\n>>> softmax\n\n>>> sum(softmax)\n\n\nArgs:\n logits: A non-empty `Tensor`. Must be one of the following types: `half`,\n `float32`, `float64`.\n axis: The dimension softmax would be performed on. The default is -1 which\n indicates the last dimension.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor`. 
Has the same type and shape as `logits`.\n\nRaises:\n InvalidArgumentError: if `logits` is empty or `axis` is beyond the last\n dimension of `logits`."} +{"repo": "beam", "function": "def RemoveBitbucketServerConnectedRepository(self, request, global_params=None):\n config = self.GetMethodConfig('RemoveBitbucketServerConnectedRepository')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Remove a Bitbucket Server repository from an given BitbucketServerConfig's connected repositories. This API is experimental.\n\nArgs:\n request: (CloudbuildProjectsLocationsBitbucketServerConfigsRemoveBitbucketServerConnectedRepositoryRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (Empty) The response message."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `X [eos, src_lang_code]`\n- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "beam", "function": "def apply_transform(self, data: OperationInputT, output_column_name: str) -> dict[str, OperationOutputT]:", "docstring": "Define any processing logic in the apply_transform() method.\nprocessing logics are applied on inputs and returns a transformed\noutput.\nArgs:\n inputs: input data."} +{"repo": "transformers", "function": "def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n mask = input_ids.ne(padding_idx).int()\n incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n return incremental_indices.long() + padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\nare ignored. 
This is modified from fairseq's `utils.make_positions`.\n\nArgs:\n x: torch.Tensor x:\n\nReturns: torch.Tensor"} +{"repo": "tensorflow", "function": "def __init__(self, key_dtype, value_dtype, default_value, name='MutableHashTable', checkpoint=True, experimental_is_anonymous=False):\n self._default_value = ops.convert_to_tensor(default_value, dtype=value_dtype)\n self._value_shape = self._default_value.get_shape()\n self._checkpoint = checkpoint\n self._key_dtype = key_dtype\n self._value_dtype = value_dtype\n self._name = name\n self._is_anonymous = experimental_is_anonymous\n if not self._is_anonymous:\n self._shared_name = None\n if context.executing_eagerly():\n self._shared_name = 'table_%d' % (ops.uid(),)\n super(MutableHashTable, self).__init__(key_dtype, value_dtype)\n self._resource_handle = self._create_resource()\n if checkpoint:\n saveable = MutableHashTable._Saveable(self, name)\n if not context.executing_eagerly():\n ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)", "docstring": "Creates an empty `MutableHashTable` object.\n\nCreates a table, the type of its keys and values are specified by key_dtype\nand value_dtype, respectively.\n\nArgs:\n key_dtype: the type of the key tensors.\n value_dtype: the type of the value tensors.\n default_value: The value to use if a key is missing in the table.\n name: A name for the operation (optional).\n checkpoint: if True, the contents of the table are saved to and restored\n from checkpoints. If `shared_name` is empty for a checkpointed table, it\n is shared using the table node name.\n experimental_is_anonymous: Whether to use anonymous mode for the\n table (default is False). In anonymous mode, the table\n resource can only be accessed via a resource handle. It can't\n be looked up by a name. When all resource handles pointing to\n that resource are gone, the resource will be deleted\n automatically.\n\nReturns:\n A `MutableHashTable` object.\n\nRaises:\n ValueError: If checkpoint is True and no name was specified."} +{"repo": "beam", "function": "def _nested_type_wrapper(fun):\n\n def wrapper(pickler, obj):\n if _is_nested_class(obj) and obj.__module__ != '__main__':\n containing_class_and_name = _find_containing_class(obj)\n if containing_class_and_name is not None:\n return pickler.save_reduce(getattr, containing_class_and_name, obj=obj)\n try:\n return fun(pickler, obj)\n except dill.dill.PicklingError:\n return pickler.save_reduce(dill.dill._create_type, (type(obj), obj.__name__, obj.__bases__, _dict_from_mappingproxy(obj.__dict__)), obj=obj)\n return wrapper", "docstring": "A wrapper for the standard pickler handler for class objects.\n\nArgs:\n fun: Original pickler handler for type objects.\n\nReturns:\n A wrapper for type objects that handles nested classes.\n\nThe wrapper detects if an object being pickled is a nested class object.\nFor nested class object only it will save the containing class object so\nthe nested structure is recreated during unpickle."} +{"repo": "transformers", "function": "class Mask2FormerPixelLevelModuleOutput(ModelOutput):\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_last_hidden_state: Optional[torch.FloatTensor] = None\n decoder_hidden_states: Tuple[torch.FloatTensor] = None", "docstring": "Mask2Former's pixel level module output. It returns the output of the encoder (optional) and all hidden states\n(multi-scale features) from the `decoder`. 
By default, the `encoder` is a Swin Backbone and the `decoder` is a\nMulti-Scale Deformable Attention based decoder.\n\nThe `decoder_last_hidden_state` are the **per-pixel embeddings** while `decoder_hidden_states` refer to multi-scale\nfeature maps produced using **multi-scaling strategy** defined in the paper.\n\nArgs:\n encoder_last_hidden_state (`torch.FloatTensor`):\n Last hidden states (final feature map of shape `(batch_size, num_channels, height, width)`) of the last\n stage of the encoder.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*):\n Tuple of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden states (also\n called feature maps) of the model at the output of each stage. Returned if output_hidden_states is set to\n True.\n decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)):\n 1/4 scale features from the last Pixel Decoder Layer.\n decoder_hidden_states (`tuple(torch.FloatTensor)`):\n Tuple of `torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`. Hidden states (also\n called feature maps) of the model at the output of each stage."} +{"repo": "transformers", "function": "def pair_wise_sigmoid_focal_loss(inputs: Tensor, labels: Tensor, alpha: float=0.25, gamma: float=2.0) -> Tensor:\n if alpha < 0:\n raise ValueError('alpha must be positive')\n height_and_width = inputs.shape[1]\n criterion = nn.BCEWithLogitsLoss(reduction='none')\n prob = inputs.sigmoid()\n cross_entropy_loss_pos = criterion(inputs, torch.ones_like(inputs))\n focal_pos = (1 - prob) ** gamma * cross_entropy_loss_pos\n focal_pos *= alpha\n cross_entropy_loss_neg = criterion(inputs, torch.zeros_like(inputs))\n focal_neg = prob ** gamma * cross_entropy_loss_neg\n focal_neg *= 1 - alpha\n loss = torch.matmul(focal_pos, labels.T) + torch.matmul(focal_neg, (1 - labels).T)\n return loss / height_and_width", "docstring": "A pair wise version of the focal loss, see `sigmoid_focal_loss` for usage.\n\nArgs:\n inputs (`torch.Tensor`):\n A tensor representing a mask.\n labels (`torch.Tensor`):\n A tensor with the same shape as inputs. 
Stores the binary classification labels for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha (float, *optional*, defaults to 0.25):\n Weighting factor in range (0,1) to balance positive vs negative examples.\n gamma (float, *optional*, defaults to 2.0):\n Exponent of the modulating factor \\\\(1 - p_t\\\\) to balance easy vs hard examples.\n\nReturns:\n `torch.Tensor`: The computed loss between each pairs."} +{"repo": "mobly", "function": "def _exec_one_test_with_retry(self, test_name, test_method, max_count):\n\n def should_retry(record):\n return record.result in [records.TestResultEnums.TEST_RESULT_FAIL, records.TestResultEnums.TEST_RESULT_ERROR]\n previous_record = self.exec_one_test(test_name, test_method)\n if not should_retry(previous_record):\n return\n for i in range(max_count - 1):\n retry_name = f'{test_name}_retry_{i + 1}'\n new_record = records.TestResultRecord(retry_name, self.TAG)\n new_record.retry_parent = previous_record\n new_record.parent = (previous_record, records.TestParentType.RETRY)\n previous_record = self.exec_one_test(retry_name, test_method, new_record)\n if not should_retry(previous_record):\n break", "docstring": "Executes one test and retry the test if needed.\n\nRepeatedly execute a test case until it passes or the maximum count of\niteration has been reached.\n\nArgs:\n test_name: string, Name of the test.\n test_method: function, The test method to execute.\n max_count: int, the maximum number of iterations to execute the test for."} +{"repo": "tensorflow", "function": "def batch_shape(self):\n return self.shape[:-2]", "docstring": "`TensorShape` of batch dimensions of this `LinearOperator`.\n\nIf this operator acts like the batch matrix `A` with\n`A.shape = [B1,...,Bb, M, N]`, then this returns\n`TensorShape([B1,...,Bb])`, equivalent to `A.shape[:-2]`\n\nReturns:\n `TensorShape`, statically determined, may be undefined."} +{"repo": "tensorflow", "function": "def find_all_hinted_output_nodes(session=None, graph_def=None):\n if session is not None and graph_def is not None:\n raise ValueError('Provide only one of session and graph_def.')\n hinted_outputs_nodes = []\n if session is not None:\n hints = _find_all_hints_in_nodes(session.graph_def.node)\n elif graph_def is not None:\n hints = _find_all_hints_in_nodes(graph_def.node)\n for hint in hints.values():\n _, output_nodes = hint.flattened_inputs_and_outputs()\n hinted_outputs_nodes.extend(output_nodes)\n return hinted_outputs_nodes", "docstring": "Find all Ophints output nodes in the graph.\n\nThis is used to get all the output nodes those are ophinted, it is important\nfor operation like convert_variables_to_constants keep all ophints structure.\nNote: only one of session or graph_def should be used, not both.\nWhy this can be useful? Some TensorFlow ops (e.g. bidirectional rnn), can\ngenerate multiple outputs for unfused subgraph. If not all output nodes are\nconsumed, graph optimization can potentially drop the unused nodes and cause\nophints in an invalid states (due to missing ophinted output nodes). 
So it's\nimportant for us to find all those hinted output nodes and make sure they're\nnot discarded away.\n\nArgs:\n session: A TensorFlow session that contains the graph to convert.\n graph_def: A graph def that we should convert.\n\nReturns:\n A list of OpHints output nodes.\nRaises:\n ValueError: If both session and graph_def are provided."} +{"repo": "tensorflow", "function": "def counter(start=0, step=1, dtype=dtypes.int64, name=None) -> 'DatasetV2':\n from tensorflow.python.data.ops import counter_op\n return counter_op._counter(start, step, dtype, name=name)", "docstring": "Creates a `Dataset` that counts from `start` in steps of size `step`.\n\nUnlike `tf.data.Dataset.range`, which stops at some ending number,\n`tf.data.Dataset.counter` produces elements indefinitely.\n\n>>> dataset = tf.data.experimental.Counter().take(5)\n>>> [a.item() for a in dataset.as_numpy_iterator()]\n[0, 1, 2, 3, 4]\n>>> dataset.element_spec\nTensorSpec(shape=(), dtype=tf.int64, name=None)\n>>> dataset = tf.data.experimental.Counter(dtype=tf.int32)\n>>> dataset.element_spec\nTensorSpec(shape=(), dtype=tf.int32, name=None)\n>>> dataset = tf.data.experimental.Counter(start=2).take(5)\n>>> [a.item() for a in dataset.as_numpy_iterator()]\n[2, 3, 4, 5, 6]\n>>> dataset = tf.data.experimental.Counter(start=2, step=5).take(5)\n>>> [a.item() for a in dataset.as_numpy_iterator()]\n[2, 7, 12, 17, 22]\n>>> dataset = tf.data.experimental.Counter(start=10, step=-1).take(5)\n>>> [a.item() for a in dataset.as_numpy_iterator()]\n[10, 9, 8, 7, 6]\n\nArgs:\n start: (Optional.) The starting value for the counter. Defaults to 0.\n step: (Optional.) The step size for the counter. Defaults to 1.\n dtype: (Optional.) The data type for counter elements. Defaults to\n `tf.int64`.\n name: (Optional.) 
A name for the tf.data operation.\n\nReturns:\n A `Dataset` of scalar `dtype` elements."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, global_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, LongformerBaseModelOutputWithPooling]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')\n elif input_ids is not None:\n self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError('You have to specify either input_ids or inputs_embeds')\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n if global_attention_mask is not None:\n attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)\n padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id)\n extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)[:, 0, 0, :]\n embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)\n encoder_outputs = self.encoder(embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, padding_len=padding_len, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = encoder_outputs[0]\n pooled_output = self.pooler(sequence_output) if self.pooler is not None else None\n if not return_dict:\n return (sequence_output, pooled_output) + encoder_outputs[1:]\n return LongformerBaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, global_attentions=encoder_outputs.global_attentions)", "docstring": "global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to decide the attention given on each token, local attention or global attention. Tokens with global\n attention attends to all other tokens, and all other tokens attend to them. This is important for\n task-specific finetuning because it makes the model more flexible at representing the task. For example,\n for classification, the token should be given global attention. 
For QA, all question tokens should also\n have global attention. Please refer to the [Longformer paper](https://huggingface.co/papers/2004.05150) for more\n details. Mask values selected in `[0, 1]`:\n\n - 0 for local attention (a sliding window attention),\n - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).\n\nExamples:\n\n```python\n>>> import torch\n>>> from transformers import LongformerModel, AutoTokenizer\n\n>>> model = LongformerModel.from_pretrained(\"allenai/longformer-base-4096\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"allenai/longformer-base-4096\")\n\n>>> SAMPLE_TEXT = \" \".join([\"Hello world! \"] * 1000) # long input document\n>>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1\n\n>>> attention_mask = torch.ones(\n... input_ids.shape, dtype=torch.long, device=input_ids.device\n... ) # initialize to local attention\n>>> global_attention_mask = torch.zeros(\n... input_ids.shape, dtype=torch.long, device=input_ids.device\n... ) # initialize to global attention to be deactivated for all tokens\n>>> global_attention_mask[\n... :,\n... [\n... 1,\n... 4,\n... 21,\n... ],\n... ] = 1 # Set global attention to random tokens for the sake of this example\n>>> # Usually, set global attention based on the task. For example,\n>>> # classification: the token\n>>> # QA: question tokens\n>>> # LM: potentially on the beginning of sentences and paragraphs\n>>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)\n>>> sequence_output = outputs.last_hidden_state\n>>> pooled_output = outputs.pooler_output\n```"} +{"repo": "transformers", "function": "class NllbTokenizer(PreTrainedTokenizer):\n vocab_files_names = VOCAB_FILES_NAMES\n model_input_names = ['input_ids', 'attention_mask']\n prefix_tokens: List[int] = []\n suffix_tokens: List[int] = []\n\n def __init__(self, vocab_file, bos_token='', eos_token='', sep_token='', cls_token='', unk_token='', pad_token='', mask_token='', tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]]=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):\n if additional_special_tokens is None:\n additional_special_tokens = FAIRSEQ_LANGUAGE_CODES\n bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token\n pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token\n eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token\n mask_token = AddedToken(mask_token, normalized=True, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n self.legacy_behaviour = legacy_behaviour\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.Load(str(vocab_file))\n self.vocab_file = vocab_file\n self._added_tokens_decoder = {0: bos_token, 1: pad_token, 2: eos_token, 3: unk_token}\n self.fairseq_offset = 1\n self.sp_model_size = len(self.sp_model)\n super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, 
additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=legacy_behaviour, **kwargs)\n self._src_lang = src_lang if src_lang is not None else 'eng_Latn'\n self.cur_lang_code_id = self.convert_tokens_to_ids(self._src_lang)\n self.tgt_lang = tgt_lang\n self.set_src_lang_special_tokens(self._src_lang)\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state['sp_model'] = None\n state['sp_model_proto'] = self.sp_model.serialized_model_proto()\n return state\n\n def __setstate__(self, d):\n self.__dict__ = d\n if not hasattr(self, 'sp_model_kwargs'):\n self.sp_model_kwargs = {}\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.LoadFromSerializedProto(self.sp_model_proto)\n\n @property\n def vocab_size(self):\n return len(self.sp_model) + self.fairseq_offset\n\n @property\n def src_lang(self) -> str:\n return self._src_lang\n\n @src_lang.setter\n def src_lang(self, new_src_lang: str) -> None:\n self._src_lang = new_src_lang\n self.set_src_lang_special_tokens(self._src_lang)\n\n def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n prefix_ones = [1] * len(self.prefix_tokens)\n suffix_ones = [1] * len(self.suffix_tokens)\n if token_ids_1 is None:\n return prefix_ones + [0] * len(token_ids_0) + suffix_ones\n return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones\n\n def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:\n\n - `input_ids` (for encoder) `X [eos, src_lang_code]`\n - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\n BOS is never used. 
Pairs of sequences are not the expected use case, but they will be handled without a\n separator.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n \"\"\"\n if token_ids_1 is None:\n return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens\n\n def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n \"\"\"\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. nllb does not\n make use of token type ids, therefore a list of zeros is returned.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of zeros.\n\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]\n\n def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):\n \"\"\"Used by translation pipeline, to prepare inputs for the generate function\"\"\"\n if src_lang is None or tgt_lang is None:\n raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')\n self.src_lang = src_lang\n inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)\n tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)\n inputs['forced_bos_token_id'] = tgt_lang_id\n return inputs\n\n def get_vocab(self):\n vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n def _tokenize(self, text: str) -> List[str]:\n return self.sp_model.encode(text, out_type=str)\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n spm_id = self.sp_model.PieceToId(token)\n return spm_id + self.fairseq_offset if spm_id else self.unk_token_id\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.sp_model.IdToPiece(index - self.fairseq_offset)\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (strings for sub-words) in a single string.\"\"\"\n out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()\n return out_string\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n if not os.path.isdir(save_directory):\n logger.error(f'Vocabulary path ({save_directory}) should be a directory')\n return\n out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n elif not os.path.isfile(self.vocab_file):\n with open(out_vocab_file, 'wb') as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n return (out_vocab_file,)\n\n def prepare_seq2seq_batch(self, src_texts: List[str], 
src_lang: str='eng_Latn', tgt_texts: Optional[List[str]]=None, tgt_lang: str='fra_Latn', **kwargs) -> BatchEncoding:\n self.src_lang = src_lang\n self.tgt_lang = tgt_lang\n return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)\n\n def _switch_to_input_mode(self):\n return self.set_src_lang_special_tokens(self.src_lang)\n\n def _switch_to_target_mode(self):\n return self.set_tgt_lang_special_tokens(self.tgt_lang)\n\n def set_src_lang_special_tokens(self, src_lang) -> None:\n \"\"\"Reset the special tokens to the source lang setting.\n - In legacy mode: No prefix and suffix=[eos, src_lang_code].\n - In default mode: Prefix=[src_lang_code], suffix = [eos]\n \"\"\"\n self.cur_lang_code = self.convert_tokens_to_ids(src_lang)\n if self.legacy_behaviour:\n self.prefix_tokens = []\n self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]\n else:\n self.prefix_tokens = [self.cur_lang_code]\n self.suffix_tokens = [self.eos_token_id]\n\n def set_tgt_lang_special_tokens(self, lang: str) -> None:\n \"\"\"Reset the special tokens to the target lang setting.\n - In legacy mode: No prefix and suffix=[eos, tgt_lang_code].\n - In default mode: Prefix=[tgt_lang_code], suffix = [eos]\n \"\"\"\n self.cur_lang_code = self.convert_tokens_to_ids(lang)\n if self.legacy_behaviour:\n self.prefix_tokens = []\n self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]\n else:\n self.prefix_tokens = [self.cur_lang_code]\n self.suffix_tokens = [self.eos_token_id]", "docstring": "Construct an NLLB tokenizer.\n\nAdapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on\n[SentencePiece](https://github.com/google/sentencepiece).\n\nThe tokenization method is ` ` for source language documents, and `\n ` for target language documents.\n\nExamples:\n\n```python\n>>> from transformers import NllbTokenizer\n\n>>> tokenizer = NllbTokenizer.from_pretrained(\n... \"facebook/nllb-200-distilled-600M\", src_lang=\"eng_Latn\", tgt_lang=\"fra_Latn\"\n... )\n>>> example_english_phrase = \" UN Chief Says There Is No Military Solution in Syria\"\n>>> expected_translation_french = \"Le chef de l'ONU affirme qu'il n'y a pas de solution militaire en Syrie.\"\n>>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_french, return_tensors=\"pt\")\n```\n\nArgs:\n vocab_file (`str`):\n Path to the vocabulary file.\n bos_token (`str`, *optional*, defaults to `\"\"`):\n The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.\n\n \n\n When building a sequence using special tokens, this is not the token that is used for the beginning of\n sequence. The token used is the `cls_token`.\n\n \n\n eos_token (`str`, *optional*, defaults to `\"\"`):\n The end of sequence token.\n\n \n\n When building a sequence using special tokens, this is not the token that is used for the end of sequence.\n The token used is the `sep_token`.\n\n \n\n sep_token (`str`, *optional*, defaults to `\"\"`):\n The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for\n sequence classification or for a text and a question for question answering. It is also used as the last\n token of a sequence built with special tokens.\n cls_token (`str`, *optional*, defaults to `\"\"`):\n The classifier token which is used when doing sequence classification (classification of the whole sequence\n instead of per-token classification). 
It is the first token of the sequence when built with special tokens.\n unk_token (`str`, *optional*, defaults to `\"\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n pad_token (`str`, *optional*, defaults to `\"\"`):\n The token used for padding, for example when batching sequences of different lengths.\n mask_token (`str`, *optional*, defaults to `\"\"`):\n The token used for masking values. This is the token used when training this model with masked language\n modeling. This is the token which the model will try to predict.\n tokenizer_file (`str`, *optional*):\n The path to a tokenizer file to use instead of the vocab file.\n src_lang (`str`, *optional*):\n The language to use as source language for translation.\n tgt_lang (`str`, *optional*):\n The language to use as target language for translation.\n sp_model_kwargs (`Dict[str, str]`):\n Additional keyword arguments to pass to the model initialization."} +{"repo": "beam", "function": "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Update an association between a GCP project and a GitHub Enterprise server.\n\nArgs:\n request: (CloudbuildProjectsGithubEnterpriseConfigsPatchRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (Operation) The response message."} +{"repo": "keras", "function": "class PiecewiseConstantDecay(LearningRateSchedule):\n\n def __init__(self, boundaries, values, name='PiecewiseConstant'):\n super().__init__()\n if len(boundaries) != len(values) - 1:\n raise ValueError(f'The length of boundaries should be 1 less than the length of values. Received: boundaries={boundaries} of length {len(boundaries)}, and values={values} of length {len(values)}.')\n self.boundaries = boundaries\n self.values = values\n self.name = name\n\n def __call__(self, step):\n with ops.name_scope(self.name):\n boundaries = [ops.convert_to_tensor(x) for x in self.boundaries]\n values = [ops.convert_to_tensor(x) for x in self.values]\n step = ops.convert_to_tensor(step)\n for i, b in enumerate(boundaries):\n if b.dtype != step.dtype:\n b = ops.cast(b, step.dtype)\n boundaries[i] = b\n result_dtype = values[0].dtype\n result_value = ops.array(0, dtype=result_dtype)\n step_less_than_first_boundary = ops.cast(step <= boundaries[0], result_dtype)\n result_value += step_less_than_first_boundary * values[0]\n step_greater_than_last_boundary = ops.cast(step > boundaries[-1], result_dtype)\n result_value += step_greater_than_last_boundary * values[-1]\n for low, high, value in zip(boundaries[:-1], boundaries[1:], values[1:-1]):\n step_in_range = ops.cast((step > low) & (step <= high), result_dtype)\n result_value += step_in_range * value\n return result_value\n\n def get_config(self):\n return {'boundaries': self.boundaries, 'values': self.values, 'name': self.name}", "docstring": "A `LearningRateSchedule` that uses a piecewise constant decay schedule.\n\nThe function returns a 1-arg callable to compute the piecewise constant\nwhen passed the current optimizer step. 
This can be useful for changing the\nlearning rate value across different invocations of optimizer functions.\n\nExample: use a learning rate that's 1.0 for the first 100001 steps, 0.5\n for the next 10000 steps, and 0.1 for any additional steps.\n\n```python\nstep = ops.array(0)\nboundaries = [100000, 110000]\nvalues = [1.0, 0.5, 0.1]\nlearning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(\n boundaries, values)\n\n# Later, whenever we perform an optimization step, we pass in the step.\nlearning_rate = learning_rate_fn(step)\n```\n\nYou can pass this schedule directly into a `keras.optimizers.Optimizer`\nas the learning rate. The learning rate schedule is also serializable and\ndeserializable using `keras.optimizers.schedules.serialize` and\n`keras.optimizers.schedules.deserialize`.\n\nArgs:\n boundaries: A list of Python numbers with strictly increasing\n entries, and with all elements having the same type as the\n optimizer step.\n values: A list of Python numbers that specifies the values for the\n intervals defined by `boundaries`. It should have one more\n element than `boundaries`, and all elements should have the same\n type.\n name: A string. Optional name of the operation. Defaults to\n `\"PiecewiseConstant\"`.\n\nReturns:\n A 1-arg callable learning rate schedule that takes the current optimizer\n step and outputs the decayed learning rate, a scalar tensor of the\n same type as the boundary tensors.\n\n The output of the 1-arg function that takes the `step`\n is `values[0]` when `step <= boundaries[0]`,\n `values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`,\n ..., and `values[-1]` when `step > boundaries[-1]`.\n\n\nRaises:\n ValueError: if the number of elements in the `boundaries` and `values`\n lists do not match."} +{"repo": "tensorflow", "function": "def _ScaleAndTranslateGrad(op, grad):\n grad0 = gen_image_ops.scale_and_translate_grad(grad, op.inputs[0], op.inputs[2], op.inputs[3], kernel_type=op.get_attr('kernel_type'), antialias=op.get_attr('antialias'))\n return [grad0, None, None, None]", "docstring": "The derivatives for ScaleAndTranslate transformation op.\n\nArgs:\n op: The ScaleAndTranslate op.\n grad: The tensor representing the gradient w.r.t. the output.\n\nReturns:\n The gradients w.r.t. 
the input."} +{"repo": "keras", "function": "def get_uid(prefix=''):\n object_name_uids = global_state.get_global_attribute('object_name_uids', default=collections.defaultdict(int), set_to_default=True)\n object_name_uids[prefix] += 1\n return object_name_uids[prefix]", "docstring": "Associates a string prefix with an integer counter.\n\nArgs:\n prefix: String prefix to index.\n\nReturns:\n Unique integer ID.\n\nExample:\n\n>>> get_uid('dense')\n1\n>>> get_uid('dense')\n2"} +{"repo": "mobly", "function": "def exec_one_test(self, test_name, test_method, record=None):\n tr_record = record or records.TestResultRecord(test_name, self.TAG)\n tr_record.uid = getattr(test_method, 'uid', None)\n tr_record.test_begin()\n self.current_test_info = runtime_test_info.RuntimeTestInfo(test_name, self.log_path, tr_record)\n expects.recorder.reset_internal_states(tr_record)\n logging.info('%s %s', TEST_CASE_TOKEN, test_name)\n teardown_test_failed = False\n try:\n try:\n try:\n self._setup_test(test_name)\n except signals.TestFailure as e:\n _, _, traceback = sys.exc_info()\n raise signals.TestError(e.details, e.extras).with_traceback(traceback)\n test_method()\n except (signals.TestPass, signals.TestAbortSignal, signals.TestSkip):\n raise\n except Exception:\n logging.exception('Exception occurred in %s.', self.current_test_info.name)\n raise\n finally:\n before_count = expects.recorder.error_count\n try:\n self._teardown_test(test_name)\n except signals.TestAbortSignal:\n raise\n except Exception as e:\n logging.exception('Exception occurred in %s of %s.', STAGE_NAME_TEARDOWN_TEST, self.current_test_info.name)\n tr_record.test_error()\n tr_record.add_error(STAGE_NAME_TEARDOWN_TEST, e)\n teardown_test_failed = True\n else:\n if before_count < expects.recorder.error_count:\n tr_record.test_error()\n teardown_test_failed = True\n except (signals.TestFailure, AssertionError) as e:\n tr_record.test_fail(e)\n except signals.TestSkip as e:\n tr_record.test_skip(e)\n except signals.TestAbortSignal as e:\n tr_record.test_fail(e)\n raise\n except signals.TestPass as e:\n tr_record.test_pass(e)\n except Exception as e:\n tr_record.test_error(e)\n else:\n if expects.recorder.has_error and (not teardown_test_failed):\n tr_record.test_fail()\n elif not teardown_test_failed:\n tr_record.test_pass()\n finally:\n tr_record.update_record()\n try:\n if tr_record.result in (records.TestResultEnums.TEST_RESULT_ERROR, records.TestResultEnums.TEST_RESULT_FAIL):\n self._exec_procedure_func(self._on_fail, tr_record)\n elif tr_record.result == records.TestResultEnums.TEST_RESULT_PASS:\n self._exec_procedure_func(self._on_pass, tr_record)\n elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:\n self._exec_procedure_func(self._on_skip, tr_record)\n finally:\n logging.info(RESULT_LINE_TEMPLATE, tr_record.test_name, tr_record.result)\n self.results.add_record(tr_record)\n self.summary_writer.dump(tr_record.to_dict(), records.TestSummaryEntryType.RECORD)\n self.current_test_info = None\n return tr_record", "docstring": "Executes one test and update test results.\n\nExecutes setup_test, the test method, and teardown_test; then creates a\nrecords.TestResultRecord object with the execution information and adds\nthe record to the test class's test results.\n\nArgs:\n test_name: string, Name of the test.\n test_method: function, The test method to execute.\n record: records.TestResultRecord, optional arg for injecting a record\n object to use for this test execution. If not set, a new one is created\n created. 
This is meant for passing information between consecutive test\n case execution for retry purposes. Do NOT abuse this for \"magical\"\n features.\n\nReturns:\n TestResultRecord, the test result record object of the test execution.\n This object is strictly for read-only purposes. Modifying this record\n will not change what is reported in the test run's summary yaml file."} +{"repo": "transformers", "function": "def apply_rotary_unpadded(qkv, cos, sin, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None):\n return ApplyRotaryEmbUnpad.apply(qkv, cos, sin, cu_seqlens, max_seqlen)", "docstring": "Arguments:\n qkv: (total_nnz, 3, nheads, headdim) - input tensor for packed QKV.\n cos, sin: (seqlen_rotary, rotary_dim / 2)\n interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead\n of 1st half and 2nd half (GPT-NeoX style).\n inplace: if True, apply rotary embedding in-place.\n seqlen_offsets: (batch_size,) or int. Each sequence in x is shifted by this amount.\n Most commonly used in inference when we have KV cache.\n cu_seqlens: (batch + 1,) or None\n max_seqlen: int\nReturn:\n out: (total_nnz, dim)\nrotary_dim must be <= headdim\nApply rotary embedding to the first rotary_dim of x."} +{"repo": "beam", "function": "def __init__(self, topic_path, add_uuids=None, expansion_service=None):\n if add_uuids is None:\n add_uuids = False\n if expansion_service is None:\n expansion_service = _default_io_expansion_service()\n super().__init__('beam:transform:org.apache.beam:pubsublite_write:v1', NamedTupleBasedPayloadBuilder(_WriteSchema(topic_path=topic_path, add_uuids=add_uuids)), expansion_service)", "docstring": "Initializes a write operation to Pub/Sub Lite, writing the serialized bytes\nof PubSubMessage protos.\n\nArgs:\n topic_path: A Pub/Sub Lite Topic path.\n add_uuids: Whether to add uuids to the 'x-goog-pubsublite-dataflow-uuid'\n uuid attribute."} +{"repo": "sprockets", "function": "def HasDefinition(self, name):\n return name in self.consts or name in self.roles or name in self.states or (name in self.qualifiers) or (name in self.messages) or (name in self.events) or (name in self.transitions)", "docstring": "Whether this module has a named object |name|.\n\nArgs:\n name: The string name of the object to look for.\n\nReturns:\n True if this module has an object with name |name|, False otherwise."} +{"repo": "tensorflow", "function": "def get_optimizer_experimental_options(self):\n rewrite_options = self.config.graph_options.rewrite_options\n options = {}\n\n def rewriter_toggle(option):\n attr = getattr(rewrite_options, option)\n if attr != 0:\n options[option] = attr == rewriter_config_pb2.RewriterConfig.ON\n\n def rewriter_bool(option):\n options[option] = getattr(rewrite_options, option)\n rewriter_toggle('layout_optimizer')\n rewriter_toggle('constant_folding')\n rewriter_toggle('shape_optimization')\n rewriter_toggle('remapping')\n rewriter_toggle('arithmetic_optimization')\n rewriter_toggle('dependency_optimization')\n rewriter_toggle('loop_optimization')\n rewriter_toggle('function_optimization')\n rewriter_toggle('debug_stripper')\n rewriter_bool('disable_model_pruning')\n rewriter_toggle('scoped_allocator_optimization')\n rewriter_toggle('pin_to_host_optimization')\n rewriter_toggle('implementation_selector')\n rewriter_toggle('auto_mixed_precision')\n rewriter_toggle('use_plugin_optimizers')\n rewriter_bool('disable_meta_optimizer')\n rewriter_toggle('auto_mixed_precision_onednn_bfloat16')\n rewriter_toggle('auto_mixed_precision_mkl')\n 
if rewrite_options.min_graph_nodes != 0:\n options['min_graph_nodes'] = rewrite_options.min_graph_nodes\n return options", "docstring": "Get experimental options for the optimizer.\n\nReturns:\n Dictionary of current option values"} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> Union[Tuple, BaseModelOutputWithPoolingAndProjection]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.roberta(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = outputs[0]\n sequence_output = self.pre_LN(sequence_output)\n projection_state = self.transformation(sequence_output)\n pooler_output = projection_state[:, 0]\n if not return_dict:\n return (projection_state, pooler_output) + outputs[2:4]\n return BaseModelOutputWithPoolingAndProjection(last_hidden_state=projection_state, pooler_output=pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "Examples:\n\n```python\n>>> from transformers import AutoProcessor, AltCLIPTextModel\n\n>>> model = AltCLIPTextModel.from_pretrained(\"BAAI/AltCLIP\")\n>>> processor = AutoProcessor.from_pretrained(\"BAAI/AltCLIP\")\n\n>>> texts = [\"it's a cat\", \"it's a dog\"]\n\n>>> inputs = processor(text=texts, padding=True, return_tensors=\"pt\")\n\n>>> outputs = model(**inputs)\n>>> last_hidden_state = outputs.last_hidden_state\n>>> pooled_output = outputs.pooler_output # pooled CLS states\n```"} +{"repo": "tensorflow", "function": "def shutdown_tpu_system(cluster_resolver=None):\n tpu_strategy_util.shutdown_tpu_system_impl(cluster_resolver, TPUClusterResolver)", "docstring": "Shuts down the TPU devices.\n\nThis will clear all caches, even those that are maintained through sequential\ncalls to tf.tpu.experimental.initialize_tpu_system, such as the compilation\ncache.\n\nArgs:\n cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,\n which provides information about the TPU cluster.\n\nRaises:\n RuntimeError: If no TPU devices found for eager execution or if run in a\n tf.function."} +{"repo": "transformers", "function": "class PromptDepthAnythingPreActResidualLayer(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.activation1 = nn.ReLU()\n self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True)\n self.activation2 = nn.ReLU()\n self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=True)\n\n def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:\n residual = hidden_state\n hidden_state = self.activation1(hidden_state)\n hidden_state = self.convolution1(hidden_state)\n hidden_state = 
self.activation2(hidden_state)\n hidden_state = self.convolution2(hidden_state)\n return hidden_state + residual", "docstring": "ResidualConvUnit, pre-activate residual unit.\n\nArgs:\n config (`[PromptDepthAnythingConfig]`):\n Model configuration class defining the model architecture."} +{"repo": "transformers", "function": "def __call__(self, text: Union[TextInput, List[TextInput]], text_pair: Optional[Union[TextInput, List[TextInput]]]=None, entity_spans: Optional[Union[EntitySpanInput, List[EntitySpanInput]]]=None, entity_spans_pair: Optional[Union[EntitySpanInput, List[EntitySpanInput]]]=None, entities: Optional[Union[EntityInput, List[EntityInput]]]=None, entities_pair: Optional[Union[EntityInput, List[EntityInput]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, max_entity_length: Optional[int]=None, stride: int=0, is_split_into_words: Optional[bool]=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n is_valid_single_text = isinstance(text, str)\n is_valid_batch_text = isinstance(text, (list, tuple)) and (len(text) == 0 or isinstance(text[0], str))\n if not (is_valid_single_text or is_valid_batch_text):\n raise ValueError('text input must be of type `str` (single example) or `List[str]` (batch).')\n is_valid_single_text_pair = isinstance(text_pair, str)\n is_valid_batch_text_pair = isinstance(text_pair, (list, tuple)) and (len(text_pair) == 0 or isinstance(text_pair[0], str))\n if not (text_pair is None or is_valid_single_text_pair or is_valid_batch_text_pair):\n raise ValueError('text_pair input must be of type `str` (single example) or `List[str]` (batch).')\n is_batched = bool(isinstance(text, (list, tuple)))\n if is_batched:\n batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text\n if entities is None:\n batch_entities_or_entities_pairs = None\n else:\n batch_entities_or_entities_pairs = list(zip(entities, entities_pair)) if entities_pair is not None else entities\n if entity_spans is None:\n batch_entity_spans_or_entity_spans_pairs = None\n else:\n batch_entity_spans_or_entity_spans_pairs = list(zip(entity_spans, entity_spans_pair)) if entity_spans_pair is not None else entity_spans\n return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, batch_entity_spans_or_entity_spans_pairs=batch_entity_spans_or_entity_spans_pairs, batch_entities_or_entities_pairs=batch_entities_or_entities_pairs, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, max_entity_length=max_entity_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n else:\n return self.encode_plus(text=text, text_pair=text_pair, 
entity_spans=entity_spans, entity_spans_pair=entity_spans_pair, entities=entities, entities_pair=entities_pair, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, max_entity_length=max_entity_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of\nsequences, depending on the task you want to prepare them for.\n\nArgs:\n text (`str`, `List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this\n tokenizer does not support tokenization based on pretokenized strings.\n text_pair (`str`, `List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence must be a string. Note that this\n tokenizer does not support tokenization based on pretokenized strings.\n entity_spans (`List[Tuple[int, int]]`, `List[List[Tuple[int, int]]]`, *optional*):\n The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each\n with two integers denoting character-based start and end positions of entities. If you specify\n `\"entity_classification\"` or `\"entity_pair_classification\"` as the `task` argument in the constructor,\n the length of each sequence must be 1 or 2, respectively. If you specify `entities`, the length of each\n sequence must be equal to the length of each sequence of `entities`.\n entity_spans_pair (`List[Tuple[int, int]]`, `List[List[Tuple[int, int]]]`, *optional*):\n The sequence or batch of sequences of entity spans to be encoded. Each sequence consists of tuples each\n with two integers denoting character-based start and end positions of entities. If you specify the\n `task` argument in the constructor, this argument is ignored. If you specify `entities_pair`, the\n length of each sequence must be equal to the length of each sequence of `entities_pair`.\n entities (`List[str]`, `List[List[str]]`, *optional*):\n The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings\n representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los\n Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of\n each sequence must be equal to the length of each sequence of `entity_spans`. If you specify\n `entity_spans` without specifying this argument, the entity sequence or the batch of entity sequences\n is automatically constructed by filling it with the [MASK] entity.\n entities_pair (`List[str]`, `List[List[str]]`, *optional*):\n The sequence or batch of sequences of entities to be encoded. Each sequence consists of strings\n representing entities, i.e., special entities (e.g., [MASK]) or entity titles of Wikipedia (e.g., Los\n Angeles). This argument is ignored if you specify the `task` argument in the constructor. The length of\n each sequence must be equal to the length of each sequence of `entity_spans_pair`. 
If you specify\n `entity_spans_pair` without specifying this argument, the entity sequence or the batch of entity\n sequences is automatically constructed by filling it with the [MASK] entity.\n max_entity_length (`int`, *optional*):\n The maximum length of `entity_ids`."} +{"repo": "transformers", "function": "def bpe_decode(self, sequences):\n return self.bpe_tokenizer.batch_decode(sequences)", "docstring": "Convert a list of lists of bpe token ids into a list of strings by calling bpe tokenizer.\n\nArgs:\n sequences (`torch.Tensor`):\n List of tokenized input ids.\nReturns:\n `List[str]`: The list of bpe decoded sentences."} +{"repo": "tensorflow", "function": "class SeparableConv1D(SeparableConv):\n\n def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, depth_multiplier=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', pointwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, pointwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, pointwise_constraint=None, bias_constraint=None, **kwargs):\n super(SeparableConv1D, self).__init__(rank=1, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, depth_multiplier=depth_multiplier, activation=activations.get(activation), use_bias=use_bias, depthwise_initializer=initializers.get(depthwise_initializer), pointwise_initializer=initializers.get(pointwise_initializer), bias_initializer=initializers.get(bias_initializer), depthwise_regularizer=regularizers.get(depthwise_regularizer), pointwise_regularizer=regularizers.get(pointwise_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), depthwise_constraint=constraints.get(depthwise_constraint), pointwise_constraint=constraints.get(pointwise_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs)\n\n def call(self, inputs):\n if self.padding == 'causal':\n inputs = array_ops.pad(inputs, self._compute_causal_padding(inputs))\n if self.data_format == 'channels_last':\n strides = (1,) + self.strides * 2 + (1,)\n spatial_start_dim = 1\n else:\n strides = (1, 1) + self.strides * 2\n spatial_start_dim = 2\n inputs = array_ops.expand_dims(inputs, spatial_start_dim)\n depthwise_kernel = array_ops.expand_dims(self.depthwise_kernel, 0)\n pointwise_kernel = array_ops.expand_dims(self.pointwise_kernel, 0)\n dilation_rate = (1,) + self.dilation_rate\n if self.padding == 'causal':\n op_padding = 'valid'\n else:\n op_padding = self.padding\n outputs = nn.separable_conv2d(inputs, depthwise_kernel, pointwise_kernel, strides=strides, padding=op_padding.upper(), rate=dilation_rate, data_format=conv_utils.convert_data_format(self.data_format, ndim=4))\n if self.use_bias:\n outputs = nn.bias_add(outputs, self.bias, data_format=conv_utils.convert_data_format(self.data_format, ndim=4))\n outputs = array_ops.squeeze(outputs, [spatial_start_dim])\n if self.activation is not None:\n return self.activation(outputs)\n return outputs", "docstring": "Depthwise separable 1D convolution.\n\nThis layer performs a depthwise convolution that acts separately on\nchannels, followed by a pointwise convolution that mixes channels.\nIf `use_bias` is True and a bias initializer is provided,\nit adds a bias vector to the output.\nIt then optionally applies an activation function to produce the final output.\n\nArgs:\n 
filters: Integer, the dimensionality of the output space (i.e. the number\n of filters in the convolution).\n kernel_size: A single integer specifying the spatial\n dimensions of the filters.\n strides: A single integer specifying the strides\n of the convolution.\n Specifying any `stride` value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"`, `\"same\"`, or `\"causal\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding with zeros evenly\n to the left/right or up/down of the input such that output has the same\n height/width dimension as the input. `\"causal\"` results in causal\n (dilated) convolutions, e.g. `output[t]` does not depend on `input[t+1:]`.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch_size, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch_size, channels, length)`.\n dilation_rate: A single integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n depth_multiplier: The number of depthwise convolution output channels for\n each input channel. The total number of depthwise convolution output\n channels will be equal to `num_filters_in * depth_multiplier`.\n activation: Activation function to use.\n If you don't specify anything, no activation is applied (\n see `keras.activations`).\n use_bias: Boolean, whether the layer uses a bias.\n depthwise_initializer: An initializer for the depthwise convolution kernel (\n see `keras.initializers`). If None, then the default initializer (\n 'glorot_uniform') will be used.\n pointwise_initializer: An initializer for the pointwise convolution kernel (\n see `keras.initializers`). If None, then the default initializer \n ('glorot_uniform') will be used.\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer ('zeros') will be used (see `keras.initializers`).\n depthwise_regularizer: Optional regularizer for the depthwise\n convolution kernel (see `keras.regularizers`).\n pointwise_regularizer: Optional regularizer for the pointwise\n convolution kernel (see `keras.regularizers`).\n bias_regularizer: Optional regularizer for the bias vector (\n see `keras.regularizers`).\n activity_regularizer: Optional regularizer function for the output (\n see `keras.regularizers`).\n depthwise_constraint: Optional projection function to be applied to the\n depthwise kernel after being updated by an `Optimizer` (e.g. used for\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). 
Constraints are\n not safe to use when doing asynchronous distributed training (\n see `keras.constraints`).\n pointwise_constraint: Optional projection function to be applied to the\n pointwise kernel after being updated by an `Optimizer` (\n see `keras.constraints`).\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer` (\n see `keras.constraints`).\n trainable: Boolean, if `True` the weights of this layer will be marked as\n trainable (and listed in `layer.trainable_weights`).\n\nInput shape:\n 3D tensor with shape:\n `(batch_size, channels, steps)` if data_format='channels_first'\n or 3D tensor with shape:\n `(batch_size, steps, channels)` if data_format='channels_last'.\n\nOutput shape:\n 3D tensor with shape:\n `(batch_size, filters, new_steps)` if data_format='channels_first'\n or 3D tensor with shape:\n `(batch_size, new_steps, filters)` if data_format='channels_last'.\n `new_steps` value might have changed due to padding or strides.\n\nReturns:\n A tensor of rank 3 representing\n `activation(separableconv1d(inputs, kernel) + bias)`.\n\nRaises:\n ValueError: when both `strides` > 1 and `dilation_rate` > 1."} +{"repo": "tensorflow", "function": "def run_independently(self, op):\n self._independent_ops.append(op)\n op._set_attr('_independent_side_effects', attr_value_pb2.AttrValue(b=True))", "docstring": "Marks the given op as independent.\n\nOverrides any other rule for the op.\n\nIndependent ops are guaranteed to execute before the return values, but\nare allowed to run in parallel with everything else. Use in programs which\ncan guarantee that an op has side effects that don't affect any other op.\n\nArgs:\n op: An operation"} +{"repo": "transformers", "function": "def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n batch_size, seq_length, three_times_hidden_size = fused_qkv.shape\n fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)\n return (fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :])", "docstring": "Split the last dimension into (num_heads, head_dim) without making any copies; the results share the same memory\nstorage as `fused_qkv`\n\nArgs:\n fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]\n\nReturns:\n query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]\n value: [batch_size, seq_length, num_heads, head_dim]"} +{"repo": "tensorflow", "function": "def call(self, inputs, **kwargs):\n return inputs", "docstring": "This is where the layer's logic lives.\n\nArgs:\n inputs: Input tensor, or list/tuple of input tensors.\n **kwargs: Additional keyword arguments.\n\nReturns:\n A tensor or list/tuple of tensors."} +{"repo": "transformers", "function": "def run_generate(verbose=True):\n parser = argparse.ArgumentParser()\n parser.add_argument('model_name', type=str, help='like facebook/bart-large-cnn,google-t5/t5-base, etc.')\n parser.add_argument('input_path', type=str, help='like cnn_dm/test.source')\n parser.add_argument('save_path', type=str, help='where to save summaries')\n parser.add_argument('--reference_path', type=str, required=False, help='like cnn_dm/test.target')\n parser.add_argument('--score_path', type=str, required=False, default='metrics.json', help='where to save metrics')\n parser.add_argument('--device', type=str, required=False, default=DEFAULT_DEVICE, help='cuda, cuda:1, cpu etc.')\n parser.add_argument('--prefix', type=str,
required=False, default=None, help='will be added to the beginning of src examples')\n parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')\n parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')\n parser.add_argument('--n_obs', type=int, default=-1, required=False, help='How many observations. Defaults to all.')\n parser.add_argument('--fp16', action='store_true')\n parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results')\n parser.add_argument('--info', nargs='?', type=str, const=datetime_now(), help=\"use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g. lang=en-ru. If no value is passed, the current datetime string will be used.\")\n args, rest = parser.parse_known_args()\n parsed_args = parse_numeric_n_bool_cl_kwargs(rest)\n if parsed_args and verbose:\n print(f'parsed the following generate kwargs: {parsed_args}')\n examples = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]\n if args.n_obs > 0:\n examples = examples[:args.n_obs]\n Path(args.save_path).parent.mkdir(exist_ok=True)\n if args.reference_path is None and Path(args.score_path).exists():\n warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.')\n if args.device == 'cpu' and args.fp16:\n raise ValueError(\"Can't mix --fp16 and --device cpu\")\n runtime_metrics = generate_summaries_or_translations(examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args)\n if args.reference_path is None:\n return {}\n score_fn = calculate_bleu if 'translation' in args.task else calculate_rouge\n output_lns = [x.rstrip() for x in open(args.save_path).readlines()]\n reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][:len(output_lns)]\n scores: dict = score_fn(output_lns, reference_lns)\n scores.update(runtime_metrics)\n if args.dump_args:\n scores.update(parsed_args)\n if args.info:\n scores['info'] = args.info\n if verbose:\n print(scores)\n if args.score_path is not None:\n json.dump(scores, open(args.score_path, 'w'))\n return scores", "docstring": "Takes input text, generates output, and then using reference calculates the BLEU scores.\n\nThe results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed.\n\nArgs:\n verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout\n\nReturns:\n a tuple: ``(scores, params}``\n - ``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}``\n - ``params``: a dict of custom params, e.g. ``{'num_beams': 5, 'length_penalty': 0.8}``"} +{"repo": "tensorflow", "function": "def recover_last_checkpoints(self, checkpoint_paths):\n checkpoints_with_mtimes = []\n for checkpoint_path in checkpoint_paths:\n try:\n mtime = checkpoint_management.get_checkpoint_mtimes([checkpoint_path])\n except errors.NotFoundError:\n continue\n if mtime:\n checkpoints_with_mtimes.append((checkpoint_path, mtime[0]))\n self.set_last_checkpoints_with_time(checkpoints_with_mtimes)", "docstring": "Recovers the internal saver state after a crash.\n\nThis method is useful for recovering the \"self._last_checkpoints\" state.\n\nGlobs for the checkpoints pointed to by `checkpoint_paths`. 
If the files\nexist, use their mtime as the checkpoint timestamp.\n\nArgs:\n checkpoint_paths: a list of checkpoint paths."} +{"repo": "keras", "function": "def _compute_sequence_length_from_mask(mask, batch_first):\n timestep_index = 0 if not batch_first else 1\n return torch.sum(mask.int(), dim=timestep_index)", "docstring": "Calculate the sequence length tensor (1-D) based on the masking tensor.\n\nThe masking tensor is a 2D boolean tensor with shape [batch, timestep]. For\nany timestep that should be masked, the corresponding field will be False.\nConsider the following example:\n a = [[True, True, False, False]\n [True, True, True, False]]\nIt is a (2, 4) tensor, and the corresponding sequence length result should\nbe 1D tensor with value [2, 3]. Note that the masking tensor must be right\npadded that could be checked by, e.g., `is_sequence_right_padded()`.\n\nArgs:\n mask: Boolean tensor with shape [batch, timestep] or [timestep, batch]\n if time_major=True.\n time_major: Boolean, which indicates whether the mask is time major or\n batch major.\n\nReturns:\n sequence_length: 1D int32 tensor."} +{"repo": "transformers", "function": "class DataCollatorWithPadding:\n tokenizer: PreTrainedTokenizerBase\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n return_tensors: str = 'pt'\n\n def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]:\n batch = pad_without_fast_tokenizer_warning(self.tokenizer, features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors)\n if 'label' in batch:\n batch['labels'] = batch['label']\n del batch['label']\n if 'label_ids' in batch:\n batch['labels'] = batch['label_ids']\n del batch['label_ids']\n return batch", "docstring": "Data collator that will dynamically pad the inputs received.\n\nArgs:\n tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):\n The tokenizer used for encoding the data.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single\n sequence is provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (`int`, *optional*):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.0 (Volta).\n return_tensors (`str`, *optional*, defaults to `\"pt\"`):\n The type of Tensor to return. 
Allowable values are \"np\", \"pt\" and \"tf\"."} +{"repo": "transformers", "function": "class Qwen2_5OmniAudioEncoderConfig(PretrainedConfig):\n model_type = 'qwen2_5_omni_audio_encoder'\n\n def __init__(self, num_mel_bins=128, encoder_layers=32, encoder_attention_heads=20, encoder_ffn_dim=5120, d_model=1280, dropout=0, attention_dropout=0, activation_function='gelu', activation_dropout=0, scale_embedding=False, initializer_range=0.02, max_source_positions=1500, n_window=100, output_dim=3584, **kwargs):\n super().__init__(**kwargs)\n self.num_mel_bins = num_mel_bins\n self.d_model = d_model\n self.encoder_layers = encoder_layers\n self.encoder_attention_heads = encoder_attention_heads\n self.encoder_ffn_dim = encoder_ffn_dim\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation_function = activation_function\n self.activation_dropout = activation_dropout\n self.num_hidden_layers = encoder_layers\n self.initializer_range = initializer_range\n self.scale_embedding = scale_embedding\n self.max_source_positions = max_source_positions\n self.n_window = n_window\n self.output_dim = output_dim", "docstring": "This is the configuration class to store the configuration of a [`Qwen2_5OmniAudioEncoder`]. It is used to instantiate a\nQwen2.5-Omni-Thinker audio encoder according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the audio encoder of the Qwen2-Audio\narchitecture.\n\ne.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B)\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n num_mel_bins (`int`, *optional*, defaults to 128):\n Number of mel features used per input features. Should correspond to the value used in the\n `Qwen2_5OmniProcessor` class.\n encoder_layers (`int`, *optional*, defaults to 32):\n Number of encoder layers.\n encoder_attention_heads (`int`, *optional*, defaults to 20):\n Number of attention heads for each attention layer in the Transformer encoder.\n encoder_ffn_dim (`int`, *optional*, defaults to 5120):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in encoder.\n d_model (`int`, *optional*, defaults to 1280):\n Dimensionality of the layers.\n dropout (`float`, *optional*, defaults to 0.0):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n activation_function (`str`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n activation_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for activations inside the fully connected layer.\n scale_embedding (`bool`, *optional*, defaults to `False`):\n Scale embeddings by dividing by sqrt(d_model).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n max_source_positions (`int`, *optional*, defaults to 1500):\n The maximum sequence length of log-mel filter-bank features that this model might ever be used with.\n n_window (`int`, *optional*, defaults to 100):\n The chunk for conv and flash attn in AudioEncoder.\n output_dim (`int`, *optional*, defaults to 3584):\n The output dimension of AudioEncoder.\n\nExample:\n\n```python\n>>> from transformers import Qwen2_5OmniAudioEncoderConfig, Qwen2_5OmniAudioEncoder\n\n>>> # Initializing a Qwen2_5OmniAudioEncoderConfig\n>>> configuration = Qwen2_5OmniAudioEncoderConfig()\n\n>>> # Initializing a Qwen2_5OmniAudioEncoder (with random weights)\n>>> model = Qwen2_5OmniAudioEncoder(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def _BaseFusedBatchNormGrad(op: ops.Operation, version, *grad):\n x = op.inputs[0]\n grad_y = grad[0]\n scale = op.inputs[1]\n epsilon = op.get_attr('epsilon')\n data_format = op.get_attr('data_format')\n is_training = op.get_attr('is_training')\n if version == 2:\n grad_fun = gen_nn_ops.fused_batch_norm_grad_v3\n elif version == 1:\n grad_fun = gen_nn_ops.fused_batch_norm_grad_v2\n else:\n grad_fun = gen_nn_ops.fused_batch_norm_grad\n if is_training:\n args = {'y_backprop': grad_y, 'x': x, 'scale': scale, 'reserve_space_1': op.outputs[3], 'reserve_space_2': op.outputs[4], 'epsilon': epsilon, 'data_format': data_format, 'is_training': is_training}\n if version == 2:\n args['reserve_space_3'] = op.outputs[5]\n dx, dscale, doffset, _, _ = grad_fun(**args)\n else:\n pop_mean = op.inputs[3]\n pop_var = op.inputs[4]\n if data_format == b'NCHW':\n x = array_ops.transpose(x, [0, 2, 3, 1])\n grad_y = array_ops.transpose(grad_y, [0, 2, 3, 1])\n elif data_format == b'NCDHW':\n x = array_ops.transpose(x, [0, 2, 3, 4, 1])\n grad_y = array_ops.transpose(grad_y, [0, 2, 3, 4, 1])\n target_data_format = 'NHWC' if data_format in (b'NCHW', b'NHWC') else 'NDHWC'\n args = {'y_backprop': grad_y, 'x': x, 'scale': scale, 'reserve_space_1': pop_mean, 'reserve_space_2': pop_var, 'epsilon': epsilon, 'data_format': target_data_format, 'is_training': is_training}\n if version == 2:\n args['reserve_space_3'] = op.outputs[5]\n dx, dscale, doffset, _, _ = grad_fun(**args)\n if data_format == b'NCHW':\n dx = array_ops.transpose(dx, [0, 3, 1, 2])\n elif data_format == b'NCDHW':\n dx = array_ops.transpose(dx, [0, 4, 1, 2, 3])\n return (dx, dscale, doffset, None, None)", "docstring": "Return the gradients for the 3 inputs of BatchNorm.\n\nArgs:\n op: The BatchNormOp for which we need to compute gradients.\n version: Integer indicating which version to use of the fused batch\n norm gradient.\n *grad: An argument list for tensors of gradients wrt the outputs\n with grad[0] as grad_y.\n\nReturns:\n grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *\n [grad_y - mean(grad_y) - (x - mean(x)) *\n mean(grad_y * (x - mean(x))) / (variance + epsilon)]\n in training mode; grad_y * scale * rsqrt(pop_variance + epsilon)\n in freeze mode.\n\n
grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *\n rsqrt(variance + epsilon)) in training mode;\n sum(grad_y * (x - pop_mean) * rsqrt(pop_variance + epsilon))\n in freeze mode.\n\n grad_offset: gradient for offset, which is sum(grad_y) in training mode;\n sum(grad_y) in freeze mode."} +{"repo": "tensorflow", "function": "def predict_on_batch(self, x):\n self._check_call_args('predict_on_batch')\n _disallow_inside_tf_function('predict_on_batch')\n with self.distribute_strategy.scope():\n iterator = data_adapter.single_batch_iterator(self.distribute_strategy, x)\n self.predict_function = self.make_predict_function()\n outputs = self.predict_function(iterator)\n return tf_utils.sync_to_numpy_or_python_type(outputs)", "docstring": "Returns predictions for a single batch of samples.\n\nArgs:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays (in case the\n model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors (in case the model has\n multiple inputs).\n\nReturns:\n Numpy array(s) of predictions.\n\nRaises:\n RuntimeError: If `model.predict_on_batch` is wrapped in `tf.function`.\n ValueError: In case of mismatch between given number of inputs and\n expectations of the model."} +{"repo": "pyglove", "function": "def assert_eventual(self, func, required, allowed, timeout_secs=300.0):\n required = set(required)\n assert required\n seen = set()\n start_time = time.time()\n while timeout_secs is None or time.time() - start_time < timeout_secs:\n if seen == required:\n return\n value = func()\n if value not in allowed:\n self.fail(msg=f'Disallowed value: {value}.')\n if value in required:\n seen.add(value)\n missing = [v for v in required if v not in seen]\n self.fail(msg=f'Timed out. Missing values: {str([str(v) for v in missing])}.')", "docstring": "Tests that calls to the given function meet required and allowed values.\n\nArgs:\n func: function to test.\n required: iterable of required values. Must be hashable and non-empty.\n allowed: iterable of allowed values. 
Must be hashable and non-empty.\n timeout_secs: fails if more than this time is required."} +{"repo": "transformers", "function": "def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):\n cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)\n model = AutoModelForSeq2SeqLM.from_config(cfg)\n model.save_pretrained(save_dir)\n AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)\n return model", "docstring": "Save a randomly initialized version of a model using a pretrained config.\nArgs:\n config_name: which config to use\n save_dir: where to save the resulting model and tokenizer\n config_kwargs: Passed to AutoConfig\n\nUsage::\n save_randomly_initialized_version(\"facebook/bart-large-cnn\", \"distilbart_random_cnn_6_3\", encoder_layers=6, decoder_layers=3, num_beams=3)"} +{"repo": "transformers", "function": "def _calculate_expected_result(dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config):\n if config.use_gumbel_for_cells:\n gumbel_dist = tfp.distributions.RelaxedBernoulli(config.temperature, logits=dist_per_cell.logits_parameter() * config.temperature)\n scaled_probability_per_cell = gumbel_dist.sample()\n else:\n scaled_probability_per_cell = dist_per_cell.probs_parameter()\n scaled_probability_per_cell = scaled_probability_per_cell / numeric_values_scale * input_mask_float\n count_result = tf.reduce_sum(scaled_probability_per_cell, axis=1)\n numeric_values_masked = tf.where(tf.math.is_nan(numeric_values), tf.zeros_like(numeric_values), numeric_values)\n sum_result = tf.reduce_sum(scaled_probability_per_cell * numeric_values_masked, axis=1)\n avg_approximation = config.average_approximation_function\n if avg_approximation == AverageApproximationFunction.RATIO:\n average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)\n elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:\n ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1\n average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell / ex, axis=1)\n elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:\n ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1\n pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)\n var = tf.reduce_sum(pointwise_var, axis=1, keepdims=True) - pointwise_var\n multiplier = (var / tf.math.square(ex) + 1) / ex\n average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell * multiplier, axis=1)\n else:\n raise ValueError('Invalid average_approximation_function: %s', config.average_approximation_function)\n if config.use_gumbel_for_aggregation:\n gumbel_dist = tfp.distributions.RelaxedOneHotCategorical(config.aggregation_temperature, logits=logits_aggregation[:, 1:])\n aggregation_op_only_probs = gumbel_dist.sample()\n else:\n aggregation_op_only_probs = stable_softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, axis=-1)\n all_results = tf.concat([tf.expand_dims(sum_result, axis=1), tf.expand_dims(average_result, axis=1), tf.expand_dims(count_result, axis=1)], axis=1)\n expected_result = tf.reduce_sum(all_results * aggregation_op_only_probs, axis=1)\n return expected_result", "docstring": "Calculates the expected result given cell and aggregation probabilities.\n\nArgs:\n dist_per_cell (`tfp.distributions.Bernoulli`):\n Cell selection distribution for each cell.\n 
numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`):\n Numeric values of every token. Nan for tokens which are not numeric values.\n numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`):\n Scale of the numeric values of every token.\n input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`):\n Mask for the table, without question tokens and table headers.\n logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):\n Logits per aggregation operation.\n config ([`TapasConfig`]):\n Model configuration class with all the hyperparameters of the model\n\nReturns:\n expected_result (`tf.Tensor` of shape `(batch_size,)`): The expected result per example."} +{"repo": "transformers", "function": "def from_sub_model_configs(cls, text_config: ClvpEncoderConfig, speech_config: ClvpEncoderConfig, decoder_config: ClvpDecoderConfig, **kwargs):\n return cls(text_config=text_config.to_dict(), speech_config=speech_config.to_dict(), decoder_config=decoder_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`ClvpConfig`] (or a derived class) from CLVP text model configuration, CLVP speech model\nconfiguration and CLVP decoder model configuration.\n\nArgs:\n text_config (`ClvpEncoderConfig`):\n Text model configuration of type [`ClvpEncoderConfig`].\n speech_config (`ClvpEncoderConfig`):\n Speech model configuration of type [`ClvpEncoderConfig`].\n decoder_config (`ClvpDecoderConfig`):\n Decoder model configuration of type [`ClvpDecoderConfig`].\n\nReturns:\n [`ClvpConfig`]: An instance of a configuration object"} +{"repo": "tensorflow", "function": "def _build_node_error_message(op):\n node_error_message = [f'Detected at node {op.name!r} defined at (most recent call last):']\n field_dict = _compute_field_dict(op)\n for frame in field_dict['definition_traceback']:\n if ' 0, mask_shape)\n return tf.where(mask, new_tensor, tensor)\n if size_along_axis is None or size_along_axis > 1:\n return tf.cond(do_update, _write_update_to_result, lambda: tensor)\n else:\n return new_tensor", "docstring": "Replace `tensor` entries with `new_tensor` along a given axis.\n\nThis updates elements of `tensor` that correspond to the elements returned by\n`numpy.take(updated, ind, axis)` with the corresponding elements of\n`new_tensor`.\n\n# Example\n```python\ntensor = tf.ones([5, 4, 3, 2])\nnew_tensor = tf.zeros([5, 4, 3, 2])\nupdated_tensor = maybe_update_along_axis(tensor=tensor,\n new_tensor=new_tensor,\n axis=1,\n ind=2,\n do_update=True)\n# Returns a `Tensor` of ones where\n# `updated_tensor[:, 2, :, :].numpy() == 0`\n```\nIf the `do_update` is set to `False`, then the update does not happen unless\nthe number of dimensions along the `axis` is equal to 1. This functionality\nis useful when, for example, aggregating samples of an Ito process.\n\nArgs:\n tensor: A `Tensor` of any shape and `dtype`.\n new_tensor: A `Tensor` of the same `dtype` as `tensor` and of shape\n broadcastable with `tensor`.\n axis: A Python integer. The axis of `tensor` along which the elements have\n to be updated.\n ind: An int32 scalar `Tensor` that denotes an index on the `axis` which\n defines the updated slice of `tensor` (see example above).\n do_update: A bool scalar `Tensor`. If `False`, the output is the same as\n `tensor`, unless the dimension of the `tensor` along the `axis` is equal\n to 1.\n dtype: The `dtype` of the input `Tensor`s.\n Default value: `None` which means that default dtypes inferred by\n TensorFlow are used.\n name: Python string. 
The name to give this op.\n Default value: `None` which maps to `maybe_update_along_axis`.\n\nReturns:\n A `Tensor` of the same shape and `dtype` as `tensor`."} +{"repo": "tensorflow", "function": "def get_all_plugin_assets(graph=None):\n if graph is None:\n graph = ops.get_default_graph()\n out = []\n for name in graph.get_collection(_PLUGIN_ASSET_PREFIX):\n collection = graph.get_collection(_PLUGIN_ASSET_PREFIX + name)\n if len(collection) != 1:\n raise ValueError('Collection for %s had %d items, expected 1' % (name, len(collection)))\n out.append(collection[0])\n return out", "docstring": "Retrieve all PluginAssets stored in the graph collection.\n\nArgs:\n graph: Optionally, the graph to get assets from. If unspecified, the default\n graph is used.\n\nReturns:\n A list with all PluginAsset instances in the graph.\n\nRaises:\n ValueError: if we unexpectedly find a collection with the wrong number of\n PluginAssets."} +{"repo": "tensorflow", "function": "def replace_composites_with_components(structure):\n if isinstance(structure, CompositeTensor):\n return replace_composites_with_components(structure._type_spec._to_components(structure))\n elif not nest.is_nested(structure):\n return structure\n else:\n return nest.map_structure(replace_composites_with_components, structure, expand_composites=False)", "docstring": "Recursively replaces CompositeTensors with their components.\n\nArgs:\n structure: A `nest`-compatible structure, possibly containing composite\n tensors.\n\nReturns:\n A copy of `structure`, where each composite tensor has been replaced by\n its components. The result will contain no composite tensors.\n Note that `nest.flatten(replace_composites_with_components(structure))`\n returns the same value as `nest.flatten(structure)`."} +{"repo": "transformers", "function": "def get_special_tokens_mask(self, token_ids_0: list, token_ids_1: Optional[list]=None, already_has_special_tokens: bool=False) -> list[int]:\n if already_has_special_tokens:\n if token_ids_1 is not None:\n raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))", "docstring": "Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of ids of the first sequence.\n token_ids_1 (`List[int]`, *optional*):\n List of ids of the second sequence.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token."} +{"repo": "tensorflow", "function": "def _distribute(processing_mode, service, job_name=None, consumer_index=None, num_consumers=None, max_outstanding_requests=None, task_refresh_interval_hint_ms=None, data_transfer_protocol=None, compression='AUTO', cross_trainer_cache=None, target_workers='AUTO') -> Callable[[dataset_ops.Dataset], dataset_ops.Dataset]:\n processing_mode = _get_validated_sharding_policy(processing_mode)\n _validate_compression(compression)\n\n def _apply_fn(dataset) -> dataset_ops.Dataset:\n dataset_id = _register_dataset(service, dataset, compression=compression)\n return _from_dataset_id(processing_mode, service, dataset_id, dataset.element_spec, job_name=job_name, consumer_index=consumer_index, num_consumers=num_consumers, max_outstanding_requests=max_outstanding_requests, task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, data_transfer_protocol=data_transfer_protocol, cross_trainer_cache=cross_trainer_cache, target_workers=target_workers)\n return _apply_fn", "docstring": "A transformation that moves dataset processing to the tf.data service.\n\nThis transformation is similar to `distribute`, but supports additional\nparameters which we do not yet want to add to the public Python API.\n\nArgs:\n processing_mode: A `tf.data.experimental.service.ShardingPolicy` specifying\n how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n service: A string or a tuple indicating how to connect to the tf.data\n service. If it's a string, it should be in the format\n `[<protocol>://]<address>
`, where `<address>
` identifies the dispatcher\n address and `<protocol>` can optionally be used to override the default\n protocol to use. If it's a tuple, it should be (protocol, address).\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string. This argument makes it possible for multiple datasets to\n share the same job. The default behavior is that the dataset creates\n anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from `0`\n to `num_consumers`. Must be specified alongside `num_consumers`. When\n specified, consumers will read from the job in a strict round-robin order,\n instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead of\n the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out of\n sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the amount\n of memory used, since `distribute` won't use more than `element_size` *\n `max_outstanding_requests` of memory.\n task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the\n dispatcher for task changes.\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. If not provided, a protocol is determined\n at runtime.\n compression: How to compress the dataset's elements before transferring them\n over the network. \"AUTO\" leaves the decision of how to compress up to the\n tf.data service runtime. `None` indicates not to compress. \"SNAPPY\" forces\n snappy compression.\n cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is\n provided, dataset iteration will be shared across concurrently running\n trainers. See\n https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers\n for details.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`, tf.data\n runtime decides which workers to read from. If `\"ANY\"`, reads from any\n tf.data service workers. If `\"LOCAL\"`, only reads from local in-process\n tf.data service workers. `\"AUTO\"` works well for most cases, while users\n can specify other targets. For example, `\"LOCAL\"` helps avoid RPCs and\n data copy if every TF worker colocates with a tf.data service worker.\n Consumers of a shared job must use the same `target_workers`.
Defaults to\n `\"AUTO\"`.\n\nReturns:\n Dataset: A `Dataset` of the elements produced by the data service."} +{"repo": "tensorflow", "function": "def matches_any(patterns: List[Pattern[str]], line: str) -> bool:\n stripped_line = line.strip()\n for pattern in patterns:\n if pattern.match(stripped_line):\n return True\n return False", "docstring": "Checks if the line matches any of the given patterns.\n\nArgs:\n patterns: A list of compiled regular expression patterns.\n line: The line to check for matches.\n\nReturns:\n True if the line matches any of the patterns, False otherwise."} +{"repo": "transformers", "function": "def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_width=None) -> List[torch.Tensor]:\n if not isinstance(hidden_states, (tuple, list)):\n raise TypeError('hidden_states should be a tuple or list of tensors')\n if len(hidden_states) != len(self.config.neck_hidden_sizes):\n raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')\n if self.reassemble_stage is not None:\n hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)\n features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]\n output = self.fusion_stage(features)\n return output", "docstring": "Args:\n hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):\n List of hidden states from the backbone."} +{"repo": "transformers", "function": "class TimeSeriesTransformerDecoder(TimeSeriesTransformerPreTrainedModel):\n\n def __init__(self, config: TimeSeriesTransformerConfig):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n if config.prediction_length is None:\n raise ValueError('The `prediction_length` config needs to be specified.')\n self.value_embedding = TimeSeriesValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)\n self.embed_positions = TimeSeriesSinusoidalPositionalEmbedding(config.context_length + config.prediction_length, config.d_model)\n self.layers = nn.ModuleList([TimeSeriesTransformerDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])\n self.layernorm_embedding = nn.LayerNorm(config.d_model)\n self.gradient_checkpointing = False\n self.post_init()\n\n def forward(self, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:\n \"\"\"\n Args:\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values\n selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing\n cross-attention on hidden heads. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of\n shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the\n cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those\n that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of\n all `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. 
It is used to update the\n cache in the correct position and to infer the complete sequence length.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n input_shape = inputs_embeds.size()[:-1]\n return_legacy_cache = False\n if use_cache and (not isinstance(past_key_values, Cache)):\n return_legacy_cache = True\n logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')\n past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)\n past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0\n if cache_position is None:\n cache_position = torch.arange(past_key_values_length, past_key_values_length + input_shape[1], device=inputs_embeds.device)\n attention_mask = self._update_causal_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)\n encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds)\n hidden_states = self.value_embedding(inputs_embeds)\n embed_pos = self.embed_positions(inputs_embeds.size(), past_key_values_length=self.config.context_length)\n hidden_states = self.layernorm_embedding(hidden_states + embed_pos)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...')\n use_cache = False\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None\n next_decoder_cache = None\n for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):\n if attn_mask is not None:\n if attn_mask.size()[0] != len(self.layers):\n raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {attn_mask.size()[0]}.')\n for idx, decoder_layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n continue\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, cache_position)\n else:\n layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache = layer_outputs[3 if output_attentions else 1]\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_outputs[2],)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n next_cache = next_decoder_cache if use_cache else None\n if return_legacy_cache:\n next_cache = past_key_values.to_legacy_cache()\n if not return_dict:\n return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))\n return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)", "docstring": "Transformer decoder consisting of *config.decoder_layers* layers.
Each layer is a\n[`TimeSeriesTransformerDecoderLayer`]\n\nArgs:\n config: TimeSeriesTransformerConfig"} +{"repo": "tensorflow", "function": "def global_step(self):\n return self._global_step", "docstring": "Return the global_step Tensor used by the supervisor.\n\nReturns:\n An integer Tensor for the global_step."} +{"repo": "keras", "function": "def diagonal(x, offset=0, axis1=0, axis2=1):\n if any_symbolic_tensors((x,)):\n return Diagonal(offset=offset, axis1=axis1, axis2=axis2).symbolic_call(x)\n return backend.numpy.diagonal(x, offset=offset, axis1=axis1, axis2=axis2)", "docstring": "Return specified diagonals.\n\nIf `x` is 2-D, returns the diagonal of `x` with the given offset, i.e., the\ncollection of elements of the form `x[i, i+offset]`.\n\nIf `x` has more than two dimensions, the axes specified by `axis1`\nand `axis2` are used to determine the 2-D sub-array whose diagonal\nis returned.\n\nThe shape of the resulting array can be determined by removing `axis1`\nand `axis2` and appending an index to the right equal to the size of\nthe resulting diagonals.\n\nArgs:\n x: Input tensor.\n offset: Offset of the diagonal from the main diagonal.\n Can be positive or negative. Defaults to `0` (main diagonal).\n axis1: Axis to be used as the first axis of the 2-D sub-arrays.\n Defaults to `0` (first axis).\n axis2: Axis to be used as the second axis of the 2-D sub-arrays.\n Defaults to `1` (second axis).\n\nReturns:\n Tensor of diagonals.\n\nExamples:\n>>> from keras.src import ops\n>>> x = ops.arange(4).reshape((2, 2))\n>>> x\narray([[0, 1],\n [2, 3]])\n>>> x.diagonal()\narray([0, 3])\n>>> x.diagonal(1)\narray([1])\n\n>>> x = ops.arange(8).reshape((2, 2, 2))\n>>> x\narray([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n>>> x.diagonal(0, 0, 1)\narray([[0, 6],\n [1, 7]])"} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n return encoder_outputs", "docstring": "input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you\n should be able to pad the inputs on both the right and the left.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n To know more on how to prepare `input_ids` for pretraining take a look at [T5 Training](./t5#training).\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, T5EncoderModel\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google-t5/t5-small\")\n>>> model = T5EncoderModel.from_pretrained(\"google-t5/t5-small\")\n>>> input_ids = tokenizer(\n... \"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\"\n... 
).input_ids # Batch size 1\n>>> outputs = model(input_ids=input_ids)\n>>> last_hidden_states = outputs.last_hidden_state\n```"} +{"repo": "tensorflow", "function": "def create(self, batch_outs):\n raise NotImplementedError('Must be implemented in subclasses.')", "docstring": "Creates the initial results from the first batch outputs.\n\nArgs:\n batch_outs: A list of batch-level outputs."} +{"repo": "transformers", "function": "def extract_imports(module_fname: str, cache: Optional[Dict[str, List[str]]]=None) -> List[str]:\n if cache is not None and module_fname in cache:\n return cache[module_fname]\n with open(PATH_TO_REPO / module_fname, 'r', encoding='utf-8') as f:\n content = f.read()\n splits = content.split('\"\"\"')\n content = ''.join(splits[::2])\n module_parts = str(module_fname).split(os.path.sep)\n imported_modules = []\n relative_imports = _re_single_line_relative_imports.findall(content)\n relative_imports = [(mod, imp) for mod, imp in relative_imports if '# tests_ignore' not in imp and imp.strip() != '(']\n multiline_relative_imports = _re_multi_line_relative_imports.findall(content)\n relative_imports += [(mod, imp) for mod, imp in multiline_relative_imports if '# tests_ignore' not in imp]\n for module, imports in relative_imports:\n level = 0\n while module.startswith('.'):\n module = module[1:]\n level += 1\n if len(module) > 0:\n dep_parts = module_parts[:len(module_parts) - level] + module.split('.')\n else:\n dep_parts = module_parts[:len(module_parts) - level]\n imported_module = os.path.sep.join(dep_parts)\n imported_modules.append((imported_module, [imp.strip() for imp in imports.split(',')]))\n direct_imports = _re_single_line_direct_imports.findall(content)\n direct_imports = [(mod, imp) for mod, imp in direct_imports if '# tests_ignore' not in imp and imp.strip() != '(']\n multiline_direct_imports = _re_multi_line_direct_imports.findall(content)\n direct_imports += [(mod, imp) for mod, imp in multiline_direct_imports if '# tests_ignore' not in imp]\n for module, imports in direct_imports:\n import_parts = module.split('.')[1:]\n dep_parts = ['src', 'transformers'] + import_parts\n imported_module = os.path.sep.join(dep_parts)\n imported_modules.append((imported_module, [imp.strip() for imp in imports.split(',')]))\n result = []\n for module_file, imports in imported_modules:\n if (PATH_TO_REPO / f'{module_file}.py').is_file():\n module_file = f'{module_file}.py'\n elif (PATH_TO_REPO / module_file).is_dir() and (PATH_TO_REPO / module_file / '__init__.py').is_file():\n module_file = os.path.sep.join([module_file, '__init__.py'])\n imports = [imp for imp in imports if len(imp) > 0 and re.match('^[A-Za-z0-9_]*$', imp)]\n if len(imports) > 0:\n result.append((module_file, imports))\n if cache is not None:\n cache[module_fname] = result\n return result", "docstring": "Get the imports a given module makes.\n\nArgs:\n module_fname (`str`):\n The name of the file of the module where we want to look at the imports (given relative to the root of\n the repo).\n cache (Dictionary `str` to `List[str]`, *optional*):\n To speed up this function if it was previously called on `module_fname`, the cache of all previously\n computed results.\n\nReturns:\n `List[str]`: The list of module filenames imported in the input `module_fname` (a submodule we import from that\n is a subfolder will give its init file)."} +{"repo": "transformers", "function": "class BarkConfig(PretrainedConfig):\n model_type = 'bark'\n sub_configs = {'semantic_config': BarkSemanticConfig, 
'coarse_acoustics_config': BarkCoarseConfig, 'fine_acoustics_config': BarkFineConfig, 'codec_config': AutoConfig}\n\n def __init__(self, semantic_config: Optional[Dict]=None, coarse_acoustics_config: Optional[Dict]=None, fine_acoustics_config: Optional[Dict]=None, codec_config: Optional[Dict]=None, initializer_range=0.02, **kwargs):\n if semantic_config is None:\n semantic_config = {}\n logger.info('semantic_config is None. initializing the semantic model with default values.')\n if coarse_acoustics_config is None:\n coarse_acoustics_config = {}\n logger.info('coarse_acoustics_config is None. initializing the coarse model with default values.')\n if fine_acoustics_config is None:\n fine_acoustics_config = {}\n logger.info('fine_acoustics_config is None. initializing the fine model with default values.')\n if codec_config is None:\n codec_config = {}\n logger.info('codec_config is None. initializing the codec model with default values.')\n self.semantic_config = BarkSemanticConfig(**semantic_config)\n self.coarse_acoustics_config = BarkCoarseConfig(**coarse_acoustics_config)\n self.fine_acoustics_config = BarkFineConfig(**fine_acoustics_config)\n codec_model_type = codec_config['model_type'] if 'model_type' in codec_config else 'encodec'\n self.codec_config = CONFIG_MAPPING[codec_model_type](**codec_config)\n self.initializer_range = initializer_range\n super().__init__(**kwargs)\n\n @classmethod\n def from_sub_model_configs(cls, semantic_config: BarkSemanticConfig, coarse_acoustics_config: BarkCoarseConfig, fine_acoustics_config: BarkFineConfig, codec_config: PretrainedConfig, **kwargs):\n \"\"\"\n Instantiate a [`BarkConfig`] (or a derived class) from bark sub-models configuration.\n\n Returns:\n [`BarkConfig`]: An instance of a configuration object\n \"\"\"\n return cls(semantic_config=semantic_config.to_dict(), coarse_acoustics_config=coarse_acoustics_config.to_dict(), fine_acoustics_config=fine_acoustics_config.to_dict(), codec_config=codec_config.to_dict(), **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`BarkModel`]. It is used to instantiate a Bark\nmodel according to the specified sub-models configurations, defining the model architecture.\n\nInstantiating a configuration with the defaults will yield a similar configuration to that of the Bark\n[suno/bark](https://huggingface.co/suno/bark) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\nsemantic_config ([`BarkSemanticConfig`], *optional*):\n Configuration of the underlying semantic sub-model.\ncoarse_acoustics_config ([`BarkCoarseConfig`], *optional*):\n Configuration of the underlying coarse acoustics sub-model.\nfine_acoustics_config ([`BarkFineConfig`], *optional*):\n Configuration of the underlying fine acoustics sub-model.\ncodec_config ([`AutoConfig`], *optional*):\n Configuration of the underlying codec sub-model.\n\nExample:\n\n```python\n>>> from transformers import (\n... BarkSemanticConfig,\n... BarkCoarseConfig,\n... BarkFineConfig,\n... BarkModel,\n... BarkConfig,\n... AutoConfig,\n... 
)\n\n>>> # Initializing Bark sub-modules configurations.\n>>> semantic_config = BarkSemanticConfig()\n>>> coarse_acoustics_config = BarkCoarseConfig()\n>>> fine_acoustics_config = BarkFineConfig()\n>>> codec_config = AutoConfig.from_pretrained(\"facebook/encodec_24khz\")\n\n\n>>> # Initializing a Bark module style configuration\n>>> configuration = BarkConfig.from_sub_model_configs(\n... semantic_config, coarse_acoustics_config, fine_acoustics_config, codec_config\n... )\n\n>>> # Initializing a model (with random weights)\n>>> model = BarkModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def _call_with_structured_signature(self, args, kwargs):\n bound_args = function_type_utils.canonicalize_function_inputs(args, kwargs, self.function_type)\n filtered_flat_args = self.function_type.unpack_inputs(bound_args)\n return self._call_flat(filtered_flat_args, captured_inputs=self.captured_inputs)", "docstring": "Executes the wrapped function with the structured signature.\n\nArgs:\n args: Positional arguments to the concrete function.\n kwargs: Keyword arguments to the concrete function.\n\nReturns:\n The result of applying the function on the Tensors/Variables contained in\n `args` and `kwargs`.\nRaises:\n TypeError: if `args` and `kwargs` do not match the structured signature\n of this `ConcreteFunction`."} +{"repo": "genai-processors", "function": "def ttft(self) -> float | None:\n return self._ttft", "docstring": "Returns the TTFT of the wrapped processor.\n\nReturns:\n the TTFT of the wrapped processor or None if the processor has not been\n called yet."} +{"repo": "beam", "function": "def bigtable_error_code_to_grpc_status_string(grpc_status_code: Optional[int]) -> str:\n grpc_to_canonical_gcp_status = {0: 'ok', 1: 'cancelled', 2: 'unknown', 3: 'invalid_argument', 4: 'deadline_exceeded', 5: 'not_found', 6: 'already_exists', 7: 'permission_denied', 8: 'resource_exhausted', 9: 'failed_precondition', 10: 'aborted', 11: 'out_of_range', 12: 'unimplemented', 13: 'internal', 14: 'unavailable'}\n if grpc_status_code is None:\n return grpc_to_canonical_gcp_status[4]\n return grpc_to_canonical_gcp_status.get(grpc_status_code, str(grpc_status_code))", "docstring": "Converts the bigtable error code to a canonical GCP status code string.\n\nThis Bigtable client library is not using the canonical http status code\nvalues (i.e. 
https://cloud.google.com/apis/design/errors)\"\nInstead they are numbered using an enum with these values corresponding\nto each status code: https://cloud.google.com/bigtable/docs/status-codes\n\nArgs:\n grpc_status_code: An int that corresponds to an enum of status codes\n\nReturns:\n A GCP status code string"} +{"repo": "pyglove", "function": "def set_default(self, default: Any, use_default_apply: bool=True, root_path: Optional[utils.KeyPath]=None) -> 'ValueSpec':", "docstring": "Sets the default value and returns `self`.\n\nArgs:\n default: Default value.\n use_default_apply: If True, invoke `apply` to the value, otherwise use\n default value as is.\n root_path: (Optional) The path of the field.\n\nReturns:\n ValueSpec itself.\n\nRaises:\n ValueError: If default value cannot be applied when use_default_apply\n is set to True."} +{"repo": "tensorflow", "function": "def enqueue_many(self, vals, name=None):\n with ops.name_scope(name, '%s_EnqueueMany' % self._name, self._scope_vals(vals)) as scope:\n vals = self._check_enqueue_dtypes(vals)\n batch_dim = tensor_shape.dimension_value(vals[0].get_shape().with_rank_at_least(1)[0])\n batch_dim = tensor_shape.Dimension(batch_dim)\n for val, shape in zip(vals, self._shapes):\n val_batch_dim = tensor_shape.dimension_value(val.get_shape().with_rank_at_least(1)[0])\n val_batch_dim = tensor_shape.Dimension(val_batch_dim)\n batch_dim = batch_dim.merge_with(val_batch_dim)\n val.get_shape()[1:].assert_is_compatible_with(shape)\n return gen_data_flow_ops.queue_enqueue_many_v2(self._queue_ref, vals, name=scope)", "docstring": "Enqueues zero or more elements to this queue.\n\nThis operation slices each component tensor along the 0th dimension to\nmake multiple queue elements. All of the tensors in `vals` must have the\nsame size in the 0th dimension.\n\nIf the queue is full when this operation executes, it will block\nuntil all of the elements have been enqueued.\n\nAt runtime, this operation may raise an error if the queue is\n`tf.QueueBase.close` before or during its execution. If the\nqueue is closed before this operation runs,\n`tf.errors.CancelledError` will be raised. 
If this operation is\nblocked, and either (i) the queue is closed by a close operation\nwith `cancel_pending_enqueues=True`, or (ii) the session is\n`tf.Session.close`,\n`tf.errors.CancelledError` will be raised.\n\n>>> q = tf.queue.FIFOQueue(capacity=10, dtypes=tf.int32)\n>>> q.enqueue_many(tf.constant([1, 2, 3, 4, 5], dtype=tf.int32))\n>>> q.size()\n\n\nArgs:\n vals: A tensor, a list or tuple of tensors, or a dictionary\n from which the queue elements are taken.\n name: A name for the operation (optional).\n\nReturns:\n The operation that enqueues a batch of tuples of tensors to the queue."} +{"repo": "pyglove", "function": "def parse(cls, json_value: Union[int, float, str, List[Any], Tuple[Any], None], spec: Optional[DNASpec]=None) -> 'DNA':\n return DNA(json_value, spec=spec)", "docstring": "Parse DNA from a nested structure of numbers.\n\nDeprecated: use `DNA.__init__` instead.\n\nArgs:\n json_value: A nested structure of numbers.\n spec: DNA spec that will be applied to current DNA tree.\n\nReturns:\n an instance of DNA object.\n\nRaises:\n ValueError: Bad format for json_value or parsed DNA does not conform to\n the DNA spec."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A BART sequence has the following format:\n\n- single sequence: ` X `\n- pair of sequences: ` A B `\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "beam", "function": "def run_inference(self, batch: Sequence[np.ndarray], engine: TensorRTEngine, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n return self.inference_fn(batch, engine, inference_args)", "docstring": "Runs inferences on a batch of Tensors and returns an Iterable of\nTensorRT Predictions.\n\nArgs:\n batch: A np.ndarray or a np.ndarray that represents a concatenation\n of multiple arrays as a batch.\n engine: A TensorRT engine.\n inference_args: Any additional arguments for an inference\n that are not applicable to TensorRT.\n\nReturns:\n An Iterable of type PredictionResult."} +{"repo": "tensorflow", "function": "def get_resource(self, feature_column, resource_name):\n if feature_column not in self._cols_to_resources_map or resource_name not in self._cols_to_resources_map[feature_column]:\n raise ValueError('Resource does not exist.')\n return self._cols_to_resources_map[feature_column][resource_name]", "docstring": "Returns an already created resource.\n\nResources can be things such as tables, variables, trackables, etc.\n\nArgs:\n feature_column: A `FeatureColumn` object this variable corresponds to.\n resource_name: Name of the resource."} +{"repo": "fhir-py", "function": "def set_in_parent_or_add(msg: message.Message, field: Union[descriptor.FieldDescriptor, str]) -> message.Message:\n if isinstance(field, str):\n field = _field_descriptor_for_name(msg, field)\n if field_is_primitive(field):\n raise 
ValueError(f'Expected a composite message type at: {field.name}.')\n if field_is_repeated(field):\n return getattr(msg, field.name).add()\n getattr(msg, field.name).SetInParent()\n return getattr(msg, field.name)", "docstring": "Creates a new default instance of the type at field in message.\n\nIf this field is repeated, this function is equivalent to calling .add,\notherwise, this function is equivalent to calling .SetInParent and\nreturning the reference.\n\nArgs:\n msg: The parent message whose composite message to create and return.\n field: The FieldDescriptor or name of the field that the composite message\n is stored in.\n\nReturns:\n A reference to a newly created message of the type at field which is a child\n of parent_message.\n\nRaises:\n ValueError: If the provided FieldDescriptor is a primitive type."} +{"repo": "transformers", "function": "class DataCollatorSpeechSeq2SeqWithPadding:\n processor: Any\n decoder_start_token_id: int\n forward_attention_mask: bool\n\n def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:\n model_input_name = self.processor.model_input_names[0]\n input_features = [{model_input_name: feature[model_input_name]} for feature in features]\n label_features = [{'input_ids': feature['labels']} for feature in features]\n batch = self.processor.feature_extractor.pad(input_features, return_tensors='pt')\n if self.forward_attention_mask:\n batch['attention_mask'] = torch.LongTensor([feature['attention_mask'] for feature in features])\n labels_batch = self.processor.tokenizer.pad(label_features, return_tensors='pt')\n labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)\n if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():\n labels = labels[:, 1:]\n batch['labels'] = labels\n return batch", "docstring": "Data collator that will dynamically pad the inputs received.\nArgs:\n processor ([`WhisperProcessor`])\n The processor used for processing the data.\n decoder_start_token_id (`int`)\n The begin-of-sentence of the decoder.\n forward_attention_mask (`bool`)\n Whether to return attention_mask."} +{"repo": "transformers", "function": "def load_images(images: Union[list, tuple, str, 'PIL.Image.Image'], timeout: Optional[float]=None) -> Union['PIL.Image.Image', list['PIL.Image.Image'], list[list['PIL.Image.Image']]]:\n if isinstance(images, (list, tuple)):\n if len(images) and isinstance(images[0], (list, tuple)):\n return [[load_image(image, timeout=timeout) for image in image_group] for image_group in images]\n else:\n return [load_image(image, timeout=timeout) for image in images]\n else:\n return load_image(images, timeout=timeout)", "docstring": "Loads images, handling different levels of nesting.\n\nArgs:\n images: A single image, a list of images, or a list of lists of images to load.\n timeout: Timeout for loading images.\n\nReturns:\n A single image, a list of images, a list of lists of images."} +{"repo": "tensorflow", "function": "def _indexed_slices_to_tensor(value, dtype=None, name=None, as_ref=False):\n _ = as_ref\n if dtype and (not dtype.is_compatible_with(value.dtype)):\n raise ValueError(f'Incompatible tensor conversion requested to `dtype` {dtype.name} for IndexedSlices ({value}) with dtype {value.dtype.name}')\n if value.dense_shape is None:\n raise ValueError(f'Tensor conversion requested for IndexedSlices for argument `value` without dense_shape: {value!s}')\n if not context.executing_eagerly():\n dense_shape_value = 
tensor_util.constant_value(value.dense_shape)\n if dense_shape_value is not None:\n num_elements = np.prod(dense_shape_value)\n if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:\n warnings.warn('Converting sparse IndexedSlices to a dense Tensor with %d elements. This may consume a large amount of memory.' % num_elements)\n return gen_math_ops.unsorted_segment_sum(value.values, value.indices, value.dense_shape[0], name=name)", "docstring": "Converts an IndexedSlices object `value` to a Tensor.\n\nNOTE(mrry): This function is potentially expensive.\n\nArgs:\n value: An ops.IndexedSlices object.\n dtype: The dtype of the Tensor to be returned.\n name: Optional name to use for the returned Tensor.\n as_ref: True if a ref is requested.\n\nReturns:\n A dense Tensor representing the values in the given IndexedSlices.\n\nRaises:\n ValueError: If the IndexedSlices does not have the same dtype."} +{"repo": "transformers", "function": "def forward(self, pixel_values: Optional[torch.FloatTensor]=None, input_points: Optional[torch.FloatTensor]=None, input_labels: Optional[torch.LongTensor]=None, input_boxes: Optional[torch.FloatTensor]=None, input_masks: Optional[torch.LongTensor]=None, image_embeddings: Optional[torch.FloatTensor]=None, multimask_output: bool=True, attention_similarity: Optional[torch.FloatTensor]=None, target_embedding: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, **kwargs) -> SamImageSegmentationOutput:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n if pixel_values is None and image_embeddings is None:\n raise ValueError('Either pixel_values or image_embeddings must be provided.')\n if pixel_values is not None and image_embeddings is not None:\n raise ValueError('Only one of pixel_values and image_embeddings can be provided.')\n if input_points is not None and len(input_points.shape) != 4:\n raise ValueError('The input_points must be a 4D tensor. Of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.', ' got {}.'.format(input_points.shape))\n if input_boxes is not None and len(input_boxes.shape) != 3:\n raise ValueError('The input_points must be a 3D tensor. Of shape `batch_size`, `nb_boxes`, `4`.', ' got {}.'.format(input_boxes.shape))\n if input_points is not None and input_boxes is not None:\n point_batch_size = input_points.shape[1]\n box_batch_size = input_boxes.shape[1]\n if point_batch_size != box_batch_size:\n raise ValueError('You should provide as many bounding boxes as input points per box. 
Got {} and {}.'.format(point_batch_size, box_batch_size))\n image_positional_embeddings = self.get_image_wide_positional_embeddings()\n batch_size = pixel_values.shape[0] if pixel_values is not None else image_embeddings.shape[0]\n image_positional_embeddings = image_positional_embeddings.repeat(batch_size, 1, 1, 1)\n vision_attentions = None\n vision_hidden_states = None\n if pixel_values is not None:\n vision_outputs: SamVisionEncoderOutput = self.vision_encoder(pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states)\n image_embeddings = vision_outputs.last_hidden_state\n if output_hidden_states:\n vision_hidden_states = vision_outputs.hidden_states\n if output_attentions:\n vision_attentions = vision_outputs.attentions\n if input_points is not None and input_labels is None:\n input_labels = torch.ones_like(input_points[:, :, :, 0], dtype=torch.int, device=input_points.device)\n if input_points is not None and image_embeddings.shape[0] != input_points.shape[0]:\n raise ValueError('The batch size of the image embeddings and the input points must be the same. ', 'Got {} and {} respectively.'.format(image_embeddings.shape[0], input_points.shape[0]), ' if you want to pass multiple points for the same image, make sure that you passed ', ' input_points of shape (batch_size, point_batch_size, num_points_per_image, 3) and ', ' input_labels of shape (batch_size, point_batch_size, num_points_per_image)')\n sparse_embeddings, dense_embeddings = self.prompt_encoder(input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, input_masks=input_masks)\n low_res_masks, iou_predictions, mask_decoder_attentions = self.mask_decoder(image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, attention_similarity=attention_similarity, target_embedding=target_embedding, output_attentions=output_attentions)\n return SamImageSegmentationOutput(iou_scores=iou_predictions, pred_masks=low_res_masks, vision_hidden_states=vision_hidden_states, vision_attentions=vision_attentions, mask_decoder_attentions=mask_decoder_attentions)", "docstring": "input_points (`torch.FloatTensor` of shape `(batch_size, num_points, 2)`):\n Input 2D spatial points, this is used by the prompt encoder to encode the prompt. Generally yields to much\n better results. The points can be obtained by passing a list of list of list to the processor that will\n create corresponding `torch` tensors of dimension 4. The first dimension is the image batch size, the\n second dimension is the point batch size (i.e. how many segmentation masks do we want the model to predict\n per input point), the third dimension is the number of points per segmentation mask (it is possible to pass\n multiple points for a single mask), and the last dimension is the x (vertical) and y (horizontal)\n coordinates of the point. If a different number of points is passed either for each image, or for each\n mask, the processor will create \"PAD\" points that will correspond to the (0, 0) coordinate, and the\n computation of the embedding will be skipped for these points using the labels.\ninput_labels (`torch.LongTensor` of shape `(batch_size, point_batch_size, num_points)`):\n Input labels for the points, this is used by the prompt encoder to encode the prompt. 
According to the\n official implementation, there are 3 types of labels\n\n - `1`: the point is a point that contains the object of interest\n - `0`: the point is a point that does not contain the object of interest\n - `-1`: the point corresponds to the background\n\n We added the label:\n\n - `-10`: the point is a padding point, thus should be ignored by the prompt encoder\n\n The padding labels should be automatically done by the processor.\ninput_boxes (`torch.FloatTensor` of shape `(batch_size, num_boxes, 4)`):\n Input boxes for the points, this is used by the prompt encoder to encode the prompt. Generally yields\n much better generated masks. The boxes can be obtained by passing a list of list of list to the processor,\n that will generate a `torch` tensor, with each dimension corresponding respectively to the image batch\n size, the number of boxes per image and the coordinates of the top left and bottom right point of the box.\n In the order (`x1`, `y1`, `x2`, `y2`):\n\n - `x1`: the x coordinate of the top left point of the input box\n - `y1`: the y coordinate of the top left point of the input box\n - `x2`: the x coordinate of the bottom right point of the input box\n - `y2`: the y coordinate of the bottom right point of the input box\ninput_masks (`torch.FloatTensor` of shape `(batch_size, image_size, image_size)`):\n SAM model also accepts segmentation masks as input. The mask will be embedded by the prompt encoder to\n generate a corresponding embedding, that will be fed later on to the mask decoder. These masks need to be\n manually fed by the user, and they need to be of shape (`batch_size`, `image_size`, `image_size`).\nimage_embeddings (`torch.FloatTensor` of shape `(batch_size, output_channels, window_size, window_size)`):\n Image embeddings, this is used by the mask decoder to generate masks and iou scores. For more memory\n efficient computation, users can first retrieve the image embeddings using the `get_image_embeddings`\n method, and then feed them to the `forward` method instead of feeding the `pixel_values`.\nmultimask_output (`bool`, *optional*):\n In the original implementation and paper, the model always outputs 3 masks per image (or per point / per\n bounding box if relevant).
However, it is possible to just output a single mask, that corresponds to the\n \"best\" mask, by specifying `multimask_output=False`.\nattention_similarity (`torch.FloatTensor`, *optional*):\n Attention similarity tensor, to be provided to the mask decoder for target-guided attention in case the\n model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).\ntarget_embedding (`torch.FloatTensor`, *optional*):\n Embedding of the target concept, to be provided to the mask decoder for target-semantic prompting in case\n the model is used for personalization as introduced in [PerSAM](https://huggingface.co/papers/2305.03048).\n\nExample:\n\n```python\n>>> from PIL import Image\n>>> import requests\n>>> from transformers import AutoModel, AutoProcessor\n\n>>> model = AutoModel.from_pretrained(\"facebook/sam-vit-base\")\n>>> processor = AutoProcessor.from_pretrained(\"facebook/sam-vit-base\")\n\n>>> img_url = \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-car.png\"\n>>> raw_image = Image.open(requests.get(img_url, stream=True).raw).convert(\"RGB\")\n>>> input_points = [[[400, 650]]] # 2D location of a window on the car\n>>> inputs = processor(images=raw_image, input_points=input_points, return_tensors=\"pt\")\n\n>>> # Get segmentation mask\n>>> outputs = model(**inputs)\n\n>>> # Postprocess masks\n>>> masks = processor.post_process_masks(\n... outputs.pred_masks, inputs[\"original_sizes\"], inputs[\"reshaped_input_sizes\"]\n... )\n```"} +{"repo": "transformers", "function": "def pad(self, encoded_inputs: Union[BatchEncoding, List[BatchEncoding], Dict[str, EncodedInput], Dict[str, List[EncodedInput]], List[Dict[str, EncodedInput]]], padding: Union[bool, str, PaddingStrategy]=True, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, verbose: bool=True) -> BatchEncoding:\n if self.__class__.__name__.endswith('Fast'):\n if not self.deprecation_warnings.get('Asking-to-pad-a-fast-tokenizer', False):\n logger.warning_advice(f\"You're using a {self.__class__.__name__} tokenizer. 
Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.\")\n self.deprecation_warnings['Asking-to-pad-a-fast-tokenizer'] = True\n if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping):\n encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}\n if self.model_input_names[0] not in encoded_inputs:\n raise ValueError(f'You should supply an encoding or a list of encodings to this method that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}')\n required_input = encoded_inputs[self.model_input_names[0]]\n if required_input is None or (isinstance(required_input, Sized) and len(required_input) == 0):\n if return_attention_mask:\n encoded_inputs['attention_mask'] = []\n return encoded_inputs\n first_element = required_input[0]\n if isinstance(first_element, (list, tuple)):\n for item in required_input:\n if len(item) != 0:\n first_element = item[0]\n break\n if not isinstance(first_element, (int, list, tuple)):\n if is_tf_tensor(first_element):\n return_tensors = 'tf' if return_tensors is None else return_tensors\n elif is_torch_tensor(first_element):\n return_tensors = 'pt' if return_tensors is None else return_tensors\n elif isinstance(first_element, np.ndarray):\n return_tensors = 'np' if return_tensors is None else return_tensors\n else:\n raise ValueError(f'type of {first_element} unknown: {type(first_element)}. Should be one of a python, numpy, pytorch or tensorflow object.')\n for key, value in encoded_inputs.items():\n encoded_inputs[key] = to_py_obj(value)\n padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(padding=padding, max_length=max_length, verbose=verbose)\n required_input = encoded_inputs[self.model_input_names[0]]\n if required_input and (not isinstance(required_input[0], (list, tuple))):\n encoded_inputs = self._pad(encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n return BatchEncoding(encoded_inputs, tensor_type=return_tensors)\n batch_size = len(required_input)\n assert all((len(v) == batch_size for v in encoded_inputs.values())), 'Some items in the output dictionary have a different batch size than others.'\n if padding_strategy == PaddingStrategy.LONGEST:\n max_length = max((len(inputs) for inputs in required_input))\n padding_strategy = PaddingStrategy.MAX_LENGTH\n batch_outputs = {}\n for i in range(batch_size):\n inputs = {k: v[i] for k, v in encoded_inputs.items()}\n outputs = self._pad(inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_attention_mask=return_attention_mask)\n for key, value in outputs.items():\n if key not in batch_outputs:\n batch_outputs[key] = []\n batch_outputs[key].append(value)\n return BatchEncoding(batch_outputs, tensor_type=return_tensors)", "docstring": "Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length\nin the batch.\n\nPadding side (left/right) padding token ids are defined at the tokenizer level (with `self.padding_side`,\n`self.pad_token_id` and `self.pad_token_type_id`).\n\nPlease note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the\ntext 
followed by a call to the `pad` method to get a padded encoding.\n\n\n\nIf the `encoded_inputs` passed are dictionary of numpy arrays, PyTorch tensors or TensorFlow tensors, the\nresult will use the same type unless you provide a different tensor type with `return_tensors`. In the case of\nPyTorch tensors, you will lose the specific device of your tensors however.\n\n\n\nArgs:\n encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]` or `List[Dict[str, List[int]]]`):\n Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of\n tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,\n List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader\n collate function.\n\n Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see\n the note above for the return type.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding\n index) among:\n\n - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different\n lengths).\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (`int`, *optional*):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability\n `>= 7.5` (Volta).\n padding_side (`str`, *optional*):\n The side on which the model should have padding applied. Should be selected between ['right', 'left'].\n Default value is picked from the class attribute of the same name.\n return_attention_mask (`bool`, *optional*):\n Whether to return the attention mask. If left to the default, will return the attention mask according\n to the specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n verbose (`bool`, *optional*, defaults to `True`):\n Whether or not to print more information and warnings."} +{"repo": "tensorflow", "function": "def concatenate(self, dataset, name=None) -> 'DatasetV2':\n from tensorflow.python.data.ops import concatenate_op\n return concatenate_op._concatenate(self, dataset, name)", "docstring": "Creates a `Dataset` by concatenating the given dataset with this dataset.\n\n>>> a = tf.data.Dataset.range(1, 4) # ==> [ 1, 2, 3 ]\n>>> b = tf.data.Dataset.range(4, 8) # ==> [ 4, 5, 6, 7 ]\n>>> ds = a.concatenate(b)\n>>> [a.item() for a in ds.as_numpy_iterator()]\n[1, 2, 3, 4, 5, 6, 7]\n>>> # The input dataset and dataset to be concatenated should have\n>>> # compatible element specs.\n>>> c = tf.data.Dataset.zip((a, b))\n>>> a.concatenate(c)\nTraceback (most recent call last):\nTypeError: Two datasets to concatenate have different types\n and (tf.int64, tf.int64)\n>>> d = tf.data.Dataset.from_tensor_slices([\"a\", \"b\", \"c\"])\n>>> a.concatenate(d)\nTraceback (most recent call last):\nTypeError: Two datasets to concatenate have different types\n and \n\nArgs:\n dataset: `Dataset` to be concatenated.\n name: (Optional.) A name for the tf.data operation.\n\nReturns:\n A new `Dataset` with the transformation applied as described above."} +{"repo": "transformers", "function": "def create_mask(self, qlen, mlen):\n attn_mask = tf.ones([qlen, qlen])\n mask_u = tf.linalg.band_part(attn_mask, 0, -1)\n mask_dia = tf.linalg.band_part(attn_mask, 0, 0)\n attn_mask_pad = tf.zeros([qlen, mlen])\n ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1)\n if self.same_length:\n mask_l = tf.linalg.band_part(attn_mask, -1, 0)\n ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1)\n return ret", "docstring": "Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.\n\nArgs:\n qlen: TODO Lysandre didn't fill\n mlen: TODO Lysandre didn't fill\n\n```\n\n same_length=False: same_length=True:\n < qlen > < qlen >\n ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1]\n [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1]\n qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1]\n [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1]\n v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0]\n```"} +{"repo": "tensorflow", "function": "def to_float(x, name='ToFloat'):\n return cast(x, dtypes.float32, name=name)", "docstring": "Casts a tensor to type `float32`.\n\nArgs:\n x: A `Tensor` or `SparseTensor` or `IndexedSlices`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with\n type `float32`.\n\nRaises:\n TypeError: If `x` cannot be cast to the `float32`.\n\n@compatibility(TF2)\n\nThis name was deprecated and removed in TF2, but has an exact replacement\n`tf.cast(..., tf.float32)`. 
There are no further issues with eager execution\nor tf.function.\n\nBefore:\n\n>>> tf.compat.v1.to_float(tf.constant(3.14, dtype=tf.double))\n\n\nAfter:\n\n>>> tf.cast(tf.constant(3.14, dtype=tf.double), tf.float32)\n\n\n@end_compatibility"} +{"repo": "tensorflow", "function": "def testBasic(self, count, batch_size, drop_remainder, num_parallel_calls):\n components = (np.arange(7), np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis], np.array(37.0) * np.arange(7))\n\n def _map_fn(x, y, z):\n return (math_ops.square(x), math_ops.square(y), math_ops.square(z))\n dataset = dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn).repeat(count).batch(batch_size, drop_remainder, num_parallel_calls)\n get_next = self.getNext(dataset)\n if drop_remainder:\n dim0 = batch_size\n else:\n dim0 = None\n self.assertEqual([ts.as_list() for ts in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset))], [[dim0] + list(c.shape[1:]) for c in components])\n num_full_batches = count * 7 // batch_size\n for i in range(num_full_batches):\n result = self.evaluate(get_next())\n for component, result_component in zip(components, result):\n for j in range(batch_size):\n self.assertAllEqual(component[(i * batch_size + j) % 7] ** 2, result_component[j])\n if not drop_remainder and count * 7 % batch_size > 0:\n result = self.evaluate(get_next())\n for component, result_component in zip(components, result):\n for j in range(count * 7 % batch_size):\n self.assertAllEqual(component[(num_full_batches * batch_size + j) % 7] ** 2, result_component[j])\n with self.assertRaises(errors.OutOfRangeError):\n result = self.evaluate(get_next())", "docstring": "Tests the batch dataset logic for various input configurations.\n\nArgs:\n count: the number of input elements\n batch_size: the batch size\n drop_remainder: whether a smaller batch size should be produced if batch\n size does not divide number of inputs evenly\n num_parallel_calls: the number batches to process asynchronously in\n parallel"} +{"repo": "transformers", "function": "def forward(self, hidden_states):\n hidden_states = hidden_states.transpose(-1, 1)\n hidden_states = self.conv1(hidden_states)\n hidden_states = torch.relu(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.conv2(hidden_states)\n hidden_states = hidden_states.transpose(-1, 1)\n return hidden_states", "docstring": "Calculate forward propagation.\n\nArgs:\n hidden_states (torch.Tensor): Batch of input tensors (batch_size, time, input_channels).\n\nReturns:\n torch.Tensor: Batch of output tensors (batch_size, time, hidden_channels)."} +{"repo": "keras", "function": "def _confusion_matrix(y_true, y_pred, num_classes):\n indices = y_true * num_classes + y_pred\n conf_matrix = np.bincount(indices, minlength=num_classes * num_classes)\n conf_matrix = conf_matrix.reshape((num_classes, num_classes))\n return conf_matrix", "docstring": "Creates a confusion matrix as a numpy array using vectorized operations.\n\nParameters:\n- y_true: array-like, true class labels.\n- y_pred: array-like, predicted class labels.\n- num_classes: int, number of classes.\n\nReturns:\n- conf_matrix: np.ndarray, confusion matrix of shape (num_classes,\n num_classes)."} +{"repo": "beam", "function": "def _to_proto_str(self, for_publish=False):\n if len(self.data) > 10000000:\n raise ValueError('A pubsub message data field must not exceed 10MB')\n if self.attributes:\n if len(self.attributes) > 100:\n raise ValueError('A pubsub message must not have more than 100 attributes.')\n for key, 
value in self.attributes.items():\n if len(key) > 256:\n raise ValueError('A pubsub message attribute key must not exceed 256 bytes.')\n if len(value) > 1024:\n raise ValueError('A pubsub message attribute value must not exceed 1024 bytes')\n message_id = None\n publish_time = None\n if not for_publish:\n if self.message_id:\n message_id = self.message_id\n if self.publish_time:\n publish_time = self.publish_time\n if len(self.ordering_key) > 1024:\n raise ValueError('A pubsub message ordering key must not exceed 1024 bytes.')\n msg = pubsub.types.PubsubMessage(data=self.data, attributes=self.attributes, message_id=message_id, publish_time=publish_time, ordering_key=self.ordering_key)\n serialized = pubsub.types.PubsubMessage.serialize(msg)\n if len(serialized) > 10000000:\n raise ValueError('Serialized pubsub message exceeds the publish request limit of 10MB')\n return serialized", "docstring": "Get serialized form of ``PubsubMessage``.\n\nThe serialized message is validated against pubsub message limits specified\nat https://cloud.google.com/pubsub/quotas#resource_limits\n\nArgs:\n proto_msg: str containing a serialized protobuf.\n for_publish: bool, if True strip out message fields which cannot be\n published (currently message_id and publish_time) per\n https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#pubsubmessage\n\nReturns:\n A str containing a serialized protobuf of type\n https://cloud.google.com/pubsub/docs/reference/rpc/google.pubsub.v1#google.pubsub.v1.PubsubMessage\n containing the payload of this object."} +{"repo": "transformers", "function": "def resize_positional_embeddings(positional_embeddings: torch.Tensor, spatial_shapes: torch.LongTensor, max_length: int) -> torch.Tensor:\n batch_size = spatial_shapes.shape[0]\n embed_dim = positional_embeddings.shape[-1]\n source_dtype = positional_embeddings.dtype\n resulted_positional_embeddings = torch.empty((batch_size, max_length, embed_dim), device=positional_embeddings.device, dtype=source_dtype)\n positional_embeddings = positional_embeddings.permute(2, 0, 1).unsqueeze(0)\n if positional_embeddings.device.type == 'cpu':\n positional_embeddings = positional_embeddings.to(torch.float32)\n for i in range(batch_size):\n height, width = spatial_shapes[i]\n resized_embeddings = F.interpolate(positional_embeddings, size=(height, width), mode='bilinear', align_corners=False, antialias=True)\n resized_embeddings = resized_embeddings.reshape(embed_dim, height * width).transpose(0, 1)\n resized_embeddings = resized_embeddings.to(source_dtype)\n resulted_positional_embeddings[i, :height * width] = resized_embeddings\n resulted_positional_embeddings[i, height * width:] = resized_embeddings[0]\n return resulted_positional_embeddings", "docstring": "Resize positional embeddings to image-specific size and pad to a fixed size.\n\nArgs:\n positional_embeddings (`torch.Tensor`):\n Position embeddings of shape (height, width, embed_dim)\n spatial_shapes (`torch.LongTensor`):\n Spatial shapes of shape (batch_size, 2) to resize the positional embeddings to\n max_length (`int`):\n Maximum length of the positional embeddings to pad resized positional embeddings to\n\nReturns:\n `torch.Tensor`: Embeddings of shape (batch_size, max_length, embed_dim)"} +{"repo": "tensorflow", "function": "class Sequence(object):\n\n @abstractmethod\n def __getitem__(self, index):\n \"\"\"Gets batch at position `index`.\n\n Args:\n index: position of the batch in the Sequence.\n\n Returns:\n A batch\n \"\"\"\n raise NotImplementedError\n\n 
@abstractmethod\n def __len__(self):\n \"\"\"Number of batches in the Sequence.\n\n Returns:\n The number of batches in the Sequence.\n \"\"\"\n raise NotImplementedError\n\n def on_epoch_end(self):\n \"\"\"Method called at the end of every epoch.\n \"\"\"\n pass\n\n def __iter__(self):\n \"\"\"Create a generator that iterates over the Sequence.\"\"\"\n for item in (self[i] for i in range(len(self))):\n yield item", "docstring": "Base object for fitting to a sequence of data, such as a dataset.\n\nEvery `Sequence` must implement the `__getitem__` and the `__len__` methods.\nIf you want to modify your dataset between epochs you may implement\n`on_epoch_end`.\nThe method `__getitem__` should return a complete batch.\n\nNotes:\n\n`Sequence` is a safer way to do multiprocessing. This structure guarantees\nthat the network will only train once\n on each sample per epoch, which is not the case with generators.\n\nExamples:\n\n```python\nfrom skimage.io import imread\nfrom skimage.transform import resize\nimport numpy as np\nimport math\n\n# Here, `x_set` is a list of paths to the images\n# and `y_set` are the associated classes.\n\nclass CIFAR10Sequence(Sequence):\n\n def __init__(self, x_set, y_set, batch_size):\n self.x, self.y = x_set, y_set\n self.batch_size = batch_size\n\n def __len__(self):\n return math.ceil(len(self.x) / self.batch_size)\n\n def __getitem__(self, idx):\n batch_x = self.x[idx * self.batch_size:(idx + 1) *\n self.batch_size]\n batch_y = self.y[idx * self.batch_size:(idx + 1) *\n self.batch_size]\n\n return np.array([\n resize(imread(file_name), (200, 200))\n for file_name in batch_x]), np.array(batch_y)\n```"} +{"repo": "tensorflow", "function": "def assert_rank_at_least_v2(x, rank, message=None, name=None):\n return assert_rank_at_least(x=x, rank=rank, message=message, name=name)", "docstring": "Assert that `x` has rank of at least `rank`.\n\nThis Op checks that the rank of `x` is greater than or equal to `rank`.\n\nIf `x` has a rank lower than `rank`, `message`, as well as the shape of `x`\nare printed, and `InvalidArgumentError` is raised.\n\nArgs:\n x: `Tensor`.\n rank: Scalar integer `Tensor`.\n message: A string to prefix to the default message.\n name: A name for this operation (optional).
Defaults to\n \"assert_rank_at_least\".\n\nReturns:\n Op raising `InvalidArgumentError` unless `x` has specified rank or higher.\n If static checks determine `x` has correct rank, a `no_op` is returned.\n This can be used with `tf.control_dependencies` inside of `tf.function`s\n to block followup computation until the check has executed.\n @compatibility(eager)\n returns None\n @end_compatibility\n\nRaises:\n InvalidArgumentError: `x` does not have rank at least `rank`, but the rank\n cannot be statically determined.\n ValueError: If static checks determine `x` has mismatched rank."} +{"repo": "tensorflow", "function": "def __init__(self, input_saved_model_dir=None, input_saved_model_tags=None, input_saved_model_signature_key=None, input_graph_def=None, nodes_denylist=None, max_batch_size=1, max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES, precision_mode=TrtPrecisionMode.FP32, minimum_segment_size=3, is_dynamic_op=False, maximum_cached_engines=1, use_calibration=True):\n if context.executing_eagerly():\n raise RuntimeError('Please use tf.experimental.tensorrt.Converter in TF 2.0.')\n if input_graph_def and input_saved_model_dir:\n raise ValueError('Can only specify one of input_graph_def and input_saved_model_dir')\n if not input_graph_def and (not input_saved_model_dir):\n raise ValueError('Must specify one of input_graph_def and input_saved_model_dir')\n _check_trt_version_compatibility()\n self._input_graph_def = input_graph_def\n self._nodes_denylist = nodes_denylist\n self._input_saved_model_dir = input_saved_model_dir\n self._converted = False\n self._grappler_meta_graph_def = None\n self._input_saved_model_tags = input_saved_model_tags or [tag_constants.SERVING]\n self._input_saved_model_signature_key = input_saved_model_signature_key or signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n self._calibration_graph = None\n self._calibration_data_collected = False\n self._need_calibration = (precision_mode == TrtPrecisionMode.INT8 or precision_mode == TrtPrecisionMode.INT8.lower()) and use_calibration\n if self._need_calibration and (not is_dynamic_op):\n logging.warn('INT8 precision mode with calibration is supported with dynamic TRT ops only. Disregarding is_dynamic_op parameter.')\n is_dynamic_op = True\n self._is_dynamic_op = is_dynamic_op\n if is_dynamic_op:\n self._max_batch_size = None\n if max_batch_size is not None:\n logging.warn('When is_dynamic_op==True max_batch_size should be None')\n else:\n if not isinstance(max_batch_size, int):\n raise ValueError('When is_dynamic_op==False max_batch_size should be an integer')\n self._max_batch_size = max_batch_size\n self._conversion_params = TrtConversionParams(max_workspace_size_bytes=max_workspace_size_bytes, precision_mode=precision_mode, minimum_segment_size=minimum_segment_size, maximum_cached_engines=maximum_cached_engines, use_calibration=use_calibration, allow_build_at_runtime=True)\n _check_conversion_params(self._conversion_params)\n self._test_only_disable_non_trt_optimizers = False", "docstring": "Initializes the converter.\n\nArgs:\n input_saved_model_dir: the directory to load the SavedModel which contains\n the input graph to transforms. 
Used only when input_graph_def is None.\n input_saved_model_tags: list of tags to load the SavedModel.\n input_saved_model_signature_key: the key of the signature to optimize the\n graph for.\n input_graph_def: a GraphDef object containing a model to be transformed.\n If set to None, the graph will be read from the SavedModel loaded from\n input_saved_model_dir.\n nodes_denylist: list of node names to prevent the converter from touching.\n max_batch_size: max size for the input batch.\n max_workspace_size_bytes: the maximum GPU temporary memory which the TRT\n engine can use at execution time. This corresponds to the\n 'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().\n precision_mode: one of TrtPrecisionMode.supported_precision_modes().\n minimum_segment_size: the minimum number of nodes required for a subgraph\n to be replaced by TRTEngineOp.\n is_dynamic_op: whether to generate dynamic TRT ops which will build the\n TRT network and engine at run time.\n maximum_cached_engines: max number of cached TRT engines in dynamic TRT\n ops. If the number of cached engines is already at max but none of them\n can serve the input, the TRTEngineOp will fall back to run the TF\n function based on which the TRTEngineOp is created.\n use_calibration: this argument is ignored if precision_mode is not INT8.\n If set to True, a calibration graph will be created to calibrate the\n missing ranges. The calibration graph must be converted to an inference\n graph by running calibration with calibrate(). If set to False,\n quantization nodes will be expected for every tensor in the graph\n (excluding those which will be fused). If a range is missing, an error\n will occur. Please note that accuracy may be negatively affected if\n there is a mismatch between which tensors TRT quantizes and which\n tensors were trained with fake quantization.\n\nRaises:\n ValueError: if the combination of the parameters is invalid.\n RuntimeError: if this class is used in TF 2.0."} +{"repo": "transformers", "function": "def forward(self, pixel_values: Tensor, input_ids: Tensor, token_type_ids: Optional[Tensor]=None, attention_mask: Optional[Tensor]=None, pixel_mask: Optional[Tensor]=None, encoder_outputs=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n text_self_attention_masks, position_ids = generate_masks_with_special_tokens_and_transfer_map(input_ids)\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n text_token_mask = attention_mask.bool()\n max_text_len = self.config.max_text_len\n if text_self_attention_masks.shape[1] > max_text_len:\n text_self_attention_masks = text_self_attention_masks[:, :max_text_len, :max_text_len]\n position_ids = position_ids[:, :max_text_len]\n input_ids = input_ids[:, :max_text_len]\n token_type_ids = token_type_ids[:, :max_text_len]\n text_token_mask = text_token_mask[:, :max_text_len]\n text_outputs = self.text_backbone(input_ids, text_self_attention_masks, token_type_ids, position_ids, return_dict=return_dict)\n text_features = text_outputs.last_hidden_state if return_dict else text_outputs[0]\n text_features = 
self.text_projection(text_features)\n batch_size, num_channels, height, width = pixel_values.shape\n device = pixel_values.device\n if pixel_mask is None:\n pixel_mask = torch.ones((batch_size, height, width), dtype=torch.long, device=device)\n vision_features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)\n feature_maps = []\n masks = []\n for level, (source, mask) in enumerate(vision_features):\n feature_maps.append(self.input_proj_vision[level](source))\n masks.append(mask)\n if self.config.num_feature_levels > len(feature_maps):\n _len_sources = len(feature_maps)\n for level in range(_len_sources, self.config.num_feature_levels):\n if level == _len_sources:\n source = self.input_proj_vision[level](vision_features[-1][0])\n else:\n source = self.input_proj_vision[level](feature_maps[-1])\n mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0]\n pos_l = self.backbone.position_embedding(source, mask).to(source.dtype)\n feature_maps.append(source)\n masks.append(mask)\n position_embeddings_list.append(pos_l)\n query_embeds = None\n if self.config.embedding_init_target or self.config.two_stage:\n query_embeds = self.query_position_embeddings.weight\n source_flatten = []\n mask_flatten = []\n lvl_pos_embed_flatten = []\n spatial_shapes_list = []\n for level, (source, mask, pos_embed) in enumerate(zip(feature_maps, masks, position_embeddings_list)):\n batch_size, num_channels, height, width = source.shape\n spatial_shape = (height, width)\n spatial_shapes_list.append(spatial_shape)\n source = source.flatten(2).transpose(1, 2)\n mask = mask.flatten(1)\n pos_embed = pos_embed.flatten(2).transpose(1, 2)\n lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1)\n lvl_pos_embed_flatten.append(lvl_pos_embed)\n source_flatten.append(source)\n mask_flatten.append(mask)\n source_flatten = torch.cat(source_flatten, 1)\n mask_flatten = torch.cat(mask_flatten, 1)\n lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n spatial_shapes = torch.as_tensor(spatial_shapes_list, dtype=torch.long, device=source_flatten.device)\n level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1]))\n valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)\n valid_ratios = valid_ratios.float()\n if encoder_outputs is None:\n encoder_outputs = self.encoder(vision_features=source_flatten, vision_attention_mask=~mask_flatten, vision_position_embedding=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, valid_ratios=valid_ratios, text_features=text_features, text_attention_mask=~text_token_mask, text_position_embedding=None, text_self_attention_masks=~text_self_attention_masks, text_position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n elif return_dict and (not isinstance(encoder_outputs, GroundingDinoEncoderOutput)):\n encoder_outputs = GroundingDinoEncoderOutput(last_hidden_state_vision=encoder_outputs[0], last_hidden_state_text=encoder_outputs[1], vision_hidden_states=encoder_outputs[2] if output_hidden_states else None, text_hidden_states=encoder_outputs[3] if output_hidden_states else None, attentions=encoder_outputs[-1] if output_attentions else None)\n topk_proposals = None\n enc_outputs_class = None\n enc_outputs_coord_logits = None\n encoder_logits = None\n encoder_pred_boxes = None\n if self.config.two_stage:\n 
object_query_embedding, output_proposals = self.generate_encoder_output_proposals(encoder_outputs[0], ~mask_flatten, spatial_shapes)\n enc_outputs_class = self.encoder_output_class_embed(object_query_embedding, encoder_outputs[1], text_token_mask)\n delta_bbox = self.encoder_output_bbox_embed(object_query_embedding)\n enc_outputs_coord_logits = delta_bbox + output_proposals\n topk = self.config.num_queries\n topk_logits = enc_outputs_class.max(-1)[0]\n topk_proposals = torch.topk(topk_logits, topk, dim=1)[1]\n topk_coords_logits = torch.gather(enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4))\n topk_coords_logits = topk_coords_logits.detach()\n reference_points = topk_coords_logits.sigmoid()\n init_reference_points = reference_points\n if query_embeds is not None:\n target = query_embeds.unsqueeze(0).repeat(batch_size, 1, 1)\n else:\n target = torch.gather(object_query_embedding, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)).detach()\n encoder_pred_boxes = reference_points\n encoder_logits = self.encoder_output_class_embed(target, text_features, text_token_mask)\n else:\n target = query_embeds.unsqueeze(0).repeat(batch_size, 1, 1)\n reference_points = self.reference_points.weight.unsqueeze(0).repeat(batch_size, 1, 1).sigmoid()\n init_reference_points = reference_points\n decoder_outputs = self.decoder(inputs_embeds=target, vision_encoder_hidden_states=encoder_outputs[0], vision_encoder_attention_mask=mask_flatten, text_encoder_hidden_states=encoder_outputs[1], text_encoder_attention_mask=~text_token_mask, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, valid_ratios=valid_ratios, self_attn_mask=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n if not return_dict:\n enc_outputs = tuple((value for value in [enc_outputs_class, enc_outputs_coord_logits, encoder_logits, encoder_pred_boxes] if value is not None))\n tuple_outputs = (decoder_outputs[0], init_reference_points) + decoder_outputs[1:] + encoder_outputs + enc_outputs\n return tuple_outputs\n return GroundingDinoModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, init_reference_points=init_reference_points, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_reference_points=decoder_outputs.intermediate_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state_vision=encoder_outputs.last_hidden_state_vision, encoder_last_hidden_state_text=encoder_outputs.last_hidden_state_text, encoder_vision_hidden_states=encoder_outputs.vision_hidden_states, encoder_text_hidden_states=encoder_outputs.text_hidden_states, encoder_attentions=encoder_outputs.attentions, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits, encoder_logits=encoder_logits, encoder_pred_boxes=encoder_pred_boxes)", "docstring": "input_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide\n it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`BertTokenizer.__call__`] for details.\ntoken_type_ids (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0,\n 1]`: 0 corresponds to a `sentence A` token, 1 corresponds to a `sentence B` token\n\n [What are token type IDs?](../glossary#token-type-ids)\n\nExamples:\n\n```python\n>>> from transformers import AutoProcessor, AutoModel\n>>> from PIL import Image\n>>> import requests\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n>>> text = \"a cat.\"\n\n>>> processor = AutoProcessor.from_pretrained(\"IDEA-Research/grounding-dino-tiny\")\n>>> model = AutoModel.from_pretrained(\"IDEA-Research/grounding-dino-tiny\")\n\n>>> inputs = processor(images=image, text=text, return_tensors=\"pt\")\n>>> outputs = model(**inputs)\n\n>>> last_hidden_states = outputs.last_hidden_state\n>>> list(last_hidden_states.shape)\n[1, 900, 256]\n```"} +{"repo": "transformers", "function": "def _linear(self, inputs):\n first_dims = shape_list(inputs)[:-1]\n x = tf.reshape(inputs, [-1, self.hidden_size])\n logits = tf.matmul(x, self.weight, transpose_b=True)\n return tf.reshape(logits, first_dims + [self.vocab_size])", "docstring": "Computes logits by running inputs through a linear layer.\n\nArgs:\n inputs: A float32 tensor with shape [..., hidden_size]\n\nReturns:\n float32 tensor with shape [..., vocab_size]."} +{"repo": "transformers", "function": "def fix_docstring(obj: Any, old_doc_args: str, new_doc_args: str):\n source, line_number = inspect.getsourcelines(obj)\n idx = 0\n while idx < len(source) and _re_args.search(source[idx]) is None:\n idx += 1\n if idx == len(source):\n return\n indent = find_indent(source[idx])\n idx += 1\n start_idx = idx\n while idx < len(source) and (len(source[idx].strip()) == 0 or find_indent(source[idx]) > indent):\n idx += 1\n idx -= 1\n while len(source[idx].strip()) == 0:\n idx -= 1\n idx += 1\n if ''.join(source[start_idx:idx])[:-1] != old_doc_args:\n return\n obj_file = find_source_file(obj)\n with open(obj_file, 'r', encoding='utf-8') as f:\n content = f.read()\n lines = content.split('\\n')\n lines = lines[:line_number + start_idx - 1] + [new_doc_args] + lines[line_number + idx - 1:]\n print(f'Fixing the docstring of {obj.__name__} in {obj_file}.')\n with open(obj_file, 'w', encoding='utf-8') as f:\n f.write('\\n'.join(lines))", "docstring": "Fixes the docstring of an object by replacing its arguments documentation by the one matched with the signature.\n\nArgs:\n obj (`Any`):\n The object whose dostring we are fixing.\n old_doc_args (`str`):\n The current documentation of the parameters of `obj` in the docstring (as returned by\n `match_docstring_with_signature`).\n new_doc_args (`str`):\n The documentation of the parameters of `obj` matched with its signature (as returned by\n `match_docstring_with_signature`)."} +{"repo": "starthinker", "function": "def _process_update(self, item, feed_item):\n lp = self.landing_page_dao.get(feed_item, required=True)\n feed_item[FieldMap.CAMPAIGN_LANDING_PAGE_ID] = lp['id']\n feed_item[FieldMap.CAMPAIGN_LANDING_PAGE_NAME] = lp['name']\n item['startDate'] = StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.CAMPAIGN_START_DATE, None))\n item['endDate'] = StringExtensions.convertDateTimeStrToDateStr(feed_item.get(FieldMap.CAMPAIGN_END_DATE, None))\n item['name'] = feed_item.get(FieldMap.CAMPAIGN_NAME, None)\n item['defaultLandingPageId'] = lp['id']", "docstring": "Updates a campaign based on the values from the feed.\n\nArgs:\n item: Object representing the campaign to be updated, this object is\n updated directly.\n 
feed_item: Feed item representing campaign values from the Bulkdozer feed."} +{"repo": "keras", "function": "def func_dump(func):\n if os.name == 'nt':\n raw_code = marshal.dumps(func.__code__).replace(b'\\\\', b'/')\n code = codecs.encode(raw_code, 'base64').decode('ascii')\n else:\n raw_code = marshal.dumps(func.__code__)\n code = codecs.encode(raw_code, 'base64').decode('ascii')\n defaults = func.__defaults__\n if func.__closure__:\n closure = tuple((c.cell_contents for c in func.__closure__))\n else:\n closure = None\n return (code, defaults, closure)", "docstring": "Serializes a user-defined function.\n\nArgs:\n func: the function to serialize.\n\nReturns:\n A tuple `(code, defaults, closure)`."} +{"repo": "transformers", "function": "def post_process(self, outputs, target_sizes):\n warnings.warn('`post_process` is deprecated and will be removed in v5 of Transformers, please use `post_process_object_detection` instead, with `threshold=0.` for equivalent results.', FutureWarning)\n logits, boxes = (outputs.logits, outputs.pred_boxes)\n if len(logits) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n if target_sizes.shape[1] != 2:\n raise ValueError('Each element of target_sizes must contain the size (h, w) of each image of the batch')\n probs = torch.max(logits, dim=-1)\n scores = torch.sigmoid(probs.values)\n labels = probs.indices\n boxes = center_to_corners_format(boxes)\n img_h, img_w = target_sizes.unbind(1)\n scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n boxes = boxes * scale_fct[:, None, :]\n results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n return results", "docstring": "Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,\nbottom_right_x, bottom_right_y) format.\n\nArgs:\n outputs ([`OwlViTObjectDetectionOutput`]):\n Raw outputs of the model.\n target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):\n Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original\n image size (before any data augmentation). 
For visualization, this should be the image size after data\n augment, but before padding.\nReturns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\n in the batch as predicted by the model."} +{"repo": "transformers", "function": "def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], TableTransformerModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n batch_size, num_channels, height, width = pixel_values.shape\n device = pixel_values.device\n if pixel_mask is None:\n pixel_mask = torch.ones((batch_size, height, width), device=device)\n features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)\n feature_map, mask = features[-1]\n if mask is None:\n raise ValueError('Backbone does not return downsampled pixel mask')\n projected_feature_map = self.input_projection(feature_map)\n flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)\n object_queries = position_embeddings_list[-1].flatten(2).permute(0, 2, 1)\n flattened_mask = mask.flatten(1)\n if encoder_outputs is None:\n encoder_outputs = self.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):\n encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)\n query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)\n queries = torch.zeros_like(query_position_embeddings)\n decoder_outputs = self.decoder(inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n if not return_dict:\n return decoder_outputs + encoder_outputs\n return TableTransformerModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states)", "docstring": "decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):\n Not used by default. 
Can be used to mask object queries.\ninputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you\n can choose to directly pass a flattened representation of an image.\ndecoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):\n Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an\n embedded representation.\n\nExamples:\n\n```python\n>>> from transformers import AutoImageProcessor, TableTransformerModel\n>>> from huggingface_hub import hf_hub_download\n>>> from PIL import Image\n\n>>> file_path = hf_hub_download(repo_id=\"nielsr/example-pdf\", repo_type=\"dataset\", filename=\"example_pdf.png\")\n>>> image = Image.open(file_path).convert(\"RGB\")\n\n>>> image_processor = AutoImageProcessor.from_pretrained(\"microsoft/table-transformer-detection\")\n>>> model = TableTransformerModel.from_pretrained(\"microsoft/table-transformer-detection\")\n\n>>> # prepare image for the model\n>>> inputs = image_processor(images=image, return_tensors=\"pt\")\n\n>>> # forward pass\n>>> outputs = model(**inputs)\n\n>>> # the last hidden states are the final query embeddings of the Transformer decoder\n>>> # these are of shape (batch_size, num_queries, hidden_size)\n>>> last_hidden_states = outputs.last_hidden_state\n>>> list(last_hidden_states.shape)\n[1, 15, 256]\n```"} +{"repo": "keras", "function": "def vstack(xs):\n if any_symbolic_tensors((xs,)):\n return Vstack().symbolic_call(xs)\n return backend.numpy.vstack(xs)", "docstring": "Stack tensors in sequence vertically (row wise).\n\nArgs:\n xs: Sequence of tensors.\n\nReturns:\n Tensor formed by stacking the given tensors."} +{"repo": "tensorflow", "function": "def rank(input, name=None):\n return rank_internal(input, name, optimize=True)", "docstring": "Returns the rank of a tensor.\n\nSee also `tf.shape`.\n\nReturns a 0-D `int32` `Tensor` representing the rank of `input`.\n\nFor example:\n\n```python\n# shape of tensor 't' is [2, 2, 3]\nt = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])\ntf.rank(t) # 3\n```\n\n**Note**: The rank of a tensor is not the same as the rank of a matrix. The\nrank of a tensor is the number of indices required to uniquely select each\nelement of the tensor. Rank is also known as \"order\", \"degree\", or \"ndims.\"\n\nArgs:\n input: A `Tensor` or `SparseTensor`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` of type `int32`.\n\n@compatibility(numpy)\nEquivalent to np.ndim\n@end_compatibility"} +{"repo": "tensorflow", "function": "def new_list(iterable=None):\n if iterable:\n elements = tuple(iterable)\n else:\n elements = ()\n if elements:\n return _py_list_new(elements)\n return tf_tensor_list_new(elements)", "docstring": "The list constructor.\n\nArgs:\n iterable: Optional elements to fill the list with.\n\nReturns:\n A list-like object. 
The exact return value depends on the initial elements."} +{"repo": "tensorflow", "function": "def gather_nd(params, indices, name=None, batch_dims=0, bad_indices_policy=''):\n batch_dims_ = tensor_util.constant_value(batch_dims)\n if batch_dims_ is not None:\n batch_dims = int(batch_dims_)\n if batch_dims == 0 and bad_indices_policy not in ('', 'DEFAULT'):\n return gen_array_ops.gather_nd(params, indices, name=name, bad_indices_policy=bad_indices_policy)\n if batch_dims == 0:\n try:\n return params.gather_nd(indices, name=name)\n except AttributeError:\n return gen_array_ops.gather_nd(params, indices, name=name, bad_indices_policy=bad_indices_policy)\n else:\n return batch_gather_nd(params, indices, batch_dims=batch_dims, name=name, bad_indices_policy=bad_indices_policy)", "docstring": "Gather slices from `params` into a Tensor with shape specified by `indices`.\n\n`indices` is a `Tensor` of indices into `params`. The index vectors are\narranged along the last axis of `indices`.\n\nThis is similar to `tf.gather`, in which `indices` defines slices into the\nfirst dimension of `params`. In `tf.gather_nd`, `indices` defines slices into\nthe first `N` dimensions of `params`, where `N = indices.shape[-1]`.\n\n## Gathering scalars\n\nIn the simplest case the vectors in `indices` index the full rank of `params`:\n\n>>> tf.gather_nd(\n... indices=[[0, 0],\n... [1, 1]],\n... params = [['a', 'b'],\n... ['c', 'd']]).numpy()\narray([b'a', b'd'], dtype=object)\n\nIn this case the result has 1-axis fewer than `indices`, and each index vector\nis replaced by the scalar indexed from `params`.\n\nIn this case the shape relationship is:\n\n```\nindex_depth = indices.shape[-1]\nassert index_depth == params.shape.rank\nresult_shape = indices.shape[:-1]\n```\n\nIf `indices` has a rank of `K`, it is helpful to think `indices` as a\n(K-1)-dimensional tensor of indices into `params`.\n\n## Gathering slices\n\nIf the index vectors do not index the full rank of `params` then each location\nin the result contains a slice of params. This example collects rows from a\nmatrix:\n\n>>> tf.gather_nd(\n... indices = [[1],\n... [0]],\n... params = [['a', 'b', 'c'],\n... ['d', 'e', 'f']]).numpy()\narray([[b'd', b'e', b'f'],\n [b'a', b'b', b'c']], dtype=object)\n\nHere `indices` contains `[2]` index vectors, each with a length of `1`.\nThe index vectors each refer to rows of the `params` matrix. Each\nrow has a shape of `[3]` so the output shape is `[2, 3]`.\n\nIn this case, the relationship between the shapes is:\n\n```\nindex_depth = indices.shape[-1]\nouter_shape = indices.shape[:-1]\nassert index_depth <= params.shape.rank\ninner_shape = params.shape[index_depth:]\noutput_shape = outer_shape + inner_shape\n```\n\nIt is helpful to think of the results in this case as tensors-of-tensors.\nThe shape of the outer tensor is set by the leading dimensions of `indices`.\nWhile the shape of the inner tensors is the shape of a single slice.\n\n## Batches\n\nAdditionally, both `params` and `indices` can have `M` leading batch\ndimensions that exactly match. In this case `batch_dims` must be set to `M`.\n\nFor example, to collect one row from each of a batch of matrices you could\nset the leading elements of the index vectors to be their location in the\nbatch:\n\n>>> tf.gather_nd(\n... indices = [[0, 1],\n... [1, 0],\n... [2, 4],\n... [3, 2],\n... [4, 1]],\n... params=tf.zeros([5, 7, 3])).shape.as_list()\n[5, 3]\n\nThe `batch_dims` argument lets you omit those leading location dimensions\nfrom the index:\n\n>>> tf.gather_nd(\n... 
batch_dims=1,\n... indices = [[1],\n... [0],\n... [4],\n... [2],\n... [1]],\n... params=tf.zeros([5, 7, 3])).shape.as_list()\n[5, 3]\n\nThis is equivalent to caling a separate `gather_nd` for each location in the\nbatch dimensions.\n\n\n>>> params=tf.zeros([5, 7, 3])\n>>> indices=tf.zeros([5, 1])\n>>> batch_dims = 1\n>>>\n>>> index_depth = indices.shape[-1]\n>>> batch_shape = indices.shape[:batch_dims]\n>>> assert params.shape[:batch_dims] == batch_shape\n>>> outer_shape = indices.shape[batch_dims:-1]\n>>> assert index_depth <= params.shape.rank\n>>> inner_shape = params.shape[batch_dims + index_depth:]\n>>> output_shape = batch_shape + outer_shape + inner_shape\n>>> output_shape.as_list()\n[5, 3]\n\n### More examples\n\nIndexing into a 3-tensor:\n\n>>> tf.gather_nd(\n... indices = [[1]],\n... params = [[['a0', 'b0'], ['c0', 'd0']],\n... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\narray([[[b'a1', b'b1'],\n [b'c1', b'd1']]], dtype=object)\n\n\n\n>>> tf.gather_nd(\n... indices = [[0, 1], [1, 0]],\n... params = [[['a0', 'b0'], ['c0', 'd0']],\n... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\narray([[b'c0', b'd0'],\n [b'a1', b'b1']], dtype=object)\n\n\n>>> tf.gather_nd(\n... indices = [[0, 0, 1], [1, 0, 1]],\n... params = [[['a0', 'b0'], ['c0', 'd0']],\n... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\narray([b'b0', b'b1'], dtype=object)\n\nThe examples below are for the case when only indices have leading extra\ndimensions. If both 'params' and 'indices' have leading batch dimensions, use\nthe 'batch_dims' parameter to run gather_nd in batch mode.\n\nBatched indexing into a matrix:\n\n>>> tf.gather_nd(\n... indices = [[[0, 0]], [[0, 1]]],\n... params = [['a', 'b'], ['c', 'd']]).numpy()\narray([[b'a'],\n [b'b']], dtype=object)\n\n\n\nBatched slice indexing into a matrix:\n\n>>> tf.gather_nd(\n... indices = [[[1]], [[0]]],\n... params = [['a', 'b'], ['c', 'd']]).numpy()\narray([[[b'c', b'd']],\n [[b'a', b'b']]], dtype=object)\n\n\nBatched indexing into a 3-tensor:\n\n>>> tf.gather_nd(\n... indices = [[[1]], [[0]]],\n... params = [[['a0', 'b0'], ['c0', 'd0']],\n... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\narray([[[[b'a1', b'b1'],\n [b'c1', b'd1']]],\n [[[b'a0', b'b0'],\n [b'c0', b'd0']]]], dtype=object)\n\n\n>>> tf.gather_nd(\n... indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]],\n... params = [[['a0', 'b0'], ['c0', 'd0']],\n... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\narray([[[b'c0', b'd0'],\n [b'a1', b'b1']],\n [[b'a0', b'b0'],\n [b'c1', b'd1']]], dtype=object)\n\n>>> tf.gather_nd(\n... indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]],\n... params = [[['a0', 'b0'], ['c0', 'd0']],\n... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\narray([[b'b0', b'b1'],\n [b'd0', b'c1']], dtype=object)\n\n\nExamples with batched 'params' and 'indices':\n\n>>> tf.gather_nd(\n... batch_dims = 1,\n... indices = [[1],\n... [0]],\n... params = [[['a0', 'b0'],\n... ['c0', 'd0']],\n... [['a1', 'b1'],\n... ['c1', 'd1']]]).numpy()\narray([[b'c0', b'd0'],\n [b'a1', b'b1']], dtype=object)\n\n\n>>> tf.gather_nd(\n... batch_dims = 1,\n... indices = [[[1]], [[0]]],\n... params = [[['a0', 'b0'], ['c0', 'd0']],\n... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\narray([[[b'c0', b'd0']],\n [[b'a1', b'b1']]], dtype=object)\n\n>>> tf.gather_nd(\n... batch_dims = 1,\n... indices = [[[1, 0]], [[0, 1]]],\n... params = [[['a0', 'b0'], ['c0', 'd0']],\n... [['a1', 'b1'], ['c1', 'd1']]]).numpy()\narray([[b'c0'],\n [b'b1']], dtype=object)\n\n\nSee also `tf.gather`.\n\nArgs:\n params: A `Tensor`. The tensor from which to gather values.\n indices: A `Tensor`. 
Must be one of the following types: `int32`, `int64`.\n Index tensor.\n name: A name for the operation (optional).\n batch_dims: An integer or a scalar 'Tensor'. The number of batch dimensions.\n bad_indices_policy: A string. If `\"\"` or `\"DEFAULT\"`, the default behavior\n is used (error on CPU and ignore on GPU). If `\"IGNORE\"`, the bad indices\n are ignored and 0 is stored in the corresponding output value. If\n `\"ERROR\"`, an error is raised. Accelerators generally don't support\n `\"ERROR\"`.\n\nReturns:\n A `Tensor`. Has the same type as `params`."} +{"repo": "tensorflow", "function": "def __init__(self, local_init_op: ops.Operation=None, ready_op: ops.Operation=None, ready_for_local_init_op: ops.Operation=None, graph: ops.Graph=None, recovery_wait_secs=30, local_init_run_options: 'distribute_lib.RunOptions'=None, local_init_feed_dict=None):\n if graph is None:\n graph = ops.get_default_graph()\n self._local_init_op = local_init_op\n self._ready_op = ready_op\n self._ready_for_local_init_op = ready_for_local_init_op\n self._graph = graph\n self._recovery_wait_secs = recovery_wait_secs\n self._target = None\n self._local_init_run_options = local_init_run_options\n self._local_init_feed_dict = local_init_feed_dict\n if ready_for_local_init_op is not None and local_init_op is None:\n raise ValueError('If you pass a ready_for_local_init_op you must also pass a local_init_op , ready_for_local_init_op [%s]' % ready_for_local_init_op)", "docstring": "Creates a SessionManager.\n\nThe `local_init_op` is an `Operation` that is run always after a new session\nwas created. If `None`, this step is skipped.\n\nThe `ready_op` is an `Operation` used to check if the model is ready. The\nmodel is considered ready if that operation returns an empty 1D string\ntensor. If the operation returns a non empty 1D string tensor, the elements\nare concatenated and used to indicate to the user why the model is not\nready.\n\nThe `ready_for_local_init_op` is an `Operation` used to check if the model\nis ready to run local_init_op. The model is considered ready if that\noperation returns an empty 1D string tensor. If the operation returns a non\nempty 1D string tensor, the elements are concatenated and used to indicate\nto the user why the model is not ready.\n\nIf `ready_op` is `None`, the model is not checked for readiness.\n\n`recovery_wait_secs` is the number of seconds between checks that\nthe model is ready. It is used by processes to wait for a model to\nbe initialized or restored. 
Defaults to 30 seconds.\n\nArgs:\n local_init_op: An `Operation` run immediately after session creation.\n Usually used to initialize tables and local variables.\n ready_op: An `Operation` to check if the model is initialized.\n ready_for_local_init_op: An `Operation` to check if the model is ready\n to run local_init_op.\n graph: The `Graph` that the model will use.\n recovery_wait_secs: Seconds between checks for the model to be ready.\n local_init_run_options: RunOptions to be passed to session.run when\n executing the local_init_op.\n local_init_feed_dict: Optional session feed dictionary to use when running\n the local_init_op.\n\nRaises:\n ValueError: If ready_for_local_init_op is not None but local_init_op is\n None"} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **loss_kwargs) -> CausalLMOutputWithPast:\n if self.training and self.config._attn_implementation != 'eager':\n logger.warning_once(f\"It is strongly recommended to train Gemma3 models with the `eager` attention implementation instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('', attn_implementation='eager')`.\")\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **loss_kwargs)\n hidden_states = outputs.last_hidden_state\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n if self.config.final_logit_softcapping is not None:\n logits = logits / self.config.final_logit_softcapping\n logits = torch.tanh(logits)\n logits = logits * self.config.final_logit_softcapping\n loss = None\n if labels is not None:\n loss = self.loss_function(logits, labels, self.vocab_size, **loss_kwargs)\n return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, Gemma3ForCausalLM\n\n>>> model = Gemma3ForCausalLM.from_pretrained(\"google/gemma-2-9b\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/gemma-2-9b\")\n\n>>> prompt = \"What is your favorite condiment?\"\n>>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n>>> # Generate\n>>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n\"What is your favorite condiment?\"\n```"} +{"repo": "tensorflow", "function": "def assign(self, value, use_locking=False, name=None, read_value=True):\n raise NotImplementedError", "docstring": "Assigns a new value to the variable.\n\nThis is essentially a shortcut for `assign(self, value)`.\n\nArgs:\n value: A `Tensor`. The new value for this variable.\n use_locking: If `True`, use locking during the assignment.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the new\n value of the variable; if False will return the assign op.\n\nReturns:\n The updated variable. If `read_value` is false, instead returns None in\n Eager mode and the assign op in graph mode."} +{"repo": "tensorflow", "function": "def with_flat_values(self, new_values):\n if isinstance(self._values, RaggedTensor):\n return self.with_values(self.values.with_flat_values(new_values))\n else:\n new_values = _convert_to_ragged_tensor_values(new_values)\n return self.with_values(new_values)", "docstring": "Returns a copy of `self` with `flat_values` replaced by `new_value`.\n\nPreserves cached row-partitioning tensors such as `self.cached_nrows` and\n`self.cached_value_rowids` if they have values.\n\nArgs:\n new_values: Potentially ragged tensor that should replace\n `self.flat_values`. Must have `rank > 0`, and must have the same number\n of rows as `self.flat_values`.\n\nReturns:\n A `RaggedTensor`.\n `result.rank = self.ragged_rank + new_values.rank`.\n `result.ragged_rank = self.ragged_rank + new_values.ragged_rank`."} +{"repo": "keras", "function": "def stack(x, axis=0):\n if any_symbolic_tensors((x,)):\n return Stack(axis=axis).symbolic_call(x)\n return backend.numpy.stack(x, axis=axis)", "docstring": "Join a sequence of tensors along a new axis.\n\nThe `axis` parameter specifies the index of the new axis in the\ndimensions of the result.\n\nArgs:\n x: A sequence of tensors.\n axis: Axis along which to stack. 
Defaults to `0`.\n\nReturns:\n The stacked tensor."} +{"repo": "nsscache", "function": "def GetSshkeyMap(self, since=None):\n return SshkeyUpdateGetter().GetUpdates(self, self.conf['sshkey_url'], since)", "docstring": "Return the sshkey map from this source.\n\nArgs:\n since: Get data only changed since this timestamp (inclusive) or None\n for all data.\n\nReturns:\n instance of sshkey.SshkeyMap"} +{"repo": "fhir-py", "function": "def _constraint_for_slice_element(self, slice_element: ElementDefinition, root_builder: expressions.Builder, slice_message: message.Message, message_type: _MessageType) -> expressions.Builder:\n if annotation_utils.is_primitive_type(slice_message.DESCRIPTOR):\n return root_builder == _primitive_message_as_value(slice_message)\n expression_parts = []\n for field in slice_message.DESCRIPTOR.fields:\n field_value = getattr(slice_message, field.name)\n expression_builder = self._get_new_child_builder(root_builder, field.json_name)\n if expression_builder is None:\n raise AttributeError(f'Could not resolve path {field.json_name} against {root_builder}.')\n if field.label != field.LABEL_REPEATED:\n if slice_message.HasField(field.name):\n expression_parts.append(self._constraint_for_slice_element(slice_element, expression_builder, field_value, message_type))\n elif message_type == _MessageType.FIXED:\n expression_parts.append(expression_builder.empty())\n elif field_value and message_type == _MessageType.FIXED:\n collection_constraints = []\n collection_constraints.append(expression_builder.count() == len(field_value))\n for i, elem in enumerate(field_value):\n collection_constraints.append(self._constraint_for_slice_element(slice_element, expression_builder[i], elem, message_type))\n expression_parts.append(functools.reduce(operator.and_, collection_constraints))\n elif field_value and message_type == _MessageType.PATTERN:\n collection_constraints = []\n for elem in field_value:\n constraint = self._constraint_for_slice_element(slice_element, expression_builder, elem, message_type)\n collection_constraints.append(expression_builder.where(constraint).exists())\n expression_parts.append(functools.reduce(operator.and_, collection_constraints))\n elif message_type == _MessageType.FIXED:\n expression_parts.append(expression_builder.empty())\n return functools.reduce(operator.and_, expression_parts)", "docstring": "Builds an expression representing the slice described by `slice_message`.\n\nIf `slice_message` is a message representing a FHIR primitive, produces a\nconstraint like:\n\"`field_name` = `slice_message`.value()\"\n\nIf `slice_message` is a message representing a non-primitive data type, for\neach sub-field in `slice_message` builds a constraint like:\n\"`field_name`.sub_field = `slice_message`.sub_field().value() and ...\"\n\nIf the sub-field is empty and the `message_type` flag is\nMessageType::kFixed,\nbuilds a constraint like\n\"`field_name`.sub_field.empty()\"\n\nIf a sub-field on `slice_message` contains repeated elements, the generated\nconstraint depends on the `message_type` flag. 
For MessageType::kFixed,\ngenerates a constraint like:\n\"`field_name`.sub_field[0] = `slice_message`.sub_field(0).value()\n\nFor MessageType::kPattern, generates a constraint like:\n\"`field_name`.sub_field.where(\n $this = `slice_message`.sub_field(0).value()).exists()\"\n\nThe functions FhirPathConstraintForPatternSliceElement and\nFhirPathConstraintForFixedSliceElement should be favored by callers rather\nthan calling this function directly.\n\nArgs:\n slice_element: The element definition describing inclusion criteria for a\n slice.\n root_builder: An expression builder for the path to `slice_message`.\n slice_message: The message defining the fixed or pattern value\n representing inclusion criteria for the slice.\n message_type: Whether `slice_message` represents a fixed or pattern value.\n\nReturns:\n An expression builder representing the slice constraint described by\n `slice_message`."} +{"repo": "keras", "function": "def get(self, path):\n if self.mode != 'r':\n raise ValueError('`get` is only allowed in read mode.')\n self._h5_entry_path = path\n self._h5_entry_group = {}\n if not path:\n if 'vars' in self.h5_file:\n self._h5_entry_group = self.h5_file['vars']\n elif path in self.h5_file and 'vars' in self.h5_file[path]:\n self._h5_entry_group = self.h5_file[path]['vars']\n elif '_layer_checkpoint_dependencies' in self.h5_file:\n path = path.replace('layers', '_layer_checkpoint_dependencies')\n if path in self.h5_file and 'vars' in self.h5_file[path]:\n self._h5_entry_group = self.h5_file[path]['vars']\n self._h5_entry_initialized = True\n return self", "docstring": "Get the H5 entry group.\n\nThis method is only available in read mode.\n\nArgs:\n path: `str`. The variable path."} +{"repo": "transformers", "function": "def __call__(self, raw_speech: AudioInput, sampling_rate: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding: Optional[str]='longest', max_length: Optional[int]=None, truncation: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, return_attention_mask: Optional[bool]=True, device: Optional[str]='cpu', **kwargs) -> BatchFeature:\n if sampling_rate is not None:\n if sampling_rate != self.sampling_rate:\n raise ValueError(f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.')\n else:\n logger.warning(f'It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. Failing to do so can result in silent errors that might be hard to debug.')\n if isinstance(raw_speech, np.ndarray):\n raw_speech = torch.tensor(raw_speech)\n elif isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], np.ndarray):\n raw_speech = [torch.tensor(speech) for speech in raw_speech]\n is_batched_torch = isinstance(raw_speech, torch.Tensor) and len(raw_speech.shape) > 1\n if is_batched_torch and len(raw_speech.shape) > 2:\n logger.warning(f'Only mono-channel audio is supported for input to {self.__class__.__name__}. We will take the mean of the channels to convert to mono.')\n raw_speech = raw_speech.mean(-1)\n is_batched_sequence = isinstance(raw_speech, (list, tuple))\n if is_batched_sequence:\n for speech in raw_speech:\n if len(speech.shape) > 1:\n logger.warning(f'Only mono-channel audio is supported for input to {self.__class__.__name__}. 
We will take the mean of the channels to convert to mono.')\n speech = speech.mean(-1)\n if is_batched_torch or is_batched_sequence:\n raw_speech = [speech[:, None].to(torch.float32) for speech in raw_speech]\n else:\n raw_speech = [raw_speech[:, None].to(torch.float32)]\n audio_lengths = [len(speech) for speech in raw_speech]\n batched_speech = BatchFeature(data={'audio_input_features': raw_speech, 'audio_lengths': audio_lengths})\n padded_inputs = self.pad(batched_speech, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt')\n input_features = padded_inputs.audio_input_features.squeeze(-1)\n audio_lengths = padded_inputs.audio_lengths\n input_features = self._torch_extract_fbank_features(input_features, audio_lengths, device)\n feature_lengths = (audio_lengths - self.win_length) // self.hop_length + 1\n feature_lengths = feature_lengths * self.audio_feat_stride\n audio_embed_sizes = self._compute_audio_embed_size(feature_lengths)\n feature_attention_mask = torch.arange(0, feature_lengths.max()) if is_torch_available() else np.arange(0, feature_lengths.max())\n feature_attention_mask = feature_attention_mask[None, :] < feature_lengths[:, None] if len(feature_lengths) > 1 else None\n data = {'audio_input_features': input_features, 'audio_embed_sizes': audio_embed_sizes}\n if feature_attention_mask is not None and return_attention_mask:\n data['audio_attention_mask'] = feature_attention_mask\n return BatchFeature(data=data, tensor_type=return_tensors)", "docstring": "Main method to featurize and prepare for the model one or several audio sequence(s). Implementation uses PyTorch for\nthe STFT computation if available, otherwise a slower NumPy based one.\n\nArgs:\n raw_speech (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):\n The sequence or batch of sequences to be processed. Each sequence can be a numpy array or PyTorch tensor.\n For batched inputs, sequences can be a list of numpy arrays or PyTorch tensors, or a single numpy array or\n PyTorch tensor with first dimension being the batch size.\n sampling_rate (`int`, *optional*):\n The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass\n `sampling_rate` at the forward call to prevent silent errors.\n pad_to_multiple_of (`int`, *optional*, defaults to None):\n If set will pad the sequence to a multiple of the provided value.\n padding (`str`, *optional*, defaults to \"longest\"):\n Padding strategy. Can be \"longest\" to pad to the longest sequence in the batch, or a specific length.\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length.\n truncation (`bool`, *optional*, defaults to False):\n Activates truncation to cut input sequences longer than *max_length* to *max_length*.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of numpy arrays. Acceptable values are:\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n return_attention_mask (`bool`, *optional*, defaults to `True`):\n Whether to return the extracted audio input features' attention mask.\n device (`str`, *optional*, defaults to \"cpu\"):\n Specifies the device for computation of the audio features. 
(e.g., \"cpu\", \"cuda\")\n\nReturns:\n [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n - **audio_input_features** -- Audio features extracted from the raw audio input, shape (batch_size, max_feature_length, feature_size).\n - **audio_lengths** -- Length of each audio sample in the batch, shape (batch_size,).\n - **audio_attention_mask** -- Attention mask for the audio input, shape (batch_size, max_feature_length).\n If `return_tensors` is not specified, the fields will be PyTorch tensors if PyTorch is available, otherwise NumPy arrays."} +{"repo": "transformers", "function": "class CodeGenTokenizer(PreTrainedTokenizer):\n vocab_files_names = VOCAB_FILES_NAMES\n model_input_names = ['input_ids', 'attention_mask']\n\n def __init__(self, vocab_file, merges_file, errors='replace', unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', pad_token=None, add_prefix_space=False, add_bos_token=False, return_token_type_ids=False, **kwargs):\n bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token\n eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token\n pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token\n self.add_bos_token = add_bos_token\n self.return_token_type_ids = return_token_type_ids\n if self.return_token_type_ids:\n self.model_input_names.append('token_type_ids')\n with open(vocab_file, encoding='utf-8') as vocab_handle:\n self.encoder = json.load(vocab_handle)\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.errors = errors\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n with open(merges_file, encoding='utf-8') as merges_handle:\n bpe_merges = merges_handle.read().split('\\n')[1:-1]\n bpe_merges = [tuple(merge.split()) for merge in bpe_merges]\n self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))\n self.cache = {}\n self.add_prefix_space = add_prefix_space\n self.pat = re.compile(\"'s|'t|'re|'ve|'m|'ll|'d| ?\\\\p{L}+| ?\\\\p{N}+| ?[^\\\\s\\\\p{L}\\\\p{N}]+|\\\\s+(?!\\\\S)|\\\\s+\")\n super().__init__(errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, add_bos_token=add_bos_token, return_token_type_ids=return_token_type_ids, **kwargs)\n\n @property\n def vocab_size(self):\n return len(self.encoder)\n\n def get_vocab(self):\n return dict(self.encoder, **self.added_tokens_encoder)\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token)\n pairs = get_pairs(word)\n if not pairs:\n return token\n while True:\n bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n except ValueError:\n new_word.extend(word[i:])\n break\n else:\n new_word.extend(word[i:j])\n i = j\n if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n self.cache[token] = word\n return word\n\n def build_inputs_with_special_tokens(self, token_ids_0, 
token_ids_1=None):\n if self.add_bos_token:\n bos_token_ids = [self.bos_token_id]\n else:\n bos_token_ids = []\n output = bos_token_ids + token_ids_0\n if token_ids_1 is None:\n return output\n return output + bos_token_ids + token_ids_1\n\n def _tokenize(self, text):\n \"\"\"Tokenize a string.\"\"\"\n bpe_tokens = []\n for token in re.findall(self.pat, text):\n token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))\n bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))\n return bpe_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.encoder.get(token, self.encoder.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.decoder.get(index)\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n text = ''.join(tokens)\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)\n return text\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n if not os.path.isdir(save_directory):\n logger.error(f'Vocabulary path ({save_directory}) should be a directory')\n return\n vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])\n with open(vocab_file, 'w', encoding='utf-8') as f:\n f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\\n')\n index = 0\n with open(merge_file, 'w', encoding='utf-8') as writer:\n writer.write('#version: 0.2\\n')\n for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')\n index = token_index\n writer.write(' '.join(bpe_tokens) + '\\n')\n index += 1\n return (vocab_file, merge_file)\n\n def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):\n add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)\n if is_split_into_words or add_prefix_space:\n text = ' ' + text\n return (text, kwargs)\n\n def decode(self, token_ids: Union[int, List[int], 'np.ndarray', 'torch.Tensor', 'tf.Tensor'], skip_special_tokens: bool=False, clean_up_tokenization_spaces: Optional[bool]=None, truncate_before_pattern: Optional[List[str]]=None, **kwargs) -> str:\n \"\"\"\n Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special\n tokens and clean up tokenization spaces.\n\n Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.\n\n Args:\n token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\n List of tokenized input ids. Can be obtained using the `__call__` method.\n skip_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to remove special tokens in the decoding.\n clean_up_tokenization_spaces (`bool`, *optional*):\n Whether or not to clean up the tokenization spaces. 
If `None`, will default to\n `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).\n truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):\n A list of regular expression strings that will be used to truncate the returned string. This can be\n used to remove extra pieces of code (e.g. truncate if observing a comment symbol \"#\" at the beginning\n of a new line). An example pattern could be `[\"^#\", re.escape(\"<|endoftext|>\"), \"^'''\", \"\n\n\n\"]`.\n kwargs (additional keyword arguments, *optional*):\n Will be passed to the underlying model specific decode method.\n\n Returns:\n `str`: The decoded sentence.\n \"\"\"\n token_ids = to_py_obj(token_ids)\n decoded_text = super()._decode(token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)\n if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:\n decoded_text = self.truncate(decoded_text, truncate_before_pattern)\n return decoded_text\n\n def truncate(self, completion, truncate_before_pattern):\n\n def find_re(string, pattern, start_pos):\n m = pattern.search(string, start_pos)\n return m.start() if m else -1\n terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]\n prints = list(re.finditer('^print', completion, re.MULTILINE))\n if len(prints) > 1:\n completion = completion[:prints[1].start()]\n defs = list(re.finditer('^def', completion, re.MULTILINE))\n if len(defs) > 1:\n completion = completion[:defs[1].start()]\n start_pos = 0\n terminals_pos = [pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1]\n if len(terminals_pos) > 0:\n return completion[:min(terminals_pos)]\n else:\n return completion", "docstring": "Construct a CodeGen tokenizer. Based on byte-level Byte-Pair-Encoding.\n\nThis tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will\nbe encoded differently whether it is at the beginning of the sentence (without space) or not:\n\n```python\n>>> from transformers import CodeGenTokenizer\n\n>>> tokenizer = CodeGenTokenizer.from_pretrained(\"Salesforce/codegen-350M-mono\")\n>>> tokenizer(\"Hello world\")[\"input_ids\"]\n[15496, 995]\n\n>>> tokenizer(\" Hello world\")[\"input_ids\"]\n[18435, 995]\n```\n\nYou can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you\ncall it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.\n\n\n\nWhen used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).\n\n\n\nThis tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to\nthis superclass for more information regarding those methods.\n\nArgs:\n vocab_file (`str`):\n Path to the vocabulary file.\n merges_file (`str`):\n Path to the merges file.\n errors (`str`, *optional*, defaults to `\"replace\"`):\n Paradigm to follow when decoding bytes to UTF-8. See\n [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.\n unk_token (`str`, *optional*, defaults to `\"<|endoftext|>\"`):\n The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n bos_token (`str`, *optional*, defaults to `\"<|endoftext|>\"`):\n The beginning of sequence token.\n eos_token (`str`, *optional*, defaults to `\"<|endoftext|>\"`):\n The end of sequence token.\n pad_token (`str`, *optional*):\n The token used for padding, for example when batching sequences of different lengths.\n add_prefix_space (`bool`, *optional*, defaults to `False`):\n Whether or not to add an initial space to the input. This allows to treat the leading word just as any\n other word. (CodeGen tokenizer detect beginning of words by the preceding space).\n add_bos_token (`bool`, *optional*, defaults to `False`):\n Whether to add a beginning of sequence token at the start of sequences.\n return_token_type_ids (`bool`, *optional*, defaults to `False`):\n Whether to return token type IDs."} +{"repo": "keras", "function": "def tanh(x):\n return ops.tanh(x)", "docstring": "Hyperbolic tangent activation function.\n\nIt is defined as:\n`tanh(x) = sinh(x) / cosh(x)`, i.e.\n`tanh(x) = ((exp(x) - exp(-x)) / (exp(x) + exp(-x)))`.\n\nArgs:\n x: Input tensor."} +{"repo": "tensorflow", "function": "def replace_tensors_by_numpy_ndarrays(repr_ds: RepresentativeDataset, sess: session.Session) -> RepresentativeDataset:\n new_repr_ds = []\n for sample in repr_ds:\n new_sample = {}\n for input_key, input_data in sample.items():\n if isinstance(input_data, core.Tensor):\n input_data = input_data.eval(session=sess)\n new_sample[input_key] = input_data\n new_repr_ds.append(new_sample)\n return new_repr_ds", "docstring": "Replaces tf.Tensors in samples by their evaluated numpy arrays.\n\nNote: This should be run in graph mode (default in TF1) only.\n\nArgs:\n repr_ds: Representative dataset to replace the tf.Tensors with their\n evaluated values. `repr_ds` is iterated through, so it may not be reusable\n (e.g. if it is a generator object).\n sess: Session instance used to evaluate tf.Tensors.\n\nReturns:\n The new representative dataset where each tf.Tensor is replaced by its\n evaluated numpy ndarrays."} +{"repo": "transformers", "function": "class RTDetrResNetConfig(BackboneConfigMixin, PretrainedConfig):\n model_type = 'rt_detr_resnet'\n layer_types = ['basic', 'bottleneck']\n\n def __init__(self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type='bottleneck', hidden_act='relu', downsample_in_first_stage=False, downsample_in_bottleneck=False, out_features=None, out_indices=None, **kwargs):\n super().__init__(**kwargs)\n if layer_type not in self.layer_types:\n raise ValueError(f'layer_type={layer_type} is not one of {','.join(self.layer_types)}')\n self.num_channels = num_channels\n self.embedding_size = embedding_size\n self.hidden_sizes = hidden_sizes\n self.depths = depths\n self.layer_type = layer_type\n self.hidden_act = hidden_act\n self.downsample_in_first_stage = downsample_in_first_stage\n self.downsample_in_bottleneck = downsample_in_bottleneck\n self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]\n self._out_features, self._out_indices = get_aligned_output_features_output_indices(out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)", "docstring": "This is the configuration class to store the configuration of a [`RTDetrResnetBackbone`]. It is used to instantiate an\nResNet model according to the specified arguments, defining the model architecture. 
Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the ResNet\n[microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n num_channels (`int`, *optional*, defaults to 3):\n The number of input channels.\n embedding_size (`int`, *optional*, defaults to 64):\n Dimensionality (hidden size) for the embedding layer.\n hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):\n Dimensionality (hidden size) at each stage.\n depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`):\n Depth (number of layers) for each stage.\n layer_type (`str`, *optional*, defaults to `\"bottleneck\"`):\n The layer to use, it can be either `\"basic\"` (used for smaller models, like resnet-18 or resnet-34) or\n `\"bottleneck\"` (used for larger models like resnet-50 and above).\n hidden_act (`str`, *optional*, defaults to `\"relu\"`):\n The non-linear activation function in each block. If string, `\"gelu\"`, `\"relu\"`, `\"selu\"` and `\"gelu_new\"`\n are supported.\n downsample_in_first_stage (`bool`, *optional*, defaults to `False`):\n If `True`, the first stage will downsample the inputs using a `stride` of 2.\n downsample_in_bottleneck (`bool`, *optional*, defaults to `False`):\n If `True`, the first conv 1x1 in ResNetBottleNeckLayer will downsample the inputs using a `stride` of 2.\n out_features (`List[str]`, *optional*):\n If used as backbone, list of features to output. Can be any of `\"stem\"`, `\"stage1\"`, `\"stage2\"`, etc.\n (depending on how many stages the model has). If unset and `out_indices` is set, will default to the\n corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the\n same order as defined in the `stage_names` attribute.\n out_indices (`List[int]`, *optional*):\n If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how\n many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.\n If unset and `out_features` is unset, will default to the last stage. Must be in the\n same order as defined in the `stage_names` attribute.\n\nExample:\n```python\n>>> from transformers import RTDetrResNetConfig, RTDetrResnetBackbone\n\n>>> # Initializing a ResNet resnet-50 style configuration\n>>> configuration = RTDetrResNetConfig()\n\n>>> # Initializing a model (with random weights) from the resnet-50 style configuration\n>>> model = RTDetrResnetBackbone(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def map_flat_values(op, *args, **kwargs):\n partition_lists = []\n flat_values_nrows = []\n inner_args = _replace_ragged_with_flat_values(args, partition_lists, flat_values_nrows)\n inner_kwargs = _replace_ragged_with_flat_values(kwargs, partition_lists, flat_values_nrows)\n if not partition_lists:\n return op(*args, **kwargs)\n if flat_values_nrows:\n flat_values_nrows = set(flat_values_nrows)\n if len(flat_values_nrows) != 1:\n raise ValueError(\"Input RaggedTensors' flat_values must all have the same outer-dimension size. 
Got sizes: %s\" % flat_values_nrows)\n flat_values_nrows = flat_values_nrows.pop()\n else:\n flat_values_nrows = None\n partition_dtypes = set((p[0].dtype for p in partition_lists))\n if len(partition_dtypes) > 1:\n if not ragged_config.auto_cast_partition_dtype():\n raise ValueError('Input RaggedTensors have mismatched row partition dtypes; use RaggedTensor.with_row_splits_dtype() to convert them to compatible dtypes.')\n partition_lists = [[p.with_dtype(dtypes.int64) for p in partition_list] for partition_list in partition_lists]\n op_output = op(*inner_args, **inner_kwargs)\n if flat_values_nrows is not None:\n if not op_output.shape[:1].is_compatible_with([flat_values_nrows]):\n raise ValueError('tf.ragged.map_flat_values requires that the output of `op` have the same outer-dimension size as flat_values of any ragged inputs. (output shape: %s; expected outer dimension size: %s)' % (op_output.shape, flat_values_nrows))\n return ragged_tensor.RaggedTensor._from_nested_row_partitions(op_output, _merge_partition_lists(partition_lists), validate=False)", "docstring": "Applies `op` to the `flat_values` of one or more RaggedTensors.\n\nReplaces any `RaggedTensor` in `args` or `kwargs` with its `flat_values`\ntensor (which collapses all ragged dimensions), and then calls `op`. Returns\na `RaggedTensor` that is constructed from the input `RaggedTensor`s'\n`nested_row_splits` and the value returned by the `op`.\n\nIf the input arguments contain multiple `RaggedTensor`s, then they must have\nidentical `nested_row_splits`.\n\nThis operation is generally used to apply elementwise operations to each value\nin a `RaggedTensor`.\n\nWarning: `tf.ragged.map_flat_values` does *not* apply `op` to each row of a\nragged tensor. This difference is important for non-elementwise operations,\nsuch as `tf.reduce_sum`. If you wish to apply a non-elementwise operation to\neach row of a ragged tensor, use `tf.map_fn` instead. (You may need to\nspecify an `output_signature` when using `tf.map_fn` with ragged tensors.)\n\nExamples:\n\n>>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])\n>>> tf.ragged.map_flat_values(tf.ones_like, rt)\n\n>>> tf.ragged.map_flat_values(tf.multiply, rt, rt)\n\n>>> tf.ragged.map_flat_values(tf.add, rt, 5)\n\n\nExample with a non-elementwise operation (note that `map_flat_values` and\n`map_fn` return different results):\n\n>>> rt = tf.ragged.constant([[1.0, 3.0], [], [3.0, 6.0, 3.0]])\n>>> def normalized(x):\n... return x / tf.reduce_sum(x)\n>>> tf.ragged.map_flat_values(normalized, rt)\n\n>>> tf.map_fn(normalized, rt)\n\n\nArgs:\n op: The operation that should be applied to the RaggedTensor `flat_values`.\n `op` is typically an element-wise operation (such as math_ops.add), but\n any operation that preserves the size of the outermost dimension can be\n used. 
I.e., `shape[0]` of the value returned by `op` must match\n `shape[0]` of the `RaggedTensor`s' `flat_values` tensors.\n *args: Arguments for `op`.\n **kwargs: Keyword arguments for `op`.\n\nReturns:\n A `RaggedTensor` whose `ragged_rank` matches the `ragged_rank` of all\n input `RaggedTensor`s.\nRaises:\n ValueError: If args contains no `RaggedTensors`, or if the `nested_splits`\n of the input `RaggedTensor`s are not identical."} +{"repo": "tensorflow", "function": "def do_import(self, keys, values, name=None):\n with tf.name_scope(name or '%s_lookup_table_import' % self._name):\n op = gen_simple_hash_table_op.examples_simple_hash_table_import(self.resource_handle, keys, values)\n return op", "docstring": "Import all `key` and `value` pairs.\n\n(Note that \"import\" is a python reserved word, so it cannot be the name of\na method.)\n\nArgs:\n keys: Tensor of all keys.\n values: Tensor of all values.\n name: A name for the operation (optional).\n\nReturns:\n A tuple of two tensors, the first with the `keys` and the second with\n the `values`."} +{"repo": "tensorflow", "function": "def _find_shape_dtype(fields: Mapping[str, _FieldValue], nrows: Optional[tensor.Tensor], row_partitions: Optional[Sequence[RowPartition]]) -> dtypes.DType:\n field_dtypes = [_field_shape_dtype(v) for v in fields.values()]\n nrows_dtypes = [nrows.dtype] if isinstance(nrows, tensor.Tensor) else []\n rp_dtypes = [] if row_partitions is None else [rp.dtype for rp in row_partitions]\n all_dtypes = field_dtypes + nrows_dtypes + rp_dtypes\n if dtypes.int64 in all_dtypes:\n return dtypes.int64\n if dtypes.int32 in all_dtypes:\n return dtypes.int32\n return dtypes.int64", "docstring": "Return a consistent dtype for fields, nrows, & row_partitions.\n\nIn the future, the default will switch from int64 to int32, but for now,\nwe stick with int64.\n\nArgs:\n fields: the fields of the StructuredTensor.\n nrows: the nrows of the StructuredTensor\n row_partitions: the row_partitions of the StructuredTensor.\n\nReturns:\n If anything requires int64, then return int64.\n If int32 is explicitly specified, return int32. Otherwise, return int64."} +{"repo": "keras", "function": "class PrecisionAtRecall(SensitivitySpecificityBase):\n\n def __init__(self, recall, num_thresholds=200, class_id=None, name=None, dtype=None):\n if recall < 0 or recall > 1:\n raise ValueError(f'Argument `recall` must be in the range [0, 1]. Received: recall={recall}')\n self.recall = recall\n self.num_thresholds = num_thresholds\n super().__init__(value=recall, num_thresholds=num_thresholds, class_id=class_id, name=name, dtype=dtype)\n\n def result(self):\n recalls = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_negatives))\n precisions = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_positives))\n return self._find_max_under_constraint(recalls, precisions, ops.greater_equal)\n\n def get_config(self):\n config = {'num_thresholds': self.num_thresholds, 'recall': self.recall}\n base_config = super().get_config()\n return {**base_config, **config}", "docstring": "Computes best precision where recall is >= specified value.\n\nThis metric creates four local variables, `true_positives`,\n`true_negatives`, `false_positives` and `false_negatives` that are used to\ncompute the precision at the given recall. 
The threshold for the given\nrecall value is computed and used to evaluate the corresponding precision.\n\nIf `sample_weight` is `None`, weights default to 1.\nUse `sample_weight` of 0 to mask values.\n\nIf `class_id` is specified, we calculate precision by considering only the\nentries in the batch for which `class_id` is above the threshold\npredictions, and computing the fraction of them for which `class_id` is\nindeed a correct label.\n\nArgs:\n recall: A scalar value in range `[0, 1]`.\n num_thresholds: (Optional) Defaults to 200. The number of thresholds to\n use for matching the given recall.\n class_id: (Optional) Integer class ID for which we want binary metrics.\n This must be in the half-open interval `[0, num_classes)`, where\n `num_classes` is the last dimension of predictions.\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nExample:\n\n>>> m = keras.metrics.PrecisionAtRecall(0.5)\n>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8])\n>>> m.result()\n0.5\n\n>>> m.reset_state()\n>>> m.update_state([0, 0, 0, 1, 1], [0, 0.3, 0.8, 0.3, 0.8],\n... sample_weight=[2, 2, 2, 1, 1])\n>>> m.result()\n0.33333333\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(\n optimizer='sgd',\n loss='binary_crossentropy',\n metrics=[keras.metrics.PrecisionAtRecall(recall=0.8)])\n```"} +{"repo": "transformers", "function": "def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: Tuple[int], device: torch.device, has_query: bool=False) -> torch.Tensor:\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError(f'Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})')\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n return extended_attention_mask", "docstring": "Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\nArguments:\n attention_mask (`torch.Tensor`):\n Mask with ones indicating tokens to attend to, zeros for tokens to ignore.\n input_shape (`Tuple[int]`):\n The shape of the input to the model.\n device: (`torch.device`):\n The device of the input to the model.\n\nReturns:\n `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`."} +{"repo": "transformers", "function": "class DonutSwinModelOutput(ModelOutput):\n last_hidden_state: Optional[torch.FloatTensor] = None\n pooler_output: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "DonutSwin model's outputs that also contains a pooling of the last hidden states.\n\nArgs:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):\n Average pooling of the last layer hidden-state.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple 
of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, hidden_size, height, width)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to\n include the spatial dimensions."} +{"repo": "tensorflow", "function": "def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):\n assert len(padding) == 2\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {'channels_first', 'channels_last'}:\n raise ValueError('Unknown data_format: ' + str(data_format))\n if data_format == 'channels_first':\n pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]\n else:\n pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]\n return array_ops.pad(x, pattern)", "docstring": "Pads the 2nd and 3rd dimensions of a 4D tensor.\n\nArgs:\n x: Tensor or variable.\n padding: Tuple of 2 tuples, padding pattern.\n data_format: One of `channels_last` or `channels_first`.\n\nReturns:\n A padded 4D tensor.\n\nRaises:\n ValueError: if `data_format` is neither\n `channels_last` or `channels_first`."} +{"repo": "transformers", "function": "class OmDetTurboConfig(PretrainedConfig):\n model_type = 'omdet-turbo'\n attribute_map = {'encoder_hidden_dim': 'd_model', 'num_attention_heads': 'encoder_attention_heads'}\n\n def __init__(self, text_config=None, backbone_config=None, use_timm_backbone=True, backbone='swin_tiny_patch4_window7_224', backbone_kwargs=None, use_pretrained_backbone=False, apply_layernorm_after_vision_backbone=True, image_size=640, disable_custom_kernels=False, layer_norm_eps=1e-05, batch_norm_eps=1e-05, init_std=0.02, text_projection_in_dim=512, text_projection_out_dim=512, task_encoder_hidden_dim=1024, class_embed_dim=512, class_distance_type='cosine', num_queries=900, csp_activation='silu', conv_norm_activation='gelu', encoder_feedforward_activation='relu', encoder_feedforward_dropout=0.0, encoder_dropout=0.0, hidden_expansion=1, vision_features_channels=[256, 256, 256], encoder_hidden_dim=256, encoder_in_channels=[192, 384, 768], encoder_projection_indices=[2], encoder_attention_heads=8, encoder_dim_feedforward=2048, encoder_layers=1, positional_encoding_temperature=10000, num_feature_levels=3, decoder_hidden_dim=256, decoder_num_heads=8, decoder_num_layers=6, decoder_activation='relu', decoder_dim_feedforward=2048, decoder_num_points=4, decoder_dropout=0.0, eval_size=None, learn_initial_query=False, cache_size=100, is_encoder_decoder=True, **kwargs):\n if use_timm_backbone:\n if backbone_config is None:\n backbone_kwargs = {'out_indices': [1, 2, 3], 'img_size': image_size, 
'always_partition': True}\n elif backbone_config is None:\n logger.info('`backbone_config` is `None`. Initializing the config with the default `swin` vision config.')\n backbone_config = CONFIG_MAPPING['swin'](window_size=7, image_size=image_size, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], out_indices=[2, 3, 4])\n elif isinstance(backbone_config, dict):\n backbone_model_type = backbone_config.get('model_type')\n config_class = CONFIG_MAPPING[backbone_model_type]\n backbone_config = config_class.from_dict(backbone_config)\n verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)\n if text_config is None:\n logger.info('`text_config` is `None`. Initializing the config with the default `clip_text_model` text config.')\n text_config = CONFIG_MAPPING['clip_text_model']()\n elif isinstance(text_config, dict):\n text_model_type = text_config.get('model_type')\n text_config = CONFIG_MAPPING[text_model_type](**text_config)\n if class_distance_type not in ['cosine', 'dot']:\n raise ValueError(f'Invalid `class_distance_type`. It should be either `cosine` or `dot`, but got {class_distance_type}.')\n self.text_config = text_config\n self.backbone_config = backbone_config\n self.use_timm_backbone = use_timm_backbone\n self.backbone = backbone\n self.backbone_kwargs = backbone_kwargs\n self.use_pretrained_backbone = use_pretrained_backbone\n self.apply_layernorm_after_vision_backbone = apply_layernorm_after_vision_backbone\n self.image_size = image_size\n self.disable_custom_kernels = disable_custom_kernels\n self.layer_norm_eps = layer_norm_eps\n self.batch_norm_eps = batch_norm_eps\n self.init_std = init_std\n self.text_projection_in_dim = text_projection_in_dim\n self.text_projection_out_dim = text_projection_out_dim\n self.task_encoder_hidden_dim = task_encoder_hidden_dim\n self.class_embed_dim = class_embed_dim\n self.class_distance_type = class_distance_type\n self.num_queries = num_queries\n self.csp_activation = csp_activation\n self.conv_norm_activation = conv_norm_activation\n self.encoder_feedforward_activation = encoder_feedforward_activation\n self.encoder_feedforward_dropout = encoder_feedforward_dropout\n self.encoder_dropout = encoder_dropout\n self.hidden_expansion = hidden_expansion\n self.vision_features_channels = vision_features_channels\n self.encoder_hidden_dim = encoder_hidden_dim\n self.encoder_in_channels = encoder_in_channels\n self.encoder_projection_indices = encoder_projection_indices\n self.encoder_attention_heads = encoder_attention_heads\n self.encoder_dim_feedforward = encoder_dim_feedforward\n self.encoder_layers = encoder_layers\n self.positional_encoding_temperature = positional_encoding_temperature\n self.num_feature_levels = num_feature_levels\n self.decoder_hidden_dim = decoder_hidden_dim\n self.decoder_num_heads = decoder_num_heads\n self.decoder_num_layers = decoder_num_layers\n self.decoder_activation = decoder_activation\n self.decoder_dim_feedforward = decoder_dim_feedforward\n self.decoder_num_points = decoder_num_points\n self.decoder_dropout = decoder_dropout\n self.eval_size = eval_size\n self.learn_initial_query = learn_initial_query\n self.cache_size = cache_size\n self.is_encoder_decoder = is_encoder_decoder\n super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`OmDetTurboForObjectDetection`].\nIt 
is used to instantiate a OmDet-Turbo model according to the specified arguments, defining the model architecture\nInstantiating a configuration with the defaults will yield a similar configuration to that of the OmDet-Turbo\n[omlab/omdet-turbo-swin-tiny-hf](https://huggingface.co/omlab/omdet-turbo-swin-tiny-hf) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n text_config (`PretrainedConfig`, *optional*):\n The configuration of the text backbone.\n backbone_config (`PretrainedConfig`, *optional*):\n The configuration of the vision backbone.\n use_timm_backbone (`bool`, *optional*, defaults to `True`):\n Whether to use the timm for the vision backbone.\n backbone (`str`, *optional*, defaults to `\"swin_tiny_patch4_window7_224\"`):\n The name of the pretrained vision backbone to use. If `use_pretrained_backbone=False` a randomly initialized\n backbone with the same architecture `backbone` is used.\n backbone_kwargs (`dict`, *optional*):\n Additional kwargs for the vision backbone.\n use_pretrained_backbone (`bool`, *optional*, defaults to `False`):\n Whether to use a pretrained vision backbone.\n apply_layernorm_after_vision_backbone (`bool`, *optional*, defaults to `True`):\n Whether to apply layer normalization on the feature maps of the vision backbone output.\n image_size (`int`, *optional*, defaults to 640):\n The size (resolution) of each image.\n disable_custom_kernels (`bool`, *optional*, defaults to `False`):\n Whether to disable custom kernels.\n layer_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon value for layer normalization.\n batch_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon value for batch normalization.\n init_std (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n text_projection_in_dim (`int`, *optional*, defaults to 512):\n The input dimension for the text projection.\n text_projection_out_dim (`int`, *optional*, defaults to 512):\n The output dimension for the text projection.\n task_encoder_hidden_dim (`int`, *optional*, defaults to 1024):\n The feedforward dimension for the task encoder.\n class_embed_dim (`int`, *optional*, defaults to 512):\n The dimension of the classes embeddings.\n class_distance_type (`str`, *optional*, defaults to `\"cosine\"`):\n The type of of distance to compare predicted classes to projected classes embeddings.\n Can be `\"cosine\"` or `\"dot\"`.\n num_queries (`int`, *optional*, defaults to 900):\n The number of queries.\n csp_activation (`str`, *optional*, defaults to `\"silu\"`):\n The activation function of the Cross Stage Partial (CSP) networks of the encoder.\n conv_norm_activation (`str`, *optional*, defaults to `\"gelu\"`):\n The activation function of the ConvNormLayer layers of the encoder.\n encoder_feedforward_activation (`str`, *optional*, defaults to `\"relu\"`):\n The activation function for the feedforward network of the encoder.\n encoder_feedforward_dropout (`float`, *optional*, defaults to 0.0):\n The dropout rate following the activation of the encoder feedforward network.\n encoder_dropout (`float`, *optional*, defaults to 0.0):\n The dropout rate of the encoder multi-head attention module.\n hidden_expansion (`int`, *optional*, defaults to 1):\n The hidden expansion of the CSP networks in the encoder.\n vision_features_channels (`tuple(int)`, 
*optional*, defaults to `[256, 256, 256]`):\n The projected vision features channels used as inputs for the decoder.\n encoder_hidden_dim (`int`, *optional*, defaults to 256):\n The hidden dimension of the encoder.\n encoder_in_channels (`List(int)`, *optional*, defaults to `[192, 384, 768]`):\n The input channels for the encoder.\n encoder_projection_indices (`List(int)`, *optional*, defaults to `[2]`):\n The indices of the input features projected by each layers.\n encoder_attention_heads (`int`, *optional*, defaults to 8):\n The number of attention heads for the encoder.\n encoder_dim_feedforward (`int`, *optional*, defaults to 2048):\n The feedforward dimension for the encoder.\n encoder_layers (`int`, *optional*, defaults to 1):\n The number of layers in the encoder.\n positional_encoding_temperature (`int`, *optional*, defaults to 10000):\n The positional encoding temperature in the encoder.\n num_feature_levels (`int`, *optional*, defaults to 3):\n The number of feature levels for the multi-scale deformable attention module of the decoder.\n decoder_hidden_dim (`int`, *optional*, defaults to 256):\n The hidden dimension of the decoder.\n decoder_num_heads (`int`, *optional*, defaults to 8):\n The number of heads for the decoder.\n decoder_num_layers (`int`, *optional*, defaults to 6):\n The number of layers for the decoder.\n decoder_activation (`str`, *optional*, defaults to `\"relu\"`):\n The activation function for the decoder.\n decoder_dim_feedforward (`int`, *optional*, defaults to 2048):\n The feedforward dimension for the decoder.\n decoder_num_points (`int`, *optional*, defaults to 4):\n The number of points sampled in the decoder multi-scale deformable attention module.\n decoder_dropout (`float`, *optional*, defaults to 0.0):\n The dropout rate for the decoder.\n eval_size (`Tuple[int, int]`, *optional*):\n Height and width used to computes the effective height and width of the position embeddings after taking\n into account the stride (see RTDetr).\n learn_initial_query (`bool`, *optional*, defaults to `False`):\n Whether to learn the initial query.\n cache_size (`int`, *optional*, defaults to 100):\n The cache size for the classes and prompts caches.\n is_encoder_decoder (`bool`, *optional*, defaults to `True`):\n Whether the model is used as an encoder-decoder model or not.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional parameters from the architecture. 
The values in kwargs will be saved as part of the configuration\n and can be used to control the model outputs.\n\nExamples:\n\n```python\n>>> from transformers import OmDetTurboConfig, OmDetTurboForObjectDetection\n\n>>> # Initializing a OmDet-Turbo omlab/omdet-turbo-swin-tiny-hf style configuration\n>>> configuration = OmDetTurboConfig()\n\n>>> # Initializing a model (with random weights) from the omlab/omdet-turbo-swin-tiny-hf style configuration\n>>> model = OmDetTurboForObjectDetection(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def most_specific_common_supertype(self, others: Sequence[trace.TraceType]) -> Optional['TypeSpec']:\n if any((type(self) is not type(other) for other in others)):\n return None\n has_supertype = True\n\n def make_supertype_attribute(attribute_self, *attribute_others):\n nonlocal has_supertype\n if not has_supertype:\n return\n if isinstance(attribute_self, trace.TraceType):\n attribute_supertype = attribute_self.most_specific_common_supertype(attribute_others)\n if attribute_supertype is None:\n has_supertype = False\n return\n return attribute_supertype\n else:\n if not all((attribute_self == attribute_other for attribute_other in attribute_others)):\n has_supertype = False\n return\n return attribute_self\n try:\n serialized_supertype = nest.map_structure(make_supertype_attribute, self._serialize(), *(o._serialize() for o in others))\n except (ValueError, TypeError):\n return None\n return self._deserialize(serialized_supertype) if has_supertype else None", "docstring": "Returns the most specific supertype TypeSpec of `self` and `others`.\n\nImplements the tf.types.experimental.func.TraceType interface.\n\nIf not overridden by a subclass, the default behavior is to assume the\nTypeSpec is covariant upon attributes that implement TraceType and\ninvariant upon rest of the attributes as well as the structure and type\nof the TypeSpec.\n\nArgs:\n others: A sequence of TraceTypes."} +{"repo": "tensorflow", "function": "def softplus(features, name=None):\n return gen_nn_ops.softplus(features, name)", "docstring": "Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`.\n\n`softplus` is a smooth approximation of `relu`. Like `relu`, `softplus` always\ntakes on positive values.\n\n\n\nExample:\n\n>>> import tensorflow as tf\n>>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy()\narray([0.6931472, 1.3132616], dtype=float32)\n\nArgs:\n features: `Tensor`\n name: Optional: name to associate with this operation.\nReturns:\n `Tensor`"} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return token_ids_0 + [self.sep_token_id]\n sep = [self.sep_token_id]\n return token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. 
A XLMProphetNet sequence has the following format:\n\n- single sequence: `X [SEP]`\n- pair of sequences: `A [SEP] B [SEP]`\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "tensorflow", "function": "def is_dtensor(tensor) -> bool:\n return _dtensor_device().is_dtensor(tensor)", "docstring": "Check whether the input tensor is a DTensor.\n\nIn Python, a DTensor has the same type as a `tf.Tensor`. This method will\nlet you check and handle the tensor differently if a tf.Tensor is a DTensor.\n\nArgs:\n tensor: an object to be checked.\n\nReturns:\n bool, True if the given tensor is a DTensor."} +{"repo": "tensorflow", "function": "def trainable_variables(self):\n return tuple((v for v in self.variables if v.trainable))", "docstring": "A sequence of trainable variables accessed by this FuncGraph.\n\nNote that functions keep only weak references to variables. Calling the\nfunction after a variable it accesses has been deleted is an error.\n\nReturns:\n Sequence of trainable variables for this func graph."} +{"repo": "tensorflow", "function": "def __init__(self, name: Union[str, bytes], bound_context: context.Context, function_type: function_type_lib.FunctionType, children: Optional[List['AtomicFunction']]=None, call_options: CallOptions=CallOptions(), cached_graph: Optional[func_graph_module.FuncGraph]=None):\n self._name = compat.as_bytes(name)\n self._bound_context = bound_context\n self._function_type = function_type\n self._children = children if children else []\n self._call_options = call_options\n self._cached_definition = None\n self._cached_graph = cached_graph\n self._generated_graph = None\n ref_key = (self._bound_context.function_scope_id, self.name)\n if ref_key not in RUNTIME_FUNCTION_REFS:\n RUNTIME_FUNCTION_REFS[ref_key] = 1\n else:\n RUNTIME_FUNCTION_REFS[ref_key] += 1", "docstring": "Construct a new AtomicFunction.\n\nArgs:\n name: str/bytes name of the runtime function in the bound context.\n bound_context: interface to the runtime for the AtomicFunction.\n function_type: input/output contract for the AtomicFunction\n children: list of AtomicFunctions that are needed to call this one.\n call_options: extra configuration options for the call.\n cached_graph: FuncGraph that this AtomicFunction was generated from (if\n known). 
Otherwise it will lazily construct a new corresponding FuncGraph\n if ever needed."} +{"repo": "beam", "function": "def __init__(self, host: str, port: int, command: Optional[str]=None, batch_size: int=100, embedded_columns: list=[]):\n self._host = host\n self._port = port\n self._command = command\n self._batch_size = batch_size\n self.embedded_columns = embedded_columns", "docstring": "Args:\nhost (str): The redis host\nport (int): The redis port\ncommand (str): command to be executed with redis client\nbatch_size (int): Number of key, values pairs to write at once\nembedded_columns (list): list of column whose embedding needs to be generated\n\nReturns:\n:class:`~apache_beam.transforms.ptransform.PTransform`"} +{"repo": "tensorflow", "function": "def _get_weighted_mean_squared_error(self, quant_min, quant_max) -> tuple[float, float, float]:\n dequantized_hist_mids = self._get_dequantized_hist_mids_after_quantize(quant_min, quant_max)\n squared_error = (self._hist_mids - dequantized_hist_mids) ** 2\n weighted_error = np.sum(squared_error * self._hist_freq)\n return (weighted_error, quant_min, quant_max)", "docstring": "Gets mean squared error between hist_mids and dequantized hist_mids.\n\nQuantization converts the range of numbers from [quant_min, quant_max] to\n[0, 2^num_bits - 1]. Values less than quant_min are converted to 0, and\nvalues greater than quant_max are converted to 2^num_bits - 1.\n\nArgs:\n quant_min: The minimum real value that can be represented by a quantized\n value.\n quant_max: The maximum real value that can be represented by a quantized\n value.\n\nReturns:\n (error, quant_min, quant_max): Tuple of weighted mean squared error.\n error = (hist_mids - dequantized_hist_mids)**2 * hist_freq"} +{"repo": "keras", "function": "def from_config(cls, config):\n return cls(**config)", "docstring": "Creates a quantizer from its config.\n\nThis method is the reverse of `get_config`,\ncapable of instantiating the same quantizer from the config\ndictionary.\n\nThis method is used by Keras `model_to_estimator`, saving and\nloading models to HDF5 formats, Keras model cloning, some visualization\nutilities, and exporting models to and from JSON.\n\nArgs:\n config: A Python dictionary, typically the output of get_config.\n\nReturns:\n A quantizer instance."} +{"repo": "transformers", "function": "def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n cos = cos.unsqueeze(unsqueeze_dim)\n sin = sin.unsqueeze(unsqueeze_dim)\n q_embed = q * cos + rotate_half(q) * sin\n k_embed = k * cos + rotate_half(k) * sin\n return (q_embed, k_embed)", "docstring": "Applies Rotary Position Embedding to the query and key tensors.\n\nArgs:\n q (`torch.Tensor`): The query tensor.\n k (`torch.Tensor`): The key tensor.\n cos (`torch.Tensor`): The cosine part of the rotary embedding.\n sin (`torch.Tensor`): The sine part of the rotary embedding.\n position_ids (`torch.Tensor`, *optional*):\n Deprecated and unused.\n unsqueeze_dim (`int`, *optional*, defaults to 1):\n The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. 
Similarly, if q and k have\n the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\nReturns:\n `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding."} +{"repo": "transformers", "function": "def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):\n vision_data = {}\n if image_sizes is not None:\n num_image_tokens = [self.image_seq_length] * len(image_sizes)\n num_image_patches = [1] * len(image_sizes)\n vision_data.update({'num_image_tokens': num_image_tokens, 'num_image_patches': num_image_patches})\n return MultiModalData(**vision_data)", "docstring": "Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.\n\nArgs:\n image_sizes (List[List[str]], *optional*):\n The input sizes formatted as (height, width) per each image.\nReturns:\n Dict[str, List[int]]: A dictionary mapping each modality (\"image\", \"video\", \"audio\")\n to a list containing the number of placeholder tokens required. If the model doesn't accept\n a certain modality or no input sizes are provided, the dict value is set to an empty list."} +{"repo": "tf-quant-finance", "function": "def sobol_sample(dim: types.IntTensor, num_results: types.IntTensor, sequence_indices: types.IntTensor=None, digital_shift: types.IntTensor=None, scrambling_matrices: types.IntTensor=None, apply_tent_transform: bool=False, validate_args: bool=False, dtype: tf.DType=None, name: str=None) -> types.RealTensor:\n with tf.name_scope(name or 'sobol_sample'):\n dtype = dtype or tf.float32\n num_digits = tf.cast(tf.math.ceil(utils.log2(tf.cast(num_results, dtype=tf.float32))), tf.int32)\n generating_matrices = sobol_generating_matrices(dim, num_results, num_digits, validate_args=validate_args, dtype=tf.int32)\n if scrambling_matrices is not None:\n generating_matrices = digital_net.scramble_generating_matrices(generating_matrices, scrambling_matrices, num_digits, validate_args=validate_args)\n return digital_net.digital_net_sample(generating_matrices, num_results, num_digits, sequence_indices=sequence_indices, digital_shift=digital_shift, apply_tent_transform=apply_tent_transform, validate_args=validate_args, dtype=dtype)", "docstring": "Samples points from the Sobol sequence.\n\n#### Examples\n\n```python\nimport tf_quant_finance as tff\n\n# Example: Sampling 1,000 points from the 2D Sobol sequence.\n\ndim = 2\nnum_results = 1000\n\ntff.math.qmc.sobol_sample(dim, num_results)\n# ==> tf.Tensor([\n# [0., 0. ],\n# [0.5, 0.5 ],\n# [0.25, 0.75 ],\n# ...\n# [0.65527344, 0.9736328 ],\n# [0.40527344, 0.7236328 ],\n# [0.90527344, 0.22363281],\n# ], shape=(1000, 2), dtype=float32)\n```\n\nArgs:\n dim: Positive scalar `Tensor` of integers with rank 0. The event size of the\n sampled points.\n num_results: Positive scalar `Tensor` of integers with rank 0. The number of\n points to sample.\n sequence_indices: Optional positive scalar `Tensor` of integers with rank 1.\n The elements of the sequence to return specified by their position in the\n sequence.\n Default value: `None` which corresponds to the `[0, num_results)` range.\n digital_shift: Optional digital shift to be applied to all the points via a\n bitwise xor.\n Default value: `None`.\n scrambling_matrices: Positive scalar `Tensor` with the same `shape` and\n `dtype` as `generating_matrices`. 
Used to randomize `generating_matrices`.\n Default value: `None`.\n apply_tent_transform: Python `bool` indicating whether to apply a tent\n transform to the sampled points.\n Default value: `False`.\n validate_args: Python `bool` indicating whether to validate arguments.\n Default value: `False`.\n dtype: Optional `dtype`. The `dtype` of the output `Tensor` (either\n `float32` or `float64`).\n Default value: `None` which maps to `float32`.\n name: Python `str` name prefixed to ops created by this function.\n Default value: `None` which maps to `sobol_sample`.\n\nReturns:\n A `Tensor` of samples from the Sobol sequence with `shape`\n `(num_samples, dim)` where `num_samples = min(num_results,\n size(sequence_indices))` and `dim = tf.shape(generating_matrices)[0]`."} +{"repo": "transformers", "function": "class Cohere2Config(PretrainedConfig):\n model_type = 'cohere2'\n keys_to_ignore_at_inference = ['past_key_values']\n base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'}\n base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}\n\n def __init__(self, vocab_size=256000, hidden_size=8192, intermediate_size=22528, logit_scale=0.0625, num_hidden_layers=40, num_attention_heads=64, num_key_value_heads=None, hidden_act='silu', max_position_embeddings=8192, initializer_range=0.02, layer_norm_eps=1e-05, use_cache=True, pad_token_id=0, bos_token_id=5, eos_token_id=255001, tie_word_embeddings=True, rope_theta=10000.0, rope_scaling=None, attention_bias=False, attention_dropout=0.0, sliding_window=4096, layer_types=None, **kwargs):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.logit_scale = logit_scale\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n if num_key_value_heads is None:\n num_key_value_heads = num_attention_heads\n self.num_key_value_heads = num_key_value_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.use_cache = use_cache\n self.rope_theta = rope_theta\n self.rope_scaling = rope_scaling\n self.attention_bias = attention_bias\n self.attention_dropout = attention_dropout\n self.sliding_window = sliding_window\n self.layer_types = layer_types\n self.head_dim = hidden_size // num_attention_heads\n rope_config_validation(self)\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)\n if self.layer_types is None:\n sliding_window_pattern = getattr(self, 'sliding_window_pattern', 4)\n self.layer_types = ['sliding_attention' if bool((i + 1) % sliding_window_pattern) else 'full_attention' for i in range(self.num_hidden_layers)]\n layer_type_validation(self.layer_types)", "docstring": "This is the configuration class to store the configuration of a [`CohereModel`]. It is used to instantiate an Cohere\nmodel according to the specified arguments, defining the model architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. 
Read the\ndocumentation from [`PretrainedConfig`] for more information. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) model.\n\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 256000):\n Vocabulary size of the Cohere model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`CohereModel`]\n hidden_size (`int`, *optional*, defaults to 8192):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 22528):\n Dimension of the MLP representations.\n logit_scale (`float`, *optional*, defaults to 0.0625):\n The scaling factor for the output logits.\n num_hidden_layers (`int`, *optional*, defaults to 40):\n Number of hidden layers in the Transformer decoder.\n num_attention_heads (`int`, *optional*, defaults to 64):\n Number of attention heads for each attention layer in the Transformer decoder.\n num_key_value_heads (`int`, *optional*):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details, check out [this\n paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to\n `num_attention_heads`.\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n max_position_embeddings (`int`, *optional*, defaults to 8192):\n The maximum sequence length that this model might ever be used with.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the layer normalization.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n pad_token_id (`int`, *optional*, defaults to 0):\n Padding token id.\n bos_token_id (`int`, *optional*, defaults to 5):\n Beginning of stream token id.\n eos_token_id (`int`, *optional*, defaults to 255001):\n End of stream token id.\n tie_word_embeddings (`bool`, *optional*, defaults to `True`):\n Whether to tie weight embeddings\n rope_theta (`float`, *optional*, defaults to 10000.0):\n The base period of the RoPE embeddings.\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type\n and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value\n accordingly.\n Expected contents:\n `rope_type` (`str`):\n The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',\n 'llama3'], with 'default' being the original RoPE implementation.\n `factor` (`float`, *optional*):\n Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. 
In\n most scaling types, a `factor` of x will enable the model to handle sequences of length x *\n original maximum pre-trained length.\n `original_max_position_embeddings` (`int`, *optional*):\n Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during\n pretraining.\n `attention_factor` (`float`, *optional*):\n Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention\n computation. If unspecified, it defaults to value recommended by the implementation, using the\n `factor` field to infer the suggested value.\n `beta_fast` (`float`, *optional*):\n Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear\n ramp function. If unspecified, it defaults to 32.\n `beta_slow` (`float`, *optional*):\n Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear\n ramp function. If unspecified, it defaults to 1.\n `short_factor` (`List[float]`, *optional*):\n Only used with 'longrope'. The scaling factor to be applied to short contexts (<\n `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden\n size divided by the number of attention heads divided by 2\n `long_factor` (`List[float]`, *optional*):\n Only used with 'longrope'. The scaling factor to be applied to long contexts (<\n `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden\n size divided by the number of attention heads divided by 2\n `low_freq_factor` (`float`, *optional*):\n Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE\n `high_freq_factor` (`float`, *optional*):\n Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE\n attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):\n Whether to use a bias in the query, key, value and output projection layers during self-attention.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n sliding_window (`int`, *optional*, defaults to 4096):\n Size of the sliding window attention context.\n layer_types (`list`, *optional*):\n Attention pattern for each layer.\n\n```python\n>>> from transformers import Cohere2Model, Cohere2Config\n\n>>> # Initializing a Cohere Nextmodel configuration\n>>> configuration = Cohere2Config()\n\n>>> # Initializing a model from the Cohere2 configuration\n>>> model = Cohere2Model(configuration) # doctest: +SKIP\n\n>>> # Accessing the model configuration\n>>> configuration = model.config # doctest: +SKIP\n```"} +{"repo": "tensorflow", "function": "def get_function_def(self, name):\n if is_oss:\n with c_api_util.tf_buffer() as buffer_:\n pywrap_tfe.TFE_ContextGetFunctionDef(self._handle, name, buffer_)\n proto_data = pywrap_tf_session.TF_GetBuffer(buffer_)\n function_def = function_pb2.FunctionDef()\n function_def.ParseFromString(proto_data)\n else:\n function_def = pywrap_tfe.TFE_ContextGetFunctionDefNoSerialization(self._handle, name)\n return function_def", "docstring": "Get a function definition from the context.\n\nArgs:\n name: function signature name.\n\nReturns:\n The requested FunctionDef.\n\nRaises:\n tf.errors.NotFoundError: if name is not the name of a registered function."} +{"repo": "transformers", "function": "def location_variable_convolution(self, hidden_states: torch.FloatTensor, kernel: torch.FloatTensor, bias: torch.FloatTensor, dilation: int=1, hop_size: int=256):\n batch, _, 
in_length = hidden_states.shape\n batch, _, out_channels, kernel_size, kernel_length = kernel.shape\n if in_length != kernel_length * hop_size:\n raise ValueError(f'Dim 2 of `hidden_states` should be {kernel_length * hop_size}) but got {in_length}. Please check `hidden_states` or `kernel` and `hop_size` to make sure they are correct.')\n padding = dilation * int((kernel_size - 1) / 2)\n hidden_states = nn.functional.pad(hidden_states, (padding, padding), 'constant', 0)\n hidden_states = hidden_states.unfold(2, hop_size + 2 * padding, hop_size)\n if hop_size < dilation:\n hidden_states = nn.functional.pad(hidden_states, (0, dilation), 'constant', 0)\n hidden_states = hidden_states.unfold(3, dilation, dilation)\n hidden_states = hidden_states[:, :, :, :, :hop_size]\n hidden_states = hidden_states.transpose(3, 4)\n hidden_states = hidden_states.unfold(4, kernel_size, 1)\n output_hidden_states = torch.einsum('bildsk,biokl->bolsd', hidden_states, kernel)\n output_hidden_states = output_hidden_states.to(memory_format=torch.channels_last_3d)\n bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d)\n output_hidden_states = output_hidden_states + bias\n output_hidden_states = output_hidden_states.contiguous().view(batch, out_channels, -1)\n return output_hidden_states", "docstring": "Performs location-variable convolution operation on the input sequence (hidden_states) using the local\nconvolution kernel. This was introduced in [LVCNet: Efficient Condition-Dependent Modeling Network for Waveform\nGeneration](https://huggingface.co/papers/2102.10815) by Zhen Zheng, Jianzong Wang, Ning Cheng, and Jing Xiao.\n\nTime: 414 \u03bcs \u00b1 309 ns per loop (mean \u00b1 std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100.\n\nArgs:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, in_channels, in_length)`):\n The input sequence of shape (batch, in_channels, in_length).\n kernel (`torch.FloatTensor` of shape `(batch_size, in_channels, out_channels, kernel_size, kernel_length)`):\n The local convolution kernel of shape (batch, in_channels, out_channels, kernel_size, kernel_length).\n bias (`torch.FloatTensor` of shape `(batch_size, out_channels, kernel_length)`):\n The bias for the local convolution of shape (batch, out_channels, kernel_length).\n dilation (`int`, *optional*, defaults to 1):\n The dilation of convolution.\n hop_size (`int`, *optional*, defaults to 256):\n The hop_size of the conditioning sequence.\nReturns:\n `torch.FloatTensor`: the output sequence after performing local convolution with shape (batch_size,\n out_channels, in_length)."} +{"repo": "tensorflow", "function": "def conv3d(inputs, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None):\n warnings.warn('`tf.layers.conv3d` is deprecated and will be removed in a future version. 
Please Use `tf.keras.layers.Conv3D` instead.')\n layer = Conv3D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name)\n return layer.apply(inputs)", "docstring": "Functional interface for the 3D convolution layer.\n\nThis layer creates a convolution kernel that is convolved\n(actually cross-correlated) with the layer input to produce a tensor of\noutputs. If `use_bias` is True (and a `bias_initializer` is provided),\na bias vector is created and added to the outputs. Finally, if\n`activation` is not `None`, it is applied to the outputs as well.\n\nArgs:\n inputs: Tensor input.\n filters: Integer, the dimensionality of the output space (i.e. the number\n of filters in the convolution).\n kernel_size: An integer or tuple/list of 3 integers, specifying the\n depth, height and width of the 3D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 3 integers,\n specifying the strides of the convolution along the depth,\n height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, depth, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, depth, height, width)`.\n dilation_rate: An integer or tuple/list of 3 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n activation: Activation function. Set it to None to maintain a\n linear activation.\n use_bias: Boolean, whether the layer uses a bias.\n kernel_initializer: An initializer for the convolution kernel.\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer will be used.\n kernel_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n kernel_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). 
Constraints are\n not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n trainable: Boolean, if `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: A string, the name of the layer.\n reuse: Boolean, whether to reuse the weights of a previous layer\n by the same name.\n\nReturns:\n Output tensor.\n\nRaises:\n ValueError: if eager execution is enabled."} +{"repo": "transformers", "function": "class TvpLoss(nn.Module):\n\n def __init__(self, losses):\n super().__init__()\n self.loss_map = {'iou': self.loss_iou, 'distance': self.loss_distance, 'duration': self.loss_duration}\n for loss in losses:\n if loss not in self.loss_map:\n raise ValueError(f'Loss {loss} not supported')\n self.losses = losses\n\n def loss_iou(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):\n \"\"\"\n Measure the intersection over union.\n \"\"\"\n inter = torch.min(candidates_end_time, end_time) - torch.max(candidates_start_time, start_time)\n union = torch.max(candidates_end_time, end_time) - torch.min(candidates_start_time, start_time)\n iou = 1 - inter.clamp(min=0) / union\n return iou\n\n def loss_distance(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):\n \"\"\"\n Measure the distance of mid points.\n \"\"\"\n mid_candidates = torch.div(torch.add(candidates_start_time, candidates_end_time), 2.0)\n mid_groundtruth = torch.div(torch.add(start_time, end_time), 2.0)\n distance_diff = torch.div(torch.max(mid_candidates, mid_groundtruth) - torch.min(mid_candidates, mid_groundtruth), duration).clamp(min=0.2)\n return distance_diff\n\n def loss_duration(self, start_time, end_time, candidates_start_time, candidates_end_time, duration):\n \"\"\"\n Measure the difference of duration.\n \"\"\"\n duration_candidates = torch.sub(candidates_end_time, candidates_start_time)\n duration_groundtruth = torch.sub(end_time, start_time)\n duration_diff = torch.square(torch.div(torch.sub(duration_candidates, duration_groundtruth), duration))\n duration_diff = duration_diff.clamp(min=0.4)\n return duration_diff\n\n def forward(self, logits, labels):\n \"\"\"\n This performs the loss computation.\n\n Args:\n logits (`torch.FloatTensor`):\n The output logits of head module.\n labels (`List[torch.FloatTensor]`):\n List of tensors ([start, end, duration]), which contains start time, end time of the video corresponding to the text, and also the duration.\n \"\"\"\n duration, start_time, end_time = labels\n candidates = torch.mul(logits, duration)\n candidates_start_time, candidates_end_time = (candidates[:, 0].float(), candidates[:, 1].float())\n losses_dict = {}\n for loss in self.losses:\n losses_dict.update({loss: self.loss_map[loss](start_time, end_time, candidates_start_time, candidates_end_time, duration)})\n return losses_dict", "docstring": "This class computes the losses for `TvpForVideoGrounding`. 
The process happens in two steps: 1) we compute\nhungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched\nground-truth / prediction (supervise class and box).\n\nArgs:\n losses (`List[str]`):\n List of all the losses to be applied."} +{"repo": "tensorflow", "function": "def load(path, element_spec=None, compression=None, reader_func=None):\n return dataset_ops.Dataset.load(path, element_spec, compression, reader_func)", "docstring": "Loads a previously saved dataset.\n\nExample usage:\n\n>>> import tempfile\n>>> path = os.path.join(tempfile.gettempdir(), \"saved_data\")\n>>> # Save a dataset\n>>> dataset = tf.data.Dataset.range(2)\n>>> tf.data.experimental.save(dataset, path)\n>>> new_dataset = tf.data.experimental.load(path)\n>>> for elem in new_dataset:\n... print(elem)\ntf.Tensor(0, shape=(), dtype=int64)\ntf.Tensor(1, shape=(), dtype=int64)\n\n\nIf the default option of sharding the saved dataset was used, the element\norder of the saved dataset will be preserved when loading it.\n\nThe `reader_func` argument can be used to specify a custom order in which\nelements should be loaded from the individual shards. The `reader_func` is\nexpected to take a single argument -- a dataset of datasets, each containing\nelements of one of the shards -- and return a dataset of elements. For\nexample, the order of shards can be shuffled when loading them as follows:\n\n```python\ndef custom_reader_func(datasets):\n datasets = datasets.shuffle(NUM_SHARDS)\n return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)\n\ndataset = tf.data.experimental.load(\n path=\"/path/to/data\", ..., reader_func=custom_reader_func)\n```\n\nArgs:\n path: Required. A path pointing to a previously saved dataset.\n element_spec: Optional. A nested structure of `tf.TypeSpec` objects matching\n the structure of an element of the saved dataset and specifying the type\n of individual element components. If not provided, the nested structure of\n `tf.TypeSpec` saved with the saved dataset is used. Note that this\n argument is required in graph mode.\n compression: Optional. The algorithm to use to decompress the data when\n reading it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.\n reader_func: Optional. A function to control how to read data from shards.\n If present, the function will be traced and executed as graph computation.\n\nReturns:\n A `tf.data.Dataset` instance.\n\nRaises:\n FileNotFoundError: If `element_spec` is not specified and the saved nested\n structure of `tf.TypeSpec` can not be located with the saved dataset.\n ValueError: If `element_spec` is not specified and the method is executed\n in graph mode."} +{"repo": "beam", "function": "def GetConfig(self, request, global_params=None):\n config = self.GetMethodConfig('GetConfig')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Get encoded debug configuration for component. 
Not cacheable.\n\nArgs:\n request: (DataflowProjectsJobsDebugGetConfigRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (GetDebugConfigResponse) The response message."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A REALM sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "tensorflow", "function": "def repeat_with_axis(data, repeats, axis, name=None):\n use_optimized_non_xla_implementation = False\n if not isinstance(axis, int):\n raise TypeError(f'Argument `axis` must be an int. Received `axis` = {axis} of type {type(axis).__name__}')\n with ops.name_scope(name, 'Repeat', [data, repeats]):\n data = ops.convert_to_tensor(data, name='data')\n if not use_optimized_non_xla_implementation:\n repeats = convert_to_int_tensor(repeats, name='repeats')\n else:\n repeats = convert_to_int_tensor(repeats, name='repeats', dtype=None)\n repeats.shape.with_rank_at_most(1)\n data = _with_nonzero_rank(data)\n data_shape = shape(data, out_type=repeats.dtype)\n axis = get_positive_axis(axis, data.shape.rank, ndims_name='rank(data)')\n if repeats.shape.num_elements() == 1:\n repeats = reshape(repeats, [])\n expanded = expand_dims(data, axis + 1)\n tiled = tile_one_dimension(expanded, axis + 1, repeats)\n result_shape = concat([data_shape[:axis], [repeats * data_shape[axis]], data_shape[axis + 1:]], axis=0)\n return reshape(tiled, result_shape)\n if repeats.shape.ndims == 1:\n data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])\n repeats = broadcast_to(repeats, [data_shape[axis]])\n if not use_optimized_non_xla_implementation:\n repeats_original = repeats\n if repeats.shape.ndims != axis + 1:\n repeats_shape = shape(repeats)\n repeats_ndims = rank(repeats)\n broadcast_shape = concat([data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)\n repeats = broadcast_to(repeats, broadcast_shape)\n repeats.set_shape([None] * (axis + 1))\n max_repeat = gen_math_ops._max(repeats, _all_dimensions(repeats))\n max_repeat = gen_math_ops.maximum(ops.convert_to_tensor(0, name='zero', dtype=max_repeat.dtype), max_repeat)\n mask = sequence_mask(repeats, max_repeat)\n expanded = expand_dims(data, axis + 1)\n tiled = tile_one_dimension(expanded, axis + 1, max_repeat)\n masked = boolean_mask(tiled, mask)\n if axis == 0:\n result = masked\n else:\n repeated_dim_size = gen_math_ops._sum(repeats_original, axis=gen_math_ops._range(0, rank(repeats_original), 1))\n result_shape = concat([data_shape[:axis], [repeated_dim_size], data_shape[axis + 1:]], axis=0)\n result = reshape(masked, result_shape)\n if data.shape.ndims is not None:\n new_axis_size = 0 if repeats.shape[0] == 0 else None\n 
result.set_shape(data.shape[:axis].concatenate([new_axis_size]).concatenate(data.shape[axis + 1:]))\n return result\n else:\n repeats_scan = gen_math_ops.cumsum(repeats)\n output_size = concat([zeros(1, dtype=repeats_scan.dtype), repeats_scan], axis=0)[-1]\n output_indices = gen_math_ops.range(output_size, dtype=repeats.dtype)\n gather_indices = searchsorted(repeats_scan, output_indices, side='right', out_type=repeats.dtype)\n return gather(data, gather_indices, axis=axis)", "docstring": "Repeats elements of `data`.\n\nArgs:\n data: An `N`-dimensional tensor.\n repeats: A 1-D integer tensor specifying how many times each element in\n `axis` should be repeated. `len(repeats)` must equal `data.shape[axis]`.\n Supports broadcasting from a scalar value.\n axis: `int`. The axis along which to repeat values. Must be less than\n `max(N, 1)`.\n name: A name for the operation.\n\nReturns:\n A tensor with `max(N, 1)` dimensions. Has the same shape as `data`,\n except that dimension `axis` has size `sum(repeats)`.\n\nExample usage:\n\n>>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)\n\n>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)\n\n>>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)\n"} +{"repo": "tensorflow", "function": "def sparse_placeholder(dtype, shape=None, name=None):\n if context.executing_eagerly():\n raise RuntimeError('`sparse_placeholder` is not compatible with eager execution.')\n shape_name = name + '/shape' if name is not None else None\n default_shape_name = name + '/shape_default' if name is not None else None\n if shape is None:\n rank = None\n dense_shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)\n dense_shape_default = tensor_util.constant_value_as_shape(dense_shape)\n else:\n if isinstance(shape, tensor_lib.Tensor):\n rank = shape.get_shape()[0]\n dense_shape_default = tensor_util.constant_value_as_shape(shape)\n else:\n rank = len(shape)\n dense_shape_default = tensor_shape.TensorShape(tuple((None if dim == -1 else dim for dim in shape)))\n shape = tuple((tensor_shape.dimension_value(dim) for dim in shape))\n shape = tuple((-1 if dim is None else dim for dim in shape))\n shape = ops.convert_to_tensor(shape, dtype=dtypes.int64, name=default_shape_name)\n dense_shape = placeholder_with_default(shape, shape=shape.shape, name=shape_name)\n result = sparse_tensor.SparseTensor(values=placeholder(dtype, shape=[None], name=name + '/values' if name is not None else None), indices=placeholder(dtypes.int64, shape=[None, rank], name=name + '/indices' if name is not None else None), dense_shape=dense_shape)\n result.set_shape(dense_shape_default)\n return result", "docstring": "Inserts a placeholder for a sparse tensor that will be always fed.\n\n**Important**: This sparse tensor will produce an error if evaluated.\nIts value must be fed using the `feed_dict` optional argument to\n`Session.run()`, `Tensor.eval()`, or `Operation.run()`.\n\nFor example:\n\n```python\nx = tf.compat.v1.sparse.placeholder(tf.float32)\ny = tf.sparse.reduce_sum(x)\n\nwith tf.compat.v1.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)\n values = np.array([1.0, 2.0], dtype=np.float32)\n shape = np.array([7, 9, 2], dtype=np.int64)\n print(sess.run(y, feed_dict={\n x: tf.compat.v1.SparseTensorValue(indices, values, shape)})) # Will\n succeed.\n print(sess.run(y, feed_dict={\n x: (indices, values, shape)})) # Will succeed.\n\n sp = tf.sparse.SparseTensor(indices=indices, values=values,\n 
dense_shape=shape)\n sp_value = sp.eval(session=sess)\n print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.\n```\n\n\nArgs:\n dtype: The type of `values` elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). If the shape is not\n specified, you can feed a sparse tensor of any shape.\n name: A name for prefixing the operations (optional).\n\nReturns:\n A `SparseTensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n\nRaises:\n RuntimeError: if eager execution is enabled\n\n@compatibility(TF2)\nThis API is not compatible with eager execution and `tf.function`. To migrate\nto TF2, rewrite the code to be compatible with eager execution. Check the\n[migration\nguide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls)\non replacing `Session.run` calls. In TF2, you can just pass tensors directly\ninto ops and layers. If you want to explicitly set up your inputs, also see\n[Keras functional API](https://www.tensorflow.org/guide/keras/functional) on\nhow to use `tf.keras.Input` to replace `tf.compat.v1.sparse_placeholder`.\n`tf.function` arguments also do the job of `tf.compat.v1.sparse_placeholder`.\nFor more details please read [Better\nperformance with tf.function](https://www.tensorflow.org/guide/function).\n@end_compatibility"} +{"repo": "fhir-py", "function": "def get_inlined_extension_url(field: descriptor.FieldDescriptor) -> str:\n options = annotation_utils.get_options(field)\n if options.HasExtension(annotations_pb2.fhir_inlined_extension_url):\n return options.Extensions[annotations_pb2.fhir_inlined_extension_url]\n return field.camelcase_name", "docstring": "Returns the FHIR inlined extension URL for a field.\n\nArgs:\n field: The FieldDescriptor to examine.\n\nReturns:\n The FHIR inlined extension URL, if one exists, otherwise returns the camel-\n case name of the FieldDescriptor."} +{"repo": "transformers", "function": "def tokenize(self, text, never_split=None):\n never_split = self.never_split.union(set(never_split)) if never_split else self.never_split\n text = self._clean_text(text)\n if self.tokenize_chinese_chars:\n text = self._tokenize_chinese_chars(text)\n unicode_normalized_text = unicodedata.normalize('NFC', text)\n orig_tokens = whitespace_tokenize(unicode_normalized_text)\n split_tokens = []\n for token in orig_tokens:\n if token not in never_split:\n if self.do_lower_case:\n token = token.lower()\n if self.strip_accents is not False:\n token = self._run_strip_accents(token)\n elif self.strip_accents:\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token, never_split))\n output_tokens = whitespace_tokenize(' '.join(split_tokens))\n return output_tokens", "docstring": "Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.\n\nArgs:\n never_split (`List[str]`, *optional*)\n Kept for backward compatibility purposes. 
Now implemented directly at the base class level (see\n [`PreTrainedTokenizer.tokenize`]) List of token not to split."} +{"repo": "temporian", "function": "def find_all_build_files(dir: str) -> List[Tuple[str, str]]:\n build_file_dirs = []\n for root, _, files in os.walk(dir):\n for file in files:\n if file in BUILD_FILENAMES:\n root = root.strip('./')\n build_file_dirs.append((root, file))\n return build_file_dirs", "docstring": "List all the BUILD files.\n\nReturns:\n The list of (directory, filename) of all BUILD files."} +{"repo": "tf-quant-finance", "function": "def __init__(self, dim: int, mean_reversion: types.RealTensor, volatility: Union[types.RealTensor, Callable[..., types.RealTensor]], initial_discount_rate_fn: Callable[..., types.RealTensor], corr_matrix: types.RealTensor=None, validate_args: bool=False, dtype: tf.DType=None, name: str=None):\n self._name = name or 'quasi_gaussian_hjm_model'\n with tf.name_scope(self._name):\n self._dtype = dtype or tf.float32\n self._dim = dim + dim ** 2\n self._factors = dim\n\n def _instant_forward_rate_fn(t):\n t = tf.convert_to_tensor(t, dtype=self._dtype)\n\n def _log_zero_coupon_bond(x):\n r = tf.convert_to_tensor(initial_discount_rate_fn(x), dtype=self._dtype)\n return -r * x\n rate = -gradient.fwd_gradient(_log_zero_coupon_bond, t, use_gradient_tape=True, unconnected_gradients=tf.UnconnectedGradients.ZERO)\n return rate\n\n def _initial_discount_rate_fn(t):\n return tf.convert_to_tensor(initial_discount_rate_fn(t), dtype=self._dtype)\n self._instant_forward_rate_fn = _instant_forward_rate_fn\n self._initial_discount_rate_fn = _initial_discount_rate_fn\n mean_reversion = tf.convert_to_tensor(mean_reversion, dtype=dtype, name='mean_reversion')\n\n def _infer_batch_shape():\n zero = tf.constant([0], dtype=self._dtype)\n return _initial_discount_rate_fn(zero).shape.as_list()[:-1]\n self._batch_shape = _infer_batch_shape()\n self._batch_rank = len(self._batch_shape)\n self._mean_reversion = mean_reversion\n if callable(volatility):\n self._volatility = volatility\n else:\n volatility = tf.convert_to_tensor(volatility, dtype=dtype)\n if self._batch_rank > 0:\n volatility = tf.expand_dims(volatility, axis=self._batch_rank)\n\n def _tensor_to_volatility_fn(t, r):\n del t, r\n return volatility\n self._volatility = _tensor_to_volatility_fn\n if corr_matrix is None:\n corr_matrix = tf.eye(dim, dim, batch_shape=self._batch_shape, dtype=self._dtype)\n self._rho = tf.convert_to_tensor(corr_matrix, dtype=self._dtype, name='rho')\n if validate_args:\n try:\n self._sqrt_rho = tf.linalg.cholesky(self._rho)\n except:\n raise ValueError('The input correlation matrix is not positive semidefinite.')\n else:\n self._sqrt_rho = _get_valid_sqrt_matrix(self._rho)\n\n def _vol_fn(t, state):\n \"\"\"Volatility function of qG-HJM.\"\"\"\n x = state[..., :self._factors]\n batch_shape_x = x.shape.as_list()[:-1]\n r_t = self._instant_forward_rate_fn(t) + tf.reduce_sum(x, axis=-1, keepdims=True)\n volatility = self._volatility(t, r_t)\n volatility = tf.expand_dims(volatility, axis=-1)\n diffusion_x = tf.broadcast_to(tf.expand_dims(self._sqrt_rho, axis=self._batch_rank) * volatility, batch_shape_x + [self._factors, self._factors])\n paddings = tf.constant([[0, 0]] * len(batch_shape_x) + [[0, self._factors ** 2], [0, self._factors ** 2]], dtype=tf.int32)\n diffusion = tf.pad(diffusion_x, paddings)\n return diffusion\n\n def _drift_fn(t, state):\n \"\"\"Drift function of qG-HJM.\"\"\"\n x = state[..., :self._factors]\n y = state[..., self._factors:]\n batch_shape_x = 
x.shape.as_list()[:-1]\n y = tf.reshape(y, batch_shape_x + [self._factors, self._factors])\n r_t = self._instant_forward_rate_fn(t) + tf.reduce_sum(x, axis=-1, keepdims=True)\n volatility = self._volatility(t, r_t)\n volatility = tf.expand_dims(volatility, axis=-1)\n volatility_squared = tf.linalg.matmul(volatility, volatility, transpose_b=True)\n mr2 = tf.expand_dims(self._mean_reversion, axis=-1)\n perm = list(range(self._batch_rank)) + [self._batch_rank + 1, self._batch_rank]\n mr2 = mr2 + tf.transpose(mr2, perm=perm)\n mr2 = tf.expand_dims(mr2, axis=self._batch_rank)\n mr = self._mean_reversion\n if self._batch_rank > 0:\n mr = tf.expand_dims(self._mean_reversion, axis=1)\n drift_x = tf.math.reduce_sum(y, axis=-1) - mr * x\n drift_y = tf.expand_dims(self._rho, axis=self._batch_rank) * volatility_squared - mr2 * y\n drift_y = tf.reshape(drift_y, batch_shape_x + [self._factors * self._factors])\n drift = tf.concat([drift_x, drift_y], axis=-1)\n return drift\n super(QuasiGaussianHJM, self).__init__(self._dim, _drift_fn, _vol_fn, self._dtype, self._name)", "docstring": "Initializes a batch of HJM models.\n\nArgs:\n dim: A Python scalar which corresponds to the number of factors\n comprising the model.\n mean_reversion: A real positive `Tensor` of shape `batch_shape + [dim]`.\n `batch_shape` denotes the shape of independent HJM models within the\n batch. Corresponds to the mean reversion rate of each factor.\n volatility: A real positive `Tensor` of the same `dtype` and shape as\n `mean_reversion` or a callable with the following properties:\n (a) The callable should accept a scalar `Tensor` `t` and a `Tensor`\n `r(t)` of shape `batch_shape + [num_samples]` and returns a `Tensor` of\n shape compatible with `batch_shape + [num_samples, dim]`. The variable\n `t` stands for time and `r(t)` is the short rate at time `t`. The\n function returns instantaneous volatility `sigma(t) = sigma(t, r(t))`.\n When `volatility` is specified as a real `Tensor`, each factor is\n assumed to have a constant instantaneous volatility and the model is\n effectively a Gaussian HJM model.\n Corresponds to the instantaneous volatility of each factor.\n initial_discount_rate_fn: A Python callable that accepts expiry time as\n a real `Tensor` of the same `dtype` as `mean_reversion` and returns a\n `Tensor` of shape `batch_shape + input_shape`.\n Corresponds to the zero coupon bond yield at the present time for the\n input expiry time.\n corr_matrix: A `Tensor` of shape `batch_shape + [dim, dim]` and the same\n `dtype` as `mean_reversion`.\n Corresponds to the correlation matrix `Rho`.\n validate_args: Optional boolean flag to enable validation of the input\n correlation matrix. If the flag is enabled and the input correlation\n matrix is not positive semidefinite, an error is raised.\n Default value: False.\n dtype: The default dtype to use when converting values to `Tensor`s.\n Default value: `None` which maps to `tf.float32`.\n name: Python string. 
The name to give to the ops created by this class.\n Default value: `None` which maps to the default name\n `quasi_gaussian_hjm_model`."} +{"repo": "tensorflow", "function": "def convert(self):\n if not _jit:\n raise ImportError('Cannot import jit from jax.')\n if not self._serving_funcs:\n raise ValueError('No serving func is specified.')\n if not self._inputs:\n raise ValueError('Input tensors are not specified.')\n if len(self._inputs) != len(self._serving_funcs):\n msg = 'Input tensor mapping len {} does not match serving func len {}.'.format(len(self._inputs), len(self._serving_funcs))\n raise ValueError(msg)\n if not isinstance(self._inputs, (tuple, list)):\n raise ValueError('Input tensors should be pass in a tuple list wrapped in an array.')\n if len(self._serving_funcs) > 1:\n raise ValueError('Currently only support single serving function.')\n if not isinstance(self._inputs[0], (tuple, list)):\n raise ValueError('The input placeholders are not a dictionary.')\n input_names = []\n ordered_inputs = []\n for input_name, tensor in self._inputs[0]:\n input_names.append(input_name)\n ordered_inputs.append(tensor)\n try:\n hlo_proto = _jit(self._serving_funcs[0]).trace(*ordered_inputs).lower(lowering_platforms=('cpu',)).compiler_ir('hlo').as_serialized_hlo_module_proto()\n except Exception:\n raise ValueError('Failed to convert the given Jax function to hlo.')\n converter_kwargs = {'input_content': hlo_proto, 'input_names': input_names, 'is_proto_format': True}\n converter_kwargs.update(self._get_base_converter_args())\n quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, None, experimental_qdq_annotation=self._experimental_strict_qdq)\n self._validate_inference_input_output_types(quant_mode)\n converter_kwargs.update(quant_mode.converter_flags())\n result = _convert_jax_hlo(**converter_kwargs)\n return self._optimize_tflite_model(result, quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)", "docstring": "Converts a Jax serving func based on instance variables.\n\nReturns:\n The converted data in serialized format.\n\nRaises:\n ImportError:\n If cannot import the jit from jax.\n ValueError:\n No serving function is specified.\n Input tensors are not specified.\n The truth value of an array with more than one element is ambiguous.\n Failed to convert the given Jax function to hlo."} +{"repo": "transformers", "function": "def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim)\n denominator = denominator.clamp_min(1.0)\n loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator\n variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator\n scale = torch.sqrt(variance + self.minimum_scale)\n return ((data - loc) / scale, loc, scale)", "docstring": "Parameters:\n data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):\n input for Batch norm calculation\n observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):\n Calculating the scale on the observed indicator.\nReturns:\n tuple of `torch.Tensor` of shapes\n (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,\n `(batch_size, 1, num_input_channels)`)"} +{"repo": "tensorflow", "function": "def are_compatible(spec1, 
spec2):\n try:\n nest.assert_same_structure(spec1, spec2)\n except TypeError:\n return False\n except ValueError:\n return False\n for s1, s2 in zip(nest.flatten(spec1), nest.flatten(spec2)):\n if not s1.is_compatible_with(s2) or not s2.is_compatible_with(s1):\n return False\n return True", "docstring": "Indicates whether two type specifications are compatible.\n\nTwo type specifications are compatible if they have the same nested structure\nand the their individual components are pair-wise compatible.\n\nArgs:\n spec1: A `tf.TypeSpec` object to compare.\n spec2: A `tf.TypeSpec` object to compare.\n\nReturns:\n `True` if the two type specifications are compatible and `False` otherwise."} +{"repo": "tensorflow", "function": "def __getitem__(self, key):\n if key is None:\n key = self._key()\n value = self._get_recursive(key)\n if value is None:\n value = self[key] = self.default_factory()\n return value", "docstring": "Gets the value at key (or current context), or sets default value.\n\nArgs:\n key: May be `None` or `Graph`object. When `None`, the key is set to the\n current context.\n\nReturns:\n Either the cached or default value."} +{"repo": "keras", "function": "def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None, return_dict=False):\n raise NotImplementedError", "docstring": "Runs a single gradient update on a single batch of data.\n\nArgs:\n x: Input data. Must be array-like.\n y: Target data. Must be array-like.\n sample_weight: Optional array of the same length as x, containing\n weights to apply to the model's loss for each sample.\n In the case of temporal data, you can pass a 2D array\n with shape `(samples, sequence_length)`, to apply a different\n weight to every timestep of every sample.\n class_weight: Optional dictionary mapping class indices (integers)\n to a weight (float) to apply to the model's loss for the samples\n from this class during training. This can be useful to tell the\n model to \"pay more attention\" to samples from an\n under-represented class. When `class_weight` is specified\n and targets have a rank of 2 or greater, either `y` must\n be one-hot encoded, or an explicit final dimension of 1\n must be included for sparse class labels.\n return_dict: If `True`, loss and metric results are returned as a\n dict, with each key being the name of the metric. 
If `False`,\n they are returned as a list.\n\nReturns:\n A scalar loss value (when no metrics and `return_dict=False`),\n a list of loss and metric values\n (if there are metrics and `return_dict=False`), or a dict of\n metric and loss values (if `return_dict=True`)."} +{"repo": "beam", "function": "def run(argv=None, save_main_session=True):\n known_args, pipeline_args = parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n engine_handler = KeyedModelHandler(TensorRTEngineHandlerNumPy(min_batch_size=1, max_batch_size=1, engine_path=known_args.engine_path))\n with beam.Pipeline(options=pipeline_options) as p:\n filename_value_pair = p | 'ReadImageNames' >> beam.io.ReadFromText(known_args.input) | 'ReadImageData' >> beam.Map(lambda image_name: read_image(image_file_name=image_name, path_to_dir=known_args.images_dir)) | 'AttachImageSizeToKey' >> beam.Map(attach_im_size_to_key) | 'PreprocessImages' >> beam.MapTuple(lambda file_name, data: (file_name, preprocess_image(data)))\n predictions = filename_value_pair | 'TensorRTRunInference' >> RunInference(engine_handler) | 'ProcessOutput' >> beam.ParDo(PostProcessor())\n _ = predictions | 'WriteOutputToGCS' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)", "docstring": "Args:\n argv: Command line arguments defined for this example."} +{"repo": "transformers", "function": "def reverse_bettertransformer(self):\n if not is_optimum_available():\n raise ImportError('The package `optimum` is required to use Better Transformer.')\n from optimum.version import __version__ as optimum_version\n if version.parse(optimum_version) < version.parse('1.7.0'):\n raise ImportError(f'Please install optimum>=1.7.0 to use Better Transformer. 
The version {optimum_version} was found.')\n from optimum.bettertransformer import BetterTransformer\n return BetterTransformer.reverse(self)", "docstring": "Reverts the transformation from [`~PreTrainedModel.to_bettertransformer`] so that the original modeling is\nused, for example in order to save the model.\n\nReturns:\n [`PreTrainedModel`]: The model converted back to the original modeling."} +{"repo": "transformers", "function": "def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):\n return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)", "docstring": "Instantiate a [`MaskFormerConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model\nconfiguration.\n\n Args:\n backbone_config ([`PretrainedConfig`]):\n The backbone configuration.\n decoder_config ([`PretrainedConfig`]):\n The transformer decoder configuration to use.\n\n Returns:\n [`MaskFormerConfig`]: An instance of a configuration object"} +{"repo": "keras", "function": "def serialize_keras_object(obj):\n if obj is None:\n return obj\n if isinstance(obj, PLAIN_TYPES):\n return obj\n if isinstance(obj, (list, tuple)):\n config_arr = [serialize_keras_object(x) for x in obj]\n return tuple(config_arr) if isinstance(obj, tuple) else config_arr\n if isinstance(obj, dict):\n return serialize_dict(obj)\n if isinstance(obj, bytes):\n return {'class_name': '__bytes__', 'config': {'value': obj.decode('utf-8')}}\n if isinstance(obj, slice):\n return {'class_name': '__slice__', 'config': {'start': serialize_keras_object(obj.start), 'stop': serialize_keras_object(obj.stop), 'step': serialize_keras_object(obj.step)}}\n if isinstance(obj, type(Ellipsis)):\n return {'class_name': '__ellipsis__', 'config': {}}\n if isinstance(obj, backend.KerasTensor):\n history = getattr(obj, '_keras_history', None)\n if history:\n history = list(history)\n history[0] = history[0].name\n return {'class_name': '__keras_tensor__', 'config': {'shape': obj.shape, 'dtype': obj.dtype, 'keras_history': history}}\n if tf.available and isinstance(obj, tf.TensorShape):\n return obj.as_list() if obj._dims is not None else None\n if backend.is_tensor(obj):\n return {'class_name': '__tensor__', 'config': {'value': backend.convert_to_numpy(obj).tolist(), 'dtype': backend.standardize_dtype(obj.dtype)}}\n if type(obj).__module__ == np.__name__:\n if isinstance(obj, np.ndarray) and obj.ndim > 0:\n return {'class_name': '__numpy__', 'config': {'value': obj.tolist(), 'dtype': backend.standardize_dtype(obj.dtype)}}\n else:\n return obj.item()\n if tf.available and isinstance(obj, tf.DType):\n return obj.name\n if isinstance(obj, types.FunctionType) and obj.__name__ == '':\n warnings.warn(f'The object being serialized includes a `lambda`. This is unsafe. In order to reload the object, you will have to pass `safe_mode=False` to the loading function. Please avoid using `lambda` in the future, and use named Python functions instead. 
This is the `lambda` being serialized: {inspect.getsource(obj)}', stacklevel=2)\n return {'class_name': '__lambda__', 'config': {'value': python_utils.func_dump(obj)}}\n if tf.available and isinstance(obj, tf.TypeSpec):\n ts_config = obj._serialize()\n ts_config = list(map(lambda x: x.as_list() if isinstance(x, tf.TensorShape) else x.name if isinstance(x, tf.DType) else x, ts_config))\n return {'class_name': '__typespec__', 'spec_name': obj.__class__.__name__, 'module': obj.__class__.__module__, 'config': ts_config, 'registered_name': None}\n inner_config = _get_class_or_fn_config(obj)\n config_with_public_class = serialize_with_public_class(obj.__class__, inner_config)\n if config_with_public_class is not None:\n get_build_and_compile_config(obj, config_with_public_class)\n record_object_after_serialization(obj, config_with_public_class)\n return config_with_public_class\n if isinstance(obj, types.FunctionType):\n module = obj.__module__\n else:\n module = obj.__class__.__module__\n class_name = obj.__class__.__name__\n if module == 'builtins':\n registered_name = None\n elif isinstance(obj, types.FunctionType):\n registered_name = object_registration.get_registered_name(obj)\n else:\n registered_name = object_registration.get_registered_name(obj.__class__)\n config = {'module': module, 'class_name': class_name, 'config': inner_config, 'registered_name': registered_name}\n get_build_and_compile_config(obj, config)\n record_object_after_serialization(obj, config)\n return config", "docstring": "Retrieve the config dict by serializing the Keras object.\n\n`serialize_keras_object()` serializes a Keras object to a python dictionary\nthat represents the object, and is a reciprocal function of\n`deserialize_keras_object()`. See `deserialize_keras_object()` for more\ninformation about the config format.\n\nArgs:\n obj: the Keras object to serialize.\n\nReturns:\n A python dict that represents the object. The python dict can be\n deserialized via `deserialize_keras_object()`."} +{"repo": "transformers", "function": "class BlipTextVisionModelOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n image_embeds: Optional[torch.FloatTensor] = None\n last_hidden_state: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the\nlast hidden states. 
This class also adds the loss term from the text decoder.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Language modeling loss from the text decoder.\n image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):\n The image embeddings obtained by applying the projection layer to the pooler_output.\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "tensorflow", "function": "def dup(node, copy_map, field_name='___pyct_anno'):\n for n in gast.walk(node):\n for k in copy_map:\n if hasanno(n, k, field_name):\n setanno(n, copy_map[k], getanno(n, k, field_name), field_name)", "docstring": "Recursively copies annotations in an AST tree.\n\nArgs:\n node: ast.AST\n copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination\n key. All annotations with the source key will be copied to identical\n annotations with the destination key.\n field_name: str"} +{"repo": "keras", "function": "class Zeros(Initializer):\n\n def __call__(self, shape, dtype=None):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes\n are supported. If not specified, `keras.backend.floatx()`\n is used, which default to `float32` unless you configured it\n otherwise (via `keras.backend.set_floatx(float_dtype)`).\n \"\"\"\n dtype = standardize_dtype(dtype)\n return ops.zeros(shape, dtype=dtype)", "docstring": "Initializer that generates tensors initialized to 0.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = Zeros()\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = Zeros()\n>>> layer = Dense(units=3, kernel_initializer=initializer)"} +{"repo": "pyglove", "function": "def trivial_reward(example):\n return example", "docstring": "Reward for the trivial search space.\n\nThe reward (i.e. fitness) is the value itself. 
The goal of the search,\ntherefore, is to find the value 1.\n\nArgs:\n example: a materialized value.\n\nReturns:\n The corresponding reward."} +{"repo": "tensorflow", "function": "def _dynamic_range_quantize(src_saved_model_path: str, dst_saved_model_path: str, quantization_options: _QuantizationOptions) -> autotrackable.AutoTrackable:\n mode_str = 'dynamic-range quantization'\n if _is_qat_saved_model(src_saved_model_path):\n raise ValueError('The models trained with quantization-aware training (QAT) is not supported for %s.' % mode_str)\n logging.info('Running post-training %s on model: %s', mode_str, src_saved_model_path)\n logging.info('QuantizationOptions: \\n%s', quantization_options)\n signature_def_map = save_model.get_signatures_from_saved_model(src_saved_model_path, quantization_options.signature_keys, quantization_options.tags)\n pywrap_quantize_model.quantize_ptq_dynamic_range(src_saved_model_path, dst_saved_model_path, quantization_options_serialized=quantization_options.SerializeToString(), signature_keys=list(quantization_options.signature_keys), signature_def_map_serialized=_serialize_signature_def_map(signature_def_map), py_function_library=py_function_lib.PyFunctionLibrary())\n return saved_model_load.load(dst_saved_model_path)", "docstring": "Quantizes the given SavedModel via post-training dynamic range quantization.\n\nArgs:\n src_saved_model_path: Path to the saved model.\n dst_saved_model_path: The path to save the output SavedModel. The directory\n will be overwritten if not empty.\n quantization_options: QuantizationOptions proto describing quantization\n related config.\n\nReturns:\n A SavedModel object with TF quantization applied.\n\nRaises:\n ValueError: when the model is QAT model."} +{"repo": "tensorflow", "function": "def connect_raise_node(self, node, except_guards):\n for guard in except_guards:\n if guard in self.raises:\n self.raises[guard].append(node)\n else:\n self.raises[guard] = [node]", "docstring": "Adds extra connection between a raise node and containing except guards.\n\nThe node is a graph node, not an ast node.\n\nArgs:\n node: Node\n except_guards: Tuple[ast.AST, ...], the except sections that guard node"} +{"repo": "transformers", "function": "def _prepare_4d_attention_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int]=None):\n return AttentionMaskConverter._expand_mask(mask=mask, dtype=dtype, tgt_len=tgt_len)", "docstring": "Creates a non-causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`\n\nArgs:\n mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)`\n dtype (`torch.dtype`):\n The torch dtype the created mask shall have.\n tgt_len (`int`):\n The target length or query length the created mask shall have."} +{"repo": "tensorflow", "function": "def clone_model(model, input_tensors=None, clone_function=None):\n with generic_utils.DisableSharedObjectScope():\n if clone_function is None:\n clone_function = _clone_layer\n if isinstance(model, Sequential):\n return _clone_sequential_model(model, input_tensors=input_tensors, layer_fn=clone_function)\n else:\n return _clone_functional_model(model, input_tensors=input_tensors, layer_fn=clone_function)", "docstring": "Clone a Functional or Sequential `Model` instance.\n\nModel cloning is similar to calling a model on new inputs,\nexcept that it creates new layers (and thus new weights) instead\nof sharing the weights of the existing layers.\n\nNote that\n`clone_model` 
will not preserve the uniqueness of shared objects within the\nmodel (e.g. a single variable attached to two distinct layers will be\nrestored as two separate variables).\n\nArgs:\n model: Instance of `Model`\n (could be a Functional model or a Sequential model).\n input_tensors: optional list of input tensors or InputLayer objects\n to build the model upon. If not provided,\n new `Input` objects will be created.\n clone_function: Callable to be used to clone each layer in the target\n model (except `InputLayer` instances). It takes as argument the layer\n instance to be cloned, and returns the corresponding layer instance to\n be used in the model copy. If unspecified, this callable defaults to\n the following serialization/deserialization function:\n `lambda layer: layer.__class__.from_config(layer.get_config())`.\n By passing a custom callable, you can customize your copy of the\n model, e.g. by wrapping certain layers of interest (you might want to\n replace all `LSTM` instances with equivalent\n `Bidirectional(LSTM(...))` instances, for example).\n\nReturns:\n An instance of `Model` reproducing the behavior\n of the original model, on top of new inputs tensors,\n using newly instantiated weights. The cloned model may behave\n differently from the original model if a custom `clone_function`\n modifies the layer.\n\nExample:\n\n```python\n# Create a test Sequential model.\nmodel = keras.Sequential([\n keras.Input(shape=(728,)),\n keras.layers.Dense(32, activation='relu'),\n keras.layers.Dense(1, activation='sigmoid'),\n])\n# Create a copy of the test model (with freshly initialized weights).\nnew_model = clone_model(model)\n```\n\nNote that subclassed models cannot be cloned, since their internal\nlayer structure is not known. To achieve equivalent functionality\nas `clone_model` in the case of a subclassed model, simply make sure\nthat the model class implements `get_config()`\n(and optionally `from_config()`), and call:\n\n```python\nnew_model = model.__class__.from_config(model.get_config())\n```"} +{"repo": "transformers", "function": "class GPTBigCodeConfig(PretrainedConfig):\n model_type = 'gpt_bigcode'\n keys_to_ignore_at_inference = ['past_key_values']\n attribute_map = {'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer'}\n\n def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function='gelu_pytorch_tanh', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):\n self.vocab_size = vocab_size\n self.n_positions = n_positions\n self.n_embd = n_embd\n self.n_layer = n_layer\n self.n_head = n_head\n self.n_inner = n_inner\n self.activation_function = activation_function\n self.resid_pdrop = resid_pdrop\n self.embd_pdrop = embd_pdrop\n self.attn_pdrop = attn_pdrop\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_range = initializer_range\n self.scale_attn_weights = scale_attn_weights\n self.use_cache = use_cache\n self.attention_softmax_in_fp32 = attention_softmax_in_fp32\n self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32\n self.multi_query = multi_query\n self.bos_token_id = bos_token_id\n self.eos_token_id = eos_token_id\n super().__init__(bos_token_id=bos_token_id, 
eos_token_id=eos_token_id, **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`GPTBigCodeModel`]. It is used to instantiate a\nGPTBigCode model according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the GPTBigCode\n[gpt_bigcode](https://huggingface.co/gpt_bigcode) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 50257):\n Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`GPTBigCodeModel`].\n n_positions (`int`, *optional*, defaults to 1024):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n n_embd (`int`, *optional*, defaults to 768):\n Dimensionality of the embeddings and hidden states.\n n_layer (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n n_head (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n n_inner (`int`, *optional*, defaults to None):\n Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd\n activation_function (`str`, *optional*, defaults to `\"gelu_pytorch_tanh\"`):\n Activation function, to be selected in the list `[\"relu\", \"silu\", \"gelu\", \"tanh\", \"gelu_new\",\n \"gelu_pytorch_tanh\"]`.\n resid_pdrop (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n embd_pdrop (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the embeddings.\n attn_pdrop (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the attention.\n layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):\n The epsilon to use in the layer normalization layers.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n scale_attn_weights (`bool`, *optional*, defaults to `True`):\n Scale attention weights by dividing by sqrt(hidden_size)..\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):\n Whether to call the fused softmax in float32.\n scale_attention_softmax_in_fp32 (`bool`, *optional*, defaults to `True`):\n Whether to scale the attention softmax in float32.\n attention_type (`bool`, *optional*, defaults to `True`):\n Whether to use Multi-Query Attion (`True`) or Multi-Head Attention (`False`).\nExample:\n\n```python\n>>> from transformers import GPTBigCodeConfig, GPTBigCodeModel\n\n>>> # Initializing a GPTBigCode configuration\n>>> configuration = GPTBigCodeConfig()\n\n>>> # Initializing a model (with random weights) from the configuration\n>>> model = GPTBigCodeModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def get_np_doc_form():\n return _np_doc_form", "docstring": "Gets the form of the original numpy docstrings.\n\nReturns:\n See 
`set_np_doc_form` for the list of valid values."} +{"repo": "transformers", "function": "def create_network_inputs(self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, past_observed_mask: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n time_feat = torch.cat((past_time_features[:, self._past_length - self.config.context_length:, ...], future_time_features), dim=1) if future_values is not None else past_time_features[:, self._past_length - self.config.context_length:, ...]\n if past_observed_mask is None:\n past_observed_mask = torch.ones_like(past_values)\n context = past_values[:, -self.config.context_length:]\n observed_context = past_observed_mask[:, -self.config.context_length:]\n _, loc, scale = self.scaler(context, observed_context)\n inputs = (torch.cat((past_values, future_values), dim=1) - loc) / scale if future_values is not None else (past_values - loc) / scale\n log_abs_loc = loc.abs().log1p() if self.config.input_size == 1 else loc.squeeze(1).abs().log1p()\n log_scale = scale.log() if self.config.input_size == 1 else scale.squeeze(1).log()\n static_feat = torch.cat((log_abs_loc, log_scale), dim=1)\n if static_real_features is not None:\n static_feat = torch.cat((static_real_features, static_feat), dim=1)\n if static_categorical_features is not None:\n embedded_cat = self.embedder(static_categorical_features)\n static_feat = torch.cat((embedded_cat, static_feat), dim=1)\n expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1)\n features = torch.cat((expanded_static_feat, time_feat), dim=-1)\n subsequences_length = self.config.context_length + self.config.prediction_length if future_values is not None else self.config.context_length\n lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length)\n lags_shape = lagged_sequence.shape\n reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1)\n if reshaped_lagged_sequence.shape[1] != time_feat.shape[1]:\n raise ValueError(f'input length {reshaped_lagged_sequence.shape[1]} and time feature lengths {time_feat.shape[1]} does not match')\n return (reshaped_lagged_sequence, features, loc, scale, static_feat)", "docstring": "Creates the inputs for the network given the past and future values, time features, and static features.\n\nArgs:\n past_values (`torch.Tensor`):\n A tensor of shape `(batch_size, past_length, input_size)` containing the past values.\n past_time_features (`torch.Tensor`):\n A tensor of shape `(batch_size, past_length, num_features)` containing the past time features.\n static_categorical_features (`Optional[torch.Tensor]`):\n An optional tensor of shape `(batch_size, num_categorical_features)` containing the static categorical\n features.\n static_real_features (`Optional[torch.Tensor]`):\n An optional tensor of shape `(batch_size, num_real_features)` containing the static real features.\n past_observed_mask (`Optional[torch.Tensor]`):\n An optional tensor of shape `(batch_size, past_length, input_size)` containing the mask of observed\n values in the past.\n future_values (`Optional[torch.Tensor]`):\n An optional tensor of shape `(batch_size, future_length, input_size)` containing the future values.\n\nReturns:\n A tuple containing the 
following tensors:\n - reshaped_lagged_sequence (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_lags *\n input_size)` containing the lagged subsequences of the inputs.\n - features (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_features)` containing the\n concatenated static and time features.\n - loc (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the mean of the input\n values.\n - scale (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the std of the input\n values.\n - static_feat (`torch.Tensor`): A tensor of shape `(batch_size, num_static_features)` containing the\n concatenated static features."} +{"repo": "python-fire", "function": "def _CompletionsFromArgs(fn_args):\n completions = []\n for arg in fn_args:\n arg = arg.replace('_', '-')\n completions.append(f'--{arg}')\n return completions", "docstring": "Takes a list of fn args and returns a list of the fn's completion strings.\n\nArgs:\n fn_args: A list of the args accepted by a function.\nReturns:\n A list of possible completion strings for that function."} +{"repo": "transformers", "function": "def forward(self, pixel_values: torch.FloatTensor, prompt_depth: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], DepthEstimatorOutput]:\n loss = None\n if labels is not None:\n raise NotImplementedError('Training is not implemented yet')\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n outputs = self.backbone.forward_with_filtered_kwargs(pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions)\n hidden_states = outputs.feature_maps\n _, _, height, width = pixel_values.shape\n patch_size = self.config.patch_size\n patch_height = height // patch_size\n patch_width = width // patch_size\n if prompt_depth is not None:\n batch_size = prompt_depth.shape[0]\n depth_min = torch.min(prompt_depth.reshape(batch_size, -1), dim=1).values\n depth_max = torch.max(prompt_depth.reshape(batch_size, -1), dim=1).values\n depth_min, depth_max = (depth_min.view(batch_size, 1, 1, 1), depth_max.view(batch_size, 1, 1, 1))\n prompt_depth = (prompt_depth - depth_min) / (depth_max - depth_min)\n hidden_states = self.neck(hidden_states, patch_height, patch_width, prompt_depth=prompt_depth)\n predicted_depth = self.head(hidden_states, patch_height, patch_width)\n if prompt_depth is not None:\n depth_min = depth_min.squeeze(1).to(predicted_depth.device)\n depth_max = depth_max.squeeze(1).to(predicted_depth.device)\n predicted_depth = predicted_depth * (depth_max - depth_min) + depth_min\n if not return_dict:\n if output_hidden_states:\n output = (predicted_depth,) + outputs[1:]\n else:\n output = (predicted_depth,) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return DepthEstimatorOutput(loss=loss, predicted_depth=predicted_depth, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=outputs.attentions)", "docstring": "prompt_depth (`torch.FloatTensor` of shape `(batch_size, 1, height, width)`, *optional*):\n Prompt depth is the sparse or 
low-resolution depth obtained from multi-view geometry or a\n low-resolution depth sensor. It generally has shape (height, width), where height\n and width can be smaller than those of the images. It is optional and can be None, which means no prompt depth\n will be used. If it is None, the output will be a monocular relative depth.\n The values are recommended to be in meters, but this is not necessary.\n\nExample:\n\n```python\n>>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation\n>>> import torch\n>>> import numpy as np\n>>> from PIL import Image\n>>> import requests\n\n>>> url = \"https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/image.jpg?raw=true\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> image_processor = AutoImageProcessor.from_pretrained(\"depth-anything/prompt-depth-anything-vits-hf\")\n>>> model = AutoModelForDepthEstimation.from_pretrained(\"depth-anything/prompt-depth-anything-vits-hf\")\n\n>>> prompt_depth_url = \"https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/arkit_depth.png?raw=true\"\n>>> prompt_depth = Image.open(requests.get(prompt_depth_url, stream=True).raw)\n\n>>> # prepare image for the model\n>>> inputs = image_processor(images=image, return_tensors=\"pt\", prompt_depth=prompt_depth)\n\n>>> with torch.no_grad():\n... outputs = model(**inputs)\n\n>>> # interpolate to original size\n>>> post_processed_output = image_processor.post_process_depth_estimation(\n... outputs,\n... target_sizes=[(image.height, image.width)],\n... )\n\n>>> # visualize the prediction\n>>> predicted_depth = post_processed_output[0][\"predicted_depth\"]\n>>> depth = predicted_depth * 1000.\n>>> depth = depth.detach().cpu().numpy()\n>>> depth = Image.fromarray(depth.astype(\"uint16\")) # mm\n```"} +{"repo": "beam", "function": "def get_dict(self):\n self.is_valid()\n return self._get_dict()", "docstring": "Returns the internal-API dictionary representing the\n:class:`DisplayDataItem`.\n\nReturns:\n Dict[str, Any]: A dictionary. The internal-API dictionary representing\n the :class:`DisplayDataItem`.\n\nRaises:\n ValueError: if the item is not valid."} +{"repo": "temporian", "function": "def convert_date_to_duration(date: Timestamp) -> NormalizedDuration:\n if isinstance(date, float):\n return date\n if isinstance(date, int):\n return float(date)\n if isinstance(date, np.datetime64):\n return convert_numpy_datetime64_to_duration(date)\n if isinstance(date, datetime.datetime):\n return convert_datetime_to_duration(date)\n if isinstance(date, datetime.date):\n return convert_datetime_date_to_duration(date)\n raise TypeError(f'Unsupported type: {type(date)}')", "docstring": "Converts date value to a number representing the Unix timestamp.\n\nIf a float or int, it is returned as float.\nIf a date, it is converted to a Unix timestamp (number of seconds from Unix\nepoch).\n\nArgs:\n date: Date to convert.\n\nReturns:\n Unix timestamp (seconds elapsed from unix epoch).\n\nRaises:\n TypeError: unsupported type. 
Supported types are:\n - np.datetime64\n - datetime.datetime"} +{"repo": "tensorflow", "function": "def _process_stack_frames(self):\n stack_frames = tf_stack.extract_stack()\n stack_frame_ids = []\n writer = None\n for file_path, lineno, func, _ in stack_frames:\n abs_path = os.path.abspath(file_path)\n if (abs_path, lineno, func) in self._stack_frame_to_id:\n stack_frame_ids.append(self._stack_frame_to_id[abs_path, lineno, func])\n continue\n with self._stack_frame_to_id_lock:\n if (abs_path, lineno, func) not in self._stack_frame_to_id:\n stack_frame_id = _get_id()\n self._stack_frame_to_id[abs_path, lineno, func] = stack_frame_id\n file_index = self._write_source_file_content(abs_path)\n file_line_col = graph_debug_info_pb2.GraphDebugInfo.FileLineCol(file_index=file_index, line=lineno, func=func)\n stack_frame_with_id = debug_event_pb2.StackFrameWithId(id=stack_frame_id, file_line_col=file_line_col)\n writer = self.get_writer()\n writer.WriteStackFrameWithId(stack_frame_with_id)\n stack_frame_ids.append(self._stack_frame_to_id[abs_path, lineno, func])\n code_location = debug_event_pb2.CodeLocation(host_name=self._hostname, stack_frame_ids=stack_frame_ids)\n return code_location", "docstring": "Process stack frames.\n\nSend the content of source-files, on a best-effort basis.\n\nReturns:\n A list of stack frame IDs."} +{"repo": "transformers", "function": "def create_position_ids_from_inputs_embeds(self, inputs_embeds):\n input_shape = inputs_embeds.size()[:-1]\n sequence_length = input_shape[1]\n position_ids = torch.arange(self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device)\n return position_ids.unsqueeze(0).expand(input_shape)", "docstring": "We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.\n\nArgs:\n inputs_embeds: torch.Tensor\n\nReturns: torch.Tensor"} +{"repo": "beam", "function": "def infer_element_type(elements):\n element_type = typehints.Union[[trivial_inference.instance_to_type(e) for e in elements]]\n return element_type", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nInfer a Beam type for a list of elements.\n\nArgs:\n elements (List[Any]): A list of elements for which the type should be\n inferred.\n\nReturns:\n A Beam type encompassing all elements."} +{"repo": "transformers", "function": "class PromptDepthAnythingFeatureFusionLayer(nn.Module):\n\n def __init__(self, config: PromptDepthAnythingConfig):\n super().__init__()\n self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)\n self.residual_layer1 = PromptDepthAnythingPreActResidualLayer(config)\n self.residual_layer2 = PromptDepthAnythingPreActResidualLayer(config)\n self.prompt_depth_layer = PromptDepthAnythingLayer(config)\n\n def forward(self, hidden_state, residual=None, size=None, prompt_depth=None):\n if residual is not None:\n if hidden_state.shape != residual.shape:\n residual = nn.functional.interpolate(residual, size=hidden_state.shape[2:], mode='bilinear', align_corners=False)\n hidden_state = hidden_state + self.residual_layer1(residual)\n hidden_state = self.residual_layer2(hidden_state)\n if prompt_depth is not None:\n prompt_depth = nn.functional.interpolate(prompt_depth, size=hidden_state.shape[2:], mode='bilinear', align_corners=False)\n res = self.prompt_depth_layer(prompt_depth)\n hidden_state = hidden_state + res\n modifier = {'scale_factor': 2} if size is None else {'size': size}\n 
hidden_state = nn.functional.interpolate(hidden_state, **modifier, mode='bilinear', align_corners=True)\n hidden_state = self.projection(hidden_state)\n return hidden_state", "docstring": "Feature fusion layer, merges feature maps from different stages.\n\nArgs:\n config (`[PromptDepthAnythingConfig]`):\n Model configuration class defining the model architecture."} +{"repo": "transformers", "function": "def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):\n return cls(backbone_config=backbone_config, **kwargs)", "docstring": "Instantiate a [`Mask2FormerConfig`] (or a derived class) from a pre-trained backbone model configuration.\n\nArgs:\n backbone_config ([`PretrainedConfig`]):\n The backbone configuration.\n\nReturns:\n [`Mask2FormerConfig`]: An instance of a configuration object"} +{"repo": "starthinker", "function": "def recipe_bigquery_run_query(config, auth_write, query, legacy):\n bigquery(config, {'auth': auth_write, 'run': {'query': query, 'legacy': legacy}})", "docstring": "Run query on a project.\n\nArgs:\n auth_write (authentication) - Credentials used for writing data.\n query (text) - SQL with newlines and all.\n legacy (boolean) - Query type must match table and query format."} +{"repo": "fhir-py", "function": "def forEach(self) -> 'ColumnExpressionBuilder':\n return ColumnExpressionBuilder(self._builder, self._column_name, self._children, True, True)", "docstring": "The forEach() function.\n\nUnnests the repeated values from a FHIR path. If the FHIR path does not\nreturn a collection, we treat that as a collection with a single value.\nOnce this function is called, the FHIR path is sealed to be immutable.\n\nReturns:\n A new ColumnExpressionBuilder with needs_unnest set to True."} +{"repo": "tensorflow", "function": "def _write_cache(step, event_file_suffix=None, **kwargs):\n file_suffix = _TT_EVENT_FILE_SUFFIX\n if event_file_suffix is not None:\n file_suffix = string_ops.string_join([file_suffix, event_file_suffix], separator='.')\n summary_write_ops = []\n summary_writer = summary.create_file_writer_v2(self._parameters.trace_dir, filename_suffix=file_suffix, max_queue=_TT_SUMMARY_MAX_QUEUE)\n graph.add_to_collection(TENSOR_TRACER_SUMMARY_COLLECTION, summary_writer)\n step_value = step[0]\n dt = step_value.dtype\n if dt.__ne__(dtypes.int64) and dt.__ne__(dtypes.uint64) and dt.__ne__(dtypes.float64):\n step_value = math_ops.cast(step_value, dtypes.int64)\n with summary_writer.as_default():\n summary_metadata = summary_pb2.SummaryMetadata(plugin_data=summary_pb2.SummaryMetadata.PluginData(plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))\n for key, value in kwargs.items():\n if not self._parameters.collect_summary_per_core:\n if key == _TT_SUMMARY_TAG and value.shape.as_list()[0] != 1:\n value = self.aggregate_global_cache(value)\n with ops.control_dependencies([summary_writer.init()]):\n summary_write_ops.append(summary.write(_TT_SUMMARY_TAG + '/' + key + '.' + graph_summary_tag, value, metadata=summary_metadata, step=step_value))\n return control_flow_ops.group(summary_write_ops)", "docstring": "Writes the given caches as tensor summary.\n\nArgs:\n step: Step tensor with dimension [num_cores].\n event_file_suffix: Event filename suffix tensor.\n **kwargs: The dictionary of tensors that needs to be written as\n summaries. 
Key and value pairs within kwargs correspond to the tag\n name, and tensor content that will be written using summary.write.\n The trace_modes that use this function are:\n - summary: In summary mode, kwargs includes a single (tag, content)\n pair which are, _TT_SUMMARY_TAG and a tf.float32 signature_cache\n variable. The dimension of the signature_cache is:\n num_cores x num_traced_tensors x num_signatures.\n - full_tensor_summary: kwargs will include all traced tensors. Tag\n and content correspond to the name of the tensor, and its actual\n content.\nReturns:\n A tf.Operation that needs to be executed for the host call dependencies."} +{"repo": "tensorflow", "function": "def _jvp_helper_wrapper(op_name, attr_tuple, inputs, outputs, tangents, use_batch):\n if use_batch:\n for primal, tangent in zip(inputs, tangents):\n if not tangent.shape.is_compatible_with([None] + primal.shape):\n raise ValueError('Tangent {} was expected to be of shape {} but is instead of shape {}'.format(tangent, [None] + primal.shape, tangent.shape))\n return control_flow_ops.vectorized_map(functools.partial(_jvp_helper, op_name, attr_tuple, inputs, outputs), tangents)\n return _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents)", "docstring": "Computes a batch of Jacobian-vector products for an op.\n\nArgs:\n op_name: A string, the type of operation being executed.\n attr_tuple: Attributes of the operation.\n inputs: A flat list of input Tensors to the operation.\n outputs: A flat list of output Tensors from the operation.\n tangents: A flat list of Tensors, compatible with shape `[None] +\n input_shape`.\n use_batch: A bool, True to vectorize over batch of tangents of shape `[None]\n + input_shape`.\n\nReturns:\n A flat list of tangents compatible with `outputs`\n or `[None] + output_shape`.\n\nRaises:\n ValueError: if tangent shapes are not compatible with input shapes."} +{"repo": "tensorflow", "function": "def assert_shapes_v2(shapes, data=None, summarize=None, message=None, name=None):\n assert_shapes(shapes, data=data, summarize=summarize, message=message, name=name)", "docstring": "Assert tensor shapes and dimension size relationships between tensors.\n\nThis Op checks that a collection of tensors shape relationships\nsatisfies given constraints.\n\nExample:\n\n>>> n = 10\n>>> q = 3\n>>> d = 7\n>>> x = tf.zeros([n,q])\n>>> y = tf.ones([n,d])\n>>> param = tf.Variable([1.0, 2.0, 3.0])\n>>> scalar = 1.0\n>>> tf.debugging.assert_shapes([\n... (x, ('N', 'Q')),\n... (y, ('N', 'D')),\n... (param, ('Q',)),\n... (scalar, ()),\n... ])\n\n>>> tf.debugging.assert_shapes([\n... (x, ('N', 'D')),\n... (y, ('N', 'D'))\n... ])\nTraceback (most recent call last):\n...\nValueError: ...\n\nIf `x`, `y`, `param` or `scalar` does not have a shape that satisfies\nall specified constraints, `message`, as well as the first `summarize` entries\nof the first encountered violating tensor are printed, and\n`InvalidArgumentError` is raised.\n\nSize entries in the specified shapes are checked against other entries by\ntheir __hash__, except:\n - a size entry is interpreted as an explicit size if it can be parsed as an\n integer primitive.\n - a size entry is interpreted as *any* size if it is None or '.'.\n\nIf the first entry of a shape is `...` (type `Ellipsis`) or '*' that indicates\na variable number of outer dimensions of unspecified size, i.e.
the constraint\napplies to the inner-most dimensions only.\n\nScalar tensors and specified shapes of length zero (excluding the 'inner-most'\nprefix) are both treated as having a single dimension of size one.\n\nArgs:\n shapes: dictionary with (`Tensor` to shape) items, or a list of\n (`Tensor`, shape) tuples. A shape must be an iterable.\n data: The tensors to print out if the condition is False. Defaults to error\n message and first few entries of the violating tensor.\n summarize: Print this many entries of the tensor.\n message: A string to prefix to the default message.\n name: A name for this operation (optional). Defaults to \"assert_shapes\".\n\nRaises:\n ValueError: If static checks determine any shape constraint is violated."} +{"repo": "tensorflow", "function": "def get_gradients(self, loss, params):\n grads = backend.gradients(loss, params)\n if any((g is None for g in grads)):\n raise ValueError('An operation has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: backend.argmax, backend.round, backend.eval.')\n if hasattr(self, 'clipnorm'):\n grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]\n if hasattr(self, 'clipvalue'):\n grads = [clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue) for g in grads]\n return grads", "docstring": "Returns gradients of `loss` with respect to `params`.\n\nArgs:\n loss: Loss tensor.\n params: List of variables.\n\nReturns:\n List of gradient tensors.\n\nRaises:\n ValueError: In case any gradient cannot be computed (e.g. if gradient\n function not implemented)."} +{"repo": "tf-quant-finance", "function": "def fd_solver_forward(self, start_time, end_time, coord_grid, values_grid, one_step_fn=None, boundary_conditions=None, start_step_count=0, num_steps=None, time_step=None, values_transform_fn=None, dtype=None, name=None, **kwargs):\n pde_solver_fn = kwargs.get('pde_solver_fn', fd_solvers.solve_forward)\n backward_second_order, backward_first_order, backward_zeroth_order = _backward_pde_coeffs(self._drift_fn, self._volatility_fn, discounting=None)\n inner_second_order_coeff_fn = lambda t, x: -backward_second_order(t, x)\n inner_first_order_coeff_fn = backward_first_order\n zeroth_order_coeff_fn = backward_zeroth_order\n return pde_solver_fn(start_time=start_time, end_time=end_time, coord_grid=coord_grid, values_grid=values_grid, num_steps=num_steps, start_step_count=start_step_count, time_step=time_step, one_step_fn=one_step_fn, boundary_conditions=boundary_conditions, values_transform_fn=values_transform_fn, inner_second_order_coeff_fn=inner_second_order_coeff_fn, inner_first_order_coeff_fn=inner_first_order_coeff_fn, zeroth_order_coeff_fn=zeroth_order_coeff_fn, dtype=dtype, name=name)", "docstring": "Returns a solver for the Fokker-Planck equation of this process.\n\nThe Fokker-Planck equation (also known as the Kolmogorov Forward equation)\nassociated to this Ito process is given by:\n\n```None\n dV/dt + d(mean_i(t, x) V) / dx\n - (1/2) d^2(volatility^2(t, x) V) / dx^2 = 0\n```\n\nwith the initial value condition $$V(0, x) = u(x)$$.\n\nThis method evolves a spatially discretized solution of the above PDE from\ntime `t0` to time `t1 < t0` (i.e. backwards in time).\nThe solution `V(t,x)` is assumed to be discretized on a grid.\n\nThis method allows batching of solutions. In this context, batching means\nthe ability to represent and evolve multiple independent functions `V`\n(e.g. V1, V2 ...)
simultaneously corresponding to `mean_1, mean_2 ...` and\n`volatility_1, volatility_2 ....`.\n\nThe evolution of the solution from `t0` to `t1` is often done by\ndiscretizing the differential equation to a difference equation along\nthe spatial and temporal axes. The temporal discretization is given by a\n(sequence of) time steps [dt_1, dt_2, ... dt_k] such that the sum of the\ntime steps is equal to the total time step `t0 - t1`. If a uniform time\nstep is used, it may equivalently be specified by stating the number of\nsteps (n_steps) to take. This method provides both options via the\n`time_step` and `num_steps` parameters. However, not all methods need\ndiscretization along time direction (e.g. method of lines) so this argument\nmay not be applicable to some implementations.\n\nThe workhorse of this method is the `one_step_fn`. For the commonly used\nmethods, see functions in `math.pde.steppers` module.\n\nThe mapping between the arguments of this method and the above\nequation are described in the Args section below.\n\nFor a simple instructive example of implementation of this method, see\n`models.GenericItoProcess.fd_solver_forward`.\n\nArgs:\n start_time: Real positive scalar `Tensor`. The start time of the grid.\n Corresponds to time `t0` above.\n end_time: Real scalar `Tensor` smaller than the `start_time` and greater\n than zero. The time to step back to. Corresponds to time `t1` above.\n coord_grid: List of `n` rank 1 real `Tensor`s. `n` is the dimension of the\n domain. The i-th `Tensor` has shape, `[d_i]` where `d_i` is the size of\n the grid along axis `i`. The coordinates of the grid points. Corresponds\n to the spatial grid `G` above.\n values_grid: Real `Tensor` containing the function values at time\n `start_time` which have to be stepped back to time `end_time`. The shape\n of the `Tensor` must broadcast with `[K, d_1, d_2, ..., d_n]`. The first\n axis of size `K` is the values batch dimension and allows multiple\n functions (with potentially different boundary/final conditions) to be\n stepped back simultaneously.\n one_step_fn: The transition kernel. A callable that consumes the following\n arguments by keyword:\n 1. 'time': Current time\n 2. 'next_time': The next time to step to. For the backwards in time\n evolution, this time will be smaller than the current time.\n 3. 'coord_grid': The coordinate grid.\n 4. 'values_grid': The values grid.\n 5. 'quadratic_coeff': A callable returning the quadratic coefficients\n of the PDE (i.e. `(1/2)D_{ij}(t, x)` above). The callable accepts\n the time and coordinate grid as keyword arguments and returns a\n `Tensor` with shape that broadcasts with `[dim, dim]`.\n 6. 'linear_coeff': A callable returning the linear coefficients of the\n PDE (i.e. `mean_i(t, x)` above). Accepts time and coordinate grid as\n keyword arguments and returns a `Tensor` with shape that broadcasts\n with `[dim]`.\n 7. 'constant_coeff': A callable returning the coefficient of the\n linear homogeneous term (i.e. `r(t,x)` above). Same spec as above.\n The `one_step_fn` callable returns a 2-tuple containing the next\n coordinate grid, next values grid.\n boundary_conditions: A list of size `dim` containing boundary conditions.\n The i'th element of the list is a 2-tuple containing the lower and upper\n boundary condition for the boundary along the i`th axis.\n start_step_count: Scalar integer `Tensor`. Initial value for the number of\n time steps performed.\n Default value: 0 (i.e. no previous steps performed).\n num_steps: Positive int scalar `Tensor`. 
The number of time steps to take\n when moving from `start_time` to `end_time`. Either this argument or the\n `time_step` argument must be supplied (but not both). If num steps is\n `k>=1`, uniform time steps of size `(t0 - t1)/k` are taken to evolve the\n solution from `t0` to `t1`. Corresponds to the `n_steps` parameter\n above.\n time_step: The time step to take. Either this argument or the `num_steps`\n argument must be supplied (but not both). The type of this argument may\n be one of the following (in order of generality): (a) None in which case\n `num_steps` must be supplied. (b) A positive real scalar `Tensor`. The\n maximum time step to take. If the value of this argument is `dt`, then\n the total number of steps taken is N = (t1 - t0) / dt rounded up to\n the nearest integer. The first N-1 steps are of size dt and the last\n step is of size `t1 - t0 - (N-1) * dt`. (c) A callable accepting the\n current time and returning the size of the step to take. The input and\n the output are real scalar `Tensor`s.\n values_transform_fn: An optional callable applied to transform the\n solution values at each time step. The callable is invoked after the\n time step has been performed. The callable should accept the time of the\n grid, the coordinate grid and the values grid and should return the\n values grid. All input arguments to be passed by keyword.\n dtype: The dtype to use.\n name: The name to give to the ops.\n Default value: None which means `solve_forward` is used.\n **kwargs: Additional keyword args:\n (1) pde_solver_fn: Function to solve the PDE that accepts all the above\n arguments by name and returns the same tuple object as required below.\n Defaults to `tff.math.pde.fd_solvers.solve_forward`.\n\nReturns:\n A tuple object containing at least the following attributes:\n final_values_grid: A `Tensor` of same shape and dtype as `values_grid`.\n Contains the final state of the values grid at time `end_time`.\n final_coord_grid: A list of `Tensor`s of the same specification as\n the input `coord_grid`. Final state of the coordinate grid at time\n `end_time`.\n step_count: The total step count (i.e. the sum of the `start_step_count`\n and the number of steps performed in this call.).\n final_time: The final time at which the evolution stopped. 
This value\n is given by `max(min(end_time, start_time), 0)`."} +{"repo": "tensorflow", "function": "def eigvals(self, name='eigvals'):\n if not self.is_self_adjoint:\n raise NotImplementedError('Only self-adjoint matrices are supported.')\n with self._name_scope(name):\n return self._eigvals()", "docstring": "Returns the eigenvalues of this linear operator.\n\nIf the operator is marked as self-adjoint (via `is_self_adjoint`)\nthis computation can be more efficient.\n\nNote: This currently only supports self-adjoint operators.\n\nArgs:\n name: A name for this `Op`.\n\nReturns:\n Shape `[B1,...,Bb, N]` `Tensor` of same `dtype` as `self`."} +{"repo": "transformers", "function": "class DepthProImageProcessor(BaseImageProcessor):\n model_input_names = ['pixel_values']\n\n def __init__(self, do_resize: bool=True, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, **kwargs):\n super().__init__(**kwargs)\n size = size if size is not None else {'height': 1536, 'width': 1536}\n size = get_size_dict(size)\n self.do_resize = do_resize\n self.do_rescale = do_rescale\n self.do_normalize = do_normalize\n self.size = size\n self.resample = resample\n self.rescale_factor = rescale_factor\n self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN\n self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD\n\n def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n \"\"\"\n Resize an image to `(size[\"height\"], size[\"width\"])`.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n\n Returns:\n `np.ndarray`: The resized images.\n \"\"\"\n requires_backends(self, 'torch')\n size = get_size_dict(size)\n if 'height' not in size or 'width' not in size:\n raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}')\n output_size = (size['height'], size['width'])\n image_tensor = torch.from_numpy(image).unsqueeze(0)\n resized_image = torch.nn.functional.interpolate(input=image_tensor, size=output_size, mode=pil_torch_interpolation_mapping[resample].value)\n resized_image = resized_image.squeeze(0).numpy()\n return resized_image\n\n def _validate_input_arguments(self, do_resize: bool, size: Dict[str, int], resample: PILImageResampling, do_rescale: bool, rescale_factor: float, do_normalize: bool, image_mean: Union[float, List[float]], image_std: Union[float, List[float]], data_format: Union[str, ChannelDimension]):\n if do_resize and None in (size, resample):\n raise ValueError('Size and resample must be specified if do_resize is True.')\n if do_rescale and rescale_factor is None:\n raise ValueError('Rescale factor must be specified if do_rescale is True.')\n if do_normalize and None in (image_mean, image_std):\n raise ValueError('Image mean and standard deviation must be specified if do_normalize is True.')\n\n @filter_out_non_signature_kwargs()\n def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Dictionary in the format `{\"height\": h, \"width\": w}` specifying the size of the output image after\n resizing.\n resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):\n `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has\n an effect if `do_resize` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use if `do_normalize` is set to `True`.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. 
Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n resample = resample if resample is not None else self.resample\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n size = size if size is not None else self.size\n images = make_list_of_images(images)\n if not valid_images(images):\n raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n self._validate_input_arguments(do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)\n images = [to_numpy_array(image) for image in images]\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n        if input_data_format is None:\n            input_data_format = infer_channel_dimension_format(images[0])\n        all_images = []\n        for image in images:\n            if do_rescale:\n                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n            if do_normalize:\n                image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n            if do_resize:\n                image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_channel_dim=input_data_format)\n                image = self.resize(image=image, size=size, resample=resample)\n                image = to_channel_dimension_format(image, data_format, input_channel_dim=ChannelDimension.FIRST)\n            else:\n                image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n            all_images.append(image)\n        data = {'pixel_values': all_images}\n        return BatchFeature(data=data, tensor_type=return_tensors)\n\n    def post_process_depth_estimation(self, outputs: 'DepthProDepthEstimatorOutput', target_sizes: Optional[Union[TensorType, List[Tuple[int, int]], None]]=None) -> Dict[str, List[TensorType]]:\n        \"\"\"\n        Post-processes the raw depth predictions from the model to generate\n        final depth predictions which are calibrated using the field of view if provided\n        and resized to specified target sizes if provided.\n\n        Args:\n            outputs ([`DepthProDepthEstimatorOutput`]):\n                Raw outputs of the model.\n            target_sizes (`Optional[Union[TensorType, List[Tuple[int, int]], None]]`, *optional*, defaults to `None`):\n                Target sizes to resize the depth predictions. Can be a tensor of shape `(batch_size, 2)`\n                or a list of tuples `(height, width)` for each image in the batch. If `None`, no resizing\n                is performed.\n\n        Returns:\n            `List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth\n            predictions, and field of view (degrees) and focal length (pixels) if `field_of_view` is given in `outputs`.\n\n        Raises:\n            `ValueError`:\n                If the lengths of `predicted_depths`, `fovs`, or `target_sizes` are mismatched.\n        \"\"\"\n        requires_backends(self, 'torch')\n        predicted_depth = outputs.predicted_depth\n        fov = outputs.field_of_view\n        batch_size = len(predicted_depth)\n        if target_sizes is not None and batch_size != len(target_sizes):\n            raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the predicted depth')\n        results = []\n        fov = [None] * batch_size if fov is None else fov\n        target_sizes = [None] * batch_size if target_sizes is None else target_sizes\n        for depth, fov_value, target_size in zip(predicted_depth, fov, target_sizes):\n            focal_length = None\n            if target_size is not None:\n                if fov_value is not None:\n                    width = target_size[1]\n                    focal_length = 0.5 * width / torch.tan(0.5 * torch.deg2rad(fov_value))\n                    depth = depth * width / focal_length\n                depth = torch.nn.functional.interpolate(input=depth.unsqueeze(0).unsqueeze(1), size=target_size, mode=pil_torch_interpolation_mapping[self.resample].value).squeeze()\n            depth = 1.0 / torch.clamp(depth, min=0.0001, max=10000.0)\n            results.append({'predicted_depth': depth, 'field_of_view': fov_value, 'focal_length': focal_length})\n        return results", "docstring": "Constructs a DepthPro image processor.\n\nArgs:\n    do_resize (`bool`, *optional*, defaults to `True`):\n        Whether to resize the image's (height, width) dimensions to the specified `(size[\"height\"],\n        size[\"width\"])`.
Can be overridden by the `do_resize` parameter in the `preprocess` method.\n size (`dict`, *optional*, defaults to `{\"height\": 1536, \"width\": 1536}`):\n Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`\n method.\n resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):\n Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the\n `preprocess` method.\n do_rescale (`bool`, *optional*, defaults to `True`):\n Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`\n parameter in the `preprocess` method.\n rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):\n Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the\n `preprocess` method.\n do_normalize (`bool`, *optional*, defaults to `True`):\n Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`\n method.\n image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`):\n Mean to use if normalizing the image. This is a float or list of floats the length of the number of\n channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.\n image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`):\n Standard deviation to use if normalizing the image. This is a float or list of floats the length of the\n number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method."} +{"repo": "transformers", "function": "class InstructBlipVideoForConditionalGenerationModelOutput(ModelOutput):\n loss: Optional[Tuple[torch.FloatTensor]] = None\n logits: Optional[Tuple[torch.FloatTensor]] = None\n vision_outputs: Optional[torch.FloatTensor] = None\n qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None\n language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None\n\n def to_tuple(self) -> Tuple[Any]:\n return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys()))", "docstring": "Class defining the outputs of [`InstructBlipVideoForConditionalGeneration`].\n\nArgs:\n loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):\n Language modeling loss from the language model.\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head of the language model.\n vision_outputs (`BaseModelOutputWithPooling`):\n Outputs of the vision encoder.\n qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):\n Outputs of the Q-Former (Querying Transformer).\n language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):\n Outputs of the language model."} +{"repo": "beam", "function": "def _delete_blob(self, bucket, blob_name):\n if self._use_blob_generation:\n blob = bucket.get_blob(blob_name, retry=self._storage_client_retry)\n generation = getattr(blob, 'generation', None)\n else:\n generation = None\n try:\n bucket.delete_blob(blob_name, if_generation_match=generation, retry=self._storage_client_retry)\n except NotFound:\n return", "docstring": "Helper method to delete a single blob from GCS.\n\nArgs:\n bucket: The GCS bucket object.\n blob_name: The name of the blob to delete under the bucket."} +{"repo": 
"tensorflow", "function": "def conv1d_v2(input, filters, stride, padding, data_format='NWC', dilations=None, name=None):\n return conv1d(input, filters, stride, padding, use_cudnn_on_gpu=True, data_format=data_format, name=name, dilations=dilations)", "docstring": "Computes a 1-D convolution given 3-D input and filter tensors.\n\nGiven an input tensor of shape\n `batch_shape + [in_width, in_channels]`\nif `data_format` is `\"NWC\"`, or\n `batch_shape + [in_channels, in_width]`\nif `data_format` is `\"NCW\"`,\nand a filter / kernel tensor of shape\n`[filter_width, in_channels, out_channels]`, this op reshapes\nthe arguments to pass them to `conv2d` to perform the equivalent\nconvolution operation.\n\nInternally, this op reshapes the input tensors and invokes `tf.nn.conv2d`.\nFor example, if `data_format` does not start with `\"NC\"`, a tensor of shape\n `batch_shape + [in_width, in_channels]`\nis reshaped to\n `batch_shape + [1, in_width, in_channels]`,\nand the filter is reshaped to\n `[1, filter_width, in_channels, out_channels]`.\nThe result is then reshaped back to\n `batch_shape + [out_width, out_channels]`\n\\(where out_width is a function of the stride and padding as in conv2d\\) and\nreturned to the caller.\n\nArgs:\n input: A Tensor of rank at least 3. Must be of type `float16`, `float32`, or\n `float64`.\n filters: A Tensor of rank at least 3. Must have the same type as `input`.\n stride: An int or list of `ints` that has length `1` or `3`. The number of\n entries by which the filter is moved right at each step.\n padding: 'SAME' or 'VALID'. See\n [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2)\n for more information.\n data_format: An optional `string` from `\"NWC\", \"NCW\"`. Defaults to `\"NWC\"`,\n the data is stored in the order of\n `batch_shape + [in_width, in_channels]`. The `\"NCW\"` format stores data\n as `batch_shape + [in_channels, in_width]`.\n dilations: An int or list of `ints` that has length `1` or `3` which\n defaults to 1. The dilation factor for each dimension of input. If set to\n k > 1, there will be k-1 skipped cells between each filter element on that\n dimension. Dilations in the batch and depth dimensions must be 1.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor`. Has the same type as input.\n\nRaises:\n ValueError: if `data_format` is invalid."} +{"repo": "beam", "function": "def run_inference(self, batch: Sequence[str], model: _VLLMModelServer, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n return asyncio.run(self._async_run_inference(batch, model, inference_args))", "docstring": "Runs inferences on a batch of text strings.\n\nArgs:\n batch: A sequence of examples as text strings.\n model: A _VLLMModelServer containing info for connecting to the server.\n inference_args: Any additional arguments for an inference.\n\nReturns:\n An Iterable of type PredictionResult."} +{"repo": "tensorflow", "function": "def cross_entropy(self, other, name='cross_entropy'):\n with self._name_scope(name):\n return self._cross_entropy(other)", "docstring": "Computes the (Shannon) cross entropy.\n\nDenote this distribution (`self`) by `P` and the `other` distribution by\n`Q`. 
Assuming `P, Q` are absolutely continuous with respect to\none another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shannon)\ncross entropy is defined as:\n\n```none\nH[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x)\n```\n\nwhere `F` denotes the support of the random variable `X ~ P`.\n\nArgs:\n  other: `tfp.distributions.Distribution` instance.\n  name: Python `str` prepended to names of ops created by this function.\n\nReturns:\n  cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]`\n    representing `n` different calculations of (Shannon) cross entropy."} +{"repo": "pyglove", "function": "def traverse(value: Any, preorder_visitor_fn: Optional[Callable[[KeyPath, Any], bool]]=None, postorder_visitor_fn: Optional[Callable[[KeyPath, Any], bool]]=None, root_path: Optional[KeyPath]=None) -> bool:\n    root_path = root_path or KeyPath()\n\n    def no_op_visitor(key, value):\n        del key, value\n        return True\n    if preorder_visitor_fn is None:\n        preorder_visitor_fn = no_op_visitor\n    if postorder_visitor_fn is None:\n        postorder_visitor_fn = no_op_visitor\n    if not preorder_visitor_fn(root_path, value):\n        return False\n    if isinstance(value, dict):\n        for k in value.keys():\n            if not traverse(value[k], preorder_visitor_fn, postorder_visitor_fn, KeyPath(k, root_path)):\n                return False\n    elif isinstance(value, list):\n        for i, v in enumerate(value):\n            if not traverse(v, preorder_visitor_fn, postorder_visitor_fn, KeyPath(i, root_path)):\n                return False\n    if not postorder_visitor_fn(root_path, value):\n        return False\n    return True", "docstring": "Traverse a (maybe) hierarchical value.\n\nExample::\n\n  def preorder_visit(path, value):\n    print(path)\n\n  tree = {'a': [{'c': [1, 2]}, {'d': {'g': (3, 4)}}], 'b': 'foo'}\n  pg.utils.traverse(tree, preorder_visit)\n\n  # Should print:\n  # 'a'\n  # 'a[0]'\n  # 'a[0].c'\n  # 'a[0].c[0]'\n  # 'a[0].c[1]'\n  # 'a[1]'\n  # 'a[1].d'\n  # 'a[1].d.g'\n  # 'b'\n\nArgs:\n  value: A maybe hierarchical value to traverse.\n  preorder_visitor_fn: Preorder visitor function. Function signature is (path,\n    value) -> should_continue.\n  postorder_visitor_fn: Postorder visitor function. Function signature is\n    (path, value) -> should_continue.\n  root_path: The key path of the root value.\n\nReturns:\n  Whether visitor function returns True on all nodes."} +{"repo": "tensorflow", "function": "def add_metric(self, value, aggregation=None, name=None):\n    if aggregation is not None and aggregation != 'mean':\n        raise ValueError('We currently support only `mean` sample-wise metric aggregation. You provided aggregation=`%s`' % aggregation)\n    from_metric_obj = hasattr(value, '_metric_obj')\n    is_symbolic = tf_utils.is_symbolic_tensor(value)\n    in_call_context = base_layer_utils.call_context().in_call\n    if name is None and (not from_metric_obj):\n        raise ValueError(\"Please provide a name for your metric like `self.add_metric(tf.reduce_sum(inputs), name='mean_activation', aggregation='mean')`\")\n    elif from_metric_obj:\n        name = value._metric_obj.name\n    if in_call_context:\n        self._symbolic_add_metric(value, aggregation, name)\n    else:\n        if not is_symbolic:\n            raise ValueError('Expected a symbolic Tensor for the metric value, received: ' + str(value))\n        if not getattr(self, '_is_graph_network', False):\n            with backend.get_graph().as_default():\n                self._symbolic_add_metric(value, aggregation, name)\n            return\n        if from_metric_obj:\n            raise ValueError('Using the result of calling a `Metric` object when calling `add_metric` on a Functional Model is not supported.
Please pass the Tensor to monitor directly.')\n self._graph_network_add_metric(value, aggregation, name)", "docstring": "Adds metric tensor to the layer.\n\nArgs:\n value: Metric tensor.\n aggregation: Sample-wise metric reduction function. If `aggregation=None`,\n it indicates that the metric tensor provided has been aggregated\n already. eg, `bin_acc = BinaryAccuracy(name='acc')` followed by\n `model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the\n given metric tensor will be sample-wise reduced using `mean` function.\n eg, `model.add_metric(tf.reduce_sum(outputs), name='output_mean',\n aggregation='mean')`.\n name: String metric name.\n\nRaises:\n ValueError: If `aggregation` is anything other than None or `mean`."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False):\n residual = hidden_states\n hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n second_residual = hidden_states\n cross_attn_weights = None\n hidden_states, cross_attn_weights = self.encoder_attn(hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = second_residual + hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights, cross_attn_weights)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`):\n Input to the layer of shape `(seq_len, batch, embed_dim)`.\n position_embeddings (`torch.FloatTensor`, *optional*):\n Position embeddings that are added to the queries and keys in the self-attention layer.\n reference_points (`torch.FloatTensor`, *optional*):\n Reference points.\n spatial_shapes (`torch.LongTensor`, *optional*):\n Spatial shapes.\n level_start_index (`torch.LongTensor`, *optional*):\n Level start index.\n encoder_hidden_states (`torch.FloatTensor`):\n cross attention input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size\n `(batch, 1, target_len, source_len)` where padding elements are indicated by very large 
negative\n            values.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n            returned tensors for more detail."} +{"repo": "tensorflow", "function": "def unique():\n\n    def _apply_fn(dataset):\n        return dataset.unique()\n    return _apply_fn", "docstring": "Creates a `Dataset` from another `Dataset`, discarding duplicates.\n\nUse this transformation to produce a dataset that contains one instance of\neach unique element in the input. For example:\n\n```python\ndataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])\n\n# Using `unique()` will drop the duplicate elements.\ndataset = dataset.apply(tf.data.experimental.unique())  # ==> { 1, 37, 2 }\n```\n\nReturns:\n  A `Dataset` transformation function, which can be passed to\n  `tf.data.Dataset.apply`."} +{"repo": "tensorflow", "function": "def write(self, index, value, name=None):\n    return self._implementation.write(index, value, name=name)", "docstring": "Write `value` into index `index` of the TensorArray.\n\nArgs:\n  index: 0-D.  int32 scalar with the index to write to.\n  value: N-D.  Tensor of type `dtype`.  The Tensor to write to this index.\n  name: A name for the operation (optional).\n\nReturns:\n  A new TensorArray object with flow that ensures the write occurs.\n  Use this object for all subsequent operations.\n\nRaises:\n  ValueError: if there are more writers than specified."} +{"repo": "beam", "function": "def on_merge(self, to_be_merged, merge_result, context):\n    pass", "docstring": "Called when multiple windows are merged.\n\nArgs:\n  to_be_merged: the set of windows to be merged\n  merge_result: the window into which the windows are being merged\n  context: a context (e.g. a TriggerContext instance) for managing state\n    and setting timers"} +{"repo": "transformers", "function": "def apply_tool_use_template(self, conversation: Union[List[Dict[str, str]]], tools: List[Dict], **kwargs) -> Union[str, List[int]]:\n    return self.apply_chat_template(conversation, chat_template='tool_use', tools=tools, **kwargs)", "docstring": "Create a Command-R tool-use prompt.\n\nOnce rendered, the prompt instructs the model to generate a list of actions to perform on a set of user supplied tools\nto help carry out the user's requests.\n\nConceptually, this works in the same way as `apply_chat_template`, but takes an additional `tools` parameter.\n\nConverts a chat in the form of a list of dictionaries with `\"role\"` and `\"content\"` keys and a list of available\ntools for the model to use into a prompt string, or a list of token ids.\nThis method will use the tokenizer's `default_tool_use_template` template specified at the class level.\nYou can override the default template using the `tool_use_template` kwarg but the quality of your results may decrease.\n\nArgs:\n    conversation (Union[List[Dict[str, str]]]): A list of dicts\n        with \"role\" and \"content\" keys, representing the chat history so far.\n    tools (List[Dict]): a list of tools to render into the prompt for the model to choose from.\n        See an example at the bottom of the docstring.\n        The format should be:\n           * name (str): The name of the tool to be called. Valid names contain only the characters a-z,\n             A-Z, 0-9, _ and must not begin with a digit.\n           * description (str): The description of what the tool does, the model uses the description to\n             choose when and how to call the function.\n           * parameter_definitions (List[Dict]): The input parameters of the tool.
Accepts a dictionary\n             where the key is the name of the parameter and the value is the parameter spec.\n             Valid parameter names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit.\n             Parameter specs are as follows:\n               * description (str): The description of the parameter.\n               * type (str): the type of the parameter - most effective for python builtin data types, such as 'str', 'bool'\n               * required: boolean: Denotes whether the parameter is always present (required) or not. Defaults to not required.\n    add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate\n        the start of an assistant message. This is useful when you want to generate a response from the model.\n        Note that this argument will be passed to the chat template, and so it must be supported in the\n        template for this argument to have any effect.\n    tokenize (`bool`, defaults to `True`):\n        Whether to tokenize the output. If `False`, the output will be a string.\n    padding (`bool`, defaults to `False`):\n        Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`.\n    truncation (`bool`, defaults to `False`):\n        Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.\n    max_length (`int`, *optional*):\n        Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If\n        not specified, the tokenizer's `max_length` attribute will be used as a default.\n    return_tensors (`str` or [`~utils.TensorType`], *optional*):\n        If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable\n        values are:\n            - `'tf'`: Return TensorFlow `tf.Tensor` objects.\n            - `'pt'`: Return PyTorch `torch.Tensor` objects.\n            - `'np'`: Return NumPy `np.ndarray` objects.\n            - `'jax'`: Return JAX `jnp.ndarray` objects.\n    return_dict (`bool`, *optional*, defaults to `False`):\n        Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.\n    **tokenizer_kwargs: Additional kwargs to pass to the tokenizer.\n\nReturns:\n    `str`: A rendered prompt string.\n    or if tokenize=True:\n    `List[int]`: A list of token ids representing the tokenized chat so far, including control tokens. This\n    output is ready to pass to the model, either directly or via methods like `generate()`.\n\nExamples:\n\n```python\n>> tokenizer = CohereTokenizerFast.from_pretrained(\"CohereForAI/c4ai-command-r-v01\")\n>> tools = [\n    {\n        \"name\": \"internet_search\",\n        \"description\": \"Returns a list of relevant document snippets for a textual query retrieved from the internet\",\n        \"parameter_definitions\": {\n            \"query\": {\n                \"description\": \"Query to search the internet with\",\n                \"type\": \"str\",\n                \"required\": True\n            }\n        }\n    },\n    {\n        \"name\": \"directly_answer\",\n        \"description\": \"Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history\",\n        \"parameter_definitions\": {}\n    }\n]\n>> conversation = [\n    {\"role\": \"user\", \"content\": \"Whats the biggest penguin in the world?\"}\n]\n>> # render the prompt, ready for user to inspect, or for input into the model:\n>> prompt = tokenizer.apply_tool_use_template(conversation, tools=tools, tokenize=False, add_generation_prompt=True)\n>> print(prompt)\n<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble\nThe instructions in this section override those in the task description and style guide sections.
Don't answer questions that are harmful or immoral.\n\n# System Preamble\n## Basic Rules\nYou are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions.\n\n# User Preamble\n## Task and Context\nYou help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging.\n\n## Style Guide\nUnless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.\n\n## Available Tools\nHere is a list of tools that you have available to you:\n\n\\`\\`\\`python\ndef internet_search(query: str) -> List[Dict]:\n \"\"\"Returns a list of relevant document snippets for a textual query retrieved from the internet\n\n Args:\n query (str): Query to search the internet with\n \"\"\"\n pass\n\\`\\`\\`\n\n\\`\\`\\`python\ndef directly_answer() -> List[Dict]:\n \"\"\"Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history\n \"\"\"\n pass\n\\`\\`\\`<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. 
The list of actions you want to call should be formatted as a list of json objects, for example:\n\\`\\`\\`json\n[\n {\n \"tool_name\": title of the tool in the specification,\n \"parameters\": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters\n }\n]\\`\\`\\`<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>\n```\n>> inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors='pt')\n>> outputs = model.generate(inputs, max_new_tokens=128)\n>> print(tokenizer.decode(outputs[0]))\nAction: ```json\n[\n {\n \"tool_name\": \"internet_search\",\n \"parameters\": {\n \"query\": \"biggest penguin in the world\"\n }\n }\n]\n```"} +{"repo": "transformers", "function": "def _prepare_images_structure(self, images: ImageInput) -> ImageInput:\n return make_flat_list_of_images(images)", "docstring": "Prepare the images structure for processing.\n\nArgs:\n images (`ImageInput`):\n The input images to process.\n\nReturns:\n `ImageInput`: The images with a valid nesting."} +{"repo": "tensorflow", "function": "def submodules(self):\n return tuple(self._flatten(predicate=_is_module))", "docstring": "Sequence of all sub-modules.\n\nSubmodules are modules which are properties of this module, or found as\nproperties of modules which are properties of this module (and so on).\n\n>>> a = tf.Module()\n>>> b = tf.Module()\n>>> c = tf.Module()\n>>> a.b = b\n>>> b.c = c\n>>> list(a.submodules) == [b, c]\nTrue\n>>> list(b.submodules) == [c]\nTrue\n>>> list(c.submodules) == []\nTrue\n\nReturns:\n A sequence of all submodules."} +{"repo": "transformers", "function": "def from_backbone_configs(cls, backbone_config: PretrainedConfig, **kwargs):\n return cls(backbone_config=backbone_config, **kwargs)", "docstring": "Instantiate a [`DFineConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model\nconfiguration.\n\n Args:\n backbone_config ([`PretrainedConfig`]):\n The backbone configuration.\n\n Returns:\n [`DFineConfig`]: An instance of a configuration object"} +{"repo": "tf-quant-finance", "function": "def add_business_days(self, date_tensor, num_days, roll_convention=constants.BusinessDayConvention.NONE):\n control_deps = []\n if roll_convention == constants.BusinessDayConvention.NONE:\n message = 'Some dates in date_tensor are not business days. Please specify the roll_convention argument.'\n is_bus_day = self.is_business_day(date_tensor)\n control_deps.append(tf.debugging.assert_equal(is_bus_day, True, message=message))\n else:\n date_tensor = self.roll_to_business_day(date_tensor, roll_convention)\n with tf.control_dependencies(control_deps):\n cumul_bus_days_table = self._compute_cumul_bus_days_table()\n cumul_bus_days = self._gather(cumul_bus_days_table, date_tensor.ordinal() - self._ordinal_offset + 1)\n target_cumul_bus_days = cumul_bus_days + num_days\n bus_day_ordinals_table = self._compute_bus_day_ordinals_table()\n ordinals = self._gather(bus_day_ordinals_table, target_cumul_bus_days)\n with tf.control_dependencies(self._assert_ordinals_in_bounds(ordinals)):\n return dt.from_ordinals(ordinals, validate=False)", "docstring": "Adds given number of business days to given dates.\n\nNote that this is different from calling `add_period_and_roll` with\nPeriodType.DAY. For example, adding 5 business days to Monday gives the next\nMonday (unless there are holidays on this week or next Monday). 
Adding 5\ndays and rolling means landing on Saturday and then rolling either to next\nMonday or to Friday of the same week, depending on the roll convention.\n\nIf any of the dates in `date_tensor` are not business days, they will be\nrolled to business days before doing the addition. If `roll_convention` is\n`NONE`, and any dates are not business days, an exception is raised.\n\nArgs:\n date_tensor: DateTensor of dates to advance from.\n num_days: Tensor of int32 type broadcastable to `date_tensor`.\n roll_convention: BusinessDayConvention. Determines how to roll a date that\n falls on a holiday.\n\nReturns:\n The resulting DateTensor."} +{"repo": "transformers", "function": "class HfArgumentParser(ArgumentParser):\n dataclass_types: Iterable[DataClassType]\n\n def __init__(self, dataclass_types: Optional[Union[DataClassType, Iterable[DataClassType]]]=None, **kwargs):\n if dataclass_types is None:\n dataclass_types = []\n elif not isinstance(dataclass_types, Iterable):\n dataclass_types = [dataclass_types]\n if 'formatter_class' not in kwargs:\n kwargs['formatter_class'] = ArgumentDefaultsHelpFormatter\n super().__init__(**kwargs)\n if dataclasses.is_dataclass(dataclass_types):\n dataclass_types = [dataclass_types]\n self.dataclass_types = list(dataclass_types)\n for dtype in self.dataclass_types:\n self._add_dataclass_arguments(dtype)\n\n @staticmethod\n def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):\n long_options = [f'--{field.name}']\n if '_' in field.name:\n long_options.append(f'--{field.name.replace('_', '-')}')\n kwargs = field.metadata.copy()\n if isinstance(field.type, str):\n raise RuntimeError('Unresolved type detected, which should have been done with the help of `typing.get_type_hints` method by default')\n aliases = kwargs.pop('aliases', [])\n if isinstance(aliases, str):\n aliases = [aliases]\n origin_type = getattr(field.type, '__origin__', field.type)\n if origin_type is Union or (hasattr(types, 'UnionType') and isinstance(origin_type, types.UnionType)):\n if str not in field.type.__args__ and (len(field.type.__args__) != 2 or type(None) not in field.type.__args__):\n raise ValueError(f\"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because the argument parser only supports one type per argument. 
Problem encountered in field '{field.name}'.\")\n if type(None) not in field.type.__args__:\n field.type = field.type.__args__[0] if field.type.__args__[1] is str else field.type.__args__[1]\n origin_type = getattr(field.type, '__origin__', field.type)\n elif bool not in field.type.__args__:\n field.type = field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]\n origin_type = getattr(field.type, '__origin__', field.type)\n bool_kwargs = {}\n if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):\n if origin_type is Literal:\n kwargs['choices'] = field.type.__args__\n else:\n kwargs['choices'] = [x.value for x in field.type]\n kwargs['type'] = make_choice_type_function(kwargs['choices'])\n if field.default is not dataclasses.MISSING:\n kwargs['default'] = field.default\n else:\n kwargs['required'] = True\n elif field.type is bool or field.type == Optional[bool]:\n bool_kwargs = copy(kwargs)\n kwargs['type'] = string_to_bool\n if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):\n default = False if field.default is dataclasses.MISSING else field.default\n kwargs['default'] = default\n kwargs['nargs'] = '?'\n kwargs['const'] = True\n elif isclass(origin_type) and issubclass(origin_type, list):\n kwargs['type'] = field.type.__args__[0]\n kwargs['nargs'] = '+'\n if field.default_factory is not dataclasses.MISSING:\n kwargs['default'] = field.default_factory()\n elif field.default is dataclasses.MISSING:\n kwargs['required'] = True\n else:\n kwargs['type'] = field.type\n if field.default is not dataclasses.MISSING:\n kwargs['default'] = field.default\n elif field.default_factory is not dataclasses.MISSING:\n kwargs['default'] = field.default_factory()\n else:\n kwargs['required'] = True\n parser.add_argument(*long_options, *aliases, **kwargs)\n if field.default is True and (field.type is bool or field.type == Optional[bool]):\n bool_kwargs['default'] = False\n parser.add_argument(f'--no_{field.name}', f'--no-{field.name.replace('_', '-')}', action='store_false', dest=field.name, **bool_kwargs)\n\n def _add_dataclass_arguments(self, dtype: DataClassType):\n if hasattr(dtype, '_argument_group_name'):\n parser = self.add_argument_group(dtype._argument_group_name)\n else:\n parser = self\n try:\n type_hints: dict[str, type] = get_type_hints(dtype)\n except NameError:\n raise RuntimeError(f'Type resolution failed for {dtype}. Try declaring the class in global scope or removing line of `from __future__ import annotations` which opts in Postponed Evaluation of Annotations (PEP 563)')\n except TypeError as ex:\n if sys.version_info[:2] < (3, 10) and 'unsupported operand type(s) for |' in str(ex):\n python_version = '.'.join(map(str, sys.version_info[:3]))\n raise RuntimeError(f'Type resolution failed for {dtype} on Python {python_version}. Try removing line of `from __future__ import annotations` which opts in union types as `X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To support Python versions lower than 3.10, you need to use `typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of `X | None`.') from ex\n raise\n for field in dataclasses.fields(dtype):\n if not field.init:\n continue\n field.type = type_hints[field.name]\n self._parse_dataclass_field(parser, field)\n\n def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None) -> tuple[DataClass, ...]:\n \"\"\"\n Parse command-line args into instances of the specified dataclass types.\n\n This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at:\n docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args\n\n Args:\n args:\n List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)\n return_remaining_strings:\n If true, also return a list of remaining argument strings.\n look_for_args_file:\n If true, will look for a \".args\" file with the same base name as the entry point script for this\n process, and will append its potential content to the command line args.\n args_filename:\n If not None, will use this file instead of the \".args\" file specified in the previous argument.\n args_file_flag:\n If not None, will look for a file in the command-line args specified with this flag. The flag can be\n specified multiple times and precedence is determined by the order (last one wins).\n\n Returns:\n Tuple consisting of:\n\n - the dataclass instances in the same order as they were passed to the initializer.\n - if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser\n after initialization.\n - The potential list of remaining argument strings.
(same as argparse.ArgumentParser.parse_known_args)\n \"\"\"\n if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):\n args_files = []\n if args_filename:\n args_files.append(Path(args_filename))\n elif look_for_args_file and len(sys.argv):\n args_files.append(Path(sys.argv[0]).with_suffix('.args'))\n if args_file_flag:\n args_file_parser = ArgumentParser()\n args_file_parser.add_argument(args_file_flag, type=str, action='append')\n cfg, args = args_file_parser.parse_known_args(args=args)\n cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip('-'), None)\n if cmd_args_file_paths:\n args_files.extend([Path(p) for p in cmd_args_file_paths])\n file_args = []\n for args_file in args_files:\n if args_file.exists():\n file_args += args_file.read_text().split()\n args = file_args + args if args is not None else file_args + sys.argv[1:]\n namespace, remaining_args = self.parse_known_args(args=args)\n outputs = []\n for dtype in self.dataclass_types:\n keys = {f.name for f in dataclasses.fields(dtype) if f.init}\n inputs = {k: v for k, v in vars(namespace).items() if k in keys}\n for k in keys:\n delattr(namespace, k)\n obj = dtype(**inputs)\n outputs.append(obj)\n if len(namespace.__dict__) > 0:\n outputs.append(namespace)\n if return_remaining_strings:\n return (*outputs, remaining_args)\n else:\n if remaining_args:\n raise ValueError(f'Some specified arguments are not used by the HfArgumentParser: {remaining_args}')\n return (*outputs,)\n\n def parse_dict(self, args: dict[str, Any], allow_extra_keys: bool=False) -> tuple[DataClass, ...]:\n \"\"\"\n Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass\n types.\n\n Args:\n args (`dict`):\n dict containing config values\n allow_extra_keys (`bool`, *optional*, defaults to `False`):\n Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed.\n\n Returns:\n Tuple consisting of:\n\n - the dataclass instances in the same order as they were passed to the initializer.\n \"\"\"\n unused_keys = set(args.keys())\n outputs = []\n for dtype in self.dataclass_types:\n keys = {f.name for f in dataclasses.fields(dtype) if f.init}\n inputs = {k: v for k, v in args.items() if k in keys}\n unused_keys.difference_update(inputs.keys())\n obj = dtype(**inputs)\n outputs.append(obj)\n if not allow_extra_keys and unused_keys:\n raise ValueError(f'Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}')\n return tuple(outputs)\n\n def parse_json_file(self, json_file: Union[str, os.PathLike], allow_extra_keys: bool=False) -> tuple[DataClass, ...]:\n \"\"\"\n Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the\n dataclass types.\n\n Args:\n json_file (`str` or `os.PathLike`):\n File name of the json file to parse\n allow_extra_keys (`bool`, *optional*, defaults to `False`):\n Defaults to False. 
If False, will raise an exception if the json file contains keys that are not\n parsed.\n\n Returns:\n Tuple consisting of:\n\n - the dataclass instances in the same order as they were passed to the initializer.\n \"\"\"\n with open(Path(json_file), encoding='utf-8') as open_json_file:\n data = json.loads(open_json_file.read())\n outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)\n return tuple(outputs)\n\n def parse_yaml_file(self, yaml_file: Union[str, os.PathLike], allow_extra_keys: bool=False) -> tuple[DataClass, ...]:\n \"\"\"\n Alternative helper method that does not use `argparse` at all, instead loading a yaml file and populating the\n dataclass types.\n\n Args:\n yaml_file (`str` or `os.PathLike`):\n File name of the yaml file to parse\n allow_extra_keys (`bool`, *optional*, defaults to `False`):\n Defaults to False. If False, will raise an exception if the yaml file contains keys that are not\n parsed.\n\n Returns:\n Tuple consisting of:\n\n - the dataclass instances in the same order as they were passed to the initializer.\n \"\"\"\n outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)\n return tuple(outputs)", "docstring": "This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.\n\nThe class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed)\narguments to the parser after initialization and you'll get the output back after parsing as an additional\nnamespace. Optional: To create sub argument groups use the `_argument_group_name` attribute in the dataclass.\n\nArgs:\n dataclass_types (`DataClassType` or `Iterable[DataClassType]`, *optional*):\n Dataclass type, or list of dataclass types for which we will \"fill\" instances with the parsed args.\n kwargs (`Dict[str, Any]`, *optional*):\n Passed to `argparse.ArgumentParser()` in the regular way."} +{"repo": "tensorflow", "function": "def __init__(self, cluster_resolver=None, communication_options=None, *, mesh=None):\n self._validate_init_args(mesh, cluster_resolver)\n if not mesh:\n if not cluster_resolver:\n cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()\n dtensor_env_var = _parse_dtensor_env_var_from_cluster_resolver(cluster_resolver)\n _config_dtensor_env_var(dtensor_env_var)\n mesh = _build_distributed_mesh(dtensor_util.DEFAULT_BATCH_MESH_DIM_NAME)\n extended = dtensor_strategy_extended.DTensorStrategyExtended(container_strategy=self, mesh=mesh)\n super().__init__(extended)\n self._mesh = mesh\n self._cluster_resolver = cluster_resolver", "docstring": "Creates the strategy.\n\nArgs:\n cluster_resolver: optional\n `tf.distribute.cluster_resolver.ClusterResolver`. In case neither `mesh`\n nor `cluster_resolver` are provided,\n `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.\n communication_options: currently ignored.\n mesh: optional Dtensor global mesh for the computation. Note that either\n `mesh` or the `cluster_resolver` should be provided,
and not both."} +{"repo": "keras", "function": "def transform(self, X):\n sklearn.base.check_is_fitted(self)\n X = _validate_data(self, X, reset=False)\n return self.model_.predict(X)", "docstring": "Transform the data.\n\nArgs:\n X: array-like, shape=(n_samples, n_features)\n The input samples.\n\nReturns:\n X_transformed: array-like, shape=(n_samples, n_features)\n The transformed data."} +{"repo": "tensorflow", "function": "def stop_on_exception(self):\n return self._coord.stop_on_exception()", "docstring": "Context handler to stop the supervisor when an exception is raised.\n\nSee `Coordinator.stop_on_exception()`.\n\nReturns:\n A context handler."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A BERT sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "keras", "function": "def pack_sequence_as(structure, flat_sequence):\n return tree_impl.pack_sequence_as(structure, flat_sequence)", "docstring": "Returns a given flattened sequence packed into a given structure.\n\nIf `structure` is an atom, `flat_sequence` must be a single-item list; in\nthis case the return value is `flat_sequence[0]`.\n\nIf `structure` is or contains a dict instance, the keys will be sorted to\npack the flat sequence in deterministic order. However, instances of\n`collections.OrderedDict` are handled differently: their sequence order is\nused instead of the sorted keys. The same convention is followed in\n`flatten`. This correctly repacks dicts and `OrderedDicts` after they have\nbeen flattened, or vice-versa.\n\nDictionaries with non-sortable keys are not supported.\n\nExamples:\n\n>>> structure = {\"key3\": \"\", \"key1\": \"\", \"key2\": \"\"}\n>>> flat_sequence = [\"value1\", \"value2\", \"value3\"]\n>>> keras.tree.pack_sequence_as(structure, flat_sequence)\n{\"key3\": \"value3\", \"key1\": \"value1\", \"key2\": \"value2\"}\n\n>>> structure = ((\"a\", \"b\"), (\"c\", \"d\", \"e\"), \"f\")\n>>> flat_sequence = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]\n>>> keras.tree.pack_sequence_as(structure, flat_sequence)\n((1.0, 2.0), (3.0, 4.0, 5.0), 6.0)\n\n>>> structure = {\"key3\": {\"c\": (\"alpha\", \"beta\"), \"a\": (\"gamma\")},\n... 
\"key1\": {\"e\": \"val1\", \"d\": \"val2\"}}\n>>> flat_sequence = [\"val2\", \"val1\", 3.0, 1.0, 2.0]\n>>> keras.tree.pack_sequence_as(structure, flat_sequence)\n{'key3': {'c': (1.0, 2.0), 'a': 3.0}, 'key1': {'e': 'val1', 'd': 'val2'}}\n\n>>> structure = [\"a\"]\n>>> flat_sequence = [np.array([[1, 2], [3, 4]])]\n>>> keras.tree.pack_sequence_as(structure, flat_sequence)\n[array([[1, 2],\n [3, 4]])]\n\n>>> structure = [\"a\"]\n>>> flat_sequence = [keras.ops.ones([2, 2])]\n>>> keras.tree.pack_sequence_as(structure, flat_sequence)\n[array([[1., 1.],\n [1., 1.]]]\n\nArgs:\n structure: Arbitrarily nested structure.\n flat_sequence: Flat sequence to pack.\n\nReturns:\n `flat_sequence` converted to have the same recursive structure as\n `structure`.\n\nRaises:\n TypeError: If `flat_sequence` is not iterable.\n ValueError: If `flat_sequence` cannot be repacked as `structure`; for\n instance, if `flat_sequence` has too few or too many elements."} +{"repo": "beam", "function": "def score_one(self, x: beam.Row) -> Optional[float]:\n if len(x.__dict__) != 1:\n raise ValueError('RobustZScore.score_one expected univariate input, but got %s', str(x))\n v = next(iter(x))\n if v is None or math.isnan(v):\n return None\n median = self._mad_tracker.get_median()\n mad = self._mad_tracker.get()\n if math.isnan(mad) or math.isnan(median):\n return float('NaN')\n if abs(mad) < EPSILON:\n return 0.0\n return abs(RobustZScore.SCALE_FACTOR * (v - median) / mad)", "docstring": "Scores a data point using the Robust Z-Score.\n\nArgs:\n x: A `beam.Row` containing a single numerical value.\n\nReturns:\n float | None: The Robust Z-Score."} +{"repo": "tensorflow", "function": "def get_summary_description(node_def):\n if node_def.op != 'TensorSummary':\n raise ValueError(\"Can't get_summary_description on %s\" % node_def.op)\n description_str = _compat.as_str_any(node_def.attr['description'].s)\n summary_description = SummaryDescription()\n _json_format.Parse(description_str, summary_description)\n return summary_description", "docstring": "Given a TensorSummary node_def, retrieve its SummaryDescription.\n\nWhen a Summary op is instantiated, a SummaryDescription of associated\nmetadata is stored in its NodeDef. This method retrieves the description.\n\nArgs:\n node_def: the node_def_pb2.NodeDef of a TensorSummary op\n\nReturns:\n a summary_pb2.SummaryDescription\n\nRaises:\n ValueError: if the node is not a summary op.\n\n@compatibility(eager)\nNot compatible with eager execution. 
To write TensorBoard\nsummaries under eager execution, use `tf.contrib.summary` instead.\n@end_compatibility"} +{"repo": "pytype", "function": "def get_module_action(self, module):\n f = module.full_path\n if f in self.filenames:\n action = Action.CHECK\n report = logging.warning\n else:\n action = Action.INFER\n report = logging.info\n if not module.name.startswith('pytype_extensions.') and module.kind in ('Builtin', 'System'):\n action = Action.GENERATE_DEFAULT\n report('%s: %s module %s', action, module.kind, module.name)\n return action", "docstring": "Get the action for the given module.\n\nArgs:\n module: A module_utils.Module object.\n\nReturns:\n An Action object, or None for a non-Python file."} +{"repo": "tensorflow", "function": "def matmul(self, x, adjoint=False, adjoint_arg=False, name='matmul'):\n if isinstance(x, linear_operator.LinearOperator):\n left_operator = self.adjoint() if adjoint else self\n right_operator = x.adjoint() if adjoint_arg else x\n if right_operator.range_dimension is not None and left_operator.domain_dimension is not None and (right_operator.range_dimension != left_operator.domain_dimension):\n raise ValueError('Operators are incompatible. Expected `x` to have dimension {} but got {}.'.format(left_operator.domain_dimension, right_operator.range_dimension))\n with self._name_scope(name):\n return self._linop_matmul(left_operator, right_operator)\n with self._name_scope(name):\n arg_dim = -1 if adjoint_arg else -2\n block_dimensions = self._block_range_dimensions() if adjoint else self._block_domain_dimensions()\n if linear_operator_util.arg_is_blockwise(block_dimensions, x, arg_dim):\n for i, block in enumerate(x):\n if not isinstance(block, linear_operator.LinearOperator):\n block = tensor_conversion.convert_to_tensor_v2_with_dispatch(block)\n self._check_input_dtype(block)\n block_dimensions[i].assert_is_compatible_with(block.shape[arg_dim])\n x[i] = block\n else:\n x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n self._check_input_dtype(x)\n op_dimension = self.range_dimension if adjoint else self.domain_dimension\n op_dimension.assert_is_compatible_with(x.shape[arg_dim])\n return self._matmul(x, adjoint=adjoint, adjoint_arg=adjoint_arg)", "docstring": "Transform [batch] matrix `x` with left multiplication: `x --> Ax`.\n\n```python\n# Make an operator acting like batch matrix A. Assume A.shape = [..., M, N]\noperator = LinearOperator(...)\noperator.shape = [..., M, N]\n\nX = ... # shape [..., N, R], batch matrix, R > 0.\n\nY = operator.matmul(X)\nY.shape\n==> [..., M, R]\n\nY[..., :, r] = sum_j A[..., :, j] X[j, r]\n```\n\nArgs:\n x: `LinearOperator`, `Tensor` with compatible shape and same `dtype` as\n `self`, or a blockwise iterable of `LinearOperator`s or `Tensor`s. See\n class docstring for definition of shape compatibility.\n adjoint: Python `bool`. If `True`, left multiply by the adjoint: `A^H x`.\n adjoint_arg: Python `bool`. 
If `True`, compute `A x^H` where `x^H` is\n the hermitian transpose (transposition and complex conjugation).\n name: A name for this `Op`.\n\nReturns:\n A `LinearOperator` or `Tensor` with shape `[..., M, R]` and same `dtype`\n as `self`, or if `x` is blockwise, a list of `Tensor`s with shapes that\n concatenate to `[..., M, R]`."} +{"repo": "transformers", "function": "def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]:\n height, width = image_size\n raw_size = None\n if max_size is not None:\n min_original_size = float(min((height, width)))\n max_original_size = float(max((height, width)))\n if max_original_size / min_original_size * size > max_size:\n raw_size = max_size * min_original_size / max_original_size\n size = int(round(raw_size))\n if height <= width and height == size or (width <= height and width == size):\n oh, ow = (height, width)\n elif width < height:\n ow = size\n if max_size is not None and raw_size is not None:\n oh = int(raw_size * height / width)\n else:\n oh = int(size * height / width)\n else:\n oh = size\n if max_size is not None and raw_size is not None:\n ow = int(raw_size * width / height)\n else:\n ow = int(size * width / height)\n return (oh, ow)", "docstring": "Computes the output image size given the input image size and the desired output size.\n\nArgs:\n image_size (`Tuple[int, int]`):\n The input image size.\n size (`int`):\n The desired output size.\n max_size (`int`, *optional*):\n The maximum allowed output size."} +{"repo": "transformers", "function": "def _reshape(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n batch_size, seq_length, three_times_hidden_size = fused_qkv.shape\n fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)\n query_layer = fused_qkv[..., 0, :].transpose(1, 2)\n key_layer = fused_qkv[..., 1, :].transpose(1, 2)\n value_layer = fused_qkv[..., 2, :].transpose(1, 2)\n return (query_layer, key_layer, value_layer)", "docstring": "Split the last dimension into (num_heads, head_dim) and reshapes to (bs, heads, len, dim) shape\nwithout making any copies, results share same memory storage as `fused_qkv`\n\nArgs:\n fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]\n\nReturns:\n query: [batch_size, num_heads, seq_length, head_dim]\n key: [batch_size, num_heads, seq_length, head_dim]\n value: [batch_size, num_heads, seq_length, head_dim]"} +{"repo": "tensorflow", "function": "def wrap_in_placeholder(self, arg, shape_info):\n if shape_info == 'known':\n return arg\n if isinstance(arg, ragged_tensor.RaggedTensor):\n return arg.with_flat_values(self.wrap_in_placeholder(arg.flat_values, shape_info))\n if isinstance(arg, tensor_shape.TensorShape):\n if arg.ndims is None:\n return arg\n arg = constant_op.constant(arg.as_list())\n if shape_info == 'unknown_rank':\n return array_ops.placeholder_with_default(arg, None)\n if shape_info == 'unknown_dims':\n return array_ops.placeholder_with_default(arg, [None] * arg.shape.rank)\n raise AssertionError('Unexpected shape_info %r' % shape_info)", "docstring": "Wraps `arg` in a placeholder to limit static shape info.\n\nArgs:\n arg: The value to wrap. A Tensor, RaggedTensor, or TensorShape.\n shape_info: One of ['known', 'unknown_dims', 'unknown_rank'].\n\nReturns:\n * If shape_info is 'known': returns `arg`.\n * If shape_info is 'unknown_dims': returns a placeholder wrapping `arg`\n where the dimension sizes are unknown. 
If `arg` is a TensorShape,\n then convert it to a vector first. If `arg` is a RaggedTensor, then\n wrap the flat_values.\n * If shape_info is 'unknown_rank': returns a placeholder wrapping `arg`\n where the rank is unknown. If `arg` is a TensorShape, then convert it\n to a vector first. If `arg` is a RaggedTensor, then wrap the\n flat_values."} +{"repo": "beam", "function": "def __init__(self, project):\n self._project = project\n self._client = None\n self._rpc_successes = Metrics.counter(_Mutate.DatastoreMutateFn, 'datastoreRpcSuccesses')\n self._rpc_errors = Metrics.counter(_Mutate.DatastoreMutateFn, 'datastoreRpcErrors')\n self._throttled_secs = Metrics.counter(_Mutate.DatastoreMutateFn, 'cumulativeThrottlingSeconds')\n self._throttler = AdaptiveThrottler(window_ms=120000, bucket_ms=1000, overload_ratio=1.25)", "docstring": "Args:\n project: (str) cloud project id"} +{"repo": "pytype", "function": "def _should_merge(self, pytd_type, union):\n names = self._CONTAINER_NAMES[pytd_type]\n length = None\n for t in union.type_list:\n if isinstance(t, pytd_type):\n if length is None:\n length = len(t.parameters)\n elif length != len(t.parameters):\n return True\n elif isinstance(t, pytd.GenericType) and t.name in names:\n return True\n return False", "docstring": "Determine whether pytd_type values in the union should be merged.\n\nIf the union contains the homogeneous flavor of pytd_type (e.g.,\nGenericType(base_type=tuple) when pytd_type is TupleType), or pytd_type\nvalues of different lengths, we want to turn all of the pytd_type values\ninto homogeneous ones so that they can be merged into a single container.\n\nArgs:\n pytd_type: The pytd type, either TupleType or CallableType.\n union: a pytd.UnionType\n\nReturns:\n True if the pytd_type values should be merged, False otherwise."} +{"repo": "transformers", "function": "class TFTransfoXLModelOutput(ModelOutput):\n last_hidden_state: Optional[tf.Tensor] = None\n mems: List[tf.Tensor] = None\n hidden_states: Tuple[tf.Tensor] | None = None\n attentions: Tuple[tf.Tensor] | None = None", "docstring": "Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\nArgs:\n last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n mems (`List[tf.Tensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`\n input) to speed up sequential decoding. 
The token ids which have their past given to this model should not\n be passed as input ids as they have already been computed.\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "transformers", "function": "def forward(self, past_values: torch.Tensor, past_time_features: torch.Tensor, past_observed_mask: torch.Tensor, static_categorical_features: Optional[torch.Tensor]=None, static_real_features: Optional[torch.Tensor]=None, future_values: Optional[torch.Tensor]=None, future_time_features: Optional[torch.Tensor]=None, future_observed_mask: Optional[torch.Tensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[List[torch.FloatTensor]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, output_hidden_states: Optional[bool]=None, output_attentions: Optional[bool]=None, use_cache: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Seq2SeqTSModelOutput, Tuple]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if future_values is not None:\n use_cache = False\n outputs = self.model(past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, use_cache=use_cache, return_dict=return_dict, cache_position=cache_position)\n prediction_loss = None\n params = None\n if future_values is not None:\n params = self.output_params(outputs[0])\n distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2])\n loss = self.loss(distribution, future_values)\n if future_observed_mask is None:\n future_observed_mask = torch.ones_like(future_values)\n if len(self.target_shape) == 0:\n loss_weights = future_observed_mask\n else:\n loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False)\n prediction_loss = weighted_average(loss, weights=loss_weights)\n if not return_dict:\n outputs = (params,) + outputs[1:] if params is not None else outputs[1:]\n return (prediction_loss,) + outputs if prediction_loss is not None else outputs\n return Seq2SeqTSPredictionOutput(loss=prediction_loss, params=params, past_key_values=outputs.past_key_values, 
decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loc=outputs.loc, scale=outputs.scale, static_features=outputs.static_features)", "docstring": "past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`):\n Past values of the time series, that serve as context in order to predict the future. The sequence size of\n this tensor must be larger than the `context_length` of the model, since the model will use the larger size\n to construct lag features, i.e. additional values from the past which are added in order to serve as \"extra\n context\".\n\n The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which if no\n `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the largest\n look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the actual length of\n the past.\n\n The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as\n `static_categorical_features`, `static_real_features`, `past_time_features` and lags).\n\n Optionally, missing values need to be replaced with zeros and indicated via the `past_observed_mask`.\n\n For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of\n variates in the time series per time step.\npast_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):\n Required time features, which the model internally will add to `past_values`. These could be things like\n \"month of year\", \"day of the month\", etc. encoded as vectors (for instance as Fourier features). These\n could also be so-called \"age\" features, which basically help the model know \"at which point in life\" a\n time-series is. Age features have small values for distant past time steps and increase monotonically the\n more we approach the current time step. Holiday features are also a good example of time features.\n\n These features serve as the \"positional encodings\" of the inputs. So contrary to a model like BERT, where\n the position encodings are learned from scratch internally as parameters of the model, the Time Series\n Transformer requires to provide additional time features. The Time Series Transformer only learns\n additional embeddings for `static_categorical_features`.\n\n Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features\n must but known at prediction time.\n\n The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.\npast_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):\n Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in\n `[0, 1]`:\n\n - 1 for values that are **observed**,\n - 0 for values that are **missing** (i.e. 
NaNs that were replaced by zeros).\nstatic_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):\n Optional static categorical features for which the model will learn an embedding, which it will add to the\n values of the time series.\n\n Static categorical features are features which have the same value for all time steps (static over time).\n\n A typical example of a static categorical feature is a time series ID.\nstatic_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):\n Optional static real features which the model will add to the values of the time series.\n\n Static real features are features which have the same value for all time steps (static over time).\n\n A typical example of a static real feature is promotion information.\nfuture_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)` or `(batch_size, prediction_length, input_size)`, *optional*):\n Future values of the time series, that serve as labels for the model. The `future_values` is what the\n Transformer needs during training to learn to output, given the `past_values`.\n\n The sequence length here is equal to `prediction_length`.\n\n See the demo notebook and code snippets for details.\n\n Optionally, during training any missing values need to be replaced with zeros and indicated via the\n `future_observed_mask`.\n\n For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the number of\n variates in the time series per time step.\nfuture_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):\n Required time features for the prediction window, which the model internally will add to `future_values`.\n These could be things like \"month of year\", \"day of the month\", etc. encoded as vectors (for instance as\n Fourier features). These could also be so-called \"age\" features, which basically help the model know \"at\n which point in life\" a time-series is. Age features have small values for distant past time steps and\n increase monotonically the more we approach the current time step. Holiday features are also a good example\n of time features.\n\n These features serve as the \"positional encodings\" of the inputs. So contrary to a model like BERT, where\n the position encodings are learned from scratch internally as parameters of the model, the Time Series\n Transformer requires to provide additional time features. The Time Series Transformer only learns\n additional embeddings for `static_categorical_features`.\n\n Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these features\n must but known at prediction time.\n\n The `num_features` here is equal to `config.`num_time_features` + `config.num_dynamic_real_features`.\nfuture_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):\n Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected\n in `[0, 1]`:\n\n - 1 for values that are **observed**,\n - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).\n\n This mask is used to filter out missing values for the final loss calculation.\ncross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\nencoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):\n Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n\nExamples:\n\n```python\n>>> from huggingface_hub import hf_hub_download\n>>> import torch\n>>> from transformers import TimeSeriesTransformerForPrediction\n\n>>> file = hf_hub_download(\n... repo_id=\"hf-internal-testing/tourism-monthly-batch\", filename=\"train-batch.pt\", repo_type=\"dataset\"\n... )\n>>> batch = torch.load(file)\n\n>>> model = TimeSeriesTransformerForPrediction.from_pretrained(\n... \"huggingface/time-series-transformer-tourism-monthly\"\n... )\n\n>>> # during training, one provides both past and future values\n>>> # as well as possible additional features\n>>> outputs = model(\n... past_values=batch[\"past_values\"],\n... past_time_features=batch[\"past_time_features\"],\n... past_observed_mask=batch[\"past_observed_mask\"],\n... static_categorical_features=batch[\"static_categorical_features\"],\n... static_real_features=batch[\"static_real_features\"],\n... future_values=batch[\"future_values\"],\n... future_time_features=batch[\"future_time_features\"],\n... )\n\n>>> loss = outputs.loss\n>>> loss.backward()\n\n>>> # during inference, one only provides past values\n>>> # as well as possible additional features\n>>> # the model autoregressively generates future values\n>>> outputs = model.generate(\n... past_values=batch[\"past_values\"],\n... past_time_features=batch[\"past_time_features\"],\n... past_observed_mask=batch[\"past_observed_mask\"],\n... static_categorical_features=batch[\"static_categorical_features\"],\n... static_real_features=batch[\"static_real_features\"],\n... future_time_features=batch[\"future_time_features\"],\n... )\n\n>>> mean_prediction = outputs.sequences.mean(dim=1)\n```"} +{"repo": "tensorflow", "function": "def _get_executor_init(self, workers):\n\n def pool_fn(seqs):\n pool = get_pool_class(True)(workers, initializer=init_pool_generator, initargs=(seqs, self.random_seed, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n return pool_fn", "docstring": "Gets the Pool initializer for multiprocessing.\n\nArgs:\n workers: Number of works.\n\nReturns:\n A Function to initialize the pool"} +{"repo": "tensorflow", "function": "def clip_by_value(t, clip_value_min, clip_value_max, name=None):\n with ops.name_scope(name, 'clip_by_value', [t, clip_value_min, clip_value_max]) as name:\n values = ops.convert_to_tensor(t.values if isinstance(t, indexed_slices.IndexedSlices) else t, name='t')\n t_min = math_ops.minimum(values, clip_value_max)\n values.shape.assert_is_compatible_with(t_min.shape)\n t_max = math_ops.maximum(t_min, clip_value_min, name=name)\n values.shape.assert_is_compatible_with(t_max.shape)\n if isinstance(t, indexed_slices.IndexedSlices):\n t_max = indexed_slices.IndexedSlices(t_max, t.indices, t.dense_shape)\n return t_max", "docstring": "Clips tensor values to a specified min and max.\n\nGiven a tensor `t`, this operation returns a tensor of the same type and\nshape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.\nAny values less than `clip_value_min` are set to `clip_value_min`. 
Any values\ngreater than `clip_value_max` are set to `clip_value_max`.\n\nNote: `clip_value_min` needs to be smaller or equal to `clip_value_max` for\ncorrect results.\n\nFor example:\n\nBasic usage passes a scalar as the min and max value.\n\n>>> t = tf.constant([[-10., -1., 0.], [0., 2., 10.]])\n>>> t2 = tf.clip_by_value(t, clip_value_min=-1, clip_value_max=1)\n>>> t2.numpy()\narray([[-1., -1., 0.],\n [ 0., 1., 1.]], dtype=float32)\n\nThe min and max can be the same size as `t`, or broadcastable to that size.\n\n>>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])\n>>> clip_min = [[2],[1]]\n>>> t3 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)\n>>> t3.numpy()\narray([[ 2., 2., 10.],\n [ 1., 1., 10.]], dtype=float32)\n\nBroadcasting fails, intentionally, if you would expand the dimensions of `t`\n\n>>> t = tf.constant([[-1, 0., 10.], [-1, 0, 10]])\n>>> clip_min = [[[2, 1]]] # Has a third axis\n>>> t4 = tf.clip_by_value(t, clip_value_min=clip_min, clip_value_max=100)\nTraceback (most recent call last):\n...\nInvalidArgumentError: Incompatible shapes: [2,3] vs. [1,1,2]\n\nIt throws a `TypeError` if you try to clip an `int` to a `float` value\n(`tf.cast` the input to `float` first).\n\n>>> t = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)\n>>> t5 = tf.clip_by_value(t, clip_value_min=-3.1, clip_value_max=3.1)\nTraceback (most recent call last):\n...\nTypeError: Cannot convert ...\n\n\nArgs:\n t: A `Tensor` or `IndexedSlices`.\n clip_value_min: The minimum value to clip to. A scalar `Tensor` or one that\n is broadcastable to the shape of `t`.\n clip_value_max: The maximum value to clip to. A scalar `Tensor` or one that\n is broadcastable to the shape of `t`.\n name: A name for the operation (optional).\n\nReturns:\n A clipped `Tensor` or `IndexedSlices`.\n\nRaises:\n `tf.errors.InvalidArgumentError`: If the clip tensors would trigger array\n broadcasting that would make the returned tensor larger than the input.\n TypeError: If dtype of the input is `int32` and dtype of\n the `clip_value_min` or `clip_value_max` is `float32`"} +{"repo": "transformers", "function": "class DepthProConfig(PretrainedConfig):\n model_type = 'depth_pro'\n sub_configs = {'image_model_config': AutoConfig, 'patch_model_config': AutoConfig, 'fov_model_config': AutoConfig}\n\n def __init__(self, fusion_hidden_size=256, patch_size=384, initializer_range=0.02, intermediate_hook_ids=[11, 5], intermediate_feature_dims=[256, 256], scaled_images_ratios=[0.25, 0.5, 1], scaled_images_overlap_ratios=[0.0, 0.5, 0.25], scaled_images_feature_dims=[1024, 1024, 512], merge_padding_value=3, use_batch_norm_in_fusion_residual=False, use_bias_in_fusion_residual=True, use_fov_model=False, num_fov_head_layers=2, image_model_config=None, patch_model_config=None, fov_model_config=None, **kwargs):\n super().__init__(**kwargs)\n if scaled_images_ratios != sorted(scaled_images_ratios):\n raise ValueError(f'Values in scaled_images_ratios={scaled_images_ratios} should be sorted from low to high')\n if not len(scaled_images_ratios) == len(scaled_images_overlap_ratios) == len(scaled_images_feature_dims):\n raise ValueError(f'len(scaled_images_ratios)={len(scaled_images_ratios)} and len(scaled_images_overlap_ratios)={len(scaled_images_overlap_ratios)} and len(scaled_images_feature_dims)={len(scaled_images_feature_dims)}, should match in config.')\n if not len(intermediate_hook_ids) == len(intermediate_feature_dims):\n raise ValueError(f'len(intermediate_hook_ids)={len(intermediate_hook_ids)} and 
len(intermediate_feature_dims)={len(intermediate_feature_dims)}, should match in config.')\n if fusion_hidden_size // 2 ** num_fov_head_layers == 0:\n raise ValueError(f'fusion_hidden_size={fusion_hidden_size} should be consistent with num_fov_head_layers={num_fov_head_layers} i.e fusion_hidden_size // 2**num_fov_head_layers > 0')\n self.fusion_hidden_size = fusion_hidden_size\n self.patch_size = patch_size\n self.initializer_range = initializer_range\n self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual\n self.use_bias_in_fusion_residual = use_bias_in_fusion_residual\n self.use_fov_model = use_fov_model\n self.num_fov_head_layers = num_fov_head_layers\n self.intermediate_hook_ids = intermediate_hook_ids\n self.intermediate_feature_dims = intermediate_feature_dims\n self.scaled_images_ratios = scaled_images_ratios\n self.scaled_images_overlap_ratios = scaled_images_overlap_ratios\n self.scaled_images_feature_dims = scaled_images_feature_dims\n self.merge_padding_value = merge_padding_value\n self.image_model_config = image_model_config\n self.patch_model_config = patch_model_config\n self.fov_model_config = fov_model_config\n for sub_config_key in self.sub_configs.keys():\n sub_config = getattr(self, sub_config_key)\n if sub_config is None:\n sub_config = CONFIG_MAPPING['dinov2'](image_size=patch_size)\n logger.info(f'`{sub_config_key}` is `None`. Initializing `{sub_config_key}` with the `Dinov2Config` with default values except `{sub_config_key}.image_size` is set to `config.patch_size`.')\n elif isinstance(sub_config, dict):\n sub_config = deepcopy(sub_config)\n if 'model_type' not in sub_config:\n raise KeyError(f'The `model_type` key is missing in the `{sub_config_key}` dictionary. Please provide the model type.')\n elif sub_config['model_type'] not in CONFIG_MAPPING:\n raise ValueError(f'The model type `{sub_config['model_type']}` in `{sub_config_key}` is not supported. Please provide a valid model type.')\n image_size = sub_config.get('image_size')\n if image_size != patch_size:\n logger.info(f'The `image_size` in `{sub_config_key}` is set to `{image_size}`, but it does not match the required `patch_size` of `{patch_size}`. Updating `image_size` to `{patch_size}` for consistency. Ensure that `image_size` aligns with `patch_size` in the configuration.')\n sub_config.update({'image_size': patch_size})\n sub_config = CONFIG_MAPPING[sub_config['model_type']](**sub_config)\n elif isinstance(sub_config, PretrainedConfig):\n sub_config = sub_config\n image_size = getattr(sub_config, 'image_size', None)\n if image_size != patch_size:\n raise ValueError(f'`config.{sub_config_key}.image_size={image_size}` should match `config.patch_size={patch_size}`.')\n else:\n raise TypeError(f'Invalid type for `sub_config`. Expected `PretrainedConfig`, `dict`, or `None`, but got {type(sub_config)}.')\n setattr(self, sub_config_key, sub_config)", "docstring": "This is the configuration class to store the configuration of a [`DepthProModel`]. It is used to instantiate a\nDepthPro model according to the specified arguments, defining the model architecture. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the DepthPro\n[apple/DepthPro](https://huggingface.co/apple/DepthPro) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. 
Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n fusion_hidden_size (`int`, *optional*, defaults to 256):\n The number of channels before fusion.\n patch_size (`int`, *optional*, defaults to 384):\n The size (resolution) of each patch. This is also the image_size for backbone model.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n intermediate_hook_ids (`List[int]`, *optional*, defaults to `[11, 5]`):\n Indices of the intermediate hidden states from the patch encoder to use for fusion.\n intermediate_feature_dims (`List[int]`, *optional*, defaults to `[256, 256]`):\n Hidden state dimensions during upsampling for each intermediate hidden state in `intermediate_hook_ids`.\n scaled_images_ratios (`List[float]`, *optional*, defaults to `[0.25, 0.5, 1]`):\n Ratios of scaled images to be used by the patch encoder.\n scaled_images_overlap_ratios (`List[float]`, *optional*, defaults to `[0.0, 0.5, 0.25]`):\n Overlap ratios between patches for each scaled image in `scaled_images_ratios`.\n scaled_images_feature_dims (`List[int]`, *optional*, defaults to `[1024, 1024, 512]`):\n Hidden state dimensions during upsampling for each scaled image in `scaled_images_ratios`.\n merge_padding_value (`int`, *optional*, defaults to 3):\n When merging smaller patches back to the image size, overlapping sections of this size are removed.\n use_batch_norm_in_fusion_residual (`bool`, *optional*, defaults to `False`):\n Whether to use batch normalization in the pre-activate residual units of the fusion blocks.\n use_bias_in_fusion_residual (`bool`, *optional*, defaults to `True`):\n Whether to use bias in the pre-activate residual units of the fusion blocks.\n use_fov_model (`bool`, *optional*, defaults to `False`):\n Whether to use `DepthProFovModel` to generate the field of view.\n num_fov_head_layers (`int`, *optional*, defaults to 2):\n Number of convolution layers in the head of `DepthProFovModel`.\n image_model_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*):\n The configuration of the image encoder model, which is loaded using the [`AutoModel`] API.\n By default, Dinov2 model is used as backbone.\n patch_model_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*):\n The configuration of the patch encoder model, which is loaded using the [`AutoModel`] API.\n By default, Dinov2 model is used as backbone.\n fov_model_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*):\n The configuration of the fov encoder model, which is loaded using the [`AutoModel`] API.\n By default, Dinov2 model is used as backbone.\n\nExample:\n\n```python\n>>> from transformers import DepthProConfig, DepthProModel\n\n>>> # Initializing a DepthPro apple/DepthPro style configuration\n>>> configuration = DepthProConfig()\n\n>>> # Initializing a model (with random weights) from the apple/DepthPro style configuration\n>>> model = DepthProModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is None:\n return [1] + [0] * len(token_ids_0) + [1]\n return [1] + 
[0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token."} +{"repo": "tensorflow", "function": "class AveragePooling1D(keras_layers.AveragePooling1D, base.Layer):\n\n def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n if strides is None:\n raise ValueError('Argument `strides` must not be None.')\n super(AveragePooling1D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)", "docstring": "Average Pooling layer for 1D inputs.\n\nArgs:\n pool_size: An integer or tuple/list of a single integer,\n representing the size of the pooling window.\n strides: An integer or tuple/list of a single integer, specifying the\n strides of the pooling operation.\n padding: A string. The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, length)`.\n name: A string, the name of the layer."} +{"repo": "tensorflow", "function": "def compute_gradient_error(x, x_shape, y, y_shape, x_init_value=None, delta=0.001, init_targets=None, extra_feed_dict=None):\n grad = compute_gradient(x, x_shape, y, y_shape, x_init_value, delta, init_targets, extra_feed_dict=extra_feed_dict)\n return _compute_error(grad)", "docstring": "Computes the gradient error.\n\nComputes the maximum error for dy/dx between the computed Jacobian and the\nnumerically estimated Jacobian.\n\nThis function will modify the tensors passed in as it adds more operations\nand hence changing the consumers of the operations of the input tensors.\n\nThis function adds operations to the current session. To compute the error\nusing a particular device, such as a GPU, use the standard methods for\nsetting a device (e.g. using with sess.graph.device() or setting a device\nfunction in the session constructor).\n\nArgs:\n x: a tensor or list of tensors\n x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,\n then this is the list of shapes.\n y: a tensor\n y_shape: the dimensions of y as a tuple or an array of ints.\n x_init_value: (optional) a numpy array of the same shape as \"x\"\n representing the initial value of x. If x is a list, this should be a list\n of numpy arrays. 
If this is none, the function will pick a random tensor\n as the initial value.\n delta: (optional) the amount of perturbation.\n init_targets: list of targets to run to initialize model params.\n extra_feed_dict: dict that allows fixing specified tensor values\n during the Jacobian calculation.\n\nReturns:\n The maximum error in between the two Jacobians."} +{"repo": "tensorflow", "function": "def start(logdir, options=None):\n global _profiler\n with _profiler_lock:\n if _profiler is not None:\n raise errors.AlreadyExistsError(None, None, 'Another profiler is running.')\n _profiler = _pywrap_profiler.ProfilerSession()\n try:\n opts = dict(options._asdict()) if options is not None else {}\n _profiler.start(logdir, opts)\n except errors.AlreadyExistsError:\n logging.warning('Another profiler session is running which is probably created by profiler server. Please avoid using profiler server and profiler APIs at the same time.')\n raise errors.AlreadyExistsError(None, None, 'Another profiler is running.')\n except Exception:\n _profiler = None\n raise", "docstring": "Start profiling TensorFlow performance.\n\nArgs:\n logdir: Profiling results log directory.\n options: `ProfilerOptions` namedtuple to specify miscellaneous profiler\n options. See example usage below.\n\nRaises:\n AlreadyExistsError: If a profiling session is already running.\n\nExample usage:\n```python\noptions = tf.profiler.experimental.ProfilerOptions(host_tracer_level = 3,\n python_tracer_level = 1,\n device_tracer_level = 1)\ntf.profiler.experimental.start('logdir_path', options = options)\n# Training code here\ntf.profiler.experimental.stop()\n```\n\nTo view the profiling results, launch TensorBoard and point it to `logdir`.\nOpen your browser and go to `localhost:6006/#profile` to view profiling\nresults."} +{"repo": "transformers", "function": "class Blip2VisionModelOutput(ModelOutput):\n image_embeds: Optional[torch.FloatTensor] = None\n last_hidden_state: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.\n\nArgs:\n image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):\n The image embeddings obtained by applying the projection layer to the pooler_output.\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "beam", "function": "class 
AssertEqual(beam.PTransform):\n\n def __init__(self, elements: Iterable[Any]):\n self._elements = elements\n\n def expand(self, pcoll):\n return assert_that(pcoll | beam.Map(lambda row: beam.Row(**row._asdict())), equal_to(dicts_to_rows(self._elements)))", "docstring": "Asserts that the input contains exactly the elements provided.\n\nThis is primarily used for testing; it will cause the entire pipeline to\nfail if the input to this transform is not exactly the set of `elements`\ngiven in the config parameter.\n\nAs with Create, YAML/JSON-style mappings are interpreted as Beam rows,\ne.g.::\n\n type: AssertEqual\n input: SomeTransform\n config:\n elements:\n - {a: 0, b: \"foo\"}\n - {a: 1, b: \"bar\"}\n\nwould ensure that `SomeTransform` produced exactly two elements with values\n`(a=0, b=\"foo\")` and `(a=1, b=\"bar\")` respectively.\n\nArgs:\n elements: The set of elements that should belong to the PCollection.\n YAML/JSON-style mappings will be interpreted as Beam rows."} +{"repo": "beam", "function": "def __init__(self, model_uri: str, model_type: ModelType=ModelType.SAVED_MODEL, create_model_fn: Optional[Callable]=None, *, load_model_args: Optional[dict[str, Any]]=None, custom_weights: str='', inference_fn: TensorInferenceFn=default_numpy_inference_fn, min_batch_size: Optional[int]=None, max_batch_size: Optional[int]=None, max_batch_duration_secs: Optional[int]=None, large_model: bool=False, model_copies: Optional[int]=None, **kwargs):\n self._model_uri = model_uri\n self._model_type = model_type\n self._inference_fn = inference_fn\n self._create_model_fn = create_model_fn\n self._env_vars = kwargs.get('env_vars', {})\n self._load_model_args = {} if not load_model_args else load_model_args\n self._custom_weights = custom_weights\n self._batching_kwargs = {}\n if min_batch_size is not None:\n self._batching_kwargs['min_batch_size'] = min_batch_size\n if max_batch_size is not None:\n self._batching_kwargs['max_batch_size'] = max_batch_size\n if max_batch_duration_secs is not None:\n self._batching_kwargs['max_batch_duration_secs'] = max_batch_duration_secs\n self._share_across_processes = large_model or model_copies is not None\n self._model_copies = model_copies or 1", "docstring": "Implementation of the ModelHandler interface for Tensorflow.\n\nExample Usage::\n\n pcoll | RunInference(TFModelHandlerNumpy(model_uri=\"my_uri\"))\n\nSee https://www.tensorflow.org/tutorials/keras/save_and_load for details.\n\nArgs:\n model_uri (str): path to the trained model.\n model_type: type of model to be loaded. Defaults to SAVED_MODEL.\n create_model_fn: a function that creates and returns a new\n tensorflow model to load the saved weights.\n It should be used with ModelType.SAVED_WEIGHTS.\n load_model_args: a dictionary of parameters to pass to the load_model\n function of TensorFlow to specify custom config.\n custom_weights (str): path to the custom weights to be applied\n once the model is loaded.\n inference_fn: inference function to use during RunInference.\n Defaults to default_numpy_inference_fn.\n large_model: set to true if your model is large enough to run into\n memory pressure if you load multiple copies. Given a model that\n consumes N memory and a machine with W cores and M memory, you should\n set this to True if N*W > M.\n model_copies: The exact number of models that you would like loaded\n onto your machine. 
This can be useful if you exactly know your CPU or\n GPU capacity and want to maximize resource utilization.\n kwargs: 'env_vars' can be used to set environment variables\n before loading the model.\n\n**Supported Versions:** RunInference APIs in Apache Beam have been tested\nwith Tensorflow 2.9, 2.10, 2.11."} +{"repo": "keras", "function": "def svd(x, full_matrices=True, compute_uv=True):\n if any_symbolic_tensors((x,)):\n return SVD(full_matrices, compute_uv).symbolic_call(x)\n return _svd(x, full_matrices, compute_uv)", "docstring": "Computes the singular value decomposition of a matrix.\n\nArgs:\n x: Input tensor of shape `(..., M, N)`.\n\nReturns:\n A tuple of three tensors: a tensor of shape `(..., M, M)` containing the\n left singular vectors, a tensor of shape `(..., M, N)` containing the\n singular values and a tensor of shape `(..., N, N)` containing the\n right singular vectors."} +{"repo": "tensorflow", "function": "def _gather_saveables_for_checkpoint(self):\n return getattr(self, '_self_saveable_object_factories', {})", "docstring": "Returns a dictionary of values to checkpoint with this object.\n\nNOTE: This method is deprecated, prefer implementing `_serialize_to_tensors`\nand `_restore_from_tensors` instead. This method is only used in the\ndeprecated `tf.compat.v1.train.Saver`.\n\nKeys in the returned dictionary are local to this object and in a separate\nnamespace from dependencies. Values may either be `SaveableObject` factories\nor variables easily converted to `SaveableObject`s (as in\n`tf.compat.v1.train.Saver`'s\n`var_list` constructor argument).\n\n`SaveableObjects` have a name set, which Trackable needs to generate\nitself. So rather than returning `SaveableObjects` directly, this method\nshould return a dictionary of callables which take `name` arguments and\nreturn `SaveableObjects` with that name.\n\nIf this object may also be passed to the global-name-based\n`tf.compat.v1.train.Saver`,\nthe returned callables should have a default value for their name argument\n(i.e. be callable with no arguments).\n\nReturned values must be saved only by this object; if any value may be\nshared, it should instead be a dependency. For example, variable objects\nsave their own values with the key `VARIABLE_VALUE_KEY`, but objects which\nreference variables simply add a dependency.\n\n**AsyncCheckpoint Support**\nIf your Trackable implements `_gather_saveables_for_checkpoint`,\n`_copy_trackable_to_cpu` needs to be implemented as well to support\nasynchronous checkpoint.\n\nReturns:\n The dictionary mapping attribute names to `SaveableObject` factories\n described above. 
For example:\n {VARIABLE_VALUE_KEY:\n lambda name=\"global_name_for_this_object\":\n SaveableObject(name=name, ...)}"} +{"repo": "transformers", "function": "class RealmScorer(RealmPreTrainedModel):\n\n def __init__(self, config, query_embedder=None):\n super().__init__(config)\n self.embedder = RealmEmbedder(self.config)\n self.query_embedder = query_embedder if query_embedder is not None else self.embedder\n self.post_init()\n\n @add_start_docstrings_to_model_forward(REALM_INPUTS_DOCSTRING.format('batch_size, sequence_length'))\n @replace_return_docstrings(output_type=RealmScorerOutput, config_class=_CONFIG_FOR_DOC)\n def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, candidate_input_ids: Optional[torch.LongTensor]=None, candidate_attention_mask: Optional[torch.FloatTensor]=None, candidate_token_type_ids: Optional[torch.LongTensor]=None, candidate_inputs_embeds: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, RealmScorerOutput]:\n \"\"\"\n candidate_input_ids (`torch.LongTensor` of shape `(batch_size, num_candidates, sequence_length)`):\n Indices of candidate input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n candidate_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_candidates, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n candidate_token_type_ids (`torch.LongTensor` of shape `(batch_size, num_candidates, sequence_length)`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n [What are token type IDs?](../glossary#token-type-ids)\n candidate_inputs_embeds (`torch.FloatTensor` of shape `(batch_size * num_candidates, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `candidate_input_ids` you can choose to directly pass an embedded\n representation. 
This is useful if you want more control over how to convert *candidate_input_ids* indices\n into associated vectors than the model's internal embedding lookup matrix.\n\n Returns:\n\n Example:\n\n ```python\n >>> import torch\n >>> from transformers import AutoTokenizer, RealmScorer\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"google/realm-cc-news-pretrained-scorer\")\n >>> model = RealmScorer.from_pretrained(\"google/realm-cc-news-pretrained-scorer\", num_candidates=2)\n\n >>> # batch_size = 2, num_candidates = 2\n >>> input_texts = [\"How are you?\", \"What is the item in the picture?\"]\n >>> candidates_texts = [[\"Hello world!\", \"Nice to meet you!\"], [\"A cute cat.\", \"An adorable dog.\"]]\n\n >>> inputs = tokenizer(input_texts, return_tensors=\"pt\")\n >>> candidates_inputs = tokenizer.batch_encode_candidates(candidates_texts, max_length=10, return_tensors=\"pt\")\n\n >>> outputs = model(\n ... **inputs,\n ... candidate_input_ids=candidates_inputs.input_ids,\n ... candidate_attention_mask=candidates_inputs.attention_mask,\n ... candidate_token_type_ids=candidates_inputs.token_type_ids,\n ... )\n >>> relevance_score = outputs.relevance_score\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if input_ids is None and inputs_embeds is None:\n raise ValueError('You have to specify either input_ids or input_embeds.')\n if candidate_input_ids is None and candidate_inputs_embeds is None:\n raise ValueError('You have to specify either candidate_input_ids or candidate_inputs_embeds.')\n query_outputs = self.query_embedder(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n flattened_input_ids, flattened_attention_mask, flattened_token_type_ids = self._flatten_inputs(candidate_input_ids, candidate_attention_mask, candidate_token_type_ids)\n candidate_outputs = self.embedder(flattened_input_ids, attention_mask=flattened_attention_mask, token_type_ids=flattened_token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=candidate_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n query_score = query_outputs[0]\n candidate_score = candidate_outputs[0]\n candidate_score = candidate_score.view(-1, self.config.num_candidates, self.config.retriever_proj_size)\n relevance_score = torch.einsum('bd,bnd->bn', query_score, candidate_score)\n if not return_dict:\n return (relevance_score, query_score, candidate_score)\n return RealmScorerOutput(relevance_score=relevance_score, query_score=query_score, candidate_score=candidate_score)", "docstring": "Args:\n query_embedder ([`RealmEmbedder`]):\n Embedder for input sequences. 
If not specified, it will use the same embedder as candidate sequences."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, reference_points=None, spatial_shapes=None, spatial_shapes_list=None, level_start_index=None, output_attentions: bool=False):\n residual = hidden_states\n hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, spatial_shapes_list=spatial_shapes_list, level_start_index=level_start_index, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n if self.training:\n if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (attn_weights,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Input to the layer.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Attention mask.\n position_embeddings (`torch.FloatTensor`, *optional*):\n Position embeddings, to be added to `hidden_states`.\n reference_points (`torch.FloatTensor`, *optional*):\n Reference points.\n spatial_shapes (`torch.LongTensor`, *optional*):\n Spatial shapes of the backbone feature maps.\n level_start_index (`torch.LongTensor`, *optional*):\n Level start index.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail."} +{"repo": "tensorflow", "function": "def _copy_assets(src_path: str, dst_path: str) -> None:\n for assets_dir_name in [_ASSETS_DIR, _ASSETS_EXTRA_DIR]:\n src_assets_path = file_io.join(src_path, assets_dir_name)\n if not file_io.file_exists_v2(src_assets_path):\n continue\n dst_assets_path = file_io.join(dst_path, assets_dir_name)\n file_io.create_dir_v2(dst_assets_path)\n for curr_dir, _, files in file_io.walk_v2(src_assets_path):\n for asset_file_name in files:\n src_asset_file = file_io.join(curr_dir, asset_file_name)\n curr_dst_dir = curr_dir.replace(src_assets_path, dst_assets_path)\n dst_asset_file = file_io.join(curr_dst_dir, asset_file_name)\n file_io.copy_v2(src_asset_file, dst_asset_file)\n logging.info('Copied asset file: %s -> %s', src_asset_file, dst_asset_file)", "docstring": "Copies the assets directory of the saved model.\n\nClones the contents of the assets/ directory from the source saved model\ndirectory to the destination saved model directory. 
Nothing will be copied if\nthere are no assets directory in the source directory.\n\nArgs:\n src_path: Source saved model directory.\n dst_path: Destination saved model directory. This directory must exist."} +{"repo": "fhir-py", "function": "def create_database_view(self, view: views.View, view_name: str) -> None:\n view_sql = f'CREATE OR REPLACE VIEW {self._view_dataset}.{view_name} AS\\n{self.to_sql(view)}'\n self._engine.execute(view_sql).fetchall()", "docstring": "Creates a Spark view with the given name in the runner's view_dataset.\n\nArgs:\n view: the FHIR view that creates\n view_name: the view name passed to the CREATE OR REPLACE VIEW statement."} +{"repo": "transformers", "function": "def from_encoder_decoder_pretrained(cls, encoder_pretrained_model_name_or_path: Optional[str]=None, decoder_pretrained_model_name_or_path: Optional[str]=None, *model_args, **kwargs) -> PreTrainedModel:\n kwargs_encoder = {argument[len('encoder_'):]: value for argument, value in kwargs.items() if argument.startswith('encoder_')}\n kwargs_decoder = {argument[len('decoder_'):]: value for argument, value in kwargs.items() if argument.startswith('decoder_')}\n for key in kwargs_encoder.keys():\n del kwargs['encoder_' + key]\n for key in kwargs_decoder.keys():\n del kwargs['decoder_' + key]\n encoder = kwargs_encoder.pop('model', None)\n if encoder is None:\n if encoder_pretrained_model_name_or_path is None:\n raise ValueError('If `encoder_model` is not defined as an argument, a `encoder_pretrained_model_name_or_path` has to be defined.')\n if 'config' not in kwargs_encoder:\n encoder_config, kwargs_encoder = AutoConfig.from_pretrained(encoder_pretrained_model_name_or_path, **kwargs_encoder, return_unused_kwargs=True)\n if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:\n logger.info(f'Initializing {encoder_pretrained_model_name_or_path} as a encoder model from a decoder model. Cross-attention and casual mask are disabled.')\n encoder_config.is_decoder = False\n encoder_config.add_cross_attention = False\n kwargs_encoder['config'] = encoder_config\n encoder = AutoModel.from_pretrained(encoder_pretrained_model_name_or_path, *model_args, **kwargs_encoder)\n decoder = kwargs_decoder.pop('model', None)\n if decoder is None:\n if decoder_pretrained_model_name_or_path is None:\n raise ValueError('If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has to be defined.')\n if 'config' not in kwargs_decoder:\n decoder_config, kwargs_decoder = AutoConfig.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True)\n if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:\n logger.info(f\"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers.\")\n decoder_config.is_decoder = True\n decoder_config.add_cross_attention = True\n kwargs_decoder['config'] = decoder_config\n if kwargs_decoder['config'].is_decoder is False or kwargs_decoder['config'].add_cross_attention is False:\n logger.warning(f'Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. 
In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to `.from_encoder_decoder_pretrained(...)` are set to `True` or do not pass a `decoder_config` to `.from_encoder_decoder_pretrained(...)`')\n decoder = AutoModelForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)\n config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder.config, decoder.config, **kwargs)\n config.tie_word_embeddings = False\n return cls(encoder=encoder, decoder=decoder, config=config)", "docstring": "Instantiate an encoder and a decoder from one or two base classes of the library from pretrained model\ncheckpoints.\n\n\nThe model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train\nthe model, you need to first set it back in training mode with `model.train()`.\n\nParams:\n encoder_pretrained_model_name_or_path (`str`, *optional*):\n Information necessary to initiate the image encoder. Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co. An\n example is `google/vit-base-patch16-224-in21k`.\n - A path to a *directory* containing model weights saved using\n [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.\n - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In\n this case, `from_tf` should be set to `True` and a configuration object should be provided as\n `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a\n PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):\n Information necessary to initiate the text decoder. Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n - A path to a *directory* containing model weights saved using\n [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.\n - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In\n this case, `from_tf` should be set to `True` and a configuration object should be provided as\n `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a\n PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n\n model_args (remaining positional arguments, *optional*):\n All remaining positional arguments will be passed to the underlying model's `__init__` method.\n\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,\n `output_attentions=True`).\n\n - To update the encoder configuration, use the prefix *encoder_* for each configuration parameter.\n - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.\n - To update the parent model configuration, do not use a prefix for each configuration parameter.\n\n Behaves differently depending on whether a `config` is provided or automatically loaded.\n\nExample:\n\n```python\n>>> from transformers import VisionEncoderDecoderModel\n\n>>> # initialize a vit-bert from a pretrained ViT and a pretrained BERT model. 
Note that the cross-attention layers will be randomly initialized\n>>> model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(\n... \"google/vit-base-patch16-224-in21k\", \"google-bert/bert-base-uncased\"\n... )\n>>> # saving model after fine-tuning\n>>> model.save_pretrained(\"./vit-bert\")\n>>> # load fine-tuned model\n>>> model = VisionEncoderDecoderModel.from_pretrained(\"./vit-bert\")\n```"} +{"repo": "tensorflow", "function": "def forward_log_det_jacobian(self, x, event_ndims, name='forward_log_det_jacobian'):\n return self._call_forward_log_det_jacobian(x, event_ndims, name)", "docstring": "Returns both the forward_log_det_jacobian.\n\nArgs:\n x: `Tensor`. The input to the \"forward\" Jacobian determinant evaluation.\n event_ndims: Number of dimensions in the probabilistic events being\n transformed. Must be greater than or equal to\n `self.forward_min_event_ndims`. The result is summed over the final\n dimensions to produce a scalar Jacobian determinant for each event,\n i.e. it has shape `x.shape.ndims - event_ndims` dimensions.\n name: The name to give this op.\n\nReturns:\n `Tensor`, if this bijector is injective.\n If not injective this is not implemented.\n\nRaises:\n TypeError: if `self.dtype` is specified and `y.dtype` is not\n `self.dtype`.\n NotImplementedError: if neither `_forward_log_det_jacobian`\n nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented, or\n this is a non-injective bijector."} +{"repo": "keras", "function": "def list_devices(device_type=None):\n device_type = device_type.upper() if device_type else None\n tf_devices = tf.config.list_logical_devices(device_type=device_type)\n cpu_devices = []\n other_devices = []\n for device in tf_devices:\n if device.device_type.lower() == 'cpu':\n cpu_devices.append(device)\n else:\n other_devices.append(device)\n if device_type is None:\n tf_devices = other_devices if len(other_devices) > 0 else cpu_devices\n return [f'{device.device_type.lower()}:{device.name.split(':')[-1]}' for device in tf_devices]", "docstring": "Return all the available devices based on the device type.\n\nNote that this should return the global devices in a distributed setting.\n\nArgs:\n device_type: string of `\"cpu\"`, `\"gpu\"` or `\"tpu\"`. Default to `gpu` or\n `tpu` if available when device_type is not provided. Otherwise will\n return the `cpu` devices.\n\nReturn:\n List of devices that are available for distribute computation."} +{"repo": "tensorflow", "function": "def _build_advisor_options(options):\n opts = tfprof_options_pb2.AdvisorOptionsProto()\n if options is None:\n return opts\n for checker, checker_opts in options.items():\n checker_ops_pb = tfprof_options_pb2.AdvisorOptionsProto.CheckerOption()\n for k, v in checker_opts.items():\n checker_ops_pb[k] = v\n opts.checkers[checker].MergeFrom(checker_ops_pb)\n return opts", "docstring": "Build tfprof.AdvisorOptionsProto.\n\nArgs:\n options: A dictionary of options. See ALL_ADVICE example.\n\nReturns:\n tfprof.AdvisorOptionsProto."} +{"repo": "beam", "function": "def assert_sources_equal_reference_source(reference_source_info, sources_info):\n if not (isinstance(reference_source_info, tuple) and len(reference_source_info) == 3 and isinstance(reference_source_info[0], iobase.BoundedSource)):\n raise ValueError('reference_source_info must a three-tuple where firstitem of the tuple gives a iobase.BoundedSource. 
Received: %r' % reference_source_info)\n reference_records = read_from_source(*reference_source_info)\n source_records = []\n for source_info in sources_info:\n assert isinstance(source_info, tuple)\n assert len(source_info) == 3\n if not (isinstance(source_info, tuple) and len(source_info) == 3 and isinstance(source_info[0], iobase.BoundedSource)):\n raise ValueError('source_info must a three tuple where firstitem of the tuple gives a iobase.BoundedSource. Received: %r' % source_info)\n if type(reference_source_info[0].default_output_coder()) != type(source_info[0].default_output_coder()):\n raise ValueError('Reference source %r and the source %r must use the same coder. They are using %r and %r respectively instead.' % (reference_source_info[0], source_info[0], type(reference_source_info[0].default_output_coder()), type(source_info[0].default_output_coder())))\n source_records.extend(read_from_source(*source_info))\n if len(reference_records) != len(source_records):\n raise ValueError('Reference source must produce the same number of records as the list of sources. Number of records were %d and %d instead.' % (len(reference_records), len(source_records)))\n if equal_to(reference_records)(source_records):\n raise ValueError('Reference source and provided list of sources must produce the same set of records.')", "docstring": "Tests if a reference source is equal to a given set of sources.\n\nGiven a reference source (a :class:`~apache_beam.io.iobase.BoundedSource`\nand a position range) and a list of sources, assert that the union of the\nrecords read from the list of sources is equal to the records read from the\nreference source.\n\nArgs:\n reference_source_info (Tuple[~apache_beam.io.iobase.BoundedSource, int, int]):\n a three-tuple that gives the reference\n :class:`~apache_beam.io.iobase.BoundedSource`, position to start\n reading at, and position to stop reading at.\n sources_info (Iterable[Tuple[~apache_beam.io.iobase.BoundedSource, int, int]]):\n a set of sources. Each source is a three-tuple that is of the same\n format described above.\n\nRaises:\n ValueError: if the set of data produced by the reference source\n and the given set of sources are not equivalent."} +{"repo": "tensorflow", "function": "def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):\n del weight_collections\n del trainable\n if isinstance(self.categorical_column, _SequenceCategoricalColumn):\n raise ValueError('In indicator_column: {}. categorical_column must not be of type _SequenceCategoricalColumn. Suggested fix A: If you wish to use input_layer, use a non-sequence categorical_column_with_*. Suggested fix B: If you wish to create sequence input, use sequence_input_layer instead of input_layer. 
Given (type {}): {}'.format(self.name, type(self.categorical_column), self.categorical_column))\n return inputs.get(self)", "docstring": "Returns dense `Tensor` representing feature.\n\nArgs:\n inputs: A `_LazyBuilder` object to access inputs.\n weight_collections: Unused `weight_collections` since no variables are\n created in this function.\n trainable: Unused `trainable` bool since no variables are created in this\n function.\n\nReturns:\n Dense `Tensor` created within `_transform_feature`.\n\nRaises:\n ValueError: If `categorical_column` is a `_SequenceCategoricalColumn`."} +{"repo": "transformers", "function": "def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):\n use_auth_token = kwargs.pop('use_auth_token', None)\n if use_auth_token is not None:\n warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n if kwargs.get('token', None) is not None:\n raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')\n kwargs['token'] = use_auth_token\n config = kwargs.pop('config', None)\n use_fast = kwargs.pop('use_fast', None)\n trust_remote_code = kwargs.pop('trust_remote_code', None)\n kwargs['_from_auto'] = True\n if 'image_processor_filename' in kwargs:\n image_processor_filename = kwargs.pop('image_processor_filename')\n elif is_timm_local_checkpoint(pretrained_model_name_or_path):\n image_processor_filename = CONFIG_NAME\n else:\n image_processor_filename = IMAGE_PROCESSOR_NAME\n try:\n config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, image_processor_filename=image_processor_filename, **kwargs)\n except Exception as initial_exception:\n try:\n config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, image_processor_filename=CONFIG_NAME, **kwargs)\n except Exception:\n raise initial_exception\n if not is_timm_config_dict(config_dict):\n raise initial_exception\n image_processor_type = config_dict.get('image_processor_type', None)\n image_processor_auto_map = None\n if 'AutoImageProcessor' in config_dict.get('auto_map', {}):\n image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']\n if image_processor_type is None and image_processor_auto_map is None:\n feature_extractor_class = config_dict.pop('feature_extractor_type', None)\n if feature_extractor_class is not None:\n image_processor_type = feature_extractor_class.replace('FeatureExtractor', 'ImageProcessor')\n if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):\n feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']\n image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor', 'ImageProcessor')\n if image_processor_type is None and image_processor_auto_map is None:\n if not isinstance(config, PretrainedConfig):\n config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)\n image_processor_type = getattr(config, 'image_processor_type', None)\n if hasattr(config, 'auto_map') and 'AutoImageProcessor' in config.auto_map:\n image_processor_auto_map = config.auto_map['AutoImageProcessor']\n image_processor_class = None\n if image_processor_type is not None:\n if use_fast is None:\n use_fast = image_processor_type.endswith('Fast')\n if not use_fast:\n logger.warning_once(\"Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. 
`use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.\")\n if use_fast and (not image_processor_type.endswith('Fast')):\n image_processor_type += 'Fast'\n if use_fast and (not is_torchvision_available()):\n image_processor_class = get_image_processor_class_from_name(image_processor_type[:-4])\n if image_processor_class is None:\n raise ValueError(f'`{image_processor_type}` requires `torchvision` to be installed. Please install `torchvision` and try again.')\n logger.warning_once('Using `use_fast=True` but `torchvision` is not available. Falling back to the slow image processor.')\n use_fast = False\n if use_fast:\n for _, image_processors in IMAGE_PROCESSOR_MAPPING_NAMES.items():\n if image_processor_type in image_processors:\n break\n else:\n image_processor_type = image_processor_type[:-4]\n use_fast = False\n logger.warning_once('`use_fast` is set to `True` but the image processor class does not have a fast version. Falling back to the slow version.')\n image_processor_class = get_image_processor_class_from_name(image_processor_type)\n else:\n image_processor_type_slow = image_processor_type[:-4] if image_processor_type.endswith('Fast') else image_processor_type\n image_processor_class = get_image_processor_class_from_name(image_processor_type_slow)\n if image_processor_class is None and image_processor_type.endswith('Fast'):\n raise ValueError(f'`{image_processor_type}` does not have a slow version. Please set `use_fast=True` when instantiating the processor.')\n has_remote_code = image_processor_auto_map is not None\n has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING\n if has_remote_code:\n if image_processor_auto_map is not None and (not isinstance(image_processor_auto_map, tuple)):\n image_processor_auto_map = (image_processor_auto_map, None)\n if use_fast and image_processor_auto_map[1] is not None:\n class_ref = image_processor_auto_map[1]\n else:\n class_ref = image_processor_auto_map[0]\n if '--' in class_ref:\n upstream_repo = class_ref.split('--')[0]\n else:\n upstream_repo = None\n trust_remote_code = resolve_trust_remote_code(trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo)\n if has_remote_code and trust_remote_code:\n if not use_fast and image_processor_auto_map[1] is not None:\n _warning_fast_image_processor_available(image_processor_auto_map[1])\n image_processor_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)\n _ = kwargs.pop('code_revision', None)\n image_processor_class.register_for_auto_class()\n return image_processor_class.from_dict(config_dict, **kwargs)\n elif image_processor_class is not None:\n return image_processor_class.from_dict(config_dict, **kwargs)\n elif type(config) in IMAGE_PROCESSOR_MAPPING:\n image_processor_tuple = IMAGE_PROCESSOR_MAPPING[type(config)]\n image_processor_class_py, image_processor_class_fast = image_processor_tuple\n if not use_fast and image_processor_class_fast is not None:\n _warning_fast_image_processor_available(image_processor_class_fast)\n if image_processor_class_fast and (use_fast or image_processor_class_py is None):\n return image_processor_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n elif image_processor_class_py is not None:\n return
image_processor_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)\n else:\n raise ValueError('This image processor cannot be instantiated. Please make sure you have `Pillow` installed.')\n raise ValueError(f'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a `image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following `model_type` keys in its {CONFIG_NAME}: {', '.join((c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys()))}')", "docstring": "Instantiate one of the image processor classes of the library from a pretrained model vocabulary.\n\nThe image processor class to instantiate is selected based on the `model_type` property of the config object\n(either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's\nmissing, by falling back to using pattern matching on `pretrained_model_name_or_path`:\n\nList options\n\nParams:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n This can be either:\n\n - a string, the *model id* of a pretrained image_processor hosted inside a model repo on\n huggingface.co.\n - a path to a *directory* containing a image processor file saved using the\n [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g.,\n `./my_model_directory/`.\n - a path or url to a saved image processor JSON *file*, e.g.,\n `./my_model_directory/preprocessor_config.json`.\n cache_dir (`str` or `os.PathLike`, *optional*):\n Path to a directory in which a downloaded pretrained model image processor should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force to (re-)download the image processor files and override the cached versions if\n they exist.\n resume_download:\n Deprecated and ignored. All downloads are now resumed by default when possible.\n Will be removed in v5 of Transformers.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.\n token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `huggingface-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n use_fast (`bool`, *optional*, defaults to `False`):\n Use a fast torchvision-base image processor if it is supported for a given model.\n If a fast image processor is not available for a given model, a normal numpy-based image processor\n is returned instead.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n If `False`, then this function returns just the final image processor object. 
If `True`, then this\n functions returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary\n consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of\n `kwargs` which has not been used to update `image_processor` and is otherwise ignored.\n trust_remote_code (`bool`, *optional*, defaults to `False`):\n Whether or not to allow for custom models defined on the Hub in their own modeling files. This option\n should only be set to `True` for repositories you trust and in which you have read the code, as it will\n execute code present on the Hub on your local machine.\n image_processor_filename (`str`, *optional*, defaults to `\"config.json\"`):\n The name of the file in the model directory to use for the image processor config.\n kwargs (`Dict[str, Any]`, *optional*):\n The values in kwargs of any keys which are image processor attributes will be used to override the\n loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is\n controlled by the `return_unused_kwargs` keyword parameter.\n\n\n\nPassing `token=True` is required when you want to use a private model.\n\n\n\nExamples:\n\n```python\n>>> from transformers import AutoImageProcessor\n\n>>> # Download image processor from huggingface.co and cache.\n>>> image_processor = AutoImageProcessor.from_pretrained(\"google/vit-base-patch16-224-in21k\")\n\n>>> # If image processor files are in a directory (e.g. image processor was saved using *save_pretrained('./test/saved_model/')*)\n>>> # image_processor = AutoImageProcessor.from_pretrained(\"./test/saved_model/\")\n```"} +{"repo": "tensorflow", "function": "def add_to_graph(self, g=None, overwrite=False):\n if not context.executing_eagerly() and (not g):\n g = ops.get_default_graph()\n if g is not None:\n g._add_function_recursive(self._delayed_rewrite_functions.forward())", "docstring": "Registers the function, adds it to the graph g or default graph.\n\nArgs:\n g: If specified, registers the function with this graph. Defaults to the\n current context (either the default graph or the eager context).\n overwrite: A bool. 
If True, its forward function will overwrite\n any existing function of the same signature name in the graph `g`."} +{"repo": "pytype", "function": "def _abstractify_value(val: '_instances.ConcreteValue', ctx: 'context.Context', seen: 'set[_base.BaseValue] | None'=None) -> '_instances.ConcreteValue':\n if seen is None:\n seen = set()\n if not val.is_concrete or val in seen:\n return val\n seen = seen | {val}\n if not isinstance(val.pyval, (list, tuple)):\n return ctx.convert.get_maybe_abstract_instance(val)\n new_content = []\n for elem in val.pyval:\n new_elem_data = [_abstractify_value(v, ctx, seen) for v in elem.data]\n if any((v != new_v for v, new_v in zip(elem.data, new_elem_data))):\n new_elem = ctx.program.NewVariable()\n for b, new_data in zip(elem.bindings, new_elem_data):\n new_elem.PasteBindingWithNewData(b, new_data)\n new_content.append(new_elem)\n else:\n new_content.append(elem)\n if any((elem != new_elem for elem, new_elem in zip(val.pyval, new_content))):\n return type(val)(type(val.pyval)(new_content), ctx)\n else:\n return val", "docstring": "Converts a maybe-abstract value to a concrete one.\n\nArgs:\n val: A value.\n ctx: The context.\n seen: Optionally, a seen values set.\n\nUnlike ctx.convert.get_maybe_abstract_instance, this method recursively\ndescends into lists and tuples.\n\nReturns:\n A concrete value."} +{"repo": "pyglove", "function": "def format(self, compact: bool=False, verbose: bool=True, root_indent: int=0, **kwargs) -> str:", "docstring": "Formats this object into a string representation.\n\nArgs:\n compact: If True, this object will be formatted into a single line.\n verbose: If True, this object will be formatted with verbosity.\n Subclasses should define `verbosity` on their own.\n root_indent: The start indent level for this object if the output is a\n multi-line string.\n **kwargs: Subclass specific keyword arguments.\n\nReturns:\n A string of formatted object."} +{"repo": "tensorflow", "function": "def _count_op_with_name_and_attribute(self, nodes: Iterable[node_def_pb2.NodeDef], op_name: str, attr_name: str, attr_val: _AttrValType, get_op_name: bool=False) -> int:\n if get_op_name:\n return len([node.attr.get(attr_name) == attr_val for node in nodes if node.name == op_name])\n else:\n return len([node.attr.get(attr_name) == attr_val for node in nodes if node.op == op_name])", "docstring": "Determine the number of nodes whose operation name matches `op_name`.\n\nIf `attr_name` is given, additionally check if the `attr_val` matches with\nthe attribute value of the op.\n\nArgs:\n nodes: Iterable of NodeDefs.\n op_name: Name of the op to match.\n attr_name: Name of the attribute of the op to match.\n attr_val: Value of the attr_name to check.\n get_op_name: If set True, checks node.name rather than node.op.\n\nReturns:\n The number of occurrences of nodes whose name match `op_name` and\n 'attr_val' if 'attr_name' is given."} +{"repo": "tensorflow", "function": "def pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width):\n return pad_to_bounding_box_internal(image, offset_height, offset_width, target_height, target_width, check_dims=True)", "docstring": "Pad `image` with zeros to the specified `height` and `width`.\n\nAdds `offset_height` rows of zeros on top, `offset_width` columns of\nzeros on the left, and then pads the image on the bottom and right\nwith zeros until it has dimensions `target_height`, `target_width`.\n\nThis op does nothing if `offset_*` is zero and the image already has size\n`target_height` by
`target_width`.\n\nUsage Example:\n\n>>> x = [[[1., 2., 3.],\n... [4., 5., 6.]],\n... [[7., 8., 9.],\n... [10., 11., 12.]]]\n>>> padded_image = tf.image.pad_to_bounding_box(x, 1, 1, 4, 4)\n>>> padded_image\n\n\nArgs:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n offset_height: Number of rows of zeros to add on top.\n offset_width: Number of columns of zeros to add on the left.\n target_height: Height of output image.\n target_width: Width of output image.\n\nReturns:\n If `image` was 4-D, a 4-D float Tensor of shape\n `[batch, target_height, target_width, channels]`\n If `image` was 3-D, a 3-D float Tensor of shape\n `[target_height, target_width, channels]`\n\nRaises:\n ValueError: If the shape of `image` is incompatible with the `offset_*` or\n `target_*` arguments, or either `offset_height` or `offset_width` is\n negative."} +{"repo": "mobly", "function": "def sanitize_filename(filename):\n dirname = os.path.dirname(filename)\n basename = os.path.basename(filename)\n basename = _sanitize_windows_filename(basename)\n basename = _truncate_filename(basename, LINUX_MAX_FILENAME_LENGTH)\n basename = basename.replace(' ', '_')\n return os.path.join(dirname, basename)", "docstring": "Sanitizes a filename for various operating systems.\n\nArgs:\n filename: string, the filename to sanitize.\n\nReturns:\n A string that is safe to use as a filename on various operating systems."} +{"repo": "tensorflow", "function": "def aggregate_and_return_name_for_output(self, fused_op_name, output_index, out_graphdef):\n del fused_op_name, output_index, out_graphdef\n raise RuntimeError('Unimplemented abstract method.')", "docstring": "Add node(s) to graph representing output operands and returns type.\n\nArgs:\n fused_op_name: name of the fused op stub name.\n output_index: Output index that we are currently processing from stub.\n out_graphdef: The destination graphdef we are currently building up.\n\nReturns:\n The datatype of this identity.\n\nRaises:\n RuntimeError: if the method is not implemented."} +{"repo": "transformers", "function": "def _prepare_images_structure(self, images: ImageInput) -> ImageInput:\n return make_flat_list_of_images(images)", "docstring": "Prepare the images structure for processing.\n\nArgs:\n images (`ImageInput`):\n The input images to process.\n\nReturns:\n `ImageInput`: The images with a valid nesting."} +{"repo": "beam", "function": "def _generate_graph_update_dicts(self):\n transform_dict = {}\n pcoll_dict = {}\n for transform_id, transform_proto in self._top_level_transforms():\n transform_dict[transform_proto.unique_name] = {'required': transform_id in self._required_transforms}\n for pcoll_id in transform_proto.outputs.values():\n pcoll_dict[pcoll_id] = {'cached': pcoll_id in self._cached_pcollections, 'referenced': pcoll_id in self._referenced_pcollections}\n\n def vertex_properties_to_attributes(vertex):\n \"\"\"Converts PCollection properties to DOT vertex attributes.\"\"\"\n attrs = {}\n if 'leaf' in vertex:\n attrs['style'] = 'invis'\n elif vertex.get('required'):\n attrs['color'] = 'blue'\n attrs['fontcolor'] = 'blue'\n else:\n attrs['color'] = 'grey'\n return attrs\n\n def edge_properties_to_attributes(edge):\n \"\"\"Converts PTransform properties to DOT edge attributes.\"\"\"\n attrs = {}\n if edge.get('cached'):\n attrs['color'] = 'red'\n elif edge.get('referenced'):\n attrs['color'] = 'black'\n else:\n attrs['color'] = 'grey'\n return attrs\n vertex_dict = {}\n edge_dict = {}\n for
transform_name, transform_properties in transform_dict.items():\n vertex_dict[transform_name] = vertex_properties_to_attributes(transform_properties)\n for pcoll_id, pcoll_properties in pcoll_dict.items():\n edge_dict[pcoll_id] = edge_properties_to_attributes(pcoll_properties)\n return (vertex_dict, edge_dict)", "docstring": "Generate updates specific to interactive pipeline.\n\nReturns:\n vertex_dict: (Dict[str, Dict[str, str]]) maps vertex name to attributes\n edge_dict: (Dict[str, Dict[str, str]]) maps vertex name to attributes"} +{"repo": "keras", "function": "def elu(x, alpha=1.0):\n if any_symbolic_tensors((x,)):\n return Elu(alpha).symbolic_call(x)\n return backend.nn.elu(x, alpha=alpha)", "docstring": "Exponential Linear Unit activation function.\n\nIt is defined as:\n\n`f(x) = alpha * (exp(x) - 1.) for x < 0`, `f(x) = x for x >= 0`.\n\nArgs:\n x: Input tensor.\n alpha: A scalar, slope of positive section. Defaults to `1.0`.\n\nReturns:\n A tensor with the same shape as `x`.\n\nExample:\n\n>>> x = np.array([-1., 0., 1.])\n>>> x_elu = keras.ops.elu(x)\n>>> print(x_elu)\narray([-0.63212055, 0., 1.], shape=(3,), dtype=float64)"} +{"repo": "sprockets", "function": "class StateValueInTransition(stl.base.NamedObject):\n\n def __init__(self, state, value):\n stl.base.NamedObject.__init__(self, state)\n self.value = value\n self.param_values = []\n\n def __eq__(self, other):\n return stl.base.NamedObject.__eq__(self, other) and self.value == other.value and (self.param_values == other.param_values)\n\n def __str__(self):\n return 'STATE %s: p(%s) v(%s)' % (self.name, stl.base.GetCSV(self.param_values), str(self.value))\n\n def Resolve(self, env, resolved_params):\n logging.log(1, 'Resolving ' + self.name)\n states = env['_current_module'].states\n if self.name not in states:\n did_you_mean = stl.levenshtein.closest_candidate(self.name, states.keys())\n raise NameError('Cannot find a state to expand: %s. Did you mean %s?' % (self.name, did_you_mean))\n found = states[self.name]\n if len(self.param_values) != len(found.params):\n raise TypeError('Wrong number of parameters: %s. Found %d params, expected %d params.' % (found.name, len(found.params), len(self.param_values)))\n resolved_state = StateResolved(self.name, found)\n for v in self.param_values:\n resolved_state.resolved_params.append(v.Resolve(env, resolved_params))\n for v in found.values:\n if self.value == v:\n return StateValue(resolved_state, v)\n did_you_mean = stl.levenshtein.closest_candidate(self.value, found.values)\n raise NameError('Invalid value in state %s: %s. Did you mean %s?' % (self.name, self.value, did_you_mean))", "docstring": "State instance defined in a state transition spec.\n\nA state instance is defined either in \"pre_states\", \"post_states\", or\n\"error_states\" in a state transition spec. Resolve() turns this into\nstate.StateValue resolved.\n\nAttributes:\n value: Current value of state of |name|.\n param_values: List of unresolved parameters of state of |name|."} +{"repo": "pyglove", "function": "def __init__(self, value_type, default: typing.Optional[numbers.Number]=MISSING_VALUE, min_value: typing.Optional[numbers.Number]=None, max_value: typing.Optional[numbers.Number]=None, is_noneable: bool=False, frozen: bool=False):\n if min_value is not None and max_value is not None and (min_value > max_value):\n raise ValueError(f'\"max_value\" must be equal or greater than \"min_value\". 
Encountered: min_value={min_value}, max_value={max_value}.')\n self._min_value = min_value\n self._max_value = max_value\n super().__init__(value_type, default, is_noneable=is_noneable, frozen=frozen)", "docstring": "Constructor.\n\nArgs:\n value_type: Type of number.\n default: Default value for this spec.\n min_value: (Optional) minimum value of acceptable values.\n max_value: (Optional) maximum value of acceptable values.\n is_noneable: If True, None is acceptable.\n frozen: If True, values other than the default value is not accceptable."} +{"repo": "tensorflow", "function": "def main_op():\n init = variables.global_variables_initializer()\n init_local = variables.local_variables_initializer()\n init_tables = lookup_ops.tables_initializer()\n return control_flow_ops.group(init, init_local, init_tables)", "docstring": "Returns a main op to init variables and tables.\n\nReturns the main op including the group of ops that initializes all\nvariables, initializes local variables and initialize all tables.\n\nReturns:\n The set of ops to be run as part of the main op upon the load operation."} +{"repo": "keras", "function": "def on_test_batch_begin(self, batch, logs=None):", "docstring": "Called at the beginning of a batch in `evaluate` methods.\n\nAlso called at the beginning of a validation batch in the `fit`\nmethods, if validation data is provided.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`Model` is set to `N`, this method will only be called every\n`N` batches.\n\nArgs:\n batch: Integer, index of batch within the current epoch.\n logs: Dict. Currently no data is passed to this argument for this\n method but that may change in the future."} +{"repo": "genai-processors", "function": "def as_videos(content: ProcessorContentTypes, *, ignore_unsupported_types: bool=False) -> list[ProcessorPart]:\n return _as_format_helper(content, lambda mime: mime.startswith('video/'), ignore_unsupported_types)", "docstring": "Returns the video parts from the content.\n\nArgs:\n content: Input content.\n ignore_unsupported_types: By default if content contains non-video parts a\n ValueError would be raised. This argument allows ingoring such parts.\n\nReturns:\n A list of video parts."} +{"repo": "tensorflow", "function": "class ZeroPadding1D(Layer):\n\n def __init__(self, padding=1, **kwargs):\n super(ZeroPadding1D, self).__init__(**kwargs)\n self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')\n self.input_spec = InputSpec(ndim=3)\n\n def compute_output_shape(self, input_shape):\n if input_shape[1] is not None:\n length = input_shape[1] + self.padding[0] + self.padding[1]\n else:\n length = None\n return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])\n\n def call(self, inputs):\n return backend.temporal_padding(inputs, padding=self.padding)\n\n def get_config(self):\n config = {'padding': self.padding}\n base_config = super(ZeroPadding1D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "docstring": "Zero-padding layer for 1D input (e.g.
temporal sequence).\n\nExamples:\n\n>>> input_shape = (2, 2, 3)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> print(x)\n[[[ 0 1 2]\n [ 3 4 5]]\n [[ 6 7 8]\n [ 9 10 11]]]\n>>> y = tf.keras.layers.ZeroPadding1D(padding=2)(x)\n>>> print(y)\ntf.Tensor(\n [[[ 0 0 0]\n [ 0 0 0]\n [ 0 1 2]\n [ 3 4 5]\n [ 0 0 0]\n [ 0 0 0]]\n [[ 0 0 0]\n [ 0 0 0]\n [ 6 7 8]\n [ 9 10 11]\n [ 0 0 0]\n [ 0 0 0]]], shape=(2, 6, 3), dtype=int64)\n\nArgs:\n padding: Int, or tuple of int (length 2), or dictionary.\n - If int:\n How many zeros to add at the beginning and end of\n the padding dimension (axis 1).\n - If tuple of int (length 2):\n How many zeros to add at the beginning and the end of\n the padding dimension (`(left_pad, right_pad)`).\n\nInput shape:\n 3D tensor with shape `(batch_size, axis_to_pad, features)`\n\nOutput shape:\n 3D tensor with shape `(batch_size, padded_axis, features)`"} +{"repo": "transformers", "function": "class XSoftmax(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx, input, mask, dim):\n ctx.dim = dim\n rmask = ~mask.to(torch.bool)\n output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min))\n output = torch.softmax(output, ctx.dim)\n output.masked_fill_(rmask, 0)\n ctx.save_for_backward(output)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n output, = ctx.saved_tensors\n inputGrad = softmax_backward_data(ctx, grad_output, output, ctx.dim, output)\n return (inputGrad, None, None)\n\n @staticmethod\n def symbolic(g, self, mask, dim):\n import torch.onnx.symbolic_helper as sym_help\n from torch.onnx.symbolic_opset9 import masked_fill, softmax\n mask_cast_value = g.op('Cast', mask, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n r_mask = g.op('Cast', g.op('Sub', g.op('Constant', value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), to_i=sym_help.cast_pytorch_to_onnx['Bool'])\n output = masked_fill(g, self, r_mask, g.op('Constant', value_t=torch.tensor(torch.finfo(self.type().dtype()).min)))\n output = softmax(g, output, dim)\n return masked_fill(g, output, r_mask, g.op('Constant', value_t=torch.tensor(0, dtype=torch.bool)))", "docstring": "Masked Softmax which is optimized for saving memory\n\nArgs:\n input (`torch.tensor`): The input tensor that will apply softmax.\n mask (`torch.IntTensor`):\n The mask matrix where 0 indicate that element will be ignored in the softmax calculation.\n dim (int): The dimension that will apply softmax\n\nExample:\n\n```python\n>>> import torch\n>>> from transformers.models.deberta_v2.modeling_deberta_v2 import XSoftmax\n\n>>> # Make a tensor\n>>> x = torch.randn([4, 20, 100])\n\n>>> # Create a mask\n>>> mask = (x > 0).int()\n\n>>> # Specify the dimension to apply softmax\n>>> dim = -1\n\n>>> y = XSoftmax.apply(x, mask, dim)\n```"} +{"repo": "keras", "function": "def _log_epoch_metrics(self, epoch, logs):\n if not logs:\n return\n train_logs = {k: v for k, v in logs.items() if not k.startswith('val_')}\n val_logs = {k: v for k, v in logs.items() if k.startswith('val_')}\n train_logs = self._collect_learning_rate(train_logs)\n if self.write_steps_per_second:\n train_logs['steps_per_second'] = self._compute_steps_per_second()\n if train_logs:\n with self._train_writer.as_default():\n for name, value in train_logs.items():\n self.summary.scalar('epoch_' + name, value, step=epoch)\n if val_logs:\n with self._val_writer.as_default():\n for name, value in val_logs.items():\n name = name[4:]\n self.summary.scalar('epoch_' + name, value, step=epoch)", "docstring": "Writes epoch 
metrics out as scalar summaries.\n\nArgs:\n epoch: Int. The global step to use for TensorBoard.\n logs: Dict. Keys are scalar summary names, values are scalars."} +{"repo": "tensorflow", "function": "def zeros(shape, dtype=dtypes.float32, name=None, layout=None):\n dtype = dtypes.as_dtype(dtype).base_dtype\n with ops.name_scope(name, 'zeros', [shape]) as name:\n if dtype == dtypes.bool:\n zero = False\n elif dtype == dtypes.string:\n zero = ''\n elif dtype.is_quantized:\n zero = np.zeros([]).astype(dtype.as_numpy_dtype)\n else:\n zero = 0\n if not isinstance(shape, tensor_lib.Tensor):\n try:\n if not context.executing_eagerly():\n output = _constant_if_small(zero, shape, dtype, name, layout=layout)\n if output is not None:\n return output\n shape = constant_op._tensor_shape_tensor_conversion_function(tensor_shape.TensorShape(shape))\n except (TypeError, ValueError, errors.UnimplementedError):\n shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)\n if not shape._shape_tuple():\n shape = reshape(shape, [-1])\n output = fill(shape, constant(zero, dtype=dtype), name=name, layout=layout)\n assert output.dtype.base_dtype == dtype\n return output", "docstring": "Creates a tensor with all elements set to zero.\n\nSee also `tf.zeros_like`, `tf.ones`, `tf.fill`, `tf.eye`.\n\nThis operation returns a tensor of type `dtype` with shape `shape` and\nall elements set to zero.\n\n>>> tf.zeros([3, 4], tf.int32)\n\n\nArgs:\n shape: A `list` of integers, a `tuple` of integers, or a 1-D `Tensor` of\n type `int32`.\n dtype: The DType of an element in the resulting `Tensor`.\n name: Optional string. A name for the operation.\n layout: Optional, `tf.experimental.dtensor.Layout`. If provided, the result\n is a [DTensor](https://www.tensorflow.org/guide/dtensor_overview) with the\n provided layout.\n\nReturns:\n A `Tensor` with all elements set to zero."} +{"repo": "tensorflow", "function": "def where(condition, x=None, y=None, name=None):\n if x is None and y is None:\n with ops.name_scope(name, 'Where', [condition]) as name:\n condition = ops.convert_to_tensor(condition, preferred_dtype=dtypes.bool, name='condition')\n return gen_array_ops.where(condition=condition, name=name)\n elif x is not None and y is not None:\n return gen_math_ops.select(condition=condition, x=x, y=y, name=name)\n else:\n raise ValueError('x and y must both be non-None or both be None.')", "docstring": "Return the elements, either from `x` or `y`, depending on the `condition`.\n\nIf both `x` and `y` are None, then this operation returns the coordinates of\ntrue elements of `condition`. The coordinates are returned in a 2-D tensor\nwhere the first dimension (rows) represents the number of true elements, and\nthe second dimension (columns) represents the coordinates of the true\nelements. Keep in mind, the shape of the output tensor can vary depending on\nhow many true values there are in input. 
Indices are output in row-major\norder.\n\nIf both non-None, `x` and `y` must have the same shape.\nThe `condition` tensor must be a scalar if `x` and `y` are scalar.\nIf `x` and `y` are tensors of higher rank, then `condition` must be either a\nvector with size matching the first dimension of `x`, or must have the same\nshape as `x`.\n\nThe `condition` tensor acts as a mask that chooses, based on the value at each\nelement, whether the corresponding element / row in the output should be taken\nfrom `x` (if true) or `y` (if false).\n\nIf `condition` is a vector and `x` and `y` are higher rank matrices, then it\nchooses which row (outer dimension) to copy from `x` and `y`. If `condition`\nhas the same shape as `x` and `y`, then it chooses which element to copy from\n`x` and `y`.\n\nArgs:\n condition: A `Tensor` of type `bool`\n x: A Tensor which may have the same shape as `condition`. If `condition` is\n rank 1, `x` may have higher rank, but its first dimension must match the\n size of `condition`.\n y: A `tensor` with the same shape and type as `x`.\n name: A name of the operation (optional)\n\nReturns:\n A `Tensor` with the same type and shape as `x`, `y` if they are non-None.\n Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.\n\nRaises:\n ValueError: When exactly one of `x` or `y` is non-None.\n\n@compatibility(TF2)\n\nThis API is compatible with eager execution and `tf.function`. However, this\nis still a legacy API endpoint originally designed for TF1. To migrate to\nfully-native TF2, please replace its usage with `tf.where` instead, which is\ndirectly backwards compatible with `tf.compat.v1.where`.\n\nHowever,`tf.compat.v1.where` is more restrictive than `tf.where`, requiring\n`x` and `y` to have the same shape, and returning a `Tensor` with the same\ntype and shape as `x`, `y` (if they are both non-None).\n\n`tf.where` will accept `x`, `y` that are not the same shape as long as they\nare broadcastable with one another and with `condition`, and will return a\n`Tensor` with shape broadcast from `condition`, `x`, and `y`.\n\nFor example, the following works with `tf.where` but not `tf.compat.v1.where`:\n\n>>> tf.where([True, False, False, True], [1,2,3,4], [100])\n\n\n>>> tf.where(True, [1,2,3,4], 100)\n\n\n@end_compatibility"} +{"repo": "transformers", "function": "def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):\n assert not (input_ids is None and inputs_embeds is None)\n if input_ids is not None:\n check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n input_shape = shape_list(inputs_embeds)[:-1]\n if position_ids is None:\n if input_ids is not None:\n position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids)\n else:\n position_ids = tf.expand_dims(tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0)\n position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n final_embeddings = inputs_embeds + position_embeds\n final_embeddings = self.LayerNorm(inputs=final_embeddings)\n final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\n final_embeddings (`tf.Tensor`): output embedding tensor."} +{"repo": "nsscache", "function": "def FromHttpToTimestamp(self, http_ts_string):\n t = time.strptime(http_ts_string, '%a, %d %b %Y %H:%M:%S GMT')\n return 
int(calendar.timegm(t))", "docstring": "Converts HTTP timestamp string to internal nss_cache timestamp.\n\nArgs:\n HTTP format timestamp string\nReturns:\n number of seconds since epoch"} +{"repo": "beam", "function": "def write_to_text(pcoll, path: str):\n try:\n field_names = [name for name, _ in schemas.named_fields_from_element_type(pcoll.element_type)]\n except Exception as exn:\n raise ValueError('WriteToText requires an input schema with exactly one field.') from exn\n if len(field_names) != 1:\n raise ValueError('WriteToText requires an input schema with exactly one field, got %s' % field_names)\n sole_field_name, = field_names\n return pcoll | beam.Map(lambda x: str(getattr(x, sole_field_name))) | beam.io.WriteToText(path)", "docstring": "Writes a PCollection to a (set of) text files(s).\n\nThe input must be a PCollection whose schema has exactly one field.\n\nArgs:\n path (str): The file path to write to. The files written will\n begin with this prefix, followed by a shard identifier."} +{"repo": "transformers", "function": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]]=None) -> 'torch.Tensor':\n class_queries_logits = outputs.class_queries_logits\n masks_queries_logits = outputs.masks_queries_logits\n masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]\n masks_probs = masks_queries_logits.sigmoid()\n segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)\n batch_size = class_queries_logits.shape[0]\n if target_sizes is not None:\n if batch_size != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n semantic_segmentation = []\n for idx in range(batch_size):\n resized_logits = torch.nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n semantic_map = resized_logits[0].argmax(dim=0)\n semantic_segmentation.append(semantic_map)\n else:\n semantic_segmentation = segmentation.argmax(dim=1)\n semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n return semantic_segmentation", "docstring": "Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports\nPyTorch.\n\nArgs:\n outputs ([`MaskFormerForInstanceSegmentation`]):\n Raw outputs of the model.\n target_sizes (`List[Tuple[int, int]]`, *optional*):\n List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested\n final size (height, width) of each prediction. If left to None, predictions will not be resized.\nReturns:\n `List[torch.Tensor]`:\n A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)\n corresponding to the target_sizes entry (if `target_sizes` is specified). 
Each entry of each\n `torch.Tensor` corresponds to a semantic class id."} +{"repo": "beam", "function": "def tokenize_sentence(input_dict):\n text, uid = (input_dict['text'], input_dict['id'])\n tokens = Tokenizer([text], padding=True, truncation=True, return_tensors='pt')\n tokens = {key: torch.squeeze(val) for key, val in tokens.items()}\n return ((text, uid), tokens)", "docstring": "Takes a dictionary with a text and an id, tokenizes the text, and\nreturns a tuple of the text and id and the tokenized text\n\nArgs:\n input_dict: a dictionary with the text and id of the sentence\n\nReturns:\n A tuple of the text and id, and a dictionary of the tokens."} +{"repo": "transformers", "function": "class Qwen2AudioCausalLMOutputWithPast(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Cache] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n attention_mask: Optional[torch.FloatTensor] = None", "docstring": "Base class for Qwen2Audio causal language model (or autoregressive) outputs.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Language modeling loss (for next-token prediction).\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Pre-computed hidden-states that can be used to speed up auto-regressive (sequential) decoding. There are\n two sets of pre-computed hidden-states: key and values states in the self-attention blocks.\n The `past_key_values` are returned when `use_cache=True` is passed or when `config.use_cache=True`.\n It is a [`~cache_utils.Cache`] instance.\n\n If `past_key_values` are used, the user can optionally input only the last `input_ids` (those\n that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of\n all `input_ids` of shape `(batch_size, sequence_length)`.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n attention_mask (`torch.FloatTensor`, *optional*):\n Attentions mask, used to update attention mask and position_ids."} +{"repo": "tensorflow", "function": "class Undefined(object):\n __slots__ = ('symbol_name',)\n\n def __init__(self, symbol_name):\n self.symbol_name = symbol_name\n\n def read(self):\n raise UnboundLocalError(\"'{}' is used before assignment\".format(self.symbol_name))\n\n def __repr__(self):\n return self.symbol_name\n\n def __getattribute__(self, name):\n try:\n 
return object.__getattribute__(self, name)\n except AttributeError:\n return self\n\n def __getitem__(self, i):\n return self", "docstring": "Represents an undefined symbol in Python.\n\nThis is used to reify undefined symbols, which is required to use the\nfunctional form of loops.\nExample:\n\n while n > 0:\n n = n - 1\n s = n\n return s # Runtime error if n == 0\n\nThis is valid Python code and will not result in an error as long as n\nis positive. The use of this class is to stay as close to Python semantics\nas possible for staged code of this nature.\n\nConverted version of the above showing the possible usage of this class:\n\n s = Undefined('s')\n init_state = (s,)\n s = while_loop(cond, body, init_state)\n return s # s is an instance of Undefined if the loop never runs\n\nAttributes:\n symbol_name: Text, identifier for the undefined symbol"} +{"repo": "tensorflow", "function": "def _test_dir(temp_dir, test_name):\n test_dir = os.path.join(temp_dir, test_name)\n if os.path.isdir(test_dir):\n for f in glob.glob('%s/*' % test_dir):\n os.remove(f)\n else:\n os.makedirs(test_dir)\n return test_dir", "docstring": "Create an empty dir to use for tests.\n\nArgs:\n temp_dir: Tmp directory path.\n test_name: Name of the test.\n\nReturns:\n Absolute path to the test directory."} +{"repo": "transformers", "function": "class GroundingDinoDecoderOutput(ModelOutput):\n last_hidden_state: Optional[torch.FloatTensor] = None\n intermediate_hidden_states: Optional[torch.FloatTensor] = None\n intermediate_reference_points: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None", "docstring": "Base class for outputs of the GroundingDinoDecoder. This class adds two attributes to\nBaseModelOutputWithCrossAttentions, namely:\n- a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer)\n- a stacked tensor of intermediate reference points.\n\nArgs:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):\n Stacked intermediate hidden states (output of each layer of the decoder).\n intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`):\n Stacked intermediate reference points (reference points of each layer of the decoder).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer\n plus the initial embedding outputs.\n attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of tuples of `torch.FloatTensor` (one for attention for each layer) of shape `(batch_size, num_heads,\n sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the\n weighted average in the self-attention, cross-attention and multi-scale deformable attention heads."} +{"repo": "tensorflow", "function": "def get_registered_kernels_for_op(name):\n buf = c_api.TF_GetRegisteredKernelsForOp(name)\n data = c_api.TF_GetBuffer(buf)\n kernel_list = kernel_def_pb2.KernelList()\n kernel_list.ParseFromString(compat.as_bytes(data))\n return kernel_list", "docstring": "Returns a KernelList proto of registered kernels for a given op.\n\nArgs:\n name: A string representing the name of the op whose kernels to retrieve."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n residual = hidden_states\n hidden_states, self_attn_weights, past_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states, cross_attn_weights, past_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, output_attentions=output_attentions, cache_position=cache_position)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights, cross_attn_weights)\n if use_cache:\n outputs += (past_key_value,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (`torch.FloatTensor`):\n cross attention input to the layer of shape `(batch, seq_len, embed_dim)`\n encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (`torch.FloatTensor`): mask for attention heads in 
a given layer of size\n `(encoder_attention_heads,)`.\n cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of\n size `(decoder_attention_heads,)`.\n past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. It is used to update the\n cache in the correct position and to infer the complete sequence length."} +{"repo": "sprockets", "function": "class EventFromExternal(Event):\n\n def __init__(self, name, external):\n Event.__init__(self, name)\n self.external_name = external\n module, event = external.rsplit('.', 1)\n self.external_event = importlib.import_module(module).__getattribute__(event)()\n assert isinstance(self.external_event, stl.lib.Event)\n\n def __eq__(self, other):\n return Event.__eq__(self, other) and self.external_name == other.external_name\n\n def __str__(self):\n return '%s b(%s)' % (Event.__str__(self), self.external_name)\n\n def Resolve(self, env, resolved_params):\n logging.log(1, 'Resolving ' + self.name)\n return stl.base.FuncWithContext(self.external_name, self.external_event)", "docstring": "An external event (i.e. defined and provided in a python file).\n\nevent mEventExample(int param1) = external \"foo.bar.EventFunction\";\n\nThere should be a python module foo.bar with a top-level function\nEventFunction. The actual event signature should be the same as\nthe one specified in the stl file, except it should expect to\ntake a Context as its first argument, in addition to the others.\n\nIn the example above, EventFunction should look like:\n def EventFunction(context, param1):\n ...\n\nAttributes:\n name: The STL event name (e.g. mEventExample)\n external_name: name of the external function, including modules\n (e.g. \"foo.bar.EventFunction\")\n external_event: The actual, callable event function (e.g. 
EventFunction)"} +{"repo": "transformers", "function": "def get_new_doctest_files(repo, base_commit, branching_commit) -> List[str]:\n for diff_obj in branching_commit.diff(base_commit):\n if diff_obj.a_path != 'utils/not_doctested.txt':\n continue\n folder = Path(repo.working_dir)\n with checkout_commit(repo, branching_commit):\n with open(folder / 'utils/not_doctested.txt', 'r', encoding='utf-8') as f:\n old_content = f.read()\n with open(folder / 'utils/not_doctested.txt', 'r', encoding='utf-8') as f:\n new_content = f.read()\n removed_content = {x.split(' ')[0] for x in old_content.split('\\n')} - {x.split(' ')[0] for x in new_content.split('\\n')}\n return sorted(removed_content)\n return []", "docstring": "Get the list of files that were removed from \"utils/not_doctested.txt\", between `base_commit` and\n`branching_commit`.\n\nReturns:\n `List[str]`: List of files that were removed from \"utils/not_doctested.txt\"."} +{"repo": "genai-processors", "function": "def update(self, media_blob: genai_types.Blob):\n if self.generation_start_sec is not None and self.ttft_sec is None:\n self.time_audio_start = time.perf_counter()\n self.ttft_sec = self.time_audio_start - self.generation_start_sec\n self.audio_duration += audio_duration_sec(media_blob.data, RECEIVE_SAMPLE_RATE)", "docstring": "Updates the generation request with the new media data.\n\nArgs:\n media_blob: The new media data."} +{"repo": "tensorflow", "function": "def node_name(self):\n return self._node_name", "docstring": "Name of the node from which the tensor value was dumped.\n\nReturns:\n (`str`) name of the node watched by the debug op."} +{"repo": "tensorflow", "function": "def check_graphs(*args):\n graph = None\n for i, sgv in enumerate(args):\n if graph is None and sgv.graph is not None:\n graph = sgv.graph\n elif sgv.graph is not None and sgv.graph is not graph:\n raise ValueError(f'args[{i}] does not belong to the same graph as other arguments.')", "docstring": "Check that all the element in args belong to the same graph.\n\nArgs:\n *args: a list of object with a obj.graph property.\nRaises:\n ValueError: if all the elements do not belong to the same graph."} +{"repo": "nsscache", "function": "def Create(conf):\n global _source_implementations\n if not _source_implementations:\n raise RuntimeError('no source implementations exist')\n source_name = conf['name']\n if source_name not in list(_source_implementations.keys()):\n raise RuntimeError('source not implemented: %r' % (source_name,))\n return _source_implementations[source_name](conf)", "docstring": "Source creation factory method.\n\nArgs:\n conf: a dictionary of configuration key/value pairs, including one\n required attribute 'name'.\n\nReturns:\n A Source instance.\n\nRaises:\n RuntimeError: no sources are registered with RegisterImplementation"} +{"repo": "tensorflow", "function": "def range_dimension_tensor(self, name='range_dimension_tensor'):\n with self._name_scope(name):\n return self._range_dimension_tensor()", "docstring": "Dimension (in the sense of vector spaces) of the range of this operator.\n\nDetermined at runtime.\n\nIf this operator acts like the batch matrix `A` with\n`A.shape = [B1,...,Bb, M, N]`, then this returns `M`.\n\nArgs:\n name: A name for this `Op`.\n\nReturns:\n `int32` `Tensor`"} +{"repo": "beam", "function": "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Update an association between a GCP project and a 
GitHub Enterprise server.\n\nArgs:\n request: (CloudbuildProjectsLocationsGithubEnterpriseConfigsPatchRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (Operation) The response message."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, output_router_logits: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n hidden_states = self.input_layernorm(hidden_states)\n residual = hidden_states\n hidden_states, self_attn_weights = self.self_attn(hidden_states=hidden_states, position_embeddings=position_embeddings, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs)\n hidden_states = residual * self.attn_alpha_factor + hidden_states * self.attn_beta_factor\n hidden_states = self.post_attention_layernorm(hidden_states)\n residual = hidden_states\n hidden_states, router_logits = self.block_sparse_moe(hidden_states)\n hidden_states = residual * self.mlp_alpha_factor + hidden_states * self.mlp_beta_factor\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n if output_router_logits:\n outputs += (router_logits,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`):\n Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,\n with `head_dim` being the embedding dimension of each attention head.\n attention_mask (`torch.Tensor`, *optional*): attention mask of size\n `(batch, sequence_length)` where padding elements are indicated by 0.\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_router_logits (`bool`, *optional*):\n Whether or not to return the logits of all the routers. 
They are useful for computing the router loss, and\n should not be returned during inference.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence.\n kwargs (`dict`, *optional*):\n Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code\n into the model"} +{"repo": "keras", "function": "class CSVLogger(Callback):\n\n def __init__(self, filename, separator=',', append=False):\n super().__init__()\n self.sep = separator\n self.filename = file_utils.path_to_string(filename)\n self.append = append\n self.writer = None\n self.keys = None\n self.append_header = True\n self.csv_file = None\n\n def on_train_begin(self, logs=None):\n if self.append:\n if file_utils.exists(self.filename):\n with file_utils.File(self.filename, 'r') as f:\n self.append_header = not bool(len(f.readline()))\n mode = 'a'\n else:\n mode = 'w'\n if self.csv_file and (not self.csv_file.closed):\n self.csv_file.close()\n self.csv_file = file_utils.File(self.filename, mode)\n self.writer = None\n self.keys = None\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n\n def handle_value(k):\n is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0\n if isinstance(k, str):\n return k\n elif isinstance(k, collections.abc.Iterable) and (not is_zero_dim_ndarray):\n return f'\"[{', '.join(map(str, k))}]\"'\n else:\n return k\n if self.keys is None:\n self.keys = sorted(logs.keys())\n val_keys_found = False\n for key in self.keys:\n if key.startswith('val_'):\n val_keys_found = True\n break\n if not val_keys_found and self.keys:\n self.keys.extend(['val_' + k for k in self.keys])\n if not self.writer:\n\n class CustomDialect(csv.excel):\n delimiter = self.sep\n fieldnames = ['epoch'] + (self.keys or [])\n self.writer = csv.DictWriter(self.csv_file, fieldnames=fieldnames, dialect=CustomDialect)\n if self.append_header:\n self.writer.writeheader()\n row_dict = collections.OrderedDict({'epoch': epoch})\n row_dict.update(((key, handle_value(logs.get(key, 'NA'))) for key in self.keys))\n self.writer.writerow(row_dict)\n self.csv_file.flush()\n\n def on_train_end(self, logs=None):\n if self.csv_file and (not self.csv_file.closed):\n self.csv_file.close()\n self.writer = None", "docstring": "Callback that streams epoch results to a CSV file.\n\nSupports all values that can be represented as a string,\nincluding 1D iterables such as `np.ndarray`.\n\nArgs:\n filename: Filename of the CSV file, e.g. `'run/log.csv'`.\n separator: String used to separate elements in the CSV file.\n append: Boolean. True: append if file exists (useful for continuing\n training). False: overwrite existing file.\n\nExample:\n\n```python\ncsv_logger = CSVLogger('training.log')\nmodel.fit(X_train, Y_train, callbacks=[csv_logger])\n```"} +{"repo": "transformers", "function": "class PerceiverOneHotPreprocessor(AbstractPreprocessor):\n\n def __init__(self, config: PerceiverConfig) -> None:\n super().__init__()\n self.config: PerceiverConfig = config\n\n @property\n def num_channels(self) -> int:\n return self.config.num_labels\n\n def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, network_input_is_1d: bool=True):\n inputs = inputs[:, None, :]\n return (inputs, None, inputs)", "docstring": "One-hot preprocessor for Perceiver Encoder. 
Can be used to add a dummy index dimension to the input.\n\nArgs:\n config ([`PerceiverConfig`]):\n Model configuration."} +{"repo": "starthinker", "function": "def _get_feed(self):\n if self.feed_name in self._feed_name_tab_map:\n for tab_name in self._feed_name_tab_map[self.feed_name]:\n for sheet in self.spreadsheet['sheets']:\n if sheet['properties']['title'] == tab_name:\n self.tab_name = tab_name\n return sheets_read(self.config, self.auth, self.trix_id, tab_name, self.trix_range)\n return [[]]", "docstring": "Fetches the feed based on initialization parameters.\n\nReturns:\n List of lists that represents the rows and columns of the feed. If the\n feed isn't found returns a list with an empty list."} +{"repo": "tensorflow", "function": "def _matrix_conv(self, m1, m2):\n n = m1[0, 0, 0].shape.as_list()[0]\n if n != m2[0, 0, 0].shape.as_list()[0]:\n raise ValueError(f'The entries in matrices m1 and m2 must have the same dimensions. Received m1[0, 0, 0].shape={m1[0, 0, 0].shape} and m2[0, 0, 0].shape={m2[0, 0, 0].shape}.')\n k = int(np.cbrt(len(m1)))\n l = int(np.cbrt(len(m2)))\n result = {}\n size = k + l - 1\n for i in range(size):\n for j in range(size):\n for r in range(size):\n result[i, j, r] = array_ops.zeros([n, n], self.dtype)\n for index1 in range(min(k, i + 1)):\n for index2 in range(min(k, j + 1)):\n for index3 in range(min(k, r + 1)):\n if i - index1 < l and j - index2 < l and (r - index3 < l):\n result[i, j, r] += math_ops.matmul(m1[index1, index2, index3], m2[i - index1, j - index2, r - index3])\n return result", "docstring": "Matrix convolution.\n\nArgs:\n m1: is a k x k x k dictionary, each element is a n x n matrix.\n m2: is a l x l x l dictionary, each element is a n x n matrix.\n\nReturns:\n (k + l - 1) x (k + l - 1) x (k + l - 1) dictionary each\n element is a n x n matrix.\nRaises:\n ValueError: if the entries of m1 and m2 are of different dimensions."} +{"repo": "transformers", "function": "def pad_to_square(self, image: np.ndarray, background_color: Union[int, Tuple[int, int, int]]=0, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.array:\n height, width = get_image_size(image, input_data_format)\n num_channels = image.shape[0] if input_data_format == ChannelDimension.FIRST else image.shape[-1]\n if height == width:\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image\n max_dim = max(height, width)\n if isinstance(background_color, int):\n background_color = [background_color]\n elif len(background_color) != num_channels:\n raise ValueError(f'background_color must have no more than {num_channels} elements to match the number of channels')\n if input_data_format == ChannelDimension.FIRST:\n result = np.zeros((num_channels, max_dim, max_dim), dtype=image.dtype)\n for i, color in enumerate(background_color):\n result[i, :, :] = color\n if width > height:\n start = (max_dim - height) // 2\n result[:, start:start + height, :] = image\n else:\n start = (max_dim - width) // 2\n result[:, :, start:start + width] = image\n else:\n result = np.zeros((max_dim, max_dim, num_channels), dtype=image.dtype)\n for i, color in enumerate(background_color):\n result[:, :, i] = color\n if width > height:\n start = (max_dim - height) // 2\n result[start:start + height, :, :] = image\n else:\n start = (max_dim - width) // 2\n result[:, start:start + width, :] = image\n image = to_channel_dimension_format(result, data_format, 
input_data_format) if data_format is not None else result\n return image", "docstring": "Pads an image to a square based on the longest edge.\n\nArgs:\n image (`np.ndarray`):\n The image to pad.\n background_color (`int` or `Tuple[int, int, int]`, *optional*, defaults to 0):\n The color to use for the padding. Can be an integer for single channel or a\n tuple of integers representing multi-channel images. If passed as an integer\n in multi-channel mode, it will default to `0` in subsequent channels.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\nReturns:\n `np.ndarray`: The padded image."} +{"repo": "tensorflow", "function": "def _assert_same_base_type(items, expected_type=None):\n original_expected_type = expected_type\n mismatch = False\n for item in items:\n if item is not None:\n item_type = item.dtype.base_dtype\n if not expected_type:\n expected_type = item_type\n elif expected_type != item_type:\n mismatch = True\n break\n if mismatch:\n expected_type = original_expected_type\n original_item_str = None\n for item in items:\n if item is not None:\n item_type = item.dtype.base_dtype\n if not expected_type:\n expected_type = item_type\n original_item_str = item.name if hasattr(item, 'name') else str(item)\n elif expected_type != item_type:\n raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (item.name if hasattr(item, 'name') else str(item), item_type, expected_type, ' as %s' % original_item_str if original_item_str else ''))\n return expected_type\n else:\n return expected_type", "docstring": "Asserts all items are of the same base type.\n\nArgs:\n items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,\n `Operation`, or `IndexedSlices`). Can include `None` elements, which\n will be ignored.\n expected_type: Expected type. If not specified, assert all items are\n of the same base type.\n\nReturns:\n Validated type, or none if neither expected_type nor items provided.\n\nRaises:\n ValueError: If any types do not match."} +{"repo": "tensorflow", "function": "class Zeros(Initializer):\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which default to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`).\n **kwargs: Additional keyword arguments.\n \"\"\"\n _validate_kwargs(self.__class__.__name__, kwargs)\n dtype = _get_dtype(dtype)\n if not dtype.is_numpy_compatible or dtype == dtypes.string:\n raise ValueError('Expected numeric or boolean dtype, got %s.' 
% dtype)\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return array_ops.zeros(shape, dtype)", "docstring": "Initializer that generates tensors initialized to 0.\n\nAlso available via the shortcut function `tf.keras.initializers.zeros`.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = tf.keras.initializers.Zeros()\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = tf.keras.initializers.Zeros()\n>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)"} +{"repo": "tensorflow", "function": "def saved_model_format_scope(value, **kwargs):\n previous_format = _thread_local_data.saved_model_format\n previous_kwargs = _thread_local_data.save_kwargs\n try:\n _thread_local_data.saved_model_format = value\n _thread_local_data.save_kwargs = kwargs\n yield\n finally:\n _thread_local_data.saved_model_format = previous_format\n _thread_local_data.save_kwargs = previous_kwargs", "docstring": "Provides a scope within which the saved model format to test is `value`.\n\nThe saved model format gets restored to its original value upon exiting the\nscope.\n\nArgs:\n value: saved model format value\n **kwargs: optional kwargs to pass to the save function.\n\nYields:\n The provided value."} +{"repo": "transformers", "function": "def add_request(self, input_ids: List[int], request_id: Optional[str]=None, max_new_tokens: Optional[int]=None) -> str:\n if request_id is None:\n with self._request_lock:\n request_id = f'req_{self._request_counter}'\n self._request_counter += 1\n max_new_tokens = self.generation_config.max_new_tokens if max_new_tokens is None else max_new_tokens\n state = RequestState(request_id=request_id, prompt_ids=list(input_ids), full_prompt_ids=list(input_ids), max_new_tokens=max_new_tokens, eos_token_id=self.generation_config.eos_token_id)\n self.input_queue.put(state, block=True, timeout=10)\n logger.debug(f'Added request {request_id} to queue.')\n return request_id", "docstring": "Add a new generation request to the queue.\n\nArgs:\n input_ids: Input token IDs to use as prompt\n request_id: Optional custom request ID (auto-generated if None)\n **kwargs: Additional generation parameters\n\nReturns:\n str: The request ID"} +{"repo": "tensorflow", "function": "def _BroadcastMul(vec, mat):\n vec = array_ops.expand_dims(vec, -1)\n return vec * mat", "docstring": "Multiply after broadcasting vec to match dimensions of mat.\n\nArgs:\n vec: A 1-D tensor of dimension [D0]\n mat: A 2-D tensor of dimension [D0, D1]\n\nReturns:\n A tensor of dimension [D0, D1], the result of vec * mat"} +{"repo": "tensorflow", "function": "class SpatialDropout1D(Dropout):\n\n def __init__(self, rate, **kwargs):\n super(SpatialDropout1D, self).__init__(rate, **kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def _get_noise_shape(self, inputs):\n input_shape = array_ops.shape(inputs)\n noise_shape = (input_shape[0], 1, input_shape[2])\n return noise_shape", "docstring": "Spatial 1D version of Dropout.\n\nThis version performs the same function as Dropout, however, it drops\nentire 1D feature maps instead of individual elements. If adjacent frames\nwithin feature maps are strongly correlated (as is normally the case in\nearly convolution layers) then regular dropout will not regularize the\nactivations and will otherwise just result in an effective learning rate\ndecrease. In this case, SpatialDropout1D will help promote independence\nbetween feature maps and should be used instead.\n\nArgs:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n\nCall arguments:\n inputs: A 3D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n\nInput shape:\n 3D tensor with shape:\n `(samples, timesteps, channels)`\n\nOutput shape:\n Same as input.\n\nReferences:\n - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)"} +{"repo": "transformers", "function": "class MaskFormerPixelLevelModuleOutput(ModelOutput):\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n decoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "MaskFormer's pixel level module output. It returns both the last and (optionally) the hidden states from the\n`encoder` and `decoder`. By default, the `encoder` is a MaskFormerSwin Transformer and the `decoder` is a Feature\nPyramid Network (FPN).\n\nThe `encoder_last_hidden_state` are referred on the paper as **images features**, while `decoder_last_hidden_state`\nas **pixel embeddings**\n\nArgs:\n encoder_last_hidden_state (`torch.FloatTensor` of shape`(batch_size, num_channels, height, width)`):\n Last hidden states (final feature map) of the last stage of the encoder.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the model at\n the output of each stage.\n decoder_last_hidden_state (`torch.FloatTensor` of shape`(batch_size, num_channels, height, width)`):\n Last hidden states (final feature map) of the last stage of the decoder.\n decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, num_channels, height, width)`. 
Hidden-states (also called feature maps) of the model at\n the output of each stage."} +{"repo": "tensorflow", "function": "def trace(self, name='trace'):\n with self._name_scope(name):\n return self._trace()", "docstring": "Trace of the linear operator, equal to sum of `self.diag_part()`.\n\nIf the operator is square, this is also the sum of the eigenvalues.\n\nArgs:\n name: A name for this `Op`.\n\nReturns:\n Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`."} +{"repo": "transformers", "function": "class InstructBlipForConditionalGenerationModelOutput(ModelOutput):\n loss: Optional[Tuple[torch.FloatTensor]] = None\n logits: Optional[Tuple[torch.FloatTensor]] = None\n vision_outputs: Optional[torch.FloatTensor] = None\n qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None\n language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None\n\n def to_tuple(self) -> Tuple[Any]:\n return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys()))", "docstring": "Class defining the outputs of [`InstructBlipForConditionalGeneration`].\n\nArgs:\n loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):\n Language modeling loss from the language model.\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head of the language model.\n vision_outputs (`BaseModelOutputWithPooling`):\n Outputs of the vision encoder.\n qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):\n Outputs of the Q-Former (Querying Transformer).\n language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):\n Outputs of the language model."} +{"repo": "beam", "function": "def __init__(self, topic):\n super().__init__()\n self.topic = topic", "docstring": "Initializes ``_WriteStringsToPubSub``.\n\nAttributes:\n topic: Cloud Pub/Sub topic in the form \"/topics//\"."} +{"repo": "transformers", "function": "def forward(self, vision_features, class_features, task_features, task_mask, output_attentions=None, output_hidden_states=None, return_dict=None):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n vision_features, vision_shapes, vision_shapes_list, level_start_index = self._get_encoder_input(vision_features)\n denoise_embeddings, denoise_bboxes, key_padding_mask = (None, None, None)\n batch_size = task_mask.shape[0]\n task_features = self.task_encoder(task_features)\n if self.task_project is not None:\n task_features = self.task_project(task_features)\n src_key_mask = (task_mask == 0).detach()\n attn_mask_len = self.num_queries\n fusion_size = attn_mask_len + task_features.shape[0]\n key_padding_mask = torch.zeros([batch_size, fusion_size], dtype=torch.bool).to(task_features.device)\n key_padding_mask[:, attn_mask_len:] = src_key_mask\n attention_mask = _prepare_4d_attention_mask(~key_padding_mask, dtype=vision_features.dtype)\n decoder_embeddings, reference_points, encoder_bboxes, encoder_class_similarity, init_reference_points = self._get_decoder_input(vision_features, tuple(vision_shapes_list), class_features, denoise_embeddings, denoise_bboxes)\n all_hidden_states = () if output_hidden_states else 
None\n all_attns = () if output_attentions else None\n all_self_attns = () if output_attentions else None\n all_cross_attns = () if output_attentions else None\n predicted_class_features = decoder_embeddings\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (predicted_class_features,)\n decoder_bboxes = []\n decoder_classes = []\n last_refined_bbox = None\n reference_points = reference_points.sigmoid()\n for i, layer in enumerate(self.layers):\n if self.gradient_checkpointing and self.training:\n predicted_class_features, task_features, self_attention, cross_attention = self._gradient_checkpointing_func(layer.__call__, predicted_class_features, task_features, reference_points, vision_features, vision_shapes, vision_shapes_list, level_start_index=level_start_index, attention_mask=attention_mask, query_position=self.query_position_head(reference_points), output_attentions=output_attentions, output_hidden_states=output_hidden_states)\n else:\n predicted_class_features, task_features, self_attention, cross_attention = layer(predicted_class_features, task_features, reference_points, vision_features, vision_shapes, vision_shapes_list, level_start_index=level_start_index, attention_mask=attention_mask, query_position=self.query_position_head(reference_points), output_attentions=output_attentions, output_hidden_states=output_hidden_states)\n if output_attentions:\n all_self_attns = all_self_attns + (self_attention,)\n all_cross_attns = all_cross_attns + (cross_attention,)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (predicted_class_features,)\n refined_bbox = torch.sigmoid(self.decoder_bbox_head[i](predicted_class_features) + _inverse_sigmoid(reference_points))\n original_class_projected = self.decoder_class_head[i](class_features).permute(1, 2, 0)\n if self.training:\n decoder_classes.append(get_class_similarity(class_distance_type=self.class_distance_type, cls_feature=predicted_class_features, class_proj=original_class_projected))\n if i == 0:\n decoder_bboxes.append(refined_bbox)\n else:\n decoder_bboxes.append(torch.sigmoid(self.decoder_bbox_head[i](predicted_class_features) + _inverse_sigmoid(last_refined_bbox)))\n elif i == self.decoder_num_layers - 1:\n decoder_classes.append(get_class_similarity(self.class_distance_type, predicted_class_features, original_class_projected))\n decoder_bboxes.append(refined_bbox)\n break\n last_refined_bbox = refined_bbox\n reference_points = refined_bbox.detach() if self.training else refined_bbox\n if output_attentions:\n all_attns += (all_self_attns, all_cross_attns)\n last_hidden_state = predicted_class_features\n decoder_bboxes = torch.stack(decoder_bboxes)\n decoder_classes = torch.stack(decoder_classes)\n if not return_dict:\n return (last_hidden_state, all_hidden_states, all_attns, decoder_bboxes, decoder_classes, encoder_bboxes, encoder_class_similarity, init_reference_points, reference_points)\n return OmDetTurboDecoderOutput(last_hidden_state=last_hidden_state, hidden_states=all_hidden_states, attentions=all_attns, decoder_coords=decoder_bboxes, decoder_classes=decoder_classes, encoder_coord_logits=encoder_bboxes, encoder_class_logits=encoder_class_similarity, init_reference_points=init_reference_points, intermediate_reference_points=reference_points)", "docstring": "Args:\n vision_features (`torch.FloatTensor`): The sequence of vision features. 
shape depends on the vision\n backbone.\n class_features (`torch.FloatTensor`): The sequence of class features of shape\n `(class_sequence_length, batch_size, class_embed_dim)`.\n task_features (`torch.FloatTensor`): The sequence of task features of shape\n `(task_sequence_length, batch_size, decoder_hidden_dim)`.\n task_mask (`torch.LongTensor`): The mask for the task features of shape `(batch_size, task_sequence_length)`.\n output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention\n layers. See `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See\n `hidden_states` under returned tensors for more detail.\n return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain\n tuple."} +{"repo": "keras", "function": "def path_to_string(path):\n if isinstance(path, os.PathLike):\n return os.fspath(path)\n return path", "docstring": "Convert `PathLike` objects to their string representation.\n\nIf given a non-string typed path object, converts it to its string\nrepresentation.\n\nIf the object passed to `path` is not among the above, then it is\nreturned unchanged. This allows e.g. passthrough of file objects\nthrough this function.\n\nArgs:\n path: `PathLike` object that represents a path\n\nReturns:\n A string representation of the path argument, if Python support exists."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'\n if start_positions is not None:\n slen, hsz = hidden_states.shape[-2:]\n start_positions = start_positions[:, None, None].expand(-1, -1, hsz)\n start_states = hidden_states.gather(-2, start_positions)\n start_states = start_states.expand(-1, slen, -1)\n x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))\n x = self.activation(x)\n x = self.LayerNorm(x)\n x = self.dense_1(x).squeeze(-1)\n if p_mask is not None:\n if get_parameter_dtype(self) == torch.float16:\n x = x * (1 - p_mask) - 65500 * p_mask\n else:\n x = x * (1 - p_mask) - 1e+30 * p_mask\n return x", "docstring": "Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):\n The final hidden states of the model.\n start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):\n The hidden states of the first tokens for the labeled span.\n start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n The position of the first token for the labeled span.\n p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):\n Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token\n should be masked.\n\n\n\nOne of `start_states` or `start_positions` should be not `None`. 
If both are set, `start_positions` overrides\n`start_states`.\n\n\n\nReturns:\n `torch.FloatTensor`: The end logits for SQuAD."} +{"repo": "transformers", "function": "def HfArg(*, aliases: Optional[Union[str, list[str]]]=None, help: Optional[str]=None, default: Any=dataclasses.MISSING, default_factory: Callable[[], Any]=dataclasses.MISSING, metadata: Optional[dict]=None, **kwargs) -> dataclasses.Field:\n if metadata is None:\n metadata = {}\n if aliases is not None:\n metadata['aliases'] = aliases\n if help is not None:\n metadata['help'] = help\n return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)", "docstring": "Argument helper enabling a concise syntax to create dataclass fields for parsing with `HfArgumentParser`.\n\nExample comparing the use of `HfArg` and `dataclasses.field`:\n```\n@dataclass\nclass Args:\n regular_arg: str = dataclasses.field(default=\"Huggingface\", metadata={\"aliases\": [\"--example\", \"-e\"], \"help\": \"This syntax could be better!\"})\n hf_arg: str = HfArg(default=\"Huggingface\", aliases=[\"--example\", \"-e\"], help=\"What a nice syntax!\")\n```\n\nArgs:\n aliases (Union[str, List[str]], optional):\n Single string or list of strings of aliases to pass on to argparse, e.g. `aliases=[\"--example\", \"-e\"]`.\n Defaults to None.\n help (str, optional): Help string to pass on to argparse that can be displayed with --help. Defaults to None.\n default (Any, optional):\n Default value for the argument. If not default or default_factory is specified, the argument is required.\n Defaults to dataclasses.MISSING.\n default_factory (Callable[[], Any], optional):\n The default_factory is a 0-argument function called to initialize a field's value. It is useful to provide\n default values for mutable types, e.g. lists: `default_factory=list`. Mutually exclusive with `default=`.\n Defaults to dataclasses.MISSING.\n metadata (dict, optional): Further metadata to pass on to `dataclasses.field`. Defaults to None.\n\nReturns:\n Field: A `dataclasses.Field` with the desired properties."} +{"repo": "transformers", "function": "def get_lagged_subsequences(self, sequence: torch.Tensor, subsequences_length: int, shift: int=0) -> torch.Tensor:\n sequence_length = sequence.shape[1]\n indices = [lag - shift for lag in self.config.lags_sequence]\n if max(indices) + subsequences_length > sequence_length:\n raise ValueError(f'lags cannot go further than history length, found lag {max(indices)} while history length is only {sequence_length}')\n lagged_values = []\n for lag_index in indices:\n begin_index = -lag_index - subsequences_length\n end_index = -lag_index if lag_index > 0 else None\n lagged_values.append(sequence[:, begin_index:end_index, ...])\n return torch.stack(lagged_values, dim=-1)", "docstring": "Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I),\n where S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i,\n j, :, k] = sequence[i, -indices[k]-S+j, :].\n\nArgs:\n sequence: Tensor\n The sequence from which lagged subsequences should be extracted. 
Shape: (N, T, C).\n subsequences_length : int\n Length of the subsequences to be extracted.\n shift: int\n Shift the lags by this amount back."} +{"repo": "transformers", "function": "class ForcedBOSTokenLogitsProcessor(LogitsProcessor):\n\n def __init__(self, bos_token_id: int):\n self.bos_token_id = bos_token_id\n\n @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n cur_len = input_ids.shape[-1]\n scores_processed = scores\n if cur_len == 1:\n scores_processed = torch.full_like(scores, -math.inf)\n scores_processed[:, self.bos_token_id] = 0\n return scores_processed", "docstring": "[`LogitsProcessor`] that enforces the specified token as the first generated token. Used with encoder-decoder\nmodels.\n\nArgs:\n bos_token_id (`int`):\n The id of the token to force as the first generated token.\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM\n\n>>> model = AutoModelForSeq2SeqLM.from_pretrained(\"google/flan-t5-small\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/flan-t5-small\")\n\n>>> inputs = tokenizer(\"Translate from English to German: I love cats.\", return_tensors=\"pt\")\n\n>>> # By default, it continues generating according to the model's logits\n>>> outputs = model.generate(**inputs, max_new_tokens=10)\n>>> print(tokenizer.batch_decode(outputs)[0])\n Ich liebe Kitty.\n\n>>> # We can use `forced_bos_token_id` to force the start of generation with an encoder-decoder model\n>>> # (including forcing it to end straight away with an EOS token)\n>>> outputs = model.generate(**inputs, max_new_tokens=10, forced_bos_token_id=tokenizer.eos_token_id)\n>>> print(tokenizer.batch_decode(outputs)[0])\n\n```"} +{"repo": "beam", "function": "class ApproximateQuantilesCombineFn(CombineFn):\n _offset_jitter = 0\n _MAX_NUM_ELEMENTS = 1000000000.0\n _qs = None\n\n def __init__(self, num_quantiles, buffer_size, num_buffers, key=None, reverse=False, weighted=False, input_batched=False):\n self._num_quantiles = num_quantiles\n self._spec = _QuantileSpec(buffer_size, num_buffers, weighted, key, reverse)\n self._input_batched = input_batched\n if self._input_batched:\n setattr(self, 'add_input', self._add_inputs)\n\n def __reduce__(self):\n return (self.__class__, (self._num_quantiles, self._spec.buffer_size, self._spec.num_buffers, self._spec.key, self._spec.reverse, self._spec.weighted, self._input_batched))\n\n @classmethod\n def create(cls, num_quantiles, epsilon=None, max_num_elements=None, key=None, reverse=False, weighted=False, input_batched=False):\n \"\"\"\n Creates an approximate quantiles combiner with the given key and desired\n number of quantiles.\n\n Args:\n num_quantiles: Number of quantiles to produce. It is the size of the\n final output list, including the minimum and maximum value items.\n epsilon: (optional) The default error bound is `epsilon`, which holds as\n long as the number of elements is less than `_MAX_NUM_ELEMENTS`.\n Specifically, if one considers the input as a sorted list x_1, ...,\n x_N, then the distance between each exact quantile x_c and its\n approximation x_c' is bounded by `|c - c'| < epsilon * N`. Note that\n these errors are worst-case scenarios. 
In practice the accuracy tends\n to be much better.\n max_num_elements: (optional) The cost (in time and space) to compute\n quantiles to a given accuracy is a function of the total number of\n elements in the data set.\n key: (optional) Key is a mapping of elements to a comparable key, similar\n to the key argument of Python's sorting methods.\n reverse: (optional) whether to order things smallest to largest, rather\n than largest to smallest.\n weighted: (optional) if set to True, the combiner produces weighted\n quantiles. The input elements are then expected to be tuples of values\n with the corresponding weight.\n input_batched: (optional) if set to True, inputs are expected to be\n batches of elements.\n \"\"\"\n max_num_elements = max_num_elements or cls._MAX_NUM_ELEMENTS\n if not epsilon:\n epsilon = min(0.01, 1.0 / num_quantiles) if weighted else 1.0 / num_quantiles\n b = 2\n while (b - 2) * (1 << b - 2) < epsilon * max_num_elements:\n b = b + 1\n b = b - 1\n k = max(2, int(math.ceil(max_num_elements / float(1 << b - 1))))\n return cls(num_quantiles=num_quantiles, buffer_size=k, num_buffers=b, key=key, reverse=reverse, weighted=weighted, input_batched=input_batched)\n\n def _offset(self, new_weight):\n \"\"\"\n If the weight is even, we must round up or down. Alternate between these\n two options to avoid a bias.\n \"\"\"\n if new_weight % 2 == 1:\n return (new_weight + 1) / 2\n else:\n self._offset_jitter = 2 - self._offset_jitter\n return (new_weight + self._offset_jitter) / 2\n\n def create_accumulator(self):\n self._qs = _QuantileState(unbuffered_elements=[], unbuffered_weights=[], buffers=[], spec=self._spec)\n return self._qs\n\n def add_input(self, quantile_state, element):\n \"\"\"\n Add a new element to the collection being summarized by quantile state.\n \"\"\"\n quantile_state.add_unbuffered([element], self._offset)\n return quantile_state\n\n def _add_inputs(self, quantile_state, elements):\n \"\"\"\n Add a batch of elements to the collection being summarized by quantile\n state.\n \"\"\"\n if len(elements) == 0:\n return quantile_state\n quantile_state.add_unbuffered(elements, self._offset)\n return quantile_state\n\n def merge_accumulators(self, accumulators):\n \"\"\"Merges all the accumulators (quantile state) as one.\"\"\"\n qs = self.create_accumulator()\n for accumulator in accumulators:\n if accumulator.is_empty():\n continue\n if self._spec.weighted:\n qs.add_unbuffered([accumulator.unbuffered_elements, accumulator.unbuffered_weights], self._offset)\n else:\n qs.add_unbuffered(accumulator.unbuffered_elements, self._offset)\n qs.buffers.extend(accumulator.buffers)\n heapq.heapify(qs.buffers)\n qs.collapse_if_needed(self._offset)\n return qs\n\n def extract_output(self, accumulator):\n \"\"\"\n Outputs num_quantiles elements consisting of the minimum, maximum and\n num_quantiles - 2 evenly spaced intermediate elements. 
Returns the empty\n list if no elements have been added.\n \"\"\"\n if accumulator.is_empty():\n return []\n accumulator.finalize()\n all_elems = accumulator.buffers\n total_weight = 0\n if self._spec.weighted:\n for buffer_elem in all_elems:\n total_weight += sum(buffer_elem.weights)\n else:\n for buffer_elem in all_elems:\n total_weight += len(buffer_elem.elements) * buffer_elem.weights[0]\n step = total_weight / (self._num_quantiles - 1)\n offset = (total_weight - 1) / (self._num_quantiles - 1)\n quantiles, _, min_val, max_val = _interpolate(all_elems, self._num_quantiles - 2, step, offset, self._spec)\n return [min_val] + quantiles + [max_val]", "docstring": "This combiner gives an idea of the distribution of a collection of values\nusing approximate N-tiles. The output of this combiner is the list of size of\nthe number of quantiles (num_quantiles), containing the input values of the\nminimum value item of the list, the intermediate values (n-tiles) and the\nmaximum value item of the list, in the sort order provided via key (similar\nto the key argument of Python's sorting methods).\n\nIf there are fewer values to combine than the number of quantile\n(num_quantiles), then the resulting list will contain all the values being\ncombined, in sorted order.\n\nIf no `key` is provided, then the results are sorted in the natural order.\n\nTo evaluate the quantiles, we use the \"New Algorithm\" described here:\n\n[MRL98] Manku, Rajagopalan & Lindsay, \"Approximate Medians and other\nQuantiles in One Pass and with Limited Memory\", Proc. 1998 ACM SIGMOD,\nVol 27, No 2, p 426-435, June 1998.\nhttp://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.6.6513&rep=rep1\n&type=pdf\n\nNote that the weighted quantiles are evaluated using a generalized version of\nthe algorithm referenced in the paper.\n\nThe default error bound is (1 / num_quantiles) for uniformly distributed data\nand min(1e-2, 1 / num_quantiles) for weighted case, though in practice the\naccuracy tends to be much better.\n\nArgs:\n num_quantiles: Number of quantiles to produce. It is the size of the final\n output list, including the mininum and maximum value items.\n buffer_size: The size of the buffers, corresponding to k in the referenced\n paper.\n num_buffers: The number of buffers, corresponding to b in the referenced\n paper.\n key: (optional) Key is a mapping of elements to a comparable key, similar\n to the key argument of Python's sorting methods.\n reverse: (optional) whether to order things smallest to largest, rather\n than largest to smallest.\n weighted: (optional) if set to True, the combiner produces weighted\n quantiles. 
The input elements are then expected to be tuples of input\n values with the corresponding weight.\n input_batched: (optional) if set to True, inputs are expected to be batches\n of elements."} +{"repo": "transformers", "function": "def insert_tokenizer_in_auto_module(old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns):\n if old_model_patterns.tokenizer_class is None or new_model_patterns.tokenizer_class is None:\n return\n with open(TRANSFORMERS_PATH / 'models' / 'auto' / 'tokenization_auto.py', 'r', encoding='utf-8') as f:\n content = f.read()\n pattern_tokenizer = re.compile('^\\\\s*TOKENIZER_MAPPING_NAMES\\\\s*=\\\\s*OrderedDict\\\\b')\n lines = content.split('\\n')\n idx = 0\n while not pattern_tokenizer.search(lines[idx]):\n idx += 1\n idx += 1\n while not lines[idx].startswith('TOKENIZER_MAPPING = _LazyAutoMapping'):\n if lines[idx].endswith(','):\n block = lines[idx]\n else:\n block = []\n while not lines[idx].startswith(' ),'):\n block.append(lines[idx])\n idx += 1\n block.append(lines[idx])\n block = '\\n'.join(block)\n idx += 1\n if f'\"{old_model_patterns.model_type}\"' in block and old_model_patterns.tokenizer_class in block:\n break\n new_block = block.replace(old_model_patterns.model_type, new_model_patterns.model_type)\n new_block = new_block.replace(old_model_patterns.tokenizer_class, new_model_patterns.tokenizer_class)\n new_lines = lines[:idx] + [new_block] + lines[idx:]\n with open(TRANSFORMERS_PATH / 'models' / 'auto' / 'tokenization_auto.py', 'w', encoding='utf-8') as f:\n f.write('\\n'.join(new_lines))", "docstring": "Add a tokenizer to the relevant mappings in the auto module.\n\nArgs:\n old_model_patterns (`ModelPatterns`): The patterns for the old model.\n new_model_patterns (`ModelPatterns`): The patterns for the new model."} +{"repo": "starthinker", "function": "def _process_new(self, feed_item):\n return {'assetIdentifier': {'name': feed_item.get(FieldMap.CREATIVE_ASSET_FILE_NAME, None), 'type': feed_item.get(FieldMap.CREATIVE_TYPE, None)}}", "docstring": "Creates a new creative asset DCM object from a feed item representing a creative asset from the Bulkdozer feed.\n\nThis function simply creates the object to be inserted later by the BaseDAO\nobject.\n\nArgs:\n feed_item: Feed item representing the creative asset from the Bulkdozer\n feed.\n\nReturns:\n A creative asset object ready to be inserted in DCM through the API."} +{"repo": "tensorflow", "function": "def softmax_cross_entropy_with_logits_v2_helper(labels, logits, axis=None, name=None, dim=None):\n axis = deprecated_argument_lookup('axis', axis, 'dim', dim)\n del dim\n if axis is None:\n axis = -1\n with ops.name_scope(name, 'softmax_cross_entropy_with_logits', [logits, labels]) as name:\n logits = ops.convert_to_tensor(logits, name='logits')\n labels = ops.convert_to_tensor(labels, name='labels')\n convert_to_float32 = logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16\n precise_logits = math_ops.cast(logits, dtypes.float32) if convert_to_float32 else logits\n labels = math_ops.cast(labels, precise_logits.dtype)\n input_rank = array_ops.rank(precise_logits)\n shape = logits.get_shape()\n if axis != -1:\n\n def _move_dim_to_end(tensor, dim_index, rank):\n return array_ops.transpose(tensor, array_ops.concat([math_ops.range(dim_index), math_ops.range(dim_index + 1, rank), [dim_index]], 0))\n precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)\n labels = _move_dim_to_end(labels, axis, input_rank)\n input_shape = array_ops.shape(precise_logits)\n 
precise_logits = _flatten_outer_dims(precise_logits)\n labels = _flatten_outer_dims(labels)\n if config.is_op_determinism_enabled():\n log_probs = log_softmax_v2(precise_logits)\n cost = -math_ops.reduce_sum(labels * log_probs, axis=1)\n else:\n cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(precise_logits, labels, name=name)\n output_shape = array_ops.slice(input_shape, [0], [math_ops.subtract(input_rank, 1)])\n cost = array_ops.reshape(cost, output_shape)\n if not context.executing_eagerly() and shape is not None and (shape.dims is not None):\n shape = shape.as_list()\n del shape[axis]\n cost.set_shape(shape)\n if convert_to_float32:\n return math_ops.cast(cost, logits.dtype)\n else:\n return cost", "docstring": "Computes softmax cross entropy between `logits` and `labels`.\n\nMeasures the probability error in discrete classification tasks in which the\nclasses are mutually exclusive (each entry is in exactly one class). For\nexample, each CIFAR-10 image is labeled with one and only one label: an image\ncan be a dog or a truck, but not both.\n\n**NOTE:** While the classes are mutually exclusive, their probabilities\nneed not be. All that is required is that each row of `labels` is\na valid probability distribution. If they are not, the computation of the\ngradient will be incorrect.\n\nIf using exclusive `labels` (wherein one and only\none class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.\n\n**WARNING:** This op expects unscaled logits, since it performs a `softmax`\non `logits` internally for efficiency. Do not call this op with the\noutput of `softmax`, as it will produce incorrect results.\n\nA common use case is to have logits and labels of shape\n`[batch_size, num_classes]`, but higher dimensions are supported, with\nthe `axis` argument specifying the class dimension.\n\n`logits` and `labels` must have the same dtype (either `float16`, `float32`,\nor `float64`).\n\nBackpropagation will happen into both `logits` and `labels`. To disallow\nbackpropagation into `labels`, pass label tensors through `tf.stop_gradient`\nbefore feeding it to this function.\n\n**Note that to avoid confusion, it is required to pass only named arguments to\nthis function.**\n\nArgs:\n labels: Each vector along the class dimension should hold a valid\n probability distribution e.g. for the case in which labels are of shape\n `[batch_size, num_classes]`, each row of `labels[i]` must be a valid\n probability distribution.\n logits: Unscaled log probabilities.\n axis: The class dimension. Defaulted to -1 which is the last dimension.\n name: A name for the operation (optional).\n dim: Deprecated alias for axis.\n\nReturns:\n A `Tensor` that contains the softmax cross entropy loss. Its type is the\n same as `logits` and its shape is the same as `labels` except that it does\n not have the last dimension of `labels`."} +{"repo": "tensorflow", "function": "def input_shape(self):\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called and thus has no defined input shape.')\n all_input_shapes = set([str(node.input_shapes) for node in self._inbound_nodes])\n if len(all_input_shapes) == 1:\n return self._inbound_nodes[0].input_shapes\n else:\n raise AttributeError('The layer \"' + str(self.name) + ' has multiple inbound nodes, with different input shapes. Hence the notion of \"input shape\" is ill-defined for the layer. 
Use `get_input_shape_at(node_index)` instead.')", "docstring": "Retrieves the input shape(s) of a layer.\n\nOnly applicable if the layer has exactly one input,\ni.e. if it is connected to one incoming layer, or if all inputs\nhave the same shape.\n\nReturns:\n Input shape, as an integer shape tuple\n (or list of shape tuples, one tuple per input tensor).\n\nRaises:\n AttributeError: if the layer has no defined input_shape.\n RuntimeError: if called in Eager mode."} +{"repo": "beam", "function": "def build_worker_instruction(*args):\n tuple_class = collections.namedtuple(*args)\n tuple_class.__str__ = worker_object_to_string\n tuple_class.__repr__ = worker_object_to_string\n return tuple_class", "docstring": "Create an object representing a ParallelInstruction protobuf.\n\nThis will be a collections.namedtuple with a custom __str__ method.\n\nAlas, this wrapper is not known to pylint, which thinks it creates\nconstants. You may have to put a disable=invalid-name pylint\nannotation on any use of this, depending on your names.\n\nArgs:\n *args: first argument is the name of the type to create. Should\n start with \"Worker\". Second arguments is alist of the\n attributes of this object.\nReturns:\n A new class, a subclass of tuple, that represents the protobuf."} +{"repo": "keras", "function": "def matmul(x1, x2):\n if any_symbolic_tensors((x1, x2)):\n return Matmul().symbolic_call(x1, x2)\n return backend.numpy.matmul(x1, x2)", "docstring": "Matrix product of two tensors.\n\n- If both tensors are 1-dimensional, the dot product (scalar) is returned.\n- If either tensor is N-D, N > 2, it is treated as a stack of matrices\n residing in the last two indexes and broadcast accordingly.\n- If the first tensor is 1-D, it is promoted to a matrix by prepending\n a 1 to its dimensions. After matrix multiplication the prepended\n 1 is removed.\n- If the second tensor is 1-D, it is promoted to a matrix by appending a 1\n to its dimensions. 
After matrix multiplication the appended 1 is removed.\n\nArgs:\n x1: First tensor.\n x2: Second tensor.\n\nReturns:\n Output tensor, matrix product of the inputs."} +{"repo": "keras", "function": "def compare(self, reference_model):\n self.console.print('Running comparison')\n ref_spec = {}\n get_weight_spec_of_saveable(reference_model, ref_spec)\n\n def _compare(target, ref_spec, inner_path, target_name, ref_name, error_count, match_count, checked_paths):\n base_inner_path = inner_path\n for ref_key, ref_val in ref_spec.items():\n inner_path = base_inner_path + '/' + ref_key\n if inner_path in checked_paths:\n continue\n if ref_key not in target:\n error_count += 1\n checked_paths.add(inner_path)\n if isinstance(ref_val, dict):\n self.console.print(f'[color(160)]...Object [bold]{inner_path}[/] present in {ref_name}, missing from {target_name}[/]')\n self.console.print(f' In {ref_name}, {inner_path} contains the following keys: {list(ref_val.keys())}')\n else:\n self.console.print(f'[color(160)]...Weight [bold]{inner_path}[/] present in {ref_name}, missing from {target_name}[/]')\n elif isinstance(ref_val, dict):\n _error_count, _match_count = _compare(target[ref_key], ref_spec[ref_key], inner_path, target_name, ref_name, error_count=error_count, match_count=match_count, checked_paths=checked_paths)\n error_count += _error_count\n match_count += _match_count\n elif target[ref_key].shape != ref_val.shape:\n error_count += 1\n checked_paths.add(inner_path)\n self.console.print(f'[color(160)]...Weight shape mismatch for [bold]{inner_path}[/][/]\\n In {ref_name}: shape={ref_val.shape}\\n In {target_name}: shape={target[ref_key].shape}')\n else:\n match_count += 1\n return (error_count, match_count)\n checked_paths = set()\n error_count, match_count = _compare(self.weights_dict, ref_spec, inner_path='', target_name='saved file', ref_name='reference model', error_count=0, match_count=0, checked_paths=checked_paths)\n _error_count, _ = _compare(ref_spec, self.weights_dict, inner_path='', target_name='reference model', ref_name='saved file', error_count=0, match_count=0, checked_paths=checked_paths)\n error_count += _error_count\n self.console.print('\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500')\n if error_count == 0:\n status = 'success'\n self.console.print('[color(28)][bold]Comparison successful:[/] saved file is compatible with the reference model[/]')\n if match_count == 1:\n plural = ''\n else:\n plural = 's'\n self.console.print(f' Found {match_count} matching weight{plural}')\n else:\n status = 'error'\n if error_count == 1:\n plural = ''\n else:\n plural = 's'\n self.console.print(f'[color(160)][bold]Found {error_count} error{plural}:[/] saved file is not compatible with the reference model[/]')\n return {'status': status, 'error_count': error_count, 'match_count': match_count}", "docstring": "Compares the opened file to a reference model.\n\nThis method will list all mismatches between the\ncurrently opened file and the provided reference model.\n\nArgs:\n reference_model: Model instance to compare to.\n\nReturns:\n Dict with the following keys:\n `'status'`, `'error_count'`, `'match_count'`.\n Status can be `'success'` or `'error'`.\n `'error_count'` is the number of mismatches found.\n `'match_count'` is the number of matching weights found."} +{"repo": "transformers", "function": "def get_text_features(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, position_ids: 
tf.Tensor | None=None, return_dict: Optional[bool]=None) -> tf.Tensor:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n text_outputs = self.blip.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, return_dict=return_dict)\n pooled_output = text_outputs[1]\n text_features = self.blip.text_projection(pooled_output)\n return text_features", "docstring": "Returns:\n text_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying\n the projection layer to the pooled output of [`TFBlipTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoProcessor, TFBlipModel\n\n>>> model = TFBlipModel.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n>>> processor = AutoProcessor.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n\n>>> inputs = processor(text=[\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"tf\")\n>>> text_features = model.get_text_features(**inputs)\n```"} +{"repo": "pytype", "function": "def trace(name, *trace_args):\n\n def decorator(f):\n\n def wrapper(*args, **kwargs):\n t = tracer(name)\n if t.getEffectiveLevel() < logging.DEBUG:\n return f(*args, **kwargs)\n argspec = inspect.getfullargspec(f)\n t.debug('%s: {', f.__name__)\n for arg in trace_args:\n if isinstance(arg, int):\n argname = argspec.args[arg]\n val = args[arg]\n else:\n argname = arg\n val = kwargs[arg]\n t.debug('%s: %s = %s', f.__name__, argname, show(val))\n ret = f(*args, **kwargs)\n t.debug('%s: -> %s', f.__name__, show(ret))\n t.debug('%s: }', f.__name__)\n return ret\n return wrapper\n return decorator", "docstring": "Record args and return value for a function call.\n\nThe trace is of the form\n function name: {\n function name: arg = value\n function name: arg = value\n ...\n function name: -> return\n function name: }\n\nThis will let us write tools to pretty print the traces with indentation etc.\n\nArgs:\n name: module name, usually `__name__`\n *trace_args: function arguments to log\n\nReturns:\n a decorator"} +{"repo": "transformers", "function": "class InformerConfig(PretrainedConfig):\n model_type = 'informer'\n attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers'}\n\n def __init__(self, prediction_length: Optional[int]=None, context_length: Optional[int]=None, distribution_output: str='student_t', loss: str='nll', input_size: int=1, lags_sequence: Optional[List[int]]=None, scaling: Optional[Union[str, bool]]='mean', num_dynamic_real_features: int=0, num_static_real_features: int=0, num_static_categorical_features: int=0, num_time_features: int=0, cardinality: Optional[List[int]]=None, embedding_dimension: Optional[List[int]]=None, d_model: int=64, encoder_ffn_dim: int=32, decoder_ffn_dim: int=32, encoder_attention_heads: int=2, decoder_attention_heads: int=2, encoder_layers: int=2, decoder_layers: int=2, is_encoder_decoder: bool=True, activation_function: str='gelu', dropout: float=0.05, encoder_layerdrop: float=0.1, decoder_layerdrop: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, num_parallel_samples: int=100, init_std: float=0.02, use_cache=True, attention_type: str='prob', sampling_factor: int=5, distil: bool=True, **kwargs):\n self.prediction_length = prediction_length\n self.context_length = context_length or prediction_length\n self.distribution_output = distribution_output\n self.loss = loss\n self.input_size = 
input_size\n self.num_time_features = num_time_features\n self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]\n self.scaling = scaling\n self.num_dynamic_real_features = num_dynamic_real_features\n self.num_static_real_features = num_static_real_features\n self.num_static_categorical_features = num_static_categorical_features\n if cardinality and num_static_categorical_features > 0:\n if len(cardinality) != num_static_categorical_features:\n raise ValueError('The cardinality should be a list of the same length as `num_static_categorical_features`')\n self.cardinality = cardinality\n else:\n self.cardinality = [0]\n if embedding_dimension and num_static_categorical_features > 0:\n if len(embedding_dimension) != num_static_categorical_features:\n raise ValueError('The embedding dimension should be a list of the same length as `num_static_categorical_features`')\n self.embedding_dimension = embedding_dimension\n else:\n self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]\n self.num_parallel_samples = num_parallel_samples\n self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features\n self.d_model = d_model\n self.encoder_attention_heads = encoder_attention_heads\n self.decoder_attention_heads = decoder_attention_heads\n self.encoder_ffn_dim = encoder_ffn_dim\n self.decoder_ffn_dim = decoder_ffn_dim\n self.encoder_layers = encoder_layers\n self.decoder_layers = decoder_layers\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation_dropout = activation_dropout\n self.encoder_layerdrop = encoder_layerdrop\n self.decoder_layerdrop = decoder_layerdrop\n self.activation_function = activation_function\n self.init_std = init_std\n self.use_cache = use_cache\n self.attention_type = attention_type\n self.sampling_factor = sampling_factor\n self.distil = distil\n super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)\n\n @property\n def _number_of_features(self) -> int:\n return sum(self.embedding_dimension) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2", "docstring": "This is the configuration class to store the configuration of an [`InformerModel`]. It is used to instantiate an\nInformer model according to the specified arguments, defining the model architecture. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the Informer\n[huggingface/informer-tourism-monthly](https://huggingface.co/huggingface/informer-tourism-monthly) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n prediction_length (`int`):\n The prediction length for the decoder. In other words, the prediction horizon of the model. This value is\n typically dictated by the dataset and we recommend to set it appropriately.\n context_length (`int`, *optional*, defaults to `prediction_length`):\n The context length for the encoder. If `None`, the context length will be the same as the\n `prediction_length`.\n distribution_output (`string`, *optional*, defaults to `\"student_t\"`):\n The distribution emission head for the model. Could be either \"student_t\", \"normal\" or \"negative_binomial\".\n loss (`string`, *optional*, defaults to `\"nll\"`):\n The loss function for the model corresponding to the `distribution_output` head. 
For parametric\n distributions it is the negative log likelihood (nll) - which currently is the only supported one.\n input_size (`int`, *optional*, defaults to 1):\n The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of\n multivariate targets.\n scaling (`string` or `bool`, *optional* defaults to `\"mean\"`):\n Whether to scale the input targets via \"mean\" scaler, \"std\" scaler or no scaler if `None`. If `True`, the\n scaler is set to \"mean\".\n lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):\n The lags of the input time series as covariates often dictated by the frequency of the data. Default is\n `[1, 2, 3, 4, 5, 6, 7]` but we recommend to change it based on the dataset appropriately.\n num_time_features (`int`, *optional*, defaults to 0):\n The number of time features in the input time series.\n num_dynamic_real_features (`int`, *optional*, defaults to 0):\n The number of dynamic real valued features.\n num_static_categorical_features (`int`, *optional*, defaults to 0):\n The number of static categorical features.\n num_static_real_features (`int`, *optional*, defaults to 0):\n The number of static real valued features.\n cardinality (`list[int]`, *optional*):\n The cardinality (number of different values) for each of the static categorical features. Should be a list\n of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if\n `num_static_categorical_features` is > 0.\n embedding_dimension (`list[int]`, *optional*):\n The dimension of the embedding for each of the static categorical features. Should be a list of integers,\n having the same length as `num_static_categorical_features`. Cannot be `None` if\n `num_static_categorical_features` is > 0.\n d_model (`int`, *optional*, defaults to 64):\n Dimensionality of the transformer layers.\n encoder_layers (`int`, *optional*, defaults to 2):\n Number of encoder layers.\n decoder_layers (`int`, *optional*, defaults to 2):\n Number of decoder layers.\n encoder_attention_heads (`int`, *optional*, defaults to 2):\n Number of attention heads for each attention layer in the Transformer encoder.\n decoder_attention_heads (`int`, *optional*, defaults to 2):\n Number of attention heads for each attention layer in the Transformer decoder.\n encoder_ffn_dim (`int`, *optional*, defaults to 32):\n Dimension of the \"intermediate\" (often named feed-forward) layer in encoder.\n decoder_ffn_dim (`int`, *optional*, defaults to 32):\n Dimension of the \"intermediate\" (often named feed-forward) layer in decoder.\n activation_function (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and decoder. 
If string, `\"gelu\"` and\n `\"relu\"` are supported.\n dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the encoder, and decoder.\n encoder_layerdrop (`float`, *optional*, defaults to 0.1):\n The dropout probability for the attention and fully connected layers for each encoder layer.\n decoder_layerdrop (`float`, *optional*, defaults to 0.1):\n The dropout probability for the attention and fully connected layers for each decoder layer.\n attention_dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for the attention probabilities.\n activation_dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability used between the two layers of the feed-forward networks.\n num_parallel_samples (`int`, *optional*, defaults to 100):\n The number of samples to generate in parallel for each time step of inference.\n init_std (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated normal weight initialization distribution.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.\n attention_type (`str`, *optional*, defaults to \"prob\"):\n Attention used in encoder. This can be set to \"prob\" (Informer's ProbAttention) or \"full\" (vanilla\n transformer's canonical self-attention).\n sampling_factor (`int`, *optional*, defaults to 5):\n ProbSparse sampling factor (only makes affect when `attention_type`=\"prob\"). It is used to control the\n reduced query matrix (Q_reduce) input length.\n distil (`bool`, *optional*, defaults to `True`):\n Whether to use distilling in encoder.\n\nExample:\n\n```python\n>>> from transformers import InformerConfig, InformerModel\n\n>>> # Initializing an Informer configuration with 12 time steps for prediction\n>>> configuration = InformerConfig(prediction_length=12)\n\n>>> # Randomly initializing a model (with random weights) from the configuration\n>>> model = InformerModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "class DepthProPreActResidualLayer(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.use_batch_norm = config.use_batch_norm_in_fusion_residual\n use_bias_in_fusion_residual = config.use_bias_in_fusion_residual if config.use_bias_in_fusion_residual is not None else not self.use_batch_norm\n self.activation1 = nn.ReLU()\n self.convolution1 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)\n self.activation2 = nn.ReLU()\n self.convolution2 = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=3, stride=1, padding=1, bias=use_bias_in_fusion_residual)\n if self.use_batch_norm:\n self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size)\n self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size)\n\n def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:\n residual = hidden_state\n hidden_state = self.activation1(hidden_state)\n hidden_state = self.convolution1(hidden_state)\n if self.use_batch_norm:\n hidden_state = self.batch_norm1(hidden_state)\n hidden_state = self.activation2(hidden_state)\n hidden_state = self.convolution2(hidden_state)\n if self.use_batch_norm:\n hidden_state = self.batch_norm2(hidden_state)\n return hidden_state + residual", "docstring": "ResidualConvUnit, pre-activate residual unit.\n\nArgs:\n 
config (`[DepthProConfig]`):\n Model configuration class defining the model architecture."} +{"repo": "tensorflow", "function": "def batch(self, spec, batch_size):\n raise NotImplementedError(f'{type(self).__name__}.batch')", "docstring": "Returns the TypeSpec representing a batch of values described by `spec`.\n\nArgs:\n spec: The `TypeSpec` for an individual value.\n batch_size: An `int` indicating the number of values that are batched\n together, or `None` if the batch size is not known.\n\nReturns:\n A `TypeSpec` for a batch of values."} +{"repo": "transformers", "function": "def _repr_to_list(value: torch.Tensor):\n torch.set_printoptions(sci_mode=True, linewidth=120)\n with StringIO() as buf, redirect_stdout(buf):\n print(value)\n raw = buf.getvalue()\n return _sanitize_repr_for_diff(raw).splitlines()", "docstring": "Converts a tensor into a sanitized multi-line string representation.\n\nArgs:\n value (`torch.Tensor`): The tensor to represent.\n\nReturns:\n `List[str]`: List of string lines representing the tensor."} +{"repo": "transformers", "function": "def split_code_into_blocks(lines: List[str], start_index: int, end_index: int, indent: int, backtrace: bool=False) -> List[Tuple[str, int, int]]:\n splits = []\n try:\n target_block_name = re.search(f'^{' ' * (indent - 4)}((class|def)\\\\s+\\\\S+)(\\\\(|\\\\:)', lines[start_index]).groups()[0]\n except Exception:\n start_context = min(start_index - 10, 0)\n end_context = min(end_index + 10, len(lines))\n raise ValueError(f'Tried to split a class or function. It did not work. Error comes from line {start_index}: \\n```\\n' + ''.join(lines[start_context:end_context]) + '```\\n')\n indent_str = ' ' * indent\n block_without_name_idx = 0\n empty_block_idx = 0\n index = start_index\n if '(' in lines[start_index] and '):' not in lines[start_index] in lines[start_index]:\n while index < end_index:\n if _is_definition_header_ending_line(lines[index]):\n break\n index += 1\n index += 1\n splits.append((target_block_name, start_index, index))\n block_start_index, prev_block_end_index = (index, index)\n while index < end_index:\n block_found = re.search(f'^{indent_str}((class|def)\\\\s+\\\\S+)(\\\\(|\\\\:)', lines[index])\n if block_found:\n name = block_found.groups()[0]\n block_end_index = find_block_end(lines, index, indent + 4)\n block_start_index = index\n if index > prev_block_end_index and backtrace:\n idx = index - 1\n for idx in range(index - 1, prev_block_end_index - 2, -1):\n if not (len(lines[idx].strip()) > 0 and lines[idx].startswith(indent_str)):\n break\n idx += 1\n if idx < index:\n block_start_index = idx\n if block_start_index > prev_block_end_index:\n if len(''.join(lines[prev_block_end_index:block_start_index]).strip()) == 0:\n prev_block_name = f'_empty_block_{empty_block_idx}'\n empty_block_idx += 1\n else:\n prev_block_name = f'_block_without_name_{block_without_name_idx}'\n block_without_name_idx += 1\n splits.append((prev_block_name, prev_block_end_index, block_start_index))\n splits.append((name, block_start_index, block_end_index))\n prev_block_end_index = block_end_index\n index = block_end_index - 1\n index += 1\n if index > prev_block_end_index:\n if len(''.join(lines[prev_block_end_index:index]).strip()) == 0:\n prev_block_name = f'_empty_block_{empty_block_idx}'\n else:\n prev_block_name = f'_block_without_name_{block_without_name_idx}'\n splits.append((prev_block_name, prev_block_end_index, index))\n return splits", "docstring": "Split the class/func block starting at `start_index` in a source code (defined by 
`lines`) into *inner blocks*.\n\nThe block's header is included as the first element. The contiguous regions (without empty lines) that are not\ninside any inner block are included as blocks. The contiguous regions of empty lines that are not inside any inner\nblock are also included as (dummy) blocks.\n\nArgs:\n lines (`List[str]`):\n The source code, represented by a list of lines.\n start_index (`int`):\n The starting index of the target class/func block.\n end_index (`int`):\n The ending index of the target class/func block.\n indent (`int`):\n The indent of the class/func body.\n backtrace (`bool`, *optional*, defaults to `False`):\n Whether or not to include the lines before the inner class/func block's header (e.g. comments, decorators,\n etc.) until an empty line is encountered.\n\nReturns:\n `List[Tuple[str, int, int]]`: A list of elements with the form `(block_name, start_index, end_index)`."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An BARTPho sequence has the following format:\n\n- single sequence: ` X `\n- pair of sequences: ` A B `\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "transformers", "function": "def forward(self, input):\n output = torch.einsum('eoi,bei->beo', self.weight, input)\n if self.bias is not None:\n raise RuntimeError()\n return output", "docstring": "Args:\n input (`torch.FloatTensor` of shape `(B, n_models, input_dim)`):\n The input to the layer."} +{"repo": "transformers", "function": "class TFCausalLMOutputWithPast(ModelOutput):\n loss: tf.Tensor | None = None\n logits: Optional[tf.Tensor] = None\n past_key_values: List[tf.Tensor] | None = None\n hidden_states: Tuple[tf.Tensor] | None = None\n attentions: Tuple[tf.Tensor] | None = None", "docstring": "Base class for causal language model (or autoregressive) outputs.\n\nArgs:\n loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided):\n Language modeling loss (for next-token prediction).\n logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,\n sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the 
output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "transformers", "function": "class ASTConfig(PretrainedConfig):\n model_type = 'audio-spectrogram-transformer'\n\n def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128, **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.patch_size = patch_size\n self.qkv_bias = qkv_bias\n self.frequency_stride = frequency_stride\n self.time_stride = time_stride\n self.max_length = max_length\n self.num_mel_bins = num_mel_bins\n\n def _get_non_default_generation_parameters(self) -> Dict[str, Any]:\n return {}", "docstring": "This is the configuration class to store the configuration of a [`ASTModel`]. It is used to instantiate an AST\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the AST\n[MIT/ast-finetuned-audioset-10-10-0.4593](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593)\narchitecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n hidden_dropout_prob (`float`, *optional*, defaults to 0.0):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n patch_size (`int`, *optional*, defaults to 16):\n The size (resolution) of each patch.\n qkv_bias (`bool`, *optional*, defaults to `True`):\n Whether to add a bias to the queries, keys and values.\n frequency_stride (`int`, *optional*, defaults to 10):\n Frequency stride to use when patchifying the spectrograms.\n time_stride (`int`, *optional*, defaults to 10):\n Temporal stride to use when patchifying the spectrograms.\n max_length (`int`, *optional*, defaults to 1024):\n Temporal dimension of the spectrograms.\n num_mel_bins (`int`, *optional*, defaults to 128):\n Frequency dimension of the spectrograms (number of Mel-frequency bins).\n\nExample:\n\n```python\n>>> from transformers import ASTConfig, ASTModel\n\n>>> # Initializing a AST MIT/ast-finetuned-audioset-10-10-0.4593 style configuration\n>>> configuration = ASTConfig()\n\n>>> # Initializing a model (with random weights) from the MIT/ast-finetuned-audioset-10-10-0.4593 style configuration\n>>> model = ASTModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "def sanity_check_tensor_sync(tensor: torch.Tensor, mesh: DeviceMesh, rtol: float=0.0001, atol: float=0.0001, not_sync: bool=False) -> None:\n if not dist.is_initialized() or mesh.size() == 1:\n return\n pg = mesh.get_group()\n if hasattr(tensor, 'to_local'):\n local_tensor = tensor.to_local()\n else:\n local_tensor = tensor\n world_size = dist.get_world_size(pg)\n gathered_tensors = [torch.empty_like(local_tensor) for _ in range(world_size)]\n dist.all_gather(gathered_tensors, local_tensor, group=pg)\n for i in range(1, world_size):\n try:\n torch.testing.assert_close(gathered_tensors[0], gathered_tensors[i], rtol=rtol, atol=atol)\n except AssertionError as e:\n if not_sync:\n continue\n raise e", "docstring": "Verify that a tensor is synchronized (or not synchronized) across all processes in the mesh's process group.\nHandles both regular tensors and DTensors.\n\nArgs:\n tensor (torch.Tensor): The tensor to check for synchronization (can be DTensor)\n mesh (DeviceMesh): The device mesh containing the process group\n rtol (float): Relative tolerance for comparison\n atol (float): Absolute tolerance for comparison\n not_sync (bool): If True, asserts that tensors are NOT synchronized. 
If False, asserts they are synchronized."} +{"repo": "transformers", "function": "def get_model_test_files() -> List[str]:\n _ignore_files = ['test_modeling_common', 'test_modeling_encoder_decoder', 'test_modeling_flax_encoder_decoder', 'test_modeling_flax_speech_encoder_decoder', 'test_modeling_marian', 'test_modeling_tf_common', 'test_modeling_tf_encoder_decoder']\n test_files = []\n model_test_root = os.path.join(PATH_TO_TESTS, 'models')\n model_test_dirs = []\n for x in os.listdir(model_test_root):\n x = os.path.join(model_test_root, x)\n if os.path.isdir(x):\n model_test_dirs.append(x)\n for target_dir in [PATH_TO_TESTS] + model_test_dirs:\n for file_or_dir in os.listdir(target_dir):\n path = os.path.join(target_dir, file_or_dir)\n if os.path.isfile(path):\n filename = os.path.split(path)[-1]\n if 'test_modeling' in filename and os.path.splitext(filename)[0] not in _ignore_files:\n file = os.path.join(*path.split(os.sep)[1:])\n test_files.append(file)\n return test_files", "docstring": "Get the model test files.\n\nReturns:\n `List[str]`: The list of test files. The returned files will NOT contain the `tests` (i.e. `PATH_TO_TESTS`\n defined in this script). They will be considered as paths relative to `tests`. A caller has to use\n `os.path.join(PATH_TO_TESTS, ...)` to access the files."} +{"repo": "tensorflow", "function": "def xw_plus_b(x, weights, biases, name=None):\n with ops.name_scope(name, 'xw_plus_b', [x, weights, biases]) as name:\n x = ops.convert_to_tensor(x, name='x')\n weights = ops.convert_to_tensor(weights, name='weights')\n biases = ops.convert_to_tensor(biases, name='biases')\n mm = math_ops.matmul(x, weights)\n return bias_add(mm, biases, name=name)", "docstring": "Computes matmul(x, weights) + biases.\n\nArgs:\n x: a 2D tensor. Dimensions typically: batch, in_units\n weights: a 2D tensor. Dimensions typically: in_units, out_units\n biases: a 1D tensor. Dimensions: out_units\n name: A name for the operation (optional). 
If not specified\n \"xw_plus_b\" is used.\n\nReturns:\n A 2-D Tensor computing matmul(x, weights) + biases.\n Dimensions typically: batch, out_units."} +{"repo": "tensorflow", "function": "def from_function_and_signature(cls, python_function, input_signature, is_pure=False, jit_compile=None):\n function_type, default_values = make_function_type(python_function, input_signature)\n while isinstance(python_function, functools.partial):\n python_function = python_function.func\n name = getattr(python_function, '__name__', 'f')\n return FunctionSpec(function_type, default_values, is_pure=is_pure, jit_compile=jit_compile, name=name)", "docstring": "Creates a FunctionSpec instance given a python function and signature.\n\nArgs:\n python_function: a function to inspect\n input_signature: a signature of the function (None, if variable)\n is_pure: if True all input arguments (including variables and constants)\n will be converted to tensors and no variable changes allowed.\n jit_compile: see `tf.function`\n\nReturns:\n instance of FunctionSpec"} +{"repo": "transformers", "function": "class JanusCausalLMOutputWithPast(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[List[torch.FloatTensor]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Base class for Janus causal language model (or autoregressive) outputs.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Language modeling loss (for next-token prediction).\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`)\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):\n Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,\n sequence_length, hidden_size)`.\n\n image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver"} +{"repo": 
"pyglove", "function": "def create_model() -> tf.keras.Model:\n return tf.keras.Sequential(pg.oneof([lambda: [tf.keras.layers.Flatten(), tf.keras.layers.Dense(pg.oneof([64, 128]), pg.oneof(['relu', 'sigmoid']))], lambda: [tf.keras.layers.Lambda(lambda x: tf.reshape(x, (-1, 28, 28, 1))), tf.keras.layers.Conv2D(pg.oneof([64, 128]), pg.oneof([(3, 3), (5, 5)]), activation=pg.oneof(['relu', 'sigmoid'])), tf.keras.layers.Flatten()]]) + [tf.keras.layers.Dense(10, activation='softmax')])", "docstring": "Create model for training.\n\nCreate a simple tf.keras model for training.\n\nReturns:\n The model to use for training."} +{"repo": "tensorflow", "function": "def _in_place_subclassed_model_reset(model):\n assert not model._is_graph_network\n version_utils.swap_class(model.__class__, training.Model, training_v1.Model, ops.executing_eagerly_outside_functions())\n attributes_cache = {}\n for name in dir(model):\n if name == 'submodules' or name == '_self_tracked_trackables':\n continue\n try:\n value = getattr(model, name)\n except (AttributeError, ValueError, TypeError):\n continue\n if isinstance(value, Layer):\n attributes_cache[name] = value\n assert value in model.layers\n if hasattr(value, 'layers') and value.layers:\n raise ValueError('We do not support the use of nested layers in `model_to_estimator` at this time. Found nested layer: %s' % value)\n elif isinstance(value, (list, tuple)) and name not in ('layers', '_layers', 'metrics', '_compile_metric_functions', '_output_loss_metrics'):\n if value and all((isinstance(val, Layer) for val in value)):\n raise ValueError('We do not support the use of list-of-layers attributes in subclassed models used with `model_to_estimator` at this time. Found list model: %s' % name)\n layers_to_names = {value: key for key, value in attributes_cache.items()}\n original_layers = list(model._flatten_layers(include_self=False, recursive=False))\n setattr_tracking = model._setattr_tracking\n model._setattr_tracking = False\n model._self_tracked_trackables = []\n for layer in original_layers:\n config = layer.get_config()\n if isinstance(layer, training.Model) and (not layer._is_graph_network):\n raise ValueError('We do not support the use of nested subclassed models in `model_to_estimator` at this time. Found nested model: %s' % layer)\n fresh_layer = layer.__class__.from_config(config)\n name = layers_to_names[layer]\n setattr(model, name, fresh_layer)\n model._self_tracked_trackables.append(fresh_layer)\n if not hasattr(model, '_original_attributes_cache') or model._original_attributes_cache is None:\n if model.built:\n attributes_to_cache = ['inputs', 'outputs', 'total_loss', 'optimizer', 'train_function', 'test_function', 'predict_function', '_training_endpoints', '_collected_trainable_weights', '_feed_inputs', '_feed_input_names', '_feed_input_shapes']\n for name in attributes_to_cache:\n attributes_cache[name] = getattr(model, name)\n model._original_attributes_cache = attributes_cache\n _reset_build_compile_trackers(model)\n model._setattr_tracking = setattr_tracking", "docstring": "Substitute for model cloning that works for subclassed models.\n\nSubclassed models cannot be cloned because their topology is not serializable.\nTo \"instantiate\" an identical model in a new TF graph, we reuse the original\nmodel object, but we clear its state.\n\nAfter calling this function on a model instance, you can use the model\ninstance as if it were a model clone (in particular you can use it in a new\ngraph).\n\nThis method clears the state of the input model. 
It is thus destructive.\nHowever the original state can be restored fully by calling\n`_in_place_subclassed_model_state_restoration`.\n\nArgs:\n model: Instance of a Keras model created via subclassing.\n\nRaises:\n ValueError: In case the model uses a subclassed model as inner layer."} +{"repo": "beam", "function": "def add_metadata_field(self, field: str, python_type: Type, column_name: Optional[str]=None, convert_fn: Optional[Callable[[Any], Any]]=None, default: Any=None, sql_typecast: Optional[str]=None) -> 'ColumnSpecsBuilder':\n name = column_name or field\n\n def value_fn(chunk: Chunk) -> Any:\n value = chunk.metadata.get(field, default)\n if value is not None and convert_fn is not None:\n value = convert_fn(value)\n return value\n spec = ColumnSpec(column_name=name, python_type=python_type, value_fn=value_fn, sql_typecast=sql_typecast)\n self._specs.append(spec)\n return self", "docstring": "\"\"Add a :class:`.ColumnSpec` that extracts and converts a field from\nchunk metadata.\n\nArgs:\n field: Key to extract from chunk metadata\n python_type: Python type for the column (e.g. str, int, float)\n column_name: Name for the column (defaults to metadata field name)\n convert_fn: Optional function to convert the extracted value to\n desired type. If None, value is used as-is\n default: Default value if field is missing from metadata\n sql_typecast: Optional SQL type cast (e.g. \"::timestamp\")\n\nReturns:\n Self for chaining\n\nExamples:\n\n Simple string field:\n >>> builder.add_metadata_field(\"source\", str)\n\n Integer with default:\n\n >>> builder.add_metadata_field(\n ... field=\"count\",\n ... python_type=int,\n ... column_name=\"item_count\",\n ... default=0\n ... )\n\n Float with conversion and default:\n\n >>> builder.add_metadata_field(\n ... field=\"confidence\",\n ... python_type=intfloat,\n ... convert_fn=lambda x: round(float(x), 2),\n ... default=0.0\n ... )\n\n Timestamp with conversion and type cast:\n\n >>> builder.add_metadata_field(\n ... field=\"created_at\",\n ... python_type=intstr,\n ... convert_fn=lambda ts: ts.replace('T', ' '),\n ... sql_typecast=\"::timestamp\"\n ... )"} +{"repo": "keras", "function": "class RecallAtPrecision(SensitivitySpecificityBase):\n\n def __init__(self, precision, num_thresholds=200, class_id=None, name=None, dtype=None):\n if precision < 0 or precision > 1:\n raise ValueError(f'Argument `precision` must be in the range [0, 1]. Received: precision={precision}')\n self.precision = precision\n self.num_thresholds = num_thresholds\n super().__init__(value=precision, num_thresholds=num_thresholds, class_id=class_id, name=name, dtype=dtype)\n\n def result(self):\n recalls = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_negatives))\n precisions = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_positives))\n return self._find_max_under_constraint(precisions, recalls, ops.greater_equal)\n\n def get_config(self):\n config = {'num_thresholds': self.num_thresholds, 'precision': self.precision}\n base_config = super().get_config()\n return {**base_config, **config}", "docstring": "Computes best recall where precision is >= specified value.\n\nFor a given score-label-distribution the required precision might not\nbe achievable, in this case 0.0 is returned as recall.\n\nThis metric creates four local variables, `true_positives`,\n`true_negatives`, `false_positives` and `false_negatives` that are used to\ncompute the recall at the given precision. 
The threshold for the given\nprecision value is computed and used to evaluate the corresponding recall.\n\nIf `sample_weight` is `None`, weights default to 1.\nUse `sample_weight` of 0 to mask values.\n\nIf `class_id` is specified, we calculate precision by considering only the\nentries in the batch for which `class_id` is above the threshold\npredictions, and computing the fraction of them for which `class_id` is\nindeed a correct label.\n\nArgs:\n precision: A scalar value in range `[0, 1]`.\n num_thresholds: (Optional) Defaults to 200. The number of thresholds\n to use for matching the given precision.\n class_id: (Optional) Integer class ID for which we want binary metrics.\n This must be in the half-open interval `[0, num_classes)`, where\n `num_classes` is the last dimension of predictions.\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nExample:\n\n>>> m = keras.metrics.RecallAtPrecision(0.8)\n>>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9])\n>>> m.result()\n0.5\n\n>>> m.reset_state()\n>>> m.update_state([0, 0, 1, 1], [0, 0.5, 0.3, 0.9],\n... sample_weight=[1, 0, 0, 1])\n>>> m.result()\n1.0\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(\n optimizer='sgd',\n loss='binary_crossentropy',\n metrics=[keras.metrics.RecallAtPrecision(precision=0.8)])\n```"} +{"repo": "transformers", "function": "class UniSpeechConfig(PretrainedConfig):\n model_type = 'unispeech'\n\n def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction='mean', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):\n super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)\n self.hidden_size = hidden_size\n self.feat_extract_norm = feat_extract_norm\n self.feat_extract_activation = feat_extract_activation\n self.conv_dim = list(conv_dim)\n self.conv_stride = list(conv_stride)\n self.conv_kernel = list(conv_kernel)\n self.conv_bias = conv_bias\n self.num_conv_pos_embeddings = num_conv_pos_embeddings\n self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups\n self.num_feat_extract_layers = len(self.conv_dim)\n self.num_hidden_layers = num_hidden_layers\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.num_attention_heads = num_attention_heads\n self.hidden_dropout = hidden_dropout\n self.attention_dropout = attention_dropout\n self.activation_dropout = activation_dropout\n self.feat_proj_dropout = feat_proj_dropout\n 
self.final_dropout = final_dropout\n self.layerdrop = layerdrop\n self.layer_norm_eps = layer_norm_eps\n self.initializer_range = initializer_range\n self.num_ctc_classes = num_ctc_classes\n self.vocab_size = vocab_size\n self.do_stable_layer_norm = do_stable_layer_norm\n self.use_weighted_layer_sum = use_weighted_layer_sum\n self.classifier_proj_size = classifier_proj_size\n if len(self.conv_stride) != self.num_feat_extract_layers or len(self.conv_kernel) != self.num_feat_extract_layers or len(self.conv_dim) != self.num_feat_extract_layers:\n raise ValueError(f'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')\n self.apply_spec_augment = apply_spec_augment\n self.mask_time_prob = mask_time_prob\n self.mask_time_length = mask_time_length\n self.mask_time_min_masks = mask_time_min_masks\n self.mask_feature_prob = mask_feature_prob\n self.mask_feature_length = mask_feature_length\n self.mask_feature_min_masks = mask_feature_min_masks\n self.num_codevectors_per_group = num_codevectors_per_group\n self.num_codevector_groups = num_codevector_groups\n self.contrastive_logits_temperature = contrastive_logits_temperature\n self.feat_quantizer_dropout = feat_quantizer_dropout\n self.num_negatives = num_negatives\n self.codevector_dim = codevector_dim\n self.proj_codevector_dim = proj_codevector_dim\n self.diversity_loss_weight = diversity_loss_weight\n self.ctc_loss_reduction = ctc_loss_reduction\n self.ctc_zero_infinity = ctc_zero_infinity\n self.replace_prob = replace_prob\n\n @property\n def inputs_to_logits_ratio(self):\n return functools.reduce(operator.mul, self.conv_stride, 1)", "docstring": "This is the configuration class to store the configuration of a [`UniSpeechModel`]. It is used to instantiate an\nUniSpeech model according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the UniSpeech\n[microsoft/unispeech-large-1500h-cv](https://huggingface.co/microsoft/unispeech-large-1500h-cv) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 32):\n Vocabulary size of the UniSpeech model. Defines the number of different tokens that can be represented by\n the `inputs_ids` passed when calling [`UniSpeechModel`]. Vocabulary size of the model. Defines the\n different tokens that can be represented by the *inputs_ids* passed to the forward method of\n [`UniSpeechModel`].\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n hidden_dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n activation_dropout (`float`, *optional*, defaults to 0.1):\n The dropout ratio for activations inside the fully connected layer.\n attention_dropout (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n feat_proj_dropout (`float`, *optional*, defaults to 0.0):\n The dropout probability for output of the feature encoder.\n feat_quantizer_dropout (`float`, *optional*, defaults to 0.0):\n The dropout probability for the output of the feature encoder that's used by the quantizer.\n final_dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for the final projection layer of [`UniSpeechForCTC`].\n layerdrop (`float`, *optional*, defaults to 0.1):\n The LayerDrop probability. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more\n details.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the layer normalization layers.\n feat_extract_norm (`str`, *optional*, defaults to `\"group\"`):\n The norm to be applied to 1D convolutional layers in feature encoder. One of `\"group\"` for group\n normalization of only the first 1D convolutional layer or `\"layer\"` for layer normalization of all 1D\n convolutional layers.\n feat_extract_activation (`str, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the 1D convolutional layers of the feature\n extractor. If string, `\"gelu\"`, `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):\n A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the\n feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.\n conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):\n A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length\n of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.\n conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 2, 2)`):\n A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The\n length of *conv_kernel* defines the number of convolutional layers and has to match the length of\n *conv_dim*.\n conv_bias (`bool`, *optional*, defaults to `False`):\n Whether the 1D convolutional layers have a bias.\n num_conv_pos_embeddings (`int`, *optional*, defaults to 128):\n Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional\n embeddings layer.\n num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):\n Number of groups of 1D convolutional positional embeddings layer.\n do_stable_layer_norm (`bool`, *optional*, defaults to `False`):\n Whether to apply *stable* layer norm architecture of the Transformer encoder. 
`do_stable_layer_norm is\n True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is\n False` corresponds to applying layer norm after the attention layer.\n apply_spec_augment (`bool`, *optional*, defaults to `True`):\n Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see\n [SpecAugment: A Simple Data Augmentation Method for Automatic Speech\n Recognition](https://huggingface.co/papers/1904.08779).\n mask_time_prob (`float`, *optional*, defaults to 0.05):\n Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking\n procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If\n reasoning from the probability of each feature vector to be chosen as the start of the vector span to be\n masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the\n actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.\n mask_time_length (`int`, *optional*, defaults to 10):\n Length of vector span along the time axis.\n mask_time_min_masks (`int`, *optional*, defaults to 2):\n The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step,\n irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <\n mask_time_min_masks''\n mask_feature_prob (`float`, *optional*, defaults to 0.0):\n Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The\n masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over\n the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector\n span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap\n may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is\n True`.\n mask_feature_length (`int`, *optional*, defaults to 10):\n Length of vector span along the feature axis.\n mask_feature_min_masks (`int`, *optional*, defaults to 0):\n The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time\n step, irrespectively of `mask_feature_prob`. Only relevant if\n ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''\n num_codevectors_per_group (`int`, *optional*, defaults to 320):\n Number of entries in each quantization codebook (group).\n num_codevector_groups (`int`, *optional*, defaults to 2):\n Number of codevector groups for product codevector quantization.\n contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):\n The temperature *kappa* in the contrastive loss.\n num_negatives (`int`, *optional*, defaults to 100):\n Number of negative samples for the contrastive loss.\n codevector_dim (`int`, *optional*, defaults to 256):\n Dimensionality of the quantized feature vectors.\n proj_codevector_dim (`int`, *optional*, defaults to 256):\n Dimensionality of the final projection of both the quantized and the transformer features.\n diversity_loss_weight (`float`, *optional*, defaults to 0.1):\n The weight of the codebook diversity loss component.\n ctc_loss_reduction (`str`, *optional*, defaults to `\"mean\"`):\n Specifies the reduction to apply to the output of `torch.nn.CTCLoss`.
Only relevant when training an\n instance of [`UniSpeechForCTC`].\n ctc_zero_infinity (`bool`, *optional*, defaults to `False`):\n Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly\n occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance\n of [`UniSpeechForCTC`].\n use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):\n Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an\n instance of [`UniSpeechForSequenceClassification`].\n classifier_proj_size (`int`, *optional*, defaults to 256):\n Dimensionality of the projection before token mean-pooling for classification.\n num_ctc_classes (`int`, *optional*, defaults to 80):\n Specifies the number of classes (phoneme tokens and blank token) for phoneme-level CTC loss. Only relevant\n when using an instance of [`UniSpeechForPreTraining`].\n pad_token_id (`int`, *optional*, defaults to 0):\n The id of the padding token.\n bos_token_id (`int`, *optional*, defaults to 1):\n The id of the \"beginning-of-sequence\" token.\n eos_token_id (`int`, *optional*, defaults to 2):\n The id of the \"end-of-sequence\" token.\n replace_prob (`float`, *optional*, defaults to 0.5):\n Probability that transformer feature is replaced by quantized feature for pretraining.\n\nExample:\n\n```python\n>>> from transformers import UniSpeechConfig, UniSpeechModel\n\n>>> # Initializing a UniSpeech facebook/unispeech-base-960h style configuration\n>>> configuration = UniSpeechConfig()\n\n>>> # Initializing a model (with random weights) from the facebook/unispeech-base-960h style configuration\n>>> model = UniSpeechModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def _partitioner(shape, dtype):\n if not isinstance(shape, tensor_shape.TensorShape):\n raise ValueError(f'shape is not a TensorShape: {shape}')\n if not shape.is_fully_defined():\n raise ValueError(f'shape is not fully defined: {shape}')\n dtype = dtypes.as_dtype(dtype)\n if dtype.base_dtype == dtypes.string:\n element_size = bytes_per_string_element\n else:\n element_size = dtype.size\n partitions = [1] * shape.ndims\n bytes_per_slice = 1.0 * (shape.num_elements() / shape.dims[axis].value) * element_size\n slices_per_shard = max(1, math.floor(max_shard_bytes / bytes_per_slice))\n axis_shards = int(math.ceil(1.0 * shape.dims[axis].value / slices_per_shard))\n if max_shards:\n axis_shards = min(max_shards, axis_shards)\n partitions[axis] = axis_shards\n return partitions", "docstring": "Partitioner that partitions shards to have max_shard_bytes total size.\n\nArgs:\n shape: A `TensorShape`.\n dtype: A `DType`.\n\nReturns:\n A tuple representing how much to slice each axis in shape.\n\nRaises:\n ValueError: If shape is not a fully defined `TensorShape` or dtype is not\n a `DType`."} +{"repo": "tensorflow", "function": "def batch_shape_tensor(self, name='batch_shape_tensor'):\n with self._name_scope(name):\n if self.batch_shape.is_fully_defined():\n return ops.convert_to_tensor(self.batch_shape.as_list(), dtype=dtypes.int32, name='batch_shape')\n return self._batch_shape_tensor()", "docstring": "Shape of a single sample from a single event index as a 1-D `Tensor`.\n\nThe batch dimensions are indexes into independent, non-identical\nparameterizations of this distribution.\n\nArgs:\n name: name to give to the op\n\nReturns:\n batch_shape: `Tensor`."} +{"repo": 
"starthinker", "function": "def __init__(self, config, auth, trix_id, feed_name, parse=True, spreadsheet=None, timezone=None):\n self.config = config\n self.auth = auth\n self.trix_id = trix_id\n self.trix_range = 'A1:AZ'\n self.feed_name = feed_name\n self._parse = parse\n self._timezone = timezone or 'America/New_York'\n if spreadsheet:\n self.spreadsheet = spreadsheet\n else:\n self.spreadsheet = sheets_get(self.config, self.auth, self.trix_id)\n self.raw_feed = self._get_feed()\n self.feed = self._feed_to_dict(parse=self._parse)", "docstring": "Initializes the feed with parameters.\n\nArgs:\n auth: The authentication scheme to use based on the json configuration\n file.\n trix_id: Unique identifier of the Google Sheet that represents the\n Bulkdozer feed.\n feed_name: The name of the feed to initialize.\n spreadsheet: Optional, the spreadsheet object representing the Bulkdozer\n feed spreadsheet, useful to limit calls to the sheets API and allow\n multiple Feed objects to use the same spreadsheet instance. This is used\n to determine which tabs exist in the feed so the correct one can be\n selected for the entity this Feed object represents."} +{"repo": "tensorflow", "function": "class OptimizerV2(trackable.Trackable):\n _HAS_AGGREGATE_GRAD = False\n\n def __init__(self, name, gradient_aggregator=None, gradient_transformers=None, **kwargs):\n \"\"\"Create a new Optimizer.\n\n This must be called by the constructors of subclasses.\n Note that Optimizer instances should not bind to a single graph,\n and so shouldn't keep Tensors as member variables. Generally\n you should be able to use the _set_hyper()/state.get_hyper()\n facility instead.\n\n This class is stateful and thread-compatible.\n\n Example of custom gradient transformations:\n\n ```python\n def my_gradient_transformer(grads_and_vars):\n # Simple example, double the gradients.\n return [(2. * g, v) for g, v in grads_and_vars]\n\n optimizer = tf.keras.optimizers.SGD(\n 1e-3, gradient_transformers=[my_gradient_transformer])\n ```\n\n Args:\n name: String. The name to use for momentum accumulator weights created\n by the optimizer.\n gradient_aggregator: The function to use to aggregate gradients across\n devices (when using `tf.distribute.Strategy`). If `None`, defaults to\n summing the gradients across devices. The function should accept and\n return a list of `(gradient, variable)` tuples.\n gradient_transformers: Optional. List of functions to use to transform\n gradients before applying updates to Variables. The functions are\n applied after `gradient_aggregator`. The functions should accept and\n return a list of `(gradient, variable)` tuples.\n **kwargs: keyword arguments. 
Allowed arguments are `clipvalue`,\n `clipnorm`, `global_clipnorm`.\n If `clipvalue` (float) is set, the gradient of each weight\n is clipped to be no higher than this value.\n If `clipnorm` (float) is set, the gradient of each weight\n is individually clipped so that its norm is no higher than this value.\n If `global_clipnorm` (float) is set the gradient of all weights is\n clipped so that their global norm is no higher than this value.\n\n Raises:\n ValueError: in case of any invalid argument.\n \"\"\"\n allowed_kwargs = {'clipnorm', 'clipvalue', 'lr', 'decay', 'global_clipnorm'}\n for k in kwargs:\n if k not in allowed_kwargs:\n raise TypeError('Unexpected keyword argument passed to optimizer: ' + str(k))\n if kwargs[k] is not None and kwargs[k] < 0:\n raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k]))\n if k == 'lr':\n warnings.warn('The `lr` argument is deprecated, use `learning_rate` instead.')\n self._use_locking = True\n self._init_set_name(name)\n self._hyper = {}\n self._slots = {}\n self._slot_names = []\n self._weights = []\n self._iterations = None\n self._deferred_slot_restorations = {}\n decay = kwargs.pop('decay', 0.0)\n if decay < 0.0:\n raise ValueError('decay cannot be less than 0: {}'.format(decay))\n self._initial_decay = decay\n self._hypers_created = False\n if distribute_lib.has_strategy():\n self._distribution_strategy = distribute_lib.get_strategy()\n else:\n self._distribution_strategy = None\n if gradient_aggregator is None:\n gradient_aggregator = optimizer_utils.all_reduce_sum_gradients\n self.gradient_aggregator = gradient_aggregator\n if gradient_transformers is None:\n gradient_transformers = []\n self.gradient_transformers = gradient_transformers\n self.clipnorm = kwargs.pop('clipnorm', None)\n self.global_clipnorm = kwargs.pop('global_clipnorm', None)\n if self.clipnorm is not None and self.global_clipnorm is not None:\n raise ValueError('Cannot accept both `clipnorm` and `global_clipnorm`, passed `clipnorm` {}, `global_clipnorm` {}'.format(self.clipnorm, self.global_clipnorm))\n self.clipvalue = kwargs.pop('clipvalue', None)\n\n @property\n def clipnorm(self):\n \"\"\"`float` or `None`. If set, clips gradients to a maximum norm.\"\"\"\n return self._clipnorm\n\n @property\n def global_clipnorm(self):\n \"\"\"`float` or `None`. If set, clips gradients to a maximum norm.\"\"\"\n return self._global_clipnorm\n\n @clipnorm.setter\n def clipnorm(self, val):\n if val is not None and self.gradient_transformers:\n raise ValueError('`clipnorm` cannot be set when `gradient_transformers` is set. Instead, use the `gradient_transformers` to specify clipping and other transformations.')\n self._clipnorm = val\n self._clipnorm_fn = optimizer_utils.make_gradient_clipnorm_fn(self._clipnorm)\n\n @global_clipnorm.setter\n def global_clipnorm(self, val):\n if val is not None and self.gradient_transformers:\n raise ValueError('`clipnorm` cannot be set when `gradient_transformers` is set. Instead, use the `gradient_transformers` to specify clipping and other transformations.')\n self._global_clipnorm = val\n self._global_clipnorm_fn = optimizer_utils.make_global_gradient_clipnorm_fn(self._global_clipnorm)\n\n @property\n def clipvalue(self):\n \"\"\"`float` or `None`. If set, clips gradients to a maximum value.\"\"\"\n return self._clipvalue\n\n @clipvalue.setter\n def clipvalue(self, val):\n if val is not None and self.gradient_transformers:\n raise ValueError('`clipvalue` cannot be set when `gradient_transformers` is set. 
Instead, use the `gradient_transformers` to specify clipping and other transformations.')\n self._clipvalue = val\n self._clipvalue_fn = optimizer_utils.make_gradient_clipvalue_fn(self._clipvalue)\n\n def _transform_loss(self, loss):\n \"\"\"Called in `.minimize` to transform loss before computing gradients.\"\"\"\n return loss\n\n def _get_gradients(self, tape, loss, var_list, grad_loss=None):\n \"\"\"Called in `minimize` to compute gradients from loss.\"\"\"\n grads = tape.gradient(loss, var_list, grad_loss)\n return list(zip(grads, var_list))\n\n def _transform_unaggregated_gradients(self, grads_and_vars):\n \"\"\"Called in `apply_gradients` before gradient aggregation.\"\"\"\n return grads_and_vars\n\n def _aggregate_gradients(self, grads_and_vars):\n \"\"\"Called in `apply_gradients` to aggregate gradients across devices.\n\n Note that user subclasses may override this, so the interface should not be\n changed.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n\n Returns:\n A list of (aggregated_gradient, variable) pairs. By default, this calls\n `self.gradient_aggregator`.\n \"\"\"\n return self.gradient_aggregator(grads_and_vars)\n\n def _transform_gradients(self, grads_and_vars):\n \"\"\"Called in `apply_gradients` after aggregation.\"\"\"\n if self._clipvalue is not None:\n grads_and_vars = self._clipvalue_fn(grads_and_vars)\n if self._clipnorm is not None:\n grads_and_vars = self._clipnorm_fn(grads_and_vars)\n if self._global_clipnorm is not None:\n grads_and_vars = self._global_clipnorm_fn(grads_and_vars)\n for fn in self.gradient_transformers:\n grads_and_vars = fn(grads_and_vars)\n return grads_and_vars\n\n def minimize(self, loss, var_list, grad_loss=None, name=None, tape=None):\n \"\"\"Minimize `loss` by updating `var_list`.\n\n This method simply computes gradient using `tf.GradientTape` and calls\n `apply_gradients()`. If you want to process the gradient before applying\n then call `tf.GradientTape` and `apply_gradients()` explicitly instead\n of using this function.\n\n Args:\n loss: `Tensor` or callable. If a callable, `loss` should take no arguments\n and return the value to minimize. If a `Tensor`, the `tape` argument\n must be passed.\n var_list: list or tuple of `Variable` objects to update to minimize\n `loss`, or a callable returning the list or tuple of `Variable` objects.\n Use callable when the variable list would otherwise be incomplete before\n `minimize` since the variables are created at the first time `loss` is\n called.\n grad_loss: (Optional). A `Tensor` holding the gradient computed for\n `loss`.\n name: (Optional) str. Name for the returned operation.\n tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`,\n the tape that computed the `loss` must be provided.\n\n Returns:\n An `Operation` that updates the variables in `var_list`. The `iterations`\n will be automatically increased by 1.\n\n Raises:\n ValueError: If some of the variables are not `Variable` objects.\n\n \"\"\"\n grads_and_vars = self._compute_gradients(loss, var_list=var_list, grad_loss=grad_loss, tape=tape)\n return self.apply_gradients(grads_and_vars, name=name)\n\n def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):\n \"\"\"Compute gradients of `loss` for the variables in `var_list`.\n\n This is the first part of `minimize()`. It returns a list\n of (gradient, variable) pairs where \"gradient\" is the gradient\n for \"variable\". 
Note that \"gradient\" can be a `Tensor`, an\n `IndexedSlices`, or `None` if there is no gradient for the\n given variable.\n\n Args:\n loss: `Tensor` or callable. If a callable, `loss` should take no\n arguments and return the value to minimize. If a `Tensor`, the `tape`\n argument must be passed.\n var_list: list or tuple of `Variable` objects to update to minimize\n `loss`, or a callable returning the list or tuple of `Variable` objects.\n Use callable when the variable list would otherwise be incomplete before\n `minimize` and the variables are created at the first time when `loss`\n is called.\n grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.\n tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`,\n the tape that computed the `loss` must be provided.\n\n Returns:\n A list of (gradient, variable) pairs. Variable is always present, but\n gradient can be `None`.\n\n Raises:\n TypeError: If `var_list` contains anything else than `Variable` objects.\n ValueError: If some arguments are invalid, or var_list is None.\n \"\"\"\n if not callable(loss) and tape is None:\n raise ValueError('`tape` is required when a `Tensor` loss is passed.')\n tape = tape if tape is not None else backprop.GradientTape()\n if callable(loss):\n with tape:\n if not callable(var_list):\n tape.watch(var_list)\n loss = loss()\n if callable(var_list):\n var_list = var_list()\n with tape:\n loss = self._transform_loss(loss)\n var_list = nest.flatten(var_list)\n with ops.name_scope_v2(self._name + '/gradients'):\n grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss)\n self._assert_valid_dtypes([v for g, v in grads_and_vars if g is not None and v.dtype != dtypes.resource])\n return grads_and_vars\n\n def apply_gradients(self, grads_and_vars, name=None, experimental_aggregate_gradients=True):\n \"\"\"Apply gradients to variables.\n\n This is the second part of `minimize()`. It returns an `Operation` that\n applies gradients.\n\n The method sums gradients from all replicas in the presence of\n `tf.distribute.Strategy` by default. You can aggregate gradients yourself by\n passing `experimental_aggregate_gradients=False`.\n\n Example:\n\n ```python\n grads = tape.gradient(loss, vars)\n grads = tf.distribute.get_replica_context().all_reduce('sum', grads)\n # Processing aggregated gradients.\n optimizer.apply_gradients(zip(grads, vars),\n experimental_aggregate_gradients=False)\n\n ```\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n name: Optional name for the returned operation. Default to the name passed\n to the `Optimizer` constructor.\n experimental_aggregate_gradients: Whether to sum gradients from different\n replicas in the presence of `tf.distribute.Strategy`. If False, it's\n user responsibility to aggregate the gradients. Default to True.\n\n Returns:\n An `Operation` that applies the specified gradients. The `iterations`\n will be automatically increased by 1.\n\n Raises:\n TypeError: If `grads_and_vars` is malformed.\n ValueError: If none of the variables have gradients.\n RuntimeError: If called in a cross-replica context.\n \"\"\"\n grads_and_vars = optimizer_utils.filter_empty_gradients(grads_and_vars)\n var_list = [v for _, v in grads_and_vars]\n with ops.name_scope_v2(self._name):\n with ops.init_scope():\n self._create_all_weights(var_list)\n if not grads_and_vars:\n return control_flow_ops.no_op()\n if distribute_lib.in_cross_replica_context():\n raise RuntimeError('`apply_gradients() cannot be called in cross-replica context. 
Use `tf.distribute.Strategy.run` to enter replica context.')\n strategy = distribute_lib.get_strategy()\n if not experimental_aggregate_gradients and strategy and isinstance(strategy, (parameter_server_strategy.ParameterServerStrategyV1, parameter_server_strategy_v2.ParameterServerStrategyV2, central_storage_strategy.CentralStorageStrategy, central_storage_strategy.CentralStorageStrategyV1)):\n raise NotImplementedError('`experimental_aggregate_gradients=False is not supported for ParameterServerStrategy and CentralStorageStrategy')\n apply_state = self._prepare(var_list)\n if experimental_aggregate_gradients:\n grads_and_vars = self._transform_unaggregated_gradients(grads_and_vars)\n grads_and_vars = self._aggregate_gradients(grads_and_vars)\n grads_and_vars = self._transform_gradients(grads_and_vars)\n if optimizer_utils.strategy_supports_no_merge_call():\n return self._distributed_apply(strategy, grads_and_vars, name, apply_state)\n else:\n return distribute_lib.get_replica_context().merge_call(functools.partial(self._distributed_apply, apply_state=apply_state), args=(grads_and_vars,), kwargs={'name': name})\n\n def _distributed_apply(self, distribution, grads_and_vars, name, apply_state):\n \"\"\"`apply_gradients` using a `DistributionStrategy`.\"\"\"\n\n def apply_grad_to_update_var(var, grad):\n \"\"\"Apply gradient to variable.\"\"\"\n if isinstance(var, tensor.Tensor):\n raise NotImplementedError('Trying to update a Tensor ', var)\n apply_kwargs = {}\n if isinstance(grad, indexed_slices.IndexedSlices):\n if var.constraint is not None:\n raise RuntimeError('Cannot use a constraint function on a sparse variable.')\n if 'apply_state' in self._sparse_apply_args:\n apply_kwargs['apply_state'] = apply_state\n return self._resource_apply_sparse_duplicate_indices(grad.values, var, grad.indices, **apply_kwargs)\n if 'apply_state' in self._dense_apply_args:\n apply_kwargs['apply_state'] = apply_state\n update_op = self._resource_apply_dense(grad, var, **apply_kwargs)\n if var.constraint is not None:\n with ops.control_dependencies([update_op]):\n return var.assign(var.constraint(var))\n else:\n return update_op\n eagerly_outside_functions = ops.executing_eagerly_outside_functions()\n update_ops = []\n with name_scope_only_in_function_or_graph(name or self._name):\n for grad, var in grads_and_vars:\n with distribution.extended.colocate_vars_with(var):\n with name_scope_only_in_function_or_graph('update' if eagerly_outside_functions else 'update_' + var.op.name):\n update_op = distribution.extended.update(var, apply_grad_to_update_var, args=(grad,), group=False)\n if distribute_lib.in_cross_replica_context():\n update_ops.extend(update_op)\n else:\n update_ops.append(update_op)\n any_symbolic = any((isinstance(i, ops.Operation) or tf_utils.is_symbolic_tensor(i) for i in update_ops))\n if not context.executing_eagerly() or any_symbolic:\n with backend._current_graph(update_ops).as_default():\n with ops.control_dependencies([control_flow_ops.group(update_ops)]):\n return self._iterations.assign_add(1, read_value=False)\n return self._iterations.assign_add(1)\n\n def get_gradients(self, loss, params):\n \"\"\"Returns gradients of `loss` with respect to `params`.\n\n Should be used only in legacy v1 graph mode.\n\n Args:\n loss: Loss tensor.\n params: List of variables.\n\n Returns:\n List of gradient tensors.\n\n Raises:\n ValueError: In case any gradient cannot be computed (e.g. 
if gradient\n function not implemented).\n \"\"\"\n params = nest.flatten(params)\n with backend.get_graph().as_default(), backend.name_scope(self._name + '/gradients'):\n grads = gradients.gradients(loss, params)\n for grad, param in zip(grads, params):\n if grad is None:\n raise ValueError('Variable {} has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.'.format(param))\n return grads\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n grads_and_vars = list(zip(grads, params))\n self._assert_valid_dtypes([v for g, v in grads_and_vars if g is not None and v.dtype != dtypes.resource])\n return [self.apply_gradients(grads_and_vars)]\n\n def _set_hyper(self, name, value):\n \"\"\"set hyper `name` to value. value can be callable, tensor, numeric.\"\"\"\n if isinstance(value, trackable.Trackable):\n self._track_trackable(value, name, overwrite=True)\n if name not in self._hyper:\n self._hyper[name] = value\n else:\n prev_value = self._hyper[name]\n if callable(prev_value) or isinstance(prev_value, (tensor.Tensor, int, float, learning_rate_schedule.LearningRateSchedule)) or isinstance(value, learning_rate_schedule.LearningRateSchedule):\n self._hyper[name] = value\n else:\n backend.set_value(self._hyper[name], value)\n\n def _get_hyper(self, name, dtype=None):\n if not self._hypers_created:\n self._create_hypers()\n value = self._hyper[name]\n if isinstance(value, learning_rate_schedule.LearningRateSchedule):\n return value\n if callable(value):\n value = value()\n if dtype:\n return math_ops.cast(value, dtype)\n else:\n return value\n\n def _create_slots(self, var_list):\n pass\n\n def _create_all_weights(self, var_list):\n \"\"\"Creates all weights, including iterations, hyperparameters and slot vars.\n\n This will add newly created variables to `optimizer.weights`.\n\n New variables are only created when this method is called the first time, or\n when called with different variables in the var_list.\n\n Args:\n var_list: list or tuple of `Variable` objects that will be minimized\n using this optimizer.\n \"\"\"\n _ = self.iterations\n self._create_hypers()\n self._create_slots(var_list)\n\n def __getattribute__(self, name):\n \"\"\"Overridden to support hyperparameter access.\"\"\"\n try:\n return super(OptimizerV2, self).__getattribute__(name)\n except AttributeError as e:\n if name == '_hyper':\n raise e\n if name == 'lr':\n name = 'learning_rate'\n if name in self._hyper:\n return self._get_hyper(name)\n raise e\n\n def __dir__(self):\n result = set(super(OptimizerV2, self).__dir__())\n if '_hyper' in result:\n result |= self._hyper.keys()\n if 'learning_rate' in self._hyper.keys():\n result.add('lr')\n return list(result)\n\n def __setattr__(self, name, value):\n \"\"\"Override setattr to support dynamic hyperparameter setting.\"\"\"\n if name == 'lr':\n name = 'learning_rate'\n if hasattr(self, '_hyper') and name in self._hyper:\n self._set_hyper(name, value)\n else:\n super(OptimizerV2, self).__setattr__(name, value)\n\n def get_slot_names(self):\n \"\"\"A list of names for this optimizer's slots.\"\"\"\n return self._slot_names\n\n def add_slot(self, var, slot_name, initializer='zeros', shape=None):\n \"\"\"Add a new slot variable for `var`.\n\n A slot variable is an additional variable associated with `var` to train.\n It is allocated and managed by optimizers, e.g. 
`Adam`.\n\n Args:\n var: a `Variable` object.\n slot_name: name of the slot variable.\n initializer: initializer of the slot variable\n shape: (Optional) shape of the slot variable. If not set, it will default\n to the shape of `var`.\n\n Returns:\n A slot variable.\n \"\"\"\n if slot_name not in self._slot_names:\n self._slot_names.append(slot_name)\n var_key = _var_key(var)\n slot_dict = self._slots.setdefault(var_key, {})\n weight = slot_dict.get(slot_name, None)\n if weight is None:\n if isinstance(initializer, str) or callable(initializer):\n initializer = initializers.get(initializer)\n if isinstance(initializer, trackable.CheckpointInitialValueCallable) or shape is not None:\n slot_shape = shape\n else:\n slot_shape = var.shape\n initial_value = functools.partial(initializer, shape=slot_shape, dtype=var.dtype)\n else:\n initial_value = initializer\n with self._distribution_strategy_scope():\n strategy = distribute_lib.get_strategy()\n if not strategy.extended.variable_created_in_scope(var):\n raise ValueError(\"Trying to create optimizer slot variable under the scope for tf.distribute.Strategy ({}), which is different from the scope used for the original variable ({}). Make sure the slot variables are created under the same strategy scope. This may happen if you're restoring from a checkpoint outside the scope\".format(strategy, var))\n with strategy.extended.colocate_vars_with(var):\n weight = tf_variables.Variable(name='%s/%s' % (var._shared_name, slot_name), dtype=var.dtype, trainable=False, initial_value=initial_value)\n backend.track_variable(weight)\n slot_dict[slot_name] = weight\n self._restore_slot_variable(slot_name=slot_name, variable=var, slot_variable=weight)\n self._weights.append(weight)\n return weight\n\n def get_slot(self, var, slot_name):\n var_key = _var_key(var)\n slot_dict = self._slots[var_key]\n return slot_dict[slot_name]\n\n def _prepare(self, var_list):\n keys = set()\n for var in var_list:\n if isinstance(var, ds_values.DistributedValues):\n var_devices = var._devices\n else:\n var_devices = [var.device]\n var_dtype = var.dtype.base_dtype\n for var_device in var_devices:\n keys.add((var_device, var_dtype))\n apply_state = {}\n for var_device, var_dtype in keys:\n apply_state[var_device, var_dtype] = {}\n with ops.device(var_device):\n self._prepare_local(var_device, var_dtype, apply_state)\n return apply_state\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n if 'learning_rate' in self._hyper:\n lr_t = array_ops.identity(self._decayed_lr(var_dtype))\n apply_state[var_device, var_dtype]['lr_t'] = lr_t\n\n def _fallback_apply_state(self, var_device, var_dtype):\n \"\"\"Compatibility for subclasses that don't pass apply_state through.\"\"\"\n apply_state = {(var_device, var_dtype): {}}\n self._prepare_local(var_device, var_dtype, apply_state)\n return apply_state[var_device, var_dtype]\n\n def _create_hypers(self):\n if self._hypers_created:\n return\n with self._distribution_strategy_scope():\n for name, value in sorted(self._hyper.items()):\n if isinstance(value, (tensor.Tensor, tf_variables.Variable)) or callable(value):\n continue\n else:\n self._hyper[name] = self.add_weight(name, shape=[], trainable=False, initializer=value, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)\n self._hypers_created = True\n\n @property\n def iterations(self):\n \"\"\"Variable. 
The number of training steps this Optimizer has run.\"\"\"\n if self._iterations is None:\n with self._distribution_strategy_scope():\n self._iterations = self.add_weight('iter', shape=[], dtype=dtypes.int64, trainable=False, aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)\n self._weights.append(self._iterations)\n return self._iterations\n\n @iterations.setter\n def iterations(self, variable):\n if self._iterations is not None:\n raise RuntimeError('Cannot set `iterations` to a new Variable after the Optimizer weights have been created')\n self._iterations = variable\n self._weights.append(self._iterations)\n\n def _decayed_lr(self, var_dtype):\n \"\"\"Get decayed learning rate as a Tensor with dtype=var_dtype.\"\"\"\n lr_t = self._get_hyper('learning_rate', var_dtype)\n if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):\n local_step = math_ops.cast(self.iterations, var_dtype)\n lr_t = math_ops.cast(lr_t(local_step), var_dtype)\n if self._initial_decay > 0.0:\n local_step = math_ops.cast(self.iterations, var_dtype)\n decay_t = math_ops.cast(self._initial_decay, var_dtype)\n lr_t = lr_t / (1.0 + decay_t * local_step)\n return lr_t\n\n @abc.abstractmethod\n def get_config(self):\n \"\"\"Returns the config of the optimizer.\n\n An optimizer config is a Python dictionary (serializable)\n containing the configuration of an optimizer.\n The same optimizer can be reinstantiated later\n (without any saved state) from this configuration.\n\n Returns:\n Python dictionary.\n \"\"\"\n config = {'name': self._name}\n if self.clipnorm is not None:\n config['clipnorm'] = self.clipnorm\n if self.clipvalue is not None:\n config['clipvalue'] = self.clipvalue\n if self.global_clipnorm is not None:\n config['global_clipnorm'] = self.global_clipnorm\n return config\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n \"\"\"Creates an optimizer from its config.\n\n This method is the reverse of `get_config`,\n capable of instantiating the same optimizer from the config\n dictionary.\n\n Args:\n config: A Python dictionary, typically the output of get_config.\n custom_objects: A Python dictionary mapping names to additional Python\n objects used to create this optimizer, such as a function used for a\n hyperparameter.\n\n Returns:\n An optimizer instance.\n \"\"\"\n if 'lr' in config:\n config['learning_rate'] = config.pop('lr')\n if 'learning_rate' in config:\n if isinstance(config['learning_rate'], dict):\n config['learning_rate'] = learning_rate_schedule.deserialize(config['learning_rate'], custom_objects=custom_objects)\n return cls(**config)\n\n def _serialize_hyperparameter(self, hyperparameter_name):\n \"\"\"Serialize a hyperparameter that can be a float, callable, or Tensor.\"\"\"\n value = self._hyper[hyperparameter_name]\n if isinstance(value, learning_rate_schedule.LearningRateSchedule):\n return learning_rate_schedule.serialize(value)\n if callable(value):\n return value()\n if tensor_util.is_tf_type(value):\n return backend.get_value(value)\n return value\n\n def variables(self):\n \"\"\"Returns variables of this Optimizer based on the order created.\"\"\"\n return self._weights\n\n @property\n def weights(self):\n \"\"\"Returns variables of this Optimizer based on the order created.\"\"\"\n return self._weights\n\n def get_weights(self):\n \"\"\"Returns the current weights of the optimizer.\n\n The weights of an optimizer are its state (ie, variables).\n This function returns the weight values associated with this\n optimizer as a list of Numpy 
arrays. The first value is always the\n iterations count of the optimizer, followed by the optimizer's state\n variables in the order they were created. The returned list can in turn\n be used to load state into similarly parameterized optimizers.\n\n For example, the RMSprop optimizer for this simple model returns a list of\n three values-- the iteration count, followed by the root-mean-square value\n of the kernel and bias of the single Dense layer:\n\n >>> opt = tf.keras.optimizers.RMSprop()\n >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])\n >>> m.compile(opt, loss='mse')\n >>> data = np.arange(100).reshape(5, 20)\n >>> labels = np.zeros(5)\n >>> results = m.fit(data, labels) # Training.\n >>> len(opt.get_weights())\n 3\n\n Returns:\n Weights values as a list of numpy arrays.\n \"\"\"\n params = self.weights\n return backend.batch_get_value(params)\n\n def set_weights(self, weights):\n \"\"\"Set the weights of the optimizer.\n\n The weights of an optimizer are its state (ie, variables).\n This function takes the weight values associated with this\n optimizer as a list of Numpy arrays. The first value is always the\n iterations count of the optimizer, followed by the optimizer's state\n variables in the order they are created. The passed values are used to set\n the new state of the optimizer.\n\n For example, the RMSprop optimizer for this simple model takes a list of\n three values-- the iteration count, followed by the root-mean-square value\n of the kernel and bias of the single Dense layer:\n\n >>> opt = tf.keras.optimizers.RMSprop()\n >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])\n >>> m.compile(opt, loss='mse')\n >>> data = np.arange(100).reshape(5, 20)\n >>> labels = np.zeros(5)\n >>> results = m.fit(data, labels) # Training.\n >>> new_weights = [np.array(10), np.ones([20, 10]), np.zeros([10])]\n >>> opt.set_weights(new_weights)\n >>> opt.iterations\n <tf.Variable 'RMSprop/iter:0' shape=() dtype=int64, numpy=10>\n\n Args:\n weights: weight values as a list of numpy arrays.\n \"\"\"\n params = self.weights\n if len(params) != len(weights):\n raise ValueError('You called `set_weights(weights)` on optimizer ' + self._name + ' with a weight list of length ' + str(len(weights)) + ', but the optimizer was expecting ' + str(len(params)) + ' weights. Provided weights: ' + str(weights)[:50] + '...')\n if not params:\n return\n weight_value_tuples = []\n param_values = backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError('Optimizer weight shape ' + str(pv.shape) + ' not compatible with provided weight shape ' + str(w.shape))\n weight_value_tuples.append((p, w))\n backend.batch_set_value(weight_value_tuples)\n\n def add_weight(self, name, shape, dtype=None, initializer='zeros', trainable=None, synchronization=tf_variables.VariableSynchronization.AUTO, aggregation=tf_variables.VariableAggregation.NONE):\n if dtype is None:\n dtype = dtypes.float32\n if isinstance(initializer, str) or callable(initializer):\n initializer = initializers.get(initializer)\n if synchronization == tf_variables.VariableSynchronization.ON_READ:\n if trainable:\n raise ValueError('Synchronization value can be set to VariableSynchronization.ON_READ only for non-trainable variables.
You have specified trainable=True and synchronization=VariableSynchronization.ON_READ.')\n else:\n trainable = False\n elif trainable is None:\n trainable = True\n variable = self._add_variable_with_custom_getter(name=name, shape=shape, getter=base_layer_utils.make_variable, overwrite=True, initializer=initializer, dtype=dtype, trainable=trainable, use_resource=True, synchronization=synchronization, aggregation=aggregation)\n backend.track_variable(variable)\n return variable\n\n def _init_set_name(self, name, zero_based=True):\n if not name:\n self._name = backend.unique_object_name(generic_utils.to_snake_case(self.__class__.__name__), zero_based=zero_based)\n else:\n self._name = name\n\n def _assert_valid_dtypes(self, tensors):\n \"\"\"Asserts tensors are all valid types (see `_valid_dtypes`).\n\n Args:\n tensors: Tensors to check.\n\n Raises:\n ValueError: If any tensor is not a valid type.\n \"\"\"\n valid_dtypes = self._valid_dtypes()\n for t in tensors:\n dtype = t.dtype.base_dtype\n if dtype not in valid_dtypes:\n raise ValueError('Invalid type %r for %s, expected: %s.' % (dtype, t.name, [v for v in valid_dtypes]))\n\n def _valid_dtypes(self):\n \"\"\"Valid types for loss, variables and gradients.\n\n Subclasses should override to allow other float types.\n\n Returns:\n Valid types for loss, variables and gradients.\n \"\"\"\n return _DEFAULT_VALID_DTYPES\n\n def _call_if_callable(self, param):\n \"\"\"Call the function if param is callable.\"\"\"\n return param() if callable(param) else param\n\n def _resource_apply_dense(self, grad, handle, apply_state):\n \"\"\"Add ops to apply dense gradients to the variable `handle`.\n\n Args:\n grad: a `Tensor` representing the gradient.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n apply_state: A dict which is used across multiple apply calls.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n raise NotImplementedError('Must be implemented in subclasses.')\n\n def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices, **kwargs):\n \"\"\"Add ops to apply sparse gradients to `handle`, with repeated indices.\n\n Optimizers which override this method must deal with repeated indices. See\n the docstring of `_apply_sparse_duplicate_indices` for details. By default\n the correct behavior, to sum non-unique indices and their associated\n gradients, is enforced by first pre-processing `grad` and `indices` and\n passing them on to `_resource_apply_sparse`. Optimizers which deal correctly\n with duplicate indices may instead override this method to avoid the\n overhead of summing.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n indices: a `Tensor` of integral type representing the indices for which\n the gradient is nonzero. Indices may be repeated.\n **kwargs: May optionally contain `apply_state`\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n summed_grad, unique_indices = _deduplicate_indexed_slices(values=grad, indices=indices)\n return self._resource_apply_sparse(summed_grad, handle, unique_indices, **kwargs)\n\n def _resource_apply_sparse(self, grad, handle, indices, apply_state):\n \"\"\"Add ops to apply sparse gradients to the variable `handle`.\n\n Similar to `_apply_sparse`, the `indices` argument to this method has been\n de-duplicated. 
Optimizers which deal correctly with non-unique indices may\n instead override `_resource_apply_sparse_duplicate_indices` to avoid this\n overhead.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n indices: a `Tensor` of integral type representing the indices for which\n the gradient is nonzero. Indices are unique.\n apply_state: A dict which is used across multiple apply calls.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n raise NotImplementedError('Must be implemented in subclasses.')\n\n def _resource_scatter_add(self, x, i, v):\n with ops.control_dependencies([gen_resource_variable_ops.ResourceScatterAdd(resource=x.handle, indices=i, updates=v)]):\n return x.value()\n\n def _resource_scatter_update(self, x, i, v):\n with ops.control_dependencies([gen_resource_variable_ops.ResourceScatterUpdate(resource=x.handle, indices=i, updates=v)]):\n return x.value()\n\n @property\n @layer_utils.cached_per_instance\n def _dense_apply_args(self):\n return tf_inspect.getfullargspec(self._resource_apply_dense).args\n\n @property\n @layer_utils.cached_per_instance\n def _sparse_apply_args(self):\n return tf_inspect.getfullargspec(self._resource_apply_sparse).args\n\n def _restore_slot_variable(self, slot_name, variable, slot_variable):\n \"\"\"Restore a newly created slot variable's value.\"\"\"\n variable_key = _var_key(variable)\n deferred_restorations = self._deferred_slot_restorations.get(slot_name, {}).pop(variable_key, [])\n deferred_restorations.sort(key=lambda position: position.restore_uid, reverse=True)\n for checkpoint_position in deferred_restorations:\n checkpoint_position.restore(slot_variable)\n\n def _create_or_restore_slot_variable(self, slot_variable_position, slot_name, variable):\n \"\"\"Restore a slot variable's value, possibly creating it.\n\n Called when a variable which has an associated slot variable is created or\n restored. When executing eagerly, we create the slot variable with a\n restoring initializer.\n\n No new variables are created when graph building. Instead,\n _restore_slot_variable catches these after normal creation and adds restore\n ops to the graph. 
This method is nonetheless important when graph building\n for the case when a slot variable has already been created but `variable`\n has just been added to a dependency graph (causing us to realize that the\n slot variable needs to be restored).\n\n Args:\n slot_variable_position: A `trackable._CheckpointPosition` object\n indicating the slot variable `Trackable` object to be restored.\n slot_name: The name of this `Optimizer`'s slot to restore into.\n variable: The variable object this slot is being created for.\n \"\"\"\n variable_key = _var_key(variable)\n slot_dict = self._slots.get(variable_key, {})\n slot_variable = slot_dict.get(slot_name, None)\n if slot_variable is None and context.executing_eagerly() and slot_variable_position.is_simple_variable() and (not ops.get_default_graph()._variable_creator_stack or self._distribution_strategy):\n initializer = trackable.CheckpointInitialValueCallable(checkpoint_position=slot_variable_position)\n slot_variable = self.add_slot(var=variable, initializer=initializer, slot_name=slot_name, shape=slot_variable_position.value_shape())\n if slot_variable is not None:\n slot_variable_position.restore(slot_variable)\n else:\n self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(variable_key, []).append(slot_variable_position)\n\n @contextlib.contextmanager\n def _distribution_strategy_scope(self):\n \"\"\"Returns the `tf.distribute.Strategy` this optimizer was created under.\"\"\"\n if self._distribution_strategy and (not distribute_lib.has_strategy()):\n with self._distribution_strategy.scope():\n yield self._distribution_strategy.scope()\n else:\n yield", "docstring": "Base class for Keras optimizers.\n\nYou should not use this class directly, but instead instantiate one of its\nsubclasses such as `tf.keras.optimizers.SGD`, `tf.keras.optimizers.Adam`, etc.\n\n### Usage\n\n```python\n# Create an optimizer with the desired parameters.\nopt = tf.keras.optimizers.SGD(learning_rate=0.1)\n# `loss` is a callable that takes no argument and returns the value\n# to minimize.\nloss = lambda: 3 * var1 * var1 + 2 * var2 * var2\n# In graph mode, returns op that minimizes the loss by updating the listed\n# variables.\nopt_op = opt.minimize(loss, var_list=[var1, var2])\nopt_op.run()\n# In eager mode, simply call minimize to update the list of variables.\nopt.minimize(loss, var_list=[var1, var2])\n```\n\n### Usage in custom training loops\n\nIn Keras models, sometimes variables are created when the model is first\ncalled, instead of construction time. Examples include 1) sequential models\nwithout input shape pre-defined, or 2) subclassed models. Pass var_list as\ncallable in these cases.\n\nExample:\n\n```python\nopt = tf.keras.optimizers.SGD(learning_rate=0.1)\nmodel = tf.keras.Sequential()\nmodel.add(tf.keras.layers.Dense(num_hidden, activation='relu'))\nmodel.add(tf.keras.layers.Dense(num_classes, activation='sigmoid'))\nloss_fn = lambda: tf.keras.losses.mse(model(input), output)\nvar_list_fn = lambda: model.trainable_weights\nfor input, output in data:\n opt.minimize(loss_fn, var_list_fn)\n```\n\n### Processing gradients before applying them\n\nCalling `minimize()` takes care of both computing the gradients and\napplying them to the variables. If you want to process the gradients\nbefore applying them you can instead use the optimizer in three steps:\n\n1. Compute the gradients with `tf.GradientTape`.\n2. Process the gradients as you wish.\n3. 
Apply the processed gradients with `apply_gradients()`.\n\nExample:\n\n```python\n# Create an optimizer.\nopt = tf.keras.optimizers.SGD(learning_rate=0.1)\n\n# Compute the gradients for a list of variables.\nwith tf.GradientTape() as tape:\n loss = <call_loss_function>\nvars = <list_of_variables>\ngrads = tape.gradient(loss, vars)\n\n# Process the gradients, for example cap them, etc.\n# capped_grads = [MyCapper(g) for g in grads]\nprocessed_grads = [process_gradient(g) for g in grads]\n\n# Ask the optimizer to apply the processed gradients.\nopt.apply_gradients(zip(processed_grads, var_list))\n```\n\n### Use with `tf.distribute.Strategy`\n\nThis optimizer class is `tf.distribute.Strategy` aware, which means it\nautomatically sums gradients across all replicas. To average gradients,\nyou divide your loss by the global batch size, which is done\nautomatically if you use `tf.keras` built-in training or evaluation loops.\nSee the `reduction` argument of your loss which should be set to\n`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` for averaging or\n`tf.keras.losses.Reduction.SUM` for not averaging.\n\nTo aggregate gradients yourself, call `apply_gradients` with\n`experimental_aggregate_gradients` set to False. This is useful if you need to\nprocess aggregated gradients.\n\nIf you are not using these and you want to average gradients, you should use\n`tf.math.reduce_sum` to add up your per-example losses and then divide by the\nglobal batch size. Note that when using `tf.distribute.Strategy`, the first\ncomponent of a tensor's shape is the *replica-local* batch size, which is off\nby a factor equal to the number of replicas being used to compute a single\nstep. As a result, using `tf.math.reduce_mean` will give the wrong answer,\nresulting in gradients that can be many times too big.\n\n### Variable Constraints\n\nAll Keras optimizers respect variable constraints. If a constraint function is\npassed to any variable, the constraint will be applied to the variable after\nthe gradient has been applied to the variable.\nImportant: If the gradient is a sparse tensor, variable constraints are not supported.\n\n### Thread Compatibility\n\nThe entire optimizer is currently thread compatible, not thread-safe. The user\nneeds to perform synchronization if necessary.\n\n### Slots\n\nMany optimizer subclasses, such as `Adam` and `Adagrad`, allocate and manage\nadditional variables associated with the variables to train. These are called\nSlots. Slots have names and you can ask the optimizer for the names of\nthe slots that it uses. Once you have a slot name you can ask the optimizer\nfor the variable it created to hold the slot value.\n\nThis can be useful if you want to log or debug a training algorithm, report stats\nabout the slots, etc.\n\n### Hyperparameters\n\nThese are arguments passed to the optimizer subclass constructor\n(the `__init__` method), and then passed to `self._set_hyper()`.\nThey can be either regular Python values (like 1.0), tensors, or\ncallables.
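+The gradient-processing recipe above, made concrete with global-norm clipping standing in for the `process_gradient` placeholder (an illustrative sketch, not from the record):
+
+```python
+import tensorflow as tf
+
+var1, var2 = tf.Variable(2.0), tf.Variable(3.0)
+opt = tf.keras.optimizers.SGD(learning_rate=0.1)
+
+with tf.GradientTape() as tape:
+    loss = 3 * var1 * var1 + 2 * var2 * var2
+grads = tape.gradient(loss, [var1, var2])
+# "Process the gradients": here, clip them by global norm.
+clipped, _ = tf.clip_by_global_norm(grads, clip_norm=1.0)
+opt.apply_gradients(zip(clipped, [var1, var2]))
+```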
If they are callable, the callable will be called during\n`apply_gradients()` to get the value for the hyper parameter.\n\nHyperparameters can be overwritten through user code:\n\nExample:\n\n```python\n# Create an optimizer with the desired parameters.\nopt = tf.keras.optimizers.SGD(learning_rate=0.1)\n# `loss` is a callable that takes no argument and returns the value\n# to minimize.\nloss = lambda: 3 * var1 + 2 * var2\n# In eager mode, simply call minimize to update the list of variables.\nopt.minimize(loss, var_list=[var1, var2])\n# update learning rate\nopt.learning_rate = 0.05\nopt.minimize(loss, var_list=[var1, var2])\n```\n\n### Callable learning rate\n\nOptimizer accepts a callable learning rate in two ways. The first way is\nthrough built-in or customized\n`tf.keras.optimizers.schedules.LearningRateSchedule`. The schedule will be\ncalled on each iteration with `schedule(iteration)`, a `tf.Variable`\nowned by the optimizer.\n\nExample:\n\n>>> var = tf.Variable(np.random.random(size=(1,)))\n>>> learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(\n... initial_learning_rate=.01, decay_steps=20, decay_rate=.1)\n>>> opt = tf.keras.optimizers.SGD(learning_rate=learning_rate)\n>>> loss = lambda: 3 * var\n>>> opt.minimize(loss, var_list=[var])\n<tf.Variable...\n\nThe second way is through a callable function that\ntakes no arguments.\n\nExample:\n\n>>> var = tf.Variable(np.random.random(size=(1,)))\n>>> def lr_callable():\n... return .1\n>>> opt = tf.keras.optimizers.SGD(learning_rate=lr_callable)\n>>> loss = lambda: 3 * var\n>>> opt.minimize(loss, var_list=[var])\n<tf.Variable...\n\n### Creating a custom optimizer\n\nIf you intend to create your own optimization algorithm, simply inherit from\nthis class and override the following methods:\n\n - `_resource_apply_dense` (update variable given gradient tensor is dense)\n - `_resource_apply_sparse` (update variable given gradient tensor is sparse)\n - `_create_slots`\n (if your optimizer algorithm requires additional variables)\n - `get_config`\n (serialization of the optimizer, include all hyper parameters)"}
+{"repo": "transformers", "function": "def sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor, num_masks: int) -> torch.Tensor:\n criterion = nn.BCEWithLogitsLoss(reduction='none')\n cross_entropy_loss = criterion(inputs, labels)\n loss = cross_entropy_loss.mean(1).sum() / num_masks\n return loss", "docstring": "Args:\n inputs (`torch.Tensor`):\n A float tensor of arbitrary shape.\n labels (`torch.Tensor`):\n A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs\n (0 for the negative class and 1 for the positive class).\n\nReturns:\n loss (`torch.Tensor`): The computed loss."}
+{"repo": "tensorflow", "function": "def _clone_helper(op_to_clone, variant_tensor_ops):\n remap_dict = {}\n for input_tensor in op_to_clone.inputs:\n input_tensor_op = input_tensor.op\n if input_tensor_op in variant_tensor_ops:\n recursive_map = _clone_helper(input_tensor_op, variant_tensor_ops)\n remap_dict.update(recursive_map)\n inputs_list = []\n for input_tensor in op_to_clone.inputs:\n input_tensor_op = input_tensor.op\n if input_tensor_op in remap_dict:\n remapped_input = remap_dict[input_tensor_op].outputs[0]\n inputs_list.append(remapped_input)\n else:\n inputs_list.append(input_tensor_op.outputs[input_tensor.value_index])\n g = ops.get_default_graph()\n new_op = g.create_op(op_to_clone.type, inputs_list, [o.dtype for o in op_to_clone.outputs], name=op_to_clone.name, attrs=op_to_clone.node_def.attr, op_def=_get_op_def(op_to_clone))\n remap_dict[op_to_clone] = new_op\n return remap_dict", "docstring": "Helper method that recursively clones `op_to_clone`.\n\nArgs:\n op_to_clone: The op we want to clone.\n variant_tensor_ops: A list of ops that we have to clone along the way.\n\nReturns:\n A dictionary mapping old_ops to new_ops created.
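+The BCE reduction in the loss record above (per-element loss, mean over elements, sum over masks, divide by the mask count) in a self-contained sketch with made-up shapes:
+
+```python
+import torch
+from torch import nn
+
+num_masks = 2
+inputs = torch.randn(num_masks, 5)                    # logits
+labels = torch.randint(0, 2, (num_masks, 5)).float()  # 0/1 targets
+per_element = nn.BCEWithLogitsLoss(reduction="none")(inputs, labels)
+loss = per_element.mean(1).sum() / num_masks          # scalar
+```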
Includes op_to_clone\n as a key."} +{"repo": "tensorflow", "function": "def parse_single_example(serialized, features, name=None, example_names=None):\n return parse_single_example_v2(serialized, features, example_names, name)", "docstring": "Parses a single `Example` proto.\n\nSimilar to `parse_example`, except:\n\nFor dense tensors, the returned `Tensor` is identical to the output of\n`parse_example`, except there is no batch dimension, the output shape is the\nsame as the shape given in `dense_shape`.\n\nFor `SparseTensor`s, the first (batch) column of the indices matrix is removed\n(the indices matrix is a column vector), the values vector is unchanged, and\nthe first (`batch_size`) entry of the shape vector is removed (it is now a\nsingle element vector).\n\nOne might see performance advantages by batching `Example` protos with\n`parse_example` instead of using this function directly.\n\nArgs:\n serialized: A scalar string Tensor, a single serialized Example.\n features: A mapping of feature keys to `FixedLenFeature` or\n `VarLenFeature` values.\n name: A name for this operation (optional).\n example_names: (Optional) A scalar string Tensor, the associated name.\n\nReturns:\n A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.\n\nRaises:\n ValueError: if any feature is invalid."} +{"repo": "pytype", "function": "def topological_sort(nodes: Iterable[_SuccessorNode]) -> Generator[_SuccessorNode, None, None]:\n incoming = {node: set(getattr(node, 'incoming', ())) for node in nodes}\n outgoing = collections.defaultdict(set)\n for node in nodes:\n for inc in incoming[node]:\n outgoing[inc].add(node)\n stack = [node for node in nodes if not incoming[node]]\n for _ in nodes:\n if not stack:\n raise ValueError('Circular graph')\n leaf = stack.pop()\n yield leaf\n for out in outgoing[leaf]:\n incoming[out].remove(leaf)\n if not incoming[out]:\n stack.append(out)\n assert not stack", "docstring": "Sort a list of nodes topologically.\n\nThis will order the nodes so that any node that appears in the \"incoming\"\nlist of another node n2 will appear in the output before n2. It assumes that\nthe graph doesn't have any cycles.\nIf there are multiple ways to sort the list, a random one is picked.\n\nArgs:\n nodes: A sequence of nodes. Each node may have an attribute \"incoming\", a\n list of nodes (every node in this list needs to be in \"nodes\"). If\n \"incoming\" is not there, it's assumed to be empty. The list of nodes can't\n have duplicates.\n\nYields:\n The nodes in their topological order.\nRaises:\n ValueError: If the graph contains a cycle."} +{"repo": "tensorflow", "function": "def run_saved_model_with_feed_dict(saved_model_dir, tag_set, signature_def_key, input_tensor_key_feed_dict, outdir, overwrite_flag, worker=None, init_tpu=False, use_tfrt=False, tf_debug=False):\n meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)\n inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key)\n for input_key_name in input_tensor_key_feed_dict.keys():\n if input_key_name not in inputs_tensor_info:\n raise ValueError('\"%s\" is not a valid input key. Please choose from %s, or use --show option.' 
% (input_key_name, '\"' + '\", \"'.join(inputs_tensor_info.keys()) + '\"'))\n inputs_feed_dict = {inputs_tensor_info[key].name: tensor for key, tensor in input_tensor_key_feed_dict.items()}\n outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def, signature_def_key)\n output_tensor_keys_sorted = sorted(outputs_tensor_info.keys())\n output_tensor_names_sorted = [outputs_tensor_info[tensor_key].name for tensor_key in output_tensor_keys_sorted]\n config = None\n if use_tfrt:\n logging.info('Using TFRT session.')\n config = config_pb2.ConfigProto(experimental=config_pb2.ConfigProto.Experimental(use_tfrt=True))\n with session.Session(worker, graph=ops_lib.Graph(), config=config) as sess:\n if init_tpu:\n print('Initializing TPU System ...')\n sess.run(tpu.initialize_system())\n loader.load(sess, tag_set.split(','), saved_model_dir)\n if tf_debug:\n sess = local_cli_wrapper.LocalCLIDebugWrapperSession(sess)\n outputs = sess.run(output_tensor_names_sorted, feed_dict=inputs_feed_dict)\n for i, output in enumerate(outputs):\n output_tensor_key = output_tensor_keys_sorted[i]\n print('Result for output key %s:\\n%s' % (output_tensor_key, output))\n if outdir:\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n output_full_path = os.path.join(outdir, output_tensor_key + '.npy')\n if not overwrite_flag and os.path.exists(output_full_path):\n raise RuntimeError('Output file %s already exists. Add \"--overwrite\" to overwrite the existing output files.' % output_full_path)\n np.save(output_full_path, output)\n print('Output %s is saved to %s' % (output_tensor_key, output_full_path))", "docstring": "Runs SavedModel and fetch all outputs.\n\nRuns the input dictionary through the MetaGraphDef within a SavedModel\nspecified by the given tag_set and SignatureDef. Also save the outputs to file\nif outdir is not None.\n\nArgs:\n saved_model_dir: Directory containing the SavedModel to execute.\n tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in\n string format, separated by ','. For tag-set contains multiple tags, all\n tags must be passed in.\n signature_def_key: A SignatureDef key string.\n input_tensor_key_feed_dict: A dictionary maps input keys to numpy ndarrays.\n outdir: A directory to save the outputs to. If the directory doesn't exist,\n it will be created.\n overwrite_flag: A boolean flag to allow overwrite output file if file with\n the same name exists.\n worker: If provided, the session will be run on the worker. 
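+For comparison with the CLI helper above, the same feed-and-fetch flow through the TF2 loading API (the path and signature key here are illustrative, not from the record):
+
+```python
+import tensorflow as tf
+
+loaded = tf.saved_model.load("/tmp/my_saved_model")  # hypothetical path
+fn = loaded.signatures["serving_default"]
+print(fn.structured_input_signature)                 # input keys and specs
+print(fn(x=tf.ones([1, 3])))                         # feed by input key
+```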
Valid worker\n specification is a bns or gRPC path.\n init_tpu: If true, the TPU system will be initialized after the session\n is created.\n use_tfrt: If true, TFRT session will be used.\n tf_debug: A boolean flag to use TensorFlow Debugger (TFDBG) to observe the\n intermediate Tensor values and runtime GraphDefs while running the\n SavedModel.\n\nRaises:\n ValueError: When any of the input tensor keys is not valid.\n RuntimeError: An error when output file already exists and overwrite is not\n enabled."} +{"repo": "pytype", "function": "class CollapseLongUnions(visitors.Visitor):\n\n def __init__(self, max_length: int=7):\n super().__init__()\n self.generic_type = pytd.AnythingType()\n self.max_length = max_length\n\n def VisitUnionType(self, union):\n if len(union.type_list) > self.max_length and (not any((isinstance(t, pytd.Literal) for t in union.type_list))):\n return self.generic_type\n elif self.generic_type in union.type_list:\n return pytd_utils.JoinTypes(union.type_list)\n else:\n return union", "docstring": "Shortens long unions to object (or \"?\").\n\nPoor man's version of FindCommonSuperClasses. Shorten types like\n\"str or unicode or int or float or list\" to just \"object\" or \"?\".\n\nAdditionally, if the union already contains at least one \"object\", we also\npotentially replace the entire union with just \"object\".\n\nAttributes:\n max_length: The maximum number of types to allow in a union. If there are\n more types than this, it is shortened."} +{"repo": "transformers", "function": "class Siglip2Processor(ProcessorMixin):\n attributes = ['image_processor', 'tokenizer']\n image_processor_class = 'AutoImageProcessor'\n tokenizer_class = 'AutoTokenizer'\n\n def __init__(self, image_processor, tokenizer):\n super().__init__(image_processor, tokenizer)\n\n def __call__(self, images: Optional[Union[ImageInput, List[ImageInput], List[List[ImageInput]]]]=None, text: Optional[Union[TextInput, 'PreTokenizedInput', List[TextInput], List['PreTokenizedInput']]]=None, audio=None, videos=None, **kwargs: Unpack[Siglip2ProcessorKwargs]) -> BatchFeature:\n \"\"\"\n Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`\n and `kwargs` arguments to GemmaTokenizerFast's [`~GemmaTokenizerFast.__call__`] if `text` is not `None` to encode\n the text. To prepare the image(s), this method forwards the `images` argument to\n Siglip2ImageProcessor's [`~Siglip2ImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring\n of the above two methods for more information.\n\n Args:\n images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):\n The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n tensor. Both channels-first and channels-last formats are supported.\n text (`str`, `List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set\n `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `max_length`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding\n index) among:\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n max_length (`int`, *optional*, defaults to 64):\n Maximum length of the returned list and optionally padding length (see above).\n truncation (`bool`, *optional*, defaults to `True`):\n Activates truncation to cut input sequences longer than `max_length` to `max_length`.\n return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to `'pt'`):\n If set, will return tensors of a particular framework. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return NumPy `np.ndarray` objects.\n - `'jax'`: Return JAX `jnp.ndarray` objects.\n\n Returns:\n [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n\n - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.\n - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when\n `return_attention_mask=True` or if *\"attention_mask\"* is in `self.model_input_names` and if `text` is not\n `None`).\n - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.\n - **pixel_attention_mask** -- Attention mask for the pixel values. Returned when `images` is not `None`.\n - **spatial_shapes** -- The number of horizontal and vertical patches per image.\n Returned when `images` is not `None`.\n \"\"\"\n output_kwargs = self._merge_kwargs(Siglip2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n if text is None and images is None:\n raise ValueError('You have to specify either text or images. Both cannot be none.')\n if text is not None:\n encoding = self.tokenizer(text, **output_kwargs['text_kwargs'])\n if images is not None:\n image_features = self.image_processor(images, **output_kwargs['images_kwargs'])\n if text is not None and images is not None:\n encoding.update(image_features)\n return encoding\n elif text is not None:\n return encoding\n else:\n return_tensors = output_kwargs['common_kwargs']['return_tensors']\n return BatchFeature(data=dict(**image_features), tensor_type=return_tensors)\n\n def decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to Siglip2Tokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to\n the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, **kwargs)\n\n def batch_decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to Siglip2Tokenizer's [`~PreTrainedTokenizer.batch_decode`]. 
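+A typical round trip for the processor above; the checkpoint name is illustrative, not taken from the record:
+
+```python
+from PIL import Image
+from transformers import AutoProcessor
+
+processor = AutoProcessor.from_pretrained("google/siglip2-base-patch16-224")
+image = Image.new("RGB", (224, 224))
+batch = processor(images=image, text=["a photo of a cat"],
+                  padding="max_length", return_tensors="pt")
+print(batch["input_ids"].shape, batch["pixel_values"].shape)
+```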
Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, **kwargs)\n\n @property\n def model_input_names(self):\n tokenizer_input_names = self.tokenizer.model_input_names\n image_processor_input_names = self.image_processor.model_input_names\n return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))", "docstring": "Constructs a Siglip2 processor which wraps a Siglip2 image processor and a Gemma tokenizer into a single processor.\n\n[`Siglip2Processor`] offers all the functionalities of [`Siglip2ImageProcessor`] and [`GemmaTokenizerFast`]. See the\n[`~Siglip2Processor.__call__`] and [`~Siglip2Processor.decode`] for more information.\n\nArgs:\n image_processor ([`Siglip2ImageProcessor`]):\n The image processor is a required input.\n tokenizer ([`GemmaTokenizerFast`]):\n The tokenizer is a required input."} +{"repo": "transformers", "function": "def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, training: bool=False) -> Tuple[tf.Tensor]:\n residual = hidden_states\n hidden_states = self.layer_norm1(inputs=hidden_states)\n attention_outputs = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training)\n hidden_states = attention_outputs[0]\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.layer_norm2(inputs=hidden_states)\n hidden_states = self.mlp(hidden_states=hidden_states)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,) + attention_outputs[1:]\n return outputs", "docstring": "Args:\n hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`tf.Tensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n causal_attention_mask (`tf.Tensor`): causal attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n output_attentions (`bool`):\n Whether or not to return the attentions tensors of all attention layers. 
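+The `call` above follows the standard pre-LayerNorm residual pattern; its data flow, stripped to a framework-free sketch:
+
+```python
+def pre_ln_block(x, norm1, attn, norm2, mlp):
+    # Attention sub-block: normalize, attend, add residual.
+    x = x + attn(norm1(x))
+    # MLP sub-block: normalize, transform, add residual.
+    return x + mlp(norm2(x))
+```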
See `outputs` under returned\n tensors for more detail."} +{"repo": "pyglove", "function": "def crowding_distance_sort(frontier: List[pg.DNA]) -> List[pg.DNA]:\n if len(frontier) <= 1:\n return frontier\n individual_num = len(frontier)\n objective_num = len(base.get_fitness(frontier[0]))\n distances = [0.0] * individual_num\n dist = [list(range(individual_num)) for i in range(objective_num)]\n for i in range(objective_num):\n dist[i] = sorted(dist[i], key=lambda idx: base.get_fitness(frontier[idx])[i])\n max_value = base.get_fitness(frontier[dist[i][individual_num - 1]])[i]\n min_value = base.get_fitness(frontier[dist[i][0]])[i]\n for j in range(individual_num):\n if j == 0 or j == individual_num - 1:\n distances[dist[i][j]] = objective_num\n elif max_value > min_value:\n distances[dist[i][j]] += (base.get_fitness(frontier[dist[i][j + 1]])[i] - base.get_fitness(frontier[dist[i][j - 1]])[i]) / (max_value - min_value)\n idx_arr = list(range(individual_num))\n idx_arr = sorted(idx_arr, key=lambda idx: distances[idx], reverse=True)\n return [frontier[idx_arr[i]] for i in range(individual_num)]", "docstring": "Algorithm crowding-distance-assignment implementation.\n\nCheck section III B in the original paper.\n\nArgs:\n frontier: A list of Individual that need to be sorted.\n\nReturns:\n sorted list of the original list."} +{"repo": "tensorflow", "function": "def banded_triangular_solve(bands, rhs, lower=True, adjoint=False, name=None):\n with ops.name_scope(name, 'banded_triangular_solve', [bands, rhs]):\n return gen_linalg_ops.banded_triangular_solve(bands, rhs, lower=lower, adjoint=adjoint)", "docstring": "Solve triangular systems of equations with a banded solver.\n\n`bands` is a tensor of shape `[..., K, M]`, where `K` represents the number\nof bands stored. This corresponds to a batch of `M` by `M` matrices, whose\n`K` subdiagonals (when `lower` is `True`) are stored.\n\nThis operator broadcasts the batch dimensions of `bands` and the batch\ndimensions of `rhs`.\n\n\nExamples:\n\nStoring 2 bands of a 3x3 matrix.\nNote that first element in the second row is ignored due to\nthe 'LEFT_RIGHT' padding.\n\n>>> x = [[2., 3., 4.], [1., 2., 3.]]\n>>> x2 = [[2., 3., 4.], [10000., 2., 3.]]\n>>> y = tf.zeros([3, 3])\n>>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(-1, 0))\n>>> z\n\n>>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([3, 1]))\n>>> soln\n\n>>> are_equal = soln == tf.linalg.banded_triangular_solve(x2, tf.ones([3, 1]))\n>>> print(tf.reduce_all(are_equal).numpy())\nTrue\n>>> are_equal = soln == tf.linalg.triangular_solve(z, tf.ones([3, 1]))\n>>> print(tf.reduce_all(are_equal).numpy())\nTrue\n\nStoring 2 superdiagonals of a 4x4 matrix. Because of the 'LEFT_RIGHT' padding\nthe last element of the first row is ignored.\n\n>>> x = [[2., 3., 4., 5.], [-1., -2., -3., -4.]]\n>>> y = tf.zeros([4, 4])\n>>> z = tf.linalg.set_diag(y, x, align='LEFT_RIGHT', k=(0, 1))\n>>> z\n\n>>> soln = tf.linalg.banded_triangular_solve(x, tf.ones([4, 1]), lower=False)\n>>> soln\n\n>>> are_equal = (soln == tf.linalg.triangular_solve(\n... z, tf.ones([4, 1]), lower=False))\n>>> print(tf.reduce_all(are_equal).numpy())\nTrue\n\n\nArgs:\n bands: A `Tensor` describing the bands of the left hand side, with shape\n `[..., K, M]`. The `K` rows correspond to the diagonal to the `K - 1`-th\n diagonal (the diagonal is the top row) when `lower` is `True` and\n otherwise the `K - 1`-th superdiagonal to the diagonal (the diagonal is\n the bottom row) when `lower` is `False`. 
The bands are stored with\n 'LEFT_RIGHT' alignment, where the superdiagonals are padded on the right\n and subdiagonals are padded on the left. This is the alignment cuSPARSE\n uses. See `tf.linalg.set_diag` for more details.\n rhs: A `Tensor` of shape [..., M] or [..., M, N] and with the same dtype as\n `diagonals`. Note that if the shape of `rhs` and/or `diags` isn't known\n statically, `rhs` will be treated as a matrix rather than a vector.\n lower: An optional `bool`. Defaults to `True`. Boolean indicating whether\n `bands` represents a lower or upper triangular matrix.\n adjoint: An optional `bool`. Defaults to `False`. Boolean indicating whether\n to solve with the matrix's block-wise adjoint.\n name: A name to give this `Op` (optional).\n\nReturns:\n A `Tensor` of shape [..., M] or [..., M, N] containing the solutions."} +{"repo": "tensorflow", "function": "def compute_output_shape(self, input_shape):\n if context.executing_eagerly():\n self._maybe_build(input_shape)\n with ops.get_default_graph().as_default():\n graph = func_graph.FuncGraph('graph')\n with graph.as_default():\n input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)\n inputs = nest.map_structure(base_layer_utils.generate_placeholders_from_shape, input_shape)\n try:\n outputs = self(inputs, training=False)\n except TypeError as e:\n raise NotImplementedError(\"We could not automatically infer the static shape of the layer's output. Please implement the `compute_output_shape` method on your layer (%s).\" % self.__class__.__name__) from e\n return nest.map_structure(lambda t: t.shape, outputs)\n raise NotImplementedError", "docstring": "Computes the output shape of the layer.\n\nIf the layer has not been built, this method will call `build` on the\nlayer. This assumes that the layer will later be used with inputs that\nmatch the input shape provided here.\n\nArgs:\n input_shape: Shape tuple (tuple of integers)\n or list of shape tuples (one per output tensor of the layer).\n Shape tuples can include None for free dimensions,\n instead of an integer.\n\nReturns:\n An input shape tuple."} +{"repo": "transformers", "function": "def num_special_tokens_to_add(self, pair: bool=False) -> int:\n return self._tokenizer.num_special_tokens_to_add(pair)", "docstring": "Returns the number of added tokens when encoding a sequence with special tokens.\n\n\n\nThis encodes a dummy input and checks the number of added tokens, and is therefore not efficient. 
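+What `num_special_tokens_to_add` reports for a classic BERT-style tokenizer (the checkpoint name is illustrative):
+
+```python
+from transformers import AutoTokenizer
+
+tok = AutoTokenizer.from_pretrained("bert-base-uncased")
+print(tok.num_special_tokens_to_add(pair=False))  # 2: [CLS] X [SEP]
+print(tok.num_special_tokens_to_add(pair=True))   # 3: [CLS] A [SEP] B [SEP]
+```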
Do not put\nthis inside your training loop.\n\n\n\nArgs:\n pair (`bool`, *optional*, defaults to `False`):\n Whether the number of added tokens should be computed in the case of a sequence pair or a single\n sequence.\n\nReturns:\n `int`: Number of special tokens added to sequences."} +{"repo": "transformers", "function": "class FocalNetMaskedImageModelingOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n reconstruction: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "FocalNet masked image model outputs.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):\n Masked image modeling (MLM) loss.\n reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Reconstructed pixel values.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, hidden_size, height, width)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to\n include the spatial dimensions."} +{"repo": "data-quality-monitor", "function": "def words_string(fake: Faker, n: int) -> str:\n return ' '.join(fake.words(n))", "docstring": "Provide Faker words as a joined string.\n\nArgs:\n * fake: Faker instance\n * n: number of words\n\nReturns:\n * string of n words joined by spaces"} +{"repo": "pyglove", "function": "def __eq__(self, other: Any) -> bool:\n if self is other:\n return True\n if isinstance(other, MissingValue):\n return self._value_spec == other.value_spec\n return MISSING_VALUE == other", "docstring": "Operator ==.\n\nNOTE: `MissingValue(value_spec) and `utils.MissingValue` are\nconsidered equal, but `MissingValue(value_spec1)` and\n`MissingValue(value_spec2)` are considered different. That being said,\nthe 'eq' operation is not transitive.\n\nHowever in practice this is not a problem, since user always compare\nagainst `schema.MISSING_VALUE` which is `utils.MissingValue`.\nTherefore the `__hash__` function returns the same value with\n`utils.MissingValue`.\n\nArgs:\n other: the value to compare against.\n\nReturns:\n True if the other value is a general MissingValue or MissingValue of the\n same value spec."} +{"repo": "genai-processors", "function": "def model_call_event(self) -> asyncio.Event:\n return self._model_call_event", "docstring": "Returns an event that is set when the wrapped processor has all parts.\n\nThe event is set when the wrapped processor has all the input parts and\nis about to start generating the output.\n\nThe event starts in a cleared state when the first part of the input\nstream is yielded. 
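+The `asyncio.Event` handshake this property describes, in miniature (a generic sketch, not the library's API):
+
+```python
+import asyncio
+
+async def model(ready: asyncio.Event):
+    await asyncio.sleep(0.1)  # consume the input parts
+    ready.set()               # all parts received; generation starts
+    await asyncio.sleep(0.1)  # yield output parts
+    ready.clear()             # cleared again once output is done
+
+async def main():
+    ready = asyncio.Event()
+    task = asyncio.create_task(model(ready))
+    await ready.wait()        # wakes when the "model call" begins
+    await task
+
+asyncio.run(main())
+```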
It is also cleared at the end of the wrapped processor,\nwhen all the output parts have been yielded.\n\nIts default value is unset; the event is set only for a short time\nduring the call.\n\nReturns:\n An event that is set when the model call is started, that is when all the\n input parts have been sent to the wrapped processor."}
+{"repo": "python-fire", "function": "class Pager(object):\n HELP_TEXT = '\\n Simple pager commands:\\n\\n b, ^B, <PAGE-UP>, <LEFT-ARROW>\\n Back one page.\\n f, ^F, <SPACE>, <PAGE-DOWN>, <RIGHT-ARROW>\\n Forward one page. Does not quit if there are no more lines.\\n g, <HOME>\\n Back to the first page.\\n <number>g\\n Go to <number> lines from the top.\\n G, <END>\\n Forward to the last page.\\n <number>G\\n Go to <number> lines from the bottom.\\n h\\n Print pager command help.\\n j, +, <DOWN-ARROW>\\n Forward one line.\\n k, -, <UP-ARROW>\\n Back one line.\\n /pattern\\n Forward search for pattern.\\n ?pattern\\n Backward search for pattern.\\n n\\n Repeat current search.\\n N\\n Repeat current search in the opposite direction.\\n q, Q, ^C, ^D, ^Z\\n Quit return to the caller.\\n any other character\\n Prompt again.\\n\\n Hit any key to continue:'\n PREV_POS_NXT_REPRINT = (-1, -1)\n\n def __init__(self, contents, out=None, prompt=None):\n \"\"\"Constructor.\n\n Args:\n contents: The entire contents of the text lines to page.\n out: The output stream, log.out (effectively) if None.\n prompt: The page break prompt, a default prompt is used if None.\n \"\"\"\n self._contents = contents\n self._out = out or sys.stdout\n self._search_pattern = None\n self._search_direction = None\n self.prev_pos, self.prev_nxt = self.PREV_POS_NXT_REPRINT\n self._attr = console_attr.GetConsoleAttr()\n self._width, self._height = self._attr.GetTermSize()\n if not prompt:\n prompt = '{bold}--({{percent}}%)--{normal}'.format(bold=self._attr.GetFontCode(bold=True), normal=self._attr.GetFontCode())\n self._clear = '\\r{0}\\r'.format(' ' * (self._attr.DisplayWidth(prompt) - 6))\n self._prompt = prompt\n self._lines = []\n for line in contents.splitlines():\n self._lines += self._attr.SplitLine(line, self._width)\n\n def _Write(self, s):\n \"\"\"Mockable helper that writes s to self._out.\"\"\"\n self._out.write(s)\n\n def _GetSearchCommand(self, c):\n \"\"\"Consumes a search command and returns the equivalent pager command.\n\n The search pattern is an RE that is pre-compiled and cached for subsequent\n /, ?, n, or N commands.\n\n Args:\n c: The search command char.\n\n Returns:\n The pager command char.\n \"\"\"\n self._Write(c)\n buf = ''\n while True:\n p = self._attr.GetRawKey()\n if p in (None, '\\n', '\\r') or len(p) != 1:\n break\n self._Write(p)\n buf += p\n self._Write('\\r' + ' ' * len(buf) + '\\r')\n if buf:\n try:\n self._search_pattern = re.compile(buf)\n except re.error:\n self._search_pattern = None\n return ''\n self._search_direction = 'n' if c == '/' else 'N'\n return 'n'\n\n def _Help(self):\n \"\"\"Print command help and wait for any character to continue.\"\"\"\n clear = self._height - (len(self.HELP_TEXT) - len(self.HELP_TEXT.replace('\\n', '')))\n if clear > 0:\n self._Write('\\n' * clear)\n self._Write(self.HELP_TEXT)\n self._attr.GetRawKey()\n self._Write('\\n')\n\n def Run(self):\n \"\"\"Run the pager.\"\"\"\n if len(self._lines) <= self._height:\n self._Write(self._contents)\n return\n reset_prev_values = True\n self._height -= 1\n pos = 0\n while pos < len(self._lines):\n nxt = pos + self._height\n if nxt > len(self._lines):\n nxt = len(self._lines)\n pos = nxt - self._height\n if self.prev_pos < pos < self.prev_nxt:\n 
self._Write('\\n'.join(self._lines[self.prev_nxt:nxt]) + '\\n')\n elif pos != self.prev_pos and nxt != self.prev_nxt:\n self._Write('\\n'.join(self._lines[pos:nxt]) + '\\n')\n percent = self._prompt.format(percent=100 * nxt // len(self._lines))\n digits = ''\n while True:\n if reset_prev_values:\n self.prev_pos, self.prev_nxt = (pos, nxt)\n reset_prev_values = False\n self._Write(percent)\n c = self._attr.GetRawKey()\n self._Write(self._clear)\n if c in (None, 'q', 'Q', '\\x03', '\\x1b'):\n return\n elif c in ('/', '?'):\n c = self._GetSearchCommand(c)\n elif c.isdigit():\n digits += c\n continue\n if digits:\n count = int(digits)\n digits = ''\n else:\n count = 0\n if c in ('<PAGE-UP>', '<LEFT-ARROW>', 'b', '\\x02'):\n nxt = pos - self._height\n if nxt < 0:\n nxt = 0\n elif c in ('<PAGE-DOWN>', '<RIGHT-ARROW>', 'f', '\\x06', ' '):\n if nxt >= len(self._lines):\n continue\n nxt = pos + self._height\n if nxt >= len(self._lines):\n nxt = pos\n elif c in ('<HOME>', 'g'):\n nxt = count - 1\n if nxt > len(self._lines) - self._height:\n nxt = len(self._lines) - self._height\n if nxt < 0:\n nxt = 0\n elif c in ('<END>', 'G'):\n nxt = len(self._lines) - count\n if nxt > len(self._lines) - self._height:\n nxt = len(self._lines) - self._height\n if nxt < 0:\n nxt = 0\n elif c == 'h':\n self._Help()\n self.prev_pos, self.prev_nxt = self.PREV_POS_NXT_REPRINT\n nxt = pos\n break\n elif c in ('<DOWN-ARROW>', 'j', '+', '\\n', '\\r'):\n if nxt >= len(self._lines):\n continue\n nxt = pos + 1\n if nxt >= len(self._lines):\n nxt = pos\n elif c in ('<UP-ARROW>', 'k', '-'):\n nxt = pos - 1\n if nxt < 0:\n nxt = 0\n elif c in ('n', 'N'):\n if not self._search_pattern:\n continue\n nxt = pos\n i = pos\n direction = 1 if c == self._search_direction else -1\n while True:\n i += direction\n if i < 0 or i >= len(self._lines):\n break\n if self._search_pattern.search(self._lines[i]):\n nxt = i\n break\n else:\n continue\n if nxt != pos:\n reset_prev_values = True\n break\n pos = nxt", "docstring": "A simple console text pager.\n\nThis pager requires the entire contents to be available. The contents are\nwritten one page of lines at a time. The prompt is written after each page of\nlines. A one character response is expected. See HELP_TEXT below for more\ninfo.\n\nThe contents are written as is. For example, ANSI control codes will be in\neffect. This is different from pagers like more(1) which is ANSI control code\nagnostic and miscalculates line lengths, and less(1) which displays control\ncharacter names by default.\n\nAttributes:\n _attr: The current ConsoleAttr handle.\n _clear: A string that clears the prompt when written to _out.\n _contents: The entire contents of the text lines to page.\n _height: The terminal height in characters.\n _out: The output stream, log.out (effectively) if None.\n _prompt: The page break prompt.\n _search_direction: The search direction command, n:forward, N:reverse.\n _search_pattern: The current forward/reverse search compiled RE.\n _width: The terminal width in characters."}
+{"repo": "keras", "function": "def rot90(array, k=1, axes=(0, 1)):\n array = convert_to_tensor(array)\n if array.ndim < 2:\n raise ValueError(f'Input array must have at least 2 dimensions. Received: array.ndim={array.ndim}')\n if len(axes) != 2 or axes[0] == axes[1]:\n raise ValueError(f'Invalid axes: {axes}. 
Axes must be a tuple of two different dimensions.')\n axes = tuple((axis if axis >= 0 else array.ndim + axis for axis in axes))\n if not builtins.all((0 <= axis < array.ndim for axis in axes)):\n raise ValueError(f'Invalid axes {axes} for tensor with {array.ndim} dimensions')\n rotated = torch.rot90(array, k=k, dims=axes)\n if isinstance(array, np.ndarray):\n rotated = rotated.cpu().numpy()\n return rotated", "docstring": "Rotate an array by 90 degrees in the specified plane using PyTorch.\n\nArgs:\n array: Input tensor\n k: Number of 90-degree rotations (default=1)\n axes: Tuple of two axes that define the\n plane of rotation (defaults to `(0, 1)`).\n\nReturns:\n Rotated tensor"} +{"repo": "tensorflow", "function": "def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None, initial_state_fw=None, initial_state_bw=None, dtype=None, parallel_iterations=None, swap_memory=False, time_major=False, scope=None):\n rnn_cell_impl.assert_like_rnncell('cell_fw', cell_fw)\n rnn_cell_impl.assert_like_rnncell('cell_bw', cell_bw)\n with vs.variable_scope(scope or 'bidirectional_rnn'):\n with vs.variable_scope('fw') as fw_scope:\n output_fw, output_state_fw = dynamic_rnn(cell=cell_fw, inputs=inputs, sequence_length=sequence_length, initial_state=initial_state_fw, dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory, time_major=time_major, scope=fw_scope)\n if not time_major:\n time_axis = 1\n batch_axis = 0\n else:\n time_axis = 0\n batch_axis = 1\n\n def _reverse(input_, seq_lengths, seq_axis, batch_axis):\n if seq_lengths is not None:\n return array_ops.reverse_sequence(input=input_, seq_lengths=seq_lengths, seq_axis=seq_axis, batch_axis=batch_axis)\n else:\n return array_ops.reverse(input_, axis=[seq_axis])\n with vs.variable_scope('bw') as bw_scope:\n\n def _map_reverse(inp):\n return _reverse(inp, seq_lengths=sequence_length, seq_axis=time_axis, batch_axis=batch_axis)\n inputs_reverse = nest.map_structure(_map_reverse, inputs)\n tmp, output_state_bw = dynamic_rnn(cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length, initial_state=initial_state_bw, dtype=dtype, parallel_iterations=parallel_iterations, swap_memory=swap_memory, time_major=time_major, scope=bw_scope)\n output_bw = _reverse(tmp, seq_lengths=sequence_length, seq_axis=time_axis, batch_axis=batch_axis)\n outputs = (output_fw, output_bw)\n output_states = (output_state_fw, output_state_bw)\n return (outputs, output_states)", "docstring": "Creates a dynamic version of bidirectional recurrent neural network.\n\nTakes input and builds independent forward and backward RNNs. The input_size\nof forward and backward cell must match. The initial state for both directions\nis zero by default (but can be set optionally) and no intermediate states are\never returned -- the network is fully unrolled for the given (passed in)\nlength(s) of the sequence(s) or completely unrolled if length(s) is not\ngiven.\n\nArgs:\n cell_fw: An instance of RNNCell, to be used for forward direction.\n cell_bw: An instance of RNNCell, to be used for backward direction.\n inputs: The RNN inputs.\n If time_major == False (default), this must be a tensor of shape:\n `[batch_size, max_time, ...]`, or a nested tuple of such elements.\n If time_major == True, this must be a tensor of shape: `[max_time,\n batch_size, ...]`, or a nested tuple of such elements.\n sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,\n containing the actual lengths for each of the sequences in the batch. 
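+A minimal graph-mode invocation of the bidirectional RNN API above via `tf.compat.v1` (shapes are illustrative):
+
+```python
+import tensorflow.compat.v1 as tf
+tf.disable_eager_execution()
+
+cell_fw = tf.nn.rnn_cell.LSTMCell(32)
+cell_bw = tf.nn.rnn_cell.LSTMCell(32)
+x = tf.placeholder(tf.float32, [None, 10, 8])    # [batch, time, depth]
+(out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
+    cell_fw, cell_bw, x, dtype=tf.float32)
+outputs = tf.concat([out_fw, out_bw], 2)         # fuse both directions
+```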
If\n not provided, all batch entries are assumed to be full sequences; and time\n reversal is applied from time `0` to `max_time` for each sequence.\n initial_state_fw: (optional) An initial state for the forward RNN. This must\n be a tensor of appropriate type and shape `[batch_size,\n cell_fw.state_size]`. If `cell_fw.state_size` is a tuple, this should be a\n tuple of tensors having shapes `[batch_size, s] for s in\n cell_fw.state_size`.\n initial_state_bw: (optional) Same as for `initial_state_fw`, but using the\n corresponding properties of `cell_bw`.\n dtype: (optional) The data type for the initial states and expected output.\n Required if initial_states are not provided or RNN states have a\n heterogeneous dtype.\n parallel_iterations: (Default: 32). The number of iterations to run in\n parallel. Those operations which do not have any temporal dependency and\n can be run in parallel, will be. This parameter trades off time for\n space. Values >> 1 use more memory but take less time, while smaller\n values use less memory but computations take longer.\n swap_memory: Transparently swap the tensors produced in forward inference\n but needed for back prop from GPU to CPU. This allows training RNNs which\n would typically not fit on a single GPU, with very minimal (or no)\n performance penalty.\n time_major: The shape format of the `inputs` and `outputs` Tensors. If true,\n these `Tensors` must be shaped `[max_time, batch_size, depth]`. If false,\n these `Tensors` must be shaped `[batch_size, max_time, depth]`. Using\n `time_major = True` is a bit more efficient because it avoids transposes\n at the beginning and end of the RNN calculation. However, most TensorFlow\n data is batch-major, so by default this function accepts input and emits\n output in batch-major form.\n scope: VariableScope for the created subgraph; defaults to\n \"bidirectional_rnn\"\n\nReturns:\n A tuple (outputs, output_states) where:\n outputs: A tuple (output_fw, output_bw) containing the forward and\n the backward rnn output `Tensor`.\n If time_major == False (default),\n output_fw will be a `Tensor` shaped:\n `[batch_size, max_time, cell_fw.output_size]`\n and output_bw will be a `Tensor` shaped:\n `[batch_size, max_time, cell_bw.output_size]`.\n If time_major == True,\n output_fw will be a `Tensor` shaped:\n `[max_time, batch_size, cell_fw.output_size]`\n and output_bw will be a `Tensor` shaped:\n `[max_time, batch_size, cell_bw.output_size]`.\n It returns a tuple instead of a single concatenated `Tensor`, unlike\n in the `bidirectional_rnn`. If the concatenated one is preferred,\n the forward and backward outputs can be concatenated as\n `tf.concat(outputs, 2)`.\n output_states: A tuple (output_state_fw, output_state_bw) containing\n the forward and the backward final states of bidirectional rnn.\n\nRaises:\n TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`."} +{"repo": "tensorflow", "function": "def op(self):\n return self._op", "docstring": "The operation that failed, if known.\n\n*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`\nor `Recv` op, there will be no corresponding\n`tf.Operation`\nobject. 
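+Catching an `OpError` and falling back from `.op` to `.node_def`, per the note above (a sketch; in eager mode `.op` is typically `None`):
+
+```python
+import tensorflow as tf
+
+try:
+    tf.debugging.check_numerics(tf.constant(float("nan")), "saw a NaN")
+except tf.errors.OpError as e:
+    culprit = e.op if e.op is not None else e.node_def  # op can be None
+    print(type(e).__name__, e.message)
+```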
In that case, this will return `None`, and you should\ninstead use the `tf.errors.OpError.node_def` to\ndiscover information about the op.\n\nReturns:\n The `Operation` that failed, or None."} +{"repo": "pytype", "function": "def _pytd_constant_to_value(self, pyval: pytd.Node, subst, get_node):\n if isinstance(pyval, pytd.LateType):\n actual = self._load_late_type(pyval)\n return self._constant_to_value(actual, subst, get_node)\n elif isinstance(pyval, pytd.TypeDeclUnit):\n return self._create_module(pyval)\n elif isinstance(pyval, pytd.Module):\n mod = self.ctx.loader.import_name(pyval.module_name)\n return self._create_module(mod)\n elif isinstance(pyval, pytd.Class):\n return self._pytd_class_to_value(pyval, get_node())\n elif isinstance(pyval, pytd.Function):\n f = self.convert_pytd_function(pyval)\n f.is_abstract = pyval.is_abstract\n return f\n elif isinstance(pyval, pytd.ClassType):\n if pyval.cls:\n cls = pyval.cls\n else:\n cls = self.ctx.loader.lookup_pytd(*pyval.name.split('.', 1))\n assert isinstance(cls, pytd.Class)\n return self.constant_to_value(cls, subst)\n elif isinstance(pyval, pytd.NothingType):\n return self.empty\n elif isinstance(pyval, pytd.AnythingType):\n return self.unsolvable\n elif isinstance(pyval, pytd.Constant) and isinstance(pyval.type, pytd.AnythingType):\n return self.unsolvable\n elif isinstance(pyval, pytd.Constant) and isinstance(pyval.type, pytd.GenericType) and (pyval.type.name == 'builtins.type'):\n param, = pyval.type.parameters\n return self.constant_to_value(param, subst)\n elif isinstance(pyval, pytd.UnionType):\n options = [self.constant_to_value(t, subst) for t in pyval.type_list]\n if len(options) > 1:\n return abstract.Union(options, self.ctx)\n else:\n return options[0]\n elif isinstance(pyval, (pytd.TypeParameter, pytd.ParamSpec)):\n constraints = tuple((self.constant_to_value(c, {}) for c in pyval.constraints))\n bound = pyval.bound and self.constant_to_value(pyval.bound, {})\n if isinstance(pyval, pytd.ParamSpec):\n cls = abstract.ParamSpec\n else:\n cls = abstract.TypeParameter\n return cls(pyval.name, self.ctx, constraints=constraints, bound=bound, scope=pyval.scope)\n elif isinstance(pyval, (pytd.ParamSpecArgs, pytd.ParamSpecKwargs)):\n return self.unsolvable\n elif isinstance(pyval, pytd.Concatenate):\n params = [self.constant_to_value(p, subst) for p in pyval.parameters]\n return abstract.Concatenate(params, self.ctx)\n elif isinstance(pyval, pytd.GenericType) and pyval.name == 'typing.ClassVar':\n param, = pyval.parameters\n return self.constant_to_value(param, subst)\n elif isinstance(pyval, pytd.GenericType):\n return self._pytd_generic_type_to_value(pyval, subst, get_node)\n elif isinstance(pyval, pytd.Literal):\n value = self._get_literal_value(pyval.value, subst)\n return abstract.LiteralClass(value, self.ctx)\n elif isinstance(pyval, pytd.Annotated):\n typ = self.constant_to_value(pyval.base_type, subst)\n return self._apply_metadata_annotations(typ, pyval.annotations)\n else:\n raise NotImplementedError(f\"Can't convert pytd constant {type(pyval)} {pyval!r}\")", "docstring": "Convert a pytd type to an abstract value.\n\nArgs:\n pyval: The PyTD value to convert.\n subst: The current type parameters.\n get_node: A getter function for the current node.\n\nReturns:\n A Value that represents the constant, or None if we couldn't convert.\nRaises:\n NotImplementedError: If we don't know how to convert a value.\n TypeParameterError: If we can't find a substitution for a type parameter."} +{"repo": "tensorflow", "function": "def 
_print(self, *args):\n\n def _format(name, arr):\n \"\"\"Prints compatibility check results with a format.\n\n Args:\n name: String that is the title representing list `arr`.\n arr: List of items to be printed in a certain format.\n \"\"\"\n title = '### All Compatibility %s ###' % str(name)\n tlen = len(title)\n print('-' * tlen)\n print(title)\n print('-' * tlen)\n print(' Total # of %s: %s\\n' % (str(name), str(len(arr))))\n if arr:\n for item in arr:\n detail = ''\n if isinstance(item[1], list):\n for itm in item[1]:\n detail += str(itm) + ', '\n detail = detail[:-2]\n else:\n detail = str(item[1])\n print(\" %s ('%s')\\n\" % (str(item[0]), detail))\n else:\n print(' No %s' % name)\n print('\\n')\n for p_item in args:\n if p_item == 'failures':\n _format('Failures', self.failures)\n elif p_item == 'successes':\n _format('Successes', self.successes)\n elif p_item == 'failure_msgs':\n _format('Failure Messages', self.error_msg)\n elif p_item == 'warning_msgs':\n _format('Warning Messages', self.warning_msg)\n else:\n raise Exception('[Error] Wrong input provided for %s.' % _get_func_name())", "docstring": "Prints compatibility check status and failure or warning messages.\n\nPrints to console without using `logging`.\n\nArgs:\n *args: String(s) that is one of:\n [`failures`, # all failures\n `successes`, # all successes\n `failure_msgs`, # failure message(s) recorded upon failure(s)\n `warning_msgs`] # warning message(s) recorded upon warning(s)\nRaises:\n Exception: If *args not in:\n [`failures`, `successes`, `failure_msgs`, `warning_msg`]"} +{"repo": "tensorflow", "function": "def min(x, axis=None, keepdims=False):\n return math_ops.reduce_min(x, axis, keepdims)", "docstring": "Minimum value in a tensor.\n\nArgs:\n x: A tensor or variable.\n axis: An integer, the axis to find minimum values.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. If `keepdims` is `True`,\n the reduced dimension is retained with length 1.\n\nReturns:\n A tensor with minimum values of `x`."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. 
A Big Bird sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "transformers", "function": "def forward(self, hidden_states):\n forwarded_states = self.mlp(hidden_states)\n output = hidden_states + self.norm(forwarded_states)\n return output", "docstring": "Args:\n hidden_states (`torch.Tensor`) :\n [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.\nReturns:\n torch.Tensor[num_groups, tokens_per_group, hidden_dim]"} +{"repo": "tensorflow", "function": "def get_completions(self, context_word, prefix):\n if context_word not in self._comp_dict:\n return (None, None)\n comp_items = self._comp_dict[context_word]\n comp_items = sorted([item for item in comp_items if item.startswith(prefix)])\n return (comp_items, self._common_prefix(comp_items))", "docstring": "Get the tab completions given a context word and a prefix.\n\nArgs:\n context_word: The context word.\n prefix: The prefix of the incomplete word.\n\nReturns:\n (1) None if no registered context matches the context_word.\n A list of str for the matching completion items. Can be an empty list\n of a matching context exists, but no completion item matches the\n prefix.\n (2) Common prefix of all the words in the first return value. If the\n first return value is None, this return value will be None, too. If\n the first return value is not None, i.e., a list, this return value\n will be a str, which can be an empty str if there is no common\n prefix among the items of the list."} +{"repo": "tensorflow", "function": "def quantize_and_dequantize(input, input_min, input_max, signed_input=True, num_bits=8, range_given=False, round_mode='HALF_TO_EVEN', name=None, narrow_range=False, axis=None):\n if axis is None:\n axis = -1\n elif axis < 0:\n if input.shape.ndims is None:\n raise ValueError('input should have known rank to use negative axis.')\n axis %= input.shape.ndims\n return gen_array_ops.quantize_and_dequantize_v2(input, input_min=input_min, input_max=input_max, signed_input=signed_input, num_bits=num_bits, range_given=range_given, round_mode=round_mode, narrow_range=narrow_range, axis=axis, name=name)", "docstring": "Quantizes then dequantizes a tensor.\n\nArgs:\n input: A `Tensor` to quantize and dequantize.\n input_min: If range_given=True, the minimum input value, that needs to be\n represented in the quantized representation. If axis is specified, this\n should be a vector of minimum values for each slice along axis.\n input_max: If range_given=True, the maximum input value that needs to be\n represented in the quantized representation. 
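+A scalar-range call of the quantize-and-dequantize op documented above, for intuition (a sketch using the public TF 2.x endpoint):
+
+```python
+import tensorflow as tf
+
+x = tf.constant([-1.0, 0.1234, 1.0, 2.0])
+y = tf.quantization.quantize_and_dequantize_v2(
+    x, input_min=-1.0, input_max=2.0, range_given=True, num_bits=8)
+print(y.numpy())  # values snapped to the nearest 8-bit level on [-1, 2]
+```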
If axis is specified, this\n should be a vector of maximum values for each slice along axis.\n signed_input: Whether the quantization is signed (True) or unsigned (False).\n num_bits: The bitwidth of the quantization.\n range_given: If true, use `input_min` and `input_max` for the range of the\n input, otherwise determine min and max from the input `Tensor`.\n round_mode: Rounding mode when rounding from float values to quantized ones.\n one of ['HALF_TO_EVEN', 'HALF_UP']\n name: Optional name for the operation.\n narrow_range: If true, then the absolute value of the quantized minimum\n value is the same as the quantized maximum value, instead of 1 greater.\n i.e. for 8 bit quantization, the minimum value is -127 instead of -128.\n axis: Integer. If specified, refers to a dimension of the input tensor, such\n that quantization will be per slice along that dimension.\n\nReturns:\n A `Tensor`. Each element is the result of quantizing and dequantizing the\n corresponding element of `input`."}
+{"repo": "tensorflow", "function": "def histogram(name, data, step=None, buckets=None, description=None):\n try:\n from tensorboard.summary.v2 import histogram as histogram_v2\n except ImportError as exc:\n raise TBNotInstalledError('tf.summary.histogram') from exc\n return histogram_v2(name=name, data=data, step=step, buckets=buckets, description=description)", "docstring": "Write a histogram summary.\n\nSee also `tf.summary.scalar`, `tf.summary.SummaryWriter`.\n\nWrites a histogram to the current default summary writer, for later analysis\nin TensorBoard's 'Histograms' and 'Distributions' dashboards (data written\nusing this API will appear in both places). Like `tf.summary.scalar` points,\neach histogram is associated with a `step` and a `name`. All the histograms\nwith the same `name` constitute a time series of histograms.\n\nThe histogram is calculated over all the elements of the given `Tensor`\nwithout regard to its shape or rank.\n\nThis example writes 2 histograms:\n\n```python\nw = tf.summary.create_file_writer('test/logs')\nwith w.as_default():\n tf.summary.histogram(\"activations\", tf.random.uniform([100, 50]), step=0)\n tf.summary.histogram(\"initial_weights\", tf.random.normal([1000]), step=0)\n```\n\nA common use case is to examine the changing activation patterns (or lack\nthereof) at specific layers in a neural network, over time.\n\n```python\nw = tf.summary.create_file_writer('test/logs')\nwith w.as_default():\n for step in range(100):\n  # Generate fake \"activations\".\n  activations = [\n   tf.random.normal([1000], mean=step, stddev=1),\n   tf.random.normal([1000], mean=step, stddev=10),\n   tf.random.normal([1000], mean=step, stddev=100),\n  ]\n\n  tf.summary.histogram(\"layer1/activate\", activations[0], step=step)\n  tf.summary.histogram(\"layer2/activate\", activations[1], step=step)\n  tf.summary.histogram(\"layer3/activate\", activations[2], step=step)\n```\n\nArguments:\n name: A name for this summary. The summary tag used for TensorBoard will be\n this name prefixed by any active name scopes.\n data: A `Tensor` of any shape. The histogram is computed over its elements,\n which must be castable to `float64`.\n step: Explicit `int64`-castable monotonic step value for this summary. If\n omitted, this defaults to `tf.summary.experimental.get_step()`, which must\n not be None.\n buckets: Optional positive `int`. The output will have this many buckets,\n except in two edge cases. 
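+The effect of the `buckets` argument described above, as a quick sketch (the log directory is illustrative):
+
+```python
+import tensorflow as tf
+
+w = tf.summary.create_file_writer("/tmp/tb_logs")  # illustrative path
+data = tf.random.normal([1000])
+with w.as_default():
+    tf.summary.histogram("coarse", data, step=0, buckets=5)
+    tf.summary.histogram("fine", data, step=0, buckets=100)
+```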
If there is no data, then there are no buckets.\n If there is data but all points have the same value, then all buckets'\n left and right endpoints are the same and only the last bucket has nonzero\n count. Defaults to 30 if not specified.\n description: Optional long-form description for this summary, as a constant\n `str`. Markdown is supported. Defaults to empty.\n\nReturns:\n True on success, or false if no summary was emitted because no default\n summary writer was available.\n\nRaises:\n ValueError: if a default writer exists, but no step was provided and\n `tf.summary.experimental.get_step()` is None."} +{"repo": "keras", "function": "def linear(x):\n return x", "docstring": "Linear activation function (pass-through).\n\nA \"linear\" activation is an identity function:\nit returns the input, unmodified.\n\nArgs:\n x: Input tensor."} +{"repo": "tensorflow", "function": "def get_output_shape_at(self, node_index):\n return self._get_node_attribute_at_index(node_index, 'output_shapes', 'output shape')", "docstring": "Retrieves the output shape(s) of a layer at a given node.\n\nArgs:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\nReturns:\n A shape tuple\n (or list of shape tuples if the layer has multiple outputs).\n\nRaises:\n RuntimeError: If called in Eager mode."} +{"repo": "tensorflow", "function": "def _create_uninitialized_mirrored_tpu_replicated_variables(**kwargs):\n dtype = kwargs.get('dtype', None)\n shape = kwargs.get('shape', None)\n initial_value = kwargs.get('initial_value', None)\n if initial_value is None:\n return _create_mirrored_tpu_replicated_variables(**kwargs)\n with maybe_init_scope():\n if initial_value is not None:\n if callable(initial_value):\n initial_value = initial_value()\n initial_value = ops.convert_to_tensor(initial_value, dtype=dtype)\n kwargs['initial_value'] = initial_value\n if dtype is None:\n kwargs['dtype'] = kwargs['initial_value'].dtype\n if shape is None:\n kwargs['shape'] = kwargs['initial_value'].shape\n mirrored_replicated_var_list = []\n for replica_id in range(num_replicas):\n replicated_var_list = []\n for logic_core_id in range(num_cores_per_replica):\n with ops.device(self._tpu_devices[replica_id][logic_core_id]):\n v = uninitialized_variable_creator(**kwargs)\n replicated_var_list.append(v)\n replica_name = '{}/r:{}'.format(kwargs['name'], replica_id)\n tpu_replicated_var = tpu_replicated_variable.TPUReplicatedVariable(variables=replicated_var_list, name=replica_name)\n mirrored_replicated_var_list.append(tpu_replicated_var)\n return mirrored_replicated_var_list", "docstring": "Returns a list of `TPUReplicatedVariable`s.\n\nThe list consists of `num_replicas` `TPUReplicatedVariable`s and can be\nused to initialize a `TPUMirroredVariable`. 
Each `TPUReplicatedVariable`\ncontains a list of `tf.Variable`s which are replicated to\n`num_cores_per_replica` logical cores to enable XLA SPMD compilation.\n\nArgs:\n **kwargs: the keyword arguments for creating a variable"} +{"repo": "transformers", "function": "class GotOcr2CausalLMOutputWithPast(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[List[torch.FloatTensor]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n image_hidden_states: Optional[torch.FloatTensor] = None", "docstring": "Base class for GotOcr2 causal language model (or autoregressive) outputs.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Language modeling loss (for next-token prediction).\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`)\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n image_hidden_states (`torch.FloatTensor`, *optional*):\n A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.\n image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state."} +{"repo": "keras", "function": "class AdditiveAttention(Attention):\n\n def __init__(self, use_scale=True, dropout=0.0, **kwargs):\n super().__init__(use_scale=use_scale, dropout=dropout, **kwargs)\n\n def build(self, input_shape):\n self._validate_inputs(input_shape)\n dim = input_shape[0][-1]\n self.scale = None\n if self.use_scale:\n self.scale = self.add_weight(name='scale', shape=[dim], initializer='glorot_uniform', dtype=self.dtype, trainable=True)\n\n def _calculate_scores(self, query, key):\n \"\"\"Calculates attention scores as a nonlinear sum of query and key.\n\n Args:\n query: Query tensor of shape `(batch_size, Tq, dim)`.\n key: Key tensor of shape `(batch_size, Tv, dim)`.\n\n Returns:\n Tensor of shape `(batch_size, Tq, Tv)`.\n \"\"\"\n q_reshaped = ops.expand_dims(query, axis=-2)\n k_reshaped = ops.expand_dims(key, axis=-3)\n scale = self.scale if 
self.use_scale else 1.0\n return ops.sum(scale * ops.tanh(q_reshaped + k_reshaped), axis=-1)\n\n def get_config(self):\n base_config = super().get_config()\n del base_config['score_mode']\n return base_config", "docstring": "Additive attention layer, a.k.a. Bahdanau-style attention.\n\nInputs are a list with 2 or 3 elements:\n1. A `query` tensor of shape `(batch_size, Tq, dim)`.\n2. A `value` tensor of shape `(batch_size, Tv, dim)`.\n3. A optional `key` tensor of shape `(batch_size, Tv, dim)`. If none\n supplied, `value` will be used as `key`.\n\nThe calculation follows the steps:\n1. Calculate attention scores using `query` and `key` with shape\n `(batch_size, Tq, Tv)` as a non-linear sum\n `scores = reduce_sum(tanh(query + key), axis=-1)`.\n2. Use scores to calculate a softmax distribution with shape\n `(batch_size, Tq, Tv)`.\n3. Use the softmax distribution to create a linear combination of `value`\n with shape `(batch_size, Tq, dim)`.\n\nArgs:\n use_scale: If `True`, will create a scalar variable to scale the\n attention scores.\n dropout: Float between 0 and 1. Fraction of the units to drop for the\n attention scores. Defaults to `0.0`.\n\nCall arguments:\n inputs: List of the following tensors:\n - `query`: Query tensor of shape `(batch_size, Tq, dim)`.\n - `value`: Value tensor of shape `(batch_size, Tv, dim)`.\n - `key`: Optional key tensor of shape `(batch_size, Tv, dim)`. If\n not given, will use `value` for both `key` and `value`, which is\n the most common case.\n mask: List of the following tensors:\n - `query_mask`: A boolean mask tensor of shape `(batch_size, Tq)`.\n If given, the output will be zero at the positions where\n `mask==False`.\n - `value_mask`: A boolean mask tensor of shape `(batch_size, Tv)`.\n If given, will apply the mask such that values at positions\n where `mask==False` do not contribute to the result.\n return_attention_scores: bool, it `True`, returns the attention scores\n (after masking and softmax) as an additional output argument.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).\n use_causal_mask: Boolean. Set to `True` for decoder self-attention. Adds\n a mask such that position `i` cannot attend to positions `j > i`.\n This prevents the flow of information from the future towards the\n past. 
Defaults to `False`.\n\nOutput:\n Attention outputs of shape `(batch_size, Tq, dim)`.\n (Optional) Attention scores after masking and softmax with shape\n `(batch_size, Tq, Tv)`."} +{"repo": "keras", "function": "def tanh(x):\n if any_symbolic_tensors((x,)):\n return Tanh().symbolic_call(x)\n return backend.numpy.tanh(x)", "docstring": "Hyperbolic tangent, element-wise.\n\nArguments:\n x: Input tensor.\n\nReturns:\n Output tensor of same shape as `x`."} +{"repo": "tensorflow", "function": "def add_tensor_filter(self, filter_name, tensor_filter):\n if self._session_wrapper:\n self._session_wrapper.add_tensor_filter(filter_name, tensor_filter)\n else:\n self._pending_tensor_filters[filter_name] = tensor_filter", "docstring": "Add a tensor filter.\n\nSee doc of `LocalCLIDebugWrapperSession.add_tensor_filter()` for details.\nOverride default behavior to accommodate the possibility of this method\nbeing\ncalled prior to the initialization of the underlying\n`LocalCLIDebugWrapperSession` object.\n\nArgs:\n filter_name: See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()`\n for details.\n tensor_filter: See doc of\n `LocalCLIDebugWrapperSession.add_tensor_filter()` for details."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. A MVP sequence has the following format:\n\n- single sequence: ` X `\n- pair of sequences: ` A B `\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "beam", "function": "def temp_sqlite_database(prefix='yaml_jdbc_it_'):\n conn = cursor = None\n try:\n db_name = f'{prefix}{uuid.uuid4().hex}.db'\n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n cursor.execute('\\n CREATE TABLE tmp_table (\\n value INTEGER PRIMARY KEY,\\n rank INTEGER\\n )\\n ')\n conn.commit()\n yield f'jdbc:sqlite:{db_name}'\n except (sqlite3.Error, Exception) as err:\n logging.error('Error interacting with temporary SQLite DB: %s', err)\n raise err\n finally:\n if cursor:\n cursor.close()\n if conn:\n conn.close()\n try:\n if os.path.exists(db_name):\n os.remove(db_name)\n except Exception as err:\n logging.error('Error deleting temporary SQLite DB: %s', err)\n raise err", "docstring": "Context manager to provide a temporary SQLite database via JDBC for\ntesting.\n\nThis function creates a temporary SQLite database file on the local\nfilesystem. 
It establishes a connection using 'sqlite3', creates a predefined\n'tmp_table', and then yields a JDBC connection string suitable for use in\ntests that require a generic JDBC connection (specifically configured for \nSQLite in this case).\n\nThe SQLite database file is automatically cleaned up (closed and deleted)\nwhen the context manager exits.\n\nArgs:\n prefix (str): A prefix to use for the temporary database file name.\n\nYields:\n str: A JDBC connection string for the temporary SQLite database.\n Example format: \"jdbc:sqlite:\"\n\nRaises:\n sqlite3.Error: If there's an error connecting to or interacting with\n the SQLite database during setup.\n Exception: Any other exception encountered during the setup or cleanup\n process."} +{"repo": "transformers", "function": "def default_sequence_length(self) -> int:\n return OnnxConfig.default_fixed_sequence", "docstring": "The default sequence length to use if no other indication\n\nReturns:\n Integer > 0"} +{"repo": "keras", "function": "def rematerialized_call(self, layer_call, *args, **kwargs):\n\n def compute_size(x):\n return math.prod([d or 1 for d in x.shape]) if isinstance(x, KerasTensor) else 0\n if self._remat_mode.mode == 'full':\n return remat.remat(layer_call)\n elif self._remat_mode.mode == 'list_of_layers' and self.name in self._remat_mode.layer_names:\n return remat.remat(layer_call)\n elif self._remat_mode.mode == 'larger_than':\n output_spec = self.compute_output_spec(*args, **kwargs)\n output_size = sum(tree.flatten(tree.map_structure(compute_size, output_spec)))\n if output_size and output_size > self._remat_mode.output_size_threshold:\n return remat.remat(layer_call)\n elif self._remat_mode.mode == 'activations':\n has_activation = hasattr(self, 'activation') and self.activation is not None\n if has_activation:\n\n @functools.wraps(layer_call)\n def rematerialized_activation_call_wrapper(*args, **kwargs):\n original_activation = self.activation\n self.activation = remat.remat(original_activation)\n try:\n return layer_call(*args, **kwargs)\n finally:\n self.activation = original_activation\n return rematerialized_activation_call_wrapper\n return layer_call", "docstring": "Enable rematerialization dynamically for layer's call method.\n\nArgs:\n layer_call: The original `call` method of a layer.\n\nReturns:\n Rematerialized layer's `call` method."} +{"repo": "transformers", "function": "def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None):\n out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n if target_sizes is not None:\n if len(out_logits) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n prob = nn.functional.softmax(out_logits, -1)\n scores, labels = prob[..., :-1].max(-1)\n boxes = center_to_corners_format(out_bbox)\n if target_sizes is not None:\n if isinstance(target_sizes, List):\n img_h = torch.Tensor([i[0] for i in target_sizes])\n img_w = torch.Tensor([i[1] for i in target_sizes])\n else:\n img_h, img_w = target_sizes.unbind(1)\n scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n boxes = boxes * scale_fct[:, None, :]\n results = []\n for s, l, b in zip(scores, labels, boxes):\n score = s[s > threshold]\n label = l[s > threshold]\n box = b[s > threshold]\n results.append({'scores': score, 'labels': label, 'boxes': box})\n return results", "docstring": "Converts the raw output of [`DetrForObjectDetection`] into final bounding 
boxes in (top_left_x, top_left_y,\nbottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\nArgs:\n outputs ([`DetrObjectDetectionOutput`]):\n Raw outputs of the model.\n threshold (`float`, *optional*):\n Score threshold to keep object detection predictions.\n target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):\n Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n `(height, width)` of each image in the batch. If unset, predictions will not be resized.\nReturns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\n in the batch as predicted by the model."} +{"repo": "tensorflow", "function": "def _read_file(filename):\n graph_def = graph_pb2.GraphDef()\n if not file_io.file_exists(filename):\n raise IOError(f'File {filename} does not exist.')\n with file_io.FileIO(filename, 'rb') as f:\n file_content = f.read()\n try:\n graph_def.ParseFromString(file_content)\n return graph_def\n except Exception:\n pass\n try:\n text_format.Merge(file_content, graph_def)\n except text_format.ParseError as e:\n raise IOError(f'Cannot parse file {filename}: {str(e)}.')\n return graph_def", "docstring": "Reads a file containing `GraphDef` and returns the protocol buffer.\n\nArgs:\n filename: `graph_def` filename including the path.\n\nReturns:\n A `GraphDef` protocol buffer.\n\nRaises:\n IOError: If the file doesn't exist, or cannot be successfully parsed."} +{"repo": "keras", "function": "def diag(x, k=0):\n if any_symbolic_tensors((x,)):\n return Diag(k=k).symbolic_call(x)\n return backend.numpy.diag(x, k=k)", "docstring": "Extract a diagonal or construct a diagonal array.\n\nArgs:\n x: Input tensor. If `x` is 2-D, returns the k-th diagonal of `x`.\n If `x` is 1-D, return a 2-D tensor with `x` on the k-th diagonal.\n k: The diagonal to consider. Defaults to `0`. Use `k > 0` for diagonals\n above the main diagonal, and `k < 0` for diagonals below\n the main diagonal.\n\nReturns:\n The extracted diagonal or constructed diagonal tensor.\n\nExamples:\n>>> from keras.src import ops\n>>> x = ops.arange(9).reshape((3, 3))\n>>> x\narray([[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]])\n\n>>> ops.diag(x)\narray([0, 4, 8])\n>>> ops.diag(x, k=1)\narray([1, 5])\n>>> ops.diag(x, k=-1)\narray([3, 7])\n\n>>> ops.diag(ops.diag(x)))\narray([[0, 0, 0],\n [0, 4, 0],\n [0, 0, 8]])"} +{"repo": "tensorflow", "function": "def add_to_optionally_restored(self, var):\n self._optionally_restored.append(var)", "docstring": "Add a variable to the list of optionally restored variables.\n\nThere are situations where certain variables should be ignored in assertions\nsuch as assert_existing_objects_matched(). 
One example is that of a\ncheckpoint saved with train.Saver(), and restored with train.Checkpoint():\nit is possible for the train.Saver() checkpoint to be missing the internal\n`save_counter` variable, which we want to ignore on restore.\n\nArgs:\n var: The variable to treat as optionally restored."} +{"repo": "transformers", "function": "def forward(self, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n hidden_states = self.value_embedding(inputs_embeds)\n embed_pos = self.embed_positions(inputs_embeds.size())\n hidden_states = self.layernorm_embedding(hidden_states + embed_pos)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n if attention_mask is not None:\n attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n if head_mask is not None:\n if head_mask.size()[0] != len(self.layers):\n raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')\n for idx, (encoder_layer, conv_layer) in enumerate(zip(self.layers, self.conv_layers)):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n to_drop = False\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n to_drop = True\n if to_drop:\n layer_outputs = (None, None)\n else:\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, output_attentions)\n if conv_layer is not None:\n output = self._gradient_checkpointing_func(conv_layer, layer_outputs[0])\n layer_outputs = (output,) + layer_outputs[1:]\n else:\n layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions)\n if conv_layer is not None:\n output = conv_layer(layer_outputs[0])\n layer_outputs = (output,) + layer_outputs[1:]\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple."} +{"repo": "keras", "function": "def build(self, var_list):\n if self.built:\n return\n super().build(var_list)\n self._momentums = self.add_optimizer_variables(var_list, 'momentum')", "docstring": "Initialize optimizer variables.\n\nLion optimizer has one variable `momentums`.\n\nArgs:\n var_list: list of model variables to build Lion variables on."} +{"repo": "transformers", "function": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n size = get_size_dict(size)\n if 'height' not in size or 'width' not in size:\n raise ValueError(f'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')\n output_size = (size['height'], size['width'])\n return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image to `(size[\"height\"], size[\"width\"])`.\n\nArgs:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n\nReturns:\n `np.ndarray`: The resized image."} +{"repo": "transformers", "function": "def get_rel_pos(q_size, k_size, rel_pos):\n max_rel_dist = int(2 * max(q_size, k_size) - 1)\n if rel_pos.shape[0] != max_rel_dist:\n rel_pos_resized = nn.functional.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size=max_rel_dist, mode='linear')\n rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)\n else:\n rel_pos_resized = rel_pos\n q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)\n k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)\n relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0)\n return rel_pos_resized[relative_coords.long()]", "docstring": "Get relative positional embeddings according to the relative positions of query and key sizes.\n\nArgs:\n q_size (`int`):\n Size of query q.\n k_size (`int`):\n Size of key k.\n rel_pos (`torch.Tensor`):\n Relative position embeddings (num_embeddings, num_channels).\n\nReturns:\n Extracted positional embeddings according to relative positions."} +{"repo": "fhir-py", "function": "def _to_boolean(operand: List[WorkSpaceMessage]) -> Optional[bool]:\n if not operand:\n return None\n if len(operand) > 1:\n raise ValueError('Expected a single boolean result but got multiple items.')\n if not fhir_types.is_boolean(operand[0].message):\n raise ValueError('Expected a boolean but got a non-boolean value.')\n return proto_utils.get_value_at_field(operand[0].message, 'value')", "docstring": "Converts an evaluation result to a boolean value or None.\n\nArgs:\n operand: an expression operand result to convert to boolean.\n\nReturns:\n the boolean value, or None if the operand was empty.\n\nRaises:\n ValueError if it is not an empty result or a single, boolean value."} +{"repo": "transformers", "function": "def compute_context_repetition_mask(self, input_ids: torch.LongTensor) -> torch.LongTensor:\n self._check_input_ids_shape(input_ids)\n batch_size, _ = input_ids.shape\n state = SynthIDTextWatermarkState(batch_size=batch_size, ngram_len=self.ngram_len, context_history_size=self.context_history_size, device=self.device)\n contexts = input_ids[:, :-1].unfold(dimension=1, size=self.ngram_len - 1, step=1)\n _, num_contexts, _ = contexts.shape\n are_repeated_contexts = []\n for i in range(num_contexts):\n context = contexts[:, i, :]\n hash_result = torch.ones(batch_size, device=self.device, dtype=torch.long)\n context_hash = self.accumulate_hash(hash_result, context)[:, None]\n is_repeated_context = (state.context_history == context_hash).any(dim=1, keepdim=True)\n are_repeated_contexts.append(is_repeated_context)\n state.context_history = torch.concat((context_hash, state.context_history), dim=1)[:, :-1]\n are_repeated_contexts = torch.concat(are_repeated_contexts, dim=1)\n return torch.logical_not(are_repeated_contexts)", "docstring": "Computes repetition mask.\n\n0 and 1 stand for repeated and not repeated context n-1 grams respectively.\n\nArgs:\n input_ids (`torch.LongTensor`):\n Input token ids (batch_size, input_len).\n\nReturns:\n Repetitions mask (batch_size, input_len - (ngram_len - 1))."} +{"repo": "tensorflow", "function": "def assert_scalar(tensor, name=None, 
message=None):\n with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:\n tensor = ops.convert_to_tensor(tensor, name=name_scope)\n shape = tensor.get_shape()\n message = _message_prefix(message)\n if shape.ndims != 0:\n if context.executing_eagerly():\n raise ValueError('%sExpected scalar shape, saw shape: %s.' % (message, shape))\n else:\n raise ValueError('%sExpected scalar shape for %s, saw shape: %s.' % (message, tensor.name, shape))\n return tensor", "docstring": "Asserts that the given `tensor` is a scalar (i.e. zero-dimensional).\n\nThis function raises `ValueError` unless it can be certain that the given\n`tensor` is a scalar. `ValueError` is also raised if the shape of `tensor` is\nunknown.\n\nArgs:\n tensor: A `Tensor`.\n name: A name for this operation. Defaults to \"assert_scalar\"\n message: A string to prefix to the default message.\n\nReturns:\n The input tensor (potentially converted to a `Tensor`).\n\nRaises:\n ValueError: If the tensor is not scalar (rank 0), or if its shape is\n unknown."} +{"repo": "transformers", "function": "def _get_num_audio_features(self, audio_lengths: Sequence[int]) -> Sequence[int]:\n hop_length = self.melspec_kwargs['hop_length']\n effective_window_size = self.projector_window_size // self.projector_downsample_rate\n projector_lengths = []\n for raw_length in audio_lengths:\n mel_length = raw_length // hop_length + 1\n encoder_length = mel_length // 2\n nblocks = math.ceil(encoder_length / self.projector_window_size)\n projector_length = nblocks * effective_window_size\n projector_lengths.append(projector_length)\n return projector_lengths", "docstring": "Gets the (variable length) number of features (i.e., projector output) for the sequences\nbeing considered.\n\nArgs:\n audio_lengths (`Sequence[int]`):\n Sequence of one or more raw audio lengths."} +{"repo": "tensorflow", "function": "def __init__(self, hooks=None, scaffold=None, master='', config=None, checkpoint_dir=None, stop_grace_period_secs=120, checkpoint_filename_with_path=None):\n session_creator = ChiefSessionCreator(scaffold=scaffold, master=master, config=config, checkpoint_dir=checkpoint_dir, checkpoint_filename_with_path=checkpoint_filename_with_path)\n super(SingularMonitoredSession, self).__init__(session_creator, hooks, should_recover=False, stop_grace_period_secs=stop_grace_period_secs)", "docstring": "Creates a SingularMonitoredSession.\n\nArgs:\n hooks: An iterable of `SessionRunHook' objects.\n scaffold: A `Scaffold` used for gathering or building supportive ops. If\n not specified a default one is created. It's used to finalize the graph.\n master: `String` representation of the TensorFlow master to use.\n config: `ConfigProto` proto used to configure the session.\n checkpoint_dir: A string. Optional path to a directory where to restore\n variables.\n stop_grace_period_secs: Number of seconds given to threads to stop after\n `close()` has been called.\n checkpoint_filename_with_path: A string. 
Optional path to a checkpoint\n file from which to restore variables."} +{"repo": "keras", "function": "def get_config(self):\n raise NotImplementedError(f'{self} does not implement get_config()')", "docstring": "Returns the config of the quantizer.\n\nA quantizer config is a Python dictionary (serializable)\ncontaining all configuration parameters of the quantizer.\nThe same quantizer can be reinstantiated later\n(without any saved state) from this configuration.\n\nThis method is optional if you are just training and executing models,\nexporting to and from SavedModels, or using weight checkpoints.\n\nThis method is required for Keras `model_to_estimator`, saving and\nloading models to HDF5 formats, Keras model cloning, some visualization\nutilities, and exporting models to and from JSON.\n\nReturns:\n Python dictionary."} +{"repo": "tensorflow", "function": "def __init__(self, session, run_with_hooks_fn):\n self._session = session\n self._run_with_hooks_fn = run_with_hooks_fn", "docstring": "Initializes the `step_context` argument for a `step_fn` invocation.\n\nArgs:\n session: An instance of `tf.compat.v1.Session`.\n run_with_hooks_fn: A function for running fetches and hooks."} +{"repo": "transformers", "function": "def _compute_linear_scaling_rope_parameters(config: Optional[PretrainedConfig]=None, device: Optional['torch.device']=None, seq_len: Optional[int]=None, **rope_kwargs) -> tuple['torch.Tensor', float]:\n if config is not None and len(rope_kwargs) > 0:\n raise ValueError(f'Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in `_compute_linear_scaling_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}')\n if len(rope_kwargs) > 0:\n factor = rope_kwargs['factor']\n elif config is not None:\n factor = config.rope_scaling['factor']\n inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs)\n inv_freq /= factor\n return (inv_freq, attention_factor)", "docstring": "Computes the inverse frequencies with linear scaling. Credits to the Reddit user /u/kaiokendev\nArgs:\n config ([`~transformers.PretrainedConfig`]):\n The model configuration.\n device (`torch.device`):\n The device to use for initialization of the inverse frequencies.\n seq_len (`int`, *optional*):\n The current sequence length. 
Unused for this type of RoPE.\n rope_kwargs (`Dict`, *optional*):\n BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.\nReturns:\n Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the\n post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE)."} +{"repo": "transformers", "function": "class MixtralConfig(PretrainedConfig):\n model_type = 'mixtral'\n keys_to_ignore_at_inference = ['past_key_values']\n base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.block_sparse_moe.gate': 'colwise_rep', 'layers.*.block_sparse_moe.experts.*.w1': 'colwise', 'layers.*.block_sparse_moe.experts.*.w2': 'rowwise', 'layers.*.block_sparse_moe.experts.*.w3': 'colwise'}\n base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}\n\n def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, head_dim=None, hidden_act='silu', max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, rope_theta=1000000.0, sliding_window=None, attention_dropout=0.0, num_experts_per_tok=2, num_local_experts=8, output_router_logits=False, router_aux_loss_coef=0.001, router_jitter_noise=0.0, **kwargs):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.sliding_window = sliding_window\n if num_key_value_heads is None:\n num_key_value_heads = num_attention_heads\n self.num_key_value_heads = num_key_value_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.use_cache = use_cache\n self.rope_theta = rope_theta\n self.attention_dropout = attention_dropout\n self.head_dim = head_dim\n self.num_experts_per_tok = num_experts_per_tok\n self.num_local_experts = num_local_experts\n self.output_router_logits = output_router_logits\n self.router_aux_loss_coef = router_aux_loss_coef\n self.router_jitter_noise = router_jitter_noise\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`MixtralModel`]. It is used to instantiate an\nMixtral model according to the specified arguments, defining the model architecture. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the Mixtral-7B-v0.1 or Mixtral-7B-Instruct-v0.1.\n\n[mixtralai/Mixtral-8x7B](https://huggingface.co/mixtralai/Mixtral-8x7B)\n[mixtralai/Mixtral-7B-Instruct-v0.1](https://huggingface.co/mixtralai/Mixtral-7B-Instruct-v0.1)\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 32000):\n Vocabulary size of the Mixtral model. 
Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`MixtralModel`]\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 14336):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 32):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 32):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_key_value_heads (`int`, *optional*, defaults to 8):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details, check out [this\n paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.\n head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):\n The attention head dimension.\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n max_position_embeddings (`int`, *optional*, defaults to `4096*32`):\n The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention\n allows sequence of up to 4096*32 tokens.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the rms normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n pad_token_id (`int`, *optional*):\n The id of the padding token.\n bos_token_id (`int`, *optional*, defaults to 1):\n The id of the \"beginning-of-sequence\" token.\n eos_token_id (`int`, *optional*, defaults to 2):\n The id of the \"end-of-sequence\" token.\n tie_word_embeddings (`bool`, *optional*, defaults to `False`):\n Whether the model's input and output word embeddings should be tied.\n rope_theta (`float`, *optional*, defaults to 1000000.0):\n The base period of the RoPE embeddings.\n sliding_window (`int`, *optional*):\n Sliding window attention window size. If not specified, will default to `4096`.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n num_experts_per_tok (`int`, *optional*, defaults to 2):\n The number of experts to route per-token, can be also interpreted as the `top-k` routing\n parameter\n num_local_experts (`int`, *optional*, defaults to 8):\n Number of experts per Sparse MLP layer.\n output_router_logits (`bool`, *optional*, defaults to `False`):\n Whether or not the router logits should be returned by the model. Enabling this will also\n allow the model to output the auxiliary loss. 
See [here]() for more details\n router_aux_loss_coef (`float`, *optional*, defaults to 0.001):\n The aux loss factor for the total loss.\n router_jitter_noise (`float`, *optional*, defaults to 0.0):\n Amount of noise to add to the router.\n\n```python\n>>> from transformers import MixtralModel, MixtralConfig\n\n>>> # Initializing a Mixtral 7B style configuration\n>>> configuration = MixtralConfig()\n\n>>> # Initializing a model from the Mixtral 7B style configuration\n>>> model = MixtralModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False):\n assert not (input_ids is None and inputs_embeds is None)\n if input_ids is not None:\n check_embeddings_within_bounds(input_ids, self.config.vocab_size)\n inputs_embeds = tf.gather(params=self.weight, indices=input_ids)\n input_shape = shape_list(inputs_embeds)[:-1]\n if token_type_ids is None:\n token_type_ids = tf.fill(dims=input_shape, value=0)\n if self.trigram_input:\n inputs_embeds = tf.concat([tf.pad(inputs_embeds[:, 1:], ((0, 0), (0, 1), (0, 0))), inputs_embeds, tf.pad(inputs_embeds[:, :-1], ((0, 0), (1, 0), (0, 0)))], axis=2)\n if self.trigram_input or self.embedding_size != self.hidden_size:\n inputs_embeds = self.embedding_transformation(inputs_embeds)\n if position_ids is None:\n position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)\n position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)\n token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)\n final_embeddings = inputs_embeds + position_embeds + token_type_embeds\n final_embeddings = self.LayerNorm(inputs=final_embeddings)\n final_embeddings = self.dropout(inputs=final_embeddings, training=training)\n return final_embeddings", "docstring": "Applies embedding based on inputs tensor.\n\nReturns:\n final_embeddings (`tf.Tensor`): output embedding tensor."} +{"repo": "tensorflow", "function": "class CodeGenerator(NodeStateTracker, gast.NodeVisitor):\n\n def __init__(self, ctx):\n super(CodeGenerator, self).__init__(ctx)\n self._output_code = ''\n self.source_map = {}\n\n def emit(self, code):\n self._output_code += code\n\n @property\n def code_buffer(self):\n return self._output_code\n\n def visit(self, node):\n if anno.hasanno(node, anno.Basic.SKIP_PROCESSING):\n return\n parent_origin = self.ctx.current_origin\n eof_before = len(self._output_code)\n if anno.hasanno(node, anno.Basic.ORIGIN):\n self.ctx.current_origin = anno.getanno(node, anno.Basic.ORIGIN)\n try:\n ret = super(CodeGenerator, self).visit(node)\n eof_after = len(self._output_code)\n if eof_before - eof_after:\n inherited_origin = anno.getanno(node, anno.Basic.ORIGIN, default=parent_origin)\n if inherited_origin is not None:\n self.source_map[eof_before, eof_after] = inherited_origin\n return ret\n finally:\n self.ctx.current_origin = parent_origin", "docstring": "Base class for general-purpose Python-to-string code transformation.\n\nSimilar to Base, but outputs arbitrary strings instead of a Python AST.\n\nThis uses the same visitor mechanism that the standard NodeVisitor uses,\nmeaning that subclasses write handlers for the different kinds of nodes.\nNew code is generated using the emit method, which appends to a code buffer\nthat can be afterwards obtained from code_buffer.\n\nExample:\n\n class SimpleCodeGen(CodeGenerator):\n\n def 
visitIf(self, node):\n self.emit('if ')\n self.visit(node.test)\n self.emit(' { ')\n self.visit(node.body)\n self.emit(' } else { ')\n self.visit(node.orelse)\n self.emit(' } ')\n\n node = ast.parse(...)\n gen = SimpleCodeGen()\n gen.visit(node)\n # gen.code_buffer contains the resulting code"} +{"repo": "transformers", "function": "class AriaGroupedExpertsGemm(nn.Module):\n\n def __init__(self, in_features, out_features, groups):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.groups = groups\n self.weight = nn.Parameter(torch.empty(groups, in_features, out_features))\n\n def forward(self, input, tokens_per_expert):\n \"\"\"\n Perform grouped matrix multiplication.\n\n Args:\n input (`torch.Tensor`):\n Input tensor of shape (num_tokens, in_features).\n tokens_per_expert (`torch.Tensor`):\n Number of tokens assigned to each expert.\n\n Returns:\n torch.Tensor: Output tensor of shape (num_tokens, out_features).\n \"\"\"\n return sequential_experts_gemm(input, self.weight, tokens_per_expert.cpu())", "docstring": "Grouped GEMM (General Matrix Multiplication) module for efficient expert computation.\nThis module utilizes the grouped_gemm library (https://github.com/fanshiqing/grouped_gemm)\nfor optimized performance. If the grouped_gemm library is not installed, it gracefully\nfalls back to a sequential GEMM implementation, which may be slower but ensures\nfunctionality.\n\nArgs:\n in_features (`int`):\n Number of input features.\n out_features (`int`):\n Number of output features.\n groups (`int`):\n Number of expert groups."} +{"repo": "keras", "function": "def einsum(subscripts, *operands, **kwargs):\n if any_symbolic_tensors(operands):\n return Einsum(subscripts).symbolic_call(*operands, **kwargs)\n return backend.numpy.einsum(subscripts, *operands, **kwargs)", "docstring": "Evaluates the Einstein summation convention on the operands.\n\nArgs:\n subscripts: Specifies the subscripts for summation as comma separated\n list of subscript labels. 
An implicit (classical Einstein\n summation) calculation is performed unless the explicit indicator\n `->` is included as well as subscript labels of the precise\n output form.\n operands: The operands to compute the Einstein sum of.\n\nReturns:\n The calculation based on the Einstein summation convention.\n\nExample:\n>>> from keras.src import ops\n>>> a = ops.arange(25).reshape(5, 5)\n>>> b = ops.arange(5)\n>>> c = ops.arange(6).reshape(2, 3)\n\nTrace of a matrix:\n\n>>> ops.einsum(\"ii\", a)\n60\n>>> ops.einsum(a, [0, 0])\n60\n>>> ops.trace(a)\n60\n\nExtract the diagonal:\n\n>>> ops.einsum(\"ii -> i\", a)\narray([ 0, 6, 12, 18, 24])\n>>> ops.einsum(a, [0, 0], [0])\narray([ 0, 6, 12, 18, 24])\n>>> ops.diag(a)\narray([ 0, 6, 12, 18, 24])\n\nSum over an axis:\n\n>>> ops.einsum(\"ij -> i\", a)\narray([ 10, 35, 60, 85, 110])\n>>> ops.einsum(a, [0, 1], [0])\narray([ 10, 35, 60, 85, 110])\n>>> ops.sum(a, axis=1)\narray([ 10, 35, 60, 85, 110])\n\nFor higher dimensional tensors summing a single axis can be done\nwith ellipsis:\n\n>>> ops.einsum(\"...j -> ...\", a)\narray([ 10, 35, 60, 85, 110])\n>>> np.einsum(a, [..., 1], [...])\narray([ 10, 35, 60, 85, 110])\n\nCompute a matrix transpose or reorder any number of axes:\n\n>>> ops.einsum(\"ji\", c)\narray([[0, 3],\n [1, 4],\n [2, 5]])\n>>> ops.einsum(\"ij -> ji\", c)\narray([[0, 3],\n [1, 4],\n [2, 5]])\n>>> ops.einsum(c, [1, 0])\narray([[0, 3],\n [1, 4],\n [2, 5]])\n>>> ops.transpose(c)\narray([[0, 3],\n [1, 4],\n [2, 5]])\n\nMatrix vector multiplication:\n\n>>> ops.einsum(\"ij, j\", a, b)\narray([ 30, 80, 130, 180, 230])\n>>> ops.einsum(a, [0, 1], b, [1])\narray([ 30, 80, 130, 180, 230])\n>>> ops.einsum(\"...j, j\", a, b)\narray([ 30, 80, 130, 180, 230])"} +{"repo": "tensorflow", "function": "def assertAllDifferent(self, tensors):\n values = [array_ops.reshape(t, shape=[-1]) for t in tensors]\n values = array_ops.concat(values, axis=0)\n values = self.evaluate(values)\n values = values.tolist()\n self.assertAllEqual(len(values), len(set(values)))", "docstring": "Checks that there are no duplicate elements anywhere among the tensors.\n\nArgs:\n tensors: a list of tensors. 
They can have different shapes."} +{"repo": "tensorflow", "function": "def export(self, name=None):\n with ops.name_scope(name, '%s_lookup_table_export_values' % self.name, [self.resource_handle]):\n with ops.colocate_with(self.resource_handle):\n exported_keys, exported_values = gen_lookup_ops.lookup_table_export_v2(self.resource_handle, self._key_dtype, self._value_dtype)\n return (exported_keys, exported_values)", "docstring": "Returns tensors of all keys and values in the table.\n\nArgs:\n name: A name for the operation (optional).\n\nReturns:\n A pair of tensors with the first tensor containing all keys and the\n second tensors containing all values in the table."} +{"repo": "beam", "function": "def __init__(self, step_name, transform_id=None):\n self.step_name = step_name\n self.transform_id = transform_id", "docstring": "Creates a new step NameContext.\n\nArgs:\n step_name: The name of the step."} +{"repo": "tensorflow", "function": "class MaxSizePartitioner(Partitioner):\n\n def __init__(self, max_shard_bytes, max_shards=None, bytes_per_string=16):\n \"\"\"Creates a new `MaxSizePartitioner`.\n\n Args:\n max_shard_bytes: The maximum size any given shard is allowed to be.\n max_shards: The maximum number of shards in `int` created taking\n precedence over `max_shard_bytes`.\n bytes_per_string: If the partition value is of type string, this provides\n an estimate of how large each string is.\n \"\"\"\n if max_shard_bytes < 1:\n raise ValueError(f'Argument `max_shard_bytes` must be positive. Received {max_shard_bytes}')\n if max_shards and max_shards < 1:\n raise ValueError(f'Argument `max_shards` must be positive. Received {max_shards}')\n if bytes_per_string < 1:\n raise ValueError(f'Argument `bytes_per_string` must be positive. Received: {bytes_per_string}')\n self._max_shard_bytes = max_shard_bytes\n self._max_shards = max_shards\n self._bytes_per_string = bytes_per_string\n\n def __call__(self, shape, dtype, axis=0):\n return partitioned_variables.variable_axis_size_partitioner(max_shard_bytes=self._max_shard_bytes, max_shards=self._max_shards, bytes_per_string_element=self._bytes_per_string, axis=axis)(shape, dtype)", "docstring": "Partitioner that keeps shards below `max_shard_bytes`.\n\nThis partitioner ensures each shard has at most `max_shard_bytes`, and tries\nto allocate as few shards as possible, i.e., keeping shard size as large\nas possible.\n\nIf the partitioner hits the `max_shards` limit, then each shard may end up\nlarger than `max_shard_bytes`. 
By default `max_shards` equals `None` and no\nlimit on the number of shards is enforced.\n\nExamples:\n\n>>> partitioner = MaxSizePartitioner(max_shard_bytes=4)\n>>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32)\n>>> [6, 1]\n>>> partitioner = MaxSizePartitioner(max_shard_bytes=4, max_shards=2)\n>>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32)\n>>> [2, 1]\n>>> partitioner = MaxSizePartitioner(max_shard_bytes=1024)\n>>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32)\n>>> [1, 1]\n>>>\n>>> # use in ParameterServerStrategy\n>>> # strategy = tf.distribute.experimental.ParameterServerStrategy(\n>>> # cluster_resolver=cluster_resolver, variable_partitioner=partitioner)"} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, next_sentence_label: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[BigBirdForPreTrainingOutput, Tuple[torch.FloatTensor]]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output, pooled_output = outputs[:2]\n prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)\n total_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))\n if next_sentence_label is not None and total_loss is not None:\n next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))\n total_loss = total_loss + next_sentence_loss\n if not return_dict:\n output = (prediction_scores, seq_relationship_score) + outputs[2:]\n return (total_loss,) + output if total_loss is not None else output\n return BigBirdForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the\n loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\nnext_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be\n added to masked_lm loss. 
Input should be a sequence pair (see `input_ids` docstring) Indices should be in\n `[0, 1]`:\n\n - 0 indicates sequence B is a continuation of sequence A,\n - 1 indicates sequence B is a random sequence.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, BigBirdForPreTraining\n>>> import torch\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/bigbird-roberta-base\")\n>>> model = BigBirdForPreTraining.from_pretrained(\"google/bigbird-roberta-base\")\n\n>>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n>>> outputs = model(**inputs)\n\n>>> prediction_logits = outputs.prediction_logits\n>>> seq_relationship_logits = outputs.seq_relationship_logits\n```"} +{"repo": "transformers", "function": "def create_segment_token_type_ids_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]:\n table_ids = list(zip(*table_values))[0] if table_values else []\n return [0] * (1 + len(query_ids) + 1) + [1] * len(table_ids)", "docstring": "Creates the segment token type IDs according to the query token IDs and a list of table values.\n\nArgs:\n query_ids (`List[int]`): list of token IDs corresponding to the ID.\n table_values (`List[TableValue]`): lift of table values, which are named tuples containing the\n token value, the column ID and the row ID of said token.\n\nReturns:\n `List[int]`: List of ints containing the segment token type IDs values."} +{"repo": "transformers", "function": "def diff(self) -> List[str]:\n return set(self.to_track.keys()) - self._seen", "docstring": "This method returns a set difference between the keys in the tracked state dict and the one we have access so far.\nThis is an effective method to check if we have update all the keys\n\nReturns:\n List[str]: List of keys not yet updated"} +{"repo": "tensorflow", "function": "def _BatchNormWithGlobalNormalizationGrad(op: ops.Operation, grad):\n dx, dm, dv, db, dg = gen_nn_ops.batch_norm_with_global_normalization_grad(op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad, op.get_attr('variance_epsilon'), op.get_attr('scale_after_normalization'))\n return (dx, dm, dv, db, dg)", "docstring": "Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.\n\nWe do not backprop anything for the mean and var intentionally as they are\nnot being trained with backprop in the operation.\n\nArgs:\n op: The BatchNormOp for which we need to generate gradients.\n grad: Tensor. The gradients passed to the BatchNormOp.\n\nReturns:\n dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))\n dm: Backprop for mean, which is\n sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))\n dv: Backprop for variance, which is\n sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)\n db: Backprop for beta, which is grad reduced in all except the\n last dimension.\n dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))"} +{"repo": "tensorflow", "function": "def remove_checkpoint(checkpoint_prefix, checkpoint_format_version=saver_pb2.SaverDef.V2, meta_graph_suffix='meta'):\n _delete_file_if_exists(meta_graph_filename(checkpoint_prefix, meta_graph_suffix))\n if checkpoint_format_version == saver_pb2.SaverDef.V2:\n _delete_file_if_exists(checkpoint_prefix + '.index')\n _delete_file_if_exists(checkpoint_prefix + '.data-?????-of-?????')\n else:\n _delete_file_if_exists(checkpoint_prefix)", "docstring": "Removes a checkpoint given by `checkpoint_prefix`.\n\nArgs:\n checkpoint_prefix: The prefix of a V1 or V2 checkpoint. 
Typically the result\n of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of\n sharded/non-sharded or V1/V2.\n checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to\n `SaverDef.V2`.\n meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'."} +{"repo": "nsscache", "function": "def GetGroupMap(self, since=None):\n return GroupUpdateGetter().GetUpdates(self, self.conf['group_url'], since)", "docstring": "Return the group map from this source.\n\nArgs:\n since: Get data only changed since this timestamp (inclusive) or None\n for all data.\n\nReturns:\n instance of group.GroupMap"} +{"repo": "tensorflow", "function": "def values(self):\n return self._values", "docstring": "The concatenated rows for this ragged tensor.\n\n`rt.values` is a potentially ragged tensor formed by flattening the two\noutermost dimensions of `rt` into a single dimension.\n\n`rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the\nnumber of items in the outer two dimensions of `rt`).\n\n`rt.ragged_rank = self.ragged_rank - 1`\n\nReturns:\n A potentially ragged tensor.\n\n#### Example:\n\n>>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])\n>>> print(rt.values)\ntf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)"} +{"repo": "tensorflow", "function": "def add_variable(self, feature_column, var):\n del feature_column, var\n raise NotImplementedError('StateManager.add_variable')", "docstring": "Adds an existing variable to the state.\n\nArgs:\n feature_column: A `FeatureColumn` object to associate this variable with.\n var: The variable."} +{"repo": "transformers", "function": "class UdopConfig(PretrainedConfig):\n model_type = 'udop'\n keys_to_ignore_at_inference = ['past_key_values']\n attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}\n\n def __init__(self, vocab_size=33201, d_model=1024, d_kv=64, d_ff=4096, num_layers=24, num_decoder_layers=None, num_heads=16, relative_attention_num_buckets=32, relative_attention_max_distance=128, relative_bias_args=[{'type': '1d'}, {'type': 'horizontal'}, {'type': 'vertical'}], dropout_rate=0.1, layer_norm_epsilon=1e-06, initializer_factor=1.0, feed_forward_proj='relu', is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, max_2d_position_embeddings=1024, image_size=224, patch_size=16, num_channels=3, **kwargs):\n self.vocab_size = vocab_size\n self.d_model = d_model\n self.d_kv = d_kv\n self.d_ff = d_ff\n self.num_layers = num_layers\n self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers\n self.num_heads = num_heads\n self.relative_attention_num_buckets = relative_attention_num_buckets\n self.relative_attention_max_distance = relative_attention_max_distance\n self.dropout_rate = dropout_rate\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_factor = initializer_factor\n self.feed_forward_proj = feed_forward_proj\n self.use_cache = use_cache\n self.max_2d_position_embeddings = max_2d_position_embeddings\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_channels = num_channels\n if not isinstance(relative_bias_args, list):\n raise TypeError('`relative_bias_args` should be a list of dictionaries.')\n self.relative_bias_args = relative_bias_args\n act_info = self.feed_forward_proj.split('-')\n self.dense_act_fn = act_info[-1]\n self.is_gated_act = act_info[0] == 'gated'\n if len(act_info) > 1 and act_info[0] != 'gated' or len(act_info) > 2:\n raise ValueError(f\"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.Please make sure `feed_forward_proj` is of the format `gated-{{ACT_FN}}` or `{{ACT_FN}}`, e.g. 'gated-gelu' or 'relu'\")\n super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`UdopForConditionalGeneration`]. It is used to\ninstantiate a UDOP model according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the UDOP\n[microsoft/udop-large](https://huggingface.co/microsoft/udop-large) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArguments:\n vocab_size (`int`, *optional*, defaults to 33201):\n Vocabulary size of the UDOP model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`UdopForConditionalGeneration`].\n d_model (`int`, *optional*, defaults to 1024):\n Size of the encoder layers and the pooler layer.\n d_kv (`int`, *optional*, defaults to 64):\n Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will\n be defined as `num_heads * d_kv`.\n d_ff (`int`, *optional*, defaults to 4096):\n Size of the intermediate feed forward layer in each `UdopBlock`.\n num_layers (`int`, *optional*, defaults to 24):\n Number of hidden layers in the Transformer encoder and decoder.\n num_decoder_layers (`int`, *optional*):\n Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.\n num_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the Transformer encoder and decoder.\n relative_attention_num_buckets (`int`, *optional*, defaults to 32):\n The number of buckets to use for each attention layer.\n relative_attention_max_distance (`int`, *optional*, defaults to 128):\n The maximum distance of the longer sequences for the bucket separation.\n relative_bias_args (`List[dict]`, *optional*, defaults to `[{'type': '1d'}, {'type': 'horizontal'}, {'type': 'vertical'}]`):\n A list of dictionaries containing the arguments for the relative bias layers.\n dropout_rate (`float`, *optional*, defaults to 0.1):\n The ratio for all dropout layers.\n layer_norm_epsilon (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the layer normalization layers.\n initializer_factor (`float`, *optional*, defaults to 1.0):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n feed_forward_proj (`string`, *optional*, defaults to `\"relu\"`):\n Type of feed forward layer to be used. Should be one of `\"relu\"` or `\"gated-gelu\"`. Udopv1.1 uses the\n `\"gated-gelu\"` feed forward projection. 
Original Udop uses `\"relu\"`.\n is_encoder_decoder (`bool`, *optional*, defaults to `True`):\n Whether the model should behave as an encoder/decoder or not.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n pad_token_id (`int`, *optional*, defaults to 0):\n The id of the padding token in the vocabulary.\n eos_token_id (`int`, *optional*, defaults to 1):\n The id of the end-of-sequence token in the vocabulary.\n max_2d_position_embeddings (`int`, *optional*, defaults to 1024):\n The maximum absolute position embeddings for relative position encoding.\n image_size (`int`, *optional*, defaults to 224):\n The size of the input images.\n patch_size (`int`, *optional*, defaults to 16):\n The patch size used by the vision encoder.\n num_channels (`int`, *optional*, defaults to 3):\n The number of channels in the input images."} +{"repo": "keras", "function": "class StatelessScope:\n\n def __init__(self, state_mapping=None, collect_losses=False, initialize_variables=True):\n from keras.src import backend\n from keras.src.backend.common.variables import Variable\n self.collect_losses = collect_losses\n self.initialize_variables = initialize_variables\n self.losses = []\n self.state_mapping = {}\n state_mapping = state_mapping or {}\n for k, v in state_mapping:\n if not isinstance(k, Variable):\n raise ValueError(f'Invalid reference variable in StatelessScope: all keys in argument `mapping` must be Variable instances. Received instead: {k}')\n if isinstance(v, Variable):\n v = backend.cast(v.value, dtype=k.dtype)\n else:\n v = backend.convert_to_tensor(v, dtype=k.dtype)\n if k.shape != v.shape:\n raise ValueError(f'Invalid variable value in StatelessScope: all values in argument `mapping` must be tensors with a shape that matches the corresponding variable shape. For variable {k}, received invalid value {v} with shape {v.shape}.')\n self.state_mapping[id(k)] = v\n\n def __enter__(self):\n self.original_scope = get_stateless_scope()\n global_state.set_global_attribute('stateless_scope', self)\n return self\n\n def add_loss(self, loss):\n self.losses.append(loss)\n\n def add_update(self, update):\n variable, value = update\n self.state_mapping[id(variable)] = value\n\n def get_current_value(self, variable):\n return self.state_mapping.get(id(variable), None)\n\n def __exit__(self, *args, **kwargs):\n global_state.set_global_attribute('stateless_scope', self.original_scope)\n if self.original_scope is None and self.initialize_variables:\n from keras.src.backend.common.variables import initialize_all_variables\n initialize_all_variables()", "docstring": "Scope to prevent any update to Keras Variables.\n\nThe values of variables to be used inside the scope\nshould be passed via the `state_mapping` argument, a\nlist of tuples `(k, v)` where `k` is a `Variable`\nand `v` is the intended value for this variable\n(a backend tensor).\n\nUpdated values can be collected on scope exit via\n`value = scope.get_current_value(variable)`. No updates\nwill be applied in-place to any variables for the duration\nof the scope.\n\nExample:\n\n```python\nstate_mapping = [(k, ops.ones(k.shape, k.dtype)) for k in model.weights]\nwith keras.StatelessScope(state_mapping) as scope:\n outputs = model.some_function(inputs)\n\n# All model variables remain unchanged. 
Their new values can be\n# collected via:\nfor k in model.weights:\n new_value = scope.get_current_value(k)\n print(f\"New value for {k}: {new_value})\n```"} +{"repo": "transformers", "function": "def flip_back(output_flipped, flip_pairs, target_type='gaussian-heatmap'):\n if target_type not in ['gaussian-heatmap', 'combined-target']:\n raise ValueError('target_type should be gaussian-heatmap or combined-target')\n if output_flipped.ndim != 4:\n raise ValueError('output_flipped should be [batch_size, num_keypoints, height, width]')\n batch_size, num_keypoints, height, width = output_flipped.shape\n channels = 1\n if target_type == 'combined-target':\n channels = 3\n output_flipped[:, 1::3, ...] = -output_flipped[:, 1::3, ...]\n output_flipped = output_flipped.reshape(batch_size, -1, channels, height, width)\n output_flipped_back = output_flipped.clone()\n for left, right in flip_pairs.tolist():\n output_flipped_back[:, left, ...] = output_flipped[:, right, ...]\n output_flipped_back[:, right, ...] = output_flipped[:, left, ...]\n output_flipped_back = output_flipped_back.reshape((batch_size, num_keypoints, height, width))\n output_flipped_back = output_flipped_back.flip(-1)\n return output_flipped_back", "docstring": "Flip the flipped heatmaps back to the original form.\n\nArgs:\n output_flipped (`torch.tensor` of shape `(batch_size, num_keypoints, height, width)`):\n The output heatmaps obtained from the flipped images.\n flip_pairs (`torch.Tensor` of shape `(num_keypoints, 2)`):\n Pairs of keypoints which are mirrored (for example, left ear -- right ear).\n target_type (`str`, *optional*, defaults to `\"gaussian-heatmap\"`):\n Target type to use. Can be gaussian-heatmap or combined-target.\n gaussian-heatmap: Classification target with gaussian distribution.\n combined-target: The combination of classification target (response map) and regression target (offset map).\n Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n\nReturns:\n torch.Tensor: heatmaps that flipped back to the original image"} +{"repo": "tensorflow", "function": "def canonicalize_without_job_and_task(d):\n canonicalized_device = canonicalize(d)\n spec = tf_device.DeviceSpec.from_string(canonicalized_device)\n spec = spec.replace(job=None, task=None, replica=0)\n return spec.to_string()", "docstring": "Partially canonicalize device string.\n\nThis returns device string from `d` without including job and task.\nThis is most useful for parameter server strategy where the device strings are\ngenerated on the chief, but executed on workers.\n\n For example:\n If d = '/cpu:0', default='/job:worker/task:1', it returns\n '/replica:0/device:CPU:0'.\n If d = '/cpu:0', default='/job:worker', it returns\n '/replica:0/device:CPU:0'.\n If d = '/gpu:0', default=None, it returns\n '/replica:0/device:GPU:0'.\n\nNote: This uses \"job:localhost\" as the default if executing eagerly.\n\nArgs:\n d: a device string or tf.config.LogicalDevice\n\nReturns:\n a partially canonicalized device string."} +{"repo": "tensorflow", "function": "def get_git_version(git_base_path, git_tag_override):\n unknown_label = b'unknown'\n try:\n val = bytes(subprocess.check_output(['git', str('--git-dir=%s/.git' % git_base_path), str('--work-tree=%s' % git_base_path), 'describe', '--long', '--tags']).strip())\n version_separator = b'-'\n if git_tag_override and val:\n split_val = val.split(version_separator)\n if len(split_val) < 3:\n raise Exception(\"Expected git version in format 'TAG-COMMITS AFTER TAG-HASH' but got '%s'\" % val)\n abbrev_commit = split_val[-1]\n val = version_separator.join([bytes(git_tag_override, 'utf-8'), b'0', abbrev_commit])\n return val if val else unknown_label\n except (subprocess.CalledProcessError, OSError):\n return unknown_label", "docstring": "Get the git version from the repository.\n\nThis function runs `git describe ...` in the path given as `git_base_path`.\nThis will return a string of the form:\n--\n\nFor example, 'v0.10.0-1585-gbb717a6' means v0.10.0 was the last tag when\ncompiled. 1585 commits are after that commit tag, and we can get back to this\nversion by running `git checkout gbb717a6`.\n\nArgs:\n git_base_path: where the .git directory is located\n git_tag_override: Override the value for the git tag. This is useful for\n releases where we want to build the release before the git tag is\n created.\nReturns:\n A bytestring representing the git version"} +{"repo": "mobly", "function": "def make_connection_with_forwarded_port(self, host_port, device_port, uid=UNKNOWN_UID, cmd=ConnectionHandshakeCommand.INIT):\n self.host_port = host_port\n self.device_port = device_port\n self._counter = self._id_counter()\n self.create_socket_connection()\n self.send_handshake_request(uid, cmd)", "docstring": "Makes a connection to the server with the given forwarded port.\n\nThis process assumes that a device port has already been forwarded to a\nhost port, and it only makes a connection to the snippet server based on\nthe forwarded port. This is typically used by clients that share the same\nsnippet server, e.g. the snippet client and its event client.\n\nArgs:\n host_port: int, the host port which has already been forwarded.\n device_port: int, the device port listened by the snippet server.\n uid: int, the uid of the server session to continue. 
It will be ignored\n if the `cmd` requires the server to create a new session.\n cmd: ConnectionHandshakeCommand, the handshake command Enum for the\n server, which requires the server to create a new session or use the\n current session."} +{"repo": "transformers", "function": "def batch_encode_plus_boxes(self, batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair], List[PreTokenizedInput]], is_pair: Optional[bool]=None, boxes: Optional[List[List[List[int]]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)\n return self._batch_encode_plus_boxes(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.\n\nArgs:\n batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`):\n Batch of sequences or pair of sequences to be encoded. This can be a list of\n string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see\n details in `encode_plus`)."} +{"repo": "tensorflow", "function": "def _weighted_flat_map(input_datasets: Sequence[dataset_ops.DatasetV2], weights: Optional[Sequence[Union[float, tensor.Tensor]]]=None, name: Optional[str]=None) -> dataset_ops.DatasetV2:\n return _WeightedFlatMap(input_datasets, weights, name=name)", "docstring": "A `Dataset` that fetches elements from `input_datasets` and flattens them.\n\nThis operation combines elements from multiple datasets into a flattened\ndataset. Elements are read in proportion to the `weights` assigned to each\ninput dataset. 
All requested elements from a dataset are read before reading\nthe elements from the next dataset.\n\nFor example, suppose we have 2 datasets:\n\n# TODO(wilsin): Make the following code testable after the API is released.\ndataset1 = tf.data.Dataset.range(0, 10)\ndataset2 = tf.data.Dataset.range(10, 20),\n\nSuppose that we call `weighted_flat_map` from these 2 datasets with the\nfollowing weights:\n\ndataset = tf.data.Dataset.weighted_flat_map([dataset1, dataset2], [0.5, 1.0])\n\nThen, the outcome of the elements is:\n# [0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]\n\nArgs:\n input_datasets: A non-empty list of `tf.data.Dataset` objects with\n compatible structure.\n weights: (Optional.) A list or Tensor of `len(datasets)` non-zero\n floating-point values where `weights[i]` represents the probability to\n sample from `datasets[i]`, or a `tf.data.Dataset` object where each\n element is such a list. Defaults to a uniform distribution across\n `datasets`.\n name: (Optional.) A name for the tf.data operation.\n\nReturns:\n A dataset that reads elements from all its inputs, reading the requested\n elements from an input according to the weight before proceeding to the next\n input. The number of elements read from an input is in proportion to its\n weight given in `weights`.\n\nRaises:\n TypeError: if the `datasets` or `weights` arguments have the wrong type.\n ValueError:\n - if `input_datasets` has less than 2 datasets.\n - if `weights` is specified and does not match the length of\n `input_datasets`.\n InvalidArgumentError:\n - if any of the `input_datasets` has an unknown or infinite cardinality.\n - if any of the `weights` has a value that is less than or equal to 0.0"} +{"repo": "tensorflow", "function": "def squeeze_batch_dims(inp, op, inner_rank, name=None):\n with ops.name_scope(name, 'squeeze_batch_dims', [inp]):\n inp = ops.convert_to_tensor(inp, name='input')\n shape = inp.shape\n inner_shape = shape[-inner_rank:]\n if not inner_shape.is_fully_defined():\n inner_shape = array_ops.shape(inp)[-inner_rank:]\n batch_shape = shape[:-inner_rank]\n if not batch_shape.is_fully_defined():\n batch_shape = array_ops.shape(inp)[:-inner_rank]\n if isinstance(inner_shape, tensor_shape.TensorShape):\n inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())\n else:\n inp_reshaped = array_ops.reshape(inp, array_ops.concat(([-1], inner_shape), axis=-1))\n out_reshaped = op(inp_reshaped)\n out_inner_shape = out_reshaped.shape[-inner_rank:]\n if not out_inner_shape.is_fully_defined():\n out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]\n out = array_ops.reshape(out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))\n out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])\n return out", "docstring": "Returns `unsqueeze_batch(op(squeeze_batch(inp)))`.\n\nWhere `squeeze_batch` reshapes `inp` to shape\n`[prod(inp.shape[:-inner_rank])] + inp.shape[-inner_rank:]`\nand `unsqueeze_batch` does the reverse reshape but on the output.\n\nArgs:\n inp: A tensor with dims `batch_shape + inner_shape` where `inner_shape`\n is length `inner_rank`.\n op: A callable that takes a single input tensor and returns a single.\n output tensor.\n inner_rank: A python integer.\n name: A string.\n\nReturns:\n `unsqueeze_batch_op(squeeze_batch(inp))`."} +{"repo": "keras", "function": "def preprocess_input(x, data_format=None):\n return x", "docstring": "A placeholder method for backward compatibility.\n\nThe preprocessing logic has been included in the efficientnet 
model\nimplementation. Users are no longer required to call this method to\nnormalize the input data. This method does nothing and only kept as a\nplaceholder to align the API surface between old and new version of model.\n\nArgs:\n x: A floating point `numpy.array` or a tensor.\n data_format: Optional data format of the image tensor/array. `None`\n means the global setting `keras.backend.image_data_format()`\n is used (unless you changed it, it uses `\"channels_last\"`).\n Defaults to `None`.\n\nReturns:\n Unchanged `numpy.array` or tensor."} +{"repo": "tensorflow", "function": "def wav_to_features(sample_rate, clip_duration_ms, window_size_ms, window_stride_ms, feature_bin_count, quantize, preprocess, input_wav, output_c_file):\n sess = tf.compat.v1.InteractiveSession()\n model_settings = models.prepare_model_settings(0, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms, feature_bin_count, preprocess)\n audio_processor = input_data.AudioProcessor(None, None, 0, 0, '', 0, 0, model_settings, None)\n results = audio_processor.get_features_for_wav(input_wav, model_settings, sess)\n features = results[0]\n variable_base = os.path.splitext(os.path.basename(input_wav).lower())[0]\n with gfile.GFile(output_c_file, 'w') as f:\n f.write('/* File automatically created by\\n')\n f.write(' * tensorflow/examples/speech_commands/wav_to_features.py \\\\\\n')\n f.write(' * --sample_rate=%d \\\\\\n' % sample_rate)\n f.write(' * --clip_duration_ms=%d \\\\\\n' % clip_duration_ms)\n f.write(' * --window_size_ms=%d \\\\\\n' % window_size_ms)\n f.write(' * --window_stride_ms=%d \\\\\\n' % window_stride_ms)\n f.write(' * --feature_bin_count=%d \\\\\\n' % feature_bin_count)\n if quantize:\n f.write(' * --quantize=1 \\\\\\n')\n f.write(' * --preprocess=\"%s\" \\\\\\n' % preprocess)\n f.write(' * --input_wav=\"%s\" \\\\\\n' % input_wav)\n f.write(' * --output_c_file=\"%s\" \\\\\\n' % output_c_file)\n f.write(' */\\n\\n')\n f.write('const int g_%s_width = %d;\\n' % (variable_base, model_settings['fingerprint_width']))\n f.write('const int g_%s_height = %d;\\n' % (variable_base, model_settings['spectrogram_length']))\n if quantize:\n features_min, features_max = input_data.get_features_range(model_settings)\n f.write('const unsigned char g_%s_data[] = {' % variable_base)\n i = 0\n for value in features.flatten():\n quantized_value = int(round(255 * (value - features_min) / (features_max - features_min)))\n if quantized_value < 0:\n quantized_value = 0\n if quantized_value > 255:\n quantized_value = 255\n if i == 0:\n f.write('\\n ')\n f.write('%d, ' % quantized_value)\n i = (i + 1) % 10\n else:\n f.write('const float g_%s_data[] = {\\n' % variable_base)\n i = 0\n for value in features.flatten():\n if i == 0:\n f.write('\\n ')\n f.write('%f, ' % value)\n i = (i + 1) % 10\n f.write('\\n};\\n')", "docstring": "Converts an audio file into its corresponding feature map.\n\nArgs:\n sample_rate: Expected sample rate of the wavs.\n clip_duration_ms: Expected duration in milliseconds of the wavs.\n window_size_ms: How long each spectrogram timeslice is.\n window_stride_ms: How far to move in time between spectrogram timeslices.\n feature_bin_count: How many bins to use for the feature fingerprint.\n quantize: Whether to train the model for eight-bit deployment.\n preprocess: Spectrogram processing mode; \"mfcc\", \"average\" or \"micro\".\n input_wav: Path to the audio WAV file to read.\n output_c_file: Where to save the generated C source file."} +{"repo": "tensorflow", "function": "def rgb_to_grayscale(images, name=None):\n with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name:\n images = ops.convert_to_tensor(images, name='images')\n orig_dtype = images.dtype\n flt_image = convert_image_dtype(images, dtypes.float32)\n rgb_weights = [0.2989, 0.587, 0.114]\n gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1])\n gray_float = array_ops.expand_dims(gray_float, -1)\n return convert_image_dtype(gray_float, orig_dtype, name=name)", "docstring": "Converts one or more images from RGB to Grayscale.\n\nOutputs a tensor of the same `DType` and rank as `images`. The size of the\nlast dimension of the output is 1, containing the Grayscale value of the\npixels.\n\n>>> original = tf.constant([[[1.0, 2.0, 3.0]]])\n>>> converted = tf.image.rgb_to_grayscale(original)\n>>> print(converted.numpy())\n[[[1.81...]]]\n\nArgs:\n images: The RGB tensor to convert. The last dimension must have size 3 and\n should contain RGB values.\n name: A name for the operation (optional).\n\nReturns:\n The converted grayscale image(s)."} +{"repo": "tensorflow", "function": "def _run_contained(task_type, task_id, fn, args, kwargs):\n is_successful = False\n return_value = None\n exc_info = None\n try:\n return_value = fn(*args, **kwargs)\n is_successful = True\n return _ProcessStatusInfo(task_type=task_type, task_id=task_id, is_successful=is_successful, exc_info=exc_info, return_value=return_value)\n except Exception:\n exc_info = sys.exc_info()\n return _ProcessStatusInfo(task_type=task_type, task_id=task_id, is_successful=is_successful, exc_info=exc_info, return_value=return_value)", "docstring": "Runs `fn` with `args` and `kwargs`.\n\nThe function returns _ProcessStatusInfo which captures the return value and\nthe exception.\n\nArgs:\n task_type: the task type.\n task_id: the task index.\n fn: the function to be run.\n args: optional positional arguments to be supplied in `fn`.\n kwargs: optional keyword arguments to be supplied in `fn`.\n\nReturns:\n a _ProcessStatusInfo."} +{"repo": "transformers", "function": "def get_image_features(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.FloatTensor]=None, vision_feature_layer: int=-1):\n vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n patch_attention_mask = self._create_patch_attention_mask(pixel_mask)\n image_outputs = self.vision_tower(pixel_values, patch_attention_mask=patch_attention_mask, output_hidden_states=True)\n image_attn_mask = None\n if patch_attention_mask is not None:\n flattened_mask = patch_attention_mask.flatten(1)\n image_attn_mask = torch.logical_not(flattened_mask)\n selected_image_feature = image_outputs.hidden_states[vision_feature_layer]\n image_features = self.multi_modal_projector(selected_image_feature, attn_mask=image_attn_mask)\n return image_features", "docstring": "Obtains image last hidden states from the vision tower and apply multimodal projection.\n\nArgs:\n pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):\n The tensors corresponding to the input images.\n pixel_mask (`torch.FloatTensor]`, *optional*):\n The tensors corresponding to the input image mask.\n vision_feature_layer (`Union[int, List[int]]`, *optional*):\n The index of the layer to select the vision feature. 
If multiple indices are provided,\n the vision feature of the corresponding indices will be concatenated to form the\n vision features.\nReturns:\n image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`)."} +{"repo": "transformers", "function": "def pad(self, images: List[np.ndarray], annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True, pad_size: Optional[Dict[str, int]]=None) -> BatchFeature:\n pad_size = pad_size if pad_size is not None else self.pad_size\n if pad_size is not None:\n padded_size = (pad_size['height'], pad_size['width'])\n else:\n padded_size = get_max_height_width(images, input_data_format=input_data_format)\n annotation_list = annotations if annotations is not None else [None] * len(images)\n padded_images = []\n padded_annotations = []\n for image, annotation in zip(images, annotation_list):\n padded_image, padded_annotation = self._pad_image(image, padded_size, annotation, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, update_bboxes=update_bboxes)\n padded_images.append(padded_image)\n padded_annotations.append(padded_annotation)\n data = {'pixel_values': padded_images}\n if return_pixel_mask:\n masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images]\n data['pixel_mask'] = masks\n encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)\n if annotations is not None:\n encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations]\n return encoded_inputs", "docstring": "Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width\nin the batch and optionally returns their corresponding pixel mask.\n\nArgs:\n images (List[`np.ndarray`]):\n Images to pad.\n annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):\n Annotations to transform according to the padding that is applied to the images.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n return_pixel_mask (`bool`, *optional*, defaults to `True`):\n Whether to return a pixel mask.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred.\n update_bboxes (`bool`, *optional*, defaults to `True`):\n Whether to update the bounding boxes in the annotations to match the padded images. 
If the\n bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)`\n format, the bounding boxes will not be updated.\n pad_size (`Dict[str, int]`, *optional*):\n The size `{\"height\": int, \"width\" int}` to pad the images to. Must be larger than any image size\n provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest\n height and width in the batch."} +{"repo": "tensorflow", "function": "def _merge_run_options(self, options, incoming_options):\n options.trace_level = max(options.trace_level, incoming_options.trace_level)\n options.timeout_in_ms = max(options.timeout_in_ms, incoming_options.timeout_in_ms)\n options.inter_op_thread_pool = max(options.inter_op_thread_pool, incoming_options.inter_op_thread_pool)\n options.output_partition_graphs = max(options.output_partition_graphs, incoming_options.output_partition_graphs)\n options.debug_options.debug_tensor_watch_opts.extend(incoming_options.debug_options.debug_tensor_watch_opts)\n options.debug_options.reset_disk_byte_usage = options.debug_options.reset_disk_byte_usage or incoming_options.debug_options.reset_disk_byte_usage\n options.report_tensor_allocations_upon_oom = options.report_tensor_allocations_upon_oom or incoming_options.report_tensor_allocations_upon_oom", "docstring": "Merge two instances of RunOptions into the first one.\n\nDuring the merger, the numerical fields including trace_level,\ntimeout_in_ms, inter_op_thread_pool are set to the larger one of the two.\nThe boolean value is set to the logical OR of the two.\ndebug_tensor_watch_opts of the original options is extended with that from\nthe incoming one.\n\nArgs:\n options: The options to merge into.\n incoming_options: The options to be merged into the first argument."} +{"repo": "transformers", "function": "def postprocess_qa_predictions(examples, features, predictions: tuple[np.ndarray, np.ndarray], version_2_with_negative: bool=False, n_best_size: int=20, max_answer_length: int=30, null_score_diff_threshold: float=0.0, output_dir: Optional[str]=None, prefix: Optional[str]=None, log_level: Optional[int]=logging.WARNING):\n if len(predictions) != 2:\n raise ValueError('`predictions` should be a tuple with two elements (start_logits, end_logits).')\n all_start_logits, all_end_logits = predictions\n if len(predictions[0]) != len(features):\n raise ValueError(f'Got {len(predictions[0])} predictions and {len(features)} features.')\n example_id_to_index = {k: i for i, k in enumerate(examples['id'])}\n features_per_example = collections.defaultdict(list)\n for i, feature in enumerate(features):\n features_per_example[example_id_to_index[feature['example_id']]].append(i)\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n if version_2_with_negative:\n scores_diff_json = collections.OrderedDict()\n logger.setLevel(log_level)\n logger.info(f'Post-processing {len(examples)} example predictions split into {len(features)} features.')\n for example_index, example in enumerate(tqdm(examples)):\n feature_indices = features_per_example[example_index]\n min_null_prediction = None\n prelim_predictions = []\n for feature_index in feature_indices:\n start_logits = all_start_logits[feature_index]\n end_logits = all_end_logits[feature_index]\n offset_mapping = features[feature_index]['offset_mapping']\n token_is_max_context = features[feature_index].get('token_is_max_context', None)\n feature_null_score = start_logits[0] + end_logits[0]\n if min_null_prediction is 
None or min_null_prediction['score'] > feature_null_score:\n min_null_prediction = {'offsets': (0, 0), 'score': feature_null_score, 'start_logit': start_logits[0], 'end_logit': end_logits[0]}\n start_indexes = np.argsort(start_logits)[-1:-n_best_size - 1:-1].tolist()\n end_indexes = np.argsort(end_logits)[-1:-n_best_size - 1:-1].tolist()\n for start_index in start_indexes:\n for end_index in end_indexes:\n if start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or (len(offset_mapping[start_index]) < 2) or (offset_mapping[end_index] is None) or (len(offset_mapping[end_index]) < 2):\n continue\n if end_index < start_index or end_index - start_index + 1 > max_answer_length:\n continue\n if token_is_max_context is not None and (not token_is_max_context.get(str(start_index), False)):\n continue\n prelim_predictions.append({'offsets': (offset_mapping[start_index][0], offset_mapping[end_index][1]), 'score': start_logits[start_index] + end_logits[end_index], 'start_logit': start_logits[start_index], 'end_logit': end_logits[end_index]})\n if version_2_with_negative and min_null_prediction is not None:\n prelim_predictions.append(min_null_prediction)\n null_score = min_null_prediction['score']\n predictions = sorted(prelim_predictions, key=lambda x: x['score'], reverse=True)[:n_best_size]\n if version_2_with_negative and min_null_prediction is not None and (not any((p['offsets'] == (0, 0) for p in predictions))):\n predictions.append(min_null_prediction)\n context = example['context']\n for pred in predictions:\n offsets = pred.pop('offsets')\n pred['text'] = context[offsets[0]:offsets[1]]\n if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]['text'] == ''):\n predictions.insert(0, {'text': 'empty', 'start_logit': 0.0, 'end_logit': 0.0, 'score': 0.0})\n scores = np.array([pred.pop('score') for pred in predictions])\n exp_scores = np.exp(scores - np.max(scores))\n probs = exp_scores / exp_scores.sum()\n for prob, pred in zip(probs, predictions):\n pred['probability'] = prob\n if not version_2_with_negative:\n all_predictions[example['id']] = predictions[0]['text']\n else:\n i = 0\n while predictions[i]['text'] == '':\n i += 1\n best_non_null_pred = predictions[i]\n score_diff = null_score - best_non_null_pred['start_logit'] - best_non_null_pred['end_logit']\n scores_diff_json[example['id']] = float(score_diff)\n if score_diff > null_score_diff_threshold:\n all_predictions[example['id']] = ''\n else:\n all_predictions[example['id']] = best_non_null_pred['text']\n all_nbest_json[example['id']] = [{k: float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v for k, v in pred.items()} for pred in predictions]\n if output_dir is not None:\n if not os.path.isdir(output_dir):\n raise OSError(f'{output_dir} is not a directory.')\n prediction_file = os.path.join(output_dir, 'predictions.json' if prefix is None else f'{prefix}_predictions.json')\n nbest_file = os.path.join(output_dir, 'nbest_predictions.json' if prefix is None else f'{prefix}_nbest_predictions.json')\n if version_2_with_negative:\n null_odds_file = os.path.join(output_dir, 'null_odds.json' if prefix is None else f'{prefix}_null_odds.json')\n logger.info(f'Saving predictions to {prediction_file}.')\n with open(prediction_file, 'w') as writer:\n writer.write(json.dumps(all_predictions, indent=4) + '\\n')\n logger.info(f'Saving nbest_preds to {nbest_file}.')\n with open(nbest_file, 'w') as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + '\\n')\n 
if version_2_with_negative:\n logger.info(f'Saving null_odds to {null_odds_file}.')\n with open(null_odds_file, 'w') as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + '\\n')\n return all_predictions", "docstring": "Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the\noriginal contexts. This is the base postprocessing functions for models that only return start and end logits.\n\nArgs:\n examples: The non-preprocessed dataset (see the main script for more information).\n features: The processed dataset (see the main script for more information).\n predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):\n The predictions of the model: two arrays containing the start logits and the end logits respectively. Its\n first dimension must match the number of elements of :obj:`features`.\n version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not the underlying dataset contains examples with no answers.\n n_best_size (:obj:`int`, `optional`, defaults to 20):\n The total number of n-best predictions to generate when looking for an answer.\n max_answer_length (:obj:`int`, `optional`, defaults to 30):\n The maximum length of an answer that can be generated. This is needed because the start and end predictions\n are not conditioned on one another.\n null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):\n The threshold used to select the null answer: if the best answer has a score that is less than the score of\n the null answer minus this threshold, the null answer is selected for this example (note that the score of\n the null answer for an example giving several features is the minimum of the scores for the null answer on\n each feature: all features must be aligned on the fact they `want` to predict a null answer).\n\n Only useful when :obj:`version_2_with_negative` is :obj:`True`.\n output_dir (:obj:`str`, `optional`):\n If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if\n :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null\n answers, are saved in `output_dir`.\n prefix (:obj:`str`, `optional`):\n If provided, the dictionaries mentioned above are saved with `prefix` added to their names.\n log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):\n ``logging`` log level (e.g., ``logging.WARNING``)"} +{"repo": "tensorflow", "function": "def merge(self, accumulators):\n pass", "docstring": "Merge several accumulators to a single accumulator.\n\nThis method takes the partial values in several accumulators and combines\nthem into a single accumulator. This computation must not be order-specific\n(that is, merge([a, b]) must return the same result as merge([b, a]).\n\nArgs:\n accumulators: the accumulators to merge, as a list.\n\nReturns:\n A merged accumulator."} +{"repo": "keras", "function": "def get_word_index(path='reuters_word_index.json'):\n origin_folder = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n path = get_file(path, origin=origin_folder + 'reuters_word_index.json', file_hash='4d44cc38712099c9e383dc6e5f11a921')\n with open(path) as f:\n return json.load(f)", "docstring": "Retrieves a dict mapping words to their index in the Reuters dataset.\n\nActual word indices starts from 3, with 3 indices reserved for:\n0 (padding), 1 (start), 2 (oov).\n\nE.g. 
word index of 'the' is 1, but the in the actual training data, the\nindex of 'the' will be 1 + 3 = 4. Vice versa, to translate word indices in\ntraining data back to words using this mapping, indices need to subtract 3.\n\nArgs:\n path: where to cache the data (relative to `~/.keras/dataset`).\n\nReturns:\n The word index dictionary. Keys are word strings, values are their\n index."} +{"repo": "beam", "function": "def model_custom_sink(simplekv, KVs, final_table_name_no_ptransform, final_table_name_with_ptransform):\n final_table_name = final_table_name_no_ptransform\n with beam.Pipeline(options=PipelineOptions()) as pipeline:\n kvs = pipeline | 'CreateKVs' >> beam.Create(KVs)\n kvs | 'WriteToSimpleKV' >> beam.io.Write(SimpleKVSink(simplekv, 'http://url_to_simple_kv/', final_table_name))\n final_table_name = final_table_name_with_ptransform\n with beam.Pipeline(options=PipelineOptions()) as pipeline:\n kvs = pipeline | 'CreateKVs' >> beam.core.Create(KVs)\n kvs | 'WriteToSimpleKV' >> WriteToKVSink(simplekv, 'http://url_to_simple_kv/', final_table_name)", "docstring": "Demonstrates creating a new custom sink and using it in a pipeline.\n\nUses the new sink in an example pipeline.\n\nAdditionally demonstrates how a sink should be implemented using a\n``PTransform``. This is the recommended way to develop sinks that are to be\ndistributed to a large number of end users.\n\nThis method runs two pipelines.\n\n(1) A pipeline that uses ``SimpleKVSink`` directly using the ``df.Write``\n transform.\n(2) A pipeline that uses a custom ``PTransform`` that wraps\n ``SimpleKVSink``.\n\nArgs:\n simplekv: an object that mocks the key-value storage.\n\n KVs: the set of key-value pairs to be written in the example pipeline.\n\n final_table_name_no_ptransform: the prefix of final set of tables to be\n created by the example pipeline that uses\n ``SimpleKVSink`` directly.\n\n final_table_name_with_ptransform: the prefix of final set of tables to be\n created by the example pipeline that uses\n a ``PTransform`` that wraps\n ``SimpleKVSink``."} +{"repo": "keras", "function": "class ZeroPadding3D(Layer):\n\n def __init__(self, padding=((1, 1), (1, 1), (1, 1)), data_format=None, **kwargs):\n super().__init__(**kwargs)\n self.data_format = backend.standardize_data_format(data_format)\n if isinstance(padding, int):\n self.padding = ((padding, padding), (padding, padding), (padding, padding))\n elif hasattr(padding, '__len__'):\n if len(padding) != 3:\n raise ValueError(f'`padding` should have 3 elements. Received: {padding}.')\n dim1_padding = argument_validation.standardize_tuple(padding[0], 2, '1st entry of padding', allow_zero=True)\n dim2_padding = argument_validation.standardize_tuple(padding[1], 2, '2nd entry of padding', allow_zero=True)\n dim3_padding = argument_validation.standardize_tuple(padding[2], 2, '3rd entry of padding', allow_zero=True)\n self.padding = (dim1_padding, dim2_padding, dim3_padding)\n else:\n raise ValueError(f'`padding` should be either an int, a tuple of 3 ints (symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad), or a tuple of 3 tuples of 2 ints ((left_dim1_pad, right_dim1_pad), (left_dim2_pad, right_dim2_pad), (left_dim3_pad, right_dim2_pad)). 
Received: padding={padding}.')\n self.input_spec = InputSpec(ndim=5)\n\n def compute_output_shape(self, input_shape):\n output_shape = list(input_shape)\n spatial_dims_offset = 2 if self.data_format == 'channels_first' else 1\n for index in range(0, 3):\n if output_shape[index + spatial_dims_offset] is not None:\n output_shape[index + spatial_dims_offset] += self.padding[index][0] + self.padding[index][1]\n return tuple(output_shape)\n\n def call(self, inputs):\n if self.data_format == 'channels_first':\n all_dims_padding = ((0, 0), (0, 0), *self.padding)\n else:\n all_dims_padding = ((0, 0), *self.padding, (0, 0))\n return ops.pad(inputs, all_dims_padding)\n\n def get_config(self):\n config = {'padding': self.padding, 'data_format': self.data_format}\n base_config = super().get_config()\n return {**base_config, **config}", "docstring": "Zero-padding layer for 3D data (spatial or spatio-temporal).\n\nExample:\n\n>>> input_shape = (1, 1, 2, 2, 3)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> y = keras.layers.ZeroPadding3D(padding=2)(x)\n>>> y.shape\n(1, 5, 6, 6, 3)\n\nArgs:\n padding: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.\n - If int: the same symmetric padding is applied to depth, height,\n and width.\n - If tuple of 3 ints: interpreted as three different symmetric\n padding values for depth, height, and width:\n `(symmetric_dim1_pad, symmetric_dim2_pad, symmetric_dim3_pad)`.\n - If tuple of 3 tuples of 2 ints: interpreted as\n `((left_dim1_pad, right_dim1_pad), (left_dim2_pad,\n right_dim2_pad), (left_dim3_pad, right_dim3_pad))`.\n data_format: A string, one of `\"channels_last\"` (default) or\n `\"channels_first\"`. The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.\n When unspecified, uses `image_data_format` value found in your Keras\n config file at `~/.keras/keras.json` (if exists). 
Defaults to\n `\"channels_last\"`.\n\nInput shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_axis_to_pad, second_axis_to_pad,\n third_axis_to_pad, depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_axis_to_pad, second_axis_to_pad,\n third_axis_to_pad)`\n\nOutput shape:\n 5D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, first_padded_axis, second_padded_axis,\n third_axis_to_pad, depth)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, depth, first_padded_axis, second_padded_axis,\n third_axis_to_pad)`"} +{"repo": "transformers", "function": "def tokenize(self, text, never_split=None):\n never_split = self.never_split.union(set(never_split)) if never_split else self.never_split\n text = self._clean_text(text)\n if self.tokenize_chinese_chars:\n text = self._tokenize_chinese_chars(text)\n unicode_normalized_text = unicodedata.normalize('NFC', text)\n orig_tokens = whitespace_tokenize(unicode_normalized_text)\n split_tokens = []\n for token in orig_tokens:\n if token not in never_split:\n if self.do_lower_case:\n token = token.lower()\n if self.strip_accents is not False:\n token = self._run_strip_accents(token)\n elif self.strip_accents:\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token, never_split))\n output_tokens = whitespace_tokenize(' '.join(split_tokens))\n return output_tokens", "docstring": "Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.\n\nArgs:\n never_split (`List[str]`, *optional*)\n Kept for backward compatibility purposes. Now implemented directly at the base class level (see\n [`PreTrainedTokenizer.tokenize`]) List of token not to split."} +{"repo": "transformers", "function": "def fuse_awq_modules(model, quantization_config):\n if isinstance(quantization_config, dict):\n quantization_config = AwqConfig.from_dict(quantization_config)\n backend = quantization_config.backend\n modules_to_fuse = get_modules_to_fuse(model, quantization_config)\n modules_to_not_convert = getattr(quantization_config, 'modules_to_not_convert', None)\n if backend == AwqBackendPackingMethod.AUTOAWQ:\n from awq.modules.fused.attn import QuantAttentionFused\n from awq.modules.fused.mlp import QuantFusedMLP\n from awq.modules.fused.norm import FasterTransformerRMSNorm\n else:\n raise ValueError('Fusing is only supported for the AutoAWQ backend')\n fused_attention_modules = []\n for name, module in model.named_modules():\n if modules_to_not_convert is not None:\n if any((module_name_to_not_convert in name for module_name_to_not_convert in modules_to_not_convert)):\n continue\n _fuse_awq_layernorm(modules_to_fuse['layernorm'], module, FasterTransformerRMSNorm)\n if quantization_config.version != 'ipex':\n _fuse_awq_mlp(model, name, modules_to_fuse['mlp'], module, QuantFusedMLP)\n else:\n logger.info('The IPEX version AWQ does not support fuse mlp for now.')\n attention_has_been_fused = _fuse_awq_attention_layers(model, module, modules_to_fuse, name, QuantAttentionFused)\n if attention_has_been_fused:\n fused_attention_modules.append(name.split('.')[0])\n if len(fused_attention_modules) > 0:\n for module_name, module in model.named_modules():\n if any((module_name in fused_attention_modules for fused_attention_parent_module in fused_attention_modules)):\n if hasattr(module, 'config') and hasattr(module.config, '_attn_implementation'):\n module.config._attn_implementation = 'custom'\n return model", 
"docstring": "Optionally fuse some modules in the model to speedup inference.\n\nArgs:\n model (`~PreTrainedModel`):\n The model to fuse - note this model should have been converted into AWQ format beforehand.\n quantization_config (`Union[AwqConfig, dict]`):\n The quantization configuration to use."} +{"repo": "tensorflow", "function": "def from_proto(context_def, import_scope=None):\n ret = WhileContext(context_def=context_def, import_scope=import_scope)\n ret.Enter()\n for nested_def in context_def.nested_contexts:\n from_control_flow_context_def(nested_def, import_scope=import_scope)\n ret.Exit()\n return ret", "docstring": "Returns a `WhileContext` object created from `context_def`.\n\nArgs:\n context_def: A `WhileContextDef` protocol buffer.\n import_scope: Optional `string`. Name scope to add.\n\nReturns:\n A `WhileContext` Python object."} +{"repo": "mobly", "function": "def filter_devices(ads, func):\n results = []\n for ad in ads:\n if func(ad):\n results.append(ad)\n return results", "docstring": "Finds the AndroidDevice instances from a list that match certain\nconditions.\n\nArgs:\n ads: A list of AndroidDevice instances.\n func: A function that takes an AndroidDevice object and returns True\n if the device satisfies the filter condition.\n\nReturns:\n A list of AndroidDevice instances that satisfy the filter condition."} +{"repo": "transformers", "function": "def backward(ctx, grad_at_output: torch.Tensor):\n multiplier, selected_experts, masked_gates = ctx.saved_tensors\n grad_at_output = grad_at_output * multiplier\n grad_at_scores_expanded = masked_gates * grad_at_output.mul(-1)\n grad_at_scores_expanded.scatter_add_(dim=-1, index=selected_experts, src=grad_at_output)\n return (grad_at_scores_expanded, None, None, None, None)", "docstring": "Backward pass for the custom autograd function.\n\nArgs:\n ctx: Context object with saved tensors from the forward pass.\n grad_at_output (torch.Tensor): Gradient at the output.\n\nReturns:\n Tuple[torch.Tensor, None, None, None, None]: Gradients for the inputs."} +{"repo": "transformers", "function": "def get_multimodal_features(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, return_dict: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=True, output_hidden_states=True, return_dict=return_dict, interpolate_pos_encoding=interpolate_pos_encoding)\n image_embeds = vision_outputs[0]\n image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long)\n text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=return_dict)\n pooled_output = text_outputs[1]\n multimodal_features = self.text_projection(pooled_output)\n return multimodal_features", "docstring": "Returns:\n multimodal_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The multimodal embeddings\n obtained by applying the image embeddings to the text encoder using the cross-attention mechanism.\n\nExamples:\n```python\n>>> from PIL import Image\n>>> import requests\n>>> from transformers import AutoProcessor, BlipModel\n\n>>> model = BlipModel.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n>>> processor = 
AutoProcessor.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n>>> texts = [\"a photo of a cat\", \"a photo of a dog\"]\n>>> inputs = processor(images=image, text=texts, padding=True, return_tensors=\"pt\")\n\n>>> multimodal_features = model.get_multimodal_features(**inputs)\n```"} +{"repo": "transformers", "function": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is not None:\n return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n return [1] + [0] * len(token_ids_0) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token."} +{"repo": "pytype", "function": "def And(exprs):\n return simplify_exprs(exprs, _And, FALSE, TRUE)", "docstring": "Create a conjunction or its simplified equivalent.\n\nThis will ensure that, when an _And is returned, none of its immediate\nsubterms is TRUE, FALSE, or another conjunction.\n\nArgs:\n exprs: An iterable. The subterms.\n\nReturns:\n A BooleanTerm."} +{"repo": "tensorflow", "function": "def res_call(self, ns, types_ns, node, f_type, args, keywords):\n raise NotImplementedError('subclasses must implement')", "docstring": "Resolves the return type an external function or method call.\n\nArgs:\n ns: namespace\n types_ns: types namespace\n node: str, the function name\n f_type: types of the actual function being called, if known\n args: types of each respective argument in node.args\n keywords: types of each respective argument in node.keywords\n\nReturns:\n Tuple (return_type, side_effect_types). The first element is just the\n return types of the function. The second element is a map from\n argument names to sets of types, and allow modelling side effects of\n functions (for example via global or nonlocal)."} +{"repo": "transformers", "function": "class TFGreedySearchEncoderDecoderOutput(ModelOutput):\n sequences: Optional[tf.Tensor] = None\n scores: Optional[Tuple[tf.Tensor]] = None\n encoder_attentions: Optional[Tuple[tf.Tensor]] = None\n encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None\n decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None\n cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None\n decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None", "docstring": "Base class for outputs of encoder-decoder generation models using greedy search. 
Hidden states and attention\nweights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the\nencoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)\n\n\nArgs:\n sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter\n if all batches finished early due to the `eos_token_id`.\n scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):\n Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)\n at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each\n generated token), with each tensor of shape `(batch_size, config.vocab_size)`.\n encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):\n Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of\n `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.\n cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):\n Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of\n `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.\n decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of\n `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`."} +{"repo": "transformers", "function": "def tokenize(self, text, never_split=None):\n never_split = self.never_split.union(set(never_split)) if never_split else self.never_split\n text = self._clean_text(text)\n if self.tokenize_chinese_chars:\n text = self._tokenize_chinese_chars(text)\n unicode_normalized_text = unicodedata.normalize('NFC', text)\n orig_tokens = whitespace_tokenize(unicode_normalized_text)\n split_tokens = []\n for token in orig_tokens:\n if token not in never_split:\n if self.do_lower_case:\n token = token.lower()\n if self.strip_accents is not False:\n token = self._run_strip_accents(token)\n elif self.strip_accents:\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token, never_split))\n output_tokens = whitespace_tokenize(' '.join(split_tokens))\n return output_tokens", "docstring": "Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.\n\nArgs:\n never_split (`List[str]`, *optional*)\n Kept for backward compatibility purposes. 
Now implemented directly at the base class level (see\n [`PreTrainedTokenizer.tokenize`]). List of tokens not to split."} +{"repo": "tensorflow", "function": "def where_v2(condition, x=None, y=None, name=None):\n if x is None and y is None:\n with ops.name_scope(name, 'Where', [condition]) as name:\n condition = ops.convert_to_tensor(condition, preferred_dtype=dtypes.bool, name='condition')\n return gen_array_ops.where(condition=condition, name=name)\n elif x is not None and y is not None:\n return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)\n else:\n raise ValueError('x and y must both be non-None or both be None.')", "docstring": "Returns the indices of non-zero elements, or multiplexes `x` and `y`.\n\nThis operation has two modes:\n\n1. **Return the indices of non-zero elements** - When only\n `condition` is provided the result is an `int64` tensor where each row is\n the index of a non-zero element of `condition`. The result's shape\n is `[tf.math.count_nonzero(condition), tf.rank(condition)]`.\n2. **Multiplex `x` and `y`** - When both `x` and `y` are provided the\n result has the shape of `x`, `y`, and `condition` broadcast together. The\n result is taken from `x` where `condition` is non-zero\n or `y` where `condition` is zero.\n\n#### 1. Return the indices of non-zero elements\n\nNote: In this mode `condition` can have a dtype of `bool` or any numeric\ndtype.\n\nIf `x` and `y` are not provided (both are None):\n\n`tf.where` will return the indices of `condition` that are non-zero,\nin the form of a 2-D tensor with shape `[n, d]`, where `n` is the number of\nnon-zero elements in `condition` (`tf.count_nonzero(condition)`), and `d` is\nthe number of axes of `condition` (`tf.rank(condition)`).\n\nIndices are output in row-major order. The `condition` can have a `dtype` of\n`tf.bool`, or any numeric `dtype`.\n\nHere `condition` is a 1-axis `bool` tensor with 2 `True` values. The result\nhas a shape of `[2,1]`\n\n>>> tf.where([True, False, False, True]).numpy()\narray([[0],\n [3]])\n\nHere `condition` is a 2-axis integer tensor, with 3 non-zero values. The\nresult has a shape of `[3, 2]`.\n\n>>> tf.where([[1, 0, 0], [1, 0, 1]]).numpy()\narray([[0, 0],\n [1, 0],\n [1, 2]])\n\nHere `condition` is a 3-axis float tensor, with 5 non-zero values. The output\nshape is `[5, 3]`.\n\n>>> float_tensor = [[[0.1, 0], [0, 2.2], [3.5, 1e6]],\n... [[0, 0], [0, 0], [99, 0]]]\n>>> tf.where(float_tensor).numpy()\narray([[0, 0, 0],\n [0, 1, 1],\n [0, 2, 0],\n [0, 2, 1],\n [1, 2, 0]])\n\nThese are the same indices that `tf.sparse.SparseTensor` would use to\nrepresent the condition tensor:\n\n>>> sparse = tf.sparse.from_dense(float_tensor)\n>>> sparse.indices.numpy()\narray([[0, 0, 0],\n [0, 1, 1],\n [0, 2, 0],\n [0, 2, 1],\n [1, 2, 0]])\n\nA complex number is considered non-zero if either the real or imaginary\ncomponent is non-zero:\n\n>>> tf.where([complex(0.), complex(1.), 0+1j, 1+1j]).numpy()\narray([[1],\n [2],\n [3]])\n\n#### 2.
Multiplex `x` and `y`\n\nNote: In this mode `condition` must have a dtype of `bool`.\n\nIf `x` and `y` are also provided (both have non-None values) the `condition`\ntensor acts as a mask that chooses whether the corresponding\nelement / row in the output should be taken from `x` (if the element in\n`condition` is `True`) or `y` (if it is `False`).\n\nThe shape of the result is formed by\n[broadcasting](https://docs.scipy.org/doc/numpy/reference/ufuncs.html)\ntogether the shapes of `condition`, `x`, and `y`.\n\nWhen all three inputs have the same size, each is handled element-wise.\n\n>>> tf.where([True, False, False, True],\n... [1, 2, 3, 4],\n... [100, 200, 300, 400]).numpy()\narray([ 1, 200, 300, 4], dtype=int32)\n\nThere are two main rules for broadcasting:\n\n1. If a tensor has fewer axes than the others, length-1 axes are added to the\n left of the shape.\n2. Axes with length-1 are stretched to match the corresponding axes of the other\n tensors.\n\nA length-1 vector is stretched to match the other vectors:\n\n>>> tf.where([True, False, False, True], [1, 2, 3, 4], [100]).numpy()\narray([ 1, 100, 100, 4], dtype=int32)\n\nA scalar is expanded to match the other arguments:\n\n>>> tf.where([[True, False], [False, True]], [[1, 2], [3, 4]], 100).numpy()\narray([[ 1, 100], [100, 4]], dtype=int32)\n>>> tf.where([[True, False], [False, True]], 1, 100).numpy()\narray([[ 1, 100], [100, 1]], dtype=int32)\n\nA scalar `condition` returns the complete `x` or `y` tensor, with\nbroadcasting applied.\n\n>>> tf.where(True, [1, 2, 3, 4], 100).numpy()\narray([1, 2, 3, 4], dtype=int32)\n>>> tf.where(False, [1, 2, 3, 4], 100).numpy()\narray([100, 100, 100, 100], dtype=int32)\n\nFor a non-trivial example of broadcasting, here `condition` has a shape of\n`[3]`, `x` has a shape of `[3,3]`, and `y` has a shape of `[3,1]`.\nBroadcasting first expands the shape of `condition` to `[1,3]`. The final\nbroadcast shape is `[3,3]`. `condition` will select columns from `x` and `y`.\nSince `y` only has one column, all columns from `y` will be identical.\n\n>>> tf.where([True, False, True],\n... x=[[1, 2, 3],\n... [4, 5, 6],\n... [7, 8, 9]],\n... y=[[100],\n... [200],\n... [300]]\n... ).numpy()\narray([[ 1, 100, 3],\n [ 4, 200, 6],\n [ 7, 300, 9]], dtype=int32)\n\nNote that if the gradient of either branch of the `tf.where` generates\na `NaN`, then the gradient of the entire `tf.where` will be `NaN`. This is\nbecause the gradient calculation for `tf.where` combines the two branches, for\nperformance reasons.\n\nA workaround is to use an inner `tf.where` to ensure the function has\nno asymptote, and to avoid computing a value whose gradient is `NaN` by\nreplacing dangerous inputs with safe inputs.\n\nInstead of this,\n\n>>> x = tf.constant(0., dtype=tf.float32)\n>>> with tf.GradientTape() as tape:\n... tape.watch(x)\n... y = tf.where(x < 1., 0., 1. / x)\n>>> print(tape.gradient(y, x))\ntf.Tensor(nan, shape=(), dtype=float32)\n\nAlthough the `1. / x` values are never used, their gradient is `NaN` when\n`x = 0`. Instead, we should guard that with another `tf.where`\n\n>>> x = tf.constant(0., dtype=tf.float32)\n>>> with tf.GradientTape() as tape:\n... tape.watch(x)\n... safe_x = tf.where(tf.equal(x, 0.), 1., x)\n... y = tf.where(x < 1., 0., 1.
/ safe_x)\n>>> print(tape.gradient(y, x))\ntf.Tensor(0.0, shape=(), dtype=float32)\n\nSee also:\n\n* `tf.sparse` - The indices returned by the first form of `tf.where` can be\n useful in `tf.sparse.SparseTensor` objects.\n* `tf.gather_nd`, `tf.scatter_nd`, and related ops - Given the\n list of indices returned from `tf.where` the `scatter` and `gather` family\n of ops can be used to fetch values or insert values at those indices.\n* `tf.strings.length` - `tf.string` is not an allowed dtype for the\n `condition`. Use the string length instead.\n\nArgs:\n condition: A `tf.Tensor` of dtype bool, or any numeric dtype. `condition`\n must have dtype `bool` when `x` and `y` are provided.\n x: If provided, a Tensor which is of the same type as `y`, and has a shape\n broadcastable with `condition` and `y`.\n y: If provided, a Tensor which is of the same type as `x`, and has a shape\n broadcastable with `condition` and `x`.\n name: A name of the operation (optional).\n\nReturns:\n If `x` and `y` are provided:\n A `Tensor` with the same type as `x` and `y`, and shape that\n is broadcast from `condition`, `x`, and `y`.\n Otherwise, a `Tensor` with shape `[tf.math.count_nonzero(condition),\n tf.rank(condition)]`.\n\nRaises:\n ValueError: When exactly one of `x` or `y` is non-None, or the shapes\n are not all broadcastable."} +{"repo": "tensorflow", "function": "def get_next(self, name=None):\n self._get_next_call_count += 1\n if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:\n warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)\n with ops.colocate_with(self._iterator_resource):\n flat_ret = gen_dataset_ops.iterator_get_next(self._iterator_resource, output_types=self._flat_tensor_types, output_shapes=self._flat_tensor_shapes, name=name)\n return structure.from_tensor_list(self._element_spec, flat_ret)", "docstring": "Returns the next element.\n\nIn graph mode, you should typically call this method *once* and use its\nresult as the input to another computation. A typical loop will then call\n`tf.Session.run` on the result of that computation. The loop will terminate\nwhen the `Iterator.get_next()` operation raises\n`tf.errors.OutOfRangeError`. The following skeleton shows how to use\nthis method when building a training loop:\n\n```python\ndataset = ... # A `tf.data.Dataset` object.\niterator = dataset.make_initializable_iterator()\nnext_element = iterator.get_next()\n\n# Build a TensorFlow graph that does something with each element.\nloss = model_function(next_element)\noptimizer = ... # A `tf.compat.v1.train.Optimizer` object.\ntrain_op = optimizer.minimize(loss)\n\nwith tf.compat.v1.Session() as sess:\n try:\n while True:\n sess.run(train_op)\n except tf.errors.OutOfRangeError:\n pass\n```\n\nNOTE: It is legitimate to call `Iterator.get_next()` multiple times, e.g.\nwhen you are distributing different elements to multiple devices in a single\nstep. However, a common pitfall arises when users call `Iterator.get_next()`\nin each iteration of their training loop. `Iterator.get_next()` adds ops to\nthe graph, and executing each op allocates resources (including threads); as\na consequence, invoking it in every iteration of a training loop causes\nslowdown and eventual resource exhaustion. To guard against this outcome, we\nlog a warning when the number of uses crosses a fixed threshold of\nsuspiciousness.\n\nArgs:\n name: (Optional.)
A name for the created operation.\n\nReturns:\n A (nested) structure of values matching `tf.data.Iterator.element_spec`."} +{"repo": "tensorflow", "function": "class Adam(Optimizer):\n\n def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False, **kwargs):\n super(Adam, self).__init__(**kwargs)\n with backend.name_scope(self.__class__.__name__):\n self.iterations = backend.variable(0, dtype='int64', name='iterations')\n self.lr = backend.variable(lr, name='lr')\n self.beta_1 = backend.variable(beta_1, name='beta_1')\n self.beta_2 = backend.variable(beta_2, name='beta_2')\n self.decay = backend.variable(decay, name='decay')\n if epsilon is None:\n epsilon = backend.epsilon()\n self.epsilon = epsilon\n self.initial_decay = decay\n self.amsgrad = amsgrad\n\n def _create_all_weights(self, params):\n ms = [backend.zeros(backend.int_shape(p), dtype=backend.dtype(p)) for p in params]\n vs = [backend.zeros(backend.int_shape(p), dtype=backend.dtype(p)) for p in params]\n if self.amsgrad:\n vhats = [backend.zeros(backend.int_shape(p), dtype=backend.dtype(p)) for p in params]\n else:\n vhats = [backend.zeros(1) for _ in params]\n self.weights = [self.iterations] + ms + vs + vhats\n return (ms, vs, vhats)\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = []\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * (1.0 / (1.0 + self.decay * math_ops.cast(self.iterations, backend.dtype(self.decay))))\n with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):\n t = math_ops.cast(self.iterations, backend.floatx())\n lr_t = lr * (backend.sqrt(1.0 - math_ops.pow(self.beta_2, t)) / (1.0 - math_ops.pow(self.beta_1, t)))\n ms, vs, vhats = self._create_all_weights(params)\n for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):\n m_t = self.beta_1 * m + (1.0 - self.beta_1) * g\n v_t = self.beta_2 * v + (1.0 - self.beta_2) * math_ops.square(g)\n if self.amsgrad:\n vhat_t = math_ops.maximum(vhat, v_t)\n p_t = p - lr_t * m_t / (backend.sqrt(vhat_t) + self.epsilon)\n self.updates.append(state_ops.assign(vhat, vhat_t))\n else:\n p_t = p - lr_t * m_t / (backend.sqrt(v_t) + self.epsilon)\n self.updates.append(state_ops.assign(m, m_t))\n self.updates.append(state_ops.assign(v, v_t))\n new_p = p_t\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {'lr': float(backend.get_value(self.lr)), 'beta_1': float(backend.get_value(self.beta_1)), 'beta_2': float(backend.get_value(self.beta_2)), 'decay': float(backend.get_value(self.decay)), 'epsilon': self.epsilon, 'amsgrad': self.amsgrad}\n base_config = super(Adam, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "docstring": "Adam optimizer.\n\nDefault parameters follow those provided in the original paper.\n\nArgs:\n lr: float >= 0. Learning rate.\n beta_1: float, 0 < beta < 1. Generally close to 1.\n beta_2: float, 0 < beta < 1. Generally close to 1.\n epsilon: float >= 0. Fuzz factor.\n If `None`, defaults to `backend.epsilon()`.\n decay: float >= 0. Learning rate decay over each update.\n amsgrad: boolean. 
Whether to apply the AMSGrad variant of this algorithm\n from the paper \"On the Convergence of Adam and Beyond\"."} +{"repo": "pyglove", "function": "def _html_tree_view_render(self, *, view: 'HtmlTreeView', name: Optional[str]=None, parent: Any=None, root_path: Optional[KeyPath]=None, **kwargs) -> Html:\n return self._html_tree_view(view=view, name=name, parent=parent, root_path=root_path, **view.get_kwargs(kwargs, self._html_tree_view_config(), root_path or KeyPath())).add_style(*self._html_tree_view_css_styles())", "docstring": "The entrypoint of rendering the subtree represented by this extension.\n\nArgs:\n view: The view to render the object.\n name: The name of the object.\n parent: The parent of the object.\n root_path: The key path of the object relative to the root.\n **kwargs: kwargs to pass to `view.render()` on this extension.\n\nReturns:\n The rendered HTML."} +{"repo": "transformers", "function": "class TableTransformerModelOutput(Seq2SeqModelOutput):\n intermediate_hidden_states: Optional[torch.FloatTensor] = None", "docstring": "Base class for outputs of the TABLE_TRANSFORMER encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput,\nnamely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them\ngone through a layernorm. This is useful when training the model with auxiliary decoding losses.\n\nArgs:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the decoder of the model.\n decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each\n layer plus the initial embedding outputs.\n decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the\n weighted average in the self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,\n used to compute the weighted average in the cross-attention heads.\n encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`. 
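Returning to the Keras `Adam` entry above: `get_updates` folds the bias correction into an effective learning rate `lr_t = lr * sqrt(1 - beta_2**t) / (1 - beta_1**t)` before the moment-based step. Below is a minimal NumPy sketch of one such update under the default hyper-parameters (all names and values here are illustrative, not part of the original entry):

```python
import numpy as np

def adam_step(p, g, m, v, t, lr=0.001, beta_1=0.9, beta_2=0.999, eps=1e-7):
    m = beta_1 * m + (1.0 - beta_1) * g        # first-moment estimate
    v = beta_2 * v + (1.0 - beta_2) * g * g    # second-moment estimate
    lr_t = lr * np.sqrt(1.0 - beta_2 ** t) / (1.0 - beta_1 ** t)
    p = p - lr_t * m / (np.sqrt(v) + eps)      # bias-corrected update
    return p, m, v

p = np.array([1.0, -2.0])
m, v = np.zeros_like(p), np.zeros_like(p)
for t in range(1, 4):            # three toy steps on f(p) = sum(p**2)
    p, m, v = adam_step(p, 2.0 * p, m, v, t)
print(p)
```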
Hidden-states of the encoder at the output of each\n layer plus the initial embedding outputs.\n encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the\n weighted average in the self-attention heads.\n intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):\n Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a\n layernorm."} +{"repo": "transformers", "function": "def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[PaliGemmaProcessorKwargs]) -> BatchFeature:\n images, text = _validate_images_text_input_order(images, text)\n output_kwargs = self._merge_kwargs(PaliGemmaProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n suffix = output_kwargs['text_kwargs'].pop('suffix', None)\n return_token_type_ids = True if suffix is not None else False\n if images is None:\n raise ValueError('`images` are expected as arguments to a `PaliGemmaProcessor` instance.')\n if text is None:\n logger.warning_once('You are using PaliGemma without a text prefix. It will perform as a picture-captioning model.')\n text = ''\n if _is_str_or_image(text):\n text = [text]\n elif isinstance(text, list) and _is_str_or_image(text[0]):\n pass\n if text is not None and images is not None:\n if not any((IMAGE_TOKEN in sample for sample in text)):\n logger.warning('You are passing both `text` and `images` to `PaliGemmaProcessor`. The processor expects special image tokens in the text, as many tokens as there are images per each text. It is recommended to add `<image>` tokens in the very beginning of your text. For this call, we will infer how many images each text has and add special tokens.')\n if isinstance(text, List) and isinstance(images, List):\n if len(images) != len(text):\n raise ValueError(f'Received {len(images)} images for {len(text)} prompts.
Each prompt should be associated with an image or list of images.')\n if is_valid_image(images):\n images = [[images]]\n elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):\n images = [[image] for image in images]\n elif not (isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0])):\n raise ValueError('images must be an image, list of images or list of list of images')\n input_strings = [build_string_from_input(prompt=prompt, bos_token=self.tokenizer.bos_token, image_seq_len=self.image_seq_length, image_token=IMAGE_TOKEN, num_images=len(image_list) if isinstance(image_list, list) else 1) for prompt, image_list in zip(text, images)]\n images = make_flat_list_of_images(images)\n else:\n expanded_samples = []\n for sample in text:\n expanded_sample = sample.replace(IMAGE_TOKEN, IMAGE_TOKEN * self.image_seq_length)\n bos_rfind_index = expanded_sample.rfind(IMAGE_TOKEN)\n bos_index = bos_rfind_index + len(IMAGE_TOKEN) if bos_rfind_index != -1 else 0\n expanded_sample = expanded_sample[:bos_index] + self.tokenizer.bos_token + expanded_sample[bos_index:]\n expanded_samples.append(expanded_sample)\n input_strings = [f'{sample}\\n' for sample in expanded_samples]\n if suffix is not None and _is_str_or_image(suffix):\n suffix = [suffix]\n if suffix is not None:\n suffix = [sfx + self.tokenizer.eos_token for sfx in suffix]\n pixel_values = self.image_processor(images, **output_kwargs['images_kwargs'])['pixel_values']\n return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)\n return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', None)\n inputs = self.tokenizer(input_strings, text_pair=suffix, return_token_type_ids=return_token_type_ids, **output_kwargs['text_kwargs'])\n self._check_special_mm_tokens(input_strings, inputs, modalities=['image'])\n return_data = {**inputs, 'pixel_values': pixel_values}\n if return_token_type_ids:\n labels = np.array(inputs['input_ids'])\n labels[np.array(inputs['token_type_ids']) == 0] = -100\n return_data.update({'labels': labels})\n if return_mm_token_type_ids:\n array_ids = np.array(return_data['input_ids'])\n mm_token_type_ids = np.zeros_like(return_data['input_ids'])\n mm_token_type_ids[array_ids == self.image_token_id] = 1\n return_data['mm_token_type_ids'] = mm_token_type_ids.tolist()\n return BatchFeature(data=return_data, tensor_type=return_tensors)", "docstring": "Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`\nand `kwargs` arguments to GemmaTokenizerFast's [`~GemmaTokenizerFast.__call__`] if `text` is not `None` to encode\nthe text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to\nSiglipImageProcessor's [`~SiglipImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring\nof the above two methods for more information.\n\nThe usage for PaliGemma fine-tuning preparation is slightly different from usual: the `suffix` passed contains suffixes to\nthe prompt in `text`, and will be placed after the prompt. This is because attention is handled differently for\nthe prefix and the suffix.
For instance,\n```python\nimage = PIL_cow_image\nprompt = \"answer en Where is the cow standing?\"\nsuffix = \"on the beach\"\ninputs = processor(text=prompt, images=image, suffix=suffix)\n```\nHere `inputs` will contain the `input_ids` and `token_type_ids` that follow\n```python\ninputs[\"input_ids\"][:, 256:]\n# tensor([[ 2, 6006, 603, 573, 13910, 9980, 235336, 108, 477, 573, 8318]])\ninputs[\"token_type_ids\"][:, 256:]\n# tensor([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]])\n```\nMeaning the last three tokens are of \"label\" (\"suffix\") type while the other ones are of \"prefix\" type.\n\n\nArgs:\n images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):\n The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a\n number of channels, H and W are image height and width.\n text (`str`, `List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set\n `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors of a particular framework. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return NumPy `np.ndarray` objects.\n - `'jax'`: Return JAX `jnp.ndarray` objects.\n suffix (`str`, `List[str]`, `List[List[str]]`):\n The suffixes or batch of suffixes to be encoded. Only necessary for finetuning. See https://github.com/google-research/big_vision/blob/main/big_vision/configs/proj/paligemma/README.md\n for more information. If your prompt is \"<image> What is on the image\", the suffix corresponds to the expected prediction \"a cow sitting on a bench\".\n\nReturns:\n [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n\n - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. If `suffix`\n is provided, the `input_ids` will also contain the suffix input ids.\n - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when\n `return_attention_mask=True` or if *\"attention_mask\"* is in `self.model_input_names` and if `text` is not\n `None`).\n - **pixel_values** -- Pixel values to be fed to a model.
Returned when `images` is not `None`.\n - **labels** -- Labels compatible with training if `suffix` is not None"} +{"repo": "transformers", "function": "class RelativePositionBiasBase(nn.Module, ABC):\n\n def __init__(self, num_heads=None, relative_attention_num_buckets=32, bidirectional=True, scaling_factor=1, max_distance=128, level='tokens', augmentation=False, prefix_bucket=False, expand=False):\n super(RelativePositionBiasBase, self).__init__()\n self.prefix_bucket = prefix_bucket\n self.augmentation = augmentation\n self.level = level\n self.max_distance = max_distance\n self.scaling_factor = scaling_factor\n self.bidirectional = bidirectional\n self.num_heads = num_heads\n self.expand = expand\n self.relative_attention_num_buckets = relative_attention_num_buckets\n extra_head = 2 if prefix_bucket and (not self.expand) else 0\n self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets + extra_head, self.num_heads)\n\n @abstractmethod\n def prepare_input(self, attention_mask: Optional[Tensor]=None, bbox: Optional[Dict[str, Any]]=None) -> Tensor:\n pass\n\n def get_bucket(self, attention_mask: Optional[Tensor]=None, bbox: Optional[Dict[str, Any]]=None) -> Tensor:\n relative_position = self.prepare_input(attention_mask, bbox)\n rp_bucket: Tensor = get_relative_position_bucket(relative_position, bidirectional=self.bidirectional, num_buckets=self.relative_attention_num_buckets, max_distance=self.max_distance)\n return rp_bucket\n\n def get_relative_position(self, positions):\n context_position = positions[:, :, None]\n memory_position = positions[:, None, :]\n relative_position = memory_position - context_position\n if self.augmentation and self.training:\n relative_position *= random.uniform(*AUGMENTATION_RANGE)\n relative_position *= self.scaling_factor\n return relative_position.to(torch.long)\n\n def forward(self, attention_mask: Optional[Tensor]=None, bbox: Optional[Dict[str, Any]]=None) -> Tensor:\n if self.expand and self.prefix_bucket:\n new_bias = nn.Embedding(self.relative_attention_num_buckets + 2, self.num_heads)\n new_bias.weight.data[:self.relative_attention_num_buckets] = self.relative_attention_bias.weight.data\n new_bias.weight.data[self.relative_attention_num_buckets:] = 0.1\n self.relative_attention_bias = new_bias\n self.expand = False\n rp_bucket = self.get_bucket(attention_mask, bbox)\n if self.prefix_bucket:\n if rp_bucket.size(0) == 1 and attention_mask.size(0) > 1:\n rp_bucket = rp_bucket.repeat(attention_mask.size(0), 1, 1)\n is_prefix = bbox[:, :, 1] < 0\n num_prefix = is_prefix.sum(-1)\n for idx, num_prefix_row in enumerate(num_prefix.cpu().numpy()):\n rp_bucket[idx, :num_prefix_row, num_prefix_row:] = self.relative_attention_num_buckets\n rp_bucket[idx, num_prefix_row:, :num_prefix_row] = self.relative_attention_num_buckets + 1\n values: Tensor = self.relative_attention_bias(rp_bucket)\n if values.dim() != 4:\n raise ValueError('Wrong dimension of values tensor')\n values = values.permute([0, 3, 1, 2])\n return values", "docstring": "Base class of relative biases.\n\nArgs:\n num_heads (`int`):\n Number of attention heads in the model, it will create embeddings of size `num_heads`, which will be added to the scores of each token pair.\n relative_attention_num_buckets (`int`, *optional*, defaults to 32):\n Pair token metric (distance in the sequence, distance in pixels etc.) 
will be bucketed; this parameter defines the number of such\n buckets.\n bidirectional (`bool`, *optional*, defaults to `True`):\n Whether the distance should be bidirectional for a pair of tokens. If `False`, then distance(tok1, tok2) == distance(tok2, tok1).\n scaling_factor (`int`, *optional*, defaults to 1):\n Factor which will be used to scale the relative distance.\n max_distance (`int`, *optional*, defaults to 128):\n All distances above this value will end up in the same bucket.\n augmentation (`bool`, *optional*, defaults to `False`):\n Whether to multiply relative distances by a random scalar.\n expand (`bool`, *optional*, defaults to `False`):\n Whether to expand an existing pretrained model with subsequent additions of prefix_bucket."} +{"repo": "transformers", "function": "def get_image_features(self, pixel_values=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n pooled_output = vision_outputs[1]\n image_features = self.visual_projection(pooled_output)\n return image_features", "docstring": "Returns:\n image_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by applying\n the projection layer to the pooled output of [`TFCLIPVisionModel`].\n\nExamples:\n\n```python\n>>> from PIL import Image\n>>> import requests\n>>> from transformers import TFVisionTextDualEncoderModel, AutoImageProcessor\n\n>>> model = TFVisionTextDualEncoderModel.from_pretrained(\"clip-italian/clip-italian\", from_pt=True)\n>>> image_processor = AutoImageProcessor.from_pretrained(\"google/vit-base-patch16-224\")\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> inputs = image_processor(images=image, return_tensors=\"np\")\n\n>>> image_features = model.get_image_features(**inputs)\n```"} +{"repo": "pytype", "function": "def list_pytype_files(suffix):\n assert not suffix.endswith('/')\n loader = globals().get('__loader__', None)\n try:\n filenames = loader.get_zipfile().namelist()\n except AttributeError:\n yield from list_files(get_full_path(suffix))\n else:\n for filename in filenames:\n directory = 'pytype/' + suffix + '/'\n try:\n i = filename.rindex(directory)\n except ValueError:\n pass\n else:\n yield filename[i + len(directory):]", "docstring": "Recursively get the contents of a directory in the pytype installation.\n\nThis reports files in said directory as well as all subdirectories of it.\n\nArguments:\n suffix: the path, relative to \"pytype/\"\n\nYields:\n The filenames, relative to pytype/{suffix}\nRaises:\n NoSuchDirectory: if the directory doesn't exist."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n residual = hidden_states\n if self.do_layer_norm_before:\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states, self_attn_weights, present_key_value =
self.self_attn(hidden_states=hidden_states, past_key_value=past_key_value, position_ids=position_ids, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position, **kwargs)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n if not self.do_layer_norm_before:\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states_shape = hidden_states.shape\n hidden_states = hidden_states.reshape(-1, hidden_states.size(-1))\n residual = hidden_states\n if self.do_layer_norm_before:\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.fc1(hidden_states)\n hidden_states = self.activation_fn(hidden_states)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = (residual + hidden_states).view(hidden_states_shape)\n if not self.do_layer_norm_before:\n hidden_states = self.final_layer_norm(hidden_states)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n if use_cache:\n outputs += (present_key_value,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence."} +{"repo": "tensorflow", "function": "def master(self, task_type=None, task_id=None, rpc_layer=None):\n task_type = task_type if task_type is not None else self.task_type\n task_id = task_id if task_id is not None else self.task_id\n if task_type is not None and task_id is not None:\n return format_master_url(self.cluster_spec().task_address(task_type, task_id), rpc_layer or self.rpc_layer)\n return ''", "docstring": "Returns the master string for connecting to a TensorFlow master.\n\nArgs:\n task_type: (Optional) Overrides the default auto-selected task type.\n task_id: (Optional) Overrides the default auto-selected task index.\n rpc_layer: (Optional) Overrides the default RPC protocol TensorFlow uses\n to communicate across nodes.\n\nReturns:\n A connection string for connecting to a TensorFlow master."} +{"repo": "transformers", "function": "class DetaImageProcessor(BaseImageProcessor):\n model_input_names = ['pixel_values', 'pixel_mask']\n\n def __init__(self, format: Union[str, AnnotationFormat]=AnnotationFormat.COCO_DETECTION, do_resize: bool=True, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_annotations: bool=True, do_pad: bool=True, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> None:\n if 'pad_and_return_pixel_mask' in kwargs:\n do_pad = kwargs.pop('pad_and_return_pixel_mask')\n size = size if size is not None else {'shortest_edge': 800, 'longest_edge': 1333}\n size = get_size_dict(size, default_to_square=False)\n if do_convert_annotations is None:\n do_convert_annotations = do_normalize\n super().__init__(**kwargs)\n self.format = format\n self.do_resize = do_resize\n self.size = size\n self.resample = resample\n self.do_rescale = do_rescale\n self.rescale_factor = rescale_factor\n self.do_normalize = do_normalize\n self.do_convert_annotations = do_convert_annotations\n self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN\n self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD\n self.do_pad = do_pad\n self.pad_size = pad_size\n\n def prepare_annotation(self, image: np.ndarray, target: Dict, format: Optional[AnnotationFormat]=None, return_segmentation_masks: Optional[bool]=None, masks_path: Optional[Union[str, pathlib.Path]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Dict:\n \"\"\"\n Prepare an annotation for feeding into DETA model.\n \"\"\"\n format = format if format is not None else self.format\n if format == AnnotationFormat.COCO_DETECTION:\n return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks\n target = prepare_coco_detection_annotation(image, target, return_segmentation_masks, input_data_format=input_data_format)\n elif format == AnnotationFormat.COCO_PANOPTIC:\n return_segmentation_masks = True if
return_segmentation_masks is None else return_segmentation_masks\n target = prepare_coco_panoptic_annotation(image, target, masks_path=masks_path, return_masks=return_segmentation_masks, input_data_format=input_data_format)\n else:\n raise ValueError(f'Format {format} is not supported.')\n return target\n\n def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n \"\"\"\n Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an\n int, smaller edge of the image will be matched to this number.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the image's `(height, width)` dimensions after resizing. Available options are:\n - `{\"height\": int, \"width\": int}`: The image will be resized to the exact size `(height, width)`.\n Do NOT keep the aspect ratio.\n - `{\"shortest_edge\": int, \"longest_edge\": int}`: The image will be resized to a maximum size respecting\n the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge\n less or equal to `longest_edge`.\n - `{\"max_height\": int, \"max_width\": int}`: The image will be resized to the maximum size respecting the\n aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to\n `max_width`.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n Resampling filter to use if resizing the image.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred from the input\n image.\n \"\"\"\n size = get_size_dict(size, default_to_square=False)\n if 'shortest_edge' in size and 'longest_edge' in size:\n new_size = get_resize_output_image_size(image, size['shortest_edge'], size['longest_edge'], input_data_format=input_data_format)\n elif 'height' in size and 'width' in size:\n new_size = (size['height'], size['width'])\n elif 'max_height' in size and 'max_width' in size:\n new_size = get_image_size_for_max_height_width(image, size['max_height'], size['max_width'], input_data_format=input_data_format)\n else:\n raise ValueError(f\"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.\")\n image = resize(image, size=new_size, resample=resample, data_format=data_format, input_data_format=input_data_format)\n return image\n\n def resize_annotation(self, annotation, orig_size, size, resample: PILImageResampling=PILImageResampling.NEAREST) -> Dict:\n \"\"\"\n Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched\n to this number.\n \"\"\"\n return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample)\n\n def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n \"\"\"\n Rescale the image by the given factor. 
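As a side note on the `{"shortest_edge": ..., "longest_edge": ...}` option handled by `resize` above: the helper `get_resize_output_image_size` is not reproduced in this file, but the constraint it enforces can be sketched as follows (a hedged approximation of the usual rule, not the library's exact code):

```python
def output_size(height, width, shortest_edge, longest_edge):
    # Scale so the short side reaches shortest_edge, unless that would push
    # the long side past longest_edge, in which case the long side caps it.
    scale = shortest_edge / min(height, width)
    if round(scale * max(height, width)) > longest_edge:
        scale = longest_edge / max(height, width)
    return round(height * scale), round(width * scale)

print(output_size(480, 640, 800, 1333))   # (800, 1067): short side governs
print(output_size(480, 1920, 800, 1333))  # (333, 1333): long side caps it
```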
image = image * rescale_factor.\n\n Args:\n image (`np.ndarray`):\n Image to rescale.\n rescale_factor (`float`):\n The value to use for rescaling.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. If unset, is inferred from the input image. Can be\n one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n \"\"\"\n return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)\n\n def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict:\n \"\"\"\n Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to\n `[center_x, center_y, width, height]` format and from absolute to relative pixel values.\n \"\"\"\n return normalize_annotation(annotation, image_size=image_size)\n\n def _update_annotation_for_padded_image(self, annotation: Dict, input_image_size: Tuple[int, int], output_image_size: Tuple[int, int], padding, update_bboxes) -> Dict:\n \"\"\"\n Update the annotation for a padded image.\n \"\"\"\n new_annotation = {}\n new_annotation['size'] = output_image_size\n for key, value in annotation.items():\n if key == 'masks':\n masks = value\n masks = pad(masks, padding, mode=PaddingMode.CONSTANT, constant_values=0, input_data_format=ChannelDimension.FIRST)\n masks = safe_squeeze(masks, 1)\n new_annotation['masks'] = masks\n elif key == 'boxes' and update_bboxes:\n boxes = value\n boxes *= np.asarray([input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0], input_image_size[1] / output_image_size[1], input_image_size[0] / output_image_size[0]])\n new_annotation['boxes'] = boxes\n elif key == 'size':\n new_annotation['size'] = output_image_size\n else:\n new_annotation[key] = value\n return new_annotation\n\n def _pad_image(self, image: np.ndarray, output_size: Tuple[int, int], annotation: Optional[Dict[str, Any]]=None, constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True) -> np.ndarray:\n \"\"\"\n Pad an image with zeros to the given size.\n \"\"\"\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n output_height, output_width = output_size\n pad_bottom = output_height - input_height\n pad_right = output_width - input_width\n padding = ((0, pad_bottom), (0, pad_right))\n padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)\n if annotation is not None:\n annotation = self._update_annotation_for_padded_image(annotation, (input_height, input_width), (output_height, output_width), padding, update_bboxes)\n return (padded_image, annotation)\n\n def pad(self, images: List[np.ndarray], annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, constant_values: Union[float, 
Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True, pad_size: Optional[Dict[str, int]]=None) -> BatchFeature:\n \"\"\"\n Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width\n in the batch and optionally returns their corresponding pixel mask.\n\n Args:\n images (List[`np.ndarray`]):\n Images to pad.\n annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):\n Annotations to transform according to the padding that is applied to the images.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n return_pixel_mask (`bool`, *optional*, defaults to `True`):\n Whether to return a pixel mask.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred.\n update_bboxes (`bool`, *optional*, defaults to `True`):\n Whether to update the bounding boxes in the annotations to match the padded images. If the\n bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)`\n format, the bounding boxes will not be updated.\n pad_size (`Dict[str, int]`, *optional*):\n The size `{\"height\": int, \"width\": int}` to pad the images to. Must be larger than any image size\n provided for preprocessing.
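A small NumPy sketch of the bottom/right zero padding and pixel mask documented for `pad` above, on single-channel toy images (the real method additionally handles annotations, channel formats, and tensor conversion):

```python
import numpy as np

def pad_to(image, target_hw):
    h, w = image.shape
    th, tw = target_hw
    padded = np.zeros((th, tw), dtype=image.dtype)
    padded[:h, :w] = image               # original content stays top-left
    mask = np.zeros((th, tw), dtype=np.int64)
    mask[:h, :w] = 1                     # 1 = real pixels, 0 = padding
    return padded, mask

images = [np.ones((2, 3)), np.ones((4, 2))]
target = (max(i.shape[0] for i in images), max(i.shape[1] for i in images))
padded, mask = pad_to(images[0], target)  # target == (4, 3)
print(padded.shape, int(mask.sum()))      # (4, 3) 6
```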
If `pad_size` is not provided, images will be padded to the largest\n height and width in the batch.\n \"\"\"\n pad_size = pad_size if pad_size is not None else self.pad_size\n if pad_size is not None:\n padded_size = (pad_size['height'], pad_size['width'])\n else:\n padded_size = get_max_height_width(images, input_data_format=input_data_format)\n annotation_list = annotations if annotations is not None else [None] * len(images)\n padded_images = []\n padded_annotations = []\n for image, annotation in zip(images, annotation_list):\n padded_image, padded_annotation = self._pad_image(image, padded_size, annotation, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, update_bboxes=update_bboxes)\n padded_images.append(padded_image)\n padded_annotations.append(padded_annotation)\n data = {'pixel_values': padded_images}\n if return_pixel_mask:\n masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images]\n data['pixel_mask'] = masks\n encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)\n if annotations is not None:\n encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations]\n return encoded_inputs\n\n def preprocess(self, images: ImageInput, annotations: Optional[Union[List[Dict], List[List[Dict]]]]=None, return_segmentation_masks: Optional[bool]=None, masks_path: Optional[Union[str, pathlib.Path]]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_annotations: Optional[bool]=None, do_pad: Optional[bool]=None, format: Optional[Union[str, AnnotationFormat]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> BatchFeature:\n \"\"\"\n Preprocess an image or a batch of images so that it can be used by the model.\n\n Args:\n images (`ImageInput`):\n Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging\n from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n annotations (`List[Dict]` or `List[List[Dict]]`, *optional*):\n List of annotations associated with the image or batch of images. If annotation is for object\n detection, the annotations should be a dictionary with the following keys:\n - \"image_id\" (`int`): The image id.\n - \"annotations\" (`List[Dict]`): List of annotations for an image. Each annotation should be a\n dictionary. An image can have no annotations, in which case the list should be empty.\n If annotation is for segmentation, the annotations should be a dictionary with the following keys:\n - \"image_id\" (`int`): The image id.\n - \"segments_info\" (`List[Dict]`): List of segments for an image. 
Each segment should be a dictionary.\n An image can have no segments, in which case the list should be empty.\n - \"file_name\" (`str`): The file name of the image.\n return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):\n Whether to return segmentation masks.\n masks_path (`str` or `pathlib.Path`, *optional*):\n Path to the directory containing the segmentation masks.\n do_resize (`bool`, *optional*, defaults to self.do_resize):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to self.size):\n Size of the image's `(height, width)` dimensions after resizing. Available options are:\n - `{\"height\": int, \"width\": int}`: The image will be resized to the exact size `(height, width)`.\n Do NOT keep the aspect ratio.\n - `{\"shortest_edge\": int, \"longest_edge\": int}`: The image will be resized to a maximum size respecting\n the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge\n less or equal to `longest_edge`.\n - `{\"max_height\": int, \"max_width\": int}`: The image will be resized to the maximum size respecting the\n aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to\n `max_width`.\n resample (`PILImageResampling`, *optional*, defaults to self.resample):\n Resampling filter to use when resizing the image.\n do_rescale (`bool`, *optional*, defaults to self.do_rescale):\n Whether to rescale the image.\n rescale_factor (`float`, *optional*, defaults to self.rescale_factor):\n Rescale factor to use when rescaling the image.\n do_normalize (`bool`, *optional*, defaults to self.do_normalize):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):\n Mean to use when normalizing the image.\n image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):\n Standard deviation to use when normalizing the image.\n do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):\n Whether to convert the annotations to the format expected by the model. Converts the bounding\n boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`\n and in relative coordinates.\n do_pad (`bool`, *optional*, defaults to self.do_pad):\n Whether to pad the image. If `True`, padding will be applied to the bottom and right of\n the image with zeros. If `pad_size` is provided, the image will be padded to the specified\n dimensions. Otherwise, the image will be padded to the maximum height and width of the batch.\n format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):\n Format of the annotations.\n return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):\n Type of tensors to return. If `None`, will return the list of images.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n pad_size (`Dict[str, int]`, *optional*):\n The size `{\"height\": int, \"width\": int}` to pad the images to. Must be larger than any image size\n provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest\n height and width in the batch.\n \"\"\"\n if 'pad_and_return_pixel_mask' in kwargs:\n logger.warning_once('The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, use `do_pad` instead.')\n do_pad = kwargs.pop('pad_and_return_pixel_mask')\n do_resize = self.do_resize if do_resize is None else do_resize\n size = self.size if size is None else size\n size = get_size_dict(size=size, default_to_square=False)\n resample = self.resample if resample is None else resample\n do_rescale = self.do_rescale if do_rescale is None else do_rescale\n rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor\n do_normalize = self.do_normalize if do_normalize is None else do_normalize\n image_mean = self.image_mean if image_mean is None else image_mean\n image_std = self.image_std if image_std is None else image_std\n do_convert_annotations = self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations\n do_pad = self.do_pad if do_pad is None else do_pad\n pad_size = self.pad_size if pad_size is None else pad_size\n format = self.format if format is None else format\n validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)\n if not is_batched(images):\n images = [images]\n annotations = [annotations] if annotations is not None else None\n if not valid_images(images):\n raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n if annotations is not None and len(images) != len(annotations):\n raise ValueError(f'The number of images ({len(images)}) and annotations ({len(annotations)}) do not match.')\n format = AnnotationFormat(format)\n if annotations is not None:\n validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)\n if masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and (not isinstance(masks_path, (pathlib.Path, str))):\n raise ValueError(f'The path to the directory containing the mask PNG files should be provided as a `pathlib.Path` or string object, but is {type(masks_path)} instead.')\n images = [to_numpy_array(image) for image in images]\n if do_rescale and is_scaled_image(images[0]):\n logger.warning_once('It looks like you are trying to rescale already rescaled images.
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(images[0])\n if annotations is not None:\n prepared_images = []\n prepared_annotations = []\n for image, target in zip(images, annotations):\n target = self.prepare_annotation(image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format)\n prepared_images.append(image)\n prepared_annotations.append(target)\n images = prepared_images\n annotations = prepared_annotations\n del prepared_images, prepared_annotations\n if do_resize:\n if annotations is not None:\n resized_images, resized_annotations = ([], [])\n for image, target in zip(images, annotations):\n orig_size = get_image_size(image, input_data_format)\n resized_image = self.resize(image, size=size, resample=resample, input_data_format=input_data_format)\n resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image, input_data_format))\n resized_images.append(resized_image)\n resized_annotations.append(resized_annotation)\n images = resized_images\n annotations = resized_annotations\n del resized_images, resized_annotations\n else:\n images = [self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images]\n if do_rescale:\n images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]\n if do_normalize:\n images = [self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images]\n if do_convert_annotations and annotations is not None:\n annotations = [self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for annotation, image in zip(annotations, images)]\n if do_pad:\n encoded_inputs = self.pad(images, annotations=annotations, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format, return_tensors=return_tensors, update_bboxes=do_convert_annotations, pad_size=pad_size)\n else:\n images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]\n encoded_inputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)\n if annotations is not None:\n encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations]\n return encoded_inputs\n\n def post_process_object_detection(self, outputs, threshold: float=0.5, target_sizes: Union[TensorType, List[Tuple]]=None, nms_threshold: float=0.7):\n \"\"\"\n Converts the output of [`DetaForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,\n bottom_right_x, bottom_right_y) format. Only supports PyTorch.\n\n Args:\n outputs ([`DetrObjectDetectionOutput`]):\n Raw outputs of the model.\n threshold (`float`, *optional*, defaults to 0.5):\n Score threshold to keep object detection predictions.\n target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):\n Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n (height, width) of each image in the batch. 
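To make the box rescaling described above concrete, here is a hedged NumPy sketch of converting relative `(center_x, center_y, width, height)` boxes to absolute corner coordinates for one `(height, width)` target size, mirroring the role of `center_to_corners_format` plus the `scale_fct` multiplication (the function name is illustrative):

```python
import numpy as np

def to_absolute_corners(boxes, target_hw):
    cx, cy, w, h = boxes.T
    corners = np.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], axis=1)
    img_h, img_w = target_hw
    return corners * np.array([img_w, img_h, img_w, img_h])  # x1, y1, x2, y2

boxes = np.array([[0.5, 0.5, 0.2, 0.4]])       # one relative box
print(to_absolute_corners(boxes, (480, 640)))  # [[256. 144. 384. 336.]]
```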
If left to None, predictions will not be resized.\n nms_threshold (`float`, *optional*, defaults to 0.7):\n NMS threshold.\n\n Returns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image\n in the batch as predicted by the model.\n \"\"\"\n out_logits, out_bbox = (outputs.logits, outputs.pred_boxes)\n batch_size, num_queries, num_labels = out_logits.shape\n if target_sizes is not None:\n if len(out_logits) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n prob = out_logits.sigmoid()\n all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device)\n all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device)\n all_boxes = torch.div(all_indexes, out_logits.shape[2], rounding_mode='floor')\n all_labels = all_indexes % out_logits.shape[2]\n boxes = center_to_corners_format(out_bbox)\n boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4))\n if target_sizes is not None:\n if isinstance(target_sizes, List):\n img_h = torch.Tensor([i[0] for i in target_sizes])\n img_w = torch.Tensor([i[1] for i in target_sizes])\n else:\n img_h, img_w = target_sizes.unbind(1)\n scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)\n boxes = boxes * scale_fct[:, None, :]\n results = []\n for b in range(batch_size):\n box = boxes[b]\n score = all_scores[b]\n lbls = all_labels[b]\n pre_topk = score.topk(min(10000, num_queries * num_labels)).indices\n box = box[pre_topk]\n score = score[pre_topk]\n lbls = lbls[pre_topk]\n keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100]\n score = score[keep_inds]\n lbls = lbls[keep_inds]\n box = box[keep_inds]\n results.append({'scores': score[score > threshold], 'labels': lbls[score > threshold], 'boxes': box[score > threshold]})\n return results", "docstring": "Constructs a Deformable DETR image processor.\n\nArgs:\n format (`str`, *optional*, defaults to `\"coco_detection\"`):\n Data format of the annotations. One of \"coco_detection\" or \"coco_panoptic\".\n do_resize (`bool`, *optional*, defaults to `True`):\n Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be\n overridden by the `do_resize` parameter in the `preprocess` method.\n size (`Dict[str, int]` *optional*, defaults to `{\"shortest_edge\": 800, \"longest_edge\": 1333}`):\n Size of the image's `(height, width)` dimensions after resizing. Can be overridden by the `size` parameter\n in the `preprocess` method. Available options are:\n - `{\"height\": int, \"width\": int}`: The image will be resized to the exact size `(height, width)`.\n Do NOT keep the aspect ratio.\n - `{\"shortest_edge\": int, \"longest_edge\": int}`: The image will be resized to a maximum size respecting\n the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge\n less or equal to `longest_edge`.\n - `{\"max_height\": int, \"max_width\": int}`: The image will be resized to the maximum size respecting the\n aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to\n `max_width`.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n Resampling filter to use if resizing the image.\n do_rescale (`bool`, *optional*, defaults to `True`):\n Controls whether to rescale the image by the specified scale `rescale_factor`. 
Can be overridden by the\n `do_rescale` parameter in the `preprocess` method.\n rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):\n Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the\n `preprocess` method.\n do_normalize:\n Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the\n `preprocess` method.\n image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):\n Mean values to use when normalizing the image. Can be a single value or a list of values, one for each\n channel. Can be overridden by the `image_mean` parameter in the `preprocess` method.\n image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):\n Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one\n for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method.\n do_convert_annotations (`bool`, *optional*, defaults to `True`):\n Controls whether to convert the annotations to the format expected by the DETR model. Converts the\n bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.\n Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.\n do_pad (`bool`, *optional*, defaults to `True`):\n Controls whether to pad the image. Can be overridden by the `do_pad` parameter in the `preprocess`\n method. If `True`, padding will be applied to the bottom and right of the image with zeros.\n If `pad_size` is provided, the image will be padded to the specified dimensions.\n Otherwise, the image will be padded to the maximum height and width of the batch.\n pad_size (`Dict[str, int]`, *optional*):\n The size `{\"height\": int, \"width\" int}` to pad the images to. Must be larger than any image size\n provided for preprocessing. 
If `pad_size` is not provided, images will be padded to the largest\n height and width in the batch."} +{"repo": "tensorflow", "function": "def _process_file_infos(file_infos):\n files = []\n num_elements = 0\n offsets = np.int64([])\n offset_sum = 0\n thresholds = np.int64([])\n threshold_sum = 0\n adjustment_needed = False\n for file_info in file_infos:\n files.append(file_info['path'])\n skip = 0\n if 'skip' in file_info:\n if file_info['skip'] < -1:\n raise ValueError('`skip` should be greater than `-1` but got {}'.format(file_info['skip']))\n if file_info['skip'] == -1:\n skip = file_info['num_elements']\n else:\n skip = min(file_info['skip'], file_info['num_elements'])\n take = file_info['num_elements'] - skip\n if 'take' in file_info:\n if file_info['take'] < -1:\n raise ValueError('`take` should be greater than `-1` but got {}'.format(file_info['take']))\n if file_info['take'] != -1:\n take = min(file_info['take'], take)\n remainder = file_info['num_elements'] - skip - take\n if take != file_info['num_elements']:\n adjustment_needed = True\n num_elements += take\n offsets = np.append(offsets, offset_sum + skip)\n offset_sum += skip + remainder\n thresholds = np.append(thresholds, threshold_sum)\n threshold_sum += take\n result = {'files': files, 'num_elements': num_elements}\n if adjustment_needed:\n result['offsets'] = offsets\n result['thresholds'] = thresholds\n return result", "docstring": "Computes aggregate information about files to read.\n\nThe method collects information about the files to read, the total number of\nelements, and arrays that can be used to account for elements to be skipped,\nwhich can be specified via the \"skip\" and \"take\" keys.\n\nTo account for elements to skip, the range of each file can be divided into\nthree regions:\n- S (elements to skip)\n- T (elements to read)\n- R (remainder of elements that will also be skipped)\n\nThe `thresholds` and `offsets` arrays are initialized as follows:\n`thresholds = [0, T_1, T_1 + T_2, ...]` and\n`offsets = [S_1, S_1 + R_1 + S_2, S_1 + R_1 + S_2 + R_2 + S_3, ...]`\n\nThis makes it possible to map an index from a contiguous range\n`(0...num_elements_to_read)` to an index in the range of all elements,\nskipping over elements as per the \"skip\" and \"take\" keys values. In\nparticular, for a given input index `X`, we find the greatest `thresholds`\nvalue that is smaller or equal to `X`. Let `t(X)` denotes such index in the\n`thresholds` array. 
The output index is computed as `X + offsets[t(X)]`.\n\nArgs:\n file_infos: See `file_infos` argument of `index_shuffle` for details.\n\nReturns:\n A dictionary containing the following keys:\n - `files`, the vector of pathnames of files to read\n - `num_elements`, an integer identifying the total number of elements\n - `offsets`, the vector of offsets to use for index adjustment (in case\n any elements should be skipped)\n - `thresholds`, the vector of thresholds to use for index adjustment (in\n case any elements should be skipped)"} +{"repo": "transformers", "function": "class LlavaOnevisionCausalLMOutputWithPast(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[List[torch.FloatTensor]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n image_hidden_states: Optional[torch.FloatTensor] = None\n video_hidden_states: Optional[torch.FloatTensor] = None", "docstring": "Base class for LlavaOnevision causal language model (or autoregressive) outputs.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Language modeling loss (for next-token prediction).\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`)\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n image_hidden_states (`torch.FloatTensor`, *optional*):\n A `torch.FloatTensor` of size (batch_size * num_patches, num_images, sequence_length, hidden_size)`.\n image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.\n\n video_hidden_states (`torch.FloatTensor`, *optional*):\n A `torch.FloatTensor` of size `(batch_size * num_frames, num_videos, sequence_length, hidden_size)`.\n video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state."} +{"repo": "tensorflow", "function": "def index_of(self, value_str):\n if value_str is None:\n value_str = ''\n if value_str in self._string_to_index:\n return self._string_to_index[value_str]\n index = 
len(self._string_table)\n self._string_table.append(value_str)\n self._string_to_index[value_str] = index\n return index", "docstring": "Get index of value_str in the string table.\n\nIf value_str is not in the string table, we will add it at the end\nand then return the new index.\nArgs:\n value_str: (string) Value to lookup/add in/to the string table.\n\nReturns:\n Index of value_str in the string table."} +{"repo": "tensorflow", "function": "def _get_raw_feature_as_tensor(self, key):\n raw_feature = self._features[key]\n feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(raw_feature)\n\n def expand_dims(input_tensor):\n if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):\n return sparse_ops.sparse_reshape(input_tensor, [array_ops.shape(input_tensor)[0], 1])\n else:\n return array_ops.expand_dims(input_tensor, -1)\n rank = feature_tensor.get_shape().ndims\n if rank is not None:\n if rank == 0:\n raise ValueError('Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))\n return feature_tensor if rank != 1 else expand_dims(feature_tensor)\n with ops.control_dependencies([check_ops.assert_positive(array_ops.rank(feature_tensor), message='Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))]):\n return cond.cond(math_ops.equal(1, array_ops.rank(feature_tensor)), lambda: expand_dims(feature_tensor), lambda: feature_tensor)", "docstring": "Gets the raw_feature (keyed by `key`) as `tensor`.\n\nThe raw feature is converted to (sparse) tensor and maybe expand dim.\n\nFor both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if\nthe rank is 1. This supports dynamic rank also. For rank 0 raw feature, will\nerror out as it is not supported.\n\nArgs:\n key: A `str` key to access the raw feature.\n\nReturns:\n A `Tensor` or `SparseTensor`.\n\nRaises:\n ValueError: if the raw feature has rank 0."} +{"repo": "tensorflow", "function": "def restore(self, sess, save_path):\n start_time = time.time()\n if self._is_empty:\n return\n if save_path is None:\n raise ValueError(\"Can't load save_path when it is None.\")\n checkpoint_prefix = compat.as_text(save_path)\n if not checkpoint_management.checkpoint_exists_internal(checkpoint_prefix):\n raise ValueError('The passed save_path is not a valid checkpoint: ' + checkpoint_prefix)\n logging.info('Restoring parameters from %s', checkpoint_prefix)\n try:\n if context.executing_eagerly():\n self._build_eager(save_path, build_save=False, build_restore=True)\n else:\n sess.run(self.saver_def.restore_op_name, {self.saver_def.filename_tensor_name: save_path})\n except errors.NotFoundError as err:\n try:\n names_to_keys = object_graph_key_mapping(save_path)\n except errors.NotFoundError:\n raise _wrap_restore_error_with_msg(err, 'a Variable name or other graph key that is missing')\n logging.warning('Restoring an object-based checkpoint using a name-based saver. This may be somewhat fragile, and will re-build the Saver. 
Instead, consider loading object-based checkpoints using tf.train.Checkpoint().')\n self._object_restore_saver = saver_from_object_based_checkpoint(checkpoint_path=save_path, var_list=self._var_list, builder=self._builder, names_to_keys=names_to_keys, cached_saver=self._object_restore_saver)\n self._object_restore_saver.restore(sess=sess, save_path=save_path)\n except errors.InvalidArgumentError as err:\n raise _wrap_restore_error_with_msg(err, 'a mismatch between the current graph and the graph')\n metrics.AddCheckpointReadDuration(api_label=_SAVER_LABEL, microseconds=_get_duration_microseconds(start_time, time.time()))", "docstring": "Restores previously saved variables.\n\nThis method runs the ops added by the constructor for restoring variables.\nIt requires a session in which the graph was launched. The variables to\nrestore do not have to have been initialized, as restoring is itself a way\nto initialize variables.\n\nThe `save_path` argument is typically a value previously returned from a\n`save()` call, or a call to `latest_checkpoint()`.\n\nArgs:\n sess: A `Session` to use to restore the parameters. None in eager mode.\n save_path: Path where parameters were previously saved.\n\nRaises:\n ValueError: If save_path is None or not a valid checkpoint."} +{"repo": "beam", "function": "def _get_object_type(filename, filepath):\n filename_no_ext = os.path.splitext(filename)[0].lower()\n if filename_no_ext.endswith(PrecompiledExampleType.test_ends):\n object_type = PRECOMPILED_OBJECT_TYPE_UNIT_TEST\n elif PrecompiledExampleType.katas in filepath.split(os.sep):\n object_type = PRECOMPILED_OBJECT_TYPE_KATA\n elif PrecompiledExampleType.examples in filepath.split(os.sep):\n object_type = PRECOMPILED_OBJECT_TYPE_EXAMPLE\n else:\n object_type = PRECOMPILED_OBJECT_TYPE_UNSPECIFIED\n return object_type", "docstring": "Get type of an object based on it filename/filepath\n\nArgs:\n filename: object's filename\n filepath: object's filepath\n\nReturns: type of the object (example, kata, unit-test)"} +{"repo": "transformers", "function": "class RobertaTokenizerFast(PreTrainedTokenizerFast):\n vocab_files_names = VOCAB_FILES_NAMES\n model_input_names = ['input_ids', 'attention_mask']\n slow_tokenizer_class = RobertaTokenizer\n\n def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='', eos_token='', sep_token='', cls_token='', unk_token='', pad_token='', mask_token='', add_prefix_space=False, trim_offsets=True, **kwargs):\n mask_token = AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False) if isinstance(mask_token, str) else mask_token\n super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)\n tokenizer_component = 'post_processor'\n tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)\n if tokenizer_component_instance:\n state = json.loads(tokenizer_component_instance.__getstate__())\n if 'sep' in state:\n state['sep'] = tuple(state['sep'])\n if 'cls' in state:\n state['cls'] = tuple(state['cls'])\n changes_to_apply = False\n if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:\n state['add_prefix_space'] = add_prefix_space\n changes_to_apply = True\n if state.get('trim_offsets', trim_offsets) != trim_offsets:\n state['trim_offsets'] 
= trim_offsets\n changes_to_apply = True\n if changes_to_apply:\n component_class = getattr(processors, state.pop('type'))\n new_value = component_class(**state)\n setattr(self.backend_tokenizer, tokenizer_component, new_value)\n\n @property\n def mask_token(self) -> str:\n \"\"\"\n `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not\n having been set.\n\n Roberta tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily\n comprise the space before the **.\n \"\"\"\n if self._mask_token is None:\n if self.verbose:\n logger.error('Using mask_token, but it is not set yet.')\n return None\n return str(self._mask_token)\n\n @mask_token.setter\n def mask_token(self, value):\n \"\"\"\n Overriding the default behavior of the mask token to have it eat the space before it.\n\n This is needed to preserve backward compatibility with all the previously used models based on Roberta.\n \"\"\"\n value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value\n self._mask_token = value\n\n def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:\n is_split_into_words = kwargs.get('is_split_into_words', False)\n assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'\n return super()._batch_encode_plus(*args, **kwargs)\n\n def _encode_plus(self, *args, **kwargs) -> BatchEncoding:\n is_split_into_words = kwargs.get('is_split_into_words', False)\n assert self.add_prefix_space or not is_split_into_words, f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with pretokenized inputs.'\n return super()._encode_plus(*args, **kwargs)\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n files = self._tokenizer.model.save(save_directory, name=filename_prefix)\n return tuple(files)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]\n if token_ids_1 is None:\n return output\n return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]\n\n def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n \"\"\"\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
RoBERTa does not\n make use of token type ids, therefore a list of zeros is returned.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of zeros.\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Construct a \"fast\" RoBERTa tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2\ntokenizer, using byte-level Byte-Pair-Encoding.\n\nThis tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will\nbe encoded differently whether it is at the beginning of the sentence (without space) or not:\n\n```python\n>>> from transformers import RobertaTokenizerFast\n\n>>> tokenizer = RobertaTokenizerFast.from_pretrained(\"FacebookAI/roberta-base\")\n>>> tokenizer(\"Hello world\")[\"input_ids\"]\n[0, 31414, 232, 2]\n\n>>> tokenizer(\" Hello world\")[\"input_ids\"]\n[0, 20920, 232, 2]\n```\n\nYou can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you\ncall it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.\n\n\n\nWhen used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.\n\n\n\nThis tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\nrefer to this superclass for more information regarding those methods.\n\nArgs:\n vocab_file (`str`):\n Path to the vocabulary file.\n merges_file (`str`):\n Path to the merges file.\n errors (`str`, *optional*, defaults to `\"replace\"`):\n Paradigm to follow when decoding bytes to UTF-8. See\n [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.\n bos_token (`str`, *optional*, defaults to `\"\"`):\n The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.\n\n \n\n When building a sequence using special tokens, this is not the token that is used for the beginning of\n sequence. The token used is the `cls_token`.\n\n \n\n eos_token (`str`, *optional*, defaults to `\"\"`):\n The end of sequence token.\n\n \n\n When building a sequence using special tokens, this is not the token that is used for the end of sequence.\n The token used is the `sep_token`.\n\n \n\n sep_token (`str`, *optional*, defaults to `\"\"`):\n The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for\n sequence classification or for a text and a question for question answering. It is also used as the last\n token of a sequence built with special tokens.\n cls_token (`str`, *optional*, defaults to `\"\"`):\n The classifier token which is used when doing sequence classification (classification of the whole sequence\n instead of per-token classification). It is the first token of the sequence when built with special tokens.\n unk_token (`str`, *optional*, defaults to `\"\"`):\n The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n pad_token (`str`, *optional*, defaults to `\"\"`):\n The token used for padding, for example when batching sequences of different lengths.\n mask_token (`str`, *optional*, defaults to `\"\"`):\n The token used for masking values. This is the token used when training this model with masked language\n modeling. This is the token which the model will try to predict.\n add_prefix_space (`bool`, *optional*, defaults to `False`):\n Whether or not to add an initial space to the input. This allows to treat the leading word just as any\n other word. (RoBERTa tokenizer detect beginning of words by the preceding space).\n trim_offsets (`bool`, *optional*, defaults to `True`):\n Whether the post processing step should trim offsets to avoid including whitespaces."} +{"repo": "keras", "function": "class SquaredHinge(reduction_metrics.MeanMetricWrapper):\n\n def __init__(self, name='squared_hinge', dtype=None):\n super().__init__(fn=squared_hinge, name=name, dtype=dtype)\n self._direction = 'down'\n\n def get_config(self):\n return {'name': self.name, 'dtype': self.dtype}", "docstring": "Computes the hinge metric between `y_true` and `y_pred`.\n\n`y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are\nprovided we will convert them to -1 or 1.\n\nArgs:\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nExample:\n\n>>> m = keras.metrics.SquaredHinge()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])\n>>> m.result()\n1.86\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],\n... sample_weight=[1, 0])\n>>> m.result()\n1.46"} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, mems: Optional[torch.Tensor]=None, perm_mask: Optional[torch.Tensor]=None, target_mapping: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, input_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_mems: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple, XLNetLMHeadModelOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n transformer_outputs = self.transformer(input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs)\n logits = self.lm_loss(transformer_outputs[0])\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))\n if not return_dict:\n output = (logits,) + transformer_outputs[1:]\n return (loss,) + output if loss is not None else output\n return XLNetLMHeadModelOutput(loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions)", "docstring": "mems (`List[torch.FloatTensor]` of length `config.n_layers`):\n Contains pre-computed hidden-states (see `mems` output below) . 
Can be used to speed up sequential\n decoding. The token ids which have their past given to this model should not be passed as `input_ids` as\n they have already been computed.\n\n `use_mems` has to be set to `True` to make use of `mems`.\nperm_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, sequence_length)`, *optional*):\n Mask to indicate the attention pattern for each input token with values selected in `[0, 1]`:\n\n - if `perm_mask[k, i, j] = 0`, i attend to j in batch k;\n - if `perm_mask[k, i, j] = 1`, i does not attend to j in batch k.\n\n If not set, each token attends to all the others (full bidirectional attention). Only used during\n pretraining (to define factorization order) or for sequential decoding (generation).\ntarget_mapping (`torch.FloatTensor` of shape `(batch_size, num_predict, sequence_length)`, *optional*):\n Mask to indicate the output tokens to use. If `target_mapping[k, i, j] = 1`, the i-th predict in batch k is\n on the j-th token. Only used during pretraining for partial prediction or for sequential decoding\n (generation).\ninput_mask (`torch.FloatTensor` of shape `batch_size, sequence_length`, *optional*):\n Mask to avoid performing attention on padding token indices. Negative of `attention_mask`, i.e. with 0 for\n real tokens and 1 for padding which is kept for compatibility with the original code base.\n\n Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **masked**,\n - 0 for tokens that are **not masked**.\n\n You can only uses one of `input_mask` and `attention_mask`.\nlabels (`torch.LongTensor` of shape `(batch_size, num_predict)`, *optional*):\n Labels for masked language modeling. `num_predict` corresponds to `target_mapping.shape[1]`. If\n `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.\n\n The labels should correspond to the masked input words that should be predicted and depends on\n `target_mapping`. Note in order to perform standard auto-regressive language modeling a ** token has\n to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below)\n\n Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored, the loss\n is only computed for labels in `[0, ..., config.vocab_size]`\nuse_mems (`bool`, *optional*):\n Whether to use memory states to speed up sequential decoding. If set to `True`, the model will use the hidden\n states from previous forward passes to compute attention, which can significantly improve performance for\n sequential decoding tasks.\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, XLNetLMHeadModel\n>>> import torch\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"xlnet/xlnet-large-cased\")\n>>> model = XLNetLMHeadModel.from_pretrained(\"xlnet/xlnet-large-cased\")\n\n>>> # We show how to setup inputs to predict a next token using a bi-directional context.\n>>> input_ids = torch.tensor(\n... tokenizer.encode(\"Hello, my dog is very \", add_special_tokens=False)\n... ).unsqueeze(\n... 0\n... ) # We will predict the masked token\n>>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)\n>>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token\n>>> target_mapping = torch.zeros(\n... (1, 1, input_ids.shape[1]), dtype=torch.float\n... ) # Shape [1, 1, seq_length] => let's predict one token\n>>> target_mapping[\n... 0, 0, -1\n... 
] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)\n\n>>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)\n>>> next_token_logits = outputs[\n... 0\n... ] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]\n\n>>> # The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling.\n>>> input_ids = torch.tensor(\n... tokenizer.encode(\"Hello, my dog is very \", add_special_tokens=False)\n... ).unsqueeze(\n... 0\n... ) # We will predict the masked token\n>>> labels = torch.tensor(tokenizer.encode(\"cute\", add_special_tokens=False)).unsqueeze(0)\n>>> assert labels.shape[0] == 1, \"only one word will be predicted\"\n>>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)\n>>> perm_mask[\n... :, :, -1\n... ] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training\n>>> target_mapping = torch.zeros(\n... (1, 1, input_ids.shape[1]), dtype=torch.float\n... ) # Shape [1, 1, seq_length] => let's predict one token\n>>> target_mapping[\n... 0, 0, -1\n... ] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)\n\n>>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)\n>>> loss = outputs.loss\n>>> next_token_logits = (\n... outputs.logits\n... ) # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]\n```"} +{"repo": "pyglove", "function": "def class_from_typename(cls, type_name: str) -> Optional[Type['JSONConvertible']]:\n return cls._TYPE_REGISTRY.class_from_typename(type_name)", "docstring": "Gets the class for a registered type name.\n\nArgs:\n type_name: A string as the global unique type identifier for requested\n class.\n\nReturns:\n A type object if registered, otherwise None."} +{"repo": "tensorflow", "function": "def _tf_data_pack_sequence_as(structure, flat_sequence):\n if not (_tf_data_is_nested(flat_sequence) or isinstance(flat_sequence, list)):\n raise TypeError(f\"Argument `flat_sequence` must be a sequence. Got '{type(flat_sequence).__name__}'.\")\n if not _tf_data_is_nested(structure):\n if len(flat_sequence) != 1:\n raise ValueError(f'Argument `structure` is a scalar but `len(flat_sequence)`={len(flat_sequence)} > 1')\n return flat_sequence[0]\n flat_structure = _tf_data_flatten(structure)\n if len(flat_structure) != len(flat_sequence):\n raise ValueError(f'Could not pack sequence. Argument `structure` had {len(flat_structure)} elements, but argument `flat_sequence` had {len(flat_sequence)} elements. Received structure: {structure}, flat_sequence: {flat_sequence}.')\n _, packed = _tf_data_packed_nest_with_indices(structure, flat_sequence, 0)\n return sequence_like(structure, packed)", "docstring": "Returns a given flattened sequence packed into a nest.\n\nIf `structure` is a scalar, `flat_sequence` must be a single-element list;\nin this case the return value is `flat_sequence[0]`.\n\nArgs:\n structure: tuple or list constructed of scalars and/or other tuples/lists,\n or a scalar. 
Note: numpy arrays are considered scalars.\n flat_sequence: flat sequence to pack.\n\nReturns:\n packed: `flat_sequence` converted to have the same recursive structure as\n `structure`.\n\nRaises:\n ValueError: If nest and structure have different element counts."} +{"repo": "transformers", "function": "def post_process_depth_estimation(self, outputs: 'DepthEstimatorOutput', target_sizes: Optional[Union[TensorType, List[Tuple[int, int]], None]]=None) -> List[Dict[str, TensorType]]:\n requires_backends(self, 'torch')\n predicted_depth = outputs.predicted_depth\n if target_sizes is not None and len(predicted_depth) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the predicted depth')\n results = []\n target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes\n for depth, target_size in zip(predicted_depth, target_sizes):\n if target_size is not None:\n depth = depth[None, None, ...]\n depth = torch.nn.functional.interpolate(depth, size=target_size, mode='bicubic', align_corners=False)\n depth = depth.squeeze()\n results.append({'predicted_depth': depth})\n return results", "docstring": "Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.\nOnly supports PyTorch.\n\nArgs:\n outputs ([`DepthEstimatorOutput`]):\n Raw outputs of the model.\n target_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*):\n Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n (height, width) of each image in the batch. If left to None, predictions will not be resized.\n\nReturns:\n `List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth\n predictions."} +{"repo": "tensorflow", "function": "def sequence_categorical_column_with_vocabulary_file(key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0, default_value=None, dtype=dtypes.string):\n return fc.SequenceCategoricalColumn(fc.categorical_column_with_vocabulary_file(key=key, vocabulary_file=vocabulary_file, vocabulary_size=vocabulary_size, num_oov_buckets=num_oov_buckets, default_value=default_value, dtype=dtype))", "docstring": "A sequence of categorical terms where ids use a vocabulary file.\n\nPass this to `embedding_column` or `indicator_column` to convert sequence\ncategorical data into dense representation for input to sequence NN, such as\nRNN.\n\nExample:\n\n```python\nstates = sequence_categorical_column_with_vocabulary_file(\n key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,\n num_oov_buckets=5)\nstates_embedding = embedding_column(states, dimension=10)\ncolumns = [states_embedding]\n\nfeatures = tf.io.parse_example(..., features=make_parse_example_spec(columns))\nsequence_feature_layer = SequenceFeatures(columns)\nsequence_input, sequence_length = sequence_feature_layer(features)\nsequence_length_mask = tf.sequence_mask(sequence_length)\n\nrnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)\nrnn_layer = tf.keras.layers.RNN(rnn_cell)\noutputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)\n```\n\nArgs:\n key: A unique string identifying the input feature.\n vocabulary_file: The vocabulary file name.\n vocabulary_size: Number of the elements in the vocabulary. This must be no\n greater than length of `vocabulary_file`, if less than length, later\n values are ignored. 
If None, it is set to the length of `vocabulary_file`.\n num_oov_buckets: Non-negative integer, the number of out-of-vocabulary\n buckets. All out-of-vocabulary inputs will be assigned IDs in the range\n `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of\n the input value. A positive `num_oov_buckets` can not be specified with\n `default_value`.\n default_value: The integer ID value to return for out-of-vocabulary feature\n values, defaults to `-1`. This can not be specified with a positive\n `num_oov_buckets`.\n dtype: The type of features. Only string and integer types are supported.\n\nReturns:\n A `SequenceCategoricalColumn`.\n\nRaises:\n ValueError: `vocabulary_file` is missing or cannot be opened.\n ValueError: `vocabulary_size` is missing or < 1.\n ValueError: `num_oov_buckets` is a negative integer.\n ValueError: `num_oov_buckets` and `default_value` are both specified.\n ValueError: `dtype` is neither string nor integer."} +{"repo": "genai-processors", "function": "def _parallel_part_processors(part_processors: Sequence[PartProcessorWithMatchFn]) -> PartProcessorFn:\n\n async def part_processor(content: ProcessorPart) -> AsyncIterable[ProcessorPart]:\n output_queue = asyncio.Queue()\n processors = []\n match_fns = []\n passthrough_fallback = False\n passthrough_always = False\n for p in part_processors:\n if p is PASSTHROUGH_FALLBACK:\n passthrough_fallback = True\n continue\n if p is PASSTHROUGH_ALWAYS:\n passthrough_always = True\n continue\n processors.append(_CaptureReservedSubstreams(output_queue, p))\n match_fns.append(p.match)\n parallel_processor = _CaptureReservedSubstreams(output_queue, map_processor.parallel_part_functions(processors, match_fns, with_default_output=passthrough_fallback, with_always_output=passthrough_always))\n content = parallel_processor(content)\n create_task(_enqueue_content(content, output_queue))\n while (part := (await output_queue.get())) is not None:\n yield part\n output_queue.task_done()\n return part_processor", "docstring": "Combine **part processors** in parallel.\n\nAdds debug and status streams to the output.\n\nNOTE: Substreams debug and status are yielded immediately instead of passing\n them to the next processor.\n\nArgs:\n part_processors: sequence of part processors to compute concurrently.\n\nReturns:\n Part processor that computes the output of the provided sequence of part\n processors concurrently."} +{"repo": "beam", "function": "def Insert(self, request, global_params=None):\n config = self.GetMethodConfig('Insert')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Creates a new empty dataset.\n\nArgs:\n request: (BigqueryDatasetsInsertRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (Dataset) The response message."} +{"repo": "transformers", "function": "def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n do_resize = do_resize if do_resize is not None else self.do_resize\n do_rescale = do_rescale if do_rescale is not None 
else self.do_rescale\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n resample = resample if resample is not None else self.resample\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n size = size if size is not None else self.size\n size_dict = get_size_dict(size)\n images = make_list_of_images(images)\n if not valid_images(images):\n raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)\n images = [to_numpy_array(image) for image in images]\n if do_rescale and is_scaled_image(images[0]):\n logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(images[0])\n if do_resize:\n images = [self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format) for image in images]\n if do_rescale:\n images = [self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format) for image in images]\n if do_normalize:\n images = [self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]\n images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]\n data = {'pixel_values': images}\n return BatchFeature(data=data, tensor_type=return_tensors)", "docstring": "Preprocess an image or batch of images.\n\nArgs:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Dictionary in the format `{\"height\": h, \"width\": w}` specifying the size of the output image after\n resizing.\n resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):\n `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has\n an effect if `do_resize` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use if `do_normalize` is set to `True`.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. 
Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format."} +{"repo": "tensorflow", "function": "def __call__(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:\n out = math_ops.matmul(input_tensor, self.filters)\n return {'output': out}", "docstring": "Performs a matrix multiplication.\n\nArgs:\n input_tensor: Input tensor to matmul with the filter.\n\nReturns:\n A map of: output key -> output result."} +{"repo": "tensorflow", "function": "class Callback:\n\n def __init__(self):\n self.validation_data = None\n self.model = None\n self._chief_worker_only = None\n self._supports_tf_logs = False\n\n def set_params(self, params):\n self.params = params\n\n def set_model(self, model):\n self.model = model\n\n @doc_controls.for_subclass_implementers\n @generic_utils.default\n def on_batch_begin(self, batch, logs=None):\n \"\"\"A backwards compatibility alias for `on_train_batch_begin`.\"\"\"\n\n @doc_controls.for_subclass_implementers\n @generic_utils.default\n def on_batch_end(self, batch, logs=None):\n \"\"\"A backwards compatibility alias for `on_train_batch_end`.\"\"\"\n\n @doc_controls.for_subclass_implementers\n def on_epoch_begin(self, epoch, logs=None):\n \"\"\"Called at the start of an epoch.\n\n Subclasses should override for any actions to run. This function should only\n be called during TRAIN mode.\n\n Args:\n epoch: Integer, index of epoch.\n logs: Dict. Currently no data is passed to this argument for this method\n but that may change in the future.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n def on_epoch_end(self, epoch, logs=None):\n \"\"\"Called at the end of an epoch.\n\n Subclasses should override for any actions to run. This function should only\n be called during TRAIN mode.\n\n Args:\n epoch: Integer, index of epoch.\n logs: Dict, metric results for this training epoch, and for the\n validation epoch if validation is performed. Validation result keys\n are prefixed with `val_`. For training epoch, the values of the\n `Model`'s metrics are returned. 
Example : `{'loss': 0.2, 'accuracy':\n 0.7}`.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n @generic_utils.default\n def on_train_batch_begin(self, batch, logs=None):\n \"\"\"Called at the beginning of a training batch in `fit` methods.\n\n Subclasses should override for any actions to run.\n\n Note that if the `steps_per_execution` argument to `compile` in\n `tf.keras.Model` is set to `N`, this method will only be called every `N`\n batches.\n\n Args:\n batch: Integer, index of batch within the current epoch.\n logs: Dict, contains the return value of `model.train_step`. Typically,\n the values of the `Model`'s metrics are returned. Example:\n `{'loss': 0.2, 'accuracy': 0.7}`.\n \"\"\"\n self.on_batch_begin(batch, logs=logs)\n\n @doc_controls.for_subclass_implementers\n @generic_utils.default\n def on_train_batch_end(self, batch, logs=None):\n \"\"\"Called at the end of a training batch in `fit` methods.\n\n Subclasses should override for any actions to run.\n\n Note that if the `steps_per_execution` argument to `compile` in\n `tf.keras.Model` is set to `N`, this method will only be called every `N`\n batches.\n\n Args:\n batch: Integer, index of batch within the current epoch.\n logs: Dict. Aggregated metric results up until this batch.\n \"\"\"\n self.on_batch_end(batch, logs=logs)\n\n @doc_controls.for_subclass_implementers\n @generic_utils.default\n def on_test_batch_begin(self, batch, logs=None):\n \"\"\"Called at the beginning of a batch in `evaluate` methods.\n\n Also called at the beginning of a validation batch in the `fit`\n methods, if validation data is provided.\n\n Subclasses should override for any actions to run.\n\n Note that if the `steps_per_execution` argument to `compile` in\n `tf.keras.Model` is set to `N`, this method will only be called every `N`\n batches.\n\n Args:\n batch: Integer, index of batch within the current epoch.\n logs: Dict, contains the return value of `model.test_step`. Typically,\n the values of the `Model`'s metrics are returned. Example:\n `{'loss': 0.2, 'accuracy': 0.7}`.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n @generic_utils.default\n def on_test_batch_end(self, batch, logs=None):\n \"\"\"Called at the end of a batch in `evaluate` methods.\n\n Also called at the end of a validation batch in the `fit`\n methods, if validation data is provided.\n\n Subclasses should override for any actions to run.\n\n Note that if the `steps_per_execution` argument to `compile` in\n `tf.keras.Model` is set to `N`, this method will only be called every `N`\n batches.\n\n Args:\n batch: Integer, index of batch within the current epoch.\n logs: Dict. 
Aggregated metric results up until this batch.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n @generic_utils.default\n def on_predict_batch_begin(self, batch, logs=None):\n \"\"\"Called at the beginning of a batch in `predict` methods.\n\n Subclasses should override for any actions to run.\n\n Note that if the `steps_per_execution` argument to `compile` in\n `tf.keras.Model` is set to `N`, this method will only be called every `N`\n batches.\n\n Args:\n batch: Integer, index of batch within the current epoch.\n logs: Dict, contains the return value of `model.predict_step`,\n it typically returns a dict with a key 'outputs' containing\n the model's outputs.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n @generic_utils.default\n def on_predict_batch_end(self, batch, logs=None):\n \"\"\"Called at the end of a batch in `predict` methods.\n\n Subclasses should override for any actions to run.\n\n Note that if the `steps_per_execution` argument to `compile` in\n `tf.keras.Model` is set to `N`, this method will only be called every `N`\n batches.\n\n Args:\n batch: Integer, index of batch within the current epoch.\n logs: Dict. Aggregated metric results up until this batch.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n def on_train_begin(self, logs=None):\n \"\"\"Called at the beginning of training.\n\n Subclasses should override for any actions to run.\n\n Args:\n logs: Dict. Currently no data is passed to this argument for this method\n but that may change in the future.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n def on_train_end(self, logs=None):\n \"\"\"Called at the end of training.\n\n Subclasses should override for any actions to run.\n\n Args:\n logs: Dict. Currently the output of the last call to `on_epoch_end()`\n is passed to this argument for this method but that may change in\n the future.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n def on_test_begin(self, logs=None):\n \"\"\"Called at the beginning of evaluation or validation.\n\n Subclasses should override for any actions to run.\n\n Args:\n logs: Dict. Currently no data is passed to this argument for this method\n but that may change in the future.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n def on_test_end(self, logs=None):\n \"\"\"Called at the end of evaluation or validation.\n\n Subclasses should override for any actions to run.\n\n Args:\n logs: Dict. Currently the output of the last call to\n `on_test_batch_end()` is passed to this argument for this method\n but that may change in the future.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n def on_predict_begin(self, logs=None):\n \"\"\"Called at the beginning of prediction.\n\n Subclasses should override for any actions to run.\n\n Args:\n logs: Dict. Currently no data is passed to this argument for this method\n but that may change in the future.\n \"\"\"\n\n @doc_controls.for_subclass_implementers\n def on_predict_end(self, logs=None):\n \"\"\"Called at the end of prediction.\n\n Subclasses should override for any actions to run.\n\n Args:\n logs: Dict. 
Currently no data is passed to this argument for this method\n but that may change in the future.\n \"\"\"\n\n def _implements_train_batch_hooks(self):\n \"\"\"Determines if this Callback should be called for each train batch.\"\"\"\n return not generic_utils.is_default(self.on_batch_begin) or not generic_utils.is_default(self.on_batch_end) or (not generic_utils.is_default(self.on_train_batch_begin)) or (not generic_utils.is_default(self.on_train_batch_end))\n\n def _implements_test_batch_hooks(self):\n \"\"\"Determines if this Callback should be called for each test batch.\"\"\"\n return not generic_utils.is_default(self.on_test_batch_begin) or not generic_utils.is_default(self.on_test_batch_end)\n\n def _implements_predict_batch_hooks(self):\n \"\"\"Determines if this Callback should be called for each predict batch.\"\"\"\n return not generic_utils.is_default(self.on_predict_batch_begin) or not generic_utils.is_default(self.on_predict_batch_end)", "docstring": "Abstract base class used to build new callbacks.\n\nCallbacks can be passed to keras methods such as `fit`, `evaluate`, and\n`predict` in order to hook into the various stages of the model training and\ninference lifecycle.\n\nTo create a custom callback, subclass `keras.callbacks.Callback` and override\nthe method associated with the stage of interest. See\nhttps://www.tensorflow.org/guide/keras/custom_callback for more information.\n\nExample:\n\n>>> training_finished = False\n>>> class MyCallback(tf.keras.callbacks.Callback):\n... def on_train_end(self, logs=None):\n... global training_finished\n... training_finished = True\n>>> model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])\n>>> model.compile(loss='mean_squared_error')\n>>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),\n... callbacks=[MyCallback()])\n>>> assert training_finished == True\n\nIf you want to use `Callback` objects in a custom training loop:\n\n1. You should pack all your callbacks into a single `callbacks.CallbackList`\n so they can all be called together.\n2. You will need to manually call all the `on_*` methods at the appropriate\n locations in your loop. Like this:\n\n ```\n callbacks = tf.keras.callbacks.CallbackList([...])\n callbacks.append(...)\n\n callbacks.on_train_begin(...)\n for epoch in range(EPOCHS):\n callbacks.on_epoch_begin(epoch)\n for i, data in dataset.enumerate():\n callbacks.on_train_batch_begin(i)\n batch_logs = model.train_step(data)\n callbacks.on_train_batch_end(i, batch_logs)\n epoch_logs = ...\n callbacks.on_epoch_end(epoch, epoch_logs)\n final_logs=...\n callbacks.on_train_end(final_logs)\n ```\n\nAttributes:\n params: Dict. Training parameters (eg. verbosity, batch size, number of\n epochs...).\n model: Instance of `keras.models.Model`. 
Reference of the model being\n trained.\n\nThe `logs` dictionary that callback methods\ntake as argument will contain keys for quantities relevant to\nthe current batch or epoch (see method-specific docstrings)."} +{"repo": "tensorflow", "function": "def start_server_on_separate_thread(dump_to_filesystem=True, server_start_delay_sec=0.0, poll_server=False, blocking=True, toggle_watch_on_core_metadata=None):\n server_port = portpicker.pick_unused_port()\n debug_server_url = 'grpc://localhost:%d' % server_port\n server_dump_dir = tempfile.mkdtemp() if dump_to_filesystem else None\n server = EventListenerTestServicer(server_port=server_port, dump_dir=server_dump_dir, toggle_watch_on_core_metadata=toggle_watch_on_core_metadata)\n\n def delay_then_run_server():\n time.sleep(server_start_delay_sec)\n server.run_server(blocking=blocking)\n server_thread = threading.Thread(target=delay_then_run_server)\n server_thread.start()\n if poll_server:\n if not _poll_server_till_success(50, 0.2, debug_server_url, server_dump_dir, server, gpu_memory_fraction=0.1):\n raise ValueError('Failed to start test gRPC debug server at port %d' % server_port)\n server.clear_data()\n return (server_port, debug_server_url, server_dump_dir, server_thread, server)", "docstring": "Create a test gRPC debug server and run on a separate thread.\n\nArgs:\n dump_to_filesystem: (bool) whether the debug server will dump debug data\n to the filesystem.\n server_start_delay_sec: (float) amount of time (in sec) to delay the server\n start up for.\n poll_server: (bool) whether the server will be polled till success on\n startup.\n blocking: (bool) whether the server should be started in a blocking mode.\n toggle_watch_on_core_metadata: A list of\n (node_name, output_slot, debug_op) tuples to toggle the\n watchpoint status during the on_core_metadata calls (optional).\n\nReturns:\n server_port: (int) Port on which the server runs.\n debug_server_url: (str) grpc:// URL to the server.\n server_dump_dir: (str) The debug server's dump directory.\n server_thread: The server Thread object.\n server: The `EventListenerTestServicer` object.\n\nRaises:\n ValueError: If polling the server process for ready state is not successful\n within maximum polling count."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_pixel_values: Optional[torch.FloatTensor]=None, image_sizes: Optional[torch.LongTensor]=None, image_attention_mask=None, audio_input_features: Optional[torch.FloatTensor]=None, audio_embed_sizes=None, audio_attention_mask=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs) -> CausalLMOutputWithPast:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, image_pixel_values=image_pixel_values, image_sizes=image_sizes, 
image_attention_mask=image_attention_mask, audio_input_features=audio_input_features, audio_embed_sizes=audio_embed_sizes, audio_attention_mask=audio_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs)\n hidden_states = outputs.last_hidden_state\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n loss = None\n if labels is not None:\n loss = self.loss_function(logits, labels, self.vocab_size)\n return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "image_pixel_values (`torch.FloatTensor`, *optional*):\n If the input contains images, these correspond to the pixel values after transformations (as returned by\n the Processor)\nimage_sizes (`torch.LongTensor`, *optional*):\n If the input contains images, these correspond to size of each image.\nimage_attention_mask (`torch.LongTensor`, *optional*):\n Attention mask for the images.\naudio_input_features (`torch.FloatTensor`, *optional*):\n If the input contains audio samples, these correspond to the values after transformation (as returned by\n the Processor).\naudio_embed_sizes (`torch.Tensor`, *optional*):\n Size of the audio inputs.\naudio_attention_mask (`torch.Tensor, *optional*):\n Attention mask for the audio inputs.\nlabels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n```python\n>>> from transformers import AutoTokenizer, Phi4MultimodalForCausalLM\n>>> model = Phi4MultimodalForCausalLM.from_pretrained(\"TBA\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"TBA\")\n>>> prompt = \"This is an example script .\"\n>>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n>>> # Generate\n>>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n'This is an example script .\\n Certainly! Below is a sample script that demonstrates a simple task, such as calculating the sum'\n```"} +{"repo": "mobly", "function": "def _rpc(self, rpc_func_name, *args, **kwargs):\n try:\n self.check_server_proc_running()\n except Exception:\n self.log.error('Server process running check failed, skip sending RPC method(%s).', rpc_func_name)\n raise\n with self._lock:\n rpc_id = next(self._counter)\n request = self._gen_rpc_request(rpc_id, rpc_func_name, *args, **kwargs)\n self.log.debug('Sending RPC request %s.', request)\n response = self.send_rpc_request(request)\n self.log.debug('RPC request sent.')\n if self.verbose_logging or _MAX_RPC_RESP_LOGGING_LENGTH >= len(response):\n self.log.debug('Snippet received: %s', response)\n else:\n self.log.debug('Snippet received: %s... 
%d chars are truncated', response[:_MAX_RPC_RESP_LOGGING_LENGTH], len(response) - _MAX_RPC_RESP_LOGGING_LENGTH)\n response_decoded = self._decode_response_string_and_validate_format(rpc_id, response)\n return self._handle_rpc_response(rpc_func_name, response_decoded)", "docstring": "Sends an RPC to the server.\n\nArgs:\n rpc_func_name: str, the name of the snippet function to execute on the\n server.\n *args: any, the positional arguments of the RPC request.\n **kwargs: any, the keyword arguments of the RPC request.\n\nReturns:\n The result of the RPC.\n\nRaises:\n errors.ProtocolError: something went wrong when exchanging data with the\n server.\n errors.ApiError: the RPC went through, however executed with errors."} +{"repo": "transformers", "function": "class TFMaskedLMOutput(ModelOutput):\n loss: tf.Tensor | None = None\n logits: Optional[tf.Tensor] = None\n hidden_states: Tuple[tf.Tensor] | None = None\n attentions: Tuple[tf.Tensor] | None = None", "docstring": "Base class for masked language models outputs.\n\nArgs:\n loss (`tf.Tensor` of shape `(n,)`, *optional*, where n is the number of non-masked labels, returned when `labels` is provided):\n Masked language modeling (MLM) loss.\n logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "transformers", "function": "class ImageClassifierOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Base class for outputs of image classification models.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. 
Hidden-states\n (also called feature maps) of the model at the output of each stage.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if attention_mask is not None:\n attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n position_bias = self.embed_positions(hidden_states)\n synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n if head_mask is not None:\n if head_mask.size()[0] != len(self.layers):\n raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n skip_the_layer = False\n if self.training:\n dropout_probability = torch.rand([])\n skip_the_layer = dropout_probability < self.layerdrop\n if not skip_the_layer or synced_gpus:\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, position_bias, output_attentions)\n else:\n layer_outputs = encoder_layer(hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if skip_the_layer:\n layer_outputs = (None, None)\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[1],)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions)", "docstring": "Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):\n Features extracted from the speech or text input by the encoder prenet.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in\n `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple."} +{"repo": "tensorflow", "function": "def _convert_to_sparse_tensor(sp_input):\n if isinstance(sp_input, sparse_tensor.SparseTensorValue):\n return sparse_tensor.SparseTensor.from_value(sp_input)\n if not isinstance(sp_input, sparse_tensor.SparseTensor):\n raise TypeError('Input must be a SparseTensor.')\n return sp_input", "docstring": "Convert `sp_input` to `SparseTensor` and return it.\n\nArgs:\n sp_input: `SparseTensor` or `SparseTensorValue`.\n\nReturns:\n `sp_input` converted to `SparseTensor`.\n\nRaises:\n ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`."} +{"repo": "tensorflow", "function": "def fuse_resize_and_conv(input_graph_def: graph_pb2.GraphDef, output_node_names: Sequence[str]) -> graph_pb2.GraphDef:\n input_node_map = {}\n for node in input_graph_def.node:\n if node.name not in input_node_map:\n input_node_map[node.name] = node\n else:\n raise ValueError('Duplicate node names detected for ', node.name)\n node_reference_count = collections.defaultdict(int)\n for node in input_graph_def.node:\n for input_name in node.input:\n stripped_name = node_name_from_input(input_name)\n node_reference_count[stripped_name] += 1\n for output_name in output_node_names:\n node_reference_count[output_name] += 1\n new_ops = []\n for node in input_graph_def.node:\n if node.op != 'Conv2D':\n continue\n conv_op = node\n input_op = node_from_map(input_node_map, conv_op.input[0])\n if input_op.op == 'MirrorPad':\n mirror_pad_op = input_op\n resize_op = node_from_map(input_node_map, mirror_pad_op.input[0])\n if resize_op.op != 'ResizeBilinear':\n resize_op = None\n else:\n mirror_pad_op = None\n if input_op.op == 'ResizeBilinear':\n resize_op = input_op\n else:\n resize_op = None\n if not mirror_pad_op and (not resize_op):\n continue\n node_reference_count[conv_op.name] = 0\n if mirror_pad_op:\n node_reference_count[mirror_pad_op.name] -= 1\n if resize_op:\n node_reference_count[resize_op.name] -= 1\n fused_conv_op = node_def_pb2.NodeDef()\n if resize_op:\n fused_conv_op.op = 'FusedResizeAndPadConv2D'\n else:\n fused_conv_op.op = 'FusedPadConv2D'\n fused_conv_op.name = conv_op.name\n if mirror_pad_op:\n mirror_paddings_name = mirror_pad_op.input[1]\n mirror_paddings_mode = mirror_pad_op.attr['mode']\n else:\n paddings_op = node_def_pb2.NodeDef()\n paddings_op.op = 'Const'\n paddings_op.name = conv_op.name + '_dummy_paddings'\n paddings_op.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum))\n paddings_op.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto([0, 0, 0, 0, 0, 0, 0, 0], dtypes.int32, [4, 2])))\n 
new_ops.extend([paddings_op])\n mirror_paddings_name = paddings_op.name\n mirror_paddings_mode = attr_value_pb2.AttrValue(s=b'REFLECT')\n if resize_op:\n fused_conv_op.input.extend([resize_op.input[0], resize_op.input[1], mirror_paddings_name, conv_op.input[1]])\n fused_conv_op.attr['resize_align_corners'].CopyFrom(resize_op.attr['align_corners'])\n else:\n fused_conv_op.input.extend([mirror_pad_op.input[0], mirror_paddings_name, conv_op.input[1]])\n fused_conv_op.attr['T'].CopyFrom(conv_op.attr['T'])\n fused_conv_op.attr['mode'].CopyFrom(mirror_paddings_mode)\n fused_conv_op.attr['strides'].CopyFrom(conv_op.attr['strides'])\n fused_conv_op.attr['padding'].CopyFrom(conv_op.attr['padding'])\n new_ops.extend([fused_conv_op])\n result_graph_def = graph_pb2.GraphDef()\n for node in input_graph_def.node:\n if node_reference_count[node.name] < 1:\n continue\n new_node = node_def_pb2.NodeDef()\n new_node.CopyFrom(node)\n result_graph_def.node.extend([new_node])\n result_graph_def.node.extend(new_ops)\n return result_graph_def", "docstring": "Merges preceding resize and mirror pad ops into a specialized convolution.\n\nThere's a common pattern of enlarging the input to a convolution using a\nresize operation, and also using MirrorPad to extend the boundaries to that\nzero edge pixels don't bleed inwards when convolving. This routine looks for\nthat pattern of operations, and fuses them together into a Conv2DWithResizeOp.\n\nArgs:\n input_graph_def: A GraphDef containing a model.\n output_node_names: A list of names of the nodes that produce the final\n results.\n\nReturns:\n Modified graph with resize and pad ops merged.\n\nRaises:\n ValueError: If the graph is badly formed with duplicate node names."} +{"repo": "tensorflow", "function": "def py_func_from_autograph(python_func, autograph_options=None):\n _, original_func = tf_decorator.unwrap(python_func)\n\n def autograph_handler(*args, **kwargs):\n \"\"\"Calls a converted version of original_func.\"\"\"\n try:\n return api.converted_call(original_func, args, kwargs, options=converter.ConversionOptions(recursive=True, optional_features=autograph_options, user_requested=True))\n except Exception as e:\n if hasattr(e, 'ag_error_metadata'):\n raise e.ag_error_metadata.to_exception(e)\n else:\n raise\n converted_func = tf_decorator.make_decorator(original_func, autograph_handler)\n return tf_decorator.rewrap(python_func, original_func, converted_func)", "docstring": "Compile a python function using autograph, for use with FuncGraph.\n\nArgs:\n python_func: the Python function to compile.\n autograph_options: additional knobs to control when `autograph=True`.\n See https://www.tensorflow.org/guide/autograph for more information.\nReturns:\n python_func, converted using autograph."} +{"repo": "transformers", "function": "class ChineseCLIPVisionEncoder(nn.Module):\n\n def __init__(self, config: ChineseCLIPConfig):\n super().__init__()\n self.config = config\n self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(self, inputs_embeds, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n \"\"\"\n Args:\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert 
`input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n hidden_states = inputs_embeds\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, output_attentions)\n else:\n layer_outputs = encoder_layer(hidden_states, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`ChineseCLIPVisionEncoderLayer`].\n\nArgs:\n config: ChineseCLIPConfig"} +{"repo": "transformers", "function": "def convert_old_keys_to_new_keys(state_dict_keys: Optional[dict]=None):\n output_dict = {}\n if state_dict_keys is not None:\n old_text = '\\n'.join(state_dict_keys)\n new_text = old_text\n for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():\n if replacement is None:\n new_text = re.sub(pattern, '', new_text)\n continue\n new_text = re.sub(pattern, replacement, new_text)\n output_dict = dict(zip(old_text.split('\\n'), new_text.split('\\n')))\n return output_dict", "docstring": "Converts old keys to new keys using the mapping and dynamically removes the 'ijepa.' 
prefix if necessary.\n\nArgs:\n state_dict_keys (dict): The keys from the state_dict to convert.\n\nReturns:\n dict: A mapping from old keys to new keys."} +{"repo": "tensorflow", "function": "def get_entries(attr_name):\n assert attr_name in ['inputs', 'outputs']\n entries = {}\n for op_type in ops._gradient_registry.list():\n if op_type in _EXCLUDED_OPS:\n continue\n num_values = _get_num_inputs_outputs(op_type)[0 if attr_name == 'inputs' else 1]\n gradient_fn = ops._gradient_registry.lookup(op_type)\n if gradient_fn is None:\n if num_values != -1:\n entries[op_type] = '{\"%s\"},' % op_type\n continue\n used_tensors = _live_tensors(gradient_fn, attr_name=attr_name)\n if used_tensors is _ALL:\n continue\n elif not used_tensors:\n entries[op_type] = '{\"%s\"},' % op_type\n else:\n all_tensors = set(range(num_values))\n unused_tensors = all_tensors - used_tensors\n if unused_tensors:\n unused_tensor_list = sorted(list(unused_tensors))\n entries[op_type] = '{\"%s\", %d, {%s}},' % (op_type, len(unused_tensor_list), ', '.join((str(i) for i in unused_tensor_list)))\n return entries", "docstring": "Returns the dict of entries.\n\nEach entry is of the form {op_name, {true|false, indices}}\n\ntrue: All values are unused.\nfalse: `indices` are the only unused indices.\n\nNote: ops for which all values are used are not printed.\n\nArgs:\n attr_name: inputs or outputs.\n\nReturns:\n A dict from op_type to formatted entry in the dict."} +{"repo": "tensorflow", "function": "def __init__(self, feature_config: Union[tpu_embedding_v2_utils.FeatureConfig, Iterable], optimizer: Optional[tpu_embedding_v2_utils._Optimizer], pipeline_execution_with_tensor_core: bool=False):\n self._strategy = distribute_lib.get_strategy()\n self._using_tpu = isinstance(self._strategy, (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV2))\n self._pipeline_execution_with_tensor_core = pipeline_execution_with_tensor_core\n self._feature_config = feature_config\n self._output_shapes = []\n for feature in nest.flatten(feature_config):\n self._output_shapes.append(feature.output_shape)\n device_assignment = getattr(self._strategy.extended, '_device_assignment', None)\n self._num_cores_per_replica = device_assignment.num_cores_per_replica if device_assignment else None\n self._table_config = []\n for feature in nest.flatten(feature_config):\n if feature.table not in self._table_config:\n self._table_config.append(feature.table)\n table_names = []\n for i, table in enumerate(self._table_config):\n if table.optimizer is None:\n table.optimizer = optimizer\n if (table.optimizer is not None or self._using_tpu) and (not isinstance(table.optimizer, tpu_embedding_v2_utils._Optimizer)):\n raise ValueError('{} is an unsupported optimizer class. Please pass an instance of one of the optimizer classes under tf.tpu.experimental.embedding.'.format(type(table.optimizer)))\n if table.name is None:\n table.name = 'table_{}'.format(i)\n if table.name in table_names:\n raise ValueError(f'Tables must have a unique name. 
Multiple tables with name {table.name} found.')\n table_names.append(table.name)\n if self._using_tpu:\n self._dynamic_learning_rates = []\n for table in self._table_config:\n if callable(table.optimizer.learning_rate) and table.optimizer.learning_rate not in self._dynamic_learning_rates:\n self._dynamic_learning_rates.append(table.optimizer.learning_rate)\n self._hosts = tpu_embedding_v2_utils.get_list_of_hosts(self._strategy)\n self._built = False\n self._verify_output_shapes_on_enqueue = True", "docstring": "Creates the TPUEmbedding mid level API object.\n\n```python\nstrategy = tf.distribute.TPUStrategy(...)\nwith strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(\n feature_config=tf.tpu.experimental.embedding.FeatureConfig(\n table=tf.tpu.experimental.embedding.TableConfig(\n dim=...,\n vocabulary_size=...)))\n```\n\nArgs:\n feature_config: A nested structure of\n `tf.tpu.experimental.embedding.FeatureConfig` configs.\n optimizer: An instance of one of `tf.tpu.experimental.embedding.SGD`,\n `tf.tpu.experimental.embedding.Adagrad` or\n `tf.tpu.experimental.embedding.Adam`. When not created under\n TPUStrategy may be set to None to avoid the creation of the optimizer\n slot variables, useful for optimizing memory consumption when exporting\n the model for serving where slot variables aren't needed.\n pipeline_execution_with_tensor_core: If True, the TPU embedding\n computations will overlap with the TensorCore computations (and hence\n will be one step old). Set to True for improved performance.\n\nRaises:\n ValueError: If optimizer is not one of tf.tpu.experimental.embedding.(SGD,\n Adam or Adagrad) or None when created under a TPUStrategy."} +{"repo": "qhbm-library", "function": "def vqt(input_qhbm: qhbm.QHBM, target_hamiltonian: Union[tf.Tensor, hamiltonian.Hamiltonian], beta: tf.Tensor):\n\n def f_vqt(bitstrings):\n h_expectations = tf.squeeze(input_qhbm.q_inference.expectation(bitstrings, target_hamiltonian), 1)\n beta_h_expectations = beta * h_expectations\n energies = tf.stop_gradient(input_qhbm.e_inference.energy(bitstrings))\n return beta_h_expectations - energies\n average_expectation = input_qhbm.e_inference.expectation(f_vqt)\n current_partition = tf.stop_gradient(input_qhbm.e_inference.log_partition())\n return average_expectation - current_partition", "docstring": "Computes the VQT loss of a given QHBM and Hamiltonian.\n\nThis function is differentiable within a `tf.GradientTape` scope.\n\nArgs:\n input_qhbm: Inference methods for the model.\n target_hamiltonian: The Hamiltonian whose thermal state is to be learned. 
If\n it is a `tf.Tensor`, it is of type `tf.string` with shape [1], result of\n calling `tfq.convert_to_tensor` on a list of `cirq.PauliSum`, `[op]`.\n Otherwise, a Hamiltonian.\n beta: A scalar `tf.Tensor` which is the inverse temperature at which the\n loss is calculated.\n\nReturns:\n The VQT loss."} +{"repo": "pytype", "function": "class ErrorMatcher:\n ERROR_RE = re.compile('^(?P(\\\\w+-)+\\\\w+)(\\\\[(?P.+)\\\\])?((?P([!=]=|[<>]=?))(?P\\\\d+\\\\.\\\\d+))?$')\n\n def __init__(self, src):\n self.errorlog = None\n self.marks = None\n self.expected = self._parse_comments(src)\n\n def _fail(self, msg):\n if self.marks:\n self.errorlog.print_to_stderr()\n raise AssertionError(msg)\n\n def has_error(self):\n return self.errorlog and self.errorlog.has_error()\n\n def assert_errors_match_expected(self, errorlog):\n \"\"\"Matches expected errors against the errorlog, populating self.marks.\"\"\"\n\n def _format_error(line, code, mark=None):\n formatted = 'Line %d: %s' % (line, code)\n if mark:\n formatted += f'[{mark}]'\n return formatted\n self.errorlog = errorlog\n self.marks = {}\n expected = copy.deepcopy(self.expected)\n for error in self.errorlog.unique_sorted_errors():\n errs = expected[error.line]\n for i, (code, mark) in enumerate(errs):\n if code == error.name:\n if mark:\n self.marks[mark] = error\n del errs[i]\n break\n else:\n if errs:\n code, mark = errs[0]\n exp = _format_error(error.line, code, mark)\n actual = _format_error(error.line, error.name)\n self._fail(f'Error does not match:\\nExpected: {exp}\\nActual: {actual}')\n else:\n self._fail(f'Unexpected error:\\n{error}')\n leftover_errors = []\n for line in sorted(expected):\n leftover_errors.extend((_format_error(line, code, mark) for code, mark in expected[line]))\n if leftover_errors:\n self._fail('Errors not found:\\n' + '\\n'.join(leftover_errors))\n\n def _assert_error_messages(self, matchers):\n \"\"\"Assert error messages.\"\"\"\n assert self.marks is not None\n for mark, error in self.marks.items():\n try:\n matcher = matchers.pop(mark)\n except KeyError:\n self._fail(f'No matcher for mark {mark}')\n if not matcher.match(error.message):\n self._fail('Bad error message for mark %s: expected %r, got %r' % (mark, matcher, error.message))\n if matchers:\n self._fail(f'Marks not found in code: {', '.join(matchers)}')\n\n def assert_diagnostic_messages(self, matchers):\n \"\"\"Assert error messages.\"\"\"\n assert self.marks is not None\n for mark, error in self.marks.items():\n try:\n matcher = matchers.pop(mark)\n except KeyError:\n self._fail(f'No matcher for mark {mark}')\n if isinstance(matcher, str):\n match = matcher.__eq__\n else:\n match = matcher.match\n error_as_string = error.as_string()\n if not match(error_as_string):\n self._fail('Bad error message for mark %s: expected %r, got %r' % (mark, matcher, error_as_string))\n if matchers:\n self._fail(f'Marks not found in code: {', '.join(matchers)}')\n\n def assert_error_regexes(self, expected_regexes):\n matchers = {k: RegexMatcher(v) for k, v in expected_regexes.items()}\n self._assert_error_messages(matchers)\n\n def assert_error_sequences(self, expected_sequences):\n matchers = {k: SequenceMatcher(v) for k, v in expected_sequences.items()}\n self._assert_error_messages(matchers)\n\n def assert_diagnostic_regexes(self, expected_diagnostic_regexes):\n matchers = {k: RegexMatcher(v) for k, v in expected_diagnostic_regexes.items()}\n self.assert_diagnostic_messages(matchers)\n\n def _parse_comment(self, comment):\n comment = comment.strip()\n error_match = 
self.ERROR_RE.fullmatch(comment)\n if not error_match:\n return None\n version_cmp = error_match.group('cmp')\n if version_cmp:\n version = utils.version_from_string(error_match.group('version'))\n actual_version = sys.version_info[:2]\n if not slots.COMPARES[version_cmp](actual_version, version):\n return None\n return (error_match.group('code'), error_match.group('mark'))\n\n def _parse_comments(self, src):\n \"\"\"Parse comments.\"\"\"\n src = io.StringIO(src)\n expected = collections.defaultdict(list)\n used_marks = set()\n for tok, s, (line, _), _, _ in tokenize.generate_tokens(src.readline):\n if tok != tokenize.COMMENT:\n continue\n for comment in s.split('#'):\n parsed_comment = self._parse_comment(comment)\n if parsed_comment is None:\n continue\n code, mark = parsed_comment\n if mark:\n if mark in used_marks:\n self._fail(f'Mark {mark} already used')\n used_marks.add(mark)\n expected[line].append((code, mark))\n return expected", "docstring": "An ErrorLog matcher to help with test assertions.\n\nTakes the source code as an init argument, and constructs two dictionaries\nholding parsed comment directives.\n\nAttributes:\n errorlog: The errorlog being matched against\n marks: { mark_name : errors.Error object }\n expected: { line number : sequence of expected error codes and mark names }\n\nAdds an assertion matcher to match errorlog.errors against a list of expected\nerrors of the form [(line number, error code, message regex)].\n\nSee tests/test_base_test.py for usage examples."} +{"repo": "fhir-py", "function": "def get_source_code_system(enum_value_descriptor: descriptor.EnumValueDescriptor) -> Optional[str]:\n return get_value_for_annotation_extension(enum_value_descriptor, annotations_pb2.source_code_system)", "docstring": "Returns the value associated with the source_code_system annotation.\n\nArgs:\n enum_value_descriptor: An EnumValueDescriptor describing a FHIR Code for\n which to return a source code system.\n\nReturns:\n The string value of the source code system, if one exists. 
Otherwise,\n returns None.\n\nRaises:\n ValueError: Unable to retrieve options for type: ."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_router_logits: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[KwargsForCausalLM]) -> MoeCausalLMOutputWithPast:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_router_logits = output_router_logits if output_router_logits is not None else self.config.output_router_logits\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs: MoeModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_router_logits=output_router_logits, cache_position=cache_position, **kwargs)\n hidden_states = outputs.last_hidden_state\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n loss = None\n if labels is not None:\n loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)\n aux_loss = None\n if output_router_logits:\n aux_loss = load_balancing_loss_func(outputs.router_logits, self.num_experts, self.num_experts_per_tok, attention_mask)\n if labels is not None:\n loss += self.router_aux_loss_coef * aux_loss.to(loss.device)\n return MoeCausalLMOutputWithPast(loss=loss, aux_loss=aux_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, router_logits=outputs.router_logits)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, MixtralForCausalLM\n\n>>> model = MixtralForCausalLM.from_pretrained(\"mistralai/Mixtral-8x7B-v0.1\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"mistralai/Mixtral-8x7B-v0.1\")\n\n>>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n>>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n>>> # Generate\n>>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n\"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n```"} +{"repo": "transformers", "function": "def bloom_gelu_back(g: torch.Tensor, x: torch.Tensor) -> torch.Tensor:\n x = x[0]\n tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))\n ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)\n return ff * g", "docstring": "gradient of tanh approximation of gelu gradient of actual gelu is: 0.5 * (1. + torch.erf(x * 0.70710678)) +\n0.3989423 * x * torch.exp(-0.5 * x * x)\n\nArgs:\n g (`torch.tensor`):\n gradient output tensor\n x (`torch.tensor`):\n input tensor"} +{"repo": "beam", "function": "def set_options(cls, pipeline_options):\n cls._pipeline_options = pipeline_options", "docstring": "Set filesystem options.\n\nArgs:\n pipeline_options: Instance of ``PipelineOptions``."} +{"repo": "transformers", "function": "def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=-1, seq_length: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[str, Any]:\n input_dict = super().generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)\n box = [48, 84, 73, 128]\n if not framework == TensorType.PYTORCH:\n raise NotImplementedError('Exporting LayoutLM to ONNX is currently only supported for PyTorch.')\n if not is_torch_available():\n raise ValueError('Cannot generate dummy inputs without PyTorch installed.')\n import torch\n batch_size, seq_length = input_dict['input_ids'].shape\n input_dict['bbox'] = torch.tensor([*[box] * seq_length]).tile(batch_size, 1, 1)\n return input_dict", "docstring": "Generate inputs to provide to the ONNX exporter for the specific framework\n\nArgs:\n tokenizer: The tokenizer associated with this model configuration\n batch_size: The batch size (int) to export the model for (-1 means dynamic axis)\n seq_length: The sequence length (int) to export the model for (-1 means dynamic axis)\n is_pair: Indicate if the input is a pair (sentence 1, sentence 2)\n framework: The framework (optional) the tokenizer will generate tensor for\n\nReturns:\n Mapping[str, Tensor] holding the kwargs to provide to the model's forward function"} +{"repo": "transformers", "function": "class MaskFormerForInstanceSegmentationOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n class_queries_logits: Optional[torch.FloatTensor] = None\n masks_queries_logits: Optional[torch.FloatTensor] = None\n auxiliary_logits: Optional[torch.FloatTensor] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n pixel_decoder_last_hidden_state: Optional[torch.FloatTensor] = None\n transformer_decoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n pixel_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n transformer_decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Class for outputs of [`MaskFormerForInstanceSegmentation`].\n\nThis output can be directly passed to [`~MaskFormerImageProcessor.post_process_semantic_segmentation`] or or\n[`~MaskFormerImageProcessor.post_process_instance_segmentation`] or\n[`~MaskFormerImageProcessor.post_process_panoptic_segmentation`] depending on the task. 
Please, see\n[`~MaskFormerImageProcessor] for details regarding usage.\n\nArgs:\n loss (`torch.Tensor`, *optional*):\n The computed loss, returned when labels are present.\n class_queries_logits (`torch.FloatTensor`):\n A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each\n query. Note the `+ 1` is needed because we incorporate the null class.\n masks_queries_logits (`torch.FloatTensor`):\n A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each\n query.\n encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Last hidden states (final feature map) of the last stage of the encoder model (backbone).\n pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Last hidden states (final feature map) of the last stage of the pixel decoder model (FPN).\n transformer_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Last hidden states (final feature map) of the last stage of the transformer decoder model.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the encoder\n model at the output of each stage.\n pixel_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, num_channels, height, width)`. Hidden-states (also called feature maps) of the pixel\n decoder model at the output of each stage.\n transformer_decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the transformer decoder at the output\n of each stage.\n hidden_states `tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` containing `encoder_hidden_states`, `pixel_decoder_hidden_states` and\n `decoder_hidden_states`.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. 
Attentions weights from Detr's decoder after the attention softmax, used to compute the\n weighted average in the self-attention heads."} +{"repo": "tensorflow", "function": "def __init__(self, on_ui_exit=None, config=None):\n self._on_ui_exit = on_ui_exit\n self._command_handler_registry = debugger_cli_common.CommandHandlerRegistry()\n self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()\n self._tab_completion_registry.register_tab_comp_context([''], self.CLI_EXIT_COMMANDS + [debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] + debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)\n self._config = config or cli_config.CLIConfig()\n self._config_argparser = argparse.ArgumentParser(description='config command', usage=argparse.SUPPRESS)\n subparsers = self._config_argparser.add_subparsers()\n set_parser = subparsers.add_parser('set')\n set_parser.add_argument('property_name', type=str)\n set_parser.add_argument('property_value', type=str)\n set_parser = subparsers.add_parser('show')\n self.register_command_handler('config', self._config_command_handler, self._config_argparser.format_help(), prefix_aliases=['cfg'])", "docstring": "Constructor of the base class.\n\nArgs:\n on_ui_exit: (`Callable`) the callback to be called when the UI exits.\n config: An instance of `cli_config.CLIConfig()` carrying user-facing\n configurations."} +{"repo": "transformers", "function": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. PLBart does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of zeros."} +{"repo": "tensorflow", "function": "def get_v2_optimizer(name, **kwargs):\n try:\n return _V2_OPTIMIZER_MAP[name](**kwargs)\n except KeyError:\n raise ValueError('Could not find requested v2 optimizer: {}\\nValid choices: {}'.format(name, list(_V2_OPTIMIZER_MAP.keys())))", "docstring": "Get the v2 optimizer requested.\n\nThis is only necessary until v2 are the default, as we are testing in Eager,\nand Eager + v1 optimizers fail tests. When we are in v2, the strings alone\nshould be sufficient, and this mapping can theoretically be removed.\n\nArgs:\n name: string name of Keras v2 optimizer.\n **kwargs: any kwargs to pass to the optimizer constructor.\n\nReturns:\n Initialized Keras v2 optimizer.\n\nRaises:\n ValueError: if an unknown name was passed."} +{"repo": "keras", "function": "class Concatenate(Merge):\n\n def __init__(self, axis=-1, **kwargs):\n super().__init__(**kwargs)\n self.axis = axis\n self.supports_masking = True\n self._reshape_required = False\n\n def build(self, input_shape):\n if len(input_shape) < 1 or not isinstance(input_shape[0], (tuple, list)):\n raise ValueError(f'A `Concatenate` layer should be called on a list of at least 1 input. 
Received: input_shape={input_shape}')\n if all((shape is None for shape in input_shape)):\n return\n reduced_inputs_shapes = [list(shape) for shape in input_shape]\n reduced_inputs_shapes_copy = copy.copy(reduced_inputs_shapes)\n shape_set = set()\n for i in range(len(reduced_inputs_shapes_copy)):\n concat_axis = self.axis % len(reduced_inputs_shapes_copy[i])\n for axis, axis_value in enumerate(reduced_inputs_shapes_copy, start=1):\n if axis != concat_axis and axis_value == 1:\n del reduced_inputs_shapes[i][axis]\n if len(reduced_inputs_shapes[i]) > self.axis:\n del reduced_inputs_shapes[i][self.axis]\n shape_set.add(tuple(reduced_inputs_shapes[i]))\n if len(shape_set) != 1:\n err_msg = f'A `Concatenate` layer requires inputs with matching shapes except for the concatenation axis. Received: input_shape={input_shape}'\n ranks = set((len(shape) for shape in shape_set))\n if len(ranks) != 1:\n raise ValueError(err_msg)\n rank, = ranks\n for axis in range(rank):\n unique_dims = set((shape[axis] for shape in shape_set if shape[axis] is not None))\n if len(unique_dims) > 1:\n raise ValueError(err_msg)\n\n def _merge_function(self, inputs):\n return ops.concatenate(inputs, axis=self.axis)\n\n def compute_output_shape(self, input_shape):\n if not isinstance(input_shape, (tuple, list)) or not isinstance(input_shape[0], (tuple, list)):\n raise ValueError(f'A `Concatenate` layer should be called on a list of inputs. Received: input_shape={input_shape}')\n input_shapes = input_shape\n output_shape = list(input_shapes[0])\n for shape in input_shapes[1:]:\n if output_shape[self.axis] is None or shape[self.axis] is None:\n output_shape[self.axis] = None\n break\n output_shape[self.axis] += shape[self.axis]\n return tuple(output_shape)\n\n def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n if not isinstance(mask, (tuple, list)):\n raise ValueError(f'`mask` should be a list. Received mask={mask}')\n if not isinstance(inputs, (tuple, list)):\n raise ValueError(f'`inputs` should be a list. Received: inputs={inputs}')\n if len(mask) != len(inputs):\n raise ValueError(f'The lists `inputs` and `mask` should have the same length. 
Received: inputs={inputs} of length {len(inputs)}, and mask={mask} of length {len(mask)}')\n if all((m is None for m in mask)):\n return None\n masks = []\n for input_i, mask_i in zip(inputs, mask):\n if mask_i is None:\n masks.append(ops.ones_like(input_i, dtype='bool'))\n elif mask_i.ndim < input_i.ndim:\n masks.append(ops.broadcast_to(ops.expand_dims(mask_i, axis=-1), ops.shape(input_i)))\n else:\n masks.append(mask_i)\n concatenated = ops.concatenate(masks, axis=self.axis)\n return ops.any(concatenated, axis=-1, keepdims=False)\n\n def get_config(self):\n config = {'axis': self.axis}\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))", "docstring": "Concatenates a list of inputs.\n\nIt takes as input a list of tensors, all of the same shape except\nfor the concatenation axis, and returns a single tensor that is the\nconcatenation of all inputs.\n\nExamples:\n\n>>> x = np.arange(20).reshape(2, 2, 5)\n>>> y = np.arange(20, 30).reshape(2, 1, 5)\n>>> keras.layers.Concatenate(axis=1)([x, y])\n\nUsage in a Keras model:\n\n>>> x1 = keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n>>> x2 = keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n>>> y = keras.layers.Concatenate()([x1, x2])\n\nArgs:\n axis: Axis along which to concatenate.\n **kwargs: Standard layer keyword arguments.\n\nReturns:\n A tensor, the concatenation of the inputs alongside axis `axis`."} +{"repo": "tensorflow", "function": "def log_loss(labels, predictions, weights=1.0, epsilon=1e-07, scope=None, loss_collection=ops.GraphKeys.LOSSES, reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):\n if labels is None:\n raise ValueError('Argument `labels` must not be None.')\n if predictions is None:\n raise ValueError('Argument `predictions` must not be None.')\n with ops.name_scope(scope, 'log_loss', (predictions, labels, weights)) as scope:\n predictions = math_ops.cast(predictions, dtype=dtypes.float32)\n labels = math_ops.cast(labels, dtype=dtypes.float32)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n losses = -math_ops.multiply(labels, math_ops.log(predictions + epsilon)) - math_ops.multiply(1 - labels, math_ops.log(1 - predictions + epsilon))\n return compute_weighted_loss(losses, weights, scope, loss_collection, reduction=reduction)", "docstring": "Adds a Log Loss term to the training procedure.\n\n`weights` acts as a coefficient for the loss. If a scalar is provided, then\nthe loss is simply scaled by the given value. If `weights` is a tensor of size\n`[batch_size]`, then the total loss for each sample of the batch is rescaled\nby the corresponding element in the `weights` vector. If the shape of\n`weights` matches the shape of `predictions`, then the loss of each\nmeasurable element of `predictions` is scaled by the corresponding value of\n`weights`.\n\nArgs:\n labels: The ground truth output tensor, same dimensions as 'predictions'.\n predictions: The predicted outputs.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `losses` dimension).\n epsilon: A small increment to add to avoid taking a log of zero.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which the loss will be added.\n reduction: Type of reduction to apply to loss.\n\nReturns:\n Weighted loss float `Tensor`. 
If `reduction` is `NONE`, this has the same\n shape as `labels`; otherwise, it is scalar.\n\nRaises:\n ValueError: If the shape of `predictions` doesn't match that of `labels` or\n if the shape of `weights` is invalid. Also if `labels` or `predictions`\n is None.\n\n@compatibility(eager)\nThe `loss_collection` argument is ignored when executing eagerly. Consider\nholding on to the return value or collecting losses via a `tf.keras.Model`.\n@end_compatibility"} +{"repo": "beam", "function": "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Returns information about a `BuildTrigger`. This API is experimental.\n\nArgs:\n request: (CloudbuildProjectsTriggersGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (BuildTrigger) The response message."} +{"repo": "transformers", "function": "class Blip2ImageTextMatchingModelOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits_per_image: Optional[torch.FloatTensor] = None\n logits_per_text: Optional[torch.FloatTensor] = None\n text_embeds: Optional[torch.FloatTensor] = None\n image_embeds: Optional[torch.FloatTensor] = None\n text_model_output: BaseModelOutputWithPooling = None\n vision_model_output: BaseModelOutputWithPooling = None\n\n def to_tuple(self) -> Tuple[Any]:\n return tuple((self[k] if k not in ['text_model_output', 'vision_model_output'] else getattr(self, k).to_tuple() for k in self.keys()))", "docstring": "Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):\n Contrastive loss for image-text similarity.\n logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):\n The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text\n similarity scores.\n logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):\n The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image\n similarity scores.\n text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):\n The text embeddings obtained by applying the projection layer to the pooled output.\n image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):\n The image embeddings obtained by applying the projection layer to the pooled output.\n text_model_output (`BaseModelOutputWithPooling`):\n The output of the [`Blip2QFormerModel`].\n vision_model_output (`BaseModelOutputWithPooling`):\n The output of the [`Blip2VisionModel`]."} +{"repo": "pytype", "function": "def __init__(self, ctx):\n if ctx.options.use_fiddle_overlay:\n member_map = {'Config': overlay.add_name('Config', BuildableBuilder), 'Partial': overlay.add_name('Partial', BuildableBuilder)}\n else:\n member_map = {}\n ast = ctx.loader.import_name('fiddle')\n super().__init__(ctx, 'fiddle', member_map, ast)", "docstring": "Initializes the FiddleOverlay.\n\nThis function loads the AST for the fiddle module, which is used to\naccess type information for any members that are not explicitly provided by\nthe overlay. 
See get_attribute in attribute.py for how it's used.\n\nArgs:\n ctx: An instance of context.Context."} +{"repo": "beam", "function": "def set_current_position(self, position):\n raise NotImplementedError", "docstring": "Updates the last-consumed position to the given position.\n\nA source may invoke this method for records that do not start at split\npoints. This may modify the internal state of the ``RangeTracker``. If the\nrecord starts at a split point, method ``try_claim()`` **must** be invoked\ninstead of this method.\n\nArgs:\n position: starting position of a record being read by a source."} +{"repo": "tensorflow", "function": "def index_to_string_table_from_tensor(vocabulary_list, default_value='UNK', name=None):\n if vocabulary_list is None:\n raise ValueError('`vocabulary_list` argument must be specified.')\n with ops.name_scope(name, 'index_to_string'):\n vocabulary_list = ops.convert_to_tensor(vocabulary_list, dtypes.string)\n num_elements = array_ops.size(vocabulary_list)\n keys = math_ops.cast(math_ops.range(num_elements), dtypes.int64)\n init = KeyValueTensorInitializer(keys, vocabulary_list, dtypes.int64, dtypes.string, name='table_init')\n return StaticHashTableV1(init, default_value)", "docstring": "Returns a lookup table that maps a `Tensor` of indices into strings.\n\nThis operation constructs a lookup table to map int64 indices into string\nvalues. The mapping is initialized from a string `vocabulary_list` 1-D\n`Tensor` where each element is a value and the corresponding index within the\ntensor is the key.\n\nAny input which does not have a corresponding index in 'vocabulary_list'\n(an out-of-vocabulary entry) is assigned the `default_value`\n\nThe underlying table must be initialized by calling\n`session.run(tf.compat.v1.tables_initializer())` or\n`session.run(table.init())` once.\n\nElements in `vocabulary_list` cannot have duplicates, otherwise when executing\nthe table initializer op, it will throw a `FailedPreconditionError`.\n\nSample Usages:\n\n```python\nvocabulary_list = tf.constant([\"emerson\", \"lake\", \"palmer\"])\nindices = tf.constant([1, 5], tf.int64)\ntable = tf.lookup.index_to_string_table_from_tensor(\n vocabulary_list, default_value=\"UNKNOWN\")\nvalues = table.lookup(indices)\n...\ntf.compat.v1.tables_initializer().run()\n\nvalues.eval() ==> [\"lake\", \"UNKNOWN\"]\n```\n\nArgs:\n vocabulary_list: A 1-D string `Tensor` that specifies the strings to map\n from indices.\n default_value: The value to use for out-of-vocabulary indices.\n name: A name for this op (optional).\n\nReturns:\n The lookup table to map a string values associated to a given index `int64`\n `Tensors`.\n\nRaises:\n ValueError: when `vocabulary_list` is not set."} +{"repo": "tensorflow", "function": "def matmul(self, matmul_input: core.Tensor) -> Mapping[str, core.Tensor]:\n out = math_ops.matmul(matmul_input, self.matmul_filters)\n return {'output': out}", "docstring": "Performs a matrix multiplication.\n\nArgs:\n matmul_input: Input tensor to matmul with the filter.\n\nReturns:\n A map of: output key -> output result."} +{"repo": "beam", "function": "def __init__(self, model_name: str, columns: list[str], title: Optional[str]=None, task_type: str=DEFAULT_TASK_TYPE, project: Optional[str]=None, location: Optional[str]=None, credentials: Optional[Credentials]=None, **kwargs):\n self.model_name = model_name\n self.project = project\n self.location = location\n self.credentials = credentials\n self.title = title\n self.task_type = task_type\n super().__init__(columns=columns, 
**kwargs)", "docstring": "Embedding Config for Vertex AI Text Embedding models following\nhttps://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings # pylint: disable=line-too-long\nText Embeddings are generated for a batch of text using the Vertex AI SDK.\nEmbeddings are returned in a list for each text in the batch. Look at\nhttps://cloud.google.com/vertex-ai/docs/generative-ai/learn/model-versioning#stable-versions-available.md # pylint: disable=line-too-long\nfor more information on model versions and lifecycle.\n\nArgs:\n model_name: The name of the Vertex AI Text Embedding model.\n columns: The columns containing the text to be embedded.\n task_type: The downstream task for the embeddings. Valid values are\n RETRIEVAL_QUERY, RETRIEVAL_DOCUMENT, SEMANTIC_SIMILARITY,\n CLASSIFICATION, CLUSTERING. For more information on the task type,\n look at https://cloud.google.com/vertex-ai/docs/generative-ai/embeddings/get-text-embeddings # pylint: disable=line-too-long\n title: Identifier of the text content.\n project: The default GCP project for API calls.\n location: The default location for API calls.\n credentials: Custom credentials for API calls.\n Defaults to environment credentials."} +{"repo": "transformers", "function": "def window_reverse(windows, window_size, height, width):\n num_channels = windows.shape[-1]\n windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)\n windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)\n return windows", "docstring": "Merges windows to produce higher resolution features.\nArgs:\n windows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`):\n Input windows\n window_size (`int`):\n Window size\n height (`int`):\n Height of the resized audio\n width (`int`):\n Width of the resized audio"} +{"repo": "tensorflow", "function": "def np_doc_only(np_fun_name, np_fun=None):\n np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)\n\n def decorator(f):\n f.__doc__ = _np_doc_helper(f, np_fun, np_fun_name=np_fun_name)\n return f\n return decorator", "docstring": "Attachs numpy docstring to a function.\n\nThis differs from np_doc in that it doesn't check for a match in signature.\n\nArgs:\n np_fun_name: name for the np_fun symbol. 
At least one of np_fun or\n np_fun_name shoud be set.\n np_fun: (optional) the numpy function whose docstring will be used.\n\nReturns:\n A function decorator that attaches the docstring from `np_fun` to the\n decorated function."} +{"repo": "transformers", "function": "def sparsemixer(scores, jitter_eps, training, top_k=2):\n if top_k != 2:\n raise ValueError('top_k must be equal to 2')\n with torch.no_grad():\n mask_logits_threshold, max_ind = scores.max(dim=-1, keepdim=True)\n factor = scores.abs().clamp(min=mask_logits_threshold)\n mask_logits_threshold = (mask_logits_threshold - scores) / factor > 2 * jitter_eps\n masked_gates = scores.masked_fill(mask_logits_threshold, float('-inf'))\n if training:\n selected_experts = (masked_gates - torch.empty_like(masked_gates, memory_format=torch.legacy_contiguous_format).exponential_().log()).max(dim=-1)[1].unsqueeze(-1)\n else:\n selected_experts = max_ind\n masked_gates = torch.softmax(masked_gates, dim=-1)\n multiplier_o = masked_gates.gather(dim=-1, index=selected_experts)\n if training:\n max_scores, max_ind = masked_gates.max(dim=-1, keepdim=True)\n mask_for_one = torch.logical_or(selected_experts == max_ind, torch.rand_like(max_scores) > 0.75)\n mask_for_one = torch.add(0.3333, mask_for_one, alpha=0.6667).type_as(masked_gates)\n multiplier = MultiplierProcessor.apply(scores, multiplier_o, selected_experts, masked_gates, mask_for_one)\n else:\n multiplier = multiplier_o\n masked_scores = torch.scatter(scores, -1, selected_experts, float('-inf'))\n with torch.no_grad():\n mask_logits_threshold, max_ind = masked_scores.max(dim=-1, keepdim=True)\n factor = scores.abs().clamp(min=mask_logits_threshold)\n mask_logits_threshold = (mask_logits_threshold - scores) / factor > 2 * jitter_eps\n masked_gates_top2 = masked_scores.masked_fill(mask_logits_threshold, float('-inf'))\n if training:\n selected_experts_top2 = (masked_gates_top2 - torch.empty_like(masked_gates_top2, memory_format=torch.legacy_contiguous_format).exponential_().log()).max(dim=-1)[1].unsqueeze(-1)\n else:\n selected_experts_top2 = max_ind\n masked_gates_top2 = torch.softmax(masked_gates_top2, dim=-1)\n multiplier_top2_o = masked_gates_top2.gather(dim=-1, index=selected_experts_top2)\n if training:\n max_scores, max_ind = masked_gates_top2.max(dim=-1, keepdim=True)\n mask_for_one_top2 = torch.logical_or(selected_experts_top2 == max_ind, torch.rand_like(max_scores).uniform_() > 0.75)\n mask_for_one_top2 = torch.add(0.3333, mask_for_one_top2, alpha=0.6667).type_as(masked_gates_top2)\n multiplier_top2 = MultiplierProcessor.apply(scores, multiplier_top2_o, selected_experts_top2, masked_gates_top2, mask_for_one_top2)\n else:\n multiplier_top2 = multiplier_top2_o\n multiplier = torch.concat((multiplier, multiplier_top2), dim=-1)\n selected_experts = torch.concat((selected_experts, selected_experts_top2), dim=-1)\n return (multiplier, selected_experts)", "docstring": "Sparse mixer function to select top-k experts and compute multipliers.\nBased on the paper: https://huggingface.co/papers/2409.12136\nWe first replace the TopK(\u00b7) function as random sampling of discrete variables\nin model training. Then, following Liu et al. (2023a) and Liu et al. 
(2023b), we apply Heun's\nthird order method to approximate the expert routing gradient and construct a modified\nback-propagation to give a mathematically sound gradient estimation for expert routing.\n\nArgs:\n scores (torch.Tensor): Input scores tensor.\n jitter_eps (float): Jitter epsilon for numerical stability.\n training (bool): Flag indicating if the model is in training mode.\n top_k (int): Number of top experts to select.\n\nReturns:\n Tuple[torch.Tensor, torch.Tensor]: Multiplier and selected experts tensors."} +{"repo": "mobly", "function": "def list_adb_devices():\n out = adb.AdbProxy().devices()\n return parse_device_list(out, 'device')", "docstring": "List all android devices connected to the computer that are detected by\nadb.\n\nReturns:\n A list of android device serials. Empty if there's none."} +{"repo": "tensorflow", "function": "def erosion2d(value, kernel, strides, rates, padding, name=None):\n with ops.name_scope(name, 'erosion2d', [value, kernel]) as name:\n return math_ops.negative(gen_nn_ops.dilation2d(input=math_ops.negative(value), filter=array_ops.reverse_v2(kernel, [0, 1]), strides=strides, rates=rates, padding=padding, name=name))", "docstring": "Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.\n\nThe `value` tensor has shape `[batch, in_height, in_width, depth]` and the\n`kernel` tensor has shape `[kernel_height, kernel_width, depth]`, i.e.,\neach input channel is processed independently of the others with its own\nstructuring function. The `output` tensor has shape\n`[batch, out_height, out_width, depth]`. The spatial dimensions of the\noutput tensor depend on the `padding` algorithm. We currently only support the\ndefault \"NHWC\" `data_format`.\n\nIn detail, the grayscale morphological 2-D erosion is given by:\n\n output[b, y, x, c] =\n min_{dy, dx} value[b,\n strides[1] * y - rates[1] * dy,\n strides[2] * x - rates[2] * dx,\n c] -\n kernel[dy, dx, c]\n\nDuality: The erosion of `value` by the `kernel` is equal to the negation of\nthe dilation of `-value` by the reflected `kernel`.\n\nArgs:\n value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.\n kernel: A `Tensor`. Must have the same type as `value`.\n 3-D with shape `[kernel_height, kernel_width, depth]`.\n strides: A list of `ints` that has length `>= 4`.\n 1-D of length 4. The stride of the sliding window for each dimension of\n the input tensor. Must be: `[1, stride_height, stride_width, 1]`.\n rates: A list of `ints` that has length `>= 4`.\n 1-D of length 4. The input stride for atrous morphological dilation.\n Must be: `[1, rate_height, rate_width, 1]`.\n padding: A `string` from: `\"SAME\", \"VALID\"`.\n The type of padding algorithm to use.\n name: A name for the operation (optional). If not specified \"erosion2d\"\n is used.\n\nReturns:\n A `Tensor`. 
Has the same type as `value`.\n 4-D with shape `[batch, out_height, out_width, depth]`.\nRaises:\n ValueError: If the `value` depth does not match `kernel`'s shape, or if\n padding is other than `'VALID'` or `'SAME'`."} +{"repo": "tensorflow", "function": "def opcode_to_name(model, op_code):\n op = model.operatorCodes[op_code]\n code = max(op.builtinCode, op.deprecatedBuiltinCode)\n for name, value in vars(schema_fb.BuiltinOperator).items():\n if value == code:\n return name\n return None", "docstring": "Converts a TFLite op_code to the human readable name.\n\nArgs:\n model: The input tflite model.\n op_code: The op_code to resolve to a readable name.\n\nReturns:\n A string containing the human readable op name, or None if not resolvable."} +{"repo": "tensorflow", "function": "def get_compile_flags():\n flags = []\n flags.append('-I%s' % get_include())\n flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG)\n cxx_version_flag = None\n if _CXX_VERSION == 201103:\n cxx_version_flag = '--std=c++11'\n elif _CXX_VERSION == 201402:\n cxx_version_flag = '--std=c++14'\n elif _CXX_VERSION == 201703:\n cxx_version_flag = '--std=c++17'\n elif _CXX_VERSION == 202002:\n cxx_version_flag = '--std=c++20'\n if cxx_version_flag:\n flags.append(cxx_version_flag)\n flags.append('-DEIGEN_MAX_ALIGN_BYTES=%d' % pywrap_tf_session.get_eigen_max_align_bytes())\n return flags", "docstring": "Returns the compilation flags for compiling with TensorFlow.\n\nThe returned list of arguments can be passed to the compiler for compiling\nagainst TensorFlow headers. The result is platform dependent.\n\nFor example, on a typical Linux system with Python 3.7 the following command\nprints `['-I/usr/local/lib/python3.7/dist-packages/tensorflow/include',\n'-D_GLIBCXX_USE_CXX11_ABI=1', '-DEIGEN_MAX_ALIGN_BYTES=64']`\n\n>>> print(tf.sysconfig.get_compile_flags())\n\nReturns:\n A list of strings for the compiler flags."} +{"repo": "tensorflow", "function": "def has_atomic_move(path):\n try:\n return _pywrap_file_io.HasAtomicMove(compat.path_to_bytes(path))\n except errors.OpError:\n return True", "docstring": "Checks whether the file system supports atomic moves.\n\nReturns whether or not the file system of the given path supports the atomic\nmove operation for a file or folder. If atomic move is supported, it is\nrecommended to use a temp location for writing and then move to the final\nlocation.\n\nArgs:\n path: string, path to a file\n\nReturns:\n True, if the path is on a file system that supports atomic move\n False, if the file system does not support atomic move. In such cases\n we need to be careful about using moves. In some cases it is safer\n not to use temporary locations."} +{"repo": "beam", "function": "def __init__(self, fn, buffer_size=_DEFAULT_BUFFER_SIZE):\n if not callable(fn):\n raise TypeError('Expected a callable object instead of: %r' % fn)\n super().__init__()\n self._fn = fn\n self._buffer_size = buffer_size", "docstring": "Initializes a CallableFn object wrapping a callable.\n\nArgs:\n fn: A callable object that reduces elements of an iterable to a single\n value (like the builtins sum and max). 
This callable must be capable of\n receiving the kind of values it generates as output in its input, and\n for best results, its operation must be commutative and associative.\n\nRaises:\n TypeError: if fn parameter is not a callable type."} +{"repo": "transformers", "function": "def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n cos = cos.unsqueeze(unsqueeze_dim)\n sin = sin.unsqueeze(unsqueeze_dim)\n q_embed = q * cos + rotate_half(q) * sin\n k_embed = k * cos + rotate_half(k) * sin\n return (q_embed, k_embed)", "docstring": "Applies Rotary Position Embedding to the query and key tensors.\n\nArgs:\n q (`torch.Tensor`): The query tensor.\n k (`torch.Tensor`): The key tensor.\n cos (`torch.Tensor`): The cosine part of the rotary embedding.\n sin (`torch.Tensor`): The sine part of the rotary embedding.\n position_ids (`torch.Tensor`, *optional*):\n Deprecated and unused.\n unsqueeze_dim (`int`, *optional*, defaults to 1):\n The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have\n the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\nReturns:\n `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding."} +{"repo": "transformers", "function": "def forward(self, inputs: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):\n mean = loc.transpose(-1, -2)\n mean = mean.unsqueeze(-2)\n mean = mean.repeat(1, 1, self.num_patches, 1)\n stdev = scale.transpose(-1, -2)\n stdev = stdev.unsqueeze(-2)\n stdev = stdev.repeat(1, 1, self.num_patches, 1)\n concat_stats = torch.cat([mean, stdev], dim=-1)\n concat_stats = self.map_scale_expansion(concat_stats)\n concat_stats = self.map_scale_compression(concat_stats)\n inputs = torch.cat([inputs, concat_stats], dim=-1)\n inputs = self.inverse_trans_expansion(inputs)\n inputs = self.inverse_trans_compression(inputs)\n return inputs", "docstring": "Args:\n inputs (`torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`)\n loc (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`)\n scale (`torch.Tensor` of shape `(batch_size, 1, num_input_channels)`)\nReturns:\n `torch.Tensor` of shape `(batch_size, num_input_channels, num_patch, d_model)`"} +{"repo": "transformers", "function": "def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int]=None, padding_strategy: PaddingStrategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_attention_mask: Optional[bool]=None) -> dict:\n if return_attention_mask is None:\n return_attention_mask = 'attention_mask' in self.model_input_names\n required_input = encoded_inputs[self.model_input_names[0]]\n if padding_strategy == PaddingStrategy.LONGEST:\n max_length = len(required_input)\n if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of\n needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and 
len(required_input) != max_length\n if return_attention_mask and 'attention_mask' not in encoded_inputs:\n encoded_inputs['attention_mask'] = [1] * len(required_input)\n if needs_to_be_padded:\n difference = max_length - len(required_input)\n padding_side = padding_side if padding_side is not None else self.padding_side\n if padding_side == 'right':\n if return_attention_mask:\n encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'] + [0] * difference\n if 'token_type_ids' in encoded_inputs:\n encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'] + [self.pad_token_type_id] * difference\n if 'bbox' in encoded_inputs:\n encoded_inputs['bbox'] = encoded_inputs['bbox'] + [self.pad_token_box] * difference\n if 'labels' in encoded_inputs:\n encoded_inputs['labels'] = encoded_inputs['labels'] + [self.pad_token_label] * difference\n if 'special_tokens_mask' in encoded_inputs:\n encoded_inputs['special_tokens_mask'] = encoded_inputs['special_tokens_mask'] + [1] * difference\n encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference\n elif padding_side == 'left':\n if return_attention_mask:\n encoded_inputs['attention_mask'] = [0] * difference + encoded_inputs['attention_mask']\n if 'token_type_ids' in encoded_inputs:\n encoded_inputs['token_type_ids'] = [self.pad_token_type_id] * difference + encoded_inputs['token_type_ids']\n if 'bbox' in encoded_inputs:\n encoded_inputs['bbox'] = [self.pad_token_box] * difference + encoded_inputs['bbox']\n if 'labels' in encoded_inputs:\n encoded_inputs['labels'] = [self.pad_token_label] * difference + encoded_inputs['labels']\n if 'special_tokens_mask' in encoded_inputs:\n encoded_inputs['special_tokens_mask'] = [1] * difference + encoded_inputs['special_tokens_mask']\n encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input\n else:\n raise ValueError('Invalid padding strategy:' + str(padding_side))\n return encoded_inputs", "docstring": "Pad encoded inputs (on left/right and up to predefined length or max length in the batch)\n\nArgs:\n encoded_inputs:\n Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).\n max_length: maximum length of the returned list and optionally padding length (see below).\n Will truncate by taking into account the special tokens.\n padding_strategy: PaddingStrategy to use for padding.\n\n - PaddingStrategy.LONGEST Pad to the longest sequence in the batch\n - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)\n - PaddingStrategy.DO_NOT_PAD: Do not pad\n The tokenizer padding sides are defined in self.padding_side:\n\n - 'left': pads on the left of the sequences\n - 'right': pads on the right of the sequences\n pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.\n This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability\n `>= 7.5` (Volta).\n padding_side:\n The side on which the model should have padding applied. 
Should be selected between ['right', 'left'].\n Default value is picked from the class attribute of the same name.\n return_attention_mask:\n (optional) Set to False to avoid returning attention mask (default: set to model specifics)"} +{"repo": "fhir-py", "function": "class CastFunction(StandardSqlExpression):\n expression: StandardSqlExpression\n cast_to: StandardSqlDataType\n _sql_alias: Optional[str] = None\n\n def __str__(self) -> str:\n return f'CAST(\\n{self.expression} AS {self.cast_to.big_query_type_name})'\n\n @property\n def sql_alias(self) -> str:\n return self._sql_alias or 'cast_'\n\n @property\n def sql_data_type(self) -> StandardSqlDataType:\n return self.cast_to", "docstring": "Representation of a SQL cast.\n\nAttributes:\n expression: The expression being cast.\n cast_to: The type the expression is being cast to."} +{"repo": "beam", "function": "def validate(self):\n errors = []\n for cls in self.OPTIONS:\n if 'validate' in cls.__dict__ and callable(cls.__dict__['validate']):\n errors.extend(self.options.view_as(cls).validate(self))\n return errors", "docstring": "Calls validate on subclassess and returns a list of errors.\n\nvalidate will call validate method on subclasses, accumulate the returned\nlist of errors, and returns the aggregate list.\n\nReturns:\n Aggregate list of errors after all calling all possible validate methods."} +{"repo": "transformers", "function": "class ClvpTokenizer(PreTrainedTokenizer):\n vocab_files_names = VOCAB_FILES_NAMES\n model_input_names = ['input_ids', 'attention_mask']\n\n def __init__(self, vocab_file, merges_file, errors='replace', unk_token='[UNK]', bos_token='<|endoftext|>', eos_token='[STOP]', pad_token='[STOP]', add_prefix_space=False, add_bos_token=False, add_eos_token=False, **kwargs):\n bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token\n eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token\n pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token\n self.add_bos_token = add_bos_token\n self.add_eos_token = add_eos_token\n self._normalizer = None\n with open(vocab_file, encoding='utf-8') as vocab_handle:\n self.encoder = json.load(vocab_handle)\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.errors = errors\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n with open(merges_file, encoding='utf-8') as merges_handle:\n bpe_merges = merges_handle.read().split('\\n')[1:-1]\n bpe_merges = [tuple(merge.split()) for merge in bpe_merges]\n self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))\n self.cache = {}\n self.add_prefix_space = add_prefix_space\n self.pat = re.compile(\"'s|'t|'re|'ve|'m|'ll|'d| ?\\\\p{L}+| ?\\\\p{N}+| ?[^\\\\s\\\\p{L}\\\\p{N}]+|\\\\s+(?!\\\\S)|\\\\s+\")\n super().__init__(errors=errors, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, add_bos_token=add_bos_token, add_eos_token=add_eos_token, **kwargs)\n\n @property\n def vocab_size(self):\n return len(self.encoder)\n\n @property\n def normalizer(self):\n if self._normalizer is None:\n self._normalizer = EnglishNormalizer()\n return self._normalizer\n\n def get_vocab(self):\n return dict(self.encoder, **self.added_tokens_encoder)\n\n def bpe(self, token):\n if token in self.cache:\n return 
self.cache[token]\n word = tuple(token)\n pairs = get_pairs(word)\n if not pairs:\n return token\n while True:\n bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n except ValueError:\n new_word.extend(word[i:])\n break\n else:\n new_word.extend(word[i:j])\n i = j\n if word[i] == first and i < len(word) - 1 and (word[i + 1] == second):\n new_word.append(first + second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n self.cache[token] = word\n return word\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n output = bos_token_id + token_ids_0 + eos_token_id\n if token_ids_1 is not None:\n output = output + bos_token_id + token_ids_1 + eos_token_id\n return output\n\n def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n \"\"\"\n Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding\n special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if not self.add_bos_token:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False)\n if token_ids_1 is None:\n return [1] + [0] * len(token_ids_0)\n return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)\n\n def _tokenize(self, text):\n \"\"\"Tokenize a string.\"\"\"\n bpe_tokens = []\n text = self.normalizer(text)\n for token in re.findall(self.pat, text):\n token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))\n bpe_tokens.extend(('[SPACE]' if bpe_token == '\u0120' and '[SPACE]' in self.encoder.keys() else bpe_token for bpe_token in self.bpe(token).split(' ')))\n return bpe_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.encoder.get(token, self.encoder.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.decoder.get(index)\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n text = ''.join(tokens)\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)\n return text\n\n def clean_up_tokenization(self, text):\n text = ''.join(text)\n vocab_tokens = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys())\n text = text.replace('[SPACE]', ' ') if 
'[SPACE]' in vocab_tokens else text\n text = text.replace('[STOP]', ' ') if '[STOP]' in vocab_tokens else text\n text = text.replace(self.unk_token, '').replace(' ', ' ').replace(' ', ' ')\n return text\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n if not os.path.isdir(save_directory):\n logger.error(f'Vocabulary path ({save_directory}) should be a directory')\n return\n vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n merge_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])\n with open(vocab_file, 'w', encoding='utf-8') as f:\n f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\\n')\n index = 0\n with open(merge_file, 'w', encoding='utf-8') as writer:\n writer.write('#version: 0.2\\n')\n for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')\n index = token_index\n writer.write(' '.join(bpe_tokens) + '\\n')\n index += 1\n return (vocab_file, merge_file)", "docstring": "Construct a CLVP tokenizer. Based on byte-level Byte-Pair-Encoding.\n\nThis tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will\nbe encoded differently whether it is at the beginning of the sentence (without space) or not:\n\n```python\n>>> from transformers import ClvpTokenizer\n\n>>> tokenizer = ClvpTokenizer.from_pretrained(\"susnato/clvp_dev\")\n>>> tokenizer(\"Hello world\")[\"input_ids\"]\n[62, 84, 28, 2, 179, 79]\n\n>>> tokenizer(\" Hello world\")[\"input_ids\"]\n[2, 62, 84, 28, 2, 179, 79]\n```\n\nYou can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you\ncall it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.\n\n\n\nWhen used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).\n\n\n\nThis tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to\nthis superclass for more information regarding those methods.\n\nArgs:\n vocab_file (`str`):\n Path to the vocabulary file.\n merges_file (`str`):\n Path to the merges file.\n errors (`str`, *optional*, defaults to `\"replace\"`):\n Paradigm to follow when decoding bytes to UTF-8. See\n [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.\n unk_token (`str`, *optional*, defaults to `\"[UNK]\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n bos_token (`str`, *optional*, defaults to `\"<|endoftext|>\"`):\n The beginning of sequence token.\n eos_token (`str`, *optional*, defaults to `\"[STOP]\"`):\n The end of sequence token.\n pad_token (`str`, *optional*, defaults to `\"[STOP]\"`):\n The pad token of the sequence.\n add_prefix_space (`bool`, *optional*, defaults to `False`):\n Whether or not to add an initial space to the input. This allows to treat the leading word just as any\n other word. 
(CLVP tokenizer detects the beginning of words by the preceding space).\n add_bos_token (`bool`, *optional*, defaults to `False`):\n Whether to add `bos_token` in front of the sequence when add_special_tokens=True.\n add_eos_token (`bool`, *optional*, defaults to `False`):\n Whether to add `eos_token` at the end of the sequence when add_special_tokens=True."} +{"repo": "yapf", "function": "def _AddTokenOnCurrentLine(self, dry_run):\n current = self.next_token\n previous = current.previous_token\n spaces = current.spaces_required_before\n if isinstance(spaces, list):\n spaces = 0\n if not dry_run:\n current.AddWhitespacePrefix(newlines_before=0, spaces=spaces)\n if previous.OpensScope():\n if not current.is_comment:\n self.stack[-1].closing_scope_indent = self.column - 1\n if style.Get('ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'):\n self.stack[-1].closing_scope_indent += 1\n self.stack[-1].indent = self.column + spaces\n else:\n self.stack[-1].closing_scope_indent = self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH')\n self.column += spaces", "docstring": "Puts the token on the current line.\n\nAppends the next token to the state and updates information necessary for\nindentation.\n\nArguments:\n dry_run: (bool) Commit whitespace changes to the FormatToken if True."} +{"repo": "tensorflow", "function": "def maybe_shuffle_batch_join(tensors_list, batch_size, capacity, min_after_dequeue, keep_input, seed=None, enqueue_many=False, shapes=None, allow_smaller_final_batch=False, shared_name=None, name=None):\n return _shuffle_batch_join(tensors_list, batch_size, capacity, min_after_dequeue, keep_input, seed=seed, enqueue_many=enqueue_many, shapes=shapes, allow_smaller_final_batch=allow_smaller_final_batch, shared_name=shared_name, name=name)", "docstring": "Create batches by randomly shuffling conditionally-enqueued tensors.\n\nSee docstring in `shuffle_batch_join` for more details.\n\nArgs:\n tensors_list: A list of tuples or dictionaries of tensors to enqueue.\n batch_size: An integer. The new batch size pulled from the queue.\n capacity: An integer. The maximum number of elements in the queue.\n min_after_dequeue: Minimum number of elements in the queue after a\n dequeue, used to ensure a level of mixing of elements.\n keep_input: A `bool` Tensor. This tensor controls whether the input is\n added to the queue or not. If it is a scalar and evaluates `True`, then\n `tensors` are all added to the queue. If it is a vector and `enqueue_many`\n is `True`, then each example is added to the queue only if the\n corresponding value in `keep_input` is `True`. This tensor essentially\n acts as a filtering mechanism.\n seed: Seed for the random shuffling within the queue.\n enqueue_many: Whether each tensor in `tensors_list` is a single\n example.\n shapes: (Optional) The shapes for each example. Defaults to the\n inferred shapes for `tensors_list[i]`.\n allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final\n batch to be smaller if there are insufficient items left in the queue.\n shared_name: (optional). If set, this queue will be shared under the given\n name across multiple sessions.\n name: (Optional) A name for the operations.\n\nReturns:\n A list or dictionary of tensors with the same number and types as\n `tensors_list[i]`.\n\nRaises:\n ValueError: If the `shapes` are not specified, and cannot be\n inferred from the elements of `tensors_list`.\n\n@compatibility(eager)\nInput pipelines based on Queues are not supported when eager execution is\nenabled. 
Please use the `tf.data` API to ingest data under eager execution.\n@end_compatibility"} +{"repo": "beam", "function": "def __init__(self, msg, exception_details=None):\n message = '%s with exceptions %s' % (msg, exception_details)\n super().__init__(message)\n self.exception_details = exception_details", "docstring": "Class representing the errors thrown in the batch file operations.\nArgs:\n msg: Message string for the exception thrown\n exception_details: Optional map of individual input to exception for\n failed operations in batch. This parameter is optional so if specified\n the user can assume that the all errors in the filesystem operation\n have been reported. When the details are missing then the operation\n may have failed anywhere so the user should use match to determine\n the current state of the system."} +{"repo": "yapf", "function": "def _FindStmtParent(node):\n if pytree_utils.NodeName(node) in _STATEMENT_NODES:\n return node\n else:\n return _FindStmtParent(node.parent)", "docstring": "Find the nearest parent of node that is a statement node.\n\nArguments:\n node: node to start from\n\nReturns:\n Nearest parent (or node itself, if suitable)."} +{"repo": "tensorflow", "function": "def mean_pairwise_squared_error(labels, predictions, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES):\n if labels is None:\n raise ValueError('Argument `labels` must not be None.')\n if predictions is None:\n raise ValueError('Argument `predictions` must not be None.')\n with ops.name_scope(scope, 'mean_pairwise_squared_error', (predictions, labels, weights)) as scope:\n weights = math_ops.cast(weights, dtype=dtypes.float32)\n labels = math_ops.cast(labels, dtype=dtypes.float32)\n\n def compute_loss(labels, predictions, weights, loss_collection):\n predictions = math_ops.cast(predictions, dtype=dtypes.float32)\n predictions.get_shape().assert_is_compatible_with(labels.get_shape())\n diffs = math_ops.subtract(predictions, labels)\n axis = math_ops.range(1, array_ops.rank(diffs))\n sum_squares_diff_per_batch = math_ops.reduce_sum(math_ops.square(diffs), axis=axis, keepdims=True)\n num_present_per_batch = _num_present(diffs, weights, per_batch=True)\n term1 = 2.0 * math_ops.div_no_nan(sum_squares_diff_per_batch, math_ops.maximum(num_present_per_batch - 1, 0), name='value')\n sum_diff = math_ops.reduce_sum(diffs, axis=axis, keepdims=True)\n term2 = 2.0 * math_ops.div_no_nan(math_ops.square(sum_diff), math_ops.maximum(math_ops.multiply(num_present_per_batch, num_present_per_batch - 1), 0), name='value')\n weighted_losses = math_ops.multiply(term1 - term2, weights)\n loss = math_ops.reduce_sum(weighted_losses)\n mean_loss = array_ops.where(math_ops.reduce_sum(num_present_per_batch) > 0, loss, array_ops.zeros_like(loss), name='value')\n util.add_loss(mean_loss, loss_collection)\n return mean_loss\n if control_flow_ops.get_enclosing_xla_context() is not None:\n return compute_loss(labels, predictions, weights, loss_collection)\n else:\n with ops.control_dependencies((weights_broadcast_ops.assert_broadcastable(weights, labels),)):\n return compute_loss(labels, predictions, weights, loss_collection)", "docstring": "Adds a pairwise-errors-squared loss to the training procedure.\n\nUnlike `mean_squared_error`, which is a measure of the differences between\ncorresponding elements of `predictions` and `labels`,\n`mean_pairwise_squared_error` is a measure of the differences between pairs of\ncorresponding elements of `predictions` and `labels`.\n\nFor example, if `labels`=[a, b, c] and 
`predictions`=[x, y, z], there are\nthree pairs of differences are summed to compute the loss:\n loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3\n\nNote that since the inputs are of shape `[batch_size, d0, ... dN]`, the\ncorresponding pairs are computed within each batch sample but not across\nsamples within a batch. For example, if `predictions` represents a batch of\n16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs\nis drawn from each image, but not across images.\n\n`weights` acts as a coefficient for the loss. If a scalar is provided, then\nthe loss is simply scaled by the given value. If `weights` is a tensor of size\n`[batch_size]`, then the total loss for each sample of the batch is rescaled\nby the corresponding element in the `weights` vector.\n\nArgs:\n labels: The ground truth output tensor, whose shape must match the shape of\n `predictions`.\n predictions: The predicted outputs, a tensor of size\n `[batch_size, d0, .. dN]` where N+1 is the total number of dimensions in\n `predictions`.\n weights: Coefficients for the loss a scalar, a tensor of shape\n `[batch_size]` or a tensor whose shape matches `predictions`.\n scope: The scope for the operations performed in computing the loss.\n loss_collection: collection to which the loss will be added.\n\nReturns:\n A scalar `Tensor` that returns the weighted loss.\n\nRaises:\n ValueError: If the shape of `predictions` doesn't match that of `labels` or\n if the shape of `weights` is invalid. Also if `labels` or `predictions`\n is None.\n\n@compatibility(eager)\nThe `loss_collection` argument is ignored when executing eagerly. Consider\nholding on to the return value or collecting losses via a `tf.keras.Model`.\n@end_compatibility"} +{"repo": "tf-quant-finance", "function": "def brownian_bridge_single(*, x_start, x_end, variance, barrier, dtype=None, name=None):\n with tf.name_scope(name or 'brownian_bridge_single'):\n x_start = tf.convert_to_tensor(x_start, dtype=dtype, name='x_start')\n dtype = x_start.dtype\n variance = tf.convert_to_tensor(variance, dtype=dtype, name='variance')\n x_end = tf.convert_to_tensor(x_end, dtype=dtype, name='x_end')\n a = (x_start - barrier) * (x_end - barrier)\n return 1 - tf.math.exp(-2 * a / variance)", "docstring": "Computes proba of not touching the barrier for a 1D Brownian Bridge.\n\nThe Brownian bridge starts at `x_start`, ends at `x_end` and has a variance\n`variance`. The no-touch probabilities are calculated assuming that `x_start`\nand `x_end` are the same side of the barrier (either both above or both\nbelow).\nThis can be used in Monte Carlo pricing for adjusting probability of\ntouching the barrier from discrete case to continuous case.\nTypically in practise, the tensors `x_start`, `x_end` and `variance` should be\nbi-dimensional (with time steps and paths being the 2 dimensions).\n\n#### Example\n\n```python\nx_start = np.asarray([[4.5, 4.5, 4.5], [4.5, 4.6, 4.7]])\nx_end = np.asarray([[5.0, 4.9, 4.8], [4.8, 4.9, 5.0]])\nvariance = np.asarray([[0.1, 0.2, 0.1], [0.3, 0.1, 0.2]])\nbarrier = 5.1\n\nno_touch_proba = brownian_bridge_single(\n x_start=x_start,\n x_end=x_end,\n variance=variance,\n barrier=barrier)\n# Expected print output of no_touch_proba:\n# [[0.69880579 0.69880579 0.97267628]\n# [0.69880579 0.86466472 0.32967995]]\n```\n\n#### References\n\n[1] Emmanuel Gobet. 
Advanced Monte Carlo methods for barrier and related\nexotic options.\nhttps://papers.ssrn.com/sol3/papers.cfm?abstract_id=1265669\n\nArgs:\n x_start: A real `Tensor` of any shape and dtype.\n x_end: A real `Tensor` of the same dtype and compatible shape as\n `x_start`.\n variance: A real `Tensor` of the same dtype and compatible shape as\n `x_start`.\n barrier: A scalar `Tensor` of the same dtype as `x_start`. Stands for the\n boundary for the Brownian Bridge.\n dtype: Optional `tf.DType`. If supplied, the dtype to be used for conversion\n of any supplied non-`Tensor` arguments to `Tensor`.\n Default value: None which maps to the default dtype inferred by\n TensorFlow.\n name: str. The name for the ops created by this function.\n Default value: None which is mapped to the default name\n `brownian_bridge_single`.\n\nReturns:\n A `Tensor` of the same shape as the input data which is the probability\n of not touching the barrier."} +{"repo": "beam", "function": "def get_replacement_inputs(self, applied_ptransform):\n return tuple(applied_ptransform.inputs) + tuple((side_input.pvalue for side_input in applied_ptransform.side_inputs))", "docstring": "Provides inputs that will be passed to the replacement PTransform.\n\nArgs:\n applied_ptransform: Original AppliedPTransform containing the PTransform\n to be replaced.\n\nReturns:\n An iterable of PValues that will be passed to the expand() method of the\n replacement PTransform."} +{"repo": "transformers", "function": "class TextToAudioPipeline(Pipeline):\n _pipeline_calls_generate = True\n _default_generation_config = GenerationConfig(max_new_tokens=256)\n\n def __init__(self, *args, vocoder=None, sampling_rate=None, **kwargs):\n super().__init__(*args, **kwargs)\n if self.framework == 'tf':\n raise ValueError('The TextToAudioPipeline is only available in PyTorch.')\n self.vocoder = None\n if self.model.__class__ in MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING.values():\n self.vocoder = SpeechT5HifiGan.from_pretrained(DEFAULT_VOCODER_ID).to(self.model.device) if vocoder is None else vocoder\n self.sampling_rate = sampling_rate\n if self.vocoder is not None:\n self.sampling_rate = self.vocoder.config.sampling_rate\n if self.sampling_rate is None:\n config = self.model.config\n gen_config = self.model.__dict__.get('generation_config', None)\n if gen_config is not None:\n config.update(gen_config.to_dict())\n for sampling_rate_name in ['sample_rate', 'sampling_rate']:\n sampling_rate = getattr(config, sampling_rate_name, None)\n if sampling_rate is not None:\n self.sampling_rate = sampling_rate\n\n def preprocess(self, text, **kwargs):\n if isinstance(text, str):\n text = [text]\n if self.model.config.model_type == 'bark':\n new_kwargs = {'max_length': self.generation_config.semantic_config.get('max_input_semantic_length', 256), 'add_special_tokens': False, 'return_attention_mask': True, 'return_token_type_ids': False, 'padding': 'max_length'}\n new_kwargs.update(kwargs)\n kwargs = new_kwargs\n output = self.tokenizer(text, **kwargs, return_tensors='pt')\n return output\n\n def _forward(self, model_inputs, **kwargs):\n kwargs = self._ensure_tensor_on_device(kwargs, device=self.device)\n forward_params = kwargs['forward_params']\n generate_kwargs = kwargs['generate_kwargs']\n if self.model.can_generate():\n generate_kwargs = self._ensure_tensor_on_device(generate_kwargs, device=self.device)\n if 'generation_config' not in generate_kwargs:\n generate_kwargs['generation_config'] = self.generation_config\n forward_params.update(generate_kwargs)\n output = 
self.model.generate(**model_inputs, **forward_params)\n else:\n if len(generate_kwargs):\n raise ValueError(f\"You're using the `TextToAudioPipeline` with a forward-only model, but `generate_kwargs` is non empty. For forward-only TTA models, please use `forward_params` instead of `generate_kwargs`. For reference, the `generate_kwargs` used here are: {generate_kwargs.keys()}\")\n output = self.model(**model_inputs, **forward_params)[0]\n if self.vocoder is not None:\n output = self.vocoder(output)\n return output\n\n @overload\n def __call__(self, text_inputs: str, **forward_params: Any) -> Dict[str, Any]:\n ...\n\n @overload\n def __call__(self, text_inputs: List[str], **forward_params: Any) -> List[Dict[str, Any]]:\n ...\n\n def __call__(self, text_inputs: Union[str, List[str]], **forward_params) -> Union[Dict[str, Any], List[Dict[str, Any]]]:\n \"\"\"\n Generates speech/audio from the inputs. See the [`TextToAudioPipeline`] documentation for more information.\n\n Args:\n text_inputs (`str` or `List[str]`):\n The text(s) to generate.\n forward_params (`dict`, *optional*):\n Parameters passed to the model generation/forward method. `forward_params` are always passed to the\n underlying model.\n generate_kwargs (`dict`, *optional*):\n The dictionary of ad-hoc parametrization of `generate_config` to be used for the generation call. For a\n complete overview of generate, check the [following\n guide](https://huggingface.co/docs/transformers/en/main_classes/text_generation). `generate_kwargs` are\n only passed to the underlying model if the latter is a generative model.\n\n Return:\n A `dict` or a list of `dict`: The dictionaries have two keys:\n\n - **audio** (`np.ndarray` of shape `(nb_channels, audio_length)`) -- The generated audio waveform.\n - **sampling_rate** (`int`) -- The sampling rate of the generated audio waveform.\n \"\"\"\n return super().__call__(text_inputs, **forward_params)\n\n def _sanitize_parameters(self, preprocess_params=None, forward_params=None, generate_kwargs=None):\n if getattr(self, 'assistant_model', None) is not None:\n generate_kwargs['assistant_model'] = self.assistant_model\n if getattr(self, 'assistant_tokenizer', None) is not None:\n generate_kwargs['tokenizer'] = self.tokenizer\n generate_kwargs['assistant_tokenizer'] = self.assistant_tokenizer\n params = {'forward_params': forward_params if forward_params else {}, 'generate_kwargs': generate_kwargs if generate_kwargs else {}}\n if preprocess_params is None:\n preprocess_params = {}\n postprocess_params = {}\n return (preprocess_params, params, postprocess_params)\n\n def postprocess(self, waveform):\n output_dict = {}\n if isinstance(waveform, dict):\n waveform = waveform['waveform']\n elif isinstance(waveform, tuple):\n waveform = waveform[0]\n output_dict['audio'] = waveform.to(device='cpu', dtype=torch.float).numpy()\n output_dict['sampling_rate'] = self.sampling_rate\n return output_dict", "docstring": "Text-to-audio generation pipeline using any `AutoModelForTextToWaveform` or `AutoModelForTextToSpectrogram`. 
This\npipeline generates an audio file from an input text and optional other conditional inputs.\n\nUnless the model you're using explicitly sets these generation parameters in its configuration files\n(`generation_config.json`), the following default values will be used:\n- max_new_tokens: 256\n\nExample:\n\n```python\n>>> from transformers import pipeline\n\n>>> pipe = pipeline(model=\"suno/bark-small\")\n>>> output = pipe(\"Hey it's HuggingFace on the phone!\")\n\n>>> audio = output[\"audio\"]\n>>> sampling_rate = output[\"sampling_rate\"]\n```\n\nLearn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)\n\n\n\nYou can specify parameters passed to the model by using [`TextToAudioPipeline.__call__.forward_params`] or\n[`TextToAudioPipeline.__call__.generate_kwargs`].\n\nExample:\n\n```python\n>>> from transformers import pipeline\n\n>>> music_generator = pipeline(task=\"text-to-audio\", model=\"facebook/musicgen-small\", framework=\"pt\")\n\n>>> # diversify the music generation by adding randomness with a high temperature and set a maximum music length\n>>> generate_kwargs = {\n... \"do_sample\": True,\n... \"temperature\": 0.7,\n... \"max_new_tokens\": 35,\n... }\n\n>>> outputs = music_generator(\"Techno music with high melodic riffs\", generate_kwargs=generate_kwargs)\n```\n\n\n\nThis pipeline can currently be loaded from [`pipeline`] using the following task identifiers: `\"text-to-speech\"` or\n`\"text-to-audio\"`.\n\nSee the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=text-to-speech)."} +{"repo": "tensorflow", "function": "def on_test_batch_begin(self, batch, logs=None):", "docstring": "Called at the beginning of a batch in `evaluate` methods.\n\nAlso called at the beginning of a validation batch in the `fit`\nmethods, if validation data is provided.\n\nSubclasses should override for any actions to run.\n\nNote that if the `steps_per_execution` argument to `compile` in\n`tf.keras.Model` is set to `N`, this method will only be called every `N`\nbatches.\n\nArgs:\n batch: Integer, index of batch within the current epoch.\n logs: Dict, contains the return value of `model.test_step`. Typically,\n the values of the `Model`'s metrics are returned. Example:\n `{'loss': 0.2, 'accuracy': 0.7}`."} +{"repo": "tensorflow", "function": "def isotonic_regression(inputs, decreasing=True, axis=-1):\n type_promotions = {dtypes.float32: dtypes.float32, dtypes.half: dtypes.half, dtypes.bfloat16: dtypes.bfloat16, dtypes.int8: dtypes.float32, dtypes.int16: dtypes.float32}\n inputs = ops.convert_to_tensor(inputs)\n try:\n output_dtype = type_promotions[inputs.dtype]\n except KeyError:\n output_dtype = dtypes.float64\n\n def compute_on_matrix(matrix, name=None):\n iso_fn = functools.partial(gen_nn_ops.isotonic_regression, output_dtype=output_dtype, name=name)\n if decreasing:\n return iso_fn(matrix)\n else:\n output, segments = iso_fn(-matrix)\n return (-output, segments)\n return _wrap_2d_function(inputs, compute_on_matrix, axis)", "docstring": "Solves isotonic regression problems along the given axis.\n\nFor each vector x, the problem solved is\n\n$$\\argmin_{y_1 >= y_2 >= ... >= y_n} \\sum_i (x_i - y_i)^2.$$\n\nAs the solution is component-wise constant, a second tensor is returned that\nencodes the segments. The problems are solved over the given axis.\n\nConsider the following example, where we solve a batch of two problems. 
The\nfirst input is [3, 1, 2], while the second [1, 3, 4] (as the axis is 1).\n>>> x = tf.constant([[3, 1, 2], [1, 3, 4]], dtype=tf.float32)\n>>> y, segments = tf.nn.isotonic_regression(x, axis=1)\n>>> y # The solution.\n\n\nNote that the first solution has two blocks [2] and [1.5, 1.5]. The second\nsolution is constant, and thus has a single segment. These segments are\nexactly what the second returned tensor encodes:\n\n>>> segments\n\n\n\nArgs:\n inputs: A tensor holding the inputs.\n decreasing: If set to False, the inequalities in the optimizing constrained\n are flipped.\n axis: The axis along which the problems should be solved.\n\nReturns:\n output: The solutions, same shape as type as the input.\n segments: An int32 tensor, same shape as the input indicating the segments\n that have the same value. Specifically, those positions that have the same\n value correspond to the same segment. These values start at zero, and are\n monotonously increasing for each solution."} +{"repo": "tf-quant-finance", "function": "def volatility_fn(self):\n pass", "docstring": "Python callable calculating the instantaneous volatility matrix.\n\nThe callable should accept two real `Tensor` arguments of the same dtype and\nshape `times_shape`. The first argument is the scalar time t, the second\nargument is the value of Ito process X - `Tensor` of shape `batch_shape +\n[dim]`. Here `batch_shape` is an arbitrary shape. The result is value of\nvolatility `S_ij`(t, X). The return value of the callable is a real `Tensor`\nof the same dtype as the input arguments and of shape\n`batch_shape + [dim, dim]`.\n\nReturns:\n The instantaneous volatility callable."} +{"repo": "tensorflow", "function": "def build(self, input_shape):\n if not hasattr(self.build, '_is_default'):\n self._build_input_shape = input_shape\n self.built = True", "docstring": "Creates the variables of the layer (optional, for subclass implementers).\n\nThis is a method that implementers of subclasses of `Layer` or `Model`\ncan override if they need a state-creation step in-between\nlayer instantiation and layer call.\n\nThis is typically used to create the weights of `Layer` subclasses.\n\nArgs:\n input_shape: Instance of `TensorShape`, or list of instances of\n `TensorShape` if the layer expects a list of inputs\n (one instance per input)."} +{"repo": "beam", "function": "def _fill_in_missing(x):\n default_value = '' if x.dtype == tf.string else 0\n return tf.squeeze(tf.sparse.to_dense(tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]), default_value), axis=1)", "docstring": "Replace missing values in a SparseTensor.\n\nFills in missing values of `x` with '' or 0, and converts to a dense tensor.\n\nArgs:\n x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1\n in the second dimension.\n\nReturns:\n A rank 1 tensor where missing values of `x` have been filled in."} +{"repo": "tensorflow", "function": "def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):\n summary_ops = ops.get_collection(key)\n if not summary_ops:\n return None\n else:\n return merge_summary(summary_ops)", "docstring": "Merges all summaries collected in the default graph.\n\nThis op is deprecated. Please switch to tf.compat.v1.summary.merge_all, which\nhas\nidentical behavior.\n\nArgs:\n key: `GraphKey` used to collect the summaries. Defaults to\n `GraphKeys.SUMMARIES`.\n\nReturns:\n If no summaries were collected, returns None. 
Otherwise returns a scalar\n `Tensor` of type `string` containing the serialized `Summary` protocol\n buffer resulting from the merging."} +{"repo": "tensorflow", "function": "class ConvolutionOrthogonal(Initializer):\n\n def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):\n self.gain = gain\n self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))\n self.seed = seed\n\n def __call__(self, shape, dtype=None, partition_info=None):\n raise NotImplementedError\n\n def get_config(self):\n return {'gain': self.gain, 'seed': self.seed, 'dtype': self.dtype.name}\n\n def _orthogonal_matrix(self, n):\n \"\"\"Construct an n x n orthogonal matrix.\n\n Args:\n n: Dimension.\n\n Returns:\n A n x n orthogonal matrix.\n \"\"\"\n a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)\n if self.seed:\n self.seed += 1\n q, r = gen_linalg_ops.qr(a)\n d = array_ops.diag_part(r)\n q *= math_ops.sign(d)\n return q\n\n def _symmetric_projection(self, n):\n \"\"\"Compute a n x n symmetric projection matrix.\n\n Args:\n n: Dimension.\n\n Returns:\n A n x n symmetric projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T.\n \"\"\"\n q = self._orthogonal_matrix(n)\n mask = math_ops.cast(random_ops.random_normal([n], seed=self.seed) > 0, self.dtype)\n if self.seed:\n self.seed += 1\n c = math_ops.multiply(q, mask)\n return math_ops.matmul(c, array_ops.matrix_transpose(c))", "docstring": "Initializer that generates orthogonal kernel for ConvNets.\n\nBase class used to construct 1D, 2D and 3D orthogonal kernels for convolution.\n\nArgs:\n gain: multiplicative factor to apply to the orthogonal matrix. Default is 1.\n The 2-norm of an input is multiplied by a factor of `gain` after applying\n this convolution.\n seed: A Python integer. Used to create random seeds. See\n `tf.compat.v1.set_random_seed` for behavior.\n dtype: Default data type, used if no `dtype` argument is provided when\n calling the initializer. 
Only floating point types are supported.\nReferences:\n [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)\n ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))"} +{"repo": "beam", "function": "def Launch(self, request, global_params=None):\n config = self.GetMethodConfig('Launch')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Launch a job with a FlexTemplate.\n\nArgs:\n request: (DataflowProjectsLocationsFlexTemplatesLaunchRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (LaunchFlexTemplateResponse) The response message."} +{"repo": "tensorflow", "function": "def __init__(self, input_gen):\n self.input_gen = input_gen", "docstring": "Creates a representative dataset.\n\nArgs:\n input_gen: A generator function that generates input samples for the model\n and has the same order, type and shape as the inputs to the model.\n Usually, this is a small subset of a few hundred samples randomly\n chosen, in no particular order, from the training or evaluation dataset."} +{"repo": "pytype", "function": "def trace_call(self, node, func, sigs, posargs, namedargs, result):\n log.debug('Logging call to %r with %d args, return %r', func, len(posargs), result)\n args = tuple(posargs)\n kwargs = tuple((namedargs or {}).items())\n record = _CallRecord(node, func, sigs, args, kwargs, result)\n if isinstance(func.data, abstract.BoundPyTDFunction):\n self._method_calls.add(record)\n elif isinstance(func.data, abstract.PyTDFunction):\n self._calls.add(record)", "docstring": "Add an entry into the call trace.\n\nArgs:\n node: The CFG node right after this function call.\n func: A cfg.Binding of a function that was called.\n sigs: The signatures that the function might have been called with.\n posargs: The positional arguments, an iterable over cfg.Variable.\n namedargs: The keyword arguments, a dict mapping str to cfg.Variable.\n result: A Variable of the possible result values."} +{"repo": "tensorflow", "function": "def SendEvents(self, request_iterator, context):\n core_metadata_count = 0\n graph_def_chunks = {}\n tensor_chunks = {}\n stream_handler = None\n for event in request_iterator:\n if not stream_handler:\n stream_handler = self._stream_handler_class()\n if event.summary and event.summary.value:\n maybe_tensor_event = self._process_tensor_event_in_chunks(event, tensor_chunks)\n if maybe_tensor_event:\n event_reply = stream_handler.on_value_event(maybe_tensor_event)\n if event_reply is not None:\n yield self._process_debug_op_state_changes(event_reply)\n elif event.graph_def:\n maybe_graph_def, maybe_device_name, maybe_wall_time = self._process_encoded_graph_def_in_chunks(event, graph_def_chunks)\n if maybe_graph_def:\n reply = stream_handler.on_graph_def(maybe_graph_def, maybe_device_name, maybe_wall_time)\n yield self._process_debug_op_state_changes(reply)\n elif event.log_message.message:\n core_metadata_count += 1\n if core_metadata_count > 1:\n raise ValueError('Expected one core metadata event; received multiple')\n reply = stream_handler.on_core_metadata_event(event)\n yield self._process_debug_op_state_changes(reply)", "docstring": "Implementation of the SendEvents service method.\n\nThis method receives streams of Event protos from the client, and processes\nthem in ways specified in the on_event() callback. 
The stream is\nbi-directional, but currently only the client-to-server stream (i.e., the\nstream from the debug ops to the server) is used.\n\nArgs:\n request_iterator: The incoming stream of Event protos.\n context: Server context.\n\nRaises:\n ValueError: If there are more than one core metadata events.\n\nYields:\n An empty stream of responses."} +{"repo": "python-fire", "function": "def _DescriptionSection(component, info):\n if custom_descriptions.NeedsCustomDescription(component):\n available_space = LINE_LENGTH - SECTION_INDENTATION\n description = custom_descriptions.GetDescription(component, available_space, LINE_LENGTH)\n summary = custom_descriptions.GetSummary(component, available_space, LINE_LENGTH)\n else:\n description = _GetDescription(info)\n summary = _GetSummary(info)\n text = description or summary or None\n if text:\n return ('DESCRIPTION', text)\n else:\n return None", "docstring": "The \"Description\" sections of the help string.\n\nArgs:\n component: The component to produce the description section for.\n info: The info dict for the component of interest.\n\nReturns:\n Returns the description if available. If not, returns the summary.\n If neither are available, returns None."} +{"repo": "starthinker", "function": "def recipe_trends_places_to_bigquery_via_query(config, auth_write, secret, key, places_dataset, places_query, places_legacy, destination_dataset, destination_table):\n twitter(config, {'auth': auth_write, 'secret': secret, 'key': key, 'trends': {'places': {'single_cell': True, 'bigquery': {'dataset': places_dataset, 'query': places_query, 'legacy': places_legacy}}}, 'out': {'bigquery': {'dataset': destination_dataset, 'table': destination_table}}})", "docstring": "Move using a WOEID query.\n\nArgs:\n auth_write (authentication) - Credentials used for writing data.\n secret (string) - NA\n key (string) - NA\n places_dataset (string) - NA\n places_query (string) - NA\n places_legacy (boolean) - NA\n destination_dataset (string) - NA\n destination_table (string) - NA"} +{"repo": "transformers", "function": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, table_mask: np.ndarray | tf.Tensor | None=None, aggregation_labels: np.ndarray | tf.Tensor | None=None, float_answer: np.ndarray | tf.Tensor | None=None, numeric_values: np.ndarray | tf.Tensor | None=None, numeric_values_scale: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFTableQuestionAnsweringOutput, Tuple[tf.Tensor]]:\n outputs = self.tapas(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n sequence_output = outputs[0]\n pooled_output = outputs[1]\n sequence_output = self.dropout(sequence_output)\n if input_ids is not None:\n input_shape = shape_list(input_ids)\n else:\n input_shape = shape_list(inputs_embeds)[:-1]\n if token_type_ids is None:\n token_type_ids = tf.fill(input_shape + [len(self.config.type_vocab_sizes)], 0)\n 
token_types = ['segment_ids', 'column_ids', 'row_ids', 'prev_labels', 'column_ranks', 'inv_column_ranks', 'numeric_relations']\n row_ids = token_type_ids[:, :, token_types.index('row_ids')]\n column_ids = token_type_ids[:, :, token_types.index('column_ids')]\n row_index = IndexMap(indices=tf.minimum(tf.cast(row_ids, tf.int32), self.config.max_num_rows - 1), num_segments=self.config.max_num_rows, batch_dims=1)\n col_index = IndexMap(indices=tf.minimum(tf.cast(column_ids, tf.int32), self.config.max_num_columns - 1), num_segments=self.config.max_num_columns, batch_dims=1)\n cell_index = ProductIndexMap(row_index, col_index)\n input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)[:-1]\n if attention_mask is None:\n attention_mask = tf.ones(input_shape)\n if table_mask is None:\n table_mask = tf.where(row_ids > 0, tf.ones_like(row_ids), tf.zeros_like(row_ids))\n input_mask_float = tf.cast(attention_mask, tf.float32)\n table_mask_float = tf.cast(table_mask, tf.float32)\n cell_mask, _ = reduce_mean(input_mask_float, cell_index)\n logits = self.compute_token_logits(sequence_output)\n column_logits = None\n if self.config.select_one_column:\n column_logits = self.compute_column_logits(sequence_output, cell_index, cell_mask, self.config.allow_empty_column_selection)\n logits_aggregation = None\n if self.config.num_aggregation_labels > 0:\n logits_aggregation = self.aggregation_classifier(pooled_output)\n total_loss = tf.zeros(shape=(1,), dtype=tf.float32)\n calculate_loss = False\n if labels is not None:\n calculate_loss = True\n is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision\n if is_supervised:\n aggregate_mask = None\n elif float_answer is not None:\n assert shape_list(labels)[0] == shape_list(float_answer)[0], 'Make sure the answers are a FloatTensor of shape (batch_size,)'\n aggregate_mask = _calculate_aggregate_mask(float_answer, pooled_output, self.config.cell_selection_preference, labels, self.aggregation_classifier)\n else:\n aggregate_mask = None\n raise ValueError('You have to specify float answers in order to calculate the aggregate mask')\n if self.config.average_logits_per_cell:\n logits_per_cell, _ = reduce_mean(logits, cell_index)\n logits = gather(logits_per_cell, cell_index)\n dist_per_token = tfp.distributions.Bernoulli(logits=logits)\n selection_loss_per_example = None\n if not self.config.select_one_column:\n weight = tf.where(labels == 0, tf.ones_like(labels, dtype=tf.float32), self.config.positive_label_weight * tf.ones_like(labels, dtype=tf.float32))\n selection_loss_per_token = -dist_per_token.log_prob(labels) * weight\n selection_loss_per_example = tf.reduce_sum(selection_loss_per_token * input_mask_float, axis=1) / (tf.reduce_sum(input_mask_float, axis=1) + EPSILON_ZERO_DIVISION)\n else:\n selection_loss_per_example, logits = _single_column_cell_selection_loss(logits, column_logits, labels, cell_index, col_index, cell_mask)\n dist_per_token = tfp.distributions.Bernoulli(logits=logits)\n if self.config.disable_per_token_loss:\n pass\n elif is_supervised:\n total_loss += tf.reduce_mean(selection_loss_per_example)\n else:\n total_loss += tf.reduce_mean(selection_loss_per_example * (1.0 - aggregate_mask))\n if self.config.num_aggregation_labels > 0:\n if is_supervised:\n if aggregation_labels is not None:\n assert shape_list(labels)[0] == shape_list(aggregation_labels)[0], 'Make sure the aggregation labels are a LongTensor of shape (batch_size,)'\n per_example_additional_loss = 
_calculate_aggregation_loss(logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight)\n else:\n raise ValueError('You have to specify aggregation labels in order to calculate the aggregation loss')\n else:\n aggregation_labels = tf.zeros(shape_list(labels)[0], dtype=tf.int32)\n per_example_additional_loss = _calculate_aggregation_loss(logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight)\n if self.config.use_answer_as_supervision:\n if numeric_values is not None and numeric_values_scale is not None:\n assert shape_list(numeric_values) == shape_list(numeric_values_scale)\n answer_loss, large_answer_loss_mask = _calculate_regression_loss(float_answer, aggregate_mask, dist_per_token, numeric_values, numeric_values_scale, table_mask_float, logits_aggregation, self.config)\n per_example_additional_loss += answer_loss\n per_example_additional_loss *= large_answer_loss_mask\n else:\n raise ValueError('You have to specify numeric values and numeric values scale in order to calculate the regression loss')\n total_loss += tf.reduce_mean(per_example_additional_loss)\n else:\n labels = tf.zeros_like(logits)\n _, logits = _single_column_cell_selection_loss(logits, column_logits, labels, cell_index, col_index, cell_mask)\n if not return_dict:\n output = (logits, logits_aggregation) + outputs[2:]\n return (total_loss,) + output if calculate_loss else output\n return TFTableQuestionAnsweringOutput(loss=total_loss if calculate_loss else None, logits=logits, logits_aggregation=logits_aggregation, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "table_mask (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):\n Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and\n padding are 0.\nlabels (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):\n Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the\n answer appearing in the table. Can be obtained using [`AutoTokenizer`].\n\n - 1 for tokens that are **part of the answer**,\n - 0 for tokens that are **not part of the answer**.\n\naggregation_labels (`tf.Tensor` of shape `(batch_size, )`, *optional*):\n Aggregation function index for every example in the batch for computing the aggregation loss. Indices\n should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for\n aggregation (WikiSQL-supervised).\nfloat_answer (`tf.Tensor` of shape `(batch_size, )`, *optional*):\n Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only\n required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss.\nnumeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):\n Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using\n [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the\n regression loss.\nnumeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):\n Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. 
Only required in case\n of weak supervision for aggregation (WTQ) to calculate the regression loss.\n\nReturns:\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, TapasForQuestionAnswering\n>>> import pandas as pd\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/tapas-base-finetuned-wtq\")\n>>> model = TapasForQuestionAnswering.from_pretrained(\"google/tapas-base-finetuned-wtq\")\n\n>>> data = {\n... \"Actors\": [\"Brad Pitt\", \"Leonardo Di Caprio\", \"George Clooney\"],\n... \"Age\": [\"56\", \"45\", \"59\"],\n... \"Number of movies\": [\"87\", \"53\", \"69\"],\n... }\n>>> table = pd.DataFrame.from_dict(data)\n>>> queries = [\"How many movies has George Clooney played in?\", \"How old is Brad Pitt?\"]\n\n>>> inputs = tokenizer(table=table, queries=queries, padding=\"max_length\", return_tensors=\"tf\")\n>>> outputs = model(**inputs)\n\n>>> logits = outputs.logits\n>>> logits_aggregation = outputs.logits_aggregation\n```"} +{"repo": "tensorflow", "function": "def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):\n if dtype is None:\n dtype = floatx()\n if seed is None:\n seed = np.random.randint(10000000.0)\n return random_ops.random_uniform(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)", "docstring": "Returns a tensor with uniform distribution of values.\n\nArgs:\n shape: A tuple of integers, the shape of tensor to create.\n minval: A float, lower boundary of the uniform distribution\n to draw samples.\n maxval: A float, upper boundary of the uniform distribution\n to draw samples.\n dtype: String, dtype of returned tensor.\n seed: Integer, random seed.\n\nReturns:\n A tensor.\n\nExample:\n\n>>> random_uniform_tensor = tf.keras.backend.random_uniform(shape=(2,3),\n... minval=0.0, maxval=1.0)\n>>> random_uniform_tensor\n"} +{"repo": "transformers", "function": "def handle_metrics(split, metrics, output_dir):\n logger.info(f'***** {split} metrics *****')\n for key in sorted(metrics.keys()):\n logger.info(f' {key} = {metrics[key]}')\n save_json(metrics, os.path.join(output_dir, f'{split}_results.json'))", "docstring": "Log and save metrics\n\nArgs:\n- split: one of train, val, test\n- metrics: metrics dict\n- output_dir: where to save the metrics"} +{"repo": "transformers", "function": "def reduce_labels_transform(labels: np.ndarray, **kwargs) -> np.ndarray:\n labels[labels == 0] = 255\n labels = labels - 1\n labels[labels == 254] = 255\n return labels", "docstring": "Set `0` label as with value 255 and then reduce all other labels by 1.\n\nExample:\n Initial class labels: 0 - background; 1 - road; 2 - car;\n Transformed class labels: 255 - background; 0 - road; 1 - car;\n\n**kwargs are required to use this function with albumentations."} +{"repo": "beam", "function": "def copy_tree(self, src, dest):\n assert src.endswith('/')\n assert dest.endswith('/')\n results = []\n for entry in self.list_prefix(src):\n rel_path = entry[len(src):]\n try:\n self.copy(entry, dest + rel_path)\n results.append((entry, dest + rel_path, None))\n except messages.S3ClientError as e:\n results.append((entry, dest + rel_path, e))\n return results", "docstring": "Renames the given S3 directory and it's contents recursively\nfrom src to dest.\n\nArgs:\n src: S3 file path pattern in the form s3:////.\n dest: S3 file path pattern in the form s3:////.\n\nReturns:\n List of tuples of (src, dest, exception) where exception is None if the\n operation succeeded or the relevant exception if the operation failed."} +{"repo": "starthinker", "function": "def 
sheets_values_batch_update(config, auth, sheet_url_or_name, data):\n sheet_id = sheets_id(config, auth, sheet_url_or_name)\n API_Sheets(config, auth).spreadsheets().values().batchUpdate(spreadsheetId=sheet_id, body=data).execute()", "docstring": "Helper for performing batch value operations.\n\nArgs:\n config - see starthinker/util/configuration.py\n auth - user or service\n sheet_url_or_name - one of: URL, document title, or id\n data - JSON data for sending to batch request\n\nNo Return"} +{"repo": "tensorflow", "function": "def _batch_gather(params, indices, axis, batch_dims):\n if not params.shape[:batch_dims].is_compatible_with(indices.shape[:batch_dims]):\n raise ValueError('batch shape from indices %s does not match params shape %s' % (indices.shape[:batch_dims], params.shape))\n if batch_dims > 1:\n if not isinstance(params, ragged_tensor.RaggedTensor):\n if indices.uniform_row_length is None:\n raise ValueError('batch shape from indices does not match params shape: ragged indices dimension corresponds to uniform params dimension')\n params = ragged_tensor.RaggedTensor.from_tensor(params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype)\n if not isinstance(indices, ragged_tensor.RaggedTensor):\n if params.uniform_row_length is None:\n raise ValueError('batch shape from indices does not match params shape: ragged params dimension corresponds to uniform indices dimension')\n indices = ragged_tensor.RaggedTensor.from_tensor(indices, ragged_rank=1, row_splits_dtype=params.row_splits.dtype)\n return params.with_values(_gather(params.values, indices.values, axis - 1, batch_dims - 1))\n if axis > 1:\n if not isinstance(indices, ragged_tensor.RaggedTensor):\n adjusted_indices = params.with_values(array_ops.repeat(indices, params.row_lengths(), 0))\n else:\n if not isinstance(params, ragged_tensor.RaggedTensor):\n params = ragged_tensor.RaggedTensor.from_tensor(params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype)\n adjusted_indices = _gather(indices, params.with_values(array_ops.repeat(math_ops.range(params.nrows()), params.row_lengths())), 0, 0)\n return _batch_gather(params, adjusted_indices, axis, batch_dims + 1)\n if indices.shape.rank is None:\n raise ValueError('rank(indices) must be known statically')\n assert batch_dims == 1\n flat_params = _flatten_dims_0_and_1(params)\n adjustments = _row_starts(params, indices.dtype)\n adjustments = _increase_rank_to(adjustments, indices.shape.ndims)\n adjusted_indices = indices + adjustments\n return _gather(flat_params, adjusted_indices, axis - 1, 0)", "docstring": "Helper that implements the body for ragged gather() when batch_dims>0.\n\nArgs:\n params: The tensor from which to gather values.\n indices: The indices of values to gather.\n axis: The axis in `params` to gather `indices` from.\n batch_dims: The number of batch dimensions.\n\nReturns:\n A potentially ragged tensor."} +{"repo": "tensorflow", "function": "def _pyval_empty_list_depth(pyval):\n if isinstance(pyval, list):\n if not pyval:\n return 1\n depths = [_pyval_empty_list_depth(v) for v in pyval]\n if any((depth is None for depth in depths)):\n return None\n else:\n return max(depths) + 1\n else:\n return None", "docstring": "Find the max depth for nested empty lists.\n\nArgs:\n pyval: A nested python list.\n\nReturns:\n The maximum depth of empty lists in `pyval`, or None if `pyval` contains\n anything other than nested empty lists."} +{"repo": "transformers", "function": "def box_area(boxes: Tensor) -> Tensor:\n boxes = _upcast(boxes)\n return (boxes[:, 2] 
- boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "docstring": "Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates.\n\nArgs:\n boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):\n Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1\n < x2` and `0 <= y1 < y2`.\n\nReturns:\n `torch.FloatTensor`: a tensor containing the area for each box."} +{"repo": "transformers", "function": "def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]]=None, prepend_batch_axis: bool=False):\n if not isinstance(tensor_type, TensorType):\n tensor_type = TensorType(tensor_type)\n if tensor_type == TensorType.TENSORFLOW:\n if not is_tf_available():\n raise ImportError('Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.')\n import tensorflow as tf\n as_tensor = tf.constant\n is_tensor = tf.is_tensor\n elif tensor_type == TensorType.PYTORCH:\n if not is_torch_available():\n raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.')\n import torch\n as_tensor = torch.tensor\n is_tensor = torch.is_tensor\n elif tensor_type == TensorType.JAX:\n if not is_flax_available():\n raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.')\n import jax.numpy as jnp\n as_tensor = jnp.array\n is_tensor = _is_jax\n else:\n as_tensor = np.asarray\n is_tensor = _is_numpy\n try:\n if prepend_batch_axis:\n inputs = [inputs]\n if not is_tensor(inputs):\n inputs = as_tensor(inputs)\n except:\n raise ValueError(\"Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' 'truncation=True' to have batched tensors with the same length.\")\n return inputs", "docstring": "Convert the inner content to tensors.\n\nArgs:\n tensor_type (`str` or [`~utils.TensorType`], *optional*):\n The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If\n unset, no modification is done.\n prepend_batch_axis (`int`, *optional*, defaults to `False`):\n Whether or not to add the batch dimension during the conversion."} +{"repo": "transformers", "function": "class Qwen2_5OmniToken2WavConfig(PretrainedConfig):\n model_type = 'qwen2_5_omni_token2wav'\n sub_configs = {'dit_config': Qwen2_5OmniDiTConfig, 'bigvgan_config': Qwen2_5OmniBigVGANConfig}\n\n def __init__(self, dit_config=None, bigvgan_config=None, **kwargs):\n if dit_config is None:\n dit_config = {}\n if bigvgan_config is None:\n bigvgan_config = {}\n self.dit_config = Qwen2_5OmniDiTConfig(**dit_config)\n self.bigvgan_config = Qwen2_5OmniBigVGANConfig(**bigvgan_config)\n super().__init__(**kwargs)", "docstring": "This is the configuration class to store the configuration of a [`Qwen2_5OmniToken2WavModel`].\nIt is used to instantiate the Qwen2.5-Omni-Token2Wav model which combines a Diffusion Transformer (DiT) for mel-spectrogram generation with a BigVGAN model for waveform synthesis. The configuration contains sub-configurations for both components.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. 
Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n dit_config ([`DiT_Args`], *optional*):\n Configuration class for the Diffusion Transformer (DiT) module responsible for generating mel-spectrograms.\n bigvgan_config ([`BigVGAN_Args`], *optional*):\n Configuration class for the BigVGAN module responsible for converting mel-spectrograms to waveforms.\nExample:\n\n```python\n>>> from transformers import Qwen2_5OmniToken2WavModel, DiT_Args, BigVGAN_Args\n\n>>> # Initialize DiT configuration\n>>> dit_config = DiT_Args(\n... dim=1024,\n... depth=22,\n... heads=16,\n... ff_mult=2\n... )\n\n>>> # Initialize BigVGAN configuration\n>>> bigvgan_config = BigVGAN_Args(\n... mel_dim=80,\n... upsample_rates=[5,3,2,2,2,2]\n... )\n\n>>> # Initialize main configuration\n>>> config = Qwen2_5OmniToken2WavConfig(dit_config, bigvgan_config)\n\n>>> # Initialize model with config\n>>> model = Qwen2_5OmniToken2Wav(config)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "beam", "function": "def create_squad_example(text):\n question, context = text\n yield (question, QuestionAnsweringPipeline.create_sample(question, context))", "docstring": "Creates SquadExample objects to be fed to QuestionAnsweringPipeline\nsupported by Hugging Face.\n\nCheck out https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.QuestionAnsweringPipeline.__call__.X #pylint: disable=line-too-long\nto learn about valid input types for QuestionAnswering Pipeline.\nArgs:\n text (Tuple[str,str]): a tuple of question and context."} +{"repo": "pyglove", "function": "def should_stop_early(self) -> bool:\n if not self._trial.measurements:\n return False\n return self._should_stop_early_fn(self._trial)", "docstring": "Tells whether current trial should be stopped early.\n\nIn `pg.sample`, an optional `EarlyStoppingPolicy` can be provided, which is\nuseful for terminating trials which are progressive evaluated. Progressive\nevaluation on examples can be achieved by calling `feedback.add_measurement`\nmultiple times at different steps. In-between these steps, users can call\nthis method to determine if current trial is considered less competitive by\nthe early stopping policy, and thus can be abandoned. 
In that case, users\nshould call `feedback.skip()` to abandon current trial without feeding back\nthe reward to the search algorithm.\n\nReturns:\n If current trial can be stopped early."} +{"repo": "python-fire", "function": "def AddCalledComponent(self, component, target, args, filename, lineno, capacity, action=CALLED_CALLABLE):\n element = FireTraceElement(component=component, action=action, target=target, args=args, filename=filename, lineno=lineno, capacity=capacity)\n self.elements.append(element)", "docstring": "Adds an element to the trace indicating that a component was called.\n\nAlso applies to instantiating a class.\n\nArgs:\n component: The result of calling the callable.\n target: The name of the callable.\n args: The args consumed in order to call this callable.\n filename: The file in which the callable is defined, or None if N/A.\n lineno: The line number on which the callable is defined, or None if N/A.\n capacity: (bool) Whether the callable could have accepted additional args.\n action: The value to include as the action in the FireTraceElement."} +{"repo": "transformers", "function": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n if 'max_size' in kwargs:\n logger.warning_once(\"The `max_size` parameter is deprecated and will be removed in v4.26. Please specify in `size['longest_edge'] instead`.\")\n max_size = kwargs.pop('max_size')\n else:\n max_size = None\n size = get_size_dict(size, max_size=max_size, default_to_square=False)\n if 'shortest_edge' in size and 'longest_edge' in size:\n new_size = get_resize_output_image_size(image, size['shortest_edge'], size['longest_edge'], input_data_format=input_data_format)\n elif 'max_height' in size and 'max_width' in size:\n new_size = get_image_size_for_max_height_width(image, size['max_height'], size['max_width'], input_data_format=input_data_format)\n elif 'height' in size and 'width' in size:\n new_size = (size['height'], size['width'])\n else:\n raise ValueError(f\"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.\")\n image = resize(image, size=new_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)\n return image", "docstring": "Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an\nint, smaller edge of the image will be matched to this number.\n\nArgs:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the image's `(height, width)` dimensions after resizing. 
Available options are:\n - `{\"height\": int, \"width\": int}`: The image will be resized to the exact size `(height, width)`.\n Do NOT keep the aspect ratio.\n - `{\"shortest_edge\": int, \"longest_edge\": int}`: The image will be resized to a maximum size respecting\n the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge\n less or equal to `longest_edge`.\n - `{\"max_height\": int, \"max_width\": int}`: The image will be resized to the maximum size respecting the\n aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to\n `max_width`.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n Resampling filter to use if resizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred."} +{"repo": "python-fire", "function": "def IsCompatible(self, allow_py3=False, raise_exception=False):\n error = None\n if not self.version:\n error = 'ERROR: Your current version of Python is not compatible with the Google Cloud SDK. {0}\\n'.format(self.SupportedVersionMessage(allow_py3))\n elif self.version[0] < 3:\n if self.version < PythonVersion.MIN_REQUIRED_PY2_VERSION:\n error = 'ERROR: Python {0}.{1} is not compatible with the Google Cloud SDK. {2}\\n'.format(self.version[0], self.version[1], self.SupportedVersionMessage(allow_py3))\n elif not allow_py3:\n error = 'ERROR: Python 3 and later is not compatible with the Google Cloud SDK. {0}\\n'.format(self.SupportedVersionMessage(allow_py3))\n elif self.version < PythonVersion.MIN_SUPPORTED_PY3_VERSION:\n error = 'ERROR: Python {0}.{1} is not compatible with the Google Cloud SDK. {2}\\n'.format(self.version[0], self.version[1], self.SupportedVersionMessage(allow_py3))\n if error:\n if raise_exception:\n raise Error(error)\n sys.stderr.write(error)\n sys.stderr.write(PythonVersion.ENV_VAR_MESSAGE)\n return False\n if self.version >= self.MIN_REQUIRED_PY2_VERSION and self.version < self.MIN_SUPPORTED_PY2_VERSION:\n sys.stderr.write('WARNING: Python 2.6.x is no longer officially supported by the Google Cloud SDK\\nand may not function correctly. {0}\\n{1}'.format(self.SupportedVersionMessage(allow_py3), PythonVersion.ENV_VAR_MESSAGE))\n return True", "docstring": "Ensure that the Python version we are using is compatible.\n\nThis will print an error message if not compatible.\n\nCompatible versions are 2.6 and 2.7 and > 3.4 if allow_py3 is True.\nWe don't guarantee support for 2.6 so we want to warn about it.\n\nArgs:\n allow_py3: bool, True if we should allow a Python 3 interpreter to run\n gcloud. 
If False, this returns an error for Python 3.\n raise_exception: bool, True to raise an exception rather than printing\n the error and exiting.\n\nRaises:\n Error: If not compatible and raise_exception is True.\n\nReturns:\n bool, True if the version is valid, False otherwise."} +{"repo": "tensorflow", "function": "def convert_variables_to_constants_v2(func, lower_control_flow=True, aggressive_inlining=False):\n converter_data = _FunctionConverterDataInEager(func=func, lower_control_flow=lower_control_flow, aggressive_inlining=aggressive_inlining)\n output_graph_def, converted_input_indices = _replace_variables_by_constants(converter_data=converter_data)\n return _construct_concrete_function(func, output_graph_def, converted_input_indices)", "docstring": "Replaces all the variables in a graph with constants of the same values.\n\nTensorFlow 2.0 function for converting all Variable ops into Const ops holding\nthe same values. This makes it possible to describe the network fully with a\nsingle GraphDef file, and allows the removal of a lot of ops related to\nloading and saving the variables. This function runs Grappler's function\ninlining optimization in order to return a single subgraph.\n\nThe current implementation only works for graphs that do not contain any\ncontrol flow or embedding related ops.\n\nArgs:\n func: ConcreteFunction.\n lower_control_flow: Boolean indicating whether or not to lower control flow\n ops such as If and While. (default True)\n aggressive_inlining: Boolean indicating whether or not to do aggressive\n function inlining (might be unsafe if function has stateful ops, not\n properly connected to control outputs). (default False)\n\nReturns:\n ConcreteFunction containing a simplified version of the original."} +{"repo": "tensorflow", "function": "def _broadcast_dynamic_shape_one_layer(a, b):\n a_0 = a[0]\n b_0 = b[0]\n\n def broadcast_from_a():\n a_layer = array_ops.zeros(b_0, dtype=b_0.dtype)\n b_layer = math_ops.range(b_0)\n target = b\n return [a_layer, b_layer, target]\n a_static = tensor_util.constant_value(a)\n if a_static is not None and a_static[0] == 1:\n [a_gi, b_gi, target] = broadcast_from_a()\n a_layer = _LayerBroadcaster.from_gather_index(a_gi)\n b_layer = _LayerBroadcaster.from_gather_index(b_gi)\n return [a_layer, b_layer, target]\n\n def broadcast_from_b():\n a_layer = math_ops.range(a_0)\n b_layer = array_ops.zeros(a_0, dtype=a_0.dtype)\n target = a\n return [a_layer, b_layer, target]\n b_static = tensor_util.constant_value(b)\n if b_static is not None and b_static[0] == 1:\n [a_gi, b_gi, target] = broadcast_from_b()\n a_layer = _LayerBroadcaster.from_gather_index(a_gi)\n b_layer = _LayerBroadcaster.from_gather_index(b_gi)\n return [a_layer, b_layer, target]\n\n def broadcast_noop():\n a_layer = math_ops.range(a_0)\n b_layer = math_ops.range(b_0)\n target = b\n return [a_layer, b_layer, target]\n can_broadcast_from_a = math_ops.equal(a_0, 1)\n can_broadcast_from_b = math_ops.equal(b_0, 1)\n\n def broadcast_not_from_a():\n return cond.cond(can_broadcast_from_b, true_fn=broadcast_from_b, false_fn=broadcast_noop)\n nrows_equal = math_ops.equal(a_0, b_0)\n can_broadcast = math_ops.logical_or(can_broadcast_from_a, math_ops.logical_or(can_broadcast_from_b, nrows_equal))\n check_can_broadcast = check_ops.assert_equal(can_broadcast, True, message='Cannot broadcast')\n results = cond.cond(can_broadcast_from_a, true_fn=broadcast_from_a, false_fn=broadcast_not_from_a)\n results = [control_flow_ops.with_dependencies([check_can_broadcast], x) for x in 
results]\n [a_gi, b_gi, target] = results\n a_layer = _LayerBroadcaster.from_gather_index(a_gi)\n b_layer = _LayerBroadcaster.from_gather_index(b_gi)\n return [a_layer, b_layer, target]", "docstring": "Broadcast two vectors, given their shapes.\n\nArgs:\n a: the number of rows in a.\n b: the number of rows in b.\n\nReturns:\n (layer_a, layer_b, target_shape)\n layer_a is a _LayerBroadcaster from a to the target_shape.\n layer_b is a _LayerBroadcaster from b to the target_shape.\n target_shape is the target_shape\n\nRaises:\n InvalidArgumentError if the shapes are not consistent."} +{"repo": "tensorflow", "function": "class Cropping1D(Layer):\n\n def __init__(self, cropping=(1, 1), **kwargs):\n super(Cropping1D, self).__init__(**kwargs)\n self.cropping = conv_utils.normalize_tuple(cropping, 2, 'cropping')\n self.input_spec = InputSpec(ndim=3)\n\n def compute_output_shape(self, input_shape):\n input_shape = tensor_shape.TensorShape(input_shape).as_list()\n if input_shape[1] is not None:\n length = input_shape[1] - self.cropping[0] - self.cropping[1]\n else:\n length = None\n return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])\n\n def call(self, inputs):\n if self.cropping[1] == 0:\n return inputs[:, self.cropping[0]:, :]\n else:\n return inputs[:, self.cropping[0]:-self.cropping[1], :]\n\n def get_config(self):\n config = {'cropping': self.cropping}\n base_config = super(Cropping1D, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "docstring": "Cropping layer for 1D input (e.g. temporal sequence).\n\nIt crops along the time dimension (axis 1).\n\nExamples:\n\n>>> input_shape = (2, 3, 2)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> print(x)\n[[[ 0 1]\n [ 2 3]\n [ 4 5]]\n [[ 6 7]\n [ 8 9]\n [10 11]]]\n>>> y = tf.keras.layers.Cropping1D(cropping=1)(x)\n>>> print(y)\ntf.Tensor(\n [[[2 3]]\n [[8 9]]], shape=(2, 1, 2), dtype=int64)\n\nArgs:\n cropping: Int or tuple of int (length 2)\n How many units should be trimmed off at the beginning and end of\n the cropping dimension (axis 1).\n If a single int is provided, the same value will be used for both.\n\nInput shape:\n 3D tensor with shape `(batch_size, axis_to_crop, features)`\n\nOutput shape:\n 3D tensor with shape `(batch_size, cropped_axis, features)`"} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_value: Optional[HybridMambaAttentionDynamicCache]=None, output_attentions: Optional[bool]=False, output_router_logits: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n residual = hidden_states\n hidden_states = self.input_layernorm(hidden_states)\n hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.pre_ff_layernorm(hidden_states)\n ff_outputs = self.feed_forward(hidden_states)\n if isinstance(ff_outputs, tuple):\n hidden_states, router_logits = ff_outputs\n else:\n hidden_states, router_logits = (ff_outputs, None)\n hidden_states = residual + hidden_states\n outputs = 
(hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n if use_cache:\n outputs += (present_key_value,)\n if output_router_logits:\n outputs += (router_logits,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n `(batch, sequence_length)` where padding elements are indicated by 0.\n past_key_value (`HybridMambaAttentionDynamicCache`, *optional*): cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_router_logits (`bool`, *optional*):\n Whether or not to return the logits of all the routers. They are useful for computing the router loss, and\n should not be returned during inference.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence."} +{"repo": "keras", "function": "class ConvLSTM(RNN):\n\n def __init__(self, rank, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, activation='tanh', recurrent_activation='sigmoid', use_bias=True, kernel_initializer='glorot_uniform', recurrent_initializer='orthogonal', bias_initializer='zeros', unit_forget_bias=True, kernel_regularizer=None, recurrent_regularizer=None, bias_regularizer=None, kernel_constraint=None, recurrent_constraint=None, bias_constraint=None, dropout=0.0, recurrent_dropout=0.0, seed=None, return_sequences=False, return_state=False, go_backwards=False, stateful=False, **kwargs):\n cell = ConvLSTMCell(rank=rank, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, recurrent_activation=recurrent_activation, use_bias=use_bias, kernel_initializer=kernel_initializer, recurrent_initializer=recurrent_initializer, bias_initializer=bias_initializer, unit_forget_bias=unit_forget_bias, kernel_regularizer=kernel_regularizer, recurrent_regularizer=recurrent_regularizer, bias_regularizer=bias_regularizer, kernel_constraint=kernel_constraint, recurrent_constraint=recurrent_constraint, bias_constraint=bias_constraint, dropout=dropout, recurrent_dropout=recurrent_dropout, seed=seed, name='conv_lstm_cell', dtype=kwargs.get('dtype'))\n super().__init__(cell, return_sequences=return_sequences, return_state=return_state, go_backwards=go_backwards, stateful=stateful, **kwargs)\n self.input_spec = InputSpec(ndim=rank + 3)\n\n def call(self, sequences, initial_state=None, mask=None, training=False):\n return super().call(sequences, initial_state=initial_state, mask=mask, training=training)\n\n def compute_output_shape(self, sequences_shape, initial_state_shape=None):\n batch_size = sequences_shape[0]\n steps = sequences_shape[1]\n step_shape = (batch_size,) + sequences_shape[2:]\n state_shape = self.cell.compute_output_shape(step_shape)[0][1:]\n if self.return_sequences:\n output_shape = (batch_size, steps) + state_shape\n else:\n output_shape = (batch_size,) + state_shape\n if self.return_state:\n batched_state_shape = (batch_size,) + state_shape\n return (output_shape, batched_state_shape, 
batched_state_shape)\n return output_shape\n\n def compute_mask(self, _, mask):\n mask = tree.flatten(mask)[0]\n output_mask = mask if self.return_sequences else None\n if self.return_state:\n state_mask = [None, None]\n return [output_mask] + state_mask\n else:\n return output_mask\n\n @property\n def filters(self):\n return self.cell.filters\n\n @property\n def kernel_size(self):\n return self.cell.kernel_size\n\n @property\n def strides(self):\n return self.cell.strides\n\n @property\n def padding(self):\n return self.cell.padding\n\n @property\n def data_format(self):\n return self.cell.data_format\n\n @property\n def dilation_rate(self):\n return self.cell.dilation_rate\n\n @property\n def activation(self):\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n return self.cell.recurrent_activation\n\n @property\n def use_bias(self):\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n return self.cell.recurrent_initializer\n\n @property\n def bias_initializer(self):\n return self.cell.bias_initializer\n\n @property\n def unit_forget_bias(self):\n return self.cell.unit_forget_bias\n\n @property\n def kernel_regularizer(self):\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n return self.cell.bias_regularizer\n\n @property\n def kernel_constraint(self):\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n return self.cell.bias_constraint\n\n @property\n def dropout(self):\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n return self.cell.recurrent_dropout\n\n def get_config(self):\n config = {'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides, 'padding': self.padding, 'data_format': self.data_format, 'dilation_rate': self.dilation_rate, 'activation': activations.serialize(self.activation), 'recurrent_activation': activations.serialize(self.recurrent_activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'recurrent_initializer': initializers.serialize(self.recurrent_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'unit_forget_bias': self.unit_forget_bias, 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'recurrent_constraint': constraints.serialize(self.recurrent_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint), 'dropout': self.dropout, 'recurrent_dropout': self.recurrent_dropout, 'seed': self.cell.seed}\n base_config = super().get_config()\n del base_config['cell']\n return {**base_config, **config}\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)", "docstring": "Abstract N-D Convolutional LSTM layer (used as implementation base).\n\nSimilar to an LSTM layer, but the input transformations\nand recurrent transformations are both convolutional.\n\nArgs:\n rank: Integer, rank of the convolution, e.g. 
\"2\" for 2D convolutions.\n filters: Integer, the dimensionality of the output space\n (i.e. the number of output filters in the convolution).\n kernel_size: An integer or tuple/list of n integers, specifying the\n dimensions of the convolution window.\n strides: An integer or tuple/list of n integers,\n specifying the strides of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string,\n one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, time, ..., channels)`\n while `channels_first` corresponds to\n inputs with shape `(batch, time, channels, ...)`.\n When unspecified, uses\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json` (if exists) else 'channels_last'.\n Defaults to `'channels_last'`.\n dilation_rate: An integer or tuple/list of n integers, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function to use.\n By default hyperbolic tangent activation function is applied\n (`tanh(x)`).\n recurrent_activation: Activation function to use\n for the recurrent step.\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n bias_initializer: Initializer for the bias vector.\n unit_forget_bias: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Use in combination with `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al., 2015](\n http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix.\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to.\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix.\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix.\n bias_constraint: Constraint function applied to the bias vector.\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n seed: Random seed for dropout.\n return_sequences: Boolean. Whether to return the last output\n in the output sequence, or the full sequence. (default False)\n return_state: Boolean Whether to return the last state\n in addition to the output. (default False)\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch."} +{"repo": "transformers", "function": "class PLBartDecoder(PLBartPreTrainedModel):\n\n def __init__(self, config: PLBartConfig, embed_tokens: Optional[nn.Embedding]=None):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n self.padding_idx = config.pad_token_id\n self.max_target_positions = config.max_position_embeddings\n embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n self.embed_tokens = PLBartScaledWordEmbedding(config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale)\n if embed_tokens is not None:\n self.embed_tokens.weight = embed_tokens.weight\n self.embed_positions = PLBartLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model)\n self.layers = nn.ModuleList([PLBartDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])\n self.layernorm_embedding = nn.LayerNorm(config.d_model)\n self.gradient_checkpointing = False\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, value):\n self.embed_tokens = value\n\n def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:\n \"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values\n selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing\n cross-attention on hidden heads. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of\n shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the\n cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those\n that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of\n all `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. It is used to update the\n cache in the correct position and to infer the complete sequence length.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. 
Setting `use_cache=False`...')\n use_cache = False\n if (input_ids is None) ^ (inputs_embeds is not None):\n raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')\n elif input_ids is not None:\n input = input_ids\n input_shape = input.shape\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n input = inputs_embeds[:, :, -1]\n else:\n raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input)\n return_legacy_cache = False\n if use_cache and (not isinstance(past_key_values, Cache)):\n return_legacy_cache = True\n logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')\n past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)\n batch_size, seq_length = inputs_embeds.size()[:-1]\n past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0\n if cache_position is None:\n cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device)\n if attention_mask is None and (not is_torchdynamo_compiling()):\n mask_seq_length = past_key_values_length + seq_length\n attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)\n self_attn_cache = past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values\n attention_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, self_attn_cache)\n encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds)\n positions = self.embed_positions(input, past_key_values_length, position_ids=cache_position)\n positions = positions.to(inputs_embeds.device)\n hidden_states = inputs_embeds + positions\n hidden_states = self.layernorm_embedding(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None\n next_decoder_cache = None\n for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):\n if attn_mask is not None:\n if attn_mask.size()[0] != len(self.layers):\n raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')\n for idx, decoder_layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n continue\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, cache_position)\n else:\n layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, 
encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache = layer_outputs[3 if output_attentions else 1]\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_outputs[2],)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n next_cache = next_decoder_cache if use_cache else None\n if return_legacy_cache:\n next_cache = past_key_values.to_legacy_cache()\n if not return_dict:\n return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))\n return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)", "docstring": "Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PLBartDecoderLayer`]\n\nArgs:\n config: PLBartConfig\n embed_tokens (nn.Embedding): output embedding"} +{"repo": "mobly", "function": "def callEventWaitAndGetRpc(self, callback_id, event_name, timeout_sec):\n timeout_ms = int(timeout_sec * 1000)\n try:\n return self._event_client.eventWaitAndGet(callback_id, event_name, timeout_ms)\n except Exception as e:\n if TIMEOUT_ERROR_MESSAGE in str(e):\n raise errors.CallbackHandlerTimeoutError(self._device, f'Timed out after waiting {timeout_sec}s for event \"{event_name}\" triggered by {self._method_name} ({self.callback_id}).') from e\n raise", "docstring": "Waits and returns an existing CallbackEvent for the specified identifier.\n\nThis function calls snippet lib's eventWaitAndGet RPC.\n\nArgs:\n callback_id: str, the callback identifier.\n event_name: str, the callback name.\n timeout_sec: float, the number of seconds to wait for the event.\n\nReturns:\n The event dictionary.\n\nRaises:\n errors.CallbackHandlerTimeoutError: The expected event does not occur\n within the time limit."} +{"repo": "tensorflow", "function": "def convert_inference_tf_type_to_tflite_type(tf_type: dtypes.DType, usage: str='') -> _types_pb2.IODataType:\n mapping = {dtypes.float32: _types_pb2.FLOAT, dtypes.uint8: _types_pb2.QUANTIZED_UINT8, dtypes.int8: _types_pb2.QUANTIZED_INT8, dtypes.int16: _types_pb2.QUANTIZED_INT16}\n tflite_type = mapping.get(tf_type)\n if tflite_type is None:\n raise ValueError('Unsupported TensorFlow type `{0}` provided for the {1}'.format(tf_type, usage))\n return tflite_type", "docstring": "Convert inference type from tf type to tflite type.\n\nArgs:\n tf_type: TensorFlow type.\n usage: Text describing the reason for invoking this function.\n\nRaises:\n ValueError: If `tf_type` is unsupported.\n\nReturns:\n tflite_type: TFLite type. 
Refer to compiler/mlir/lite/types.proto."} +{"repo": "transformers", "function": "class UnivNetLvcBlock(nn.Module):\n\n def __init__(self, config: UnivNetConfig, layer_id: int, lvc_hop_size: int=256):\n super().__init__()\n self.hidden_channels = config.model_hidden_channels\n self.kernel_size = config.resblock_kernel_sizes[layer_id]\n self.stride = config.resblock_stride_sizes[layer_id]\n self.dilations = config.resblock_dilation_sizes[layer_id]\n self.cond_hop_length = lvc_hop_size\n self.leaky_relu_slope = config.leaky_relu_slope\n self.num_blocks = len(self.dilations)\n self.convt_pre = nn.ConvTranspose1d(self.hidden_channels, self.hidden_channels, 2 * self.stride, stride=self.stride, padding=self.stride // 2 + self.stride % 2, output_padding=self.stride % 2)\n self.kernel_predictor = UnivNetKernelPredictor(config, self.kernel_size, self.num_blocks)\n self.resblocks = nn.ModuleList([UnivNetLvcResidualBlock(config, self.kernel_size, self.dilations[i]) for i in range(self.num_blocks)])\n\n def forward(self, hidden_states: torch.FloatTensor, spectrogram: torch.FloatTensor):\n hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)\n hidden_states = self.convt_pre(hidden_states)\n kernels, biases = self.kernel_predictor(spectrogram)\n for i, resblock in enumerate(self.resblocks):\n kernel = kernels[:, i, :, :, :, :]\n bias = biases[:, i, :, :]\n hidden_states = resblock(hidden_states, kernel, bias, hop_size=self.cond_hop_length)\n return hidden_states\n\n def apply_weight_norm(self):\n weight_norm = nn.utils.weight_norm\n if hasattr(nn.utils.parametrizations, 'weight_norm'):\n weight_norm = nn.utils.parametrizations.weight_norm\n weight_norm(self.convt_pre)\n self.kernel_predictor.apply_weight_norm()\n for layer in self.resblocks:\n layer.apply_weight_norm()\n\n def remove_weight_norm(self):\n nn.utils.remove_weight_norm(self.convt_pre)\n self.kernel_predictor.remove_weight_norm()\n for layer in self.resblocks:\n layer.remove_weight_norm()", "docstring": "Implementation of the location variable convolution (LVC) residual block of the UnivNet residual block. Includes a\n`UnivNetKernelPredictor` inside to predict the kernels and biases of the LVC layers.\n\nBased on LVCBlock in\n[maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L98)\n\nParameters:\n config (`UnivNetConfig`):\n Config for the `UnivNetModel` model.\n layer_id (`int`):\n An integer corresponding to the index of the current LVC resnet block layer. 
This should be between 0 and\n `len(config.resblock_stride_sizes) - 1` inclusive.\n lvc_hop_size (`int`, *optional*, defaults to 256):\n The hop size for the location variable convolutional layers."} +{"repo": "fhir-py", "function": "class Slice:\n slice_def: message.Message\n relative_path: str\n slice_rules: Sequence[Tuple[str, message.Message]]", "docstring": "A container for all element definitions describing a slice.\n\nhttps://build.fhir.org/profiling.html#slicing\n\nAttributes:\n slice_def: The element definition describing the slice itself.\n relative_path: The path to the sliced collection relative to the structure\n definition defining the slice.\n slice_rules: Tuples of (relative_path, element_definition) for the element\n definitions describing the contents of the slice and the path to them\n relative to the structure definition defining the slice."} +{"repo": "transformers", "function": "class PegasusConfig(PretrainedConfig):\n model_type = 'pegasus'\n keys_to_ignore_at_inference = ['past_key_values']\n attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}\n\n def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.d_model = d_model\n self.encoder_ffn_dim = encoder_ffn_dim\n self.encoder_layers = encoder_layers\n self.encoder_attention_heads = encoder_attention_heads\n self.decoder_ffn_dim = decoder_ffn_dim\n self.decoder_layers = decoder_layers\n self.decoder_attention_heads = decoder_attention_heads\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation_dropout = activation_dropout\n self.activation_function = activation_function\n self.init_std = init_std\n self.encoder_layerdrop = encoder_layerdrop\n self.decoder_layerdrop = decoder_layerdrop\n self.use_cache = use_cache\n self.num_hidden_layers = encoder_layers\n self.scale_embedding = scale_embedding\n super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)\n\n @property\n def num_attention_heads(self) -> int:\n return self.encoder_attention_heads\n\n @property\n def hidden_size(self) -> int:\n return self.d_model", "docstring": "This is the configuration class to store the configuration of a [`PegasusModel`]. It is used to instantiate a\nPEGASUS model according to the specified arguments, defining the model architecture. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the PEGASUS\n[google/pegasus-large](https://huggingface.co/google/pegasus-large) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 50265):\n Vocabulary size of the PEGASUS model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`PegasusModel`] or [`TFPegasusModel`].\n d_model (`int`, *optional*, defaults to 1024):\n Dimensionality of the layers and the pooler layer.\n encoder_layers (`int`, *optional*, defaults to 12):\n Number of encoder layers.\n decoder_layers (`int`, *optional*, defaults to 12):\n Number of decoder layers.\n encoder_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the Transformer encoder.\n decoder_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the Transformer decoder.\n decoder_ffn_dim (`int`, *optional*, defaults to 4096):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in decoder.\n encoder_ffn_dim (`int`, *optional*, defaults to 4096):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in encoder.\n activation_function (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n activation_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for activations inside the fully connected layer.\n max_position_embeddings (`int`, *optional*, defaults to 1024):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n init_std (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n encoder_layerdrop (`float`, *optional*, defaults to 0.0):\n The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)\n for more details.\n decoder_layerdrop (`float`, *optional*, defaults to 0.0):\n The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)\n for more details.\n scale_embedding (`bool`, *optional*, defaults to `False`):\n Scale embeddings by dividing by sqrt(d_model).\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models)\n forced_eos_token_id (`int`, *optional*, defaults to 1):\n The id of the token to force as the last generated token when `max_length` is reached.
Usually set to\n `eos_token_id`.\n\nExample:\n\n```python\n>>> from transformers import PegasusConfig, PegasusModel\n\n>>> # Initializing a PEGASUS google/pegasus-large style configuration\n>>> configuration = PegasusConfig()\n\n>>> # Initializing a model (with random weights) from the google/pegasus-large style configuration\n>>> model = PegasusModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "beam", "function": "def repeat(self, caller: Caller[RequestT, ResponseT], request: RequestT, timeout: float, metrics_collector: Optional[_MetricsCollector]=None) -> ResponseT:\n return _execute_request(caller, request, timeout, metrics_collector)", "docstring": "repeat method is called from the RequestResponseIO when\na repeater is enabled.\n\nArgs:\n caller: a `~apache_beam.io.requestresponse.Caller` object that\n calls the API.\n request: input request to repeat.\n timeout: time to wait for the request to complete.\n metrics_collector: (Optional) a\n `~apache_beam.io.requestresponse._MetricsCollector` object to\n collect the metrics for RequestResponseIO."} +{"repo": "tensorflow", "function": "def from_value(cls, value):\n return cls(value.shape, dtype=value.dtype, trainable=value.trainable)", "docstring": "Creates a `VariableSpec` from the given `Variable`.\n\n`value`'s shape, dtype, and trainable attributes will be used to create\nthe new `VariableSpec`.\n\nExample:\n\n>>> v = tf.Variable([1., 2., 3.])\n>>> VariableSpec.from_value(v)\nVariableSpec(shape=(3,), dtype=tf.float32, trainable=True, alias_id=None)\n\nArgs:\n value: A Variable.\n\nReturns:\n A `VariableSpec` created from `value`."} +{"repo": "transformers", "function": "def get_adapter_state_dict(self, adapter_name: Optional[str]=None, state_dict: Optional[dict]=None) -> dict:\n check_peft_version(min_version=MIN_PEFT_VERSION)\n if not self._hf_peft_config_loaded:\n raise ValueError('No adapter loaded. Please load an adapter first.')\n from peft import get_peft_model_state_dict\n if adapter_name is None:\n adapter_name = self.active_adapters()[0]\n adapter_state_dict = get_peft_model_state_dict(self, state_dict=state_dict, adapter_name=adapter_name)\n return adapter_state_dict", "docstring": "If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\nofficial documentation: https://huggingface.co/docs/peft\n\nGets the adapter state dict that should only contain the weights tensors of the specified adapter_name adapter.\nIf no adapter_name is passed, the active adapter is used.\n\nArgs:\n adapter_name (`str`, *optional*):\n The name of the adapter to get the state dict from. If no name is passed, the active adapter is used.\n state_dict (nested dictionary of `torch.Tensor`, *optional*)\n The state dictionary of the model. Will default to `self.state_dict()`, but can be used if special\n precautions need to be taken when recovering the state dictionary of a model (like when using model\n parallelism)."} +{"repo": "tensorflow", "function": "def _prefix_to_checkpoint_path(prefix, format_version):\n if format_version == saver_pb2.SaverDef.V2:\n return prefix + '.index'\n return prefix", "docstring": "Returns the pathname of a checkpoint file, given the checkpoint prefix.\n\nFor V1 checkpoint, simply returns the prefix itself (the data file). 
For V2,\nreturns the pathname to the index file.\n\nArgs:\n prefix: a string, the prefix of a checkpoint.\n format_version: the checkpoint format version that corresponds to the\n prefix.\nReturns:\n The pathname of a checkpoint file, taking into account the checkpoint\n format version."} +{"repo": "tensorflow", "function": "def generator_next_fn(iterator_id_t):\n if output_types and output_shapes:\n flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)]\n flattened_shapes = nest.flatten(output_shapes)\n\n def generator_py_func(iterator_id):\n \"\"\"A `py_func` that will be called to invoke the iterator.\"\"\"\n values = next(generator_state.get_iterator(iterator_id))\n try:\n flattened_values = nest.flatten_up_to(output_types, values)\n except (TypeError, ValueError) as e:\n raise TypeError(f'`generator` yielded an element that did not match the expected structure. The expected structure was {output_types}, but the yielded element was {values}.') from e\n ret_arrays = []\n for ret, dtype in zip(flattened_values, flattened_types):\n try:\n ret_arrays.append(script_ops.FuncRegistry._convert(ret, dtype=dtype.as_numpy_dtype))\n except (TypeError, ValueError) as e:\n raise TypeError(f'`generator` yielded an element that could not be converted to the expected type. The expected type was {dtype.name}, but the yielded element was {ret}.') from e\n for ret_array, expected_dtype, expected_shape in zip(ret_arrays, flattened_types, flattened_shapes):\n if ret_array.dtype != expected_dtype.as_numpy_dtype:\n raise TypeError(f'`generator` yielded an element of type {ret_array.dtype} where an element of type {expected_dtype.as_numpy_dtype} was expected.')\n if not expected_shape.is_compatible_with(ret_array.shape):\n raise TypeError(f'`generator` yielded an element of shape {ret_array.shape} where an element of shape {expected_shape} was expected.')\n return ret_arrays\n flat_values = script_ops.numpy_function(generator_py_func, [iterator_id_t], flattened_types)\n if not isinstance(flat_values, (list, tuple)):\n flat_values = [flat_values]\n if output_shapes is not None:\n for ret_t, shape in zip(flat_values, flattened_shapes):\n ret_t.set_shape(shape)\n return nest.pack_sequence_as(output_types, flat_values)\n else:\n flat_output_types = structure.get_flat_tensor_types(output_signature)\n\n def generator_py_func(iterator_id):\n \"\"\"A `py_func` that will be called to invoke the iterator.\"\"\"\n values = next(generator_state.get_iterator(iterator_id.numpy()))\n try:\n values = structure.normalize_element(values, output_signature)\n except (TypeError, ValueError) as e:\n raise TypeError(f'`generator` yielded an element that did not match the expected structure. 
The expected structure was {output_signature}, but the yielded element was {values}.') from e\n values_spec = structure.type_spec_from_value(values)\n if not structure.are_compatible(values_spec, output_signature):\n raise TypeError(f'`generator` yielded an element of {values_spec} where an element of {output_signature} was expected.')\n return structure.to_tensor_list(output_signature, values)\n return script_ops.eager_py_func(generator_py_func, inp=[iterator_id_t], Tout=flat_output_types)", "docstring": "Generates the next element from iterator with ID `iterator_id_t`.\n\nWe map this function across an infinite repetition of the\n`iterator_id_t`, and raise `StopIteration` to terminate the iteration.\n\nArgs:\n iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the\n iterator in `generator_state` from which to generate an element.\n\nReturns:\n The next element to generate from the iterator."} +{"repo": "tensorflow", "function": "def reverse(tensor: ragged_tensor.Ragged, axis, name=None):\n type_error_msg = '`axis` must be a list of int or a constant tensor when reversing axes in a ragged tensor'\n with ops.name_scope(name, 'Reverse', [tensor, axis]):\n if isinstance(axis, tensor_lib.Tensor):\n axis = tensor_util.constant_value(axis)\n if axis is None:\n raise TypeError(type_error_msg)\n elif not (isinstance(axis, (list, tuple)) and all((isinstance(dim, int) for dim in axis))):\n raise TypeError(type_error_msg)\n tensor = ragged_tensor.convert_to_tensor_or_ragged_tensor(tensor, name='tensor')\n axis = [array_ops.get_positive_axis(dim, tensor.shape.rank, 'axis[%d]' % i, 'rank(tensor)') for i, dim in enumerate(axis)]\n slices = [slice(None)] * (max(axis) + 1 if axis else 0)\n for dim in axis:\n slices[dim] = slice(None, None, -1)\n return tensor[tuple(slices)]", "docstring": "Reverses a RaggedTensor along the specified axes.\n\n#### Example:\n\n>>> data = tf.ragged.constant([\n... [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10], [11, 12]]])\n>>> tf.reverse(data, axis=[0, 2])\n<tf.RaggedTensor [[[8, 7], [10, 9], [12, 11]], [[6, 5]], [[2, 1], [4, 3]]]>\n\nArgs:\n tensor: A 'RaggedTensor' to reverse.\n axis: A list or tuple of 'int' or a constant 1D 'tf.Tensor'.
The indices of\n the axes to reverse.\n name: A name prefix for the returned tensor (optional).\n\nReturns:\n A 'RaggedTensor'."} +{"repo": "tensorflow", "function": "def convert_op_hints_to_stubs(session=None, graph_def=None, write_callback=lambda graph_def, comments: None):\n if session is not None and graph_def is not None:\n raise ValueError('Provide only one of session and graph_def.')\n if session is not None:\n return _convert_op_hints_to_stubs_helper(session.graph_def, write_callback)\n elif graph_def is not None:\n return _convert_op_hints_to_stubs_helper(graph_def, write_callback)\n else:\n raise ValueError('Must specify session or graph_def as input.')", "docstring": "Converts a graphdef with LiteOp hints into stub operations.\n\nThis is used to prepare for toco conversion of complex intrinsic usages.\nNote: only one of session or graph_def should be used, not both.\n\nArgs:\n session: A TensorFlow session that contains the graph to convert.\n graph_def: A graph def that we should convert.\n write_callback: A function pointer that can be used to write intermediate\n steps of graph transformation (optional).\n\nReturns:\n A new graphdef with all ops contained in OpHints being replaced by\n a single op call with the right parameters.\nRaises:\n ValueError: If both session and graph_def are provided."} +{"repo": "transformers", "function": "def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, *args, audio=None, videos=None, **kwargs: Unpack[Owlv2ProcessorKwargs]) -> BatchFeature:\n output_kwargs = self._merge_kwargs(Owlv2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, **self.prepare_and_validate_optional_call_args(*args))\n query_images = output_kwargs['images_kwargs'].pop('query_images', None)\n return_tensors = output_kwargs['common_kwargs']['return_tensors']\n if text is None and query_images is None and (images is None):\n raise ValueError('You have to specify at least one text or query image or image. 
All three cannot be none.')\n images, text = _validate_images_text_input_order(images, text)\n data = {}\n if text is not None:\n if isinstance(text, str) or (isinstance(text, List) and (not isinstance(text[0], List))):\n encodings = [self.tokenizer(text, **output_kwargs['text_kwargs'])]\n elif isinstance(text, List) and isinstance(text[0], List):\n encodings = []\n max_num_queries = max([len(text_single) for text_single in text])\n for text_single in text:\n if len(text_single) != max_num_queries:\n text_single = text_single + [' '] * (max_num_queries - len(text_single))\n encoding = self.tokenizer(text_single, **output_kwargs['text_kwargs'])\n encodings.append(encoding)\n else:\n raise TypeError('Input text should be a string, a list of strings or a nested list of strings')\n if return_tensors == 'np':\n input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)\n attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)\n elif return_tensors == 'jax' and is_flax_available():\n import jax.numpy as jnp\n input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)\n attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)\n elif return_tensors == 'pt' and is_torch_available():\n import torch\n input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)\n attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)\n elif return_tensors == 'tf' and is_tf_available():\n import tensorflow as tf\n input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)\n attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)\n else:\n raise ValueError('Target return tensor type could not be returned')\n data['input_ids'] = input_ids\n data['attention_mask'] = attention_mask\n if query_images is not None:\n query_pixel_values = self.image_processor(query_images, **output_kwargs['images_kwargs']).pixel_values\n data = {'query_pixel_values': query_pixel_values}\n if images is not None:\n image_features = self.image_processor(images, **output_kwargs['images_kwargs'])\n data['pixel_values'] = image_features.pixel_values\n return BatchFeature(data=data, tensor_type=return_tensors)", "docstring": "Main method to prepare for the model one or several text(s) and image(s). This method forwards the `text` and\n`kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode:\nthe text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to\nCLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring\nof the above two methods for more information.\n\nArgs:\n images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,\n `List[torch.Tensor]`):\n The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n tensor. Both channels-first and channels-last formats are supported.\n text (`str`, `List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set\n `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):\n The query image to be prepared, one query image is expected per target image to be queried. Each image\n can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image\n should be of shape (C, H, W), where C is a number of channels, H and W are image height and width.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors of a particular framework. Acceptable values are:\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return NumPy `np.ndarray` objects.\n - `'jax'`: Return JAX `jnp.ndarray` objects.\n\nReturns:\n [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.\n - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when\n `return_attention_mask=True` or if *\"attention_mask\"* is in `self.model_input_names` and if `text` is not\n `None`).\n - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.\n - **query_pixel_values** -- Pixel values of the query images to be fed to a model. Returned when `query_images` is not `None`."} +{"repo": "keras", "function": "class Flatten(Layer):\n\n def __init__(self, data_format=None, **kwargs):\n super().__init__(**kwargs)\n self.data_format = backend.standardize_data_format(data_format)\n self.input_spec = InputSpec(min_ndim=1)\n self._channels_first = self.data_format == 'channels_first'\n\n def call(self, inputs):\n input_shape = inputs.shape\n rank = len(input_shape)\n if self._channels_first and rank > 1:\n inputs = ops.transpose(inputs, axes=(0, *range(2, rank), 1))\n output_shape = tuple((dim if dim is not None else -1 for dim in self.compute_output_shape(input_shape)))\n return ops.reshape(inputs, output_shape)\n\n def compute_output_shape(self, input_shape):\n non_batch_dims = input_shape[1:]\n if len(non_batch_dims) == 0:\n flattened_dim = 1\n elif any((d is None for d in non_batch_dims)):\n flattened_dim = None\n else:\n flattened_dim = math.prod(non_batch_dims)\n return (input_shape[0], flattened_dim)\n\n def compute_output_spec(self, inputs):\n output_shape = self.compute_output_shape(inputs.shape)\n return KerasTensor(shape=output_shape, dtype=inputs.dtype, sparse=inputs.sparse)\n\n def get_config(self):\n config = {'data_format': self.data_format}\n base_config = super().get_config()\n return {**base_config, **config}", "docstring": "Flattens the input. Does not affect the batch size.\n\nNote: If inputs are shaped `(batch,)` without a feature axis, then\nflattening adds an extra channel dimension and output shape is `(batch, 1)`.\n\nArgs:\n data_format: A string, one of `\"channels_last\"` (default) or\n `\"channels_first\"`. The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, ..., channels)` while `\"channels_first\"` corresponds to\n inputs with shape `(batch, channels, ...)`.\n When unspecified, uses `image_data_format` value found in your Keras\n config file at `~/.keras/keras.json` (if exists). 
Defaults to\n `\"channels_last\"`.\n\nExample:\n\n>>> x = keras.Input(shape=(10, 64))\n>>> y = keras.layers.Flatten()(x)\n>>> y.shape\n(None, 640)"} +{"repo": "transformers", "function": "def forward(self, pixel_values: torch.FloatTensor, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> BackboneOutput:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs = self.encoder(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)\n hidden_states = outputs.hidden_states\n feature_maps = ()\n for idx, stage in enumerate(self.stage_names):\n if stage in self.out_features:\n feature_maps += (hidden_states[idx],)\n if not return_dict:\n output = (feature_maps,)\n if output_hidden_states:\n output += (outputs.hidden_states,)\n return output\n return BackboneOutput(feature_maps=feature_maps, hidden_states=outputs.hidden_states if output_hidden_states else None, attentions=None)", "docstring": "Examples:\n\n```python\n>>> from transformers import AutoImageProcessor, AutoBackbone\n>>> import torch\n>>> from PIL import Image\n>>> import requests\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> processor = AutoImageProcessor.from_pretrained(\"OpenGVLab/pvt_v2_b0\")\n>>> model = AutoBackbone.from_pretrained(\n... \"OpenGVLab/pvt_v2_b0\", out_features=[\"stage1\", \"stage2\", \"stage3\", \"stage4\"]\n... )\n\n>>> inputs = processor(image, return_tensors=\"pt\")\n\n>>> outputs = model(**inputs)\n>>> feature_maps = outputs.feature_maps\n>>> list(feature_maps[-1].shape)\n[1, 256, 7, 7]\n```"} +{"repo": "transformers", "function": "def call(self, pixel_values: TFModelInputType | None=None, noise: Optional[tf.Tensor]=None, head_mask: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False, interpolate_pos_encoding: bool=False) -> Union[TFViTMAEForPreTrainingOutput, Tuple[tf.Tensor]]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.vit(pixel_values=pixel_values, noise=noise, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, interpolate_pos_encoding=interpolate_pos_encoding)\n latent = outputs.last_hidden_state\n ids_restore = outputs.ids_restore\n mask = outputs.mask\n decoder_outputs = self.decoder(latent, ids_restore, interpolate_pos_encoding=interpolate_pos_encoding)\n logits = decoder_outputs.logits\n loss = self.forward_loss(pixel_values, logits, mask, interpolate_pos_encoding=interpolate_pos_encoding)\n if not return_dict:\n output = (logits, mask, ids_restore) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return TFViTMAEForPreTrainingOutput(loss=loss, logits=logits, mask=mask, ids_restore=ids_restore, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "Returns:\n\nExamples:\n\n```python\n>>> from transformers import AutoImageProcessor, TFViTMAEForPreTraining\n>>> from PIL import Image\n>>> import requests\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = 
Image.open(requests.get(url, stream=True).raw)\n\n>>> image_processor = AutoImageProcessor.from_pretrained(\"facebook/vit-mae-base\")\n>>> model = TFViTMAEForPreTraining.from_pretrained(\"facebook/vit-mae-base\")\n\n>>> inputs = image_processor(images=image, return_tensors=\"pt\")\n>>> outputs = model(**inputs)\n>>> loss = outputs.loss\n>>> mask = outputs.mask\n>>> ids_restore = outputs.ids_restore\n```"} +{"repo": "tensorflow", "function": "def read_value_no_copy(self):\n with ops.name_scope('Read'):\n value = self._read_variable_op(no_copy=True)\n return array_ops.identity(value)", "docstring": "Constructs an op which reads the value of this variable without copy.\n\nThe variable is read without making a copy even when it has been sparsely\naccessed. Variables in copy-on-read mode will be converted to copy-on-write\nmode.\n\nReturns:\n The value of the variable."} +{"repo": "tensorflow", "function": "def log_abs_determinant(self, name='log_abs_det'):\n if self.is_square is False:\n raise NotImplementedError('Determinant not implemented for an operator that is expected to not be square.')\n with self._name_scope(name):\n return self._log_abs_determinant()", "docstring": "Log absolute value of determinant for every batch member.\n\nArgs:\n name: A name for this `Op`.\n\nReturns:\n `Tensor` with shape `self.batch_shape` and same `dtype` as `self`.\n\nRaises:\n NotImplementedError: If `self.is_square` is `False`."} +{"repo": "transformers", "function": "class OwlViTVisionConfig(PretrainedConfig):\n model_type = 'owlvit_vision_model'\n base_config_key = 'vision_config'\n\n def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.image_size = image_size\n self.patch_size = patch_size\n self.hidden_act = hidden_act\n self.layer_norm_eps = layer_norm_eps\n self.attention_dropout = attention_dropout\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor", "docstring": "This is the configuration class to store the configuration of an [`OwlViTVisionModel`]. It is used to instantiate\nan OWL-ViT image encoder according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the OWL-ViT\n[google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. 
Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_channels (`int`, *optional*, defaults to 3):\n Number of channels in the input images.\n image_size (`int`, *optional*, defaults to 768):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 32):\n The size (resolution) of each patch.\n hidden_act (`str` or `function`, *optional*, defaults to `\"quick_gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` `\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1.0):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\nExample:\n\n```python\n>>> from transformers import OwlViTVisionConfig, OwlViTVisionModel\n\n>>> # Initializing a OwlViTVisionModel with google/owlvit-base-patch32 style configuration\n>>> configuration = OwlViTVisionConfig()\n\n>>> # Initializing a OwlViTVisionModel model from the google/owlvit-base-patch32 style configuration\n>>> model = OwlViTVisionModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
MVP does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of zeros."} +{"repo": "tensorflow", "function": "def _internal_operation_seed(self):\n return self._rng.randint(0, _MAXINT32)", "docstring": "Returns a fake operation seed.\n\n In eager mode, user shouldn't set or depend on operation seed.\n Here, we generate a random seed based on global seed to make\n operation's randomness different and depend on the global seed.\n\nReturns:\n A fake operation seed based on global seed."} +{"repo": "keras", "function": "def identity(n, dtype=None):\n return backend.numpy.identity(n, dtype=dtype)", "docstring": "Return the identity tensor.\n\nThe identity tensor is a square tensor with ones on the main diagonal and\nzeros elsewhere.\n\nArgs:\n n: Number of rows (and columns) in the `n x n` output tensor.\n dtype: Data type of the output tensor.\n\nReturns:\n The identity tensor."} +{"repo": "tensorflow", "function": "def is_broadcast_compatible(shape_x, shape_y):\n if shape_x.ndims is None or shape_y.ndims is None:\n return False\n return _broadcast_shape_helper(shape_x, shape_y) is not None", "docstring": "Returns True if `shape_x` and `shape_y` are broadcast compatible.\n\nArgs:\n shape_x: A `TensorShape`\n shape_y: A `TensorShape`\n\nReturns:\n True if a shape exists that both `shape_x` and `shape_y` can be broadcasted\n to. False otherwise."} +{"repo": "keras", "function": "class DepthwiseConv1D(BaseDepthwiseConv):\n\n def __init__(self, kernel_size, strides=1, padding='valid', depth_multiplier=1, data_format=None, dilation_rate=1, activation=None, use_bias=True, depthwise_initializer='glorot_uniform', bias_initializer='zeros', depthwise_regularizer=None, bias_regularizer=None, activity_regularizer=None, depthwise_constraint=None, bias_constraint=None, **kwargs):\n super().__init__(rank=1, depth_multiplier=depth_multiplier, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, depthwise_initializer=depthwise_initializer, bias_initializer=bias_initializer, depthwise_regularizer=depthwise_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, depthwise_constraint=depthwise_constraint, bias_constraint=bias_constraint, **kwargs)", "docstring": "1D depthwise convolution layer.\n\nDepthwise convolution is a type of convolution in which each input channel\nis convolved with a different kernel (called a depthwise kernel). You can\nunderstand depthwise convolution as the first step in a depthwise separable\nconvolution.\n\nIt is implemented via the following steps:\n\n- Split the input into individual channels.\n- Convolve each channel with an individual depthwise kernel with\n `depth_multiplier` output channels.\n- Concatenate the convolved outputs along the channels axis.\n\nUnlike a regular 1D convolution, depthwise convolution does not mix\ninformation across different input channels.\n\nThe `depth_multiplier` argument determines how many filters are applied to\none input channel. 
As such, it controls the amount of output channels that\nare generated per input channel in the depthwise step.\n\nArgs:\n kernel_size: int or tuple/list of 1 integer, specifying the size of the\n depthwise convolution window.\n strides: int or tuple/list of 1 integer, specifying the stride length\n of the convolution. `strides > 1` is incompatible with\n `dilation_rate > 1`.\n padding: string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.\n depth_multiplier: The number of depthwise convolution output channels\n for each input channel. The total number of depthwise convolution\n output channels will be equal to `input_channel * depth_multiplier`.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, steps, features)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, features, steps)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.\n dilation_rate: int or tuple/list of 1 integers, specifying the dilation\n rate to use for dilated convolution.\n activation: Activation function. If `None`, no activation is applied.\n use_bias: bool, if `True`, bias will be added to the output.\n depthwise_initializer: Initializer for the convolution kernel.\n If `None`, the default initializer (`\"glorot_uniform\"`)\n will be used.\n bias_initializer: Initializer for the bias vector. If `None`, the\n default initializer (`\"zeros\"`) will be used.\n depthwise_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n depthwise_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape). 
Constraints\n are not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n\nInput shape:\n\n- If `data_format=\"channels_last\"`:\n A 3D tensor with shape: `(batch_shape, steps, channels)`\n- If `data_format=\"channels_first\"`:\n A 3D tensor with shape: `(batch_shape, channels, steps)`\n\nOutput shape:\n\n- If `data_format=\"channels_last\"`:\n A 3D tensor with shape:\n `(batch_shape, new_steps, channels * depth_multiplier)`\n- If `data_format=\"channels_first\"`:\n A 3D tensor with shape:\n `(batch_shape, channels * depth_multiplier, new_steps)`\n\nReturns:\n A 3D tensor representing\n `activation(depthwise_conv1d(inputs, kernel) + bias)`.\n\nRaises:\n ValueError: when both `strides > 1` and `dilation_rate > 1`.\n\nExample:\n\n>>> x = np.random.rand(4, 10, 12)\n>>> y = keras.layers.DepthwiseConv1D(3, 3, 2, activation='relu')(x)\n>>> print(y.shape)\n(4, 4, 36)"} +{"repo": "transformers", "function": "def preprocess(self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, return_segmentation_masks: Optional[bool]=None, masks_path: Optional[Union[str, pathlib.Path]]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[Union[int, float]]=None, do_normalize: Optional[bool]=None, do_convert_annotations: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_pad: Optional[bool]=None, format: Optional[Union[str, AnnotationFormat]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, pad_size: Optional[Dict[str, int]]=None, **kwargs) -> BatchFeature:\n if 'pad_and_return_pixel_mask' in kwargs:\n logger.warning_once('The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, use `do_pad` instead.')\n do_pad = kwargs.pop('pad_and_return_pixel_mask')\n if 'max_size' in kwargs:\n logger.warning_once(\"The `max_size` argument is deprecated and will be removed in a future version, use `size['longest_edge']` instead.\")\n size = kwargs.pop('max_size')\n do_resize = self.do_resize if do_resize is None else do_resize\n size = self.size if size is None else size\n size = get_size_dict(size=size, default_to_square=False)\n resample = self.resample if resample is None else resample\n do_rescale = self.do_rescale if do_rescale is None else do_rescale\n rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor\n do_normalize = self.do_normalize if do_normalize is None else do_normalize\n image_mean = self.image_mean if image_mean is None else image_mean\n image_std = self.image_std if image_std is None else image_std\n do_convert_annotations = self.do_convert_annotations if do_convert_annotations is None else do_convert_annotations\n do_pad = self.do_pad if do_pad is None else do_pad\n pad_size = self.pad_size if pad_size is None else pad_size\n format = self.format if format is None else format\n images = make_list_of_images(images)\n if not valid_images(images):\n raise ValueError('Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)\n validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)\n if annotations is not None and isinstance(annotations, dict):\n annotations = [annotations]\n if annotations is not None and len(images) != len(annotations):\n raise ValueError(f'The number of images ({len(images)}) and annotations ({len(annotations)}) do not match.')\n format = AnnotationFormat(format)\n if annotations is not None:\n validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)\n if masks_path is not None and format == AnnotationFormat.COCO_PANOPTIC and (not isinstance(masks_path, (pathlib.Path, str))):\n raise ValueError(f'The path to the directory containing the mask PNG files should be provided as a `pathlib.Path` or string object, but is {type(masks_path)} instead.')\n images = [to_numpy_array(image) for image in images]\n if do_rescale and is_scaled_image(images[0]):\n logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(images[0])\n if annotations is not None:\n prepared_images = []\n prepared_annotations = []\n for image, target in zip(images, annotations):\n target = self.prepare_annotation(image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path, input_data_format=input_data_format)\n prepared_images.append(image)\n prepared_annotations.append(target)\n images = prepared_images\n annotations = prepared_annotations\n del prepared_images, prepared_annotations\n if do_resize:\n if annotations is not None:\n resized_images, resized_annotations = ([], [])\n for image, target in zip(images, annotations):\n orig_size = get_image_size(image, input_data_format)\n resized_image = self.resize(image, size=size, resample=resample, input_data_format=input_data_format)\n resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image, input_data_format))\n resized_images.append(resized_image)\n resized_annotations.append(resized_annotation)\n images = resized_images\n annotations = resized_annotations\n del resized_images, resized_annotations\n else:\n images = [self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images]\n if do_rescale:\n images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]\n if do_normalize:\n images = [self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images]\n if do_convert_annotations and annotations is not None:\n annotations = [self.normalize_annotation(annotation, get_image_size(image, input_data_format)) for annotation, image in zip(annotations, images)]\n if do_pad:\n encoded_inputs = self.pad(images, annotations=annotations, return_pixel_mask=True, data_format=data_format, input_data_format=input_data_format, update_bboxes=do_convert_annotations, return_tensors=return_tensors, pad_size=pad_size)\n else:\n images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in 
images]\n encoded_inputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)\n if annotations is not None:\n encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations]\n return encoded_inputs", "docstring": "Preprocess an image or a batch of images so that it can be used by the model.\n\nArgs:\n images (`ImageInput`):\n Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging\n from 0 to 255. If passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):\n List of annotations associated with the image or batch of images. If annotation is for object\n detection, the annotations should be a dictionary with the following keys:\n - \"image_id\" (`int`): The image id.\n - \"annotations\" (`List[Dict]`): List of annotations for an image. Each annotation should be a\n dictionary. An image can have no annotations, in which case the list should be empty.\n If annotation is for segmentation, the annotations should be a dictionary with the following keys:\n - \"image_id\" (`int`): The image id.\n - \"segments_info\" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.\n An image can have no segments, in which case the list should be empty.\n - \"file_name\" (`str`): The file name of the image.\n return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):\n Whether to return segmentation masks.\n masks_path (`str` or `pathlib.Path`, *optional*):\n Path to the directory containing the segmentation masks.\n do_resize (`bool`, *optional*, defaults to self.do_resize):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to self.size):\n Size of the image's `(height, width)` dimensions after resizing. Available options are:\n - `{\"height\": int, \"width\": int}`: The image will be resized to the exact size `(height, width)`.\n Do NOT keep the aspect ratio.\n - `{\"shortest_edge\": int, \"longest_edge\": int}`: The image will be resized to a maximum size respecting\n the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge\n less or equal to `longest_edge`.\n - `{\"max_height\": int, \"max_width\": int}`: The image will be resized to the maximum size respecting the\n aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to\n `max_width`.\n resample (`PILImageResampling`, *optional*, defaults to self.resample):\n Resampling filter to use when resizing the image.\n do_rescale (`bool`, *optional*, defaults to self.do_rescale):\n Whether to rescale the image.\n rescale_factor (`float`, *optional*, defaults to self.rescale_factor):\n Rescale factor to use when rescaling the image.\n do_normalize (`bool`, *optional*, defaults to self.do_normalize):\n Whether to normalize the image.\n do_convert_annotations (`bool`, *optional*, defaults to self.do_convert_annotations):\n Whether to convert the annotations to the format expected by the model. 
Converts the bounding\n boxes from the format `(top_left_x, top_left_y, width, height)` to `(center_x, center_y, width, height)`\n and in relative coordinates.\n image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):\n Mean to use when normalizing the image.\n image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):\n Standard deviation to use when normalizing the image.\n do_pad (`bool`, *optional*, defaults to self.do_pad):\n Whether to pad the image. If `True`, padding will be applied to the bottom and right of\n the image with zeros. If `pad_size` is provided, the image will be padded to the specified\n dimensions. Otherwise, the image will be padded to the maximum height and width of the batch.\n format (`str` or `AnnotationFormat`, *optional*, defaults to self.format):\n Format of the annotations.\n return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):\n Type of tensors to return. If `None`, will return the list of images.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n pad_size (`Dict[str, int]`, *optional*):\n The size `{\"height\": int, \"width\" int}` to pad the images to. Must be larger than any image size\n provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest\n height and width in the batch."} +{"repo": "beam", "function": "def visit_inner_types(type_constraint, visitor, visitor_arg):\n if isinstance(type_constraint, TypeConstraint):\n return type_constraint.visit(visitor, visitor_arg)\n return visitor(type_constraint, visitor_arg)", "docstring": "Visitor pattern to visit all inner types of a type constraint.\n\nArgs:\n type_constraint: A type constraint or a type.\n visitor: A callable invoked for all nodes in the type tree comprising a\n composite type. The visitor will be called with the node visited and the\n visitor argument specified here.\n visitor_arg: Visitor callback second argument.\n\nNote:\n Raise and capture a StopIteration to terminate the visit, e.g.\n\n ```\n def visitor(type_constraint, visitor_arg):\n if ...:\n raise StopIteration\n\n try:\n visit_inner_types(type_constraint, visitor, visitor_arg)\n except StopIteration:\n pass\n ```"} +{"repo": "keras", "function": "def array_to_img(x, data_format=None, scale=True, dtype=None):\n data_format = backend.standardize_data_format(data_format)\n if dtype is None:\n dtype = backend.floatx()\n if pil_image is None:\n raise ImportError('Could not import PIL.Image. The use of `array_to_img` requires PIL.')\n x = np.asarray(x, dtype=dtype)\n if x.ndim != 3:\n raise ValueError(f'Expected image array to have rank 3 (single image). 
Got array with shape: {x.shape}')\n if data_format == 'channels_first':\n x = x.transpose(1, 2, 0)\n if scale:\n x = x - np.min(x)\n x_max = np.max(x)\n if x_max != 0:\n x /= x_max\n x *= 255\n if x.shape[2] == 4:\n return pil_image.fromarray(x.astype('uint8'), 'RGBA')\n elif x.shape[2] == 3:\n return pil_image.fromarray(x.astype('uint8'), 'RGB')\n elif x.shape[2] == 1:\n if np.max(x) > 255:\n return pil_image.fromarray(x[:, :, 0].astype('int32'), 'I')\n return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')\n else:\n raise ValueError(f'Unsupported channel number: {x.shape[2]}')", "docstring": "Converts a 3D NumPy array to a PIL Image instance.\n\nExample:\n\n```python\nfrom PIL import Image\nimg = np.random.random(size=(100, 100, 3))\npil_img = keras.utils.array_to_img(img)\n```\n\nArgs:\n x: Input data, in any form that can be converted to a NumPy array.\n data_format: Image data format, can be either `\"channels_first\"` or\n `\"channels_last\"`. Defaults to `None`, in which case the global\n setting `keras.backend.image_data_format()` is used (unless you\n changed it, it defaults to `\"channels_last\"`).\n scale: Whether to rescale the image such that minimum and maximum values\n are 0 and 255 respectively. Defaults to `True`.\n dtype: Dtype to use. `None` means the global setting\n `keras.backend.floatx()` is used (unless you changed it, it\n defaults to `\"float32\"`). Defaults to `None`.\n\nReturns:\n A PIL Image instance."} +{"repo": "budoux", "function": "def resolve(phrases: typing.List[str], html: str, separator: str='\\u200b') -> str:\n resolver = HTMLChunkResolver(phrases, separator)\n resolver.feed(html)\n result = '<span style=\"%s\">%s</span>' % (PARENT_CSS_STYLE, resolver.output)\n return result", "docstring": "Wraps phrases in the HTML string with non-breaking markup.\n\nArgs:\n phrases (List[str]): The phrases included in the HTML string.\n html (str): The HTML string to resolve.\n separator (str, optional): The separator string.\n\nReturns:\n The HTML string with phrases wrapped in non-breaking markup."} +{"repo": "tensorflow", "function": "def is_ref(x):\n return isinstance(x, variables_module.Variable) or (isinstance(x, module.Module) and hasattr(x, 'dtype') and hasattr(x, 'shape'))", "docstring": "Evaluates if the object has reference semantics.\n\nAn object is deemed \"reference\" if it is a `tf.Variable` instance or is\nderived from a `tf.Module` with `dtype` and `shape` properties.\n\nArgs:\n x: Any object.\n\nReturns:\n is_ref: Python `bool` indicating input has reference semantics, i.e.,\n is a `tf.Variable` or a `tf.Module` with `dtype` and `shape` properties."} +{"repo": "starthinker", "function": "def report_file(config, auth, account, report_id=None, name=None, timeout=60, chunksize=DCM_CHUNK_SIZE):\n account_id, advertiser_id = parse_account(config, auth, account)\n file_json = report_fetch(config, auth, account, report_id, name, timeout)\n if file_json == False:\n return (None, None)\n elif file_json == True:\n return ('report_running.csv', None)\n else:\n filename = '%s_%s.csv' % (file_json['fileName'], file_json['dateRange']['endDate'].replace('-', ''))\n if chunksize:\n return (filename, media_download(API_DCM(config, auth).files().get_media(reportId=file_json['reportId'], fileId=file_json['id']).execute(False), chunksize, 'utf-8'))\n else:\n return (filename, StringIO(API_DCM(config, auth).files().get_media(reportId=file_json['reportId'], fileId=file_json['id']).execute().decode('utf-8')))", "docstring": "Retrieves most recent DCM file by name or ID, if in progress,
waits for it to complete.\n\nBulletproofing:\nhttps://developers.google.com/doubleclick-advertisers/v3.2/files/get\n\nTimeout is in minutes ( retries will happen at 1 minute interval, default\ntotal time is 60 minutes )\nIf chunksize is set to 0 then the whole file is downloaded at once.\n\nArgs:\n * auth: (string) Either user or service.\n * account: (string) [account:advertiser@profile] token.\n * report_id: (int) ID of DCm report to fetch ( either or name ).\n * name: (string) Name of report to fetch ( either or report_id ).\n * timeout: (int) Minutes to wait for in progress report before giving up.\n * chunksize: (int) number of bytes to download at a time, for memory\n constrained systems.\n\nReturns:\n * (filename, iterator) if file exists and is ready to download in chunks.\n * (filename, file) if file exists and chunking is off.\n * ('report_running.csv', None) if report is in progress.\n * (None, None) if file does not exist."} +{"repo": "tensorflow", "function": "class PreprocessingLayer(Layer, metaclass=abc.ABCMeta):\n _must_restore_from_config = True\n\n def __init__(self, streaming=True, **kwargs):\n super(PreprocessingLayer, self).__init__(**kwargs)\n self._streaming = streaming\n self._is_compiled = False\n self._is_adapted = False\n self._reset_state_impl = self.reset_state\n self.reset_state = self._reset_state_wrapper\n self._adapt_function = None\n\n @property\n def streaming(self):\n \"\"\"Whether `adapt` can be called twice without resetting the state.\"\"\"\n return self._streaming\n\n @property\n def is_adapted(self):\n \"\"\"Whether the layer has been fit to data already.\"\"\"\n return self._is_adapted\n\n def update_state(self, data):\n \"\"\"Accumulates statistics for the preprocessing layer.\n\n Arguments:\n data: A mini-batch of inputs to the layer.\n \"\"\"\n raise NotImplementedError\n\n def reset_state(self):\n \"\"\"Resets the statistics of the preprocessing layer.\"\"\"\n raise NotImplementedError\n\n def merge_state(self, layers):\n \"\"\"Merge the statistics of multiple preprocessing layers.\n\n This layer will contain the merged state.\n\n Arguments:\n layers: Layers whose statistics should be merge with the statistics of\n this layer.\n \"\"\"\n raise NotImplementedError\n\n def finalize_state(self):\n \"\"\"Finalize the statistics for the preprocessing layer.\n\n This method is called at the end of `adapt` or after restoring a serialized\n preprocessing layer's state. This method handles any one-time operations\n that should occur on the layer's state before `Layer.__call__`.\n \"\"\"\n pass\n\n def make_adapt_function(self):\n \"\"\"Creates a function to execute one step of `adapt`.\n\n This method can be overridden to support custom adapt logic.\n This method is called by `PreprocessingLayer.adapt`.\n\n Typically, this method directly controls `tf.function` settings,\n and delegates the actual state update logic to\n `PreprocessingLayer.update_state`.\n\n This function is cached the first time `PreprocessingLayer.adapt`\n is called. The cache is cleared whenever `PreprocessingLayer.compile`\n is called.\n\n Returns:\n Function. 
The function created by this method should accept a\n `tf.data.Iterator`, retrieve a batch, and update the state of the\n layer.\n \"\"\"\n if self._adapt_function is not None:\n return self._adapt_function\n\n def adapt_step(iterator):\n data = next(iterator)\n self._adapt_maybe_build(data)\n self.update_state(data)\n if self._steps_per_execution.numpy().item() == 1:\n adapt_fn = adapt_step\n else:\n\n def adapt_fn(iterator):\n for _ in math_ops.range(self._steps_per_execution):\n adapt_step(iterator)\n if not self._run_eagerly:\n adapt_fn = def_function.function(adapt_fn)\n self._adapt_function = adapt_fn\n return self._adapt_function\n\n def compile(self, run_eagerly=None, steps_per_execution=None):\n \"\"\"Configures the layer for `adapt`.\n\n Arguments:\n run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s logic\n will not be wrapped in a `tf.function`. Recommended to leave this as\n `None` unless your `Model` cannot be run inside a `tf.function`.\n steps_per_execution: Int. Defaults to 1. The number of batches to run\n during each `tf.function` call. Running multiple batches inside a\n single `tf.function` call can greatly improve performance on TPUs or\n small models with a large Python overhead.\n \"\"\"\n if steps_per_execution is None:\n steps_per_execution = 1\n self._configure_steps_per_execution(steps_per_execution)\n if run_eagerly is None:\n run_eagerly = self.dynamic\n self._run_eagerly = run_eagerly\n self._is_compiled = True\n\n def adapt(self, data, batch_size=None, steps=None, reset_state=True):\n \"\"\"Fits the state of the preprocessing layer to the data being passed.\n\n After calling `adapt` on a layer, a preprocessing layer's state will not\n update during training. In order to make preprocessing layers efficient in\n any distribution context, they are kept constant with respect to any\n compiled `tf.Graph`s that call the layer. This does not affect the layer use\n when adapting each layer only once, but if you adapt a layer multiple times\n you will need to take care to re-compile any compiled functions as follows:\n\n * If you are adding a preprocessing layer to a `keras.Model`, you need to\n call `model.compile` after each subsequent call to `adapt`.\n * If you are calling a preprocessing layer inside `tf.data.Dataset.map`,\n you should call `map` again on the input `tf.data.Dataset` after each\n `adapt`.\n * If you are using a `tf.function` directly which calls a preprocessing\n layer, you need to call `tf.function` again on your callable after\n each subsequent call to `adapt`.\n\n `tf.keras.Model` example with multiple adapts:\n\n >>> layer = tf.keras.layers.experimental.preprocessing.Normalization(\n ... axis=None)\n >>> layer.adapt([0, 2])\n >>> model = tf.keras.Sequential(layer)\n >>> model.predict([0, 1, 2])\n array([-1., 0., 1.], dtype=float32)\n >>> layer.adapt([-1, 1])\n >>> model.compile() # This is needed to re-compile model.predict!\n >>> model.predict([0, 1, 2])\n array([0., 1., 2.], dtype=float32)\n\n `tf.data.Dataset` example with multiple adapts:\n\n >>> layer = tf.keras.layers.experimental.preprocessing.Normalization(\n ... 
axis=None)\n >>> layer.adapt([0, 2])\n >>> input_ds = tf.data.Dataset.range(3)\n >>> normalized_ds = input_ds.map(layer)\n >>> list(normalized_ds.as_numpy_iterator())\n [array([-1.], dtype=float32),\n array([0.], dtype=float32),\n array([1.], dtype=float32)]\n >>> layer.adapt([-1, 1])\n >>> normalized_ds = input_ds.map(layer) # Re-map over the input dataset.\n >>> list(normalized_ds.as_numpy_iterator())\n [array([0.], dtype=float32),\n array([1.], dtype=float32),\n array([2.], dtype=float32)]\n\n Arguments:\n data: The data to train on. It can be passed either as a tf.data\n Dataset, or as a numpy array.\n batch_size: Integer or `None`.\n Number of samples per state update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` if your data is in the\n form of datasets, generators, or `keras.utils.Sequence` instances\n (since they generate batches).\n steps: Integer or `None`.\n Total number of steps (batches of samples)\n When training with input tensors such as\n TensorFlow data tensors, the default `None` is equal to\n the number of samples in your dataset divided by\n the batch size, or 1 if that cannot be determined. If x is a\n `tf.data` dataset, and 'steps' is None, the epoch will run until\n the input dataset is exhausted. When passing an infinitely\n repeating dataset, you must specify the `steps` argument. This\n argument is not supported with array inputs.\n reset_state: Optional argument specifying whether to clear the state of\n the layer at the start of the call to `adapt`, or whether to start\n from the existing state. This argument may not be relevant to all\n preprocessing layers: a subclass of PreprocessingLayer may choose to\n throw if 'reset_state' is set to False.\n \"\"\"\n _disallow_inside_tf_function('adapt')\n if not version_utils.should_use_v2():\n raise RuntimeError('`adapt` is only supported in tensorflow v2.')\n if not self.streaming and self._is_adapted and (not reset_state):\n raise ValueError('{} does not supporting calling `adapt` twice without resetting the state.'.format(self.__class__.__name__))\n if not self._is_compiled:\n self.compile()\n if self.built and reset_state:\n self.reset_state()\n data_handler = data_adapter.DataHandler(data, batch_size=batch_size, steps_per_epoch=steps, epochs=1, steps_per_execution=self._steps_per_execution, distribute=False)\n self._adapt_function = self.make_adapt_function()\n for _, iterator in data_handler.enumerate_epochs():\n with data_handler.catch_stop_iteration():\n for _ in data_handler.steps():\n self._adapt_function(iterator)\n if data_handler.should_sync:\n context.async_wait()\n self.finalize_state()\n self._is_adapted = True\n\n def _reset_state_wrapper(self):\n \"\"\"Calls `reset_state` and sets `adapted` to `False`.\"\"\"\n self._reset_state_impl()\n self._is_adapted = False\n\n @trackable.no_automatic_dependency_tracking\n def _configure_steps_per_execution(self, steps_per_execution):\n self._steps_per_execution = variables.Variable(steps_per_execution, dtype='int64', aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)\n\n def _adapt_maybe_build(self, data):\n if not self.built:\n try:\n data_shape = data.shape\n data_shape_nones = tuple([None] * len(data.shape))\n except AttributeError:\n data_shape = None\n data_shape_nones = None\n batch_input_shape = getattr(self, '_batch_input_shape', None)\n if batch_input_shape is None:\n self._batch_input_shape = data_shape_nones\n self.build(data_shape)\n self.built = True", "docstring": "Base class for Preprocessing 
Layers.\n\n**Don't use this class directly: it's an abstract base class!** You may\nbe looking for one of the many built-in\n[preprocessing layers](https://keras.io/guides/preprocessing_layers/)\ninstead.\n\nPreprocessing layers are layers whose state gets computed before model\ntraining starts. They do not get updated during training.\nMost preprocessing layers implement an `adapt()` method for state computation.\n\nThe `PreprocessingLayer` class is the base class you would subclass to\nimplement your own preprocessing layers.\n\nAttributes:\n streaming: Whether a layer can be adapted multiple times without resetting\n the state of the layer."} +{"repo": "keras", "function": "def __call__(self, shape, dtype=None):\n dtype = standardize_dtype(dtype)\n return ops.ones(shape, dtype=dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes\n are supported. If not specified, `keras.backend.floatx()`\n is used, which default to `float32` unless you configured it\n otherwise (via `keras.backend.set_floatx(float_dtype)`)."} +{"repo": "tensorflow", "function": "def get_matching_files_v2(pattern):\n if isinstance(pattern, six.string_types):\n return [compat.as_str_any(matching_filename) for matching_filename in _pywrap_file_io.GetMatchingFiles(compat.as_bytes(pattern))]\n else:\n return [compat.as_str_any(matching_filename) for single_filename in pattern for matching_filename in _pywrap_file_io.GetMatchingFiles(compat.as_bytes(single_filename))]", "docstring": "Returns a list of files that match the given pattern(s).\n\nThe patterns are defined as strings. Supported patterns are defined\nhere. Note that the pattern can be a Python iteratable of string patterns.\n\nThe format definition of the pattern is:\n\n**pattern**: `{ term }`\n\n**term**:\n * `'*'`: matches any sequence of non-'/' characters\n * `'?'`: matches a single non-'/' character\n * `'[' [ '^' ] { match-list } ']'`: matches any single\n character (not) on the list\n * `c`: matches character `c` where `c != '*', '?', '\\\\', '['`\n * `'\\\\' c`: matches character `c`\n\n**character range**:\n * `c`: matches character `c` while `c != '\\\\', '-', ']'`\n * `'\\\\' c`: matches character `c`\n * `lo '-' hi`: matches character `c` for `lo <= c <= hi`\n\nExamples:\n\n>>> tf.io.gfile.glob(\"*.py\")\n... # For example, ['__init__.py']\n\n>>> tf.io.gfile.glob(\"__init__.??\")\n... # As above\n\n>>> files = {\"*.py\"}\n>>> the_iterator = iter(files)\n>>> tf.io.gfile.glob(the_iterator)\n... # As above\n\nSee the C++ function `GetMatchingPaths` in\n[`core/platform/file_system.h`]\n(../../../core/platform/file_system.h)\nfor implementation details.\n\nArgs:\n pattern: string or iterable of strings. 
The glob pattern(s).\n\nReturns:\n A list of strings containing filenames that match the given pattern(s).\n\nRaises:\n errors.OpError: If there are filesystem / directory listing errors.\n errors.NotFoundError: If pattern to be matched is an invalid directory."} +{"repo": "pytype", "function": "def PrepareForExport(module_name, ast, loader):\n src = pytd_utils.Print(ast)\n return SourceToExportableAst(module_name, src, loader)", "docstring": "Prepare an ast as if it was parsed and loaded.\n\nExternal dependencies will not be resolved, as the ast generated by this\nmethod is supposed to be exported.\n\nArgs:\n module_name: The module_name as a string for the returned ast.\n ast: pytd.TypeDeclUnit, is only used if src is None.\n loader: A load_pytd.Loader instance.\n\nReturns:\n A pytd.TypeDeclUnit representing the supplied AST as it would look after\n being written to a file and parsed."} +{"repo": "pyglove", "function": "def search(nasbench, search_model, algo, repeat_id, max_train_hours=5000000.0):\n nasbench.reset_budget_counters()\n times, best_valids, best_tests = ([0.0], [0.0], [0.0])\n valid_models = 0\n time_spent = 0\n start_time = time.time()\n last_report_time = start_time\n for model, feedback in pg.sample(search_model, algo, name=str(repeat_id)):\n spec = model()\n if nasbench.is_valid(spec):\n results = nasbench.query(spec)\n valid_models += 1\n feedback(results['validation_accuracy'])\n if results['validation_accuracy'] > best_valids[-1]:\n best_valids.append(results['validation_accuracy'])\n best_tests.append(results['test_accuracy'])\n else:\n best_valids.append(best_valids[-1])\n best_tests.append(best_tests[-1])\n time_spent, _ = nasbench.get_budget_counters()\n times.append(time_spent)\n if time_spent > max_train_hours:\n feedback.end_loop()\n break\n else:\n feedback.skip()\n if feedback.id % 100 == 0:\n now = time.time()\n print(f'Tried {feedback.id} models, valid {valid_models}, time_spent {time_spent}, elapse since last report: {now - last_report_time} seconds.')\n last_report_time = now\n print(f'Total time elapse: {time.time() - start_time} seconds.')\n return (times, best_valids, best_tests)", "docstring": "Define the search procedure.\n\nArgs:\n nasbench: NASBench object.\n search_model: which is a `model` object annotated with `oneof`.\n algo: algorithm for search.\n repeat_id: identifier of current repeat.\n max_train_hours: max time budget to train the models, which is the sum\n of training time queried from NAS-Bench.\n\nReturns:\n A tuple of (total time spent at step i for all steps,\n best validation accuracy at step i for all steps,\n best test accuracy at step i for all steps)"} +{"repo": "pyglove", "function": "def error(msg: str, *args, **kwargs) -> None:\n _DEFAULT_LOGGER.error(msg, *args, **kwargs)", "docstring": "Logs error message.\n\nArgs:\n msg: Message with possible format string.\n *args: Values for variables in the format string.\n **kwargs: Keyword arguments for the logger."} +{"repo": "transformers", "function": "def set_optimizer(self, name: Union[str, OptimizerNames]='adamw_torch', learning_rate: float=5e-05, weight_decay: float=0, beta1: float=0.9, beta2: float=0.999, epsilon: float=1e-08, args: Optional[str]=None):\n self.optim = OptimizerNames(name)\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n self.adam_beta1 = beta1\n self.adam_beta2 = beta2\n self.adam_epsilon = epsilon\n self.optim_args = args\n return self", "docstring": "A method that regroups all arguments linked to the optimizer and its 
hyperparameters.\n\nArgs:\n name (`str` or [`training_args.OptimizerNames`], *optional*, defaults to `\"adamw_torch\"`):\n The optimizer to use: `\"adamw_torch\"`, `\"adamw_torch_fused\"`, `\"adamw_apex_fused\"`,\n `\"adamw_anyprecision\"` or `\"adafactor\"`.\n learning_rate (`float`, *optional*, defaults to 5e-5):\n The initial learning rate.\n weight_decay (`float`, *optional*, defaults to 0):\n The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights.\n beta1 (`float`, *optional*, defaults to 0.9):\n The beta1 hyperparameter for the adam optimizer or its variants.\n beta2 (`float`, *optional*, defaults to 0.999):\n The beta2 hyperparameter for the adam optimizer or its variants.\n epsilon (`float`, *optional*, defaults to 1e-8):\n The epsilon hyperparameter for the adam optimizer or its variants.\n args (`str`, *optional*):\n Optional arguments that are supplied to AnyPrecisionAdamW (only useful when\n `optim=\"adamw_anyprecision\"`).\n\nExample:\n\n```py\n>>> from transformers import TrainingArguments\n\n>>> args = TrainingArguments(\"working_dir\")\n>>> args = args.set_optimizer(name=\"adamw_torch\", beta1=0.8)\n>>> args.optim\n'adamw_torch'\n```"} +{"repo": "tensorflow", "function": "def __init__(self, funcs, trackable_obj=None):\n super(TFLiteConverterV2, self).__init__(funcs, trackable_obj)", "docstring": "Constructor for TFLiteConverter.\n\nArgs:\n funcs: List of TensorFlow ConcreteFunctions. The list should not contain\n duplicate elements.\n trackable_obj: tf.AutoTrackable object associated with `funcs`. A\n reference to this object needs to be maintained so that Variables do not\n get garbage collected since functions have a weak reference to\n Variables. This is only required when the tf.AutoTrackable object is not\n maintained by the user (e.g. `from_saved_model`)."} +{"repo": "tensorflow", "function": "def map_values(op, *args, **kwargs):\n sparse_list = []\n inner_args = _replace_sparse_with_values(args, sparse_list)\n inner_kwargs = _replace_sparse_with_values(kwargs, sparse_list)\n if not sparse_list:\n raise ValueError('No SparseTensor in argument list of map_values')\n with ops.control_dependencies(_assert_sparse_compatible(sparse_list)):\n return sparse_tensor.SparseTensor(sparse_list[0].indices, op(*inner_args, **inner_kwargs), sparse_list[0].dense_shape)", "docstring": "Applies `op` to the `.values` tensor of one or more `SparseTensor`s.\n\nReplaces any `SparseTensor` in `args` or `kwargs` with its `values`\ntensor (which contains the non-default values for the SparseTensor),\nand then calls `op`. Returns a `SparseTensor` that is constructed\nfrom the input `SparseTensor`s' `indices`, `dense_shape`, and the\nvalue returned by the `op`.\n\nIf the input arguments contain multiple `SparseTensor`s, then they must have\nequal `indices` and dense shapes.\n\nExamples:\n\n>>> s = tf.sparse.from_dense([[1, 2, 0],\n... [0, 4, 0],\n... [1, 0, 0]])\n>>> tf.sparse.to_dense(tf.sparse.map_values(tf.ones_like, s)).numpy()\narray([[1, 1, 0],\n [0, 1, 0],\n [1, 0, 0]], dtype=int32)\n\n>>> tf.sparse.to_dense(tf.sparse.map_values(tf.multiply, s, s)).numpy()\narray([[ 1, 4, 0],\n [ 0, 16, 0],\n [ 1, 0, 0]], dtype=int32)\n\n>>> tf.sparse.to_dense(tf.sparse.map_values(tf.add, s, 5)).numpy()\narray([[6, 7, 0],\n [0, 9, 0],\n [6, 0, 0]], dtype=int32)\n\nNote: even though `tf.add(0, 5) != 0`, implicit zeros\nwill remain unchanged. 
However, if the sparse tensor contains any explicit\nzeros, these will be affected by the mapping!\n\nArgs:\n op: The operation that should be applied to the SparseTensor `values`. `op`\n is typically an element-wise operation (such as math_ops.add), but any\n operation that preserves the shape can be used.\n *args: Arguments for `op`.\n **kwargs: Keyword arguments for `op`.\n\nReturns:\n A `SparseTensor` whose `indices` and `dense_shape` matches the `indices`\n and `dense_shape` of all input `SparseTensor`s.\nRaises:\n ValueError: If args contains no `SparseTensor`, or if the `indices`\n or `dense_shape`s of the input `SparseTensor`s are not equal."} +{"repo": "transformers", "function": "def forward(self, inputs: torch.Tensor):\n output = inputs.transpose(1, 2)\n output = self.batchnorm(output)\n return output.transpose(1, 2)", "docstring": "Parameters:\n inputs (`torch.Tensor` of shape `(batch_size, sequence_length, d_model)`):\n input for Batch norm calculation\nReturns:\n `torch.Tensor` of shape `(batch_size, sequence_length, d_model)`"} +{"repo": "beam", "function": "class XGBoostModelHandlerPandas(XGBoostModelHandler[pandas.DataFrame, PredictionResult, Union[xgboost.Booster, xgboost.XGBModel]]):\n\n def run_inference(self, batch: Sequence[pandas.DataFrame], model: Union[xgboost.Booster, xgboost.XGBModel], inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:\n \"\"\"Runs inferences on a batch of pandas dataframes.\n\n Args:\n batch: A sequence of examples as pandas dataframes. Each\n row in a dataframe is a single example. The dimensions\n must match the dimensions of the data used to train\n the model.\n model: XGBoost booster or XBGModel (sklearn interface). Must\n implement predict(X). Where the parameter X is a pandas dataframe.\n inference_args: Any additional arguments for an inference.\n\n Returns:\n An Iterable of type PredictionResult.\n \"\"\"\n return self._inference_fn(batch, model, inference_args)\n\n def get_num_bytes(self, batch: Sequence[pandas.DataFrame]) -> int:\n \"\"\"\n Returns:\n The number of bytes of data for a batch of Numpy arrays.\n \"\"\"\n return sum((df.memory_usage(deep=True).sum() for df in batch))", "docstring": "Implementation of the ModelHandler interface for XGBoost\nusing pandas dataframes as input.\n\nExample Usage::\n\n pcoll | RunInference(\n XGBoostModelHandlerPandas(\n model_class=\"XGBoost Model Class\",\n model_state=\"my_model_state.json\")))\n\nArgs:\n model_class: class of the XGBoost model that defines the model\n structure.\n model_state: path to a json file that contains the model's\n configuration.\n inference_fn: the inference function to use during RunInference.\n default=default_xgboost_inference_fn"} +{"repo": "transformers", "function": "class ProphetNetTokenizer(PreTrainedTokenizer):\n vocab_files_names = VOCAB_FILES_NAMES\n model_input_names: List[str] = ['input_ids', 'attention_mask']\n\n def __init__(self, vocab_file: str, do_lower_case: Optional[bool]=True, do_basic_tokenize: Optional[bool]=True, never_split: Optional[Iterable]=None, unk_token: Optional[str]='[UNK]', sep_token: Optional[str]='[SEP]', x_sep_token: Optional[str]='[X_SEP]', pad_token: Optional[str]='[PAD]', mask_token: Optional[str]='[MASK]', tokenize_chinese_chars: Optional[bool]=True, strip_accents: Optional[bool]=None, clean_up_tokenization_spaces: bool=True, **kwargs):\n if not os.path.isfile(vocab_file):\n raise ValueError(f\"Can't find a vocabulary file at path '{vocab_file}'. 
To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`\")\n self.vocab = load_vocab(vocab_file)\n self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])\n self.do_basic_tokenize = do_basic_tokenize\n if do_basic_tokenize:\n self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))\n super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, x_sep_token=x_sep_token, pad_token=pad_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs)\n\n @property\n def vocab_size(self):\n return len(self.vocab)\n\n def get_vocab(self):\n return dict(self.vocab, **self.added_tokens_encoder)\n\n def _tokenize(self, text):\n split_tokens = []\n if self.do_basic_tokenize:\n for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):\n if token in self.basic_tokenizer.never_split:\n split_tokens.append(token)\n else:\n split_tokens += self.wordpiece_tokenizer.tokenize(token)\n else:\n split_tokens = self.wordpiece_tokenizer.tokenize(text)\n return split_tokens\n\n def _convert_token_to_id(self, token: str):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.vocab.get(token, self.vocab.get(self.unk_token))\n\n def _convert_id_to_token(self, index: int):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.ids_to_tokens.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens: str):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n out_string = ' '.join(tokens).replace(' ##', '').strip()\n return out_string\n\n def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: Optional[bool]=False) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is None:\n return [0] * len(token_ids_0) + [1]\n return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n index = 0\n if os.path.isdir(save_directory):\n vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n else:\n vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory\n with open(vocab_file, 'w', encoding='utf-8') as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')\n index = token_index\n writer.write(token + '\\n')\n index += 1\n return (vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. A BERT sequence has the following format:\n\n - single sequence: `[CLS] X [SEP]`\n - pair of sequences: `[CLS] A [SEP] B [SEP]`\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n \"\"\"\n if token_ids_1 is None:\n return token_ids_0 + [self.sep_token_id]\n sep = [self.sep_token_id]\n return token_ids_0 + sep + token_ids_1 + sep", "docstring": "Construct a ProphetNetTokenizer. Based on WordPiece.\n\nThis tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to\nthis superclass for more information regarding those methods.\n\nArgs:\n vocab_file (`str`):\n File containing the vocabulary.\n do_lower_case (`bool`, *optional*, defaults to `True`):\n Whether or not to lowercase the input when tokenizing.\n do_basic_tokenize (`bool`, *optional*, defaults to `True`):\n Whether or not to do basic tokenization before WordPiece.\n never_split (`Iterable`, *optional*):\n Collection of tokens which will never be split during tokenization. Only has an effect when\n `do_basic_tokenize=True`\n unk_token (`str`, *optional*, defaults to `\"[UNK]\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n sep_token (`str`, *optional*, defaults to `\"[SEP]\"`):\n The separator token, which is used when building a sequence from multiple sequences, e.g. 
two sequences for\n sequence classification or for a text and a question for question answering. It is also used as the last\n token of a sequence built with special tokens.\n x_sep_token (`str`, *optional*, defaults to `\"[X_SEP]\"`):\n Special second separator token, which can be generated by [`ProphetNetForConditionalGeneration`]. It is\n used to separate bullet-point like sentences in summarization, *e.g.*.\n pad_token (`str`, *optional*, defaults to `\"[PAD]\"`):\n The token used for padding, for example when batching sequences of different lengths.\n mask_token (`str`, *optional*, defaults to `\"[MASK]\"`):\n The token used for masking values. This is the token used when training this model with masked language\n modeling. This is the token which the model will try to predict.\n tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):\n Whether or not to tokenize Chinese characters.\n\n This should likely be deactivated for Japanese (see this\n [issue](https://github.com/huggingface/transformers/issues/328)).\n strip_accents (`bool`, *optional*):\n Whether or not to strip all accents. If this option is not specified, then it will be determined by the\n value for `lowercase` (as in the original BERT).\n clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):\n Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like\n extra spaces."} +{"repo": "keras", "function": "class Ones(Initializer):\n\n def __call__(self, shape, dtype=None):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only numeric or boolean dtypes\n are supported. If not specified, `keras.backend.floatx()`\n is used, which default to `float32` unless you configured it\n otherwise (via `keras.backend.set_floatx(float_dtype)`).\n \"\"\"\n dtype = standardize_dtype(dtype)\n return ops.ones(shape, dtype=dtype)", "docstring": "Initializer that generates tensors initialized to 1.\n\nAlso available via the shortcut function `ones`.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = Ones()\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = Ones()\n>>> layer = Dense(3, kernel_initializer=initializer)"} +{"repo": "tf-quant-finance", "function": "def douglas_adi_step(theta=0.5):\n\n def _step_fn(time, next_time, coord_grid, value_grid, boundary_conditions, second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn, inner_second_order_coeff_fn, inner_first_order_coeff_fn, num_steps_performed, dtype=None, name=None):\n \"\"\"Performs the step.\"\"\"\n del num_steps_performed\n name = name or 'douglas_adi_step'\n return multidim_parabolic_equation_step(time, next_time, coord_grid, value_grid, boundary_conditions, douglas_adi_scheme(theta), second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn, inner_second_order_coeff_fn, inner_first_order_coeff_fn, dtype=dtype, name=name)\n return _step_fn", "docstring": "Creates a stepper function with Crank-Nicolson time marching scheme.\n\nDouglas ADI scheme is the simplest time marching scheme for solving parabolic\nPDEs with multiple spatial dimensions. The time step consists of several\nsubsteps: the first one is fully explicit, and the following `N` steps are\nimplicit with respect to contributions of one of the `N` axes (hence \"ADI\" -\nalternating direction implicit). 
See `douglas_adi_scheme` below for more\ndetails.\n\nArgs:\n theta: positive Number. `theta = 0` corresponds to fully explicit scheme.\n The larger `theta` the stronger are the corrections by the implicit\n substeps. The recommended value is `theta = 0.5`, because the scheme is\n second order accurate in that case, unless mixed second derivative terms are\n present in the PDE.\nReturns:\n Callable to be used in finite-difference PDE solvers (see fd_solvers.py)."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutputWithPooling:\n return self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states)", "docstring": "Examples:\n\n```python\n>>> from transformers import AutoTokenizer, SiglipTextModel\n\n>>> model = SiglipTextModel.from_pretrained(\"google/siglip-base-patch16-224\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/siglip-base-patch16-224\")\n\n>>> # important: make sure to set padding=\"max_length\" as that's how the model was trained\n>>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=\"max_length\", return_tensors=\"pt\")\n\n>>> outputs = model(**inputs)\n>>> last_hidden_state = outputs.last_hidden_state\n>>> pooled_output = outputs.pooler_output # pooled (EOS token) states\n```"} +{"repo": "tensorflow", "function": "def scatter_nd_max(self, indices, updates, name=None):\n return self._lazy_read(gen_state_ops.resource_scatter_nd_max(self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name))", "docstring": "Updates this variable with the max of `tf.IndexedSlices` and itself.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.\n\nArgs:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\nReturns:\n The updated variable."} +{"repo": "tensorflow", "function": "def mlir_convert_file(graph_def_filename, input_tensors, output_tensors, quantization_params=None, additional_flags=''):\n bin_path = resource_loader.get_path_to_datafile('../../../../compiler/mlir/lite/tf_tfl_translate')\n with tempfile.NamedTemporaryFile() as output_file, tempfile.NamedTemporaryFile('w+') as stdout_file:\n input_shapes = []\n for input_tensor in input_tensors:\n shape = input_tensor[1]\n input_shapes.append(','.join([str(dim) for dim in shape]))\n input_shapes_str = ':'.join(input_shapes)\n input_types = ','.join([x[2] for x in input_tensors])\n quant_flags = ''\n if quantization_params is not None:\n min_vals = ','.join([str(val) for val in quantization_params[1]])\n max_vals = ','.join([str(val) for val in quantization_params[2]])\n quant_flags = '-tf-inference-type=' + quantization_params[0] + \" 
-tf-input-min-values='\" + min_vals + \"' -tf-input-max-values='\" + max_vals + \"' \" + '-emit-quant-adaptor-ops '\n cmd = '%s -tf-input-arrays=%s -tf-input-data-types=%s -tf-input-shapes=%s -tf-output-arrays=%s ' + quant_flags + additional_flags + '%s -o %s'\n cmd = cmd % (bin_path, ','.join([x[0] for x in input_tensors]), input_types, input_shapes_str, ','.join(output_tensors), graph_def_filename, output_file.name)\n exit_code = os.system(cmd)\n log = cmd + 'exited with code %d' % exit_code + '\\n------------------\\n' + stdout_file.read()\n return (None if exit_code != 0 else output_file.read(), log)", "docstring": "Convert a graphdef file into a tflite model with MLIR-based conversion.\n\nNOTE: this currently shells out to the MLIR binary binary, but we would like\nconvert to Python API tooling in the future.\n\nArgs:\n graph_def_filename: A GraphDef file.\n input_tensors: List of input tensor tuples `(name, shape, type)`. name\n should be a string. shape should be a tuple of integers. type should be a\n string, for example 'DT_FLOAT'\n output_tensors: List of output tensors (names).\n quantization_params: parameters `(inference_type, min_values, max_values)`\n to quantize the model.\n additional_flags: A string of additional command line flags to be passed to\n MLIR converter.\n\nReturns:\n output tflite model, log_txt from conversion\n or None, log_txt if it did not convert properly."} +{"repo": "transformers", "function": "def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n hidden_states = inputs_embeds\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Embedded representation of the inputs. Should be float, not int tokens.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple."} +{"repo": "keras", "function": "class UpSampling2D(Layer):\n\n def __init__(self, size=(2, 2), data_format=None, interpolation='nearest', **kwargs):\n super().__init__(**kwargs)\n self.data_format = backend.standardize_data_format(data_format)\n self.size = argument_validation.standardize_tuple(size, 2, 'size')\n self.interpolation = interpolation.lower()\n self.input_spec = InputSpec(ndim=4)\n\n def compute_output_shape(self, input_shape):\n if self.data_format == 'channels_first':\n height = self.size[0] * input_shape[2] if input_shape[2] is not None else None\n width = self.size[1] * input_shape[3] if input_shape[3] is not None else None\n return (input_shape[0], input_shape[1], height, width)\n else:\n height = self.size[0] * input_shape[1] if input_shape[1] is not None else None\n width = self.size[1] * input_shape[2] if input_shape[2] is not None else None\n return (input_shape[0], height, width, input_shape[3])\n\n def call(self, inputs):\n return self._resize_images(inputs, self.size[0], self.size[1], self.data_format, interpolation=self.interpolation)\n\n def get_config(self):\n config = {'size': self.size, 'data_format': self.data_format, 'interpolation': self.interpolation}\n base_config = super().get_config()\n return {**base_config, **config}\n\n def _resize_images(self, x, height_factor, width_factor, data_format, interpolation='nearest'):\n \"\"\"Resizes the images contained in a 4D tensor.\n\n Args:\n x: Tensor or variable to resize.\n height_factor: Positive integer.\n width_factor: Positive integer.\n data_format: One of `\"channels_first\"`, `\"channels_last\"`.\n interpolation: A string, one of `\"bicubic\"`, `\"bilinear\"`,\n `\"lanczos3\"`, `\"lanczos5\"`, or `\"nearest\"`.\n\n Returns:\n A tensor.\n \"\"\"\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError(f'Invalid `data_format` argument: {data_format}')\n if data_format == 'channels_first':\n x = ops.transpose(x, [0, 2, 3, 1])\n if interpolation == 'nearest':\n x = ops.repeat(x, height_factor, axis=1)\n x = ops.repeat(x, width_factor, axis=2)\n else:\n shape = ops.shape(x)\n new_shape = (shape[1] * height_factor, shape[2] * width_factor)\n x = ops.image.resize(x, new_shape, interpolation=interpolation)\n if data_format == 'channels_first':\n x = ops.transpose(x, [0, 3, 1, 2])\n return x", "docstring": "Upsampling layer for 2D inputs.\n\nThe implementation uses interpolative resizing, given the resize method\n(specified by the `interpolation` argument). 
Use `interpolation=nearest`\nto repeat the rows and columns of the data.\n\nExample:\n\n>>> input_shape = (2, 2, 1, 3)\n>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)\n>>> print(x)\n[[[[ 0 1 2]]\n [[ 3 4 5]]]\n [[[ 6 7 8]]\n [[ 9 10 11]]]]\n>>> y = keras.layers.UpSampling2D(size=(1, 2))(x)\n>>> print(y)\n[[[[ 0 1 2]\n [ 0 1 2]]\n [[ 3 4 5]\n [ 3 4 5]]]\n [[[ 6 7 8]\n [ 6 7 8]]\n [[ 9 10 11]\n [ 9 10 11]]]]\n\nArgs:\n size: Int, or tuple of 2 integers.\n The upsampling factors for rows and columns.\n data_format: A string,\n one of `\"channels_last\"` (default) or `\"channels_first\"`.\n The ordering of the dimensions in the inputs.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch_size, height, width, channels)` while `\"channels_first\"`\n corresponds to inputs with shape\n `(batch_size, channels, height, width)`.\n When unspecified, uses\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json` (if exists) else `\"channels_last\"`.\n Defaults to `\"channels_last\"`.\n interpolation: A string, one of `\"bicubic\"`, `\"bilinear\"`, `\"lanczos3\"`,\n `\"lanczos5\"`, `\"nearest\"`.\n\nInput shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, rows, cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, rows, cols)`\n\nOutput shape:\n 4D tensor with shape:\n - If `data_format` is `\"channels_last\"`:\n `(batch_size, upsampled_rows, upsampled_cols, channels)`\n - If `data_format` is `\"channels_first\"`:\n `(batch_size, channels, upsampled_rows, upsampled_cols)`"} +{"repo": "transformers", "function": "class MarianConfig(PretrainedConfig):\n model_type = 'marian'\n keys_to_ignore_at_inference = ['past_key_values']\n attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}\n\n def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function='gelu', d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):\n self.vocab_size = vocab_size\n self.decoder_vocab_size = decoder_vocab_size or vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.d_model = d_model\n self.encoder_ffn_dim = encoder_ffn_dim\n self.encoder_layers = encoder_layers\n self.encoder_attention_heads = encoder_attention_heads\n self.decoder_ffn_dim = decoder_ffn_dim\n self.decoder_layers = decoder_layers\n self.decoder_attention_heads = decoder_attention_heads\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation_dropout = activation_dropout\n self.activation_function = activation_function\n self.init_std = init_std\n self.encoder_layerdrop = encoder_layerdrop\n self.decoder_layerdrop = decoder_layerdrop\n self.use_cache = use_cache\n self.num_hidden_layers = encoder_layers\n self.scale_embedding = scale_embedding\n self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings\n super().__init__(pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, 
forced_eos_token_id=forced_eos_token_id, **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`MarianModel`]. It is used to instantiate an\nMarian model according to the specified arguments, defining the model architecture. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the Marian\n[Helsinki-NLP/opus-mt-en-de](https://huggingface.co/Helsinki-NLP/opus-mt-en-de) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 58101):\n Vocabulary size of the Marian model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`MarianModel`] or [`TFMarianModel`].\n d_model (`int`, *optional*, defaults to 1024):\n Dimensionality of the layers and the pooler layer.\n encoder_layers (`int`, *optional*, defaults to 12):\n Number of encoder layers.\n decoder_layers (`int`, *optional*, defaults to 12):\n Number of decoder layers.\n encoder_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the Transformer encoder.\n decoder_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the Transformer decoder.\n decoder_ffn_dim (`int`, *optional*, defaults to 4096):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in decoder.\n encoder_ffn_dim (`int`, *optional*, defaults to 4096):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in decoder.\n activation_function (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n activation_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for activations inside the fully connected layer.\n max_position_embeddings (`int`, *optional*, defaults to 1024):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n init_std (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n encoder_layerdrop (`float`, *optional*, defaults to 0.0):\n The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)\n for more details.\n decoder_layerdrop (`float`, *optional*, defaults to 0.0):\n The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)\n for more details.\n scale_embedding (`bool`, *optional*, defaults to `False`):\n Scale embeddings by diving by sqrt(d_model).\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models)\n forced_eos_token_id (`int`, *optional*, defaults to 0):\n The id of the token to force as the last generated token when `max_length` is reached. 
Usually set to\n `eos_token_id`.\n\nExamples:\n\n```python\n>>> from transformers import MarianModel, MarianConfig\n\n>>> # Initializing a Marian Helsinki-NLP/opus-mt-en-de style configuration\n>>> configuration = MarianConfig()\n\n>>> # Initializing a model from the Helsinki-NLP/opus-mt-en-de style configuration\n>>> model = MarianModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "class FlaxBaseModelOutputWithPast(ModelOutput):\n last_hidden_state: Optional[jnp.ndarray] = None\n past_key_values: Optional[Dict[str, jnp.ndarray]] = None\n hidden_states: Optional[Tuple[jnp.ndarray]] = None\n attentions: Optional[Tuple[jnp.ndarray]] = None", "docstring": "Base class for model's outputs, with potential hidden states and attentions.\n\nArgs:\n last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n past_key_values (`Dict[str, jnp.ndarray]`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "transformers", "function": "def _resize_for_patching(self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension) -> np.array:\n new_height, new_width = get_patch_output_size(image, target_resolution, input_data_format)\n resized_image = resize(image, (new_height, new_width), resample=resample, input_data_format=input_data_format)\n return resized_image", "docstring": "Resizes an image to a target resolution while maintaining aspect ratio.\n\nArgs:\n image (np.array):\n The input image.\n target_resolution (tuple):\n The target resolution (height, width) of the image.\n resample (`PILImageResampling`):\n Resampling filter to use if resizing the image.\n input_data_format (`ChannelDimension` or `str`):\n The channel dimension format of the input image.\n\nReturns:\n np.array: The resized and padded image."} +{"repo": "pytype", "function": "def _GetGenericBasesLookupMap(self, node):\n mapping = collections.defaultdict(list)\n seen_bases = set()\n bases = list(reversed(node.bases))\n while bases:\n base = bases.pop()\n if base in seen_bases:\n continue\n seen_bases.add(base)\n if isinstance(base, pytd.GenericType) and isinstance(base.base_type, pytd.ClassType):\n mapping[base.base_type].append(base)\n bases.extend(reversed(base.base_type.cls.bases))\n elif isinstance(base, pytd.ClassType):\n bases.extend(reversed(base.cls.bases))\n return mapping", "docstring": "Get a lookup map for the generic bases of a class.\n\nGets a map from a 
pytd.ClassType to the list of pytd.GenericType bases of\nthe node that have that class as their base. This method does depth-first\ntraversal of the bases, which ensures that the order of elements in each\nlist is consistent with the node's MRO.\n\nArgs:\n node: A pytd.Class node.\n\nReturns:\n A pytd.ClassType -> List[pytd.GenericType] map."} +{"repo": "yapf", "function": "def _LineContainsI18n(line):\n if style.Get('I18N_COMMENT'):\n for tok in line.tokens:\n if tok.is_comment and re.match(style.Get('I18N_COMMENT'), tok.value):\n return True\n if style.Get('I18N_FUNCTION_CALL'):\n length = len(line.tokens)\n for index in range(length - 1):\n if line.tokens[index + 1].value == '(' and line.tokens[index].value in style.Get('I18N_FUNCTION_CALL'):\n return True\n return False", "docstring": "Return true if there are i18n comments or function calls in the line.\n\nI18n comments and pseudo-function calls are closely related. They cannot\nbe moved apart without breaking i18n.\n\nArguments:\n line: (logical_line.LogicalLine) The line currently being formatted.\n\nReturns:\n True if the line contains i18n comments or function calls. False otherwise."} +{"repo": "python-fire", "function": "def _matches_section_title(title, section_title):\n title = title.lower()\n section_title = section_title.lower()\n return section_title in (title, title[:-1])", "docstring": "Returns whether title is a match for a specific section_title.\n\nExample:\n _matches_section_title('Yields', 'yield') == True\n\nArgs:\n title: The title to check for matching.\n section_title: A specific known section title to check against."} +{"repo": "beam", "function": "def Patch(self, request, global_params=None):\n config = self.GetMethodConfig('Patch')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Patch specific fields in the specified model.\n\nArgs:\n request: (BigqueryModelsPatchRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (Model) The response message."} +{"repo": "transformers", "function": "class GroupViTVisionConfig(PretrainedConfig):\n model_type = 'groupvit_vision_model'\n base_config_key = 'vision_config'\n\n def __init__(self, hidden_size=384, intermediate_size=1536, depths=[6, 3, 3], num_hidden_layers=12, num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8], num_attention_heads=6, image_size=224, patch_size=16, num_channels=3, hidden_act='gelu', layer_norm_eps=1e-05, dropout=0.0, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, assign_eps=1.0, assign_mlp_ratio=[0.5, 4], **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.depths = depths\n if num_hidden_layers != sum(depths):\n logger.warning(f'Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers = sum(depth) = {sum(depths)}')\n self.num_hidden_layers = num_hidden_layers\n self.num_group_tokens = num_group_tokens\n self.num_output_groups = num_output_groups\n self.num_attention_heads = num_attention_heads\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_channels = num_channels\n self.hidden_act = hidden_act\n self.layer_norm_eps = layer_norm_eps\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor\n self.assign_eps = assign_eps\n self.assign_mlp_ratio = assign_mlp_ratio", "docstring": "This is the 
configuration class to store the configuration of a [`GroupViTVisionModel`]. It is used to instantiate\nan GroupViT model according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the GroupViT\n[nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 384):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 1536):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n depths (`List[int]`, *optional*, defaults to [6, 3, 3]):\n The number of layers in each encoder block.\n num_group_tokens (`List[int]`, *optional*, defaults to [64, 8, 0]):\n The number of group tokens for each stage.\n num_output_groups (`List[int]`, *optional*, defaults to [64, 8, 8]):\n The number of output groups for each stage, 0 means no group.\n num_attention_heads (`int`, *optional*, defaults to 6):\n Number of attention heads for each attention layer in the Transformer encoder.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 16):\n The size (resolution) of each patch.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` `\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-5):\n The epsilon used by the layer normalization layers.\n dropout (`float`, *optional*, defaults to 0.0):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1.0):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\nExample:\n\n```python\n>>> from transformers import GroupViTVisionConfig, GroupViTVisionModel\n\n>>> # Initializing a GroupViTVisionModel with nvidia/groupvit-gcc-yfcc style configuration\n>>> configuration = GroupViTVisionConfig()\n\n>>> model = GroupViTVisionModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "beam", "function": "def copy_paths(self, src_dest_pairs):\n if not src_dest_pairs:\n return []\n results = []\n for src_path, dest_path in src_dest_pairs:\n if src_path.endswith('/') and dest_path.endswith('/'):\n try:\n results += self.copy_tree(src_path, dest_path)\n except messages.S3ClientError as err:\n results.append((src_path, dest_path, err))\n elif not src_path.endswith('/') and (not dest_path.endswith('/')):\n src_bucket, src_key = parse_s3_path(src_path)\n dest_bucket, dest_key = parse_s3_path(dest_path)\n request = messages.CopyRequest(src_bucket, src_key, dest_bucket, dest_key)\n try:\n self.client.copy(request)\n results.append((src_path, dest_path, None))\n except 
messages.S3ClientError as err:\n results.append((src_path, dest_path, err))\n else:\n e = messages.S3ClientError(\"Can't copy mismatched paths (one directory, one non-directory):\" + ' %s, %s' % (src_path, dest_path), 400)\n results.append((src_path, dest_path, e))\n return results", "docstring": "Copies the given S3 objects from src to dest. This can handle directory\nor file paths.\n\nArgs:\n src_dest_pairs: list of (src, dest) tuples of s3:/// file\n paths to copy from src to dest\nReturns: List of tuples of (src, dest, exception) in the same order as the\n src_dest_pairs argument, where exception is None if the operation\n succeeded or the relevant exception if the operation failed."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n logits = self.output_projection(outputs[0])\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)", "docstring": "cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\nlabels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n\n```python\n>>> from transformers import (\n... TrOCRConfig,\n... TrOCRProcessor,\n... TrOCRForCausalLM,\n... ViTConfig,\n... ViTModel,\n... VisionEncoderDecoderModel,\n... 
)\n>>> import requests\n>>> from PIL import Image\n\n>>> # TrOCR is a decoder model and should be used within a VisionEncoderDecoderModel\n>>> # init vision2text model with random weights\n>>> encoder = ViTModel(ViTConfig())\n>>> decoder = TrOCRForCausalLM(TrOCRConfig())\n>>> model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)\n\n>>> # If you want to start from the pretrained model, load the checkpoint with `VisionEncoderDecoderModel`\n>>> processor = TrOCRProcessor.from_pretrained(\"microsoft/trocr-base-handwritten\")\n>>> model = VisionEncoderDecoderModel.from_pretrained(\"microsoft/trocr-base-handwritten\")\n\n>>> # load image from the IAM dataset\n>>> url = \"https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw).convert(\"RGB\")\n>>> pixel_values = processor(image, return_tensors=\"pt\").pixel_values\n>>> text = \"industry, ' Mr. Brown commented icily. ' Let us have a\"\n\n>>> # training\n>>> model.config.decoder_start_token_id = processor.tokenizer.eos_token_id\n>>> model.config.pad_token_id = processor.tokenizer.pad_token_id\n>>> model.config.vocab_size = model.config.decoder.vocab_size\n\n>>> labels = processor.tokenizer(text, return_tensors=\"pt\").input_ids\n>>> outputs = model(pixel_values, labels=labels)\n>>> loss = outputs.loss\n>>> round(loss.item(), 2)\n5.30\n\n>>> # inference\n>>> generated_ids = model.generate(pixel_values)\n>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]\n>>> generated_text\n'industry, \" Mr. Brown commented icily. \" Let us have a'\n```"} +{"repo": "transformers", "function": "class NewModelConfig(PretrainedConfig):\n model_type = 'new_model'\n keys_to_ignore_at_inference = ['past_key_values']\n base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_proj': 'colwise', 'layers.*.mlp.up_proj': 'colwise', 'layers.*.mlp.down_proj': 'rowwise'}\n base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}\n\n def __init__(self, vocab_size=256030, hidden_size=64, intermediate_size=90, num_hidden_layers=28, num_attention_heads=16, num_key_value_heads=16, head_dim=256, hidden_act='gelu_pytorch_tanh', hidden_activation=None, max_position_embeddings=1500, initializer_range=0.02, rms_norm_eps=1e-06, use_cache=True, pad_token_id=0, eos_token_id=1, bos_token_id=2, tie_word_embeddings=True, rope_theta=10000.0, attention_bias=False, attention_dropout=0.0, **kwargs):\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.head_dim = head_dim\n self.num_key_value_heads = num_key_value_heads\n self.hidden_act = hidden_act\n self.hidden_activation = hidden_activation\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.use_cache = use_cache\n self.rope_theta = rope_theta\n self.attention_bias = attention_bias\n self.attention_dropout = attention_dropout\n\n @property\n def num_heads(self):\n return 
self.num_attention_heads", "docstring": "This is the configuration class to store the configuration of a [`NewModelModel`]. It is used to instantiate an NewModel\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the NewModel-7B.\ne.g. [google/new_model-7b](https://huggingface.co/google/new_model-7b)\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\nArgs:\n vocab_size (`int`, *optional*, defaults to 256000):\n Vocabulary size of the NewModel model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`NewModelModel`]\n hidden_size (`int`, *optional*, defaults to 3072):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 24576):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 28):\n Number of hidden layers in the Transformer decoder.\n num_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the Transformer decoder.\n num_key_value_heads (`int`, *optional*, defaults to 16):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details, check out [this\n paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to\n `num_attention_heads`.\n head_dim (`int`, *optional*, defaults to 256):\n The attention head dimension.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu_pytorch_tanh\"`):\n The legacy activation function. It is overwritten by the `hidden_activation`.\n hidden_activation (`str` or `function`, *optional*):\n The non-linear activation function (function or string) in the decoder. Will default to `\"gelu_pytorch_tanh\"`\n if not specified. `\"gelu_pytorch_tanh\"` uses an approximation of the `\"gelu\"` activation function.\n max_position_embeddings (`int`, *optional*, defaults to 8192):\n The maximum sequence length that this model might ever be used with.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the rms normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). 
Only\n relevant if `config.is_decoder=True`.\n pad_token_id (`int`, *optional*, defaults to 0):\n Padding token id.\n eos_token_id (`int`, *optional*, defaults to 1):\n End of stream token id.\n bos_token_id (`int`, *optional*, defaults to 2):\n Beginning of stream token id.\n tie_word_embeddings (`bool`, *optional*, defaults to `True`):\n Whether to tie weight embeddings.\n rope_theta (`float`, *optional*, defaults to 10000.0):\n The base period of the RoPE embeddings.\n attention_bias (`bool`, *optional*, defaults to `False`):\n Whether to use a bias in the query, key, value and output projection layers during self-attention.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n```python\n>>> from transformers import NewModelModel, NewModelConfig\n>>> # Initializing a NewModel new_model-7b style configuration\n>>> configuration = NewModelConfig()\n>>> # Initializing a model from the new_model-7b style configuration\n>>> model = NewModelModel(configuration)\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def maybe_propagate_compile_time_consts_in_xla(op):\n if control_flow_util.GraphOrParentsInXlaContext(op.graph):\n op._set_attr('_xla_propagate_compile_time_consts', attr_value_pb2.AttrValue(b=True))", "docstring": "Tells XLA whether to propagate compile-time consts in the loop body.\n\nThis is needed to make compile time constants available to ops, for example\n`max_num_elements` in `EmptyTensorList`, inside the loop body. Ideally this\nwould always be turned on, but that doesn't work with legacy functionalized\nwhile_loops.\n\nArgs:\n op: A `While` Operation."} +{"repo": "tensorflow", "function": "def remove(self, keys, name=None):\n return self.erase(keys, name)", "docstring": "Removes `keys` and their associated values from the table.\n\nIf a key is not present in the table, it is silently ignored.\n\nArgs:\n keys: Keys to remove. Can be a tensor of any shape. Must match the table's\n key type.\n name: A name for the operation (optional).\n\nReturns:\n The created Operation.\n\nRaises:\n TypeError: when `keys` do not match the table data types."} +{"repo": "beam", "function": "def temp_pubsub_emulator(project_id='apache-beam-testing'):\n with PubSubContainer(project=project_id) as pubsub_container:\n publisher = pubsub_v1.PublisherClient()\n random_front_charactor = random.choice(string.ascii_lowercase)\n topic_id = f'{random_front_charactor}{uuid.uuid4().hex[:8]}'\n topic_name_to_create = f'projects/{pubsub_container.project}/topics/{topic_id}'\n created_topic_object = publisher.create_topic(name=topic_name_to_create)\n yield created_topic_object.name", "docstring": "Context manager to provide a temporary Pub/Sub emulator for testing.\n\nThis function uses 'testcontainers' to spin up a Google Cloud SDK\ncontainer running the Pub/Sub emulator. 
It yields the emulator host\nstring (e.g., \"localhost:xxxxx\") that can be used to configure Pub/Sub\nclients.\n\nThe Docker container is automatically managed and torn down when the\ncontext manager exits.\n\nArgs:\n project_id (str): The GCP project ID to be used by the emulator.\n This doesn't need to be a real project for the emulator.\n\nYields:\n str: The host and port for the Pub/Sub emulator (e.g., \"localhost:xxxx\").\n This will be the address to point your Pub/Sub client to.\n\nRaises:\n Exception: If the container fails to start or the emulator isn't ready."} +{"repo": "pytype", "function": "class Unknown(_base.BaseValue):\n _current_id = 0\n IGNORED_ATTRIBUTES: Sequence[str] = ['__get__', '__set__', '__getattribute__']\n\n def __init__(self, ctx: 'context.Context') -> None:\n name = escape.unknown(Unknown._current_id)\n super().__init__(name, ctx)\n self.members = datatypes.MonitorDict()\n self.owner = None\n Unknown._current_id += 1\n self.class_name = self.name\n self._calls: list[tuple[tuple[cfg.Variable, ...], dict[str, cfg.Variable], cfg.Variable]] = []\n log.info('Creating %s', self.class_name)\n\n def compute_mro(self):\n return self.default_mro()\n\n def get_fullhash(self, seen: set[int] | None=None) -> int:\n return hash((type(self),) + tuple(sorted(self.members)))\n\n @classmethod\n def _to_pytd(cls, node, v):\n if isinstance(v, cfg.Variable):\n return pytd_utils.JoinTypes((cls._to_pytd(node, t) for t in v.data))\n elif isinstance(v, Unknown):\n return pytd.NamedType(v.class_name)\n else:\n return v.to_pytd_type(node)\n\n @classmethod\n def _make_params(cls, node: cfg.CFGNode, args: Sequence[cfg.Variable], kwargs: dict[str, cfg.Variable]) -> tuple[pytd.Parameter, ...]:\n \"\"\"Convert a list of types/variables to pytd parameters.\"\"\"\n\n def _make_param(name: str, p):\n return pytd.Parameter(name, cls._to_pytd(node, p), kind=pytd.ParameterKind.REGULAR, optional=False, mutated_type=None)\n pos_params = tuple((_make_param(f'_{i + 1}', p) for i, p in enumerate(args)))\n key_params = tuple((_make_param(name, p) for name, p in kwargs.items()))\n return pos_params + key_params\n\n def get_special_attribute(self, node: cfg.CFGNode, name: str, valself: cfg.Variable) -> cfg.Variable | None:\n del node, valself\n if name in self.IGNORED_ATTRIBUTES:\n return None\n if name in self.members:\n return self.members[name]\n new = self.ctx.convert.create_new_unknown(self.ctx.root_node, action='getattr_' + self.name + ':' + name)\n self.ctx.attribute_handler.set_attribute(self.ctx.root_node, self, name, new)\n return new\n\n def call(self, node: cfg.CFGNode, func: cfg.Binding | None, args: 'function.Args', alias_map: datatypes.UnionFind | None=None) -> tuple[cfg.CFGNode, cfg.Variable]:\n ret = self.ctx.convert.create_new_unknown(node, source=self.owner, action='call:' + self.name)\n self._calls.append((args.posargs, args.namedargs, ret))\n return (node, ret)\n\n def argcount(self, _) -> int:\n return 0\n\n def to_variable(self, node: cfg.CFGNode) -> cfg.Variable:\n v = self.ctx.program.NewVariable()\n val = v.AddBinding(self, source_set=[], where=node)\n self.owner = val\n self.ctx.vm.trace_unknown(self.class_name, val)\n return v\n\n def to_structural_def(self, node: cfg.CFGNode, class_name: str) -> pytd.Class:\n \"\"\"Convert this Unknown to a pytd.Class.\"\"\"\n self_param = (pytd.Parameter('self', pytd.AnythingType(), pytd.ParameterKind.REGULAR, False, None),)\n starargs = None\n starstarargs = None\n\n def _make_sig(args: tuple[cfg.Variable, ...], kwargs: dict[str, 
cfg.Variable], ret: cfg.Variable) -> pytd.Signature:\n return pytd.Signature(self_param + self._make_params(node, args, kwargs), starargs, starstarargs, return_type=Unknown._to_pytd(node, ret), exceptions=(), template=())\n calls = tuple(pytd_utils.OrderedSet((_make_sig(args, kwargs, ret) for args, kwargs, ret in self._calls)))\n if calls:\n methods = (pytd.Function('__call__', calls, pytd.MethodKind.METHOD),)\n else:\n methods = ()\n return pytd.Class(name=class_name, keywords=(), bases=(pytd.NamedType('builtins.object'),), methods=methods, constants=tuple((pytd.Constant(name, Unknown._to_pytd(node, c)) for name, c in self.members.items())), classes=(), decorators=(), slots=None, template=())\n\n def instantiate(self, node: cfg.CFGNode, container: '_instance_base.SimpleValue | abstract_utils.DummyContainer | None'=None) -> cfg.Variable:\n return self.to_variable(node)", "docstring": "Representation of unknown values.\n\nThese are e.g. the return values of certain functions (e.g. eval()). They\n\"adapt\": E.g. they'll respond to get_attribute requests by creating that\nattribute.\n\nAttributes:\n members: Attributes that were written or read so far. Mapping of str to\n cfg.Variable.\n owner: cfg.Binding that contains this instance as data."} +{"repo": "budoux", "function": "def preprocess(train_data_path: str, feature_thres: int, val_data_path: typing.Optional[str]=None) -> typing.Tuple[Dataset, typing.List[str], typing.Optional[Dataset]]:\n features = extract_features(train_data_path, feature_thres)\n feature_index = dict(((feature, i) for i, feature in enumerate(features)))\n train_dataset = load_dataset(train_data_path, feature_index)\n val_dataset = load_dataset(val_data_path, feature_index) if val_data_path else None\n return (train_dataset, features, val_dataset)", "docstring": "Loads entries and translates them into JAX arrays. The boolean matrix of\nthe input data is represented by row indices and column indices of True values\ninstead of the matrix itself for memory efficiency, assuming the matrix is\nhighly sparse. 
Row and column indices are not guaranteed to be sorted.\n\nArgs:\n train_data_path (str): A file path to the training data file.\n feature_thres (int): A threshold to filter out features whose number of\n occurrences does not exceed the value.\n val_data_path (str, optional): A file path to the validation data file.\n\nReturns:\n A tuple of the following items:\n - train_dataset (Dataset): The training dataset.\n - features (List[str]): The list of features.\n - val_dataset (Optional[Dataset]): The validation dataset.\n This becomes None if val_data_path is None."} +{"repo": "tensorflow", "function": "def _maybe_save_assets(write_fn, assets_to_add=None):\n asset_filename_map = {}\n if assets_to_add is None:\n tf_logging.info('No assets to save.')\n return asset_filename_map\n for asset_tensor in assets_to_add:\n asset_source_filepath = _asset_path_from_tensor(asset_tensor)\n if not asset_source_filepath:\n raise ValueError(f'Asset filepath tensor {asset_tensor} is invalid.')\n asset_filename = get_asset_filename_to_add(asset_source_filepath, asset_filename_map)\n write_fn(asset_filename, asset_tensor)\n asset_filename_map[asset_filename] = asset_source_filepath\n tf_logging.info('Assets added to graph.')\n return asset_filename_map", "docstring": "Saves assets to the meta graph.\n\nArgs:\n write_fn: A function callback that writes assets into meta graph.\n assets_to_add: The list where the asset paths are setup.\n\nReturns:\n A dict mapping the asset basenames used for saving to the original full\n paths of the assets.\n\nRaises:\n ValueError: Indicating an invalid filepath tensor."} +{"repo": "yapf", "function": "def AsCode(self, indent_per_depth=2):\n indent = ' ' * indent_per_depth * self.depth\n tokens_str = ' '.join((tok.value for tok in self._tokens))\n return indent + tokens_str", "docstring": "Return a \"code\" representation of this line.\n\nThe code representation shows how the line would be printed out as code.\n\nTODO(eliben): for now this is rudimentary for debugging - once we add\nformatting capabilities, this method will have other uses (not all tokens\nhave spaces around them, for example).\n\nArguments:\n indent_per_depth: how many spaces to indent per depth level.\n\nReturns:\n A string representing the line as code."} +{"repo": "tensorflow", "function": "def is_closed(self, name=None):\n if name is None:\n name = '%s_Is_Closed' % self._name\n if self._queue_ref.dtype == _dtypes.resource:\n return gen_data_flow_ops.queue_is_closed_v2(self._queue_ref, name=name)\n else:\n return gen_data_flow_ops.queue_is_closed_(self._queue_ref, name=name)", "docstring": "Returns true if queue is closed.\n\nThis operation returns true if the queue is closed and false if the queue\nis open.\n\n>>> q = tf.queue.FIFOQueue(capacity=3, dtypes=tf.int32)\n>>> q.is_closed()\n\n\nArgs:\n name: A name for the operation (optional).\n\nReturns:\n True if the queue is closed and false if the queue is open."} +{"repo": "transformers", "function": "def __call__(self, inputs: Union[str, List[str]], **kwargs: Any) -> Union[List[Dict[str, Any]], List[List[Dict[str, Any]]]]:\n outputs = super().__call__(inputs, **kwargs)\n if isinstance(inputs, list) and len(inputs) == 1:\n return outputs[0]\n return outputs", "docstring": "Fill the masked token in the text(s) given as inputs.\n\nArgs:\n inputs (`str` or `List[str]`):\n One or several texts (or one list of prompts) with masked tokens.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the 
whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first\n resulting token will be used (with a warning, and that might be slower).\n top_k (`int`, *optional*):\n When passed, overrides the number of predictions to return.\n\nReturn:\n A list or a list of list of `dict`: Each result comes as list of dictionaries with the following keys:\n\n - **sequence** (`str`) -- The corresponding input with the mask token prediction.\n - **score** (`float`) -- The corresponding probability.\n - **token** (`int`) -- The predicted token id (to replace the masked one).\n - **token_str** (`str`) -- The predicted token (to replace the masked one)."} +{"repo": "beam", "function": "def _update_graph(self, vertex_dict=None, edge_dict=None):\n\n def set_attrs(ref, attrs):\n for attr_name, attr_val in attrs.items():\n ref.set(attr_name, attr_val)\n with self._lock:\n if vertex_dict:\n for vertex, vertex_attrs in vertex_dict.items():\n set_attrs(self._vertex_refs[vertex], vertex_attrs)\n if edge_dict:\n for edge, edge_attrs in edge_dict.items():\n if isinstance(edge, tuple):\n set_attrs(self._edge_refs[edge], edge_attrs)\n else:\n for vertex_pair in self._edge_to_vertex_pairs[edge]:\n set_attrs(self._edge_refs[vertex_pair], edge_attrs)", "docstring": "Updates the pydot.Dot object with the given attribute update\n\nArgs:\n vertex_dict: (Dict[str, Dict[str, str]]) maps vertex names to attributes\n edge_dict: This should be\n Either (Dict[str, Dict[str, str]]) which maps edge names to attributes\n Or (Dict[(str, str), Dict[str, str]]) which maps vertex pairs to edge\n attributes"} +{"repo": "tensorflow", "function": "def get_input_params(distribution_strategy, num_samples, steps, batch_size, mode=None):\n use_per_replica_batch = not dist_utils.global_batch_size_supported(distribution_strategy)\n if context.executing_eagerly():\n allow_partial_batch = mode != ModeKeys.TRAIN or not backend.is_tpu_strategy(distribution_strategy)\n else:\n allow_partial_batch = mode == ModeKeys.TRAIN or ((mode == ModeKeys.PREDICT or mode == ModeKeys.TEST) and backend.is_tpu_strategy(distribution_strategy))\n if steps is None:\n if batch_size is None:\n global_batch_size = min(num_samples, 32)\n else:\n global_batch_size = batch_size\n if use_per_replica_batch:\n global_batch_size *= distribution_strategy.num_replicas_in_sync\n if allow_partial_batch:\n steps = np.ceil(num_samples / global_batch_size).astype(int)\n else:\n if num_samples % global_batch_size:\n raise ValueError('The number of samples %s is not divisible by batch size %s.' % (num_samples, global_batch_size))\n steps = num_samples // global_batch_size\n elif batch_size is None:\n if num_samples % steps:\n raise ValueError('The number of samples %s is not divisible by steps %s. 
Please change the number of steps to a value that can consume all the samples' % (num_samples, steps))\n global_batch_size = num_samples // steps\n else:\n global_batch_size = batch_size\n if use_per_replica_batch:\n global_batch_size *= distribution_strategy.num_replicas_in_sync\n min_num_samples = global_batch_size * steps\n if allow_partial_batch:\n min_num_samples = global_batch_size * (steps - 1) + 1 if steps > 1 else 0\n if num_samples < min_num_samples:\n raise ValueError('Number of samples %s is less than samples required for specified batch_size %s and steps %s' % (num_samples, global_batch_size, steps))\n if use_per_replica_batch:\n if global_batch_size % distribution_strategy.num_replicas_in_sync:\n raise ValueError('The batch size (%s) could not be sharded evenly across the sync replicas (%s) in the distribution strategy.' % (global_batch_size, distribution_strategy.num_replicas_in_sync))\n batch_size = global_batch_size // distribution_strategy.num_replicas_in_sync\n else:\n batch_size = global_batch_size\n return (steps, batch_size)", "docstring": "Calculate the number of batches and steps/steps_per_epoch.\n\nArgs:\n distribution_strategy: The DistributionStrategy used to compile the model.\n num_samples: The number of samples from which we determine the batch size\n and steps.\n steps: The specified number of steps.\n batch_size: The specified batch_size.\n mode: ModeKey representing whether input will be used for training,\n evaluation, or prediction. This is used to relax the constraints on\n consuming all the training samples to keep compatibility till we support\n partial batches. If none, then partial batches are not allowed.\n\nReturns:\n steps: The steps or steps_per_epoch argument depending on if a user is\n calling `fit`, `evaluate` or `predict`. If the is_training flag is set\n we don't require the number of samples to be used completely.\n batch_size: The batch size to be used in model iterations.\n\nRaises:\n ValueError: If the number of batches or steps evaluates to 0."} +{"repo": "keras", "function": "class Precision(Metric):\n\n def __init__(self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None):\n super().__init__(name=name, dtype=dtype)\n self._direction = 'up'\n self.init_thresholds = thresholds\n self.top_k = top_k\n self.class_id = class_id\n default_threshold = 0.5 if top_k is None else metrics_utils.NEG_INF\n self.thresholds = metrics_utils.parse_init_thresholds(thresholds, default_threshold=default_threshold)\n self._thresholds_distributed_evenly = metrics_utils.is_evenly_distributed_thresholds(self.thresholds)\n self.true_positives = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='true_positives')\n self.false_positives = self.add_variable(shape=(len(self.thresholds),), initializer=initializers.Zeros(), name='false_positives')\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n \"\"\"Accumulates true positive and false positive statistics.\n\n Args:\n y_true: The ground truth values, with the same dimensions as\n `y_pred`. Will be cast to `bool`.\n y_pred: The predicted values. Each element must be in the range\n `[0, 1]`.\n sample_weight: Optional weighting of each example. 
Defaults to `1`.\n Can be a tensor whose rank is either 0, or the same rank as\n `y_true`, and must be broadcastable to `y_true`.\n \"\"\"\n metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)\n\n def result(self):\n result = ops.divide_no_nan(self.true_positives, ops.add(self.true_positives, self.false_positives))\n return result[0] if len(self.thresholds) == 1 else result\n\n def reset_state(self):\n num_thresholds = len(to_list(self.thresholds))\n self.true_positives.assign(ops.zeros((num_thresholds,)))\n self.false_positives.assign(ops.zeros((num_thresholds,)))\n\n def get_config(self):\n config = {'thresholds': self.init_thresholds, 'top_k': self.top_k, 'class_id': self.class_id}\n base_config = super().get_config()\n return {**base_config, **config}", "docstring": "Computes the precision of the predictions with respect to the labels.\n\nThe metric creates two local variables, `true_positives` and\n`false_positives` that are used to compute the precision. This value is\nultimately returned as `precision`, an idempotent operation that simply\ndivides `true_positives` by the sum of `true_positives` and\n`false_positives`.\n\nIf `sample_weight` is `None`, weights default to 1.\nUse `sample_weight` of 0 to mask values.\n\nIf `top_k` is set, we'll calculate precision as how often on average a class\namong the top-k classes with the highest predicted values of a batch entry\nis correct and can be found in the label for that entry.\n\nIf `class_id` is specified, we calculate precision by considering only the\nentries in the batch for which `class_id` is above the threshold and/or in\nthe top-k highest predictions, and computing the fraction of them for which\n`class_id` is indeed a correct label.\n\nArgs:\n thresholds: (Optional) A float value, or a Python list/tuple of float\n threshold values in `[0, 1]`. A threshold is compared with\n prediction values to determine the truth value of predictions (i.e.,\n above the threshold is `True`, below is `False`). If used with a\n loss function that sets `from_logits=True` (i.e. no sigmoid applied\n to predictions), `thresholds` should be set to 0. One metric value\n is generated for each threshold value. If neither `thresholds` nor\n `top_k` are set, the default is to calculate precision with\n `thresholds=0.5`.\n top_k: (Optional) Unset by default. 
An int value specifying the top-k\n predictions to consider when calculating precision.\n class_id: (Optional) Integer class ID for which we want binary metrics.\n This must be in the half-open interval `[0, num_classes)`, where\n `num_classes` is the last dimension of predictions.\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nExample:\n\n>>> m = keras.metrics.Precision()\n>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])\n>>> m.result()\n0.6666667\n\n>>> m.reset_state()\n>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])\n>>> m.result()\n1.0\n\n>>> # With top_k=2, it will calculate precision over y_true[:2]\n>>> # and y_pred[:2]\n>>> m = keras.metrics.Precision(top_k=2)\n>>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])\n>>> m.result()\n0.0\n\n>>> # With top_k=4, it will calculate precision over y_true[:4]\n>>> # and y_pred[:4]\n>>> m = keras.metrics.Precision(top_k=4)\n>>> m.update_state([0, 0, 1, 1], [1, 1, 1, 1])\n>>> m.result()\n0.5\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(optimizer='sgd',\n loss='binary_crossentropy',\n metrics=[keras.metrics.Precision()])\n```\n\nUsage with a loss with `from_logits=True`:\n\n```python\nmodel.compile(optimizer='adam',\n loss=keras.losses.BinaryCrossentropy(from_logits=True),\n metrics=[keras.metrics.Precision(thresholds=0)])\n```"} +{"repo": "tensorflow", "function": "def scatter_nd_update(self, indices, updates, name=None):\n raise NotImplementedError", "docstring": "Applies sparse assignment to individual values or slices in a Variable.\n\nThe Variable has rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into self.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of self.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].\n```\n\nFor example, say we want to add 4 scattered elements to a rank-1 tensor to\n8 elements. In Python, that update would look like this:\n\n```python\n v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n v.scatter_nd_update(indices, updates)\n print(v)\n```\n\nThe resulting update to v would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.\n\nArgs:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\nReturns:\n The updated variable."} +{"repo": "tf-quant-finance", "function": "def discount_rate(self, date: Optional[types.DateTensor]=None, time: Optional[types.FloatTensor]=None, context=None) -> tf.Tensor:\n pass", "docstring": "Returns the discount rates to a specified set of dates.\n\nArgs:\n date: A `DateTensor` specifying the dates at which to evaluate the\n discount rates. The function expects either `date` or `time` to be\n specified.\n time: A real `Tensor` specifying the times at which to evaluate the\n discount rates. 
The function expects either `date` or `time` to be\n specified.\n context: The context object, e.g., curve_type.\n\nReturns:\n A `Tensor` of the same shape as `dates` with the corresponding discount\n rates."} +{"repo": "tensorflow", "function": "def permute_dimensions(x, pattern):\n return array_ops.transpose(x, perm=pattern)", "docstring": "Permutes axes in a tensor.\n\nArgs:\n x: Tensor or variable.\n pattern: A tuple of\n dimension indices, e.g. `(0, 2, 1)`.\n\nReturns:\n A tensor.\n\nExample:\n\n >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n >>> a\n \n >>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0))\n "} +{"repo": "pytype", "function": "def check_annotation_type_mismatch(self, node, name, typ, value, stack, allow_none, details=None):\n if not typ or not value:\n return\n if value.data == [self.convert.ellipsis] or (allow_none and value.data == [self.convert.none]):\n return\n contained_type = abstract_utils.match_type_container(typ, ('typing.ClassVar', 'dataclasses.InitVar'))\n if contained_type:\n typ = contained_type\n bad = self.matcher(node).compute_one_match(value, typ).bad_matches\n for match in bad:\n self.errorlog.annotation_type_mismatch(stack, match.expected.typ, match.actual_binding, name, match.error_details, details)", "docstring": "Checks for a mismatch between a variable's annotation and value.\n\nArgs:\n node: node\n name: variable name\n typ: variable annotation\n value: variable value\n stack: a frame stack for error reporting\n allow_none: whether a value of None is allowed for any type\n details: any additional details to add to the error message"} +{"repo": "tensorflow", "function": "def __init__(self, capacity, types, shapes=None, names=None, shared_name=None, name='priority_queue'):\n types = _as_type_list(types)\n shapes = _as_shape_list(shapes, types)\n queue_ref = gen_data_flow_ops.priority_queue_v2(component_types=types, shapes=shapes, capacity=capacity, shared_name=_shared_name(shared_name), name=name)\n priority_dtypes = [_dtypes.int64] + types\n priority_shapes = [()] + shapes if shapes else shapes\n super(PriorityQueue, self).__init__(priority_dtypes, priority_shapes, names, queue_ref)", "docstring": "Creates a queue that dequeues elements in a first-in first-out order.\n\nA `PriorityQueue` has bounded capacity; supports multiple concurrent\nproducers and consumers; and provides exactly-once delivery.\n\nA `PriorityQueue` holds a list of up to `capacity` elements. Each\nelement is a fixed-length tuple of tensors whose dtypes are\ndescribed by `types`, and whose shapes are optionally described\nby the `shapes` argument.\n\nIf the `shapes` argument is specified, each component of a queue\nelement must have the respective fixed shape. If it is\nunspecified, different queue elements may have different shapes,\nbut the use of `dequeue_many` is disallowed.\n\nEnqueues and Dequeues to the `PriorityQueue` must include an additional\ntuple entry at the beginning: the `priority`. The priority must be\nan int64 scalar (for `enqueue`) or an int64 vector (for `enqueue_many`).\n\nArgs:\n capacity: An integer. The upper bound on the number of elements\n that may be stored in this queue.\n types: A list of `DType` objects. The length of `types` must equal\n the number of tensors in each queue element, except the first priority\n element. The first tensor in each element is the priority,\n which must be type int64.\n shapes: (Optional.) 
A list of fully-defined `TensorShape` objects,\n with the same length as `types`, or `None`.\n names: (Optional.) A list of strings naming the components in the queue\n with the same length as `types`, or `None`. If specified, the dequeue\n methods return a dictionary with the names as keys.\n shared_name: (Optional.) If non-empty, this queue will be shared under\n the given name across multiple sessions.\n name: Optional name for the queue operation."} +{"repo": "transformers", "function": "def call(self, sequence_output: tf.Tensor) -> tf.Tensor:\n logits = (tf.einsum('bsj,j->bs', sequence_output, self.output_weights) + self.output_bias) / self.temperature\n return logits", "docstring": "Computes logits per token.\n\nArgs:\n sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the\n model.\n\nReturns:\n logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Logits per token."} +{"repo": "pyglove", "function": "def __init__(self, index: Optional[int]=None):\n super().__init__()\n self._index = index", "docstring": "Constructor.\n\nArgs:\n index: index of the tuple field that this key spec applies to.\n If None, this tuple value spec applies to all elements of a\n variable-length tuple."} +{"repo": "fhir-py", "function": "class Select(StandardSqlExpression):\n select_part: StandardSqlExpression\n from_part: Optional[str]\n where_part: Optional[str] = None\n limit_part: Optional[int] = None\n sql_dialect: Optional[SqlDialect] = SqlDialect.BIGQUERY\n\n @property\n def sql_data_type(self) -> StandardSqlDataType:\n return self.select_part.sql_data_type\n\n @property\n def sql_alias(self) -> str:\n return self.select_part.sql_alias\n\n def union(self, rhs: Select, distinct: bool) -> UnionExpression:\n \"\"\"Builds a UNION with this and the given Select.\"\"\"\n return UnionExpression(self, rhs, distinct)\n\n def to_subquery(self) -> StandardSqlExpression:\n \"\"\"Renders the expression as a subquery.\"\"\"\n return SubQuery(self)\n\n def __str__(self) -> str:\n \"\"\"Builds the SQL expression from its given components.\"\"\"\n query_parts = ['SELECT ']\n select_part = wrap_time_types(str(self.select_part), self.sql_data_type, self.sql_dialect)\n query_parts.append(select_part)\n if select_part != str(self.select_part) or not self.select_part.matches_alias(self.sql_alias):\n query_parts.extend((' AS ', str(self.sql_alias)))\n if self.from_part:\n query_parts.extend(f'\\nFROM {self.from_part}')\n if self.where_part:\n query_parts.extend(('\\n', 'WHERE ', str(self.where_part)))\n if self.limit_part:\n query_parts.extend(('\\n', 'LIMIT ', str(self.limit_part)))\n return ''.join(query_parts)\n\n def as_operand(self) -> str:\n \"\"\"Returns select_part.as_operand() if this expression has no other parts.\n\n Otherwise it just returns the expression's __str__ representation in a\n subquery.\n\n Also excludes the initial `SELECT`.\n \"\"\"\n if self.from_part or self.where_part:\n return str(self.to_subquery())\n return wrap_time_types(self.select_part.as_operand(), self.sql_data_type, self.sql_dialect)", "docstring": "Representation of a Standard SQL SELECT expression.\n\nAttributes:\n select_part: The expression being SELECT'd\n from_part: The body of the FROM clause. 
Optional to support subquery\n expressions taking their FROM from a parent query.\n where_part: The body of the WHERE clause.\n limit_part: The body of the LIMIT clause.\n sql_dialect: The SQL dialect to use. Defaults to BigQuery"} +{"repo": "tensorflow", "function": "def from_operator(cls, operator):\n validation_fields = ('is_non_singular', 'is_self_adjoint', 'is_positive_definite', 'is_square')\n kwargs = _extract_attrs(operator, keys=set(operator._composite_tensor_fields + validation_fields))\n non_tensor_params = {}\n param_specs = {}\n for k, v in list(kwargs.items()):\n type_spec_or_v = _extract_type_spec_recursively(v)\n is_tensor = [isinstance(x, type_spec.TypeSpec) for x in nest.flatten(type_spec_or_v)]\n if all(is_tensor):\n param_specs[k] = type_spec_or_v\n elif not any(is_tensor):\n non_tensor_params[k] = v\n else:\n raise NotImplementedError(f'Field {k} contains a mix of `Tensor` and non-`Tensor` values.')\n return cls(param_specs=param_specs, non_tensor_params=non_tensor_params, prefer_static_fields=operator._composite_tensor_prefer_static_fields)", "docstring": "Builds a `_LinearOperatorSpec` from a `LinearOperator` instance.\n\nArgs:\n operator: An instance of `LinearOperator`.\n\nReturns:\n linear_operator_spec: An instance of `_LinearOperatorSpec` to be used as\n the `TypeSpec` of `operator`."} +{"repo": "tensorflow", "function": "def update_state(self, y_true, y_pred, sample_weight=None):\n y_true = math_ops.cast(y_true, self._dtype)\n y_pred = math_ops.cast(y_pred, self._dtype)\n if y_pred.shape.ndims > 1:\n y_pred = array_ops.reshape(y_pred, [-1])\n if y_true.shape.ndims > 1:\n y_true = array_ops.reshape(y_true, [-1])\n if sample_weight is not None:\n sample_weight = math_ops.cast(sample_weight, self._dtype)\n if sample_weight.shape.ndims > 1:\n sample_weight = array_ops.reshape(sample_weight, [-1])\n current_cm = confusion_matrix.confusion_matrix(y_true, y_pred, self.num_classes, weights=sample_weight, dtype=self._dtype)\n return self.total_cm.assign_add(current_cm)", "docstring": "Accumulates the confusion matrix statistics.\n\nArgs:\n y_true: The ground truth values.\n y_pred: The predicted values.\n sample_weight: Optional weighting of each example. Defaults to 1. Can be a\n `Tensor` whose rank is either 0, or the same rank as `y_true`, and must\n be broadcastable to `y_true`.\n\nReturns:\n Update op."} +{"repo": "tensorflow", "function": "def collective_manager_ids_from_op(op):\n if op.type == 'CollectiveReduce':\n try:\n return [op.get_attr('_collective_manager_id')]\n except ValueError:\n pass\n elif op.type == 'StatefulPartitionedCall':\n try:\n return op.get_attr(utils.COLLECTIVE_MANAGER_IDS)\n except ValueError:\n pass\n return []", "docstring": "Returns CollectiveManager ID from the op if one exists, else None.\n\nCollectiveManager adds collective and no_op operations tagged with an ID,\nunique to the manager object. This function extracts that ID, or None, if the\nnode was not generated by a CollectiveManager.\n\nArgs:\n op: `Operation` to get the collective manager ID from.\n\nReturns:\n List of CollectiveManager IDs used by the op."} +{"repo": "keras", "function": "def logdet(x):\n if any_symbolic_tensors((x,)):\n return Logdet().symbolic_call(x)\n return backend.math.logdet(x)", "docstring": "Computes log of the determinant of a hermitian positive definite matrix.\n\nArgs:\n x: Input matrix. 
It must be 2D and square.\n\nReturns:\n The natural log of the determinant of the matrix."} +{"repo": "keras", "function": "class MeanSquaredLogarithmicError(LossFunctionWrapper):\n\n def __init__(self, reduction='sum_over_batch_size', name='mean_squared_logarithmic_error', dtype=None):\n super().__init__(mean_squared_logarithmic_error, name=name, reduction=reduction, dtype=dtype)\n\n def get_config(self):\n return Loss.get_config(self)", "docstring": "Computes the mean squared logarithmic error between `y_true` & `y_pred`.\n\nFormula:\n\n```python\nloss = mean(square(log(y_true + 1) - log(y_pred + 1)))\n```\n\nArgs:\n reduction: Type of reduction to apply to the loss. In almost all cases\n this should be `\"sum_over_batch_size\"`. Supported options are\n `\"sum\"`, `\"sum_over_batch_size\"`, `\"mean\"`,\n `\"mean_with_sample_weight\"` or `None`. `\"sum\"` sums the loss,\n `\"sum_over_batch_size\"` and `\"mean\"` sum the loss and divide by the\n sample size, and `\"mean_with_sample_weight\"` sums the loss and\n divides by the sum of the sample weights. `\"none\"` and `None`\n perform no aggregation. Defaults to `\"sum_over_batch_size\"`.\n name: Optional name for the loss instance.\n dtype: The dtype of the loss's computations. Defaults to `None`, which\n means using `keras.backend.floatx()`. `keras.backend.floatx()` is a\n `\"float32\"` unless set to a different value\n (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is\n provided, then the `compute_dtype` will be utilized."} +{"repo": "transformers", "function": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension=None) -> tuple[int, int]:\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n if channel_dim == ChannelDimension.FIRST:\n return (image.shape[-2], image.shape[-1])\n elif channel_dim == ChannelDimension.LAST:\n return (image.shape[-3], image.shape[-2])\n else:\n raise ValueError(f'Unsupported data format: {channel_dim}')", "docstring": "Returns the (height, width) dimensions of the image.\n\nArgs:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\nReturns:\n A tuple of the image's height and width."} +{"repo": "transformers", "function": "def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n cos = cos.unsqueeze(unsqueeze_dim)\n sin = sin.unsqueeze(unsqueeze_dim)\n q_embed = q * cos + rotate_half(q) * sin\n k_embed = k * cos + rotate_half(k) * sin\n return (q_embed, k_embed)", "docstring": "Applies Rotary Position Embedding to the query and key tensors.\n\nArgs:\n q (`torch.Tensor`): The query tensor.\n k (`torch.Tensor`): The key tensor.\n cos (`torch.Tensor`): The cosine part of the rotary embedding.\n sin (`torch.Tensor`): The sine part of the rotary embedding.\n position_ids (`torch.Tensor`, *optional*):\n Deprecated and unused.\n unsqueeze_dim (`int`, *optional*, defaults to 1):\n The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. 
Similarly, if q and k have\n the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\nReturns:\n `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding."} +{"repo": "transformers", "function": "class FlaxSeq2SeqSequenceClassifierOutput(ModelOutput):\n logits: Optional[jnp.ndarray] = None\n past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None\n decoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None\n decoder_attentions: Optional[Tuple[jnp.ndarray]] = None\n cross_attentions: Optional[Tuple[jnp.ndarray]] = None\n encoder_last_hidden_state: Optional[jnp.ndarray] = None\n encoder_hidden_states: Optional[Tuple[jnp.ndarray]] = None\n encoder_attentions: Optional[Tuple[jnp.ndarray]] = None", "docstring": "Base class for outputs of sequence-to-sequence sentence classification models.\n\nArgs:\n logits (`jnp.ndarray` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.\n decoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n encoder_last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.\n encoder_attentions (`tuple(jnp.ndarray)`, *optional*, returned 
when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads."} +{"repo": "fhir-py", "function": "def memberOf(self, value_set: Union[str, message.Message]) -> 'Builder':\n param_nodes = self._function_args_to_nodes(self.node, [value_set])\n return self._to_builder(_evaluation.MemberOfFunction(self.node.context, self.node, param_nodes))", "docstring": "The FHIRPath memberOf() function.\n\nThis is used to determine whether a codeable concept is a member of a given\nvalue set. The value set may be a literal, FHIR ValueSet proto or\n(more commonly) a URI that is resolved by the underlying evaluation engine.\n\nFor example, the following expression can be used to determine if an\nobservation code is part of a specific valueset:\n\n>>> obs = \n>>> obs.code.MemberOf('url:example:my:valueset')\n\nArgs:\n value_set: may be either a string containing a value set URL or an\n expanded value set in the form of a proto. See examples in the README\n documentation for more details.\n\nReturns:\n An expression to that evaluates to true if the parent is a member of\n the given value set."} +{"repo": "keras", "function": "def pad(x, pad_width, mode='constant', constant_values=None):\n return Pad(pad_width, mode=mode)(x, constant_values=constant_values)", "docstring": "Pad a tensor.\n\nArgs:\n x: Tensor to pad.\n pad_width: Number of values padded to the edges of each axis.\n `((before_1, after_1), ...(before_N, after_N))` unique pad\n widths for each axis.\n `((before, after),)` yields same before and after pad for\n each axis.\n `(pad,)` or `int` is a shortcut for `before = after = pad`\n width for all axes.\n mode: One of `\"constant\"`, `\"edge\"`, `\"linear_ramp\"`,\n `\"maximum\"`, `\"mean\"`, `\"median\"`, `\"minimum\"`,\n `\"reflect\"`, `\"symmetric\"`, `\"wrap\"`, `\"empty\"`,\n `\"circular\"`. Defaults to `\"constant\"`.\n constant_values: value to pad with if `mode == \"constant\"`.\n Defaults to `0`. 
A `ValueError` is raised if not None and\n `mode != \"constant\"`.\n\nNote:\n Torch backend only supports modes `\"constant\"`, `\"reflect\"`,\n `\"symmetric\"` and `\"circular\"`.\n Only Torch backend supports `\"circular\"` mode.\n\nNote:\n Tensorflow backend only supports modes `\"constant\"`, `\"reflect\"`\n and `\"symmetric\"`.\n\nReturns:\n Padded tensor."} +{"repo": "transformers", "function": "def __call__(self, table: 'pd.DataFrame', queries: Optional[Union[TextInput, PreTokenizedInput, EncodedInput, List[TextInput], List[PreTokenizedInput], List[EncodedInput]]]=None, answer_coordinates: Optional[Union[List[Tuple], List[List[Tuple]]]]=None, answer_text: Optional[Union[List[TextInput], List[List[TextInput]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TapasTruncationStrategy]=False, max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n assert isinstance(table, pd.DataFrame), 'Table must be of type pd.DataFrame'\n valid_query = False\n if queries is None or isinstance(queries, str):\n valid_query = True\n elif isinstance(queries, (list, tuple)):\n if len(queries) == 0 or isinstance(queries[0], str):\n valid_query = True\n if not valid_query:\n raise ValueError('queries input must of type `str` (single example), `List[str]` (batch or single pretokenized example). ')\n is_batched = isinstance(queries, (list, tuple))\n if is_batched:\n return self.batch_encode_plus(table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)\n else:\n return self.encode_plus(table=table, query=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Main method to tokenize and prepare for the model one or several sequence(s) related to a table.\n\nArgs:\n table (`pd.DataFrame`):\n Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas\n dataframe to convert it to string.\n queries (`str` or `List[str]`):\n Question or batch of questions related to a table to be encoded. 
Note that in case of a batch, all\n questions must refer to the **same** table.\n answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):\n Answer coordinates of each table-question pair in the batch. In case only a single table-question pair\n is provided, then the answer_coordinates must be a single list of one or more tuples. Each tuple must\n be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The\n first column has index 0. In case a batch of table-question pairs is provided, then the\n answer_coordinates must be a list of lists of tuples (each list corresponding to a single\n table-question pair).\n answer_text (`List[str]` or `List[List[str]]`, *optional*):\n Answer text of each table-question pair in the batch. In case only a single table-question pair is\n provided, then the answer_text must be a single list of one or more strings. Each string must be the\n answer text of a corresponding answer coordinate. In case a batch of table-question pairs is provided,\n then the answer_coordinates must be a list of lists of strings (each list corresponding to a single\n table-question pair)."} +{"repo": "fhir-py", "function": "def to_sql(self, view: views.View, limit: Optional[int]=None) -> str:\n encoder = _spark_interpreter.SparkSqlInterpreter(value_set_codes_table='VALUESET_VIEW')\n dataset = f'{self._fhir_dataset}'\n sql_generator = runner_utils.RunnerSqlGenerator(view, encoder, dataset, self._snake_case_resource_tables)\n sql_statement = sql_generator.build_sql_statement()\n valuesets_clause = sql_generator.build_valueset_expression(self._value_set_codes_table)\n if limit is not None and limit < 1:\n raise ValueError('Query limits must be positive integers.')\n limit_clause = '' if limit is None else f' LIMIT {limit}'\n return f'{valuesets_clause}{sql_statement}{limit_clause}'", "docstring": "Returns the SQL used to run the given view in Spark.\n\nArgs:\n view: the view used to generate the SQL.\n limit: optional limit to attach to the generated SQL.\n\nReturns:\n The SQL used to run the given view."} +{"repo": "fhir-py", "function": "def fixed_field_for_type_code(type_code: str) -> str:\n url_type_code = _TYPE_CODE_URI_RE.search(type_code)\n if url_type_code is not None:\n type_code = url_type_code[1]\n fixed_field = stringcase.snakecase(type_code[:1].lower() + type_code[1:])\n return 'string_value' if fixed_field == 'string' else fixed_field", "docstring": "Retrieves the `ElementDefinition.fixed.choice` oneof field for `type_code`.\n\nArgs:\n type_code: The FHIR type code to look up. 
Could be a value like 'boolean' or\n URL like 'http://hl7.org/fhirpath/System.Boolean'\n\nReturns:\n The attribute corresponding to this type code on the\n ElementDefinition.FixedX.choice oneof."} +{"repo": "transformers", "function": "class MoshiDepthConfig(PretrainedConfig):\n model_type = 'moshi_depth'\n keys_to_ignore_at_inference = ['past_key_values']\n\n def __init__(self, vocab_size=32000, hidden_size=1024, input_size=4096, num_hidden_layers=6, num_attention_heads=16, num_key_value_heads=None, audio_vocab_size=2048, max_position_embeddings=9, hidden_act='silu', head_dim=None, initializer_range=0.02, use_cache=True, sliding_window=8, attention_dropout=0.0, ffn_dim=5632, rms_norm_eps=1e-08, num_codebooks=8, tie_word_embeddings=False, **kwargs):\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.input_size = input_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_key_value_heads = num_key_value_heads if num_key_value_heads is not None else num_attention_heads\n self.max_position_embeddings = max_position_embeddings\n self.hidden_act = hidden_act\n self.head_dim = head_dim or hidden_size // num_attention_heads\n self.initializer_range = initializer_range\n self.use_cache = use_cache\n self.sliding_window = sliding_window\n self.attention_dropout = attention_dropout\n if ffn_dim % 2 == 1:\n raise ValueError(f'`ffn_dim={ffn_dim}` must be even.')\n self.ffn_dim = ffn_dim\n self.rms_norm_eps = rms_norm_eps\n self.num_codebooks = num_codebooks\n self.audio_vocab_size = audio_vocab_size\n super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`MoshiDepthDecoder`]. It is used to instantiate a\nMoshi depth decoder model according to the specified arguments, defining the Moshi depth decoder config.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 32000):\n Vocabulary size of the MoshiDepthDecoder model. Defines the number of different tokens that can be\n represented by the `inputs_ids` passed when calling [`MoshiDepthDecoder`].\n hidden_size (`int`, *optional*, defaults to 1024):\n Dimensionality of the layers and the pooler layer of the depth decoder.\n input_size (`int`, *optional*, defaults to 4096):\n Dimensionality of the input hidden states. Used to connect the main decoder to the depth decoder.\n num_hidden_layers (`int`, *optional*, defaults to 6):\n Number of depth decoder layers.\n num_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the depth decoder block.\n num_key_value_heads (`int`, *optional*):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details, check out [this\n paper](https://huggingface.co/papers/2305.13245). 
If it is not specified, will default to `num_attention_heads`.\n audio_vocab_size (`int`, *optional*, defaults to 2048):\n Vocabulary size of the audio part of model. Defines the number of different tokens that can be\n represented by the `audio_codes` passed when calling the Moshi models.\n max_position_embeddings (`int`, *optional*, defaults to 9):\n The maximum sequence length that this model might ever be used with. Typically, set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the depth decoder.\n head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):\n The attention head dimension.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n sliding_window (`int`, *optional*, defaults to 8):\n Sliding window attention window size. If not specified, will default to `8`.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n ffn_dim (`int`, *optional*, defaults to 5632):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in the depth decoder block. Must be even.\n rms_norm_eps (`float`, *optional*, defaults to 1e-08):\n The epsilon used by the rms normalization layers.\n num_codebooks (`int`, *optional*, defaults to 8):\n The number of audio codebooks for each audio channels.\n tie_word_embeddings (`bool`, *optional*, defaults to `False`):\n Whether to tie weight embeddings\n kwargs (*optional*):\n Dictionary of keyword arguments. Notably:\n - **audio_encoder_config** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that\n defines the audio encoder config.\n\nExample:\n\n```python\n>>> from transformers import (\n... MoshiDepthConfig,\n... MoshiDepthDecoder,\n... )\n\n>>> configuration = MoshiDepthConfig()\n\n>>> # Initializing a MoshiDepthDecoder (with random weights) from the kmhf/hf-moshiko style configuration\n>>> model = MoshiDepthDecoder(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "keras", "function": "def amax(x, axis=None, keepdims=False):\n if any_symbolic_tensors((x,)):\n return Amax(axis=axis, keepdims=keepdims).symbolic_call(x)\n return backend.numpy.amax(x, axis=axis, keepdims=keepdims)", "docstring": "Returns the maximum of an array or maximum value along an axis.\n\nArgs:\n x: Input tensor.\n axis: Axis along which to compute the maximum.\n By default (`axis=None`), find the maximum value in all the\n dimensions of the input array.\n keepdims: If `True`, axes which are reduced are left in the result as\n dimensions that are broadcast to the size of the original\n input tensor. Defaults to `False`.\n\nReturns:\n An array with the maximum value. If `axis=None`, the result is a scalar\n value representing the maximum element in the entire array. 
If `axis` is\n given, the result is an array with the maximum values along\n the specified axis.\n\nExamples:\n>>> x = keras.ops.convert_to_tensor([[1, 3, 5], [2, 3, 6]])\n>>> keras.ops.amax(x)\narray(6, dtype=int32)\n\n>>> x = keras.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]])\n>>> keras.ops.amax(x, axis=0)\narray([1, 6, 8], dtype=int32)\n\n>>> x = keras.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]])\n>>> keras.ops.amax(x, axis=1, keepdims=True)\narray([[8], [5]], dtype=int32)"} +{"repo": "transformers", "function": "def detach(self) -> Rotation:\n if self._rot_mats is not None:\n return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n elif self._quats is not None:\n return Rotation(rot_mats=None, quats=self._quats.detach(), normalize_quats=False)\n else:\n raise ValueError('Both rotations are None')", "docstring": "Returns a copy of the Rotation whose underlying Tensor has been detached from its torch graph.\n\nReturns:\n A copy of the Rotation whose underlying Tensor has been detached from its torch graph"} +{"repo": "tensorflow", "function": "def uniform_row_length(self):\n return self._row_partition.uniform_row_length()", "docstring": "The length of each row in this ragged tensor, or None if rows are ragged.\n\n>>> rt1 = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])\n>>> print(rt1.uniform_row_length) # rows are ragged.\nNone\n\n>>> rt2 = tf.RaggedTensor.from_uniform_row_length(\n... values=rt1, uniform_row_length=2)\n>>> print(rt2)\n\n>>> print(rt2.uniform_row_length) # rows are not ragged (all have size 2).\ntf.Tensor(2, shape=(), dtype=int64)\n\nA RaggedTensor's rows are only considered to be uniform (i.e. non-ragged)\nif it can be determined statically (at graph construction time) that the\nrows all have the same length.\n\nReturns:\n A scalar integer `Tensor`, specifying the length of every row in this\n ragged tensor (for ragged tensors whose rows are uniform); or `None`\n (for ragged tensors whose rows are ragged)."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n logits = self.lm_head(outputs[0])\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = 
loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)", "docstring": "cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\nlabels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, MvpForCausalLM\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"RUCAIBox/mvp\")\n>>> model = MvpForCausalLM.from_pretrained(\"RUCAIBox/mvp\", add_cross_attention=False)\n\n>>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n>>> outputs = model(**inputs)\n\n>>> logits = outputs.logits\n>>> list(logits.shape)\n[1, 8, 50267]\n```"} +{"repo": "transformers", "function": "def forward(self, hidden_states, attention_mask, output_attentions=False, query_states=None, relative_pos=None, rel_embeddings=None):\n if query_states is None:\n query_states = hidden_states\n query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)\n key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)\n value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)\n rel_att = None\n scale_factor = 1\n if 'c2p' in self.pos_att_type:\n scale_factor += 1\n if 'p2c' in self.pos_att_type:\n scale_factor += 1\n scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor)\n attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2) / scale.to(dtype=query_layer.dtype))\n if self.relative_attention:\n rel_embeddings = self.pos_dropout(rel_embeddings)\n rel_att = self.disentangled_attention_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)\n if rel_att is not None:\n attention_scores = attention_scores + rel_att\n attention_scores = attention_scores\n attention_scores = attention_scores.view(-1, self.num_attention_heads, attention_scores.size(-2), attention_scores.size(-1))\n attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1)\n attention_probs = self.dropout(attention_probs)\n context_layer = torch.bmm(attention_probs.view(-1, attention_probs.size(-2), attention_probs.size(-1)), value_layer)\n context_layer = context_layer.view(-1, self.num_attention_heads, context_layer.size(-2), context_layer.size(-1)).permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (-1,)\n context_layer = context_layer.view(new_context_layer_shape)\n if output_attentions:\n return (context_layer, attention_probs)\n else:\n return context_layer", "docstring": "Call the module\n\nArgs:\n hidden_states (`torch.FloatTensor`):\n Input states to the module usually the 
output from previous layer, it will be the Q,K and V in\n *Attention(Q,K,V)*\n\n attention_mask (`torch.BoolTensor`):\n An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum\n sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*\n th token.\n\n output_attentions (`bool`, *optional*):\n Whether return the attention matrix.\n\n query_states (`torch.FloatTensor`, *optional*):\n The *Q* state in *Attention(Q,K,V)*.\n\n relative_pos (`torch.LongTensor`):\n The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with\n values ranging in [*-max_relative_positions*, *max_relative_positions*].\n\n rel_embeddings (`torch.FloatTensor`):\n The embedding of relative distances. It's a tensor of shape [\\(2 \\times\n \\text{max_relative_positions}\\), *hidden_size*]."} +{"repo": "transformers", "function": "class ConvBertSequenceSummary(nn.Module):\n\n def __init__(self, config: ConvBertConfig):\n super().__init__()\n self.summary_type = getattr(config, 'summary_type', 'last')\n if self.summary_type == 'attn':\n raise NotImplementedError\n self.summary = nn.Identity()\n if hasattr(config, 'summary_use_proj') and config.summary_use_proj:\n if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and (config.num_labels > 0):\n num_classes = config.num_labels\n else:\n num_classes = config.hidden_size\n self.summary = nn.Linear(config.hidden_size, num_classes)\n activation_string = getattr(config, 'summary_activation', None)\n self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity()\n self.first_dropout = nn.Identity()\n if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0:\n self.first_dropout = nn.Dropout(config.summary_first_dropout)\n self.last_dropout = nn.Identity()\n if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0:\n self.last_dropout = nn.Dropout(config.summary_last_dropout)\n\n def forward(self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor:\n \"\"\"\n Compute a single vector summary of a sequence hidden states.\n\n Args:\n hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`):\n The hidden states of the last layer.\n cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... 
are optional leading dimensions of `hidden_states`, *optional*):\n Used if `summary_type == \"cls_index\"` and takes the last token of the sequence as classification token.\n\n Returns:\n `torch.FloatTensor`: The summary of the sequence hidden states.\n \"\"\"\n if self.summary_type == 'last':\n output = hidden_states[:, -1]\n elif self.summary_type == 'first':\n output = hidden_states[:, 0]\n elif self.summary_type == 'mean':\n output = hidden_states.mean(dim=1)\n elif self.summary_type == 'cls_index':\n if cls_index is None:\n cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long)\n else:\n cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)\n cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))\n output = hidden_states.gather(-2, cls_index).squeeze(-2)\n elif self.summary_type == 'attn':\n raise NotImplementedError\n output = self.first_dropout(output)\n output = self.summary(output)\n output = self.activation(output)\n output = self.last_dropout(output)\n return output", "docstring": "Compute a single vector summary of a sequence hidden states.\n\nArgs:\n config ([`ConvBertConfig`]):\n The config used by the model. Relevant arguments in the config class of the model are (refer to the actual\n config class of your model for the default values it uses):\n\n - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:\n\n - `\"last\"` -- Take the last token hidden state (like XLNet)\n - `\"first\"` -- Take the first token hidden state (like Bert)\n - `\"mean\"` -- Take the mean of all tokens hidden states\n - `\"cls_index\"` -- Supply a Tensor of classification token position (GPT/GPT-2)\n - `\"attn\"` -- Not implemented now, use multi-head attention\n\n - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.\n - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes\n (otherwise to `config.hidden_size`).\n - **summary_activation** (`Optional[str]`) -- Set to `\"tanh\"` to add a tanh activation to the output,\n another string or `None` will add no activation.\n - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.\n - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation."} +{"repo": "keras", "function": "def remat(f):\n return tf.recompute_grad(f)", "docstring": "Implementation of rematerialization.\n\nArgs:\n f: The function or operation to rematerialize.\nReturns:\n A function wrapping f that defines a custom gradient, which\n recomputes f on the backwards pass of a gradient call."} +{"repo": "transformers", "function": "class SamHQPromptEncoderConfig(SamPromptEncoderConfig):\n pass", "docstring": "This is the configuration class to store the configuration of a [`SamHQPromptEncoderModel`].The [`SamHQPromptEncoderModel`]\nmodule is used to encode the input 2D points and bounding boxes. Instantiating a configuration defaults will yield a\nsimilar configuration to that of the SAM_HQ model. 
The configuration is used to store the configuration of the model.\n[Uminosachi/sam-hq](https://huggingface.co/Uminosachi/sam-hq) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model's output.Read the documentation from\n[`PretrainedConfig`] for more information.\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 256):\n Dimensionality of the hidden states.\n image_size (`int`, *optional*, defaults to 1024):\n The expected output resolution of the image.\n patch_size (`int`, *optional*, defaults to 16):\n The size (resolution) of each patch.\n mask_input_channels (`int`, *optional*, defaults to 16):\n The number of channels to be fed to the `MaskDecoder` module.\n num_point_embeddings (`int`, *optional*, defaults to 4):\n The number of point embeddings to be used.\n hidden_act (`str`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function in the encoder and pooler."} +{"repo": "transformers", "function": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not\nmake use of token type ids, therefore a list of zeros is returned.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of zeros."} +{"repo": "transformers", "function": "class OwlViTTextConfig(PretrainedConfig):\n model_type = 'owlvit_text_model'\n base_config_key = 'text_config'\n\n def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act='quick_gelu', layer_norm_eps=1e-05, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs):\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.max_position_embeddings = max_position_embeddings\n self.hidden_act = hidden_act\n self.layer_norm_eps = layer_norm_eps\n self.attention_dropout = attention_dropout\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor", "docstring": "This is the configuration class to store the configuration of an [`OwlViTTextModel`]. It is used to instantiate an\nOwlViT text encoder according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the OwlViT\n[google/owlvit-base-patch32](https://huggingface.co/google/owlvit-base-patch32) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 49408):\n Vocabulary size of the OWL-ViT text model. 
Defines the number of different tokens that can be represented\n by the `inputs_ids` passed when calling [`OwlViTTextModel`].\n hidden_size (`int`, *optional*, defaults to 512):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 2048):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 8):\n Number of attention heads for each attention layer in the Transformer encoder.\n max_position_embeddings (`int`, *optional*, defaults to 16):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n hidden_act (`str` or `function`, *optional*, defaults to `\"quick_gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` `\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1.0):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n pad_token_id (`int`, *optional*, defaults to 0):\n The id of the padding token in the input sequences.\n bos_token_id (`int`, *optional*, defaults to 49406):\n The id of the beginning-of-sequence token in the input sequences.\n eos_token_id (`int`, *optional*, defaults to 49407):\n The id of the end-of-sequence token in the input sequences.\n\nExample:\n\n```python\n>>> from transformers import OwlViTTextConfig, OwlViTTextModel\n\n>>> # Initializing a OwlViTTextModel with google/owlvit-base-patch32 style configuration\n>>> configuration = OwlViTTextConfig()\n\n>>> # Initializing a OwlViTTextConfig from the google/owlvit-base-patch32 style configuration\n>>> model = OwlViTTextModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_bias: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=False, past_key_values: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, use_cache: Optional[bool]=None):\n outputs = self.layernorm_before_attention(hidden_states)\n outputs = self.self_attention(outputs, outputs, attention_mask, position_bias, output_attentions, past_key_values, use_cache)\n outputs, attn_weights, current_key_value = outputs\n if self.dropout is not None:\n outputs = self.dropout(outputs)\n hidden_states = hidden_states + outputs\n return (hidden_states, attn_weights, current_key_value)", "docstring": "Args:\n hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`):\n Input of transformer block(self-attention block). 
It can be the raw embedding of a batch of sequences.\n attention_mask (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):\n Avoid invalid areas to participate in the calculation of self-attention.\n position_bias (`torch.Tensor` of shape `(batch, len_seq, len_seq)`):\n Provide positional information to self-attention block.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers.\n past_key_values (`Tuple(torch.FloatTensor)`, *optional*):\n Cached past key and value projection states.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`)."} +{"repo": "tf-quant-finance", "function": "def cashflows(self, market: pmd.ProcessedMarketData, past_fixing: Optional[types.FloatTensor]=None, name: Optional[str]=None) -> Tuple[types.DateTensor, types.FloatTensor]:\n name = name or self._name + '_cashflows'\n with tf.name_scope(name):\n _, forward_rates = self.forward_rates(market, past_fixing=past_fixing)\n coupon_rate = forward_rates + tf.expand_dims(self._spread, axis=-1)\n notional = tf.expand_dims(self._notional, axis=-1)\n cashflows = notional * (self._daycount_fractions * coupon_rate)\n return (self._coupon_end_dates, cashflows)", "docstring": "Returns cashflows for the floating leg.\n\nArgs:\n market: An instance of `ProcessedMarketData`.\n past_fixing: An optional `Tensor` of shape compatible with\n `batch_shape + [1]`. Represents the fixings for the cashflows as\n observed at `market.date`.\n name: Python str. The name to give to the ops created by this function.\n Default value: `None` which maps to 'cashflows'.\n\nReturns:\n A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]` and\n containing the dates and the corresponding cashflows price for each\n stream based on the input market data."} +{"repo": "beam", "function": "def create_rag_adapter() -> EmbeddingTypeAdapter[Chunk, Chunk]:\n return EmbeddingTypeAdapter(input_fn=_extract_chunk_text, output_fn=_add_embedding_fn)", "docstring": "Creates adapter for converting between Chunk and Embedding types.\n\nThe adapter:\n- Extracts text from Chunk.content.text for embedding\n- Creates Embedding objects from model output\n- Sets Embedding in Chunk.embedding\n\nReturns:\n EmbeddingTypeAdapter configured for RAG pipeline types"} +{"repo": "keras", "function": "class AveragePooling1D(BasePooling):\n\n def __init__(self, pool_size, strides=None, padding='valid', data_format=None, name=None, **kwargs):\n super().__init__(pool_size, strides, pool_dimensions=1, pool_mode='average', padding=padding, data_format=data_format, name=name, **kwargs)", "docstring": "Average pooling for temporal data.\n\nDownsamples the input representation by taking the average value over the\nwindow defined by `pool_size`. The window is shifted by `strides`. The\nresulting output when using \"valid\" padding option has a shape of:\n`output_shape = (input_shape - pool_size + 1) / strides)`\n\nThe resulting output shape when using the \"same\" padding option is:\n`output_shape = input_shape / strides`\n\nArgs:\n pool_size: int, size of the max pooling window.\n strides: int or None. Specifies how much the pooling window moves\n for each pooling step. If None, it will default to `pool_size`.\n padding: string, either `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. 
`\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, steps, features)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, features, steps)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.\n\nInput shape:\n\n- If `data_format=\"channels_last\"`:\n 3D tensor with shape `(batch_size, steps, features)`.\n- If `data_format=\"channels_first\"`:\n 3D tensor with shape `(batch_size, features, steps)`.\n\nOutput shape:\n\n- If `data_format=\"channels_last\"`:\n 3D tensor with shape `(batch_size, downsampled_steps, features)`.\n- If `data_format=\"channels_first\"`:\n 3D tensor with shape `(batch_size, features, downsampled_steps)`.\n\nExamples:\n\n`strides=1` and `padding=\"valid\"`:\n\n>>> x = np.array([1., 2., 3., 4., 5.])\n>>> x = np.reshape(x, [1, 5, 1])\n>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,\n... strides=1, padding=\"valid\")\n>>> avg_pool_1d(x)\n\n`strides=2` and `padding=\"valid\"`:\n\n>>> x = np.array([1., 2., 3., 4., 5.])\n>>> x = np.reshape(x, [1, 5, 1])\n>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,\n... strides=2, padding=\"valid\")\n>>> avg_pool_1d(x)\n\n`strides=1` and `padding=\"same\"`:\n\n>>> x = np.array([1., 2., 3., 4., 5.])\n>>> x = np.reshape(x, [1, 5, 1])\n>>> avg_pool_1d = keras.layers.AveragePooling1D(pool_size=2,\n... strides=1, padding=\"same\")\n>>> avg_pool_1d(x)"} +{"repo": "transformers", "function": "def get_image_processor_dict(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> tuple[dict[str, Any], dict[str, Any]]:\n cache_dir = kwargs.pop('cache_dir', None)\n force_download = kwargs.pop('force_download', False)\n resume_download = kwargs.pop('resume_download', None)\n proxies = kwargs.pop('proxies', None)\n token = kwargs.pop('token', None)\n use_auth_token = kwargs.pop('use_auth_token', None)\n local_files_only = kwargs.pop('local_files_only', False)\n revision = kwargs.pop('revision', None)\n subfolder = kwargs.pop('subfolder', '')\n image_processor_filename = kwargs.pop('image_processor_filename', IMAGE_PROCESSOR_NAME)\n from_pipeline = kwargs.pop('_from_pipeline', None)\n from_auto_class = kwargs.pop('_from_auto', False)\n if use_auth_token is not None:\n warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n if token is not None:\n raise ValueError('`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.')\n token = use_auth_token\n user_agent = {'file_type': 'image processor', 'from_auto_class': from_auto_class}\n if from_pipeline is not None:\n user_agent['using_pipeline'] = from_pipeline\n if is_offline_mode() and (not local_files_only):\n logger.info('Offline mode: forcing local_files_only=True')\n local_files_only = True\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n is_local = os.path.isdir(pretrained_model_name_or_path)\n if os.path.isdir(pretrained_model_name_or_path):\n image_processor_file = os.path.join(pretrained_model_name_or_path, image_processor_filename)\n if os.path.isfile(pretrained_model_name_or_path):\n resolved_image_processor_file = pretrained_model_name_or_path\n is_local = True\n elif is_remote_url(pretrained_model_name_or_path):\n image_processor_file = pretrained_model_name_or_path\n resolved_image_processor_file = download_url(pretrained_model_name_or_path)\n else:\n image_processor_file = image_processor_filename\n try:\n resolved_image_processor_file = cached_file(pretrained_model_name_or_path, image_processor_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder)\n except OSError:\n raise\n except Exception:\n raise OSError(f\"Can't load image processor for '{pretrained_model_name_or_path}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory containing a {image_processor_filename} file\")\n try:\n with open(resolved_image_processor_file, encoding='utf-8') as reader:\n text = reader.read()\n image_processor_dict = json.loads(text)\n except json.JSONDecodeError:\n raise OSError(f\"It looks like the config file at '{resolved_image_processor_file}' is not a valid JSON file.\")\n if is_local:\n logger.info(f'loading configuration file {resolved_image_processor_file}')\n else:\n logger.info(f'loading configuration file {image_processor_file} from cache at {resolved_image_processor_file}')\n return (image_processor_dict, kwargs)", "docstring": "From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a\nimage processor of type [`~image_processor_utils.ImageProcessingMixin`] using `from_dict`.\n\nParameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can\n specify the folder name here.\n image_processor_filename (`str`, *optional*, defaults to `\"config.json\"`):\n The name of the file in the model directory to use for the image processor config.\n\nReturns:\n `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the image processor object."} +{"repo": "fhir-py", "function": "def matches(self, regex: str) -> 'Builder':\n param_nodes = self._function_args_to_nodes(self.node, [regex])\n return self._to_builder(_evaluation.MatchesFunction(self.node.context, self.node, param_nodes))", "docstring": "The FHIRPath matches() function.\n\nArgs:\n regex: a regular expression to match against the parent element.\n\nReturns:\n An expression that evaluates to 
True if the parent matches the given\n regular expression."} +{"repo": "mobly", "function": "def _parse_getprop_output(self, output):\n output = output.decode('utf-8', errors='ignore').replace('\\r\\n', '\\n')\n results = {}\n for line in output.split(']\\n'):\n if not line:\n continue\n try:\n name, value = line.split(': ', 1)\n except ValueError:\n logging.debug('Failed to parse adb getprop line %s', line)\n continue\n name = name.strip()[1:-1]\n if value and value[0] == '[':\n value = value[1:]\n results[name] = value\n return results", "docstring": "Parses the raw output of `adb shell getprop` into a dictionary.\n\nArgs:\n output: byte str, the raw output of the `adb shell getprop` call.\n\nReturns:\n dict, name-value pairs of the properties."} +{"repo": "tensorflow", "function": "def get_grappler_config(optimizers_list):\n config = _config_pb2.ConfigProto()\n rewrite_options = config.graph_options.rewrite_options\n for optimizer in optimizers_list:\n rewrite_options.optimizers.append(optimizer)\n return config", "docstring": "Creates a tf.compat.v1.ConfigProto for configuring Grappler.\n\nArgs:\n optimizers_list: List of strings that represents the list of optimizers.\n\nReturns:\n tf.ConfigProto."} +{"repo": "pyglove", "function": "def is_compatible(self, other: 'Schema') -> bool:\n if not isinstance(other, Schema):\n raise TypeError(f\"Argument 'other' should be a Schema object. Encountered {other}.\")\n for key_spec in other.keys():\n if key_spec not in self:\n return False\n for key_spec, field in self.items():\n if key_spec not in other:\n return False\n if not field.value.is_compatible(other[key_spec].value):\n return False\n return True", "docstring": "Returns whether current schema is compatible with the other schema.\n\nNOTE(daiyip): schema A is compatible with schema B when:\nschema A and schema B have the same keys, with compatible values specs.\n\nArgs:\n other: Other schema.\n\nReturns:\n True if values that is acceptable to the other schema is acceptable to\n current schema.\nRaises:\n TypeError: If `other` is not a schema object."} +{"repo": "transformers", "function": "class CLIPImageProcessor(BaseImageProcessor):\n model_input_names = ['pixel_values']\n\n def __init__(self, do_resize: bool=True, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[Dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:\n super().__init__(**kwargs)\n size = size if size is not None else {'shortest_edge': 224}\n size = get_size_dict(size, default_to_square=False)\n crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}\n crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')\n self.do_resize = do_resize\n self.size = size\n self.resample = resample\n self.do_center_crop = do_center_crop\n self.crop_size = crop_size\n self.do_rescale = do_rescale\n self.rescale_factor = rescale_factor\n self.do_normalize = do_normalize\n self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN\n self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD\n self.do_convert_rgb = do_convert_rgb\n self._valid_processor_keys = ['images', 'do_resize', 'size', 'resample', 'do_center_crop', 'crop_size', 'do_rescale', 
'rescale_factor', 'do_normalize', 'image_mean', 'image_std', 'do_convert_rgb', 'return_tensors', 'data_format', 'input_data_format']\n if 'use_square_size' in kwargs and kwargs['use_square_size']:\n self.size = {'height': size['shortest_edge'], 'width': size['shortest_edge']}\n delattr(self, 'use_square_size')\n\n def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n \"\"\"\n Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred.\n \"\"\"\n default_to_square = True\n if 'shortest_edge' in size:\n size = size['shortest_edge']\n default_to_square = False\n elif 'height' in size and 'width' in size:\n size = (size['height'], size['width'])\n else:\n raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)\n return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)\n\n def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing. Shortest edge of the image is resized to size[\"shortest_edge\"], with\n the longest edge resized to keep the input aspect ratio.\n resample (`int`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. 
Only\n has an effect if `do_resize` is set to `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to\n `True`.\n do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):\n Whether to convert the image to RGB.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size, param_name='size', default_to_square=False)\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb\n validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)\n images = make_flat_list_of_images(images)\n if not valid_images(images):\n raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)\n if do_convert_rgb:\n images = [convert_to_rgb(image) for image in images]\n images = [to_numpy_array(image) for image in images]\n if do_rescale and is_scaled_image(images[0]):\n logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(images[0])\n all_images = []\n for image in images:\n if do_resize:\n image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n if do_center_crop:\n image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)\n if do_rescale:\n image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n if do_normalize:\n image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n all_images.append(image)\n images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images]\n data = {'pixel_values': images}\n return BatchFeature(data=data, tensor_type=return_tensors)", "docstring": "Constructs a CLIP image processor.\n\nArgs:\n do_resize (`bool`, *optional*, defaults to `True`):\n Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by\n `do_resize` in the `preprocess` method.\n size (`Dict[str, int]` *optional*, defaults to `{\"shortest_edge\": 224}`):\n Size of the image after resizing. 
The shortest edge of the image is resized to size[\"shortest_edge\"], with\n the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`\n method.\n resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):\n Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.\n do_center_crop (`bool`, *optional*, defaults to `True`):\n Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the\n `preprocess` method.\n crop_size (`Dict[str, int]` *optional*, defaults to 224):\n Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`\n method.\n do_rescale (`bool`, *optional*, defaults to `True`):\n Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in\n the `preprocess` method.\n rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):\n Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`\n method.\n do_normalize (`bool`, *optional*, defaults to `True`):\n Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.\n image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):\n Mean to use if normalizing the image. This is a float or list of floats the length of the number of\n channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.\n image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):\n Standard deviation to use if normalizing the image. This is a float or list of floats the length of the\n number of channels in the image. 
Can be overridden by the `image_std` parameter in the `preprocess` method.\n Can be overridden by the `image_std` parameter in the `preprocess` method.\n do_convert_rgb (`bool`, *optional*, defaults to `True`):\n Whether to convert the image to RGB."} +{"repo": "transformers", "function": "class SuperModel(SuperPreTrainedModel):\n\n def __init__(self, config: SuperConfig):\n super().__init__(config)\n self.padding_idx = config.pad_token_id\n self.vocab_size = config.vocab_size\n self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)\n self.layers = nn.ModuleList([SuperDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])\n self.norm = SuperRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n self.rotary_emb = SuperRotaryEmbedding(config=config)\n self.gradient_checkpointing = False\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, value):\n self.embed_tokens = value\n\n @can_return_tuple\n @add_start_docstrings_to_model_forward(SUPER_INPUTS_DOCSTRING)\n def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[tuple, CausalLMOutputWithPast]:\n out = super().forward(input_ids, attention_mask, position_ids, past_key_values, inputs_embeds, use_cache, output_attentions, output_hidden_states, return_dict, cache_position)\n out.logits *= 2 ** 4\n return out\n\n def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):\n if self.config._attn_implementation == 'flash_attention_2':\n if attention_mask is not None and (attention_mask == 0.0).any():\n return attention_mask\n return None\n if self.config._attn_implementation == 'flex_attention':\n if isinstance(attention_mask, torch.Tensor):\n attention_mask = make_flex_block_causal_mask(attention_mask)\n return attention_mask\n past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0\n using_static_cache = isinstance(past_key_values, StaticCache)\n if self.config._attn_implementation == 'sdpa' and (not using_static_cache) and (not output_attentions):\n if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):\n return None\n dtype = input_tensor.dtype\n sequence_length = input_tensor.shape[1]\n if using_static_cache:\n target_length = past_key_values.get_max_cache_shape()\n else:\n target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1\n causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])\n if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):\n min_dtype = torch.finfo(dtype).min\n 
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)\n return causal_mask\n\n @staticmethod\n def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n \"\"\"\n Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\n Args:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache,\n to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`torch.Tensor`):\n Batch size.\n \"\"\"\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Transformer decoder consisting of *config.num_hidden_layers* layers. 
Each layer is a [`SuperDecoderLayer`]\n\nArgs:\n config: SuperConfig"} +{"repo": "keras", "function": "def potentially_ragged_concat(tensors):\n if len(tensors) == 1:\n return tensors[0]\n elif isinstance(tensors[0], tf.SparseTensor):\n return tf.sparse.concat(axis=0, sp_inputs=tensors)\n elif isinstance(tensors[0], tf.RaggedTensor):\n return tf.concat(tensors, axis=0)\n non_batch_shapes = tf.stack([tf.shape(tensor)[1:] for tensor in tensors])\n constant_dims = tf.math.reduce_all(non_batch_shapes == non_batch_shapes[:1], axis=0)\n if tf.math.reduce_all(constant_dims).numpy().item():\n if _is_scalar(tensors[0]):\n return tf.stack(tensors, axis=0)\n else:\n return tf.concat(tensors, axis=0)\n constant_inner_dimensions = constant_dims.numpy().tolist()[::-1].index(False)\n if constant_inner_dimensions == 0:\n constant_inner_shape = None\n else:\n constant_inner_shape = tensors[0].shape[-constant_inner_dimensions:]\n return tf.ragged.constant([tensor.numpy() for tensor in tensors], inner_shape=constant_inner_shape).merge_dims(0, 1)", "docstring": "Concats `Tensor`s along their first dimension.\n\nArgs:\n tensors: List of `Tensor`s.\n\nReturns:\n Concatenation of the inputs along the first dimension -- of type\n `np.ndarray` if all input shapes are compatible, or `tf.RaggedTensor`\n if not."} +{"repo": "tensorflow", "function": "def _partitioner(shape, dtype):\n if axis >= len(shape):\n raise ValueError(f'Cannot partition variable along axis {axis} when shape is only {shape}')\n dtype = dtypes.as_dtype(dtype)\n if dtype.base_dtype == dtypes.string:\n bytes_per_element = bytes_per_string_element\n else:\n bytes_per_element = dtype.size\n total_size_bytes = shape.num_elements() * bytes_per_element\n partitions = total_size_bytes / min_slice_size\n partitions_list = [1] * len(shape)\n partitions_list[axis] = max(1, min(shape.dims[axis].value, max_partitions, int(math.ceil(partitions))))\n return partitions_list", "docstring": "Partitioner that partitions list for a variable of given shape and type.\n\nEx: Consider partitioning a variable of type float32 with\n shape=[1024, 1024].\n If `max_partitions` >= 16, this function would return\n [(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].\n If `max_partitions` < 16, this function would return\n [`max_partitions`, 1].\n\nArgs:\n shape: Shape of the variable.\n dtype: Type of the variable.\n\nReturns:\n List of partitions for each axis (currently only one axis can be\n partitioned).\n\nRaises:\n ValueError: If axis to partition along does not exist for the variable."} +{"repo": "tensorflow", "function": "def lu_matrix_inverse(lower_upper, perm, validate_args=False, name=None):\n with ops.name_scope(name or 'lu_matrix_inverse'):\n lower_upper = ops.convert_to_tensor(lower_upper, dtype_hint=dtypes.float32, name='lower_upper')\n perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')\n assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)\n if assertions:\n with ops.control_dependencies(assertions):\n lower_upper = array_ops.identity(lower_upper)\n perm = array_ops.identity(perm)\n shape = array_ops.shape(lower_upper)\n return lu_solve(lower_upper, perm, rhs=eye(shape[-1], batch_shape=shape[:-2], dtype=lower_upper.dtype), validate_args=False)", "docstring": "Computes the inverse given the LU decomposition(s) of one or more matrices.\n\nThis op is conceptually identical to,\n\n```python\ninv_X = tf.lu_matrix_inverse(*tf.linalg.lu(X))\ntf.assert_near(tf.matrix_inverse(X), inv_X)\n# ==> True\n```\n\nNote: this function does 
not verify the implied matrix is actually invertible\nnor is this condition checked even when `validate_args=True`.\n\nArgs:\n lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,\n matmul(L, U)) = X` then `lower_upper = L + U - eye`.\n perm: `p` as returned by `tf.linag.lu`, i.e., if `matmul(P, matmul(L, U)) =\n X` then `perm = argmax(P)`.\n validate_args: Python `bool` indicating whether arguments should be checked\n for correctness. Note: this function does not verify the implied matrix is\n actually invertible, even when `validate_args=True`.\n Default value: `False` (i.e., don't validate arguments).\n name: Python `str` name given to ops managed by this object.\n Default value: `None` (i.e., 'lu_matrix_inverse').\n\nReturns:\n inv_x: The matrix_inv, i.e.,\n `tf.matrix_inverse(tf.linalg.lu_reconstruct(lu, perm))`.\n\n#### Examples\n\n```python\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nx = [[[3., 4], [1, 2]],\n [[7., 8], [3, 4]]]\ninv_x = tf.linalg.lu_matrix_inverse(*tf.linalg.lu(x))\ntf.assert_near(tf.matrix_inverse(x), inv_x)\n# ==> True\n```"} +{"repo": "beam", "function": "def reducer_override(self, obj):\n t = type(obj)\n try:\n is_anyclass = issubclass(t, type)\n except TypeError:\n is_anyclass = False\n if is_anyclass:\n return _class_reduce(obj)\n elif isinstance(obj, types.FunctionType):\n return self._function_reduce(obj)\n else:\n return NotImplemented", "docstring": "Type-agnostic reducing callback for function and classes.\n\nFor performance reasons, subclasses of the C `pickle.Pickler` class\ncannot register custom reducers for functions and classes in the\ndispatch_table attribute. Reducers for such types must instead\nimplemented via the special `reducer_override` method.\n\nNote that this method will be called for any object except a few\nbuiltin-types (int, lists, dicts etc.), which differs from reducers\nin the Pickler's dispatch_table, each of them being invoked for\nobjects of a specific type only.\n\nThis property comes in handy for classes: although most classes are\ninstances of the ``type`` metaclass, some of them can be instances\nof other custom metaclasses (such as enum.EnumMeta for example). In\nparticular, the metaclass will likely not be known in advance, and\nthus cannot be special-cased using an entry in the dispatch_table.\nreducer_override, among other things, allows us to register a\nreducer that will be called for any class, independently of its\ntype.\n\nNotes:\n\n* reducer_override has the priority over dispatch_table-registered\nreducers.\n* reducer_override can be used to fix other limitations of\n cloudpickle for other types that suffered from type-specific\n reducers, such as Exceptions. See\n https://github.com/cloudpipe/cloudpickle/issues/248"} +{"repo": "tensorflow", "function": "def dynamic_slice(operand, start_indices, slice_sizes):\n operand = tf_np.asarray(operand).data\n start_indices = tf_np.asarray(start_indices, np.int32).data\n idx = _get_dynamic_indices(operand, start_indices, slice_sizes)\n if idx is not None:\n operand = array_ops.gather_nd(operand, idx)\n return tf_np.asarray(operand)", "docstring": "Slicing operation where the indices can be dynamic vlaues.\n\nSee the docstring of `jax.lax.dynamic_slice`\n(https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dynamic_slice.html)\nfor details.\n\nArgs:\n operand: an array to slice.\n start_indices: a vector of integers, one per dimension. The starts of the\n slice. 
The vector can be dynamic.\n slice_sizes: a list of integers, one per dimension. The sizes of the slice.\n\nReturns:\n An array containing the slice, with shape equal to `slice_sizes`."} +{"repo": "starthinker", "function": "def _dict_to_feed(self, parse=True):\n if not self.raw_feed:\n return []\n headers = self.raw_feed[0]\n row = 1\n for item in self.feed:\n for key in iter(item.keys()):\n if key in headers:\n column = headers.index(key)\n value = self._parse_value(item[key]) if parse else item[key]\n while column >= len(self.raw_feed[row]):\n self.raw_feed[row].append('')\n self.raw_feed[row][column] = value\n row += 1\n return self.raw_feed", "docstring": "Turns a feed into a list of strings to be written back to the feed.\n\nArgs:\n feed: Dictionary list to convert into a list of lists of strings.\n\nReturns:\n List of list of strings representing the values of the feed."} +{"repo": "transformers", "function": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n prefix_ones = [1] * len(self.prefix_tokens)\n suffix_ones = [1] * len(self.suffix_tokens)\n if token_ids_1 is None:\n return prefix_ones + [0] * len(token_ids_0) + suffix_ones\n return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token."} +{"repo": "tensorflow", "function": "def _parse_lambda(lam):\n mod = inspect.getmodule(lam)\n f = inspect.getsourcefile(lam)\n def_line = lam.__code__.co_firstlineno\n lines = linecache.getlines(f, mod.__dict__)\n source = ''.join(lines)\n all_nodes = parse(source, preamble_len=0, single_node=False)\n search_nodes = []\n for node in all_nodes:\n if getattr(node, 'lineno', def_line) <= def_line:\n search_nodes.append(node)\n else:\n break\n lambda_nodes = []\n for node in search_nodes:\n lambda_nodes.extend((n for n in gast.walk(node) if isinstance(n, gast.Lambda)))\n candidates = []\n for ln in lambda_nodes:\n minl, maxl = (MAX_SIZE, 0)\n for n in gast.walk(ln):\n minl = min(minl, getattr(n, 'lineno', minl))\n lineno = getattr(n, 'lineno', maxl)\n end_lineno = getattr(n, 'end_lineno', None)\n if end_lineno is not None:\n lineno = end_lineno\n maxl = max(maxl, lineno)\n if minl <= def_line <= maxl:\n candidates.append((ln, minl, maxl))\n if len(candidates) == 1:\n (node, minl, maxl), = candidates\n return _without_context(node, lines, minl, maxl)\n elif not candidates:\n lambda_codes = '\\n'.join([unparse(l) for l in lambda_nodes])\n raise errors.UnsupportedLanguageElementError(f'could not parse the source code of {lam}: no matching AST found among candidates:\\n{lambda_codes}')\n matches = [v for v in candidates if _node_matches_argspec(v[0], lam)]\n if len(matches) == 1:\n (node, minl, maxl), = matches\n return 
_without_context(node, lines, minl, maxl)\n matches = '\\n'.join(('Match {}:\\n{}\\n'.format(i, unparse(node, include_encoding_marker=False)) for i, (node, _, _) in enumerate(matches)))\n raise errors.UnsupportedLanguageElementError(f'could not parse the source code of {lam}: found multiple definitions with identical signatures at the location. This error may be avoided by defining each lambda on a single line and with unique argument names. The matching definitions were:\\n{matches}')", "docstring": "Returns the AST and source code of given lambda function.\n\nArgs:\n lam: types.LambdaType, Python function/method/class\n\nReturns:\n gast.AST, Text: the parsed AST node; the source code that was parsed to\n generate the AST (including any prefixes that this function may have added)."} +{"repo": "python-fire", "function": "def SplitLine(self, line, width):\n lines = []\n chunk = ''\n w = 0\n keep = False\n for normal, control in self.SplitIntoNormalAndControl(line):\n keep = True\n while True:\n n = width - w\n w += len(normal)\n if w <= width:\n break\n lines.append(chunk + normal[:n])\n chunk = ''\n keep = False\n w = 0\n normal = normal[n:]\n chunk += normal + control\n if chunk or keep:\n lines.append(chunk)\n return lines", "docstring": "Splits line into width length chunks.\n\nArgs:\n line: The line to split.\n width: The width of each chunk except the last which could be smaller than\n width.\n\nReturns:\n A list of chunks, all but the last with display width == width."} +{"repo": "transformers", "function": "class ElectraForPreTrainingOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Output type of [`ElectraForPreTraining`].\n\nArgs:\n loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):\n Total loss of the ELECTRA objective.\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Prediction scores of the head (scores for each token before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "tensorflow", "function": "def cond(self, name='cond'):\n with self._name_scope(name):\n return self._cond()", "docstring": "Returns the condition number of this linear operator.\n\nArgs:\n name: A name for this `Op`.\n\nReturns:\n Shape `[B1,...,Bb]` `Tensor` of same `dtype` as `self`."} +{"repo": "tensorflow", "function": "def __str__(self):\n info = {'section': self._section, 'config': self.config, 'req_type': self._req_type, 'req': str(self.req), 'range': str(self.range), 'exclude': str(self.exclude), 'include': str(self.include), 'init': str(self._initialized)}\n req_str = '\\n >>> _Reqs 
Instance <<<\\n'\n req_str += 'Section: {section}\\n'\n req_str += 'Configuration name: {config}\\n'\n req_str += 'Requirement type: {req_type}\\n'\n req_str += 'Requirement: {req}\\n'\n req_str += 'Range: {range}\\n'\n req_str += 'Exclude: {exclude}\\n'\n req_str += 'Include: {include}\\n'\n req_str += 'Initialized: {init}\\n\\n'\n return req_str.format(**info)", "docstring": "Prints a requirement and its components.\n\nReturns:\n String that has concatenated information about a requirement."} +{"repo": "tensorflow", "function": "def read_file_to_string(filename, binary_mode=False):\n if binary_mode:\n f = FileIO(filename, mode='rb')\n else:\n f = FileIO(filename, mode='r')\n return f.read()", "docstring": "Reads the entire contents of a file to a string.\n\nArgs:\n filename: string, path to a file\n binary_mode: whether to open the file in binary mode or not. This changes\n the type of the object returned.\n\nReturns:\n contents of the file as a string or bytes.\n\nRaises:\n errors.OpError: Raises a variety of errors that are subtypes e.g.\n `NotFoundError` etc."} +{"repo": "tensorflow", "function": "def GenerateTableHtml(items, keys_to_print, display_index=True):\n html = ''\n html += '<table>\\n'\n html += '<tr>\\n'\n if display_index:\n html += '<th>index</th>'\n for h, mapper in keys_to_print:\n html += '<th>%s</th>' % h\n html += '</tr>\\n'\n for idx, tensor in enumerate(items):\n html += '<tr>\\n'\n if display_index:\n html += '<td>%d</td>' % idx\n for h, mapper in keys_to_print:\n val = tensor[h] if h in tensor else None\n val = val if mapper is None else mapper(val)\n html += '<td>%s</td>\\n' % val\n html += '</tr>\\n'\n html += '</table>\\n'\n return html", "docstring": "Given a list of object values and keys to print, make an HTML table.\n\nArgs:\n items: Items to print, an array of dicts.\n keys_to_print: (key, display_fn). `key` is a key in the object. i.e.\n items[0][key] should exist. display_fn is the mapping function on display.\n i.e. the displayed html cell will have the string returned by\n `mapping_fn(items[0][key])`.\n display_index: add a column which is the index of each row in `items`.\n\nReturns:\n An html table."} +{"repo": "pytype", "function": "class GenericType(Type):\n base_type: NamedType | ClassType | LateType\n parameters: tuple[TypeU, ...]\n\n @property\n def name(self):\n return self.base_type.name\n\n @property\n def element_type(self):\n \"\"\"Type of the contained type, assuming we only have one type parameter.\"\"\"\n element_type, = self.parameters\n return element_type", "docstring": "Generic type. Takes a base type and type parameters.\n\nThis is used for homogeneous tuples, lists, dictionaries, user classes, etc.\n\nAttributes:\n base_type: The base type. Instance of Type.\n parameters: Type parameters. Tuple of instances of Type."} +{"repo": "tensorflow", "function": "def advise(self, options):\n advise_pb = tfprof_output_pb2.AdviceProto()\n opts = _build_advisor_options(options)\n advise_pb.ParseFromString(print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))\n return advise_pb", "docstring": "Automatically detect problems and generate reports.\n\nArgs:\n options: A dict of options. See ALL_ADVICE example above.\n\nReturns:\n An Advice proto that contains the reports from all checkers."} +{"repo": "tensorflow", "function": "def from_config(cls, config):\n return cls(**config)", "docstring": "Creates a regularizer from its config.\n\nThis method is the reverse of `get_config`,\ncapable of instantiating the same regularizer from the config\ndictionary.\n\nThis method is used by saving and loading models to HDF5 formats,\nKeras model cloning, some visualization utilities,\nand exporting models to and from JSON.\n\nArgs:\n config: A Python dictionary, typically the output of get_config.\n\nReturns:\n A regularizer instance."} +{"repo": "transformers", "function": "def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5):\n logger.warning_once('`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use `post_process_semantic_segmentation`.')\n out_logits, raw_masks = (outputs.logits, outputs.pred_masks)\n empty_label = out_logits.shape[-1] - 1\n preds = []\n\n def to_tuple(tup):\n if isinstance(tup, tuple):\n return tup\n return tuple(tup.tolist())\n for cur_logits, cur_masks, size in zip(out_logits, raw_masks, target_sizes):\n cur_scores, cur_labels = cur_logits.softmax(-1).max(-1)\n keep = cur_labels.ne(empty_label) & (cur_scores > threshold)\n cur_scores = cur_scores[keep]\n cur_labels = cur_labels[keep]\n cur_masks = cur_masks[keep]\n cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode='bilinear').squeeze(1)\n cur_masks = (cur_masks.sigmoid() > mask_threshold) * 1\n predictions = {'scores': cur_scores, 'labels': cur_labels, 'masks': cur_masks}\n preds.append(predictions)\n return preds", "docstring": "Converts the output of [`DetrForSegmentation`] into image segmentation predictions.
Only supports PyTorch.\n\nArgs:\n outputs ([`DetrSegmentationOutput`]):\n Raw outputs of the model.\n target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`):\n Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction.\n threshold (`float`, *optional*, defaults to 0.9):\n Threshold to use to filter out queries.\n mask_threshold (`float`, *optional*, defaults to 0.5):\n Threshold to use when turning the predicted masks into binary values.\nReturns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image\n in the batch as predicted by the model."} +{"repo": "tensorflow", "function": "class KLDivergence(MeanMetricWrapper):\n\n def __init__(self, name='kullback_leibler_divergence', dtype=None):\n super(KLDivergence, self).__init__(kullback_leibler_divergence, name, dtype=dtype)", "docstring": "Computes Kullback-Leibler divergence metric between `y_true` and `y_pred`.\n\n`metric = y_true * log(y_true / y_pred)`\n\nArgs:\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nStandalone usage:\n\n>>> m = tf.keras.metrics.KLDivergence()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])\n>>> m.result().numpy()\n0.45814306\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],\n... sample_weight=[1, 0])\n>>> m.result().numpy()\n0.9162892\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(optimizer='sgd',\n loss='mse',\n metrics=[tf.keras.metrics.KLDivergence()])\n```"} +{"repo": "transformers", "function": "def forward(self, pixel_values: Optional[torch.Tensor]=None, bool_masked_pos: Optional[torch.BoolTensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if pixel_values is None:\n raise ValueError('You have to specify pixel_values')\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n embedding_output = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding)\n encoder_outputs = self.encoder(embedding_output, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = encoder_outputs[0]\n sequence_output = self.layernorm(sequence_output)\n if not return_dict:\n head_outputs = (sequence_output,)\n return head_outputs + encoder_outputs[1:]\n return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)", "docstring": "bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):\n Boolean masked positions. 
Indicates which patches are masked (1) and which aren't (0).\n\nExamples:\n\n```python\n>>> from transformers import AutoImageProcessor, ViTMSNModel\n>>> import torch\n>>> from PIL import Image\n>>> import requests\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> image_processor = AutoImageProcessor.from_pretrained(\"facebook/vit-msn-small\")\n>>> model = ViTMSNModel.from_pretrained(\"facebook/vit-msn-small\")\n>>> inputs = image_processor(images=image, return_tensors=\"pt\")\n>>> with torch.no_grad():\n... outputs = model(**inputs)\n>>> last_hidden_states = outputs.last_hidden_state\n```"} +{"repo": "transformers", "function": "def resize(self, images: 'torch.Tensor', size: SizeDict, interpolation: Optional['F.InterpolationMode']=None, size_divisor: Optional[int]=None) -> 'torch.Tensor':\n if interpolation is None:\n interpolation = self.resample\n shorter = size.shortest_edge\n longer = int(MAX_LONGER_EDGE / MAX_SHORTER_EDGE * shorter)\n heights = images.shape[-2]\n widths = images.shape[-1]\n if heights < widths:\n new_heights = shorter\n new_widths = widths * (shorter / heights)\n else:\n new_heights = heights * (shorter / widths)\n new_widths = shorter\n if max(new_heights, new_widths) > longer:\n scale = longer / max(new_heights, new_widths)\n new_heights = new_heights * scale\n new_widths = new_widths * scale\n new_heights = int(new_heights + 0.5)\n new_widths = int(new_widths + 0.5)\n if size_divisor is not None:\n new_heights = new_heights // size_divisor * size_divisor\n new_widths = new_widths // size_divisor * size_divisor\n return F.resize(images, [new_heights, new_widths], interpolation=interpolation)", "docstring": "Resize an image or batch of images to specified size.\n\nArgs:\n images (`torch.Tensor`): Image or batch of images to resize.\n size (`Dict[str, int]`): Size dictionary with shortest_edge key.\n interpolation (`F.InterpolationMode`, *optional*): Interpolation method to use.\n size_divisor (`int`, *optional*): Value to ensure height/width are divisible by.\n\nReturns:\n `torch.Tensor`: Resized image or batch of images."} +{"repo": "tensorflow", "function": "def gather_gpu_devices():\n try:\n dev_info = _gather_gpu_devices_proc()\n if not dev_info:\n raise ValueError('No devices found')\n return dev_info\n except (IOError, ValueError, errors.OpError):\n pass\n try:\n return _gather_gpu_devices_cudart()\n except (OSError, ValueError, NotImplementedError, errors.OpError):\n return []", "docstring": "Gather gpu device info.\n\nReturns:\n A list of test_log_pb2.GPUInfo messages."} +{"repo": "transformers", "function": "def _serialize_io(value, debug_path: Optional[str]=None, use_repr: bool=True, path_to_value: Optional[str]=None):\n if isinstance(value, (list, tuple)):\n return [_serialize_io(v, debug_path=debug_path, use_repr=use_repr, path_to_value=f'{path_to_value}_{i}') for i, v in enumerate(value)]\n if isinstance(value, dict):\n return {k: _serialize_io(v, debug_path=debug_path, use_repr=use_repr, path_to_value=f'{path_to_value}_{k}') for k, v in value.items()}\n if hasattr(value, '_local_tensor'):\n return _serialize_tensor_like_io(value._local_tensor, debug_path=debug_path, use_repr=use_repr, path_to_value=path_to_value)\n if isinstance(value, torch.Tensor):\n return _serialize_tensor_like_io(value, debug_path=debug_path, use_repr=use_repr, path_to_value=path_to_value)\n return _sanitize_repr_for_diff(repr(value))", "docstring": "Recursively build a JSON-serializable 
Python structure from `value`.\nTensors and DTensors become either sanitized repr strings, or are saved to disk as SafeTensors files and their\nrelative paths are recorded in the returned Python structure.\nLists/tuples/dicts are recursed into.\nAll memory addresses are replaced with a stable placeholder.\n\nArgs:\n value: Any Python object, often including torch Tensors, lists, dicts, etc.\n debug_path (`str`, *optional*, defaults to `None`): Directory to dump debug JSON and SafeTensors files.\n use_repr (bool, *optional*, defaults to `True`): Whether to save a `repr()`-ized version of the tensors as the\n `value` property in the associated FULL_TENSORS.json file, or to store full tensors in separate SafeTensors\n files and store the relative path to that file in the `value` property.\n path_to_value (`str`, *optional*, defaults to `None`): The file name for the SafeTensors file holding the full\n tensor value if `use_repr=False`.\n\nReturns:\n A nested Python structure (list, dict, or sanitized string) that is safe to json.dump."} +{"repo": "transformers", "function": "def set_default_language(self, language: str):\n if language not in self.config.languages:\n raise ValueError(f'{self} does not have an adapter for {language}. Supported languages: {list(self.config.languages)}')\n self.config.default_language = language", "docstring": "Set the default language code for the model. This is used when the language is not specified in the input.\n\nArgs:\n language (`str`): The language code, such as `\"en_XX\"` or `\"de_DE\"`."} +{"repo": "tensorflow", "function": "def atrous_conv2d_transpose(value, filters, output_shape, rate, padding, name=None):\n with ops.name_scope(name, 'atrous_conv2d_transpose', [value, filters, output_shape]) as name:\n value = ops.convert_to_tensor(value, name='value')\n filters = ops.convert_to_tensor(filters, name='filters')\n if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):\n raise ValueError(f'`value` channel count must be compatible with `filters` input channel count. Received: value.shape={value.get_shape()} with channel count {value.get_shape()[3]} and filters.shape={filters.get_shape()} with input channel count {filters.get_shape()[3]}.')\n if rate < 1:\n raise ValueError(f'`rate` cannot be less than one. Received: rate={rate}')\n if rate == 1:\n return conv2d_transpose(value, filters, output_shape, strides=[1, 1, 1, 1], padding=padding, data_format='NHWC')\n output_shape_ = ops.convert_to_tensor(output_shape, name='output_shape')\n if not output_shape_.get_shape().is_compatible_with(tensor_shape.TensorShape([4])):\n raise ValueError(f'`output_shape` must have shape (4,). Received: output_shape={output_shape_.get_shape()}')\n if isinstance(output_shape, tuple):\n output_shape = list(output_shape)\n if isinstance(output_shape, (list, np.ndarray)):\n if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):\n raise ValueError(f'`output_shape` channel count must be compatible with `filters` output channel count.
Received: output_shape={output_shape} with channel count {output_shape[3]} and filters.shape={filters.get_shape()} with output channel count {filters.get_shape()[3]}.')\n if padding == 'SAME':\n if filters.get_shape().is_fully_defined():\n filter_shape = filters.get_shape().as_list()\n else:\n filter_shape = array_ops.shape(filters)\n filter_height, filter_width = (filter_shape[0], filter_shape[1])\n filter_height_up = filter_height + (filter_height - 1) * (rate - 1)\n filter_width_up = filter_width + (filter_width - 1) * (rate - 1)\n pad_height = filter_height_up - 1\n pad_width = filter_width_up - 1\n pad_top = pad_height // 2\n pad_bottom = pad_height - pad_top\n pad_left = pad_width // 2\n pad_right = pad_width - pad_left\n elif padding == 'VALID':\n pad_top = 0\n pad_bottom = 0\n pad_left = 0\n pad_right = 0\n else:\n raise ValueError(f\"`padding` must be either 'VALID' or 'SAME'. Received: padding={padding}\")\n in_height = output_shape[1] + pad_top + pad_bottom\n in_width = output_shape[2] + pad_left + pad_right\n pad_bottom_extra = (rate - in_height % rate) % rate\n pad_right_extra = (rate - in_width % rate) % rate\n space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]\n value = array_ops.space_to_batch(input=value, paddings=space_to_batch_pad, block_size=rate)\n input_sizes = [rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate, (in_width + pad_right_extra) // rate, output_shape[3]]\n value = gen_nn_ops.conv2d_backprop_input(input_sizes=input_sizes, filter=filters, out_backprop=value, strides=[1, 1, 1, 1], padding='VALID', data_format='NHWC')\n batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra], [pad_left, pad_right + pad_right_extra]]\n return array_ops.batch_to_space(input=value, crops=batch_to_space_crop, block_size=rate)", "docstring": "The transpose of `atrous_conv2d`.\n\nThis operation is sometimes called \"deconvolution\" after\n(Zeiler et al., 2010), but is really the transpose (gradient) of\n`atrous_conv2d` rather than an actual deconvolution.\n\nArgs:\n value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`\n format. Its shape is `[batch, in_height, in_width, in_channels]`.\n filters: A 4-D `Tensor` with the same type as `value` and shape\n `[filter_height, filter_width, out_channels, in_channels]`. `filters`'\n `in_channels` dimension must match that of `value`. Atrous convolution is\n equivalent to standard convolution with upsampled filters with effective\n height `filter_height + (filter_height - 1) * (rate - 1)` and effective\n width `filter_width + (filter_width - 1) * (rate - 1)`, produced by\n inserting `rate - 1` zeros along consecutive elements across the\n `filters`' spatial dimensions.\n output_shape: A 1-D `Tensor` of shape representing the output shape of the\n deconvolution op, of form `[batch, out_height, out_width, out_channels]`.\n rate: A positive int32. The stride with which we sample input values across\n the `height` and `width` dimensions. Equivalently, the rate by which we\n upsample the filter values by inserting zeros across the `height` and\n `width` dimensions. In the literature, the same parameter is sometimes\n called `input stride` or `dilation`.\n padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm. 
See\n [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2)\n for more information.\n name: Optional name for the returned tensor.\n\nReturns:\n A `Tensor` with the same type as `value`.\n\nRaises:\n ValueError: If input/output depth does not match `filters`' shape, or if\n padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less\n than one, or if the output_shape is not a tensor with 4 elements.\n\nReferences:\n Deconvolutional Networks:\n [Zeiler et al., 2010]\n (https://ieeexplore.ieee.org/abstract/document/5539957)\n ([pdf]\n (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.232.4023&rep=rep1&type=pdf))"} +{"repo": "tensorflow", "function": "def save(self, file_prefix, session=None, options=None):\n graph_building = not context.executing_eagerly()\n if graph_building:\n if ops.inside_function():\n raise NotImplementedError('Calling tf.train.Checkpoint.save() from a function is not supported, as save() modifies saving metadata in ways not supported by TensorFlow Operations. Consider using tf.train.Checkpoint.write(), a lower-level API which does not update metadata. tf.train.latest_checkpoint and related APIs will not see this checkpoint.')\n if session is None:\n session = get_session()\n if self._save_counter is None:\n session.run(self.save_counter.initializer)\n if not graph_building or self._save_assign_op is None:\n with ops.colocate_with(self.save_counter):\n assign_op = self.save_counter.assign_add(1, read_value=True)\n if graph_building:\n self._save_assign_op = data_structures.NoDependency(assign_op)\n if graph_building:\n checkpoint_number = session.run(self._save_assign_op)\n else:\n checkpoint_number = assign_op.numpy()\n file_path = self.write('%s-%d' % (file_prefix, checkpoint_number), session=session, options=options)\n checkpoint_management.update_checkpoint_state_internal(save_dir=os.path.dirname(file_prefix), model_checkpoint_path=file_path, all_model_checkpoint_paths=[file_path], save_relative_paths=True)\n return file_path", "docstring": "Saves a training checkpoint and provides basic checkpoint management.\n\nThe saved checkpoint includes variables created by this object and any\ntrackable objects it depends on at the time `Checkpoint.save()` is\ncalled.\n\n`save` is a basic convenience wrapper around the `write` method,\nsequentially numbering checkpoints using `save_counter` and updating the\nmetadata used by `tf.train.latest_checkpoint`. More advanced checkpoint\nmanagement, for example garbage collection and custom numbering, may be\nprovided by other utilities which also wrap `write`\n(`tf.train.CheckpointManager` for example).\n\nArgs:\n file_prefix: A prefix to use for the checkpoint filenames\n (/path/to/directory/and_a_prefix). Names are generated based on this\n prefix and `Checkpoint.save_counter`.\n session: The session to evaluate variables in. Ignored when executing\n eagerly. 
If not provided when graph building, the default session is\n used.\n options: Optional `tf.train.CheckpointOptions` object.\n\nReturns:\n The full path to the checkpoint."} +{"repo": "transformers", "function": "class TFSemanticSegmenterOutput(ModelOutput):\n loss: tf.Tensor | None = None\n logits: Optional[tf.Tensor] = None\n hidden_states: Tuple[tf.Tensor] | None = None\n attentions: Tuple[tf.Tensor] | None = None", "docstring": "Base class for outputs of semantic segmentation models.\n\nArgs:\n loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):\n Classification scores for each pixel.\n\n <Tip warning={true}>\n\n The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is\n to avoid doing two interpolations and losing some quality when a user needs to resize the logits to the\n original image size as post-processing. You should always check your logits shape and resize as needed.\n\n </Tip>\n\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for\n the output of each layer) of shape `(batch_size, patch_size, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, patch_size, sequence_length)`.\n\n Attention weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "transformers", "function": "def can_return_loss(model_class):\n framework = infer_framework(model_class)\n if framework == 'tf':\n signature = inspect.signature(model_class.call)\n elif framework == 'pt':\n signature = inspect.signature(model_class.forward)\n else:\n signature = inspect.signature(model_class.__call__)\n for p in signature.parameters:\n if p == 'return_loss' and signature.parameters[p].default is True:\n return True\n return False", "docstring": "Check if a given model can return loss.\n\nArgs:\n model_class (`type`): The class of the model."} +{"repo": "nsscache", "function": "def FromTimestampToDateTime(ts):\n return datetime.datetime.utcfromtimestamp(ts)", "docstring": "Converts internal nss_cache timestamp to datetime object.\n\nArgs:\n ts: number of seconds since epoch\nReturns:\n datetime object"} +{"repo": "beam", "function": "def set_render_option(self, render_option):\n self._render_option = render_option", "docstring": "Sets the rendering option.\n\nArgs:\n render_option: (str) this parameter decides how the pipeline graph is\n rendered.
See display.pipeline_graph_renderer for available options."} +{"repo": "transformers", "function": "def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, UniSpeechSatForPreTrainingOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.unispeech_sat(input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n transformer_features = outputs[0]\n extract_features = self.dropout_features(outputs[1])\n logits = extract_features\n loss = quantized_features = codevector_perplexity = None\n if not return_dict:\n if loss is not None:\n return (loss, logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]\n return (logits, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]\n return UniSpeechSatForPreTrainingOutput(loss=loss, logits=logits, projected_states=transformer_features, projected_quantized_states=quantized_features, codevector_perplexity=codevector_perplexity, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "Example:\n\n```python\n>>> import torch\n>>> from transformers import AutoFeatureExtractor, UniSpeechSatForPreTraining\n>>> from transformers.models.unispeech_sat.modeling_unispeech_sat import _compute_mask_indices\n\n>>> feature_extractor = AutoFeatureExtractor.from_pretrained(\"microsoft/unispeech-sat-base\")\n>>> model = UniSpeechSatForPreTraining.from_pretrained(\"microsoft/unispeech-sat-base\")\n>>> # TODO: Add full pretraining example\n```"} +{"repo": "pyglove", "function": "def kvlist_str(kvlist: List[Tuple[str, Any, Any]], compact: bool=True, verbose: bool=False, root_indent: int=0, *, label: Optional[str]=None, bracket_type: BracketType=BracketType.ROUND, custom_format: Optional[CustomFormatFn]=None, memo: Optional[Set[int]]=None, **kwargs) -> str:\n s = io.StringIO()\n is_first = True\n bracket_start, bracket_end = bracket_chars(bracket_type)\n child_indent = root_indent + 1 if label else root_indent\n body = io.StringIO()\n for k, v, d in kvlist:\n if isinstance(d, tuple):\n include_pair = True\n for sd in d:\n if sd == v:\n include_pair = False\n break\n else:\n include_pair = v != d\n if include_pair:\n if not is_first:\n body.write(',')\n body.write(' ' if compact else '\\n')\n v = format(v, compact=compact, verbose=verbose, root_indent=child_indent, custom_format=custom_format, memo=memo, **kwargs)\n if not compact:\n body.write(_indent('', child_indent))\n if k:\n body.write(f'{k}={str_ext(v, custom_format, child_indent)}')\n else:\n body.write(str_ext(v, custom_format, child_indent))\n is_first = False\n if label and (not is_first) and (not compact):\n body.write('\\n')\n body = body.getvalue()\n if label is None:\n return body\n else:\n s.write(label)\n s.write(bracket_start)\n if body:\n if not compact:\n s.write('\\n')\n s.write(body)\n if not compact:\n s.write(_indent('', root_indent))\n s.write(bracket_end)\n return s.getvalue()", "docstring": "Formats a list of key/value pairs into a comma delimited string.\n\nArgs:\n kvlist: List of tuples in format of\n (key, value, default_value or a tuple of default values)\n compact: If True, format value in kvlist in compact form.\n verbose: If True, format value in kvlist in verbose form.\n root_indent: The indent that should be applied to values in kvlist if they are\n multi-line.\n label: (Optional) If not None, add label to brace all kv pairs.\n bracket_type: Bracket type used for embracing the kv pairs. Applicable only\n when `name` is not None.\n custom_format: An optional custom format function, which will be applied to\n each value (and child values) in kvlist. If the function returns None, it\n will fall back to the default `pg.format`.\n memo: A set of object ids that have been formatted. Used to avoid\n infinite recursion in the formatting process.\n **kwargs: Keyword arguments that will be passed through unto child\n ``Formattable`` objects.\nReturns:\n A formatted string from a list of key/value pairs delimited by comma."} +{"repo": "tf-quant-finance", "function": "def _cbnd(dh, dk, rho):\n dtype = rho.dtype\n h = tf.cast(-dh, dtype=dtype)\n k = tf.cast(-dk, dtype=dtype)\n hk = h * k\n bvn = tf.zeros_like(hk)\n hs = (h * h + k * k) / 2\n asr = tf.math.asin(rho)\n\n def transformed_bvn(hk, hs, asr):\n\n def transformed_bvn_distribution(x):\n hk_exp = tf.expand_dims(hk, axis=-1)\n hs_exp = tf.expand_dims(hs, axis=-1)\n asr_exp = tf.expand_dims(asr, axis=-1)\n sn1 = tf.math.sin(asr_exp * (1 * x + 1) / 2)\n return tf.math.exp((hk_exp * sn1 - hs_exp) / (1 - sn1 * sn1))\n ones = tf.ones_like(hk)\n res = integration.gauss_legendre(func=transformed_bvn_distribution, lower=-ones, upper=ones, num_points=20, dtype=dtype)\n return res\n bvn = bvn + transformed_bvn(hk, hs, asr)\n bvn = bvn * asr / (4 * np.pi)\n bvn = bvn + _ncdf(-h) * _ncdf(-k)\n return bvn", "docstring": "Computes values for the cumulative standard bivariate normal distribution.\n\nMore specifically, computes `P(x > dh, y > dk)` where `x` and `y` are\nstandard normal variables with correlation `rho`.\n\nArgs:\n dh: A real `Tensor` representing lower integration limits for `x`.\n dk: A `Tensor` of the same dtype as `dh` and of compatible shape\n representing lower integration limits for `y`.\n rho: A `Tensor` of the same dtype as `dh` and of compatible shape\n representing correlation coefficients.\n\nReturns:\n A `Tensor` of cumulative distribution function values.\n\n#### References:\n[1] Genz, A., Numerical Computation of Rectangular Bivariate and Trivariate\n Normal and t Probabilities, 2004\n http://www.math.wsu.edu/faculty/genz/papers/bvnt.pdf"} +{"repo": "keras", "function": "def gaussian_blur(images, kernel_size=(3, 3), sigma=(1.0, 1.0), data_format=None):\n if any_symbolic_tensors((images,)):\n return GaussianBlur(kernel_size=kernel_size, sigma=sigma, data_format=data_format).symbolic_call(images)\n return backend.image.gaussian_blur(images, kernel_size=kernel_size, sigma=sigma, data_format=data_format)", "docstring": "Applies a Gaussian blur to the image(s).\n\nArgs:\n images: Input image or batch of images.
Must be 3D or 4D.\n kernel_size: A tuple of two integers, specifying the height and width\n of the Gaussian kernel.\n sigma: A tuple of two floats, specifying the standard deviation of\n the Gaussian kernel along height and width.\n data_format: A string specifying the data format of the input tensor.\n It can be either `\"channels_last\"` or `\"channels_first\"`.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)`, while `\"channels_first\"`\n corresponds to inputs with shape `(batch, channels, height, width)`.\n If not specified, the value will default to\n `keras.config.image_data_format`.\n\nReturns:\n Blurred image or batch of images.\n\nExamples:\n\n>>> x = np.random.random((2, 64, 80, 3)) # batch of 2 RGB images\n>>> y = keras.ops.image.gaussian_blur(x)\n>>> y.shape\n(2, 64, 80, 3)\n\n>>> x = np.random.random((64, 80, 3)) # single RGB image\n>>> y = keras.ops.image.gaussian_blur(x)\n>>> y.shape\n(64, 80, 3)\n\n>>> x = np.random.random((2, 3, 64, 80)) # batch of 2 RGB images\n>>> y = keras.ops.image.gaussian_blur(\n... x, data_format=\"channels_first\")\n>>> y.shape\n(2, 3, 64, 80)"} +{"repo": "transformers", "function": "def requires_grad(self) -> bool:\n if self._rot_mats is not None:\n return self._rot_mats.requires_grad\n elif self._quats is not None:\n return self._quats.requires_grad\n else:\n raise ValueError('Both rotations are None')", "docstring": "Returns the requires_grad property of the underlying rotation\n\nReturns:\n The requires_grad property of the underlying tensor"} +{"repo": "tensorflow", "function": "def dispatch(op, args, kwargs):\n for dispatcher in getattr(op, FALLBACK_DISPATCH_ATTR):\n result = dispatcher.handle(args, kwargs)\n if result is not OpDispatcher.NOT_SUPPORTED:\n return result\n for dispatcher in _GLOBAL_DISPATCHERS:\n result = dispatcher.handle(op, args, kwargs)\n if result is not OpDispatcher.NOT_SUPPORTED:\n return result\n return OpDispatcher.NOT_SUPPORTED", "docstring": "Returns the result from the first successful dispatcher for a given op.\n\nCalls the `handle` method of each `OpDispatcher` that has been registered\nto handle `op`, and returns the value from the first successful handler.\n\nArgs:\n op: Python function: the operation to dispatch for.\n args: The arguments to the operation.\n kwargs: The keyword arguments to the operation.\n\nReturns:\n The result of the operation, or `NOT_SUPPORTED` if no registered\n dispatcher can handle the given arguments."} +{"repo": "beam", "function": "def __init__(self, value, translator):\n self.value = value\n self.translator = translator", "docstring": "Creates a NestedValueProvider that wraps the provided ValueProvider.\n\nArgs:\n value: ValueProvider object to wrap\n translator: function that is applied to the ValueProvider\nRaises:\n ``RuntimeValueProviderError``: if any of the provided objects are not\n accessible."} +{"repo": "transformers", "function": "def _compute_offsets(self, token_ids, time_precision=0.02, segment_size=1500):\n offsets = []\n if 'torch' in str(type(token_ids)) and (hasattr(token_ids, 'cpu') and callable(token_ids.cpu)):\n token_ids = token_ids.cpu()\n token_ids = np.array(token_ids)\n if token_ids.shape[0] > 1 and len(token_ids.shape) > 1:\n raise ValueError('Can only process a single input at a time')\n timestamp_begin = self.all_special_ids[-1] + 1\n timestamp_tokens = token_ids >= timestamp_begin\n consecutive = np.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] + 1\n if consecutive.shape[0] == 0 and
timestamp_tokens.sum() <= 1:\n return []\n elif np.where(timestamp_tokens)[0][-1] + 1 not in consecutive:\n consecutive = np.append(consecutive, np.where(timestamp_tokens)[0][-1] + 1)\n last_slice = np.where(timestamp_tokens)[0][0]\n cur_max_timestamp = 0\n prev_segments_len = 0\n for current_slice in consecutive:\n sliced_tokens = token_ids[last_slice:current_slice]\n if len(sliced_tokens) > 1:\n start_timestamp_position = sliced_tokens[0].item() - timestamp_begin\n end_timestamp_position = sliced_tokens[-1].item() - timestamp_begin\n if start_timestamp_position < cur_max_timestamp:\n is_single_ending = last_slice >= 2 and (not (token_ids[last_slice - 2] >= timestamp_begin and token_ids[last_slice - 1] >= timestamp_begin))\n if is_single_ending:\n prev_segments_len += segment_size\n else:\n prev_segments_len += cur_max_timestamp\n cur_max_timestamp = end_timestamp_position\n sliced_tokens = self._preprocess_token_ids(sliced_tokens)\n text = self._decode(sliced_tokens)\n text = self._filter_timestamp_ids(text)\n offsets.append({'text': text, 'timestamp': (start_timestamp_position * time_precision + prev_segments_len * time_precision, end_timestamp_position * time_precision + prev_segments_len * time_precision)})\n last_slice = current_slice\n return offsets", "docstring": "Compute offsets for a given tokenized input\n\nArgs:\n token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\n List of tokenized input ids. Can be obtained using the `__call__` method.\n time_precision (`float`, *optional*, defaults to 0.02):\n The time ratio to convert from token to time.\n segment_size (`int`, *optional*, defaults to 1500):\n The number of features in the input mel spectrogram."} +{"repo": "tensorflow", "function": "def _as_graph_element(obj):\n conv_fn = getattr(obj, '_as_graph_element', None)\n if conv_fn and callable(conv_fn):\n return conv_fn()\n return None", "docstring": "Convert `obj` to a graph element if possible, otherwise return `None`.\n\nArgs:\n obj: Object to convert.\n\nReturns:\n The result of `obj._as_graph_element()` if that method is available;\n otherwise `None`."} +{"repo": "transformers", "function": "def get_optimizer_cls_and_kwargs(args: TrainingArguments, model: Optional[PreTrainedModel]=None) -> tuple[Any, Any]:\n optim_args = {}\n if args.optim_args:\n for mapping in args.optim_args.replace(' ', '').split(','):\n key, value = mapping.split('=')\n optim_args[key] = value\n optimizer_kwargs = {'lr': args.learning_rate}\n adam_kwargs = {'betas': (args.adam_beta1, args.adam_beta2), 'eps': args.adam_epsilon}\n\n def setup_low_rank_optimizer(optimizer_name: str, optimizer_mapping: dict[str, Any], optim_kwargs: dict[str, Any], is_layerwise_supported: bool=True) -> tuple[Any, Any]:\n \"\"\"\n Helper function to set up low-rank optimizers like GaLore and Apollo.\n\n Args:\n optimizer_name (str): Name of the optimizer.\n optimizer_mapping (dict): Mapping of optimizer names to their classes.\n optim_kwargs (dict): Keyword arguments for the optimizer.\n is_layerwise_supported (bool): Whether layerwise optimization is supported.\n\n Returns:\n Tuple[Any, Any]: Optimizer class and updated optimizer kwargs.\n \"\"\"\n is_layerwise = optimizer_name.lower().endswith('layerwise')\n if is_layerwise and args.parallel_mode == ParallelMode.DISTRIBUTED and is_layerwise_supported:\n raise NotImplementedError(f'Layer-wise {optimizer_name} does not support DDP at this time')\n optimizer_cls = optimizer_mapping[optimizer_name]\n if args.optim_target_modules is None:\n raise 
ValueError(f'You need to define `optim_target_modules` to use {optimizer_name} optimizers')\n if not isinstance(args.optim_target_modules, (list, str)):\n raise ValueError(f\"`optim_target_modules` must be a list of strings, a regex string, or 'all-linear'. Got: {args.optim_target_modules}\")\n if model is None:\n raise ValueError(f'You need to pass a model to initialize {optimizer_name} optimizer.')\n all_linear = isinstance(args.optim_target_modules, str) and args.optim_target_modules.replace('_', '-') == 'all-linear'\n target_params_names = []\n for module_name, module in model.named_modules():\n target_module_exists, is_regex = check_target_module_exists(args.optim_target_modules, module_name, return_is_regex=True)\n if not isinstance(module, nn.Linear):\n if target_module_exists and (not is_regex):\n logger.warning(f'{module_name} matched but ignored. {optimizer_name} only supports linear layers.')\n continue\n if not target_module_exists and (not all_linear):\n continue\n target_params_names.append(module_name + '.weight')\n if len(target_params_names) == 0:\n raise ValueError(f'No target modules found for {optimizer_name} ({args.optim_target_modules}).')\n target_params = [p for n, p in model.named_parameters() if n in target_params_names]\n non_target_params = [p for n, p in model.named_parameters() if n not in target_params_names]\n optim_kwargs.update(optim_args)\n param_groups = [{'params': non_target_params}, {'params': target_params, **optim_kwargs}]\n if is_layerwise:\n if args.gradient_accumulation_steps != 1:\n raise ValueError(f'Layerwise {optimizer_name} does not support gradient accumulation!')\n optimizer_dict = {}\n for param in non_target_params:\n optimizer_dict[param] = optimizer_cls([{'params': [param]}], **optimizer_kwargs)\n for param in target_params:\n optimizer_dict[param] = optimizer_cls([{'params': [param], **optim_kwargs}], **optimizer_kwargs)\n\n def optimizer_hook(param):\n if param.grad is not None:\n optimizer_dict[param].step()\n optimizer_dict[param].zero_grad()\n for param in model.parameters():\n if param.requires_grad:\n param.register_post_accumulate_grad_hook(optimizer_hook)\n optimizer_cls = LayerWiseDummyOptimizer\n optimizer_kwargs.update({'optimizer_dict': optimizer_dict})\n optimizer_kwargs.update({'params': param_groups})\n return (optimizer_cls, optimizer_kwargs)\n if args.optim == OptimizerNames.ADAFACTOR:\n optimizer_cls = Adafactor\n optimizer_kwargs.update({'scale_parameter': False, 'relative_step': False})\n elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]:\n from torch.optim import AdamW\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n if args.optim == OptimizerNames.ADAMW_TORCH_FUSED:\n optimizer_kwargs.update({'fused': True})\n elif args.optim == OptimizerNames.ADAMW_TORCH_XLA:\n try:\n from torch_xla.amp.syncfree import AdamW\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError('Trainer failed to import syncfree AdamW from torch_xla.')\n elif args.optim == OptimizerNames.ADAMW_TORCH_NPU_FUSED:\n try:\n from torch_npu.optim import NpuFusedAdamW\n optimizer_cls = NpuFusedAdamW\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError('Trainer failed to import FusedAdamW from torch_npu.')\n elif args.optim == OptimizerNames.ADAMW_APEX_FUSED:\n try:\n from apex.optimizers import FusedAdam\n optimizer_cls = FusedAdam\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError('Trainer tried to 
instantiate apex FusedAdam but apex is not installed!')\n elif args.optim in [OptimizerNames.ADAMW_BNB, OptimizerNames.ADAMW_8BIT, OptimizerNames.PAGED_ADAMW, OptimizerNames.PAGED_ADAMW_8BIT, OptimizerNames.ADEMAMIX, OptimizerNames.ADEMAMIX_8BIT, OptimizerNames.PAGED_ADEMAMIX, OptimizerNames.PAGED_ADEMAMIX_8BIT, OptimizerNames.LION, OptimizerNames.LION_8BIT, OptimizerNames.PAGED_LION, OptimizerNames.PAGED_LION_8BIT, OptimizerNames.RMSPROP_BNB, OptimizerNames.RMSPROP_8BIT, OptimizerNames.RMSPROP_32BIT]:\n try:\n from bitsandbytes.optim import AdamW, Lion, RMSprop\n is_paged = False\n optim_bits = 32\n optimizer_cls = None\n additional_optim_kwargs = adam_kwargs\n if 'paged' in args.optim:\n is_paged = True\n if '8bit' in args.optim:\n optim_bits = 8\n if 'adam' in args.optim:\n optimizer_cls = AdamW\n elif 'lion' in args.optim:\n optimizer_cls = Lion\n additional_optim_kwargs = {'betas': (args.adam_beta1, args.adam_beta2)}\n elif 'rmsprop' in args.optim:\n optimizer_cls = RMSprop\n additional_optim_kwargs = optim_args\n elif 'ademamix' in args.optim:\n if is_bitsandbytes_available() and version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.44.0'):\n raise ValueError('The AdEMAMix optimizer is not supported by your current version of `bitsandbytes`. Please install `bitsandbytes` >= 0.44.0.')\n from bitsandbytes.optim import AdEMAMix\n optimizer_cls = AdEMAMix\n additional_optim_kwargs = {'betas': (float(optim_args.get('beta1', args.adam_beta1)), float(optim_args.get('beta2', args.adam_beta2)), float(optim_args.get('beta3', 0.9999))), 'alpha': float(optim_args.get('alpha', 5.0)), 'eps': float(optim_args.get('eps', args.adam_epsilon))}\n if 't_alpha' in optim_args:\n additional_optim_kwargs['t_alpha'] = int(optim_args['t_alpha'])\n if 't_beta3' in optim_args:\n additional_optim_kwargs['t_beta3'] = int(optim_args['t_beta3'])\n bnb_kwargs = {'optim_bits': optim_bits}\n if 'rmsprop' not in args.optim:\n bnb_kwargs['is_paged'] = is_paged\n optimizer_kwargs.update(additional_optim_kwargs)\n optimizer_kwargs.update(bnb_kwargs)\n except ImportError:\n raise ValueError('Trainer tried to instantiate bnb optimizer but `bitsandbytes` is not installed!')\n if is_bitsandbytes_available() and version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.41.1'):\n logger.warning('You are using 8-bit optimizers with a version of `bitsandbytes` < 0.41.1. 
It is recommended to update your version as a major bug has been fixed in 8-bit optimizers.')\n elif args.optim == OptimizerNames.ADAMW_ANYPRECISION:\n try:\n from torchdistx.optimizers import AnyPrecisionAdamW\n optimizer_cls = AnyPrecisionAdamW\n optimizer_kwargs.update(adam_kwargs)\n optimizer_kwargs.update({'use_kahan_summation': strtobool(optim_args.get('use_kahan_summation', 'False')), 'momentum_dtype': getattr(torch, optim_args.get('momentum_dtype', 'float32')), 'variance_dtype': getattr(torch, optim_args.get('variance_dtype', 'float32')), 'compensation_buffer_dtype': getattr(torch, optim_args.get('compensation_buffer_dtype', 'bfloat16'))})\n except ImportError:\n raise ValueError('Please install https://github.com/pytorch/torchdistx')\n elif args.optim == OptimizerNames.SGD:\n optimizer_cls = torch.optim.SGD\n elif args.optim == OptimizerNames.ADAGRAD:\n optimizer_cls = torch.optim.Adagrad\n elif args.optim == OptimizerNames.RMSPROP:\n optimizer_cls = torch.optim.RMSprop\n elif args.optim in [OptimizerNames.GALORE_ADAMW, OptimizerNames.GALORE_ADAMW_8BIT, OptimizerNames.GALORE_ADAFACTOR, OptimizerNames.GALORE_ADAMW_LAYERWISE, OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE, OptimizerNames.GALORE_ADAFACTOR_LAYERWISE]:\n if not is_galore_torch_available():\n raise ImportError('You need to install `galore_torch` in order to use GaLore optimizers install it with `pip install git+https://github.com/jiaweizzhao/GaLore`')\n from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit\n optimizer_mapping = {OptimizerNames.GALORE_ADAMW: GaLoreAdamW, OptimizerNames.GALORE_ADAMW_8BIT: GaLoreAdamW8bit, OptimizerNames.GALORE_ADAFACTOR: GaLoreAdafactor, OptimizerNames.GALORE_ADAMW_LAYERWISE: GaLoreAdamW, OptimizerNames.GALORE_ADAMW_8BIT_LAYERWISE: GaLoreAdamW8bit, OptimizerNames.GALORE_ADAFACTOR_LAYERWISE: GaLoreAdafactor}\n galore_optim_kwargs = {'rank': int(optim_args.pop('rank', 128)), 'update_proj_gap': int(optim_args.pop('update_proj_gap', 200)), 'scale': float(optim_args.pop('scale', 0.25)), 'proj_type': optim_args.pop('proj_type', 'std')}\n optimizer_cls, optimizer_kwargs = setup_low_rank_optimizer(args.optim, optimizer_mapping, galore_optim_kwargs)\n if args.optim == OptimizerNames.GALORE_ADAFACTOR:\n optimizer_kwargs.update({'scale_parameter': False, 'relative_step': False})\n elif args.optim in [OptimizerNames.APOLLO_ADAMW, OptimizerNames.APOLLO_ADAMW_LAYERWISE]:\n if not is_apollo_torch_available():\n raise ImportError('You need to install `apollo_torch` in order to use APOLLO optimizers install it with `pip install git+https://github.com/zhuhanqing/APOLLO`')\n from apollo_torch import APOLLOAdamW\n optimizer_mapping = {OptimizerNames.APOLLO_ADAMW: APOLLOAdamW, OptimizerNames.APOLLO_ADAMW_LAYERWISE: APOLLOAdamW}\n apollo_optim_kwargs = {'rank': int(optim_args.pop('rank', 128)), 'proj': optim_args.pop('proj', 'random'), 'scale_type': optim_args.pop('scale_type', 'channel'), 'update_proj_gap': int(optim_args.pop('update_proj_gap', 200)), 'scale': float(optim_args.pop('scale', 1.0)), 'proj_type': optim_args.pop('proj_type', 'std')}\n apollo_optim_kwargs.update(adam_kwargs)\n optimizer_cls, optimizer_kwargs = setup_low_rank_optimizer(args.optim, optimizer_mapping, apollo_optim_kwargs)\n elif args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:\n if not is_lomo_available():\n raise ImportError('You need to install `lomo_optim` in order to use LOMO optimizers install it with `pip install lomo-optim`')\n if not is_accelerate_available('0.30.0'):\n raise ImportError('You need to 
have `accelerate>=0.30.0` to be able to use LOMO optimizers')\n if model is None:\n raise ValueError('You need to pass a `model` in order to correctly initialize a LOMO optimizer.')\n from lomo_optim import AdaLomo, Lomo\n if 'ada' in args.optim:\n optimizer_cls = AdaLomo\n else:\n optimizer_cls = Lomo\n optimizer_kwargs.update({'model': model})\n elif args.optim == OptimizerNames.GROKADAMW:\n if not is_grokadamw_available():\n raise ValueError('Please install grokadamw with `pip install grokadamw`')\n from grokadamw import GrokAdamW\n optimizer_cls = GrokAdamW\n optimizer_kwargs.update({'alpha_init': float(optim_args.get('alpha_init', 0.98)), 'lamb': float(optim_args.get('lamb', 2.0)), 'gamma': float(optim_args.get('gamma', 0.1)), 'grokking_signal_decay_rate': float(optim_args.get('grokking_signal_decay_rate', 0.1)), 'gradient_clipping': float(optim_args.get('gradient_clipping', 1.0))})\n elif args.optim in [OptimizerNames.ADAMW_TORCH_4BIT, OptimizerNames.ADAMW_TORCH_8BIT]:\n if not is_torchao_available() or version.parse(importlib.metadata.version('torchao')) < version.parse('0.4.0'):\n raise ImportError('You need to have `torchao>=0.4.0` in order to use torch 4-bit optimizers.Install it with `pip install torchao` or follow the instructions here: https://github.com/pytorch/ao')\n if version.parse(importlib.metadata.version('torch')) <= version.parse('2.4'):\n raise ImportError('You need to have `torch>2.4` in order to use torch 4-bit optimizers. Install it with `pip install --upgrade torch` it is available on pipy. Otherwise, you need to install torch nightly.')\n if version.parse(importlib.metadata.version('torchao')) >= version.parse('0.11.0'):\n from torchao.optim import AdamW4bit, AdamW8bit\n else:\n from torchao.prototype.low_bit_optim import AdamW4bit, AdamW8bit\n if args.optim == OptimizerNames.ADAMW_TORCH_4BIT:\n optimizer_cls = AdamW4bit\n elif args.optim == OptimizerNames.ADAMW_TORCH_8BIT:\n optimizer_cls = AdamW8bit\n else:\n raise ValueError('Invalid optimizer')\n optimizer_kwargs.update(adam_kwargs)\n elif args.optim in [OptimizerNames.SCHEDULE_FREE_RADAM, OptimizerNames.SCHEDULE_FREE_ADAMW, OptimizerNames.SCHEDULE_FREE_SGD]:\n if not is_schedulefree_available():\n raise ImportError('You need to install `schedulefree` in order to use schedulefree optimizers. Install it with `pip install schedulefree.`')\n if not is_accelerate_available('0.30.0'):\n raise ImportError('You need to have `accelerate>=0.30.0` to be able to use schedulefree optimizers')\n from schedulefree import AdamWScheduleFree, SGDScheduleFree\n additional_optim_kwargs = {}\n require_warmup = True\n if args.optim == OptimizerNames.SCHEDULE_FREE_RADAM:\n if not is_schedulefree_available('1.4.0'):\n raise ImportError('You need to install `schedulefree>=1.4.0` in order to use RAdamScheduleFree optimizer. 
Install it with `pip install schedulefree.`')\n from schedulefree import RAdamScheduleFree\n optimizer_cls = RAdamScheduleFree\n additional_optim_kwargs = adam_kwargs\n require_warmup = False\n elif args.optim == OptimizerNames.SCHEDULE_FREE_ADAMW:\n optimizer_cls = AdamWScheduleFree\n additional_optim_kwargs = adam_kwargs\n elif args.optim == OptimizerNames.SCHEDULE_FREE_SGD:\n optimizer_cls = SGDScheduleFree\n else:\n raise ValueError('Invalid schedulefree optimizer')\n additional_optim_kwargs['weight_decay'] = args.weight_decay\n if require_warmup:\n additional_optim_kwargs['warmup_steps'] = args.warmup_steps\n additional_optim_kwargs.update({'weight_lr_power': float(optim_args.get('weight_lr_power', 2.0)), 'r': float(optim_args.get('r', 0.0))})\n optimizer_kwargs.update(additional_optim_kwargs)\n else:\n raise ValueError(f'Trainer cannot instantiate unsupported optimizer: {args.optim}')\n return (optimizer_cls, optimizer_kwargs)", "docstring": "Returns the optimizer class and optimizer parameters based on the training arguments.\n\nArgs:\n args (`transformers.training_args.TrainingArguments`):\n The training arguments for the training session."} +{"repo": "transformers", "function": "def init_cache(self, batch_size, max_length, encoder_outputs):\n decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4')\n decoder_attention_mask = jnp.ones_like(decoder_input_ids)\n decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape)\n\n def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n decoder_module = module._get_decoder_module()\n return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward)\n return unfreeze(init_variables['cache'])", "docstring": "Args:\n batch_size (`int`):\n batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\n max_length (`int`):\n maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\n cache.\n encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):\n `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:\n `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)\n is a sequence of hidden-states at the output of the last layer of the encoder. Used in the\n cross-attention of the decoder."} +{"repo": "tensorflow", "function": "def extract_glimpse(input, size, offsets, centered=True, normalized=True, uniform_noise=True, name=None):\n return gen_image_ops.extract_glimpse(input=input, size=size, offsets=offsets, centered=centered, normalized=normalized, uniform_noise=uniform_noise, name=name)", "docstring": "Extracts a glimpse from the input tensor.\n\nReturns a set of windows called glimpses extracted at location\n`offsets` from the input tensor. If the windows only partially\noverlap the inputs, the non-overlapping areas will be filled with\nrandom noise.\n\nThe result is a 4-D tensor of shape `[batch_size, glimpse_height,\nglimpse_width, channels]`. The channels and batch dimensions are the\nsame as that of the input tensor.
The height and width of the output\nwindows are specified in the `size` parameter.\n\nThe arguments `normalized` and `centered` control how the windows are built:\n\n* If the coordinates are normalized but not centered, 0.0 and 1.0\n correspond to the minimum and maximum of each height and width\n dimension.\n* If the coordinates are both normalized and centered, they range from\n -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper\n left corner, the lower right corner is located at (1.0, 1.0) and the\n center is at (0, 0).\n* If the coordinates are not normalized they are interpreted as\n numbers of pixels.\n\nUsage Example:\n\n>>> x = [[[[0.0],\n... [1.0],\n... [2.0]],\n... [[3.0],\n... [4.0],\n... [5.0]],\n... [[6.0],\n... [7.0],\n... [8.0]]]]\n>>> tf.compat.v1.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]],\n... centered=False, normalized=False)\n\n\nArgs:\n input: A `Tensor` of type `float32`. A 4-D float tensor of shape\n `[batch_size, height, width, channels]`.\n size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the\n size of the glimpses to extract. The glimpse height must be specified\n first, followed by the glimpse width.\n offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape\n `[batch_size, 2]` containing the y, x locations of the center of each\n window.\n centered: An optional `bool`. Defaults to `True`. Indicates if the offset\n coordinates are centered relative to the image, in which case the (0, 0)\n offset is relative to the center of the input images. If false, the (0,0)\n offset corresponds to the upper left corner of the input images.\n normalized: An optional `bool`. Defaults to `True`. Indicates if the offset\n coordinates are normalized.\n uniform_noise: An optional `bool`. Defaults to `True`. Indicates if the\n noise should be generated using a uniform distribution or a Gaussian\n distribution.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` of type `float32`."} +{"repo": "tensorflow", "function": "def _shape_tuple(self) -> NoReturn:\n raise NotImplementedError()", "docstring": "The shape of this Tensor, as a tuple.\n\nThis is more performant than tuple(shape().as_list()) as it avoids\ntwo list and one object creation. Marked private for now as from an API\nperspective, it would be better to have a single performant way of\ngetting a shape rather than exposing shape() and shape_tuple()\n(and heaven forbid, shape_list() etc. as well!). Punting on that for now,\nbut ideally one would work things out and remove the need for this method.\n\nReturns:\n tuple with the shape."} +{"repo": "keras", "function": "def floatx():\n return _FLOATX", "docstring": "Return the default float type, as a string.\n\nE.g.
`'bfloat16'`, `'float16'`, `'float32'`, `'float64'`.\n\nReturns:\n String, the current default float type.\n\nExample:\n\n>>> keras.config.floatx()\n'float32'"} +{"repo": "starthinker", "function": "def recipe_dcm_run(config, auth_read, account, report_id, report_name):\n dcm(config, {'auth': auth_read, 'report_run_only': True, 'report': {'account': account, 'report_id': report_id, 'name': report_name}})", "docstring": "Trigger a CM report run\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n account (integer) - CM network id.\n report_id (integer) - CM report id, empty if using name.\n report_name (string) - CM report name, empty if using id instead."} +{"repo": "tensorflow", "function": "def _rewrite_input_as_indexed_slices(body_grad_graph, grad_output_slices, forward_input, loop_vars):\n init_slices = _create_grad_indexed_slices_init(grad_output_slices, forward_input)\n with body_grad_graph.as_default():\n input_slices = indexed_slices.IndexedSlices(values=body_grad_graph.capture(init_slices.values, allowlisted=True), indices=body_grad_graph.capture(init_slices.indices, allowlisted=True), dense_shape=body_grad_graph.capture(init_slices.dense_shape, allowlisted=True))\n for t in _flatten(init_slices):\n captured_t = body_grad_graph.captures.pop(t)\n body_grad_graph.inputs.remove(captured_t)\n new_output_slices = _rewrite_grad_indexed_slices_output(grad_output_slices, input_slices)\n return _update_indexed_slices_param(body_grad_graph, loop_vars, init_slices, input_slices, new_output_slices, grad_output_slices)", "docstring": "Rewrites grad_output_slices's corresponding input to be an IndexedSlices.\n\nThis rewrite requires that forward_input was captured in the forward loop,\ni.e. is not a user-specified loop variable. This is important because the\nrewrite assumes that forward_input is passed through to its corresponding\noutput unchanged. This assumption is used in _rewrite_input_as_indexed_slices,\nwhich depends on the exact gradient structure produced by the input's fanout.\n\nThis can yield a more efficient computation than using\n_rewrite_output_as_tensor, since it preserves the IndexedSlices structure\ninstead of converting the IndexedSlices to a dense Tensor.\n\nArgs:\n body_grad_graph: _WhileBodyGradFuncGraph.\n grad_output_slices: IndexedSlices output of body_grad_graph.\n forward_input: the corresponding Tensor input to the forward loop.\n loop_vars: list of Tensors. 
The inputs to body_grad_graph.\n\nReturns:\n The new loop_vars to pass to body_grad_graph."} +{"repo": "keras", "function": "def to_json(self, **kwargs):\n from keras.src.saving import serialization_lib\n model_config = serialization_lib.serialize_keras_object(self)\n return json.dumps(model_config, **kwargs)", "docstring": "Returns a JSON string containing the network configuration.\n\nTo load a network from a JSON save file, use\n`keras.models.model_from_json(json_string, custom_objects={...})`.\n\nArgs:\n **kwargs: Additional keyword arguments to be passed to\n `json.dumps()`.\n\nReturns:\n A JSON string."} +{"repo": "transformers", "function": "def init_cache(self, batch_size, max_length, encoder_outputs):\n decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4')\n decoder_attention_mask = jnp.ones_like(decoder_input_ids)\n decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape)\n\n def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n decoder_module = module._get_decoder_module()\n return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward)\n return unfreeze(init_variables['cache'])", "docstring": "Args:\n batch_size (`int`):\n batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.\n max_length (`int`):\n maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized\n cache.\n encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):\n `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:\n `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*)\n is a sequence of hidden-states at the output of the last layer of the encoder. 
Used in the\n cross-attention of the decoder."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: Optional[torch.Tensor]=None, output_attentions: bool=False, **kwargs):\n residual = hidden_states\n if self.normalize_before:\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n if not self.normalize_before:\n hidden_states = self.self_attn_layer_norm(hidden_states)\n if self.normalize_before:\n hidden_states = self.final_layer_norm(hidden_states)\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n if not self.normalize_before:\n hidden_states = self.final_layer_norm(hidden_states)\n if self.training:\n if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (attn_weights,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative\n values.\n position_embeddings (`torch.FloatTensor`, *optional*):\n Object queries (also called content embeddings), to be added to the hidden states.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail."} +{"repo": "tensorflow", "function": "class ProgramContext(object):\n\n def __init__(self, options, autograph_module=None):\n self.options = options\n self.autograph_module = autograph_module", "docstring": "ProgramContext keeps track of converting function hierarchies.\n\nAttributes:\n options: ConversionOptions\n autograph_module: Deprecated. 
Do not use."} +{"repo": "transformers", "function": "def forward(self, input, tokens_per_expert):\n return sequential_experts_gemm(input, self.weight, tokens_per_expert.cpu())", "docstring": "Perform grouped matrix multiplication.\n\nArgs:\n input (`torch.Tensor`):\n Input tensor of shape (num_tokens, in_features).\n tokens_per_expert (`torch.Tensor`):\n Number of tokens assigned to each expert.\n\nReturns:\n torch.Tensor: Output tensor of shape (num_tokens, out_features)."} +{"repo": "mobly", "function": "def from_dict(event_dict):\n return CallbackEvent(callback_id=event_dict['callbackId'], name=event_dict['name'], creation_time=event_dict['time'], data=event_dict['data'])", "docstring": "Creates a CallbackEvent object from a dictionary.\n\nArgs:\n event_dict: dict, a dictionary representing an event.\n\nReturns:\n A CallbackEvent object."} +{"repo": "transformers", "function": "class AriaTextConfig(LlamaConfig):\n model_type = 'aria_text'\n base_config_key = 'text_config'\n\n def __init__(self, intermediate_size: int=4096, moe_num_experts: int=8, moe_topk: int=2, moe_num_shared_experts: int=2, pad_token_id=2, **super_kwargs):\n super().__init__(pad_token_id=pad_token_id, **super_kwargs)\n self.intermediate_size = intermediate_size\n self.moe_num_experts = moe_num_experts\n self.moe_topk = moe_topk\n self.moe_num_shared_experts = moe_num_shared_experts", "docstring": "This class handles the configuration for the text component of the Aria model.\nInstantiating a configuration with the defaults will yield a similar configuration to that of the model of the Aria\n[rhymes-ai/Aria](https://huggingface.co/rhymes-ai/Aria) architecture.\nThis class extends the LlamaConfig to include additional parameters specific to the Mixture of Experts (MoE) architecture.\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 32000):\n Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`LlamaModel`]\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 4096):\n The size of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 32):\n Number of hidden layers in the Transformer decoder.\n num_attention_heads (`int`, *optional*, defaults to 32):\n Number of attention heads for each attention layer in the Transformer decoder.\n num_key_value_heads (`int`, *optional*):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details, check out [this\n paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to\n `num_attention_heads`.\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. 
Llama 1 supports up to 2048 tokens,\n Llama 2 up to 4096, CodeLlama up to 16384.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the rms normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n pad_token_id (`int`, *optional*, defaults to 2):\n Padding token id.\n bos_token_id (`int`, *optional*, defaults to 1):\n Beginning of stream token id.\n eos_token_id (`int`, *optional*, defaults to 2):\n End of stream token id.\n pretraining_tp (`int`, *optional*, defaults to 1):\n Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this\n document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to\n understand more about it. This value is necessary to ensure exact reproducibility of the pretraining\n results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).\n tie_word_embeddings (`bool`, *optional*, defaults to `False`):\n Whether to tie weight embeddings.\n rope_theta (`float`, *optional*, defaults to 10000.0):\n The base period of the RoPE embeddings.\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type\n and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value\n accordingly.\n Expected contents:\n `rope_type` (`str`):\n The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',\n 'llama3'], with 'default' being the original RoPE implementation.\n `factor` (`float`, *optional*):\n Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In\n most scaling types, a `factor` of x will enable the model to handle sequences of length x *\n original maximum pre-trained length.\n `original_max_position_embeddings` (`int`, *optional*):\n Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during\n pretraining.\n `attention_factor` (`float`, *optional*):\n Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention\n computation. If unspecified, it defaults to value recommended by the implementation, using the\n `factor` field to infer the suggested value.\n `beta_fast` (`float`, *optional*):\n Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear\n ramp function. If unspecified, it defaults to 32.\n `beta_slow` (`float`, *optional*):\n Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear\n ramp function. If unspecified, it defaults to 1.\n `short_factor` (`List[float]`, *optional*):\n Only used with 'longrope'. The scaling factor to be applied to short contexts (<\n `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden\n size divided by the number of attention heads divided by 2\n `long_factor` (`List[float]`, *optional*):\n Only used with 'longrope'. The scaling factor to be applied to long contexts (>\n `original_max_position_embeddings`).
Must be a list of numbers with the same length as the hidden\n size divided by the number of attention heads divided by 2\n `low_freq_factor` (`float`, *optional*):\n Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE\n `high_freq_factor` (`float`, *optional*):\n Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE\n attention_bias (`bool`, *optional*, defaults to `False`):\n Whether to use a bias in the query, key, value and output projection layers during self-attention.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n mlp_bias (`bool`, *optional*, defaults to `False`):\n Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.\n head_dim (`int`, *optional*):\n The attention head dimension. If None, it will default to hidden_size // num_heads\n moe_num_experts (`int`, *optional*, defaults to 8):\n The number of experts in the MoE layer.\n moe_topk (`int`, *optional*, defaults to 2):\n The number of top experts to route to for each token.\n moe_num_shared_experts (`int`, *optional*, defaults to 2):\n The number of shared experts."} +{"repo": "transformers", "function": "def call(self, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, decoder_input_ids: tf.Tensor | None=None, decoder_attention_mask: tf.Tensor | None=None, decoder_position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, decoder_head_mask: tf.Tensor | None=None, cross_attn_head_mask: tf.Tensor | None=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]]=None, past_key_values: List[tf.Tensor] | None=None, inputs_embeds: tf.Tensor | None=None, decoder_inputs_embeds: tf.Tensor | None=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:\n if labels is not None:\n labels = tf.where(labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels)\n use_cache = False\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)\n outputs = self.model(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)\n lm_logits = self.bias_layer(lm_logits)\n masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return (masked_lm_loss,) + output if masked_lm_loss is not None else output\n return TFSeq2SeqLMOutput(loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, 
cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", "docstring": "labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nReturns:"} +{"repo": "tensorflow", "function": "def switch(condition, then_expression, else_expression):\n if condition.dtype != dtypes_module.bool:\n condition = math_ops.cast(condition, 'bool')\n cond_ndim = ndim(condition)\n if not cond_ndim:\n if not callable(then_expression):\n\n def then_expression_fn():\n return then_expression\n else:\n then_expression_fn = then_expression\n if not callable(else_expression):\n\n def else_expression_fn():\n return else_expression\n else:\n else_expression_fn = else_expression\n x = cond.cond(condition, then_expression_fn, else_expression_fn)\n else:\n if callable(then_expression):\n then_expression = then_expression()\n if callable(else_expression):\n else_expression = else_expression()\n expr_ndim = ndim(then_expression)\n if cond_ndim > expr_ndim:\n raise ValueError('Rank of `condition` should be less than or equal to rank of `then_expression` and `else_expression`. ndim(condition)=' + str(cond_ndim) + ', ndim(then_expression)=' + str(expr_ndim))\n if cond_ndim > 1:\n ndim_diff = expr_ndim - cond_ndim\n cond_shape = array_ops.concat([array_ops.shape(condition), [1] * ndim_diff], axis=0)\n condition = array_ops.reshape(condition, cond_shape)\n expr_shape = array_ops.shape(then_expression)\n shape_diff = expr_shape - cond_shape\n tile_shape = array_ops.where_v2(shape_diff > 0, expr_shape, array_ops.ones_like(expr_shape))\n condition = array_ops.tile(condition, tile_shape)\n x = array_ops.where_v2(condition, then_expression, else_expression)\n return x", "docstring": "Switches between two operations depending on a scalar value.\n\nNote that both `then_expression` and `else_expression`\nshould be symbolic tensors of the *same shape*.\n\nArgs:\n condition: tensor (`int` or `bool`).\n then_expression: either a tensor, or a callable that returns a tensor.\n else_expression: either a tensor, or a callable that returns a tensor.\n\nReturns:\n The selected tensor.\n\nRaises:\n ValueError: If rank of `condition` is greater than rank of expressions."} +{"repo": "fhir-py", "function": "def copy_common_field(source_message: message.Message, target_message: message.Message, field_name: str):\n source_descriptor = source_message.DESCRIPTOR\n target_descriptor = target_message.DESCRIPTOR\n source_field = _field_descriptor_for_name(source_message, field_name)\n target_field = _field_descriptor_for_name(target_message, field_name)\n if source_field.type != target_field.type:\n raise ValueError(f'Field {field_name} differs in type between {source_descriptor.full_name} ({source_field.type}) and {target_descriptor.full_name} ({target_field.type}).')\n if field_is_repeated(source_field) != field_is_repeated(target_field):\n raise ValueError(f'Field {field_name} differs in size between {source_descriptor.full_name} and {target_descriptor.full_name}.')\n if field_is_set(source_message, source_field):\n source_value = get_value_at_field(source_message, source_field)\n 
set_value_at_field(target_message, target_field, source_value)", "docstring": "Copies field named field_name from source_message to target_message.\n\nArgs:\n source_message: The message to copy values from.\n target_message: The message to copy values to.\n field_name: The common field to examine/copy from source_message to\n target_message.\n\nRaises:\n ValueError: <target_message> is not the same type as <source_message>. Unable to copy field\n <field_name>.\n ValueError: Field <field_name> is not present in both <source_message> and <target_message>.\n ValueError: Field <field_name> differs in type between <source_message> (<source_field.type>) and\n <target_message> (<target_field.type>).\n ValueError: Field <field_name> differs in size between <source_message> and <target_message>."} +{"repo": "transformers", "function": "def loss_masks(self, masks_queries_logits: torch.Tensor, mask_labels: List[torch.Tensor], indices: Tuple[np.array], num_masks: int) -> Dict[str, torch.Tensor]:\n src_idx = self._get_predictions_permutation_indices(indices)\n tgt_idx = self._get_targets_permutation_indices(indices)\n pred_masks = masks_queries_logits[src_idx]\n target_masks, _ = self._pad_images_to_max_in_batch(mask_labels)\n target_masks = target_masks[tgt_idx]\n pred_masks = pred_masks[:, None]\n target_masks = target_masks[:, None]\n with torch.no_grad():\n point_coordinates = self.sample_points_using_uncertainty(pred_masks, lambda logits: self.calculate_uncertainty(logits), self.num_points, self.oversample_ratio, self.importance_sample_ratio)\n point_labels = sample_point(target_masks, point_coordinates, align_corners=False).squeeze(1)\n point_logits = sample_point(pred_masks, point_coordinates, align_corners=False).squeeze(1)\n losses = {'loss_mask': sigmoid_cross_entropy_loss(point_logits, point_labels, num_masks), 'loss_dice': dice_loss(point_logits, point_labels, num_masks)}\n del pred_masks\n del target_masks\n return losses", "docstring": "Compute the losses related to the masks using sigmoid_cross_entropy_loss and dice loss.\n\nArgs:\n masks_queries_logits (`torch.Tensor`):\n A tensor of shape `(batch_size, num_queries, height, width)`.\n mask_labels (`torch.Tensor`):\n List of mask labels of shape `(labels, height, width)`.\n indices (`Tuple[np.array]`):\n The indices computed by the Hungarian matcher.\n num_masks (`int`):\n The number of masks, used for normalization.\n\nReturns:\n losses (`Dict[str, Tensor]`): A dict of `torch.Tensor` containing two keys:\n - **loss_mask** -- The loss computed using sigmoid cross entropy loss on the predicted and ground truth\n masks.\n - **loss_dice** -- The loss computed using dice loss on the predicted and ground truth\n masks."} +{"repo": "fhir-py", "function": "def __getitem__(self, key: Any) -> 'ColumnExpressionBuilder':\n item = self._builder[key]\n if isinstance(item, expressions.Builder) and self._sealed:\n raise self._fhir_path_sealed_error(key)\n return ColumnExpressionBuilder._wrap_any(self, item)", "docstring": "Redirects to the expressions.Builder to get the item.\n\nArgs:\n key: the key of the item.\n\nReturns:\n A ColumnExpressionBuilder, because the item retrieved from the\n expressions.Builder is always of type Builder.\n\nRaises:\n AttributeError: if the FHIR path in this class is already sealed.\n TypeError: if getting the key from self._builder fails."} +{"repo": "tensorflow", "function": "def normalize_element(element, element_signature=None):\n normalized_components = []\n if element_signature is None:\n components = nest.flatten(element)\n flattened_signature = [None] * len(components)\n pack_as = element\n else:\n flattened_signature = nest.flatten(element_signature)\n components = nest.flatten_up_to(element_signature, element)\n
pack_as = element_signature\n with ops.name_scope('normalize_element'):\n for i, (t, spec) in enumerate(zip(components, flattened_signature)):\n try:\n if spec is None:\n spec = type_spec_from_value(t, use_fallback=False)\n except TypeError:\n normalized_components.append(ops.convert_to_tensor(t, name='component_%d' % i))\n else:\n if spec.__class__.__name__ == 'DatasetSpec':\n normalized_components.append(t)\n elif isinstance(spec, sparse_tensor.SparseTensorSpec):\n normalized_components.append(sparse_tensor.SparseTensor.from_value(t))\n elif isinstance(spec, ragged_tensor.RaggedTensorSpec):\n normalized_components.append(ragged_tensor.convert_to_tensor_or_ragged_tensor(t, name='component_%d' % i))\n elif isinstance(spec, tensor_array_ops.TensorArraySpec):\n normalized_components.append(t)\n elif isinstance(spec, none_tensor.NoneTensorSpec):\n normalized_components.append(none_tensor.NoneTensor())\n elif isinstance(spec, resource_variable_ops.VariableSpec):\n normalized_components.append(ops.convert_to_tensor(t, name=f'component_{i}', dtype=spec.dtype))\n elif isinstance(t, composite_tensor.CompositeTensor):\n normalized_components.append(t)\n else:\n dtype = getattr(spec, 'dtype', None)\n normalized_components.append(ops.convert_to_tensor(t, name='component_%d' % i, dtype=dtype))\n return nest.pack_sequence_as(pack_as, normalized_components)", "docstring": "Normalizes a nested structure of element components.\n\n* Components matching `SparseTensorSpec` are converted to `SparseTensor`.\n* Components matching `RaggedTensorSpec` are converted to `RaggedTensor`.\n* Components matching `VariableSpec` are converted to `Tensor`.\n* Components matching `DatasetSpec` or `TensorArraySpec` are passed through.\n* `CompositeTensor` components are passed through.\n* All other components are converted to `Tensor`.\n\nArgs:\n element: A nested structure of individual components.\n element_signature: (Optional.) A nested structure of `tf.DType` objects\n corresponding to each component of `element`. If specified, it will be\n used to set the exact type of output tensor when converting input\n components which are not tensors themselves (e.g. numpy arrays, native\n python types, etc.)\n\nReturns:\n A nested structure of `Tensor`, `Variable`, `Dataset`, `SparseTensor`,\n `RaggedTensor`, or `TensorArray` objects."} +{"repo": "transformers", "function": "class XLMPoolerEndLogits(nn.Module):\n\n def __init__(self, config: XLMConfig):\n super().__init__()\n self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)\n self.activation = nn.Tanh()\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dense_1 = nn.Linear(config.hidden_size, 1)\n\n def forward(self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor]=None, start_positions: Optional[torch.LongTensor]=None, p_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n \"\"\"\n Args:\n hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):\n The final hidden states of the model.\n start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):\n The hidden states of the first tokens for the labeled span.\n start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n The position of the first token for the labeled span.\n p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):\n Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 
1.0 means token\n should be masked.\n\n <Tip>\n\n One of `start_states` or `start_positions` should not be `None`. If both are set, `start_positions` overrides\n `start_states`.\n\n </Tip>\n\n Returns:\n `torch.FloatTensor`: The end logits for SQuAD.\n \"\"\"\n assert start_states is not None or start_positions is not None, 'One of start_states, start_positions should be not None'\n if start_positions is not None:\n slen, hsz = hidden_states.shape[-2:]\n start_positions = start_positions[:, None, None].expand(-1, -1, hsz)\n start_states = hidden_states.gather(-2, start_positions)\n start_states = start_states.expand(-1, slen, -1)\n x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))\n x = self.activation(x)\n x = self.LayerNorm(x)\n x = self.dense_1(x).squeeze(-1)\n if p_mask is not None:\n if p_mask.dtype == torch.float16:\n x = x * (1 - p_mask) - 65500 * p_mask\n else:\n x = x * (1 - p_mask) - 1e+30 * p_mask\n return x", "docstring": "Compute SQuAD end logits from sequence hidden states.\n\nArgs:\n config ([`XLMConfig`]):\n The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`\n to use."} +{"repo": "tensorflow", "function": "def tridiagonal_matmul(diagonals, rhs, diagonals_format='compact', name=None):\n if diagonals_format == 'compact':\n superdiag = diagonals[..., 0, :]\n maindiag = diagonals[..., 1, :]\n subdiag = diagonals[..., 2, :]\n elif diagonals_format == 'sequence':\n superdiag, maindiag, subdiag = diagonals\n elif diagonals_format == 'matrix':\n m1 = tensor_shape.dimension_value(diagonals.shape[-1])\n m2 = tensor_shape.dimension_value(diagonals.shape[-2])\n if m1 and m2 and (m1 != m2):\n raise ValueError('Expected last two dimensions of diagonals to be same, got {} and {}'.format(m1, m2))\n diags = array_ops.matrix_diag_part(diagonals, k=(-1, 1), padding_value=0.0, align='LEFT_RIGHT')\n superdiag = diags[..., 0, :]\n maindiag = diags[..., 1, :]\n subdiag = diags[..., 2, :]\n else:\n raise ValueError('Unrecognized diagonals_format: %s' % diagonals_format)\n superdiag = array_ops.expand_dims(superdiag, -2)\n maindiag = array_ops.expand_dims(maindiag, -2)\n subdiag = array_ops.expand_dims(subdiag, -2)\n return linalg_ops.tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name)", "docstring": "Multiplies tridiagonal matrix by matrix.\n\n`diagonals` is a representation of a 3-diagonal NxN matrix, which depends on\n`diagonals_format`.\n\nIn `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with\ntwo inner-most dimensions representing the square tridiagonal matrices.\nElements outside of the three diagonals will be ignored.\n\nIn `sequence` format, `diagonals` is a list or tuple of three tensors:\n`[superdiag, maindiag, subdiag]`, each having shape [..., M]. The last element\nof `superdiag` and the first element of `subdiag` are ignored.\n\nIn `compact` format the three diagonals are brought together into one tensor\nof shape `[..., 3, M]`, with last two dimensions containing superdiagonals,\ndiagonals, and subdiagonals, in order. Similarly to `sequence` format,\nelements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.\n\nThe `sequence` format is recommended as the one with the best performance.\n\n`rhs` is the matrix to the right of the multiplication.
It has shape `[..., M, N]`.\n\nExample:\n\n```python\nsuperdiag = tf.constant([-1, -1, 0], dtype=tf.float64)\nmaindiag = tf.constant([2, 2, 2], dtype=tf.float64)\nsubdiag = tf.constant([0, -1, -1], dtype=tf.float64)\ndiagonals = [superdiag, maindiag, subdiag]\nrhs = tf.constant([[1, 1], [1, 1], [1, 1]], dtype=tf.float64)\nx = tf.linalg.tridiagonal_matmul(diagonals, rhs, diagonals_format='sequence')\n```\n\nArgs:\n diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The\n shape depends on `diagonals_format`, see description above. Must be\n `float32`, `float64`, `complex64`, or `complex128`.\n rhs: A `Tensor` of shape [..., M, N] and with the same dtype as `diagonals`.\n diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is `compact`.\n name: A name to give this `Op` (optional).\n\nReturns:\n A `Tensor` of shape [..., M, N] containing the result of multiplication.\n\nRaises:\n ValueError: An unsupported type is provided as input, or when the input\n tensors have incorrect shapes."} +{"repo": "tensorflow", "function": "def _check_rnn_cell_input_dtypes(inputs):\n for t in nest.flatten(inputs):\n _check_supported_dtypes(t.dtype)", "docstring": "Check whether the input tensors have supported dtypes.\n\nDefault RNN cells only support float and complex dtypes since the\nactivation functions (tanh and sigmoid) only allow those types. This function\nwill throw a proper error message if the input is not of a supported type.\n\nArgs:\n inputs: tensor or nested structure of tensors that are fed to the RNN cell as\n input or state.\n\nRaises:\n ValueError: if any of the input tensors do not have a float or\n complex dtype."} +{"repo": "transformers", "function": "def forward(self, pixel_values: Tensor, mask_labels: Optional[List[Tensor]]=None, class_labels: Optional[List[Tensor]]=None, pixel_mask: Optional[Tensor]=None, output_hidden_states: Optional[bool]=None, output_auxiliary_logits: Optional[bool]=None, output_attentions: Optional[bool]=None, return_dict: Optional[bool]=None) -> Mask2FormerForUniversalSegmentationOutput:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.model(pixel_values=pixel_values, pixel_mask=pixel_mask, output_hidden_states=output_hidden_states or self.config.use_auxiliary_loss, output_attentions=output_attentions, return_dict=True)\n loss, loss_dict, auxiliary_logits = (None, None, None)\n class_queries_logits = ()\n for decoder_output in outputs.transformer_decoder_intermediate_states:\n class_prediction = self.class_predictor(decoder_output.transpose(0, 1))\n class_queries_logits += (class_prediction,)\n masks_queries_logits = outputs.masks_queries_logits\n auxiliary_logits = self.get_auxiliary_logits(class_queries_logits, masks_queries_logits)\n if mask_labels is not None and class_labels is not None:\n loss_dict = self.get_loss_dict(masks_queries_logits=masks_queries_logits[-1], class_queries_logits=class_queries_logits[-1], mask_labels=mask_labels, class_labels=class_labels, auxiliary_predictions=auxiliary_logits)\n loss = self.get_loss(loss_dict)\n encoder_hidden_states = None\n pixel_decoder_hidden_states = None\n transformer_decoder_hidden_states = None\n if output_hidden_states:\n encoder_hidden_states = outputs.encoder_hidden_states\n
pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states\n transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states\n output_auxiliary_logits = self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits\n if not output_auxiliary_logits:\n auxiliary_logits = None\n output = Mask2FormerForUniversalSegmentationOutput(loss=loss, class_queries_logits=class_queries_logits[-1], masks_queries_logits=masks_queries_logits[-1], auxiliary_logits=auxiliary_logits, encoder_last_hidden_state=outputs.encoder_last_hidden_state, pixel_decoder_last_hidden_state=outputs.pixel_decoder_last_hidden_state, transformer_decoder_last_hidden_state=outputs.transformer_decoder_last_hidden_state, encoder_hidden_states=encoder_hidden_states, pixel_decoder_hidden_states=pixel_decoder_hidden_states, transformer_decoder_hidden_states=transformer_decoder_hidden_states, attentions=outputs.attentions)\n if not return_dict:\n output = tuple((v for v in output.values() if v is not None))\n if loss is not None:\n output = loss + output\n return output", "docstring": "mask_labels (`List[torch.Tensor]`, *optional*):\n List of mask labels of shape `(num_labels, height, width)` to be fed to a model\nclass_labels (`List[torch.LongTensor]`, *optional*):\n list of target class labels of shape `(num_labels, height, width)` to be fed to a model. They identify the\n labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`.\noutput_auxiliary_logits (`bool`, *optional*):\n Whether or not to output auxiliary logits.\n\nExamples:\n\nInstance segmentation example:\n\n```python\n>>> from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation\n>>> from PIL import Image\n>>> import requests\n>>> import torch\n\n>>> # Load Mask2Former trained on COCO instance segmentation dataset\n>>> image_processor = AutoImageProcessor.from_pretrained(\"facebook/mask2former-swin-small-coco-instance\")\n>>> model = Mask2FormerForUniversalSegmentation.from_pretrained(\n... \"facebook/mask2former-swin-small-coco-instance\"\n... )\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n>>> inputs = image_processor(image, return_tensors=\"pt\")\n\n>>> with torch.no_grad():\n... outputs = model(**inputs)\n\n>>> # Model predicts class_queries_logits of shape `(batch_size, num_queries)`\n>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`\n>>> class_queries_logits = outputs.class_queries_logits\n>>> masks_queries_logits = outputs.masks_queries_logits\n\n>>> # Perform post-processing to get instance segmentation map\n>>> pred_instance_map = image_processor.post_process_instance_segmentation(\n... outputs, target_sizes=[(image.height, image.width)]\n... )[0]\n>>> print(pred_instance_map.shape)\ntorch.Size([480, 640])\n```\n\nSemantic segmentation example:\n```python\n>>> from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation\n>>> from PIL import Image\n>>> import requests\n>>> import torch\n\n>>> # Load Mask2Former trained on ADE20k semantic segmentation dataset\n>>> image_processor = AutoImageProcessor.from_pretrained(\"facebook/mask2former-swin-small-ade-semantic\")\n>>> model = Mask2FormerForUniversalSegmentation.from_pretrained(\"facebook/mask2former-swin-small-ade-semantic\")\n\n>>> url = (\n... \"https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg\"\n... 
)\n>>> image = Image.open(requests.get(url, stream=True).raw)\n>>> inputs = image_processor(image, return_tensors=\"pt\")\n\n>>> with torch.no_grad():\n... outputs = model(**inputs)\n\n>>> # Model predicts class_queries_logits of shape `(batch_size, num_queries)`\n>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`\n>>> class_queries_logits = outputs.class_queries_logits\n>>> masks_queries_logits = outputs.masks_queries_logits\n\n>>> # Perform post-processing to get semantic segmentation map\n>>> pred_semantic_map = image_processor.post_process_semantic_segmentation(\n... outputs, target_sizes=[(image.height, image.width)]\n... )[0]\n>>> print(pred_semantic_map.shape)\ntorch.Size([512, 683])\n```\n\nPanoptic segmentation example:\n\n```python\n>>> from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation\n>>> from PIL import Image\n>>> import requests\n>>> import torch\n\n>>> # Load Mask2Former trained on CityScapes panoptic segmentation dataset\n>>> image_processor = AutoImageProcessor.from_pretrained(\"facebook/mask2former-swin-small-cityscapes-panoptic\")\n>>> model = Mask2FormerForUniversalSegmentation.from_pretrained(\n... \"facebook/mask2former-swin-small-cityscapes-panoptic\"\n... )\n\n>>> url = \"https://cdn-media.huggingface.co/Inference-API/Sample-results-on-the-Cityscapes-dataset-The-above-images-show-how-our-method-can-handle.png\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n>>> inputs = image_processor(image, return_tensors=\"pt\")\n\n>>> with torch.no_grad():\n... outputs = model(**inputs)\n\n>>> # Model predicts class_queries_logits of shape `(batch_size, num_queries)`\n>>> # and masks_queries_logits of shape `(batch_size, num_queries, height, width)`\n>>> class_queries_logits = outputs.class_queries_logits\n>>> masks_queries_logits = outputs.masks_queries_logits\n\n>>> # Perform post-processing to get panoptic segmentation map\n>>> pred_panoptic_map = image_processor.post_process_panoptic_segmentation(\n... outputs, target_sizes=[(image.height, image.width)]\n... 
)[0][\"segmentation\"]\n>>> print(pred_panoptic_map.shape)\ntorch.Size([338, 676])\n```"} +{"repo": "transformers", "function": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.return_dict\n encoder_hidden_states = encoder_outputs[0]\n if encoder_attention_mask is None:\n batch_size, sequence_length = encoder_hidden_states.shape[:2]\n encoder_attention_mask = jnp.ones((batch_size, sequence_length))\n batch_size, sequence_length = decoder_input_ids.shape\n if decoder_attention_mask is None:\n decoder_attention_mask = jnp.ones((batch_size, sequence_length))\n if decoder_position_ids is None:\n if past_key_values is not None:\n raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n rngs = {}\n if dropout_rng is not None:\n rngs['dropout'] = dropout_rng\n inputs = {'params': params or self.params}\n if past_key_values:\n inputs['cache'] = past_key_values\n mutable = ['cache']\n else:\n mutable = False\n\n def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n decoder_module = module._get_decoder_module()\n outputs = decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs)\n hidden_states = outputs[0]\n if self.config.tie_word_embeddings:\n shared_embedding = module.model.variables['params']['shared']['embedding']\n lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states)\n else:\n lm_logits = module.lm_head(hidden_states)\n lm_logits += module.final_logits_bias\n return (lm_logits, outputs)\n outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n if past_key_values is None:\n lm_logits, decoder_outputs = outputs\n else:\n (lm_logits, decoder_outputs), past = outputs\n if return_dict:\n outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)\n else:\n outputs = (lm_logits,) + decoder_outputs[1:]\n if past_key_values is not None and return_dict:\n outputs['past_key_values'] = unfreeze(past['cache'])\n return outputs\n elif past_key_values is not None and (not return_dict):\n outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n 
return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> import jax.numpy as jnp\n>>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration\n\n>>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained(\"facebook/blenderbot-400M-distill\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/blenderbot-400M-distill\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> logits = outputs.logits\n```"} +{"repo": "tensorflow", "function": "def load_variable(ckpt_dir_or_file, name):\n if name.endswith(':0'):\n name = name[:-2]\n reader = load_checkpoint(ckpt_dir_or_file)\n return reader.get_tensor(name)", "docstring": "Returns the tensor value of the given variable in the checkpoint.\n\nWhen the variable name is unknown, you can use `tf.train.list_variables` to\ninspect all the variable names.\n\nExample usage:\n\n```python\nimport tensorflow as tf\na = tf.Variable(1.0)\nb = tf.Variable(2.0)\nckpt = tf.train.Checkpoint(var_list={'a': a, 'b': b})\nckpt_path = ckpt.save('tmp-ckpt')\nvar= tf.train.load_variable(\n ckpt_path, 'var_list/a/.ATTRIBUTES/VARIABLE_VALUE')\nprint(var) # 1.0\n```\n\nArgs:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.\n name: Name of the variable to return.\n\nReturns:\n A numpy `ndarray` with a copy of the value of this variable."} +{"repo": "tensorflow", "function": "def convert(model_flags: _model_flags_pb2.ModelFlags, conversion_flags: _conversion_flags_pb2.ConverterFlags, input_data_str: Optional[str]=None, debug_info_str: Optional[str]=None):\n try:\n return wrap_converter.wrapped_convert(model_flags.SerializeToString(), conversion_flags.SerializeToString(), input_data_str, debug_info_str)\n except Exception as e:\n converter_error = ConverterError(str(e))\n for error_data in _metrics_wrapper.retrieve_collected_errors():\n converter_error.append_error(error_data)\n if error_data.error_code == converter_error_data_pb2.ConverterErrorData.ERROR_STATEFUL_PARTITIONED_CALL_IN_FINAL_IR and (not conversion_flags.guarantee_all_funcs_one_use):\n conversion_flags.guarantee_all_funcs_one_use = True\n return convert(model_flags, conversion_flags, input_data_str, debug_info_str)\n raise converter_error", "docstring": "Converts `input_data_str` to a TFLite model.\n\nArgs:\n model_flags: Proto describing model properties, see `model_flags.proto`.\n conversion_flags: Proto describing conversion properties, see\n `compiler/mlir/lite/converter_flags.proto`.\n input_data_str: Input data in serialized form (e.g. a graphdef is common, or\n it can be hlo text or proto)\n debug_info_str: Serialized `GraphDebugInfo` proto describing logging\n information.\n\nReturns:\n Converted model in serialized form (e.g. 
a TFLITE model is common).\nRaises:\n ConverterError: When conversion fails in TFLiteConverter, usually due to\n ops not being supported."} +{"repo": "transformers", "function": "class Phi4MultimodalConfig(Phi3Config):\n sub_configs = {'audio_config': Phi4MultimodalAudioConfig, 'vision_config': Phi4MultimodalVisionConfig}\n\n def __init__(self, vocab_size=200064, hidden_size=3072, intermediate_size=8192, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, resid_pdrop=0.0, embd_pdrop=0.0, attention_dropout=0.0, hidden_act='silu', max_position_embeddings=131072, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, tie_word_embeddings=False, rope_theta=10000.0, rope_scaling=None, partial_rotary_factor=1, bos_token_id=199999, eos_token_id=[199999, 200020], pad_token_id=199999, original_max_position_embeddings=4096, sliding_window=None, vision_config=None, audio_config=None, **kwargs):\n super().__init__(vocab_size=vocab_size, hidden_size=hidden_size, intermediate_size=intermediate_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_key_value_heads=num_key_value_heads, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attention_dropout=attention_dropout, hidden_act=hidden_act, max_position_embeddings=max_position_embeddings, initializer_range=initializer_range, rms_norm_eps=rms_norm_eps, use_cache=use_cache, tie_word_embeddings=tie_word_embeddings, rope_theta=rope_theta, rope_scaling=rope_scaling, partial_rotary_factor=partial_rotary_factor, bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, original_max_position_embeddings=original_max_position_embeddings, sliding_window=sliding_window, **kwargs)\n if isinstance(vision_config, dict):\n vision_config = Phi4MultimodalVisionConfig(**vision_config)\n elif vision_config is None:\n vision_config = Phi4MultimodalVisionConfig()\n self.vision_config = vision_config\n if isinstance(audio_config, dict):\n audio_config = Phi4MultimodalAudioConfig(**audio_config)\n elif audio_config is None:\n audio_config = Phi4MultimodalAudioConfig()\n self.audio_config = audio_config", "docstring": "This is the configuration class to store the configuration of a [`Phi4MultimodalModel`]. It is used to instantiate a\nPhi4Multimodal model according to the specified arguments, defining the model architecture. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the\n[microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 200064):\n Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`Phi3Model`].\n hidden_size (`int`, *optional*, defaults to 3072):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 8192):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 32):\n Number of hidden layers in the Transformer decoder.\n num_attention_heads (`int`, *optional*, defaults to 32):\n Number of attention heads for each attention layer in the Transformer decoder.\n num_key_value_heads (`int`, *optional*, defaults to 8):\n This is the number of key_value heads that should be used to implement Grouped Query Attention.
If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details, check out [this\n paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to\n `num_attention_heads`.\n resid_pdrop (`float`, *optional*, defaults to 0.0):\n Dropout probability for mlp outputs.\n embd_pdrop (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the embeddings.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio after computing the attention scores.\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n max_position_embeddings (`int`, *optional*, defaults to 131072):\n The maximum sequence length that this model might ever be used with.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon value used for the RMSNorm.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n tie_word_embeddings (`bool`, *optional*, defaults to `False`):\n Whether to tie weight embeddings\n rope_theta (`float`, *optional*, defaults to 10000.0):\n The base period of the RoPE embeddings.\n rope_scaling (`dict`, *optional*):\n The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must\n contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be `longrope` and\n the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size\n divided by the number of attention heads divided by 2.\n partial_rotary_factor (`float`, *optional*, defaults to `1.0`):\n Percentage of the query and keys which will have rotary embedding. Must be between 0.0 and 1.0.\n bos_token_id (`int`, *optional*, defaults to 199999):\n The id of the \"beginning-of-sequence\" token.\n eos_token_id (`int` or `list[int]`, *optional*, defaults to `[199999, 200020]`):\n The id of the \"end-of-sequence\" token.\n pad_token_id (`int`, *optional*, defaults to 199999):\n The id of the padding token.\n original_max_position_embeddings (`int`, *optional*, defaults to 4096):\n The maximum sequence length that this model was trained with. This is used to determine the size of the\n original RoPE embeddings when using long scaling.\n sliding_window (`int`, *optional*):\n Sliding window attention window size. If `None`, no sliding window is applied.\n vision_config (`Phi4MultimodalVisionConfig` or `dict`, *optional*):\n The vision config for the underlying image embedding model. If not provided, will default to the configuration\n used to instantiate a model similar in architecture as\n [microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct).\n audio_config (`Phi4MultimodalAudioConfig` or `dict`, *optional*):\n The audio config for the underlying audio embedding model.
If not provided, will default to the configuration\n used to instantiate a model similar in architecture as\n [microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct).\n\nExample:\n\n```python\n>>> from transformers import Phi4MultimodalModel, Phi4MultimodalConfig\n\n>>> # Initializing a Phi4Multimodal style configuration\n>>> configuration = Phi4MultimodalConfig.from_pretrained(\"microsoft/Phi-4-multimodal-instruct\")\n\n>>> # Initializing a model from the configuration\n>>> model = Phi4MultimodalModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "class MegaConfig(PretrainedConfig):\n model_type = 'mega'\n\n def __init__(self, vocab_size=30522, hidden_size=128, num_hidden_layers=4, intermediate_size=256, ema_projection_size=16, bidirectional=True, shared_representation_size=64, use_chunking=False, chunk_size=-1, truncation=None, normalize_before_mega=True, normalization_type='scalenorm', norm_affine=True, activation='silu', attention_activation='softmax', dropout_prob=0.1, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, use_feature_dropout=False, use_normalized_ffn=True, nffn_hidden_size=256, normalize_before_ffn=True, nffn_activation_dropout_prob=0.1, max_positions=2048, add_token_type_embeddings=False, type_vocab_size=2, initializer_range=0.02, ema_delta_alpha_range=0.2, ema_beta_range=0.02, ema_gamma_omega_range=1.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, relative_positional_bias='rotary', classifier_dropout=None, use_cache=True, add_lm_hidden_dense_layer=True, **kwargs):\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.activation = activation\n self.attention_activation = attention_activation\n self.intermediate_size = intermediate_size\n self.ema_projection_size = ema_projection_size\n self.bidirectional = bidirectional\n self.shared_representation_size = shared_representation_size\n self.use_chunking = use_chunking\n self.chunk_size = chunk_size\n self.truncation = truncation\n self.normalize_before_mega = normalize_before_mega\n self.normalization_type = normalization_type\n self.norm_affine = norm_affine\n self.dropout_prob = dropout_prob\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.use_feature_dropout = use_feature_dropout\n self.use_normalized_ffn = use_normalized_ffn\n self.nffn_hidden_size = nffn_hidden_size\n self.normalize_before_ffn = normalize_before_ffn\n self.nffn_activation_dropout_prob = nffn_activation_dropout_prob\n self.max_positions = max_positions\n self.add_token_type_embeddings = add_token_type_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.ema_delta_alpha_range = ema_delta_alpha_range\n self.ema_beta_range = ema_beta_range\n self.ema_gamma_omega_range = ema_gamma_omega_range\n self.relative_positional_bias = relative_positional_bias\n self.use_cache = use_cache\n self.classifier_dropout = classifier_dropout\n self.add_lm_hidden_dense_layer = add_lm_hidden_dense_layer\n self.num_attention_heads = 1", "docstring": "This is the configuration class to store the configuration of a [`MegaModel`]. It is used to instantiate a Mega\nmodel according to the specified arguments, defining the model architecture. 
Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the Mega\n[mnaylor/mega-base-wikitext](https://huggingface.co/mnaylor/mega-base-wikitext) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 30522):\n Vocabulary size of the Mega model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`MegaModel`].\n hidden_size (`int`, *optional*, defaults to 128):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 4):\n Number of hidden layers in the Mega encoder.\n intermediate_size (`int`, *optional*, defaults to 256):\n Dimensionality of the hidden size (self-attention value projection) within the Mega encoder\n ema_projection_size (`int`, *optional*, defaults to 16):\n Dimensionality of the MegaMultiDimensionDampedEma\n bidirectional (`bool`, *optional*, defaults to `True`):\n Whether the MegaMultiDimensionDampedEma used in Mega's self-attention should work bidirectionally (`True`)\n or unidirectionally (`False`). Bidirectional EMA is incompatible with causal decoding, so this should be\n False if you intend to use the model as a decoder.\n shared_representation_size (`int`, *optional*, defaults to 64):\n Dimensionality of the linear projection for shared representation of self-attention queries and keys\n use_chunking (`bool`, *optional*, defaults to `False`):\n Whether to chunk inputs for linear self-attention complexity (described as Mega-chunk in the paper)\n chunk_size (`int`, *optional*, defaults to -1):\n If `use_chunking` is set to `True`, determines the size of the chunks to apply to the input sequence. If\n chunking is used, input sequences must be padded to a multiple of `chunk_size`\n truncation (`int`, *optional*):\n If specified, the sequence length for which to truncate MegaMultiDimensionDampedEma\n normalize_before_mega (`bool`, *optional*, defaults to `True`):\n Whether to normalize before (`True`) or after (`False`) passing through Mega encoder blocks\n normalization_type (`str`, *optional*, defaults to `\"scalenorm\"`):\n Type of normalization to use in Mega encoder blocks. Choose one of `\"scalenorm\"`, `\"layernorm\"`,\n `\"rmsnorm\"`, `\"batchnorm\"`, or `\"syncbatchnorm\"` (GPU required for syncbatchnorm)\n norm_affine (`bool`, *optional*, defaults to `True`):\n If `True`, applies a parameterized affine transformation to inputs during normalization\n activation (`str`, *optional*, defaults to `\"silu\"`):\n Activation function to apply within Mega encoder blocks. Choose one of `\"silu\"`, `\"relu\"`, `\"linear\"`,\n `\"gelu\"`, or `\"gelu_accurate\"`\n attention_activation (`str`, *optional*, defaults to `\"softmax\"`):\n Activation function to apply for single-headed self-attention (a la Transformer). 
Choose one of\n `\"softmax\"`, `\"laplace\"`, or `\"relu2\"`\n dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout probability for EMA self-attention\n hidden_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n use_feature_dropout (`bool`, *optional*, defaults to `False`):\n Whether to use feature-based (`True`) or standard dropout (`False`)\n use_normalized_ffn (`bool`, *optional*, defaults to `True`):\n Whether to use the normalized feed-forward sub-layer in Mega blocks (`True`) or pass Mega encoder output\n as-is (`False`)\n nffn_hidden_size (`int`, *optional*, defaults to 256):\n If using the normalized feed-forward network (NFFN) layer within Mega (`use_normalized_ffn = True`), this\n is the hidden size of the NFFN\n normalize_before_ffn (`bool`, *optional*, defaults to `True`):\n Whether to normalize before (`True`) or after (`False`) the feed-forward portion of NFFN\n nffn_activation_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the NFFN component.\n max_positions (`int`, *optional*, defaults to 2048):\n The maximum sequence length to use for positional representations. For `\"simple\"` relative positional bias,\n this is a hard limit on input length; `\"rotary\"` relative positional bias will extrapolate to longer\n sequences\n add_token_type_embeddings (`bool`, *optional*, defaults to `False`):\n Whether to account for token types in embeddings. Left as optional to maintain compatibility with original\n implementation while adding support for token types.\n type_vocab_size (`int`, *optional*, defaults to 2):\n The vocabulary size of the `token_type_ids` passed when calling [`MegaModel`]. Only used if\n `add_token_type_embeddings = True`\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n ema_delta_alpha_range (`float`, *optional*, defaults to 0.2):\n The standard deviation for initializing the delta (damping factor) and alpha (decay factor) parameters in\n MegaMultiDimensionDampedEma.\n ema_beta_range (`float`, *optional*, defaults to 0.02):\n The standard deviation for initializing the beta parameter (expansion matrix) in\n MegaMultiDimensionDampedEma.\n ema_gamma_omega_range (`float`, *optional*, defaults to 1.0):\n The standard deviation for initializing the gamma (projection matrix) and omega (residual weight)\n parameters in MultiDimensionEMA.\n relative_positional_bias (`str`, *optional*, defaults to `\"rotary\"`):\n Type of relative positional encoding. Choose one of `\"rotary\"` or `\"simple\"`. If `\"simple\"` is selected,\n `max_positions` is used as a limit on input size, while `\"rotary\"` extrapolates beyond `max_positions`.\n is_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).
Only\n relevant if `config.is_decoder=True`.\n classifier_dropout (`float`, *optional*):\n The dropout ratio for the classification head.\n add_lm_hidden_dense_layer (`bool`, *optional*, defaults to `True`):\n Whether to include a hidden layer for projection between encoder outputs and LM heads (`True`) or pass\n hidden states directly to LM head (`False`). Remains optional for compatibility with original\n implementation\n\nExamples:\n\n```python\n>>> from transformers import MegaConfig, MegaModel\n\n>>> # Initializing a Mega configuration\n>>> configuration = MegaConfig()\n\n>>> # Initializing a model (with random weights) from the configuration\n>>> model = MegaModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def conjugate_gradient(operator, rhs, preconditioner=None, x=None, tol=1e-05, max_iter=20, name='conjugate_gradient'):\n if not (operator.is_self_adjoint and operator.is_positive_definite):\n raise ValueError('Expected a self-adjoint, positive definite operator.')\n cg_state = collections.namedtuple('CGState', ['i', 'x', 'r', 'p', 'gamma'])\n\n def stopping_criterion(i, state):\n return math_ops.logical_and(i < max_iter, math_ops.reduce_any(linalg.norm(state.r, axis=-1) > tol))\n\n def dot(x, y):\n return array_ops.squeeze(math_ops.matvec(x[..., array_ops.newaxis], y, adjoint_a=True), axis=-1)\n\n def cg_step(i, state):\n z = math_ops.matvec(operator, state.p)\n alpha = state.gamma / dot(state.p, z)\n x = state.x + alpha[..., array_ops.newaxis] * state.p\n r = state.r - alpha[..., array_ops.newaxis] * z\n if preconditioner is None:\n q = r\n else:\n q = preconditioner.matvec(r)\n gamma = dot(r, q)\n beta = gamma / state.gamma\n p = q + beta[..., array_ops.newaxis] * state.p\n return (i + 1, cg_state(i + 1, x, r, p, gamma))\n with ops.name_scope(name):\n broadcast_shape = array_ops.broadcast_dynamic_shape(array_ops.shape(rhs)[:-1], operator.batch_shape_tensor())\n if preconditioner is not None:\n broadcast_shape = array_ops.broadcast_dynamic_shape(broadcast_shape, preconditioner.batch_shape_tensor())\n broadcast_rhs_shape = array_ops.concat([broadcast_shape, [array_ops.shape(rhs)[-1]]], axis=-1)\n r0 = array_ops.broadcast_to(rhs, broadcast_rhs_shape)\n tol *= linalg.norm(r0, axis=-1)\n if x is None:\n x = array_ops.zeros(broadcast_rhs_shape, dtype=rhs.dtype.base_dtype)\n else:\n r0 = rhs - math_ops.matvec(operator, x)\n if preconditioner is None:\n p0 = r0\n else:\n p0 = math_ops.matvec(preconditioner, r0)\n gamma0 = dot(r0, p0)\n i = constant_op.constant(0, dtype=dtypes.int32)\n state = cg_state(i=i, x=x, r=r0, p=p0, gamma=gamma0)\n _, state = while_loop.while_loop(stopping_criterion, cg_step, [i, state])\n return cg_state(state.i, x=state.x, r=state.r, p=state.p, gamma=state.gamma)", "docstring": "Conjugate gradient solver.\n\nSolves a linear system of equations `A*x = rhs` for self-adjoint, positive\ndefinite matrix `A` and right-hand side vector `rhs`, using an iterative,\nmatrix-free algorithm where the action of the matrix A is represented by\n`operator`. The iteration terminates when either the number of iterations\nexceeds `max_iter` or when the residual norm has been reduced to `tol`\ntimes its initial value, i.e. 
\\\\(||rhs - A x_k|| <= tol ||rhs||\\\\).\n\nArgs:\n operator: A `LinearOperator` that is self-adjoint and positive definite.\n rhs: A possibly batched vector of shape `[..., N]` containing the right-hand\n side vector.\n preconditioner: A `LinearOperator` that approximates the inverse of `A`.\n An efficient preconditioner could dramatically improve the rate of\n convergence. If `preconditioner` represents matrix `M` (`M` approximates\n `A^{-1}`), the algorithm uses `preconditioner.apply(x)` to estimate\n `A^{-1}x`. For this to be useful, the cost of applying `M` should be\n much lower than computing `A^{-1}` directly.\n x: A possibly batched vector of shape `[..., N]` containing the initial\n guess for the solution.\n tol: A float scalar convergence tolerance.\n max_iter: An integer giving the maximum number of iterations.\n name: A name scope for the operation.\n\nReturns:\n output: A namedtuple representing the final state with fields:\n - i: A scalar `int32` `Tensor`. Number of iterations executed.\n - x: A rank-1 `Tensor` of shape `[..., N]` containing the computed\n solution.\n - r: A rank-1 `Tensor` of shape `[..., N]` containing the residual vector.\n - p: A rank-1 `Tensor` of shape `[..., N]`. `A`-conjugate basis vector.\n - gamma: \\\\(r \\dot M \\dot r\\\\), equivalent to \\\\(||r||_2^2\\\\) when\n `preconditioner=None`."} +{"repo": "transformers", "function": "class TFCLIPEncoder(keras.layers.Layer):\n\n def __init__(self, config: CLIPConfig, **kwargs):\n super().__init__(**kwargs)\n self.layers = [TFCLIPEncoderLayer(config, name=f'layers_._{i}') for i in range(config.num_hidden_layers)]\n\n def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, causal_attention_mask: tf.Tensor, output_attentions: bool, output_hidden_states: bool, return_dict: bool, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n for i, layer_module in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n layer_outputs = layer_module(hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, training=training)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None))\n return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions)\n\n def build(self, input_shape=None):\n if self.built:\n return\n self.built = True\n if getattr(self, 'layers', None) is not None:\n for layer in self.layers:\n with tf.name_scope(layer.name):\n layer.build(None)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers.
Each layer is a\n[`TFCLIPEncoderLayer`].\n\nArgs:\n config: CLIPConfig"} +{"repo": "tensorflow", "function": "def init_op(self):\n return self._init_op", "docstring": "Return the Init Op used by the supervisor.\n\nReturns:\n An Op or `None`."} +{"repo": "keras", "function": "def divide_no_nan(x1, x2):\n if any_symbolic_tensors((x1, x2)):\n return DivideNoNan().symbolic_call(x1, x2)\n return backend.numpy.divide_no_nan(x1, x2)", "docstring": "Safe element-wise division which returns 0 where the denominator is 0.\n\nArgs:\n x1: First input tensor.\n x2: Second input tensor.\n\nReturns:\n The quotient `x1/x2`, element-wise, with zero where x2 is zero."} +{"repo": "transformers", "function": "def __call__(self, *args: Union[str, 'Image.Image', List['Image.Image'], List[str]], **kwargs: Any) -> List[Any]:\n return super().__call__(*args, **kwargs)", "docstring": "Extract the features of the input(s).\n\nArgs:\n images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):\n The pipeline handles three types of images:\n\n - A string containing a http link pointing to an image\n - A string containing a local path to an image\n - An image loaded in PIL directly\n\n The pipeline accepts either a single image or a batch of images, which must then be passed as a string.\n Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL\n images.\n timeout (`float`, *optional*, defaults to None):\n The maximum time in seconds to wait for fetching images from the web. If None, no timeout is used and\n the call may block forever.\nReturn:\n A nested list of `float`: The features computed by the model."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, original_hidden_states: Optional[torch.Tensor]=None, layer_idx: Optional[int]=None, attention_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_value: Optional[ZambaHybridDynamicCache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n layer_outputs = self.shared_transf(hidden_states, original_hidden_states=original_hidden_states, layer_idx=layer_idx, attention_mask=causal_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)\n transformer_hidden_states = layer_outputs[0]\n if output_attentions:\n self_attn_weights = layer_outputs[1]\n transformer_hidden_states = self.linear(transformer_hidden_states)\n layer_outputs = self.mamba_decoder(hidden_states, transformer_hidden_states=transformer_hidden_states, attention_mask=attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)\n if output_attentions:\n layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:]\n return layer_outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with\n hidden activations to form the input of the shared transformer layer.\n layer_idx (`int`): layer number.\n attention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n `(batch, sequence_length)` where padding elements are indicated by 0.\n past_key_value (`ZambaHybridDynamicCache`, *optional*): cached past key and 
value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence."} +{"repo": "pyglove", "function": "def from_json(cls, json_value: Any, *, value_spec: Optional[pg_typing.List]=None, allow_partial: bool=False, root_path: Optional[utils.KeyPath]=None, **kwargs) -> 'List':\n return cls([base.from_json(v, root_path=utils.KeyPath(i, root_path), allow_partial=allow_partial, **kwargs) for i, v in enumerate(json_value)], value_spec=value_spec, root_path=root_path, allow_partial=allow_partial)", "docstring": "Class method that load an symbolic List from a JSON value.\n\nExample::\n\n l = List.from_json([{\n '_type': '__main__.Foo',\n 'f1': 1,\n 'f2': {\n 'f21': True\n }\n },\n 1\n ])\n\n assert l.value_spec is None\n # Okay:\n l.append('abc')\n\n # [0].f2 is bound by class Foo's field 'f2' definition\n # (assuming it defines a schema for the Dict field).\n assert l[0].f2.value_spec is not None\n\n # Not okay:\n l[0].f2.abc = 1\n\nArgs:\n json_value: Input JSON value, only JSON list is acceptable.\n value_spec: An optional `pg.typing.List` object as the schema for the\n list.\n allow_partial: Whether to allow elements of the list to be partial.\n root_path: KeyPath of loaded object in its object tree.\n **kwargs: Allow passing through keyword arguments that are not applicable.\n\nReturns:\n A schema-less symbolic list, but its items maybe symbolic."} +{"repo": "tensorflow", "function": "def MonitoredTrainingSession(master='', is_chief=True, checkpoint_dir=None, scaffold=None, hooks=None, chief_only_hooks=None, save_checkpoint_secs=USE_DEFAULT, save_summaries_steps=USE_DEFAULT, save_summaries_secs=USE_DEFAULT, config=None, stop_grace_period_secs=120, log_step_count_steps=100, max_wait_secs=7200, save_checkpoint_steps=USE_DEFAULT, summary_dir=None, save_graph_def=True):\n if save_summaries_steps == USE_DEFAULT and save_summaries_secs == USE_DEFAULT:\n save_summaries_steps = 100\n save_summaries_secs = None\n elif save_summaries_secs == USE_DEFAULT:\n save_summaries_secs = None\n elif save_summaries_steps == USE_DEFAULT:\n save_summaries_steps = None\n if save_checkpoint_steps == USE_DEFAULT and save_checkpoint_secs == USE_DEFAULT:\n save_checkpoint_steps = None\n save_checkpoint_secs = 600\n elif save_checkpoint_secs == USE_DEFAULT:\n save_checkpoint_secs = None\n elif save_checkpoint_steps == USE_DEFAULT:\n save_checkpoint_steps = None\n scaffold = scaffold or Scaffold()\n worker_context = distribute_coordinator_context.get_current_worker_context()\n if worker_context:\n return _create_monitored_session_with_worker_context(worker_context, scaffold, checkpoint_dir=checkpoint_dir, hooks=hooks, chief_only_hooks=chief_only_hooks, save_checkpoint_secs=save_checkpoint_secs, save_summaries_steps=save_summaries_steps, save_summaries_secs=save_summaries_secs, config=config, stop_grace_period_secs=stop_grace_period_secs, log_step_count_steps=log_step_count_steps, max_wait_secs=max_wait_secs, save_checkpoint_steps=save_checkpoint_steps, summary_dir=summary_dir, save_graph_def=save_graph_def)\n if not is_chief:\n session_creator = 
WorkerSessionCreator(scaffold=scaffold, master=master, config=config, max_wait_secs=max_wait_secs)\n return MonitoredSession(session_creator=session_creator, hooks=hooks or [], stop_grace_period_secs=stop_grace_period_secs)\n all_hooks = []\n if chief_only_hooks:\n all_hooks.extend(chief_only_hooks)\n session_creator = ChiefSessionCreator(scaffold=scaffold, checkpoint_dir=checkpoint_dir, master=master, config=config)\n summary_dir = summary_dir or checkpoint_dir\n if summary_dir:\n if log_step_count_steps and log_step_count_steps > 0:\n all_hooks.append(basic_session_run_hooks.StepCounterHook(output_dir=summary_dir, every_n_steps=log_step_count_steps))\n if save_summaries_steps and save_summaries_steps > 0 or (save_summaries_secs and save_summaries_secs > 0):\n all_hooks.append(basic_session_run_hooks.SummarySaverHook(scaffold=scaffold, save_steps=save_summaries_steps, save_secs=save_summaries_secs, output_dir=summary_dir))\n if checkpoint_dir:\n if save_checkpoint_secs and save_checkpoint_secs > 0 or (save_checkpoint_steps and save_checkpoint_steps > 0):\n all_hooks.append(basic_session_run_hooks.CheckpointSaverHook(checkpoint_dir, save_steps=save_checkpoint_steps, save_secs=save_checkpoint_secs, scaffold=scaffold, save_graph_def=save_graph_def))\n if hooks:\n all_hooks.extend(hooks)\n return MonitoredSession(session_creator=session_creator, hooks=all_hooks, stop_grace_period_secs=stop_grace_period_secs)", "docstring": "Creates a `MonitoredSession` for training.\n\nFor a chief, this utility sets proper session initializer/restorer. It also\ncreates hooks related to checkpoint and summary saving. For workers, this\nutility sets proper session creator which waits for the chief to\ninitialize/restore. Please check `tf.compat.v1.train.MonitoredSession` for\nmore\ninformation.\n\n@compatibility(TF2)\nThis API is not compatible with eager execution and `tf.function`. To migrate\nto TF2, rewrite the code to be compatible with eager execution. Check the\n[migration\nguide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls)\non replacing `Session.run` calls. In Keras, session hooks can be replaced by\nCallbacks e.g. [logging hook notebook](\nhttps://github.com/tensorflow/docs/blob/master/site/en/guide/migrate/logging_stop_hook.ipynb)\nFor more details please read [Better\nperformance with tf.function](https://www.tensorflow.org/guide/function).\n@end_compatibility\n\nArgs:\n master: `String` the TensorFlow master to use.\n is_chief: If `True`, it will take care of initialization and recovery the\n underlying TensorFlow session. If `False`, it will wait on a chief to\n initialize or recover the TensorFlow session.\n checkpoint_dir: A string. Optional path to a directory where to restore\n variables.\n scaffold: A `Scaffold` used for gathering or building supportive ops. If not\n specified, a default one is created. It's used to finalize the graph.\n hooks: Optional list of `SessionRunHook` objects.\n chief_only_hooks: list of `SessionRunHook` objects. Activate these hooks if\n `is_chief==True`, ignore otherwise.\n save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved\n using a default checkpoint saver. If both `save_checkpoint_steps` and\n `save_checkpoint_secs` are set to `None`, then the default checkpoint\n saver isn't used. If both are provided, then only `save_checkpoint_secs`\n is used. Default 600.\n save_summaries_steps: The frequency, in number of global steps, that the\n summaries are written to disk using a default summary saver. 
If both\n `save_summaries_steps` and `save_summaries_secs` are set to `None`, then\n the default summary saver isn't used. Default 100.\n save_summaries_secs: The frequency, in secs, that the summaries are written\n to disk using a default summary saver. If both `save_summaries_steps` and\n `save_summaries_secs` are set to `None`, then the default summary saver\n isn't used. Default not enabled.\n config: an instance of `tf.compat.v1.ConfigProto` proto used to configure\n the session. It's the `config` argument of constructor of\n `tf.compat.v1.Session`.\n stop_grace_period_secs: Number of seconds given to threads to stop after\n `close()` has been called.\n log_step_count_steps: The frequency, in number of global steps, that the\n global step/sec is logged.\n max_wait_secs: Maximum time workers should wait for the session to become\n available. This should be kept relatively short to help detect incorrect\n code, but sometimes may need to be increased if the chief takes a while to\n start up.\n save_checkpoint_steps: The frequency, in number of global steps, that a\n checkpoint is saved using a default checkpoint saver. If both\n `save_checkpoint_steps` and `save_checkpoint_secs` are set to `None`, then\n the default checkpoint saver isn't used. If both are provided, then only\n `save_checkpoint_secs` is used. Default not enabled.\n summary_dir: A string. Optional path to a directory where to save\n summaries. If None, checkpoint_dir is used instead.\n save_graph_def: Whether to save the GraphDef and MetaGraphDef to\n `checkpoint_dir`. The GraphDef is saved after the session is created as\n `graph.pbtxt`. MetaGraphDefs are saved out for every checkpoint as\n `model.ckpt-*.meta`.\n\nReturns:\n A `MonitoredSession` object."} +{"repo": "mobly", "function": "def _on_fail(self, record):\n self.on_fail(record)", "docstring": "Proxy function to guarantee the base implementation of on_fail is\ncalled.\n\nArgs:\n record: records.TestResultRecord, a copy of the test record for\n this test, containing all information of the test execution\n including exception objects."} +{"repo": "transformers", "function": "def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n q_type, k_type = (q.dtype, k.dtype)\n cos = cos.unsqueeze(unsqueeze_dim)\n sin = sin.unsqueeze(unsqueeze_dim)\n q_embed = q * cos + rotate_half(q) * sin\n k_embed = k * cos + rotate_half(k) * sin\n return (q_embed.to(q_type), k_embed.to(k_type))", "docstring": "Applies Rotary Position Embedding to the query and key tensors.\n\nArgs:\n q (`torch.Tensor`): The query tensor.\n k (`torch.Tensor`): The key tensor.\n cos (`torch.Tensor`): The cosine part of the rotary embedding.\n sin (`torch.Tensor`): The sine part of the rotary embedding.\n position_ids (`torch.Tensor`, *optional*):\n Deprecated and unused.\n unsqueeze_dim (`int`, *optional*, defaults to 1):\n The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. 
Similarly, if q and k have\n the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\nReturns:\n `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding."} +{"repo": "keras", "function": "def _resize_images(self, x, height_factor, width_factor, data_format, interpolation='nearest'):\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError(f'Invalid `data_format` argument: {data_format}')\n if data_format == 'channels_first':\n x = ops.transpose(x, [0, 2, 3, 1])\n if interpolation == 'nearest':\n x = ops.repeat(x, height_factor, axis=1)\n x = ops.repeat(x, width_factor, axis=2)\n else:\n shape = ops.shape(x)\n new_shape = (shape[1] * height_factor, shape[2] * width_factor)\n x = ops.image.resize(x, new_shape, interpolation=interpolation)\n if data_format == 'channels_first':\n x = ops.transpose(x, [0, 3, 1, 2])\n return x", "docstring": "Resizes the images contained in a 4D tensor.\n\nArgs:\n x: Tensor or variable to resize.\n height_factor: Positive integer.\n width_factor: Positive integer.\n data_format: One of `\"channels_first\"`, `\"channels_last\"`.\n interpolation: A string, one of `\"bicubic\"`, `\"bilinear\"`,\n `\"lanczos3\"`, `\"lanczos5\"`, or `\"nearest\"`.\n\nReturns:\n A tensor."} +{"repo": "transformers", "function": "class XLNetSequenceSummary(nn.Module):\n\n def __init__(self, config: XLNetConfig):\n super().__init__()\n self.summary_type = getattr(config, 'summary_type', 'last')\n if self.summary_type == 'attn':\n raise NotImplementedError\n self.summary = nn.Identity()\n if hasattr(config, 'summary_use_proj') and config.summary_use_proj:\n if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and (config.num_labels > 0):\n num_classes = config.num_labels\n else:\n num_classes = config.hidden_size\n self.summary = nn.Linear(config.hidden_size, num_classes)\n activation_string = getattr(config, 'summary_activation', None)\n self.activation: Callable = get_activation(activation_string) if activation_string else nn.Identity()\n self.first_dropout = nn.Identity()\n if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0:\n self.first_dropout = nn.Dropout(config.summary_first_dropout)\n self.last_dropout = nn.Identity()\n if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0:\n self.last_dropout = nn.Dropout(config.summary_last_dropout)\n\n def forward(self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor]=None) -> torch.FloatTensor:\n \"\"\"\n Compute a single vector summary of a sequence hidden states.\n\n Args:\n hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`):\n The hidden states of the last layer.\n cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... 
are optional leading dimensions of `hidden_states`, *optional*):\n Used if `summary_type == \"cls_index\"` and takes the last token of the sequence as classification token.\n\n Returns:\n `torch.FloatTensor`: The summary of the sequence hidden states.\n \"\"\"\n if self.summary_type == 'last':\n output = hidden_states[:, -1]\n elif self.summary_type == 'first':\n output = hidden_states[:, 0]\n elif self.summary_type == 'mean':\n output = hidden_states.mean(dim=1)\n elif self.summary_type == 'cls_index':\n if cls_index is None:\n cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long)\n else:\n cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)\n cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))\n output = hidden_states.gather(-2, cls_index).squeeze(-2)\n elif self.summary_type == 'attn':\n raise NotImplementedError\n output = self.first_dropout(output)\n output = self.summary(output)\n output = self.activation(output)\n output = self.last_dropout(output)\n return output", "docstring": "Compute a single vector summary of a sequence hidden states.\n\nArgs:\n config ([`XLNetConfig`]):\n The config used by the model. Relevant arguments in the config class of the model are (refer to the actual\n config class of your model for the default values it uses):\n\n - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:\n\n - `\"last\"` -- Take the last token hidden state (like XLNet)\n - `\"first\"` -- Take the first token hidden state (like Bert)\n - `\"mean\"` -- Take the mean of all tokens hidden states\n - `\"cls_index\"` -- Supply a Tensor of classification token position (GPT/GPT-2)\n - `\"attn\"` -- Not implemented now, use multi-head attention\n\n - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.\n - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes\n (otherwise to `config.hidden_size`).\n - **summary_activation** (`Optional[str]`) -- Set to `\"tanh\"` to add a tanh activation to the output,\n another string or `None` will add no activation.\n - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.\n - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation."} +{"repo": "tensorflow", "function": "def binomial(self, shape, counts, probs, dtype=dtypes.int32, name=None):\n dtype = dtypes.as_dtype(dtype)\n with ops.name_scope(name, 'binomial', [shape, counts, probs]) as name:\n counts = ops.convert_to_tensor(counts, name='counts')\n probs = ops.convert_to_tensor(probs, name='probs')\n shape_tensor = _shape_tensor(shape)\n return gen_stateful_random_ops.stateful_random_binomial(self.state.handle, self.algorithm, shape=shape_tensor, counts=counts, probs=probs, dtype=dtype, name=name)", "docstring": "Outputs random values from a binomial distribution.\n\nThe generated values follow a binomial distribution with specified count and\nprobability of success parameters.\n\nExample:\n\n```python\ncounts = [10., 20.]\n# Probability of success.\nprobs = [0.8]\n\nrng = tf.random.Generator.from_seed(seed=234)\nbinomial_samples = rng.binomial(shape=[2], counts=counts, probs=probs)\n\n\ncounts = ... # Shape [3, 1, 2]\nprobs = ... 
# Shape [1, 4, 2]\nshape = [3, 4, 3, 4, 2]\nrng = tf.random.Generator.from_seed(seed=1717)\n# Sample shape will be [3, 4, 3, 4, 2]\nbinomial_samples = rng.binomial(shape=shape, counts=counts, probs=probs)\n```\n\n\nArgs:\n shape: A 1-D integer Tensor or Python array. The shape of the output\n tensor.\n counts: Tensor. The counts of the binomial distribution. Must be\n broadcastable with `probs`, and broadcastable with the rightmost\n dimensions of `shape`.\n probs: Tensor. The probability of success for the\n binomial distribution. Must be broadcastable with `counts` and\n broadcastable with the rightmost dimensions of `shape`.\n dtype: The type of the output. Default: tf.int32\n name: A name for the operation (optional).\n\nReturns:\n samples: A Tensor of the specified shape filled with random binomial\n values. For each i, each samples[i, ...] is an independent draw from\n the binomial distribution on counts[i] trials with probability of\n success probs[i]."} +{"repo": "pytype", "function": "def VisitNamedType(self, t):\n if t.name in self._module_map:\n if self._alias_name and '.' in self._alias_name:\n return pytd.Module(name=self._alias_name, module_name=t.name)\n else:\n return t\n module_name, dot, name = t.name.rpartition('.')\n if not dot or self._IsLocalName(module_name):\n return t\n if module_name in self._module_alias_map:\n module_name = self._module_alias_map[module_name]\n try:\n module, cls_prefix = self._LookupModuleRecursive(module_name)\n except KeyError:\n if self._unit and f'{self.name}.{module_name}' in self._unit:\n return t\n raise\n module_name = module.name\n if module_name == self.name:\n return t\n if cls_prefix:\n try:\n maybe_alias = pytd.LookupItemRecursive(module, cls_prefix[:-1])\n except KeyError:\n pass\n else:\n if isinstance(maybe_alias, pytd.Alias) and isinstance(maybe_alias.type, pytd.Module):\n if maybe_alias.type.module_name not in self._module_map:\n raise KeyError(f'{t.name} refers to unknown module {maybe_alias.name}')\n module = self._module_map[maybe_alias.type.module_name]\n cls_prefix = ''\n name = cls_prefix + name\n try:\n if name == '*':\n self._star_imports.add(module_name)\n item = t\n else:\n item = pytd.LookupItemRecursive(module, name)\n except KeyError as e:\n item = self._ResolveUsingGetattr(module_name, module)\n if item is None:\n item = self._ResolveUsingStarImport(module, name)\n if item is None:\n raise KeyError(f'No {name} in module {module_name}') from e\n if isinstance(item, pytd.Alias):\n lookup_local = LookupLocalTypes()\n lookup_local.unit = module\n new_item = item.Visit(lookup_local)\n if lookup_local.local_names:\n item = new_item\n if not self._in_generic_type and isinstance(item, pytd.Alias):\n item = MaybeSubstituteParameters(item.type) or item\n if isinstance(item, pytd.Constant) and item.name == 'typing_extensions.TypedDict':\n return self.to_type(pytd.NamedType('typing.TypedDict'))\n try:\n return self.to_type(item)\n except NotImplementedError as e:\n raise SymbolLookupError(f'{item} is not a type') from e", "docstring": "Try to look up a NamedType.\n\nArgs:\n t: An instance of pytd.NamedType\n\nReturns:\n The same node t.\nRaises:\n KeyError: If we can't find a module, or an identifier in a module, or\n if an identifier in a module isn't a class."} +{"repo": "transformers", "function": "def compute_token_logits(sequence_output, temperature, output_weights, output_bias):\n logits = (torch.einsum('bsj,j->bs', sequence_output, output_weights) + output_bias) / temperature\n return logits", "docstring": "Computes 
logits per token\n\nArgs:\n sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.\n temperature (`float`):\n Temperature for the Bernoulli distribution.\n output_weights (`torch.FloatTensor` of shape `(hidden_size,)`):\n Weights of the linear layer for cell selection.\n output_bias (`torch.FloatTensor` of shape `()`):\n Bias of the linear layer for cell selection\n\nReturns:\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token."} +{"repo": "transformers", "function": "def __init__(self, config, in_features, condition_dim, n_classes=256, bottleneck_factor=2):\n super().__init__()\n bottleneck = (in_features + condition_dim) // bottleneck_factor\n self.mlp = nn.Sequential(nn.Conv2d(in_features + condition_dim, bottleneck, kernel_size=1, stride=1, padding=0), nn.GELU(), nn.Conv2d(bottleneck, 2 + 2, kernel_size=1, stride=1, padding=0), nn.Softplus())\n self.p_eps = 0.0001\n self.max_temp = config.max_temp\n self.min_temp = config.min_temp\n self.log_binomial_transform = LogBinomialSoftmax(n_classes, act=torch.softmax)", "docstring": "Per-pixel MLP followed by a Conditional Log Binomial softmax.\n\nArgs:\n in_features (`int`):\n Number of input channels in the main feature.\n condition_dim (`int`):\n Number of input channels in the condition feature.\n n_classes (`int`, *optional*, defaults to 256):\n Number of classes.\n bottleneck_factor (`int`, *optional*, defaults to 2):\n Hidden dim factor."} +{"repo": "transformers", "function": "class FlavaConfig(PretrainedConfig):\n model_type = 'flava'\n sub_configs = {'text_config': FlavaTextConfig, 'image_config': FlavaImageConfig, 'multimodal_config': FlavaMultimodalConfig, 'image_codebook_config': FlavaImageCodebookConfig}\n\n def __init__(self, image_config: Optional[Dict[str, Any]]=None, text_config: Optional[Dict[str, Any]]=None, multimodal_config: Optional[Dict[str, Any]]=None, image_codebook_config: Optional[Dict[str, Any]]=None, hidden_size: int=768, layer_norm_eps: float=1e-12, projection_dim: int=768, init_codebook: bool=True, logit_scale_init_value: float=2.6592, initializer_range: float=0.02, ce_ignore_index: int=-100, mim_weight: float=1.0, mlm_weight: float=1.0, global_contrastive_weight: float=1.0, itm_weight: float=1.0, mmm_image_weight: float=1.0, mmm_text_weight: float=1.0, global_backprop_contrastive: bool=True, skip_unmasked_multimodal_encoder: bool=True, return_loss: bool=True, **kwargs):\n text_config_dict = kwargs.pop('text_config_dict', None)\n image_config_dict = kwargs.pop('image_config_dict', None)\n multimodal_config_dict = kwargs.pop('multimodal_config_dict', None)\n image_codebook_config_dict = kwargs.pop('image_codebook_config_dict', None)\n super().__init__(**kwargs)\n if text_config_dict is not None:\n if text_config is None:\n text_config = {}\n _text_config_dict = FlavaTextConfig(**text_config_dict).to_dict()\n for key, value in _text_config_dict.items():\n if key in text_config and value != text_config[key] and (key not in ['transformers_version']):\n if key in text_config_dict:\n message = f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. The value `text_config_dict[\"{key}\"]` will be used instead.'\n else:\n message = f'`text_config_dict` is provided which will be used to initialize `FlavaTextConfig`. 
The value `text_config[\"{key}\"]` will be overridden.'\n logger.info(message)\n text_config.update(_text_config_dict)\n if image_config_dict is not None:\n if image_config is None:\n image_config = {}\n _image_config_dict = FlavaImageConfig(**image_config_dict).to_dict()\n if 'id2label' in _image_config_dict:\n _image_config_dict['id2label'] = {str(key): value for key, value in _image_config_dict['id2label'].items()}\n for key, value in _image_config_dict.items():\n if key in image_config and value != image_config[key] and (key not in ['transformers_version']):\n if key in image_config_dict:\n message = f'`{key}` is found in both `image_config_dict` and `image_config` but with different values. The value `image_config_dict[\"{key}\"]` will be used instead.'\n else:\n message = f'`image_config_dict` is provided which will be used to initialize `FlavaImageConfig`. The value `image_config[\"{key}\"]` will be overridden.'\n logger.info(message)\n image_config.update(_image_config_dict)\n if multimodal_config_dict is not None:\n if multimodal_config is None:\n multimodal_config = {}\n _multimodal_config_dict = FlavaMultimodalConfig(**multimodal_config_dict).to_dict()\n for key, value in _multimodal_config_dict.items():\n if key in multimodal_config and value != multimodal_config[key] and (key not in ['transformers_version']):\n if key in multimodal_config_dict:\n message = f'`{key}` is found in both `multimodal_config_dict` and `multimodal_config` but with different values. The value `multimodal_config_dict[\"{key}\"]` will be used instead.'\n else:\n message = f'`multimodal_config_dict` is provided which will be used to initialize `FlavaMultimodalConfig`. The value `multimodal_config[\"{key}\"]` will be overridden.'\n logger.info(message)\n multimodal_config.update(_multimodal_config_dict)\n if image_codebook_config_dict is not None:\n if image_codebook_config is None:\n image_codebook_config = {}\n _image_codebook_config_dict = FlavaImageCodebookConfig(**image_codebook_config_dict).to_dict()\n for key, value in _image_codebook_config_dict.items():\n if key in image_codebook_config and value != image_codebook_config[key] and (key not in ['transformers_version']):\n if key in image_codebook_config_dict:\n message = f'`{key}` is found in both `image_codebook_config_dict` and `image_codebook_config` but with different values. The value `image_codebook_config_dict[\"{key}\"]` will be used instead.'\n else:\n message = f'`image_codebook_config_dict` is provided which will be used to initialize `FlavaImageCodebookConfig`. The value `image_codebook_config[\"{key}\"]` will be overridden.'\n logger.info(message)\n image_codebook_config.update(_image_codebook_config_dict)\n if image_config is None:\n image_config = {}\n logger.info('`image_config` is `None`. initializing the `FlavaImageConfig` with default values.')\n if text_config is None:\n text_config = {}\n logger.info('`text_config` is `None`. Initializing the `FlavaTextConfig` with default values.')\n if multimodal_config is None:\n multimodal_config = {}\n logger.info('`multimodal_config` is `None`. initializing the `FlavaMultimodalConfig` with default values.')\n if image_codebook_config is None:\n image_codebook_config = {}\n logger.info('`image_codebook_config` is `None`. 
initializing the `FlavaImageCodebookConfig` with default values.')\n self.image_config = FlavaImageConfig(**image_config)\n self.text_config = FlavaTextConfig(**text_config)\n self.multimodal_config = FlavaMultimodalConfig(**multimodal_config)\n self.image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config)\n self.projection_dim = projection_dim\n self.init_codebook = init_codebook\n self.hidden_size = hidden_size\n self.layer_norm_eps = layer_norm_eps\n self.initializer_range = initializer_range\n self.logit_scale_init_value = logit_scale_init_value\n self.initializer_factor = 1.0\n self.ce_ignore_index = ce_ignore_index\n self.mim_weight = mim_weight\n self.mlm_weight = mlm_weight\n self.global_contrastive_weight = global_contrastive_weight\n self.itm_weight = itm_weight\n self.mmm_image_weight = mmm_image_weight\n self.mmm_text_weight = mmm_text_weight\n self.global_backprop_contrastive = global_backprop_contrastive\n self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder\n self.return_loss = return_loss\n\n @classmethod\n def from_configs(cls, image_config: FlavaImageConfig, text_config: FlavaTextConfig, multimodal_config: FlavaMultimodalConfig, image_codebook_config: FlavaImageCodebookConfig, **kwargs):\n \"\"\"\n Instantiate a [`FlavaConfig`] (or a derived class) from flava text model configuration, flava image model\n configuration, flava multimodal model and flava codebook model configuration.\n\n Returns:\n [`FlavaConfig`]: An instance of a configuration object\n \"\"\"\n return cls(image_config=image_config.to_dict(), text_config=text_config.to_dict(), multimodal_config=multimodal_config.to_dict(), image_codebook_config=image_codebook_config.to_dict(), **kwargs)", "docstring": "[`FlavaConfig`] is the configuration class to store the configuration of a [`FlavaModel`]. It is used to\ninstantiate a FLAVA model according to the specified arguments, defining the text model, image model, image codebook\nand multimodal model configs. Instantiating a configuration with the defaults will yield a similar configuration to\nthat of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`FlavaTextConfig`].\n image_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`FlavaImageConfig`].\n multimodal_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`FlavaMultimodalConfig`].\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n projection_dim (`int`, *optional*, defaults to 768):\n Dimensionality of text and image projection layers.\n logit_scale_init_value (`float`, *optional*, defaults to 2.6592):\n The initial value of the *logit_scale* parameter.
Default is used as per the original FLAVA/CLIP\n implementation.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n ce_ignore_index (`int`, *optional*, defaults to -100):\n Cross entropy index to ignore.\n mim_weight (`float`, *optional*, defaults to 1.0):\n Weight to be assigned to MIM (Masked Image Modeling) unimodal loss\n mlm_weight (`float`, *optional*, defaults to 1.0):\n Weight to be assigned to MLM (Masked Language Modeling) unimodal loss\n global_contrastive_weight (`float`, *optional*, defaults to 1.0):\n Weight to be assigned to global contrastive cross-alignment loss.\n itm_weight (`float`, *optional*, defaults to 1.0):\n Weight to be assigned to image-text matching multimodal loss.\n mmm_image_weight (`float`, *optional*, defaults to 1.0):\n Weight to be assigned to MMM loss's image part.\n mmm_text_weight (`float`, *optional*, defaults to 1.0):\n Weight to be assigned to MMM loss's text part.\n global_backprop_contrastive (`bool`, *optional*, defaults to `True`):\n Whether to use global backpropagation through all workers in contrastive loss.\n skip_unmasked_multimodal_encoder (`bool`, *optional*, defaults to `True`):\n Whether to skip running unmasked multimodal encoder whose outputs are not used by FLAVA losses.\n return_loss (`bool`, *optional*, defaults to `True`):\n Whether to return loss or not\n\n kwargs (*optional*):\n Dictionary of keyword arguments.\n\nExample:\n\n```python\n>>> from transformers import FlavaConfig, FlavaModel, FlavaForPreTraining\n\n>>> # Initializing a FlavaConfig with style configuration\n>>> configuration = FlavaConfig()\n\n>>> # Initializing a FlavaModel and FlavaForPreTraining model (with random weights) from the style configuration\n>>> model = FlavaModel(configuration)\n>>> model_pre = FlavaForPreTraining(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n>>> configuration_pre = model_pre.config\n```"} +{"repo": "keras", "function": "def assign(self, variable, value):\n variable.assign(value)", "docstring": "Assign a value to a variable.\n\nThis should be used in optimizers instead of `variable.assign(value)` to\nsupport backend specific optimizations.\nNote that the variable can be a model variable or an optimizer variable;\nit can be a backend native variable or a Keras variable.\n\nArgs:\n variable: The variable to update.\n value: The value to assign to the variable."} +{"repo": "transformers", "function": "class FlaxMaskedLMOutput(ModelOutput):\n logits: Optional[jnp.ndarray] = None\n hidden_states: Optional[Tuple[jnp.ndarray]] = None\n attentions: Optional[Tuple[jnp.ndarray]] = None", "docstring": "Base class for masked language models outputs.\n\nArgs:\n logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `jnp.ndarray`
(one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "transformers", "function": "class Glm4Config(PretrainedConfig):\n model_type = 'glm4'\n keys_to_ignore_at_inference = ['past_key_values']\n base_model_tp_plan = {'layers.*.self_attn.q_proj': 'colwise', 'layers.*.self_attn.k_proj': 'colwise', 'layers.*.self_attn.v_proj': 'colwise', 'layers.*.self_attn.o_proj': 'rowwise', 'layers.*.mlp.gate_up_proj': 'colwise_rep', 'layers.*.mlp.down_proj': 'rowwise_rep'}\n base_model_pp_plan = {'embed_tokens': (['input_ids'], ['inputs_embeds']), 'layers': (['hidden_states', 'attention_mask'], ['hidden_states']), 'norm': (['hidden_states'], ['hidden_states'])}\n\n def __init__(self, vocab_size=151552, hidden_size=4096, intermediate_size=13696, num_hidden_layers=40, num_attention_heads=32, num_key_value_heads=2, partial_rotary_factor=0.5, head_dim=128, hidden_act='silu', attention_dropout=0.0, max_position_embeddings=131072, initializer_range=0.02, rms_norm_eps=1.5625e-07, use_cache=True, tie_word_embeddings=False, rope_theta=10000.0, pad_token_id=151329, eos_token_id=[151329, 151336, 151338], bos_token_id=None, attention_bias=True, **kwargs):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.partial_rotary_factor = partial_rotary_factor\n self.head_dim = head_dim\n self.num_key_value_heads = num_key_value_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.use_cache = use_cache\n self.rope_theta = rope_theta\n self.attention_bias = attention_bias\n self.attention_dropout = attention_dropout\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`Glm4Model`]. It is used to instantiate an Glm4\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the Glm4-4-9b-chat.\ne.g. [THUDM/GLM-4-9B-0414](https://huggingface.co/THUDM/GLM-4-9B-0414)\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\nArgs:\n vocab_size (`int`, *optional*, defaults to 151552):\n Vocabulary size of the Glm4 model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`Glm4Model`]\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 13696):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 40):\n Number of hidden layers in the Transformer decoder.\n num_attention_heads (`int`, *optional*, defaults to 32):\n Number of attention heads for each attention layer in the Transformer decoder.\n num_key_value_heads (`int`, *optional*, defaults to 2):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. 
If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details, check out [this\n paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to\n `num_attention_heads`.\n partial_rotary_factor (`float`, *optional*, defaults to 0.5): The factor of the partial rotary position.\n head_dim (`int`, *optional*, defaults to 128):\n The attention head dimension.\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The legacy activation function. It is overwritten by the `hidden_activation`.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n max_position_embeddings (`int`, *optional*, defaults to 131072):\n The maximum sequence length that this model might ever be used with.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1.5625e-07):\n The epsilon used by the rms normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n tie_word_embeddings (`bool`, *optional*, defaults to `False`):\n Whether to tie weight embeddings\n rope_theta (`float`, *optional*, defaults to 10000.0):\n The base period of the RoPE embeddings.\n pad_token_id (`int`, *optional*, defaults to 151329):\n Padding token id.\n eos_token_id (`int` | `list`, *optional*, defaults to `[151329, 151336, 151338]`):\n End of stream token id.\n bos_token_id (`int`, *optional*):\n Beginning of stream token id.\n attention_bias (`bool`, defaults to `False`, *optional*, defaults to `True`):\n Whether to use a bias in the query, key, value and output projection layers during self-attention.\n```python\n>>> from transformers import Glm4Model, Glm4Config\n>>> # Initializing a Glm4 glm4-4-9b-chat style configuration\n>>> configuration = Glm4Config()\n>>> # Initializing a model from the glm4-4-9b-chat style configuration\n>>> model = Glm4Model(configuration)\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "keras", "function": "def predict_on_batch(self, x):\n raise NotImplementedError", "docstring": "Returns predictions for a single batch of samples.\n\nArgs:\n x: Input data. 
It must be array-like.\n\nReturns:\n NumPy array(s) of predictions."} +{"repo": "tensorflow", "function": "def ensure_graph_is_valid(graph_def: graph_pb2.GraphDef) -> None:\n node_map = {}\n for node in graph_def.node:\n if node.name not in node_map:\n node_map[node.name] = node\n else:\n raise ValueError('Duplicate node names detected for ', node.name)\n for node in graph_def.node:\n for input_name in node.input:\n input_node_name = node_name_from_input(input_name)\n if input_node_name not in node_map:\n raise ValueError('Input for ', node.name, ' not found: ', input_name)", "docstring": "Makes sure that the graph is internally consistent.\n\nChecks basic properties of the graph def and raises an exception if there are\ninput references to missing nodes, duplicated names, or other logic errors.\n\nArgs:\n graph_def: Definition of a graph to be checked.\n\nRaises:\n ValueError: If the graph is incorrectly constructed."} +{"repo": "tensorflow", "function": "def _check_trt_version_compatibility():\n if not _pywrap_py_utils.is_tensorrt_enabled():\n logging.error('Tensorflow needs to be built with TensorRT support enabled to allow TF-TRT to operate.')\n raise RuntimeError('Tensorflow has not been built with TensorRT support.')\n if platform.system() == 'Windows':\n logging.warn('Windows support is provided experimentally. No guarantee is made regarding functionality or engineering support. Use at your own risk.')\n linked_version = _pywrap_py_utils.get_linked_tensorrt_version()\n loaded_version = _pywrap_py_utils.get_loaded_tensorrt_version()\n logging.info('Linked TensorRT version: %s', str(linked_version))\n logging.info('Loaded TensorRT version: %s', str(loaded_version))\n\n def raise_trt_version_deprecated(version_type, trt_version):\n assert version_type in ['linked', 'loaded'], \"Incorrect value received for version_type: %s. Accepted: ['linked', 'loaded']\" % version_type\n logging.error('The {version_type} version of TensorRT: `{trt_version}` has now been removed. Please upgrade to TensorRT 7 or more recent.'.format(version_type=version_type, trt_version=trt_utils.version_tuple_to_string(trt_version)))\n raise RuntimeError('Incompatible %s TensorRT versions' % version_type)\n if not trt_utils.is_linked_tensorrt_version_greater_equal(7, 0, 0):\n raise_trt_version_deprecated('linked', linked_version)\n if not trt_utils.is_loaded_tensorrt_version_greater_equal(7, 0, 0):\n raise_trt_version_deprecated('loaded', loaded_version)\n if loaded_version[0] != linked_version[0] or not trt_utils.is_loaded_tensorrt_version_greater_equal(*linked_version):\n logging.error('Loaded TensorRT %s but linked TensorFlow against TensorRT %s. A few requirements must be met:\\n\\t-It is required to use the same major version of TensorRT during compilation and runtime.\\n\\t-TensorRT does not support forward compatibility. The loaded version has to be equal or more recent than the linked version.', trt_utils.version_tuple_to_string(loaded_version), trt_utils.version_tuple_to_string(linked_version))\n raise RuntimeError('Incompatible TensorRT major version')\n elif loaded_version != linked_version:\n logging.info('Loaded TensorRT %s and linked TensorFlow against TensorRT %s. 
This is supported because TensorRT minor/patch upgrades are backward compatible.', trt_utils.version_tuple_to_string(loaded_version), trt_utils.version_tuple_to_string(linked_version))", "docstring": "Check compatibility of TensorRT version.\n\nRaises:\n RuntimeError: if the TensorRT library version is incompatible."} +{"repo": "tensorflow", "function": "def moments(x, axes, shift=None, name=None, keep_dims=None, keepdims=None):\n keep_dims = deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims)\n if keep_dims is None:\n keep_dims = False\n with ops.name_scope(name, 'moments', [x, axes]):\n y = math_ops.cast(x, dtypes.float32) if x.dtype == dtypes.float16 else x\n mean = math_ops.reduce_mean(y, axes, keepdims=True, name='mean')\n variance = math_ops.reduce_mean(math_ops.squared_difference(y, array_ops.stop_gradient(mean)), axes, keepdims=True, name='variance')\n if not keep_dims:\n mean = array_ops.squeeze(mean, axes)\n variance = array_ops.squeeze(variance, axes)\n if x.dtype == dtypes.float16:\n return (math_ops.cast(mean, dtypes.float16), math_ops.cast(variance, dtypes.float16))\n else:\n return (mean, variance)", "docstring": "Calculate the mean and variance of `x`.\n\nThe mean and variance are calculated by aggregating the contents of `x`\nacross `axes`. If `x` is 1-D and `axes = [0]` this is just the mean\nand variance of a vector.\n\nNote: shift is currently not used; the true mean is computed and used.\n\nWhen using these moments for batch normalization (see\n`tf.nn.batch_normalization`):\n\n * for so-called \"global normalization\", used with convolutional filters with\n shape `[batch, height, width, depth]`, pass `axes=[0, 1, 2]`.\n * for simple batch normalization pass `axes=[0]` (batch only).\n\nArgs:\n x: A `Tensor`.\n axes: Array of ints. 
Axes along which to compute mean and\n variance.\n shift: Not used in the current implementation\n name: Name used to scope the operations that compute the moments.\n keep_dims: produce moments with the same dimensionality as the input.\n keepdims: Alias to keep_dims.\n\nReturns:\n Two `Tensor` objects: `mean` and `variance`."} +{"repo": "transformers", "function": "def get_modified_python_files(diff_with_last_commit: bool=False) -> List[str]:\n repo = Repo(PATH_TO_REPO)\n if not diff_with_last_commit:\n print(f'main is at {repo.refs.main.commit}')\n print(f'Current head is at {repo.head.commit}')\n branching_commits = repo.merge_base(repo.refs.main, repo.head)\n for commit in branching_commits:\n print(f'Branching commit: {commit}')\n return get_diff(repo, repo.head.commit, branching_commits)\n else:\n print(f'main is at {repo.head.commit}')\n parent_commits = repo.head.commit.parents\n for commit in parent_commits:\n print(f'Parent commit: {commit}')\n return get_diff(repo, repo.head.commit, parent_commits)", "docstring": "Return a list of python files that have been modified between:\n\n- the current head and the main branch if `diff_with_last_commit=False` (default)\n- the current head and its parent commit otherwise.\n\nReturns:\n `List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files\n modified are returned if the diff in the file is not only in docstrings or comments, see\n `diff_is_docstring_only`)."} +{"repo": "tensorflow", "function": "def get_config(self):\n config = {'name': self._name}\n if self.clipnorm is not None:\n config['clipnorm'] = self.clipnorm\n if self.clipvalue is not None:\n config['clipvalue'] = self.clipvalue\n if self.global_clipnorm is not None:\n config['global_clipnorm'] = self.global_clipnorm\n return config", "docstring": "Returns the config of the optimizer.\n\nAn optimizer config is a Python dictionary (serializable)\ncontaining the configuration of an optimizer.\nThe same optimizer can be reinstantiated later\n(without any saved state) from this configuration.\n\nReturns:\n Python dictionary."} +{"repo": "nsscache", "function": "def SetModifyTimestamp(self, value):\n if value is None or isinstance(value, int):\n self._last_modification_timestamp = value\n else:\n raise TypeError('timestamp can only be int or None, not %r' % value)", "docstring": "Set the last modify timestamp of this map.\n\nArgs:\n value: An integer containing the number of seconds since epoch, or None.\n\nRaises:\n TypeError: The argument is not an int or None."} +{"repo": "beam", "function": "def delete(self, request):\n try:\n self.client.delete_object(Bucket=request.bucket, Key=request.object)\n except Exception as e:\n raise messages.S3ClientError(str(e), get_http_error_code(e))", "docstring": "Deletes given object from bucket\nArgs:\n request: (DeleteRequest) input message\n Returns:\n (void) Void, otherwise will raise if an error occurs"} +{"repo": "tensorflow", "function": "def join(self, timeout=_DEFAULT_TIMEOUT_SEC):\n if timeout and (not isinstance(timeout, int)):\n raise ValueError('`timeout` must be an integer or `None`.')\n with self._process_lock:\n if self._joined:\n raise ValueError(\"MultiProcessRunner can't be joined twice.\")\n self._joined = True\n self._watchdog_thread.join(timeout)\n if self._watchdog_thread.is_alive():\n with self._process_lock:\n self._auto_restart = False\n logging.error('Timeout when joining for child processes. 
Terminating...')\n self.terminate_all(sig=signal.SIGTERM)\n self._watchdog_thread.join(_FORCE_KILL_WAIT_SEC)\n if self._watchdog_thread.is_alive():\n logging.error('Timeout when waiting for child processes to print stacktrace. Sending SIGKILL...')\n self.terminate_all()\n self._watchdog_thread.join()\n process_statuses = self._get_process_statuses()\n self._reraise_if_subprocess_error(process_statuses)\n raise SubprocessTimeoutError('One or more subprocesses timed out, where timeout was set to {}s. Please change the `timeout` argument for `MultiProcessRunner.join()` or `multi_process_runner.run()` if it should be adjusted.'.format(timeout), self._get_mpr_result(process_statuses))\n for (task_type, task_id), p in self._processes.items():\n logging.info('%s-%d exit code: %s', task_type, task_id, p.exitcode)\n process_statuses = self._get_process_statuses()\n self._reraise_if_subprocess_error(process_statuses)\n for (task_type, task_id), p in self._processes.items():\n assert p.exitcode is not None\n if p.exitcode > 0 and (task_type, task_id) not in self._terminated:\n raise UnexpectedSubprocessExitError('Subprocess %s-%d exited with exit code %s. See logs for details.' % (task_type, task_id, p.exitcode), self._get_mpr_result(process_statuses))\n logging.info('Joining log reading threads.')\n for thread in self._reading_threads:\n thread.join()\n logging.info('Joined log reading threads.')\n signal.alarm(0)\n return self._get_mpr_result(process_statuses)", "docstring": "Joins all the processes with timeout.\n\nIf any of the subprocesses does not exit approximately after `timeout`\nseconds has passed after `join` call, this raises a\n`SubprocessTimeoutError`.\n\nNote: At timeout, it uses SIGTERM to terminate the subprocesses, in order to\nlog the stack traces of the subprocesses when they exit. However, this\nresults in timeout when the test runs with tsan (thread sanitizer); if tsan\nis being run on the test targets that rely on timeout to assert information,\n`MultiProcessRunner.terminate_all()` must be called after `join()`, before\nthe test exits, so the subprocesses are terminated with SIGKILL, and data\nrace is removed.\n\nArgs:\n timeout: optional integer or `None`. If provided as an integer, and not\n all processes report status within roughly `timeout` seconds, a\n `SubprocessTimeoutError` exception will be raised. If `None`, `join` never\n times out.\n\nReturns:\n A `MultiProcessRunnerResult` object, which has two attributes,\n `return_value` and `stdout`. `return_value` always contains a list of\n return values from the subprocesses, although the order is not meaningful.\n If `return_output` argument is True at `__init__`, `stdout` is available\n that contains a list of all messages from subprocesses' stdout and stderr.\n\nRaises:\n SubprocessTimeoutError: if not all processes report status approximately\n within `timeout` seconds. When this is raised, a\n `MultiProcessRunnerResult` object can be retrieved by\n `SubprocessTimeoutError`'s mpr_result attribute, which has the same\n structure as above 'Returns' section describes.\n UnexpectedSubprocessExitError: If any of the subprocesses did not exit\n properly (for example, they exit on SIGTERM or SIGKILL signal). When\n this is raised, a `MultiProcessRunnerResult` object can be retrieved by\n `UnexpectedSubprocessExitError`'s mpr_result attribute, which has the\n same structure as above 'Returns' section describes. 
If `max_run_time`\n is not `None`, it is expected that some subprocesses may be\n force-killed when `max_run_time` is up, and this is raised in those\n cases.\n Exception: if there is an Exception propagated from any subprocess. When\n this is raised, a `MultiProcessRunnerResult` object can be retrieved by\n `UnexpectedSubprocessExitError`'s mpr_result attribute, which has the\n same structure as above 'Returns' section describes."} +{"repo": "tensorflow", "function": "def _create_variable(self, *args, **kwargs):\n with ops.name_scope('random_generator'):\n kwargs['name'] = 'StateVar'\n v = variables.Variable(*args, **kwargs)\n if isinstance(v, sharded_variable.ShardedVariable):\n raise ValueError(\"tf.random.Generator state is sharded, which is not allowed. When creating a tf.distribute.experimental.ParameterServerStrategy, please make sure that the `variable_partitioner` argument won't shard a small variable of shape [2] or [3]. Ways to avoid sharding small variables include setting `variable_partitioner` to None or to tf.distribute.experimental.partitioners.MinSizePartitioner with a large enough `min_shard_bytes`.\")\n return v", "docstring": "Creates a variable.\n\nArgs:\n *args: positional arguments passed along to `variables.Variable.\n **kwargs: keyword arguments passed along to `variables.Variable.\n\nReturns:\n The created variable."} +{"repo": "tensorflow", "function": "def split_compile_and_replicate(computation: Callable[..., Any], inputs: Optional[List[List[core_types.Tensor]]]=None, infeed_queue: Optional[tpu_feed.InfeedQueue]=None, device_assignment: Optional[device_assignment_lib.DeviceAssignment]=None, name: Optional[Text]=None, use_tpu: bool=True, maximum_shapes: Optional[Any]=None, padding_spec: Optional[PaddingSpec]=None, xla_options: Optional[XLAOptions]=None) -> List[List[core_types.Tensor]]:\n del name\n inputs = [[]] if inputs is None else inputs\n xla_options = xla_options or XLAOptions()\n metadata_kwargs = {}\n if device_assignment is not None:\n metadata_kwargs = {'topology': device_assignment.topology.serialized(), 'device_assignment': device_assignment.core_assignment.flatten().tolist()}\n metadata_kwargs['num_cores_per_replica'] = device_assignment.num_cores_per_replica\n metadata_kwargs['allow_soft_placement'] = config.get_soft_device_placement()\n if config.get_soft_device_placement():\n logging.info('Automatic outside compilation is enabled. 
Ops without XLA kernels will be automatically placed on CPU.')\n if not isinstance(inputs, list):\n raise TypeError(f'tpu.replicate() inputs must be a list of lists/tuples, received {type(inputs)}')\n if any((not isinstance(inp, (list, tuple)) for inp in inputs)):\n raise TypeError(f'tpu.replicate() inputs must be a list of lists/tuples, received types: {[type(inp) for inp in inputs]}')\n num_replicas = len(inputs)\n if num_replicas == 0:\n return []\n for i in range(1, num_replicas):\n nest.assert_same_structure(inputs[0], inputs[i])\n inputs = variable_utils.convert_variables_to_tensors(inputs)\n flat_inputs_with_nones = [nest.flatten(per_replica_input, expand_composites=True) for per_replica_input in inputs]\n is_composite = nest.flatten(nest.map_structure(lambda x: _flatten_and_filter_composite(x, False, True), inputs[0]))\n flat_inputs = []\n for inp in flat_inputs_with_nones:\n flat_inputs.append([constant_op.constant(0) if x is None else ops.convert_to_tensor(x) for x in inp])\n flat_input_types = [x.dtype for x in flat_inputs[0]]\n input_arity = len(inputs[0])\n flat_input_arity = len(flat_input_types)\n for i in range(num_replicas):\n if len(inputs[i]) != input_arity:\n raise ValueError('Replicas must have the same number of inputs. Replica 0 had {} inputs, replica {} had {} inputs.'.format(input_arity, i, len(inputs[i])))\n types = [x.dtype for x in flat_inputs[i]]\n if types != flat_input_types:\n raise ValueError('Replicas must have matching input types. Replica 0 had input types {}, replica {} had input types {}'.format(flat_input_types, i, types))\n arg_error = xla.check_function_argument_count(computation, input_arity, infeed_queue)\n if arg_error is not None:\n if infeed_queue is None:\n raise TypeError(f'Supplied computation cannot be called with the specified inputs. You specified {input_arity} inputs: {[i.name for i in inputs[0]]}, but the computation needs {arg_error}')\n else:\n raise TypeError(f'Supplied computation cannot be called with the specified inputs. 
You specified {input_arity} inputs: {[i.name for i in inputs[0]]} ', f'and {infeed_queue.number_of_tuple_elements} additional inputs from infeed, but the computation needs {arg_error}')\n dynamic_shape_inputs = False\n if maximum_shapes:\n if infeed_queue:\n raise ValueError('Dynamic input shapes are not supported with infeed queues')\n nest.assert_same_structure(inputs[0], maximum_shapes, check_types=False)\n flat_maximum_shapes = nest.flatten([_flatten_and_filter_composite(x, y) for x, y in zip(nest.flatten(inputs[0]), nest.flatten(maximum_shapes))])\n flat_maximum_shapes = [tensor_shape.TensorShape(s) if s is not None else None for s in flat_maximum_shapes]\n nest.assert_same_structure(flat_inputs[0], flat_maximum_shapes, check_types=False)\n unpadded_inputs = flat_inputs\n flat_inputs, padding_maps = _pad_all_input(unpadded_inputs, flat_maximum_shapes, padding_spec)\n if padding_maps:\n dynamic_shape_inputs = True\n logging.info('TPU has inputs with dynamic shapes: %s', inputs[0])\n metadata_kwargs['step_marker_location'] = getattr(computation, 'step_marker_location', 'STEP_MARK_AT_ENTRY')\n metadata_kwargs['use_spmd_for_xla_partitioning'] = xla_options.use_spmd_for_xla_partitioning\n graph = ops.get_default_graph()\n flat_replicated_inputs = []\n for i in range(0, len(flat_inputs[0])):\n replicas = [flat_inputs[replica][i] for replica in range(num_replicas)]\n flat_replicated_inputs.append(tpu_ops.tpu_replicated_input(replicas, name='input{}'.format(i)))\n if isinstance(graph, func_graph.FuncGraph):\n cluster_name = graph.unique_name('cluster_' + graph.name)\n else:\n cluster_name = graph.unique_name('cluster')\n pivot = control_flow_ops.no_op(name=cluster_name + '/pivot')\n pivot._set_attr(_PIVOT_FOR_CLUSTER, attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name)))\n context = tpu_replication.TPUReplicateContext(name=cluster_name, num_replicas=num_replicas, pivot=pivot)\n try:\n context.Enter()\n metadata = tpu_ops.tpu_replicate_metadata(num_replicas=num_replicas, use_tpu=use_tpu, **metadata_kwargs)\n with tpu_function.tpu_shard_context(num_replicas), ops.control_dependencies([metadata]):\n if dynamic_shape_inputs and xla_options.enable_xla_dynamic_padder:\n for padding_map in padding_maps:\n input_shape = flat_replicated_inputs[padding_map.arg_index].shape\n flat_replicated_inputs[padding_map.arg_index] = tf2xla.set_dynamic_dimension_size(flat_replicated_inputs[padding_map.arg_index], padding_map.shape_index, flat_replicated_inputs[padding_map.padding_arg_index])\n flat_replicated_inputs[padding_map.arg_index].set_shape(input_shape)\n flat_replicated_inputs = [array_ops.identity(x, name='replicated_input_{}'.format(i)) for i, x in enumerate(flat_replicated_inputs)]\n for i, composite in zip(flat_replicated_inputs, is_composite):\n if not dynamic_shape_inputs or composite:\n i.op._set_attr('_tpu_input_identity', attr_value_pb2.AttrValue(b=True))\n computation_inputs = [None if inp is None else replicated for replicated, inp in zip(flat_replicated_inputs, flat_inputs_with_nones[0])]\n computation_inputs = nest.pack_sequence_as(structure=inputs[0], flat_sequence=computation_inputs[:flat_input_arity], expand_composites=True)\n if infeed_queue is not None:\n infeed_queue.set_number_of_shards(num_replicas)\n for t in infeed_queue.generate_dequeue_op():\n computation_inputs.append(t)\n vscope = variable_scope.get_variable_scope()\n saved_use_resource = vscope.use_resource\n saved_custom_getter = vscope.custom_getter\n\n def custom_getter(getter, name, *args, **kwargs):\n \"\"\"Variables 
on TPU have a few restrictions.\"\"\"\n partitioner = kwargs.get('partitioner', None)\n if partitioner is not None:\n kwargs['partitioner'] = None\n logging.warning('Partitioned variables are not supported on TPU. Got `partitioner` that is %s for variable %s. Setting `partitioner` to `None`.', partitioner, name)\n if saved_custom_getter is None:\n return getter(name, *args, **kwargs)\n else:\n return saved_custom_getter(getter, name, *args, **kwargs)\n vscope.set_use_resource(True)\n vscope.set_custom_getter(custom_getter)\n outputs = computation(*computation_inputs)\n vscope.set_use_resource(saved_use_resource)\n vscope.set_custom_getter(saved_custom_getter)\n outputs = variable_utils.convert_variables_to_tensors(outputs)\n need_spmd_partitioning = xla_options.use_spmd_for_xla_partitioning and device_assignment is not None and (device_assignment.num_cores_per_replica > 1)\n outputs_is_flat = xla.is_flat(outputs)\n if outputs_is_flat:\n output_tensors, control_deps, pack_template = _postprocess_flat_outputs(outputs, need_spmd_partitioning)\n else:\n output_tensors, control_deps, pack_template = _postprocess_non_flat_outputs(outputs, need_spmd_partitioning)\n if tensor_tracer.TensorTracer.is_enabled():\n if tf2.enabled():\n logging.warn('TF API ver >= 2.0 detected. Tensor Tracer v1 is not enabled.')\n else:\n tt = tensor_tracer.TensorTracer()\n output_tensors = tt.trace_tpu(ops.get_default_graph(), output_tensors, control_deps, num_replicas)\n context.ExitResult(output_tensors)\n finally:\n context.report_unsupported_operations()\n context.Exit()\n host_compute_core = context.HostComputeCore()\n if host_compute_core:\n attr_value = attr_value_pb2.AttrValue()\n attr_value.list.s.extend((compat.as_bytes(x) for x in host_compute_core))\n metadata._set_attr('host_compute_core', attr_value)\n with ops.control_dependencies([metadata]):\n if use_tpu:\n compile_status = tpu_ops.tpu_compilation_result()\n op = compile_status.op\n attr_value = attr_value_pb2.AttrValue(s=compat.as_bytes(cluster_name))\n op._set_attr(_TPU_COMPILATION_STATUS_ATTR, attr_value)\n else:\n compile_status = control_flow_ops.no_op(name='compilation_status')\n if not output_tensors:\n return [compile_status, [control_flow_ops.group(control_deps, name='shard_%d' % i) for i in range(num_replicas)]]\n replicated_outputs = [[] for i in range(num_replicas)]\n for i, t in enumerate(output_tensors):\n if t is None:\n for replica in range(num_replicas):\n replicated_outputs[replica].append(None)\n continue\n ys = tpu_ops.tpu_replicated_output(t, num_replicas, name='output{}'.format(i))\n with ops.control_dependencies(control_deps):\n for replica in range(num_replicas):\n replicated_outputs[replica].append(array_ops.identity(ys[replica], name='output_%d_shard_%d' % (i, replica)))\n replicated_outputs = [nest.pack_sequence_as(pack_template, replica_outs, expand_composites=True) for replica_outs in replicated_outputs]\n return [compile_status, replicated_outputs]", "docstring": "Builds graph operators that runs compilation and replicated computation.\n\nThis is a lower level interface than replicate that returns a separate compile\nand execute output tensor. In the generated graph the compile op feeds into\nthe execute op and no additional compilation is incurred when running the\ncompile op before the execute op. 
The compile op returns additional\ninformation about the compilation but does not return the compiled program.\n\nArgs:\n computation: A Python function that builds the computation to replicate.\n inputs: A list of lists of input tensors or `None` (equivalent to\n `[[]]`), indexed by `[replica_num][input_num]`. All replicas must\n have the same number of inputs. Each input can be a nested structure\n containing values that are convertible to tensors. Note that passing an\n N-dimension list of compatible values will result in an N-dimension list of\n scalar tensors rather than a single Rank-N tensor. If you need different\n behavior, convert part of inputs to tensors with `tf.convert_to_tensor`.\n infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple\n of arguments as inputs to computation.\n device_assignment: If not `None`, a `DeviceAssignment` describing the\n mapping between logical cores in the computation and physical cores in\n the TPU topology. Uses a default device assignment if `None`. The\n `DeviceAssignment` may be omitted if each replica of the computation uses\n only one core, and there is either only one replica, or the number of\n replicas is equal to the number of cores in the TPU system.\n name: (Deprecated) Does nothing.\n use_tpu: When false, the input `computation` is executed on the XLA CPU/GPU\n backends. Currently, only supports a default placement (computation is\n placed on GPU if one is available, and on CPU if not).\n maximum_shapes: A nested structure of tf.TensorShape representing the shape\n to which the respective component of each input element in each replica\n should be padded. Any unknown dimensions (e.g.\n tf.compat.v1.Dimension(None) in a tf.TensorShape or -1 in a tensor-like\n object) will be padded to the maximum size of that dimension over all\n replicas. The structure of `maximum_shapes` needs to be the same as\n `inputs[0]`.\n padding_spec: An enum specified by `tf.tpu.PaddingSpec`. This describes the\n padding policy when the `inputs` to `tf.tpu.replicate` is dynamic.\n One usage is to enable automatic bucketizing on the inputs by setting the\n value to `tpu.PaddingSpec.POWER_OF_TWO`, which can help to reduce the\n recompilation in the XLA side.\n xla_options: An instance of `tpu.XLAOptions` which indicates the options\n passed to XLA compiler.
Use `None` for default options.\n\nReturns:\n A list of lists with the first list corresponding to the compile op and the\n second a list of output tensors, indexed by `[replica_num][output_num]`.\nRaises:\n ValueError: If all replicas do not have equal numbers of input tensors.\n ValueError: If the number of inputs per replica does not match\n the number of formal parameters to `computation`.\n ValueError: If the static `inputs` dimensions don't match with the values\n given in `maximum_shapes`.\n ValueError: If the structure of inputs per replica does not match\n the structure of `maximum_shapes`."} +{"repo": "beam", "function": "class RunThresholdCriterion(beam.PTransform[beam.PCollection[NestedKeyedOutputT], beam.PCollection[NestedKeyedOutputT]]):\n\n def __init__(self, threshold_criterion: ThresholdFn):\n self._threshold_fn = threshold_criterion\n\n def expand(self, input: beam.PCollection[NestedKeyedOutputT]) -> beam.PCollection[NestedKeyedOutputT]:\n if self._threshold_fn.is_stateful:\n return input | beam.ParDo(_StatefulThresholdDoFn(self._threshold_fn.to_spec()))\n else:\n return input | beam.ParDo(_StatelessThresholdDoFn(self._threshold_fn.to_spec()))", "docstring": "Applies a threshold criterion to anomaly detection results.\n\nThis PTransform applies a `ThresholdFn` to the anomaly scores in\n`AnomalyResult` objects, updating the prediction labels. It handles both\nstateful and stateless `ThresholdFn` implementations.\n\nArgs:\n threshold_criterion: The `ThresholdFn` to apply."} +{"repo": "transformers", "function": "class GPTSanJapaneseLayerSparseFF(nn.Module):\n\n def __init__(self, config: GPTSanJapaneseConfig):\n super().__init__()\n self.mlp = GPTSanJapaneseSparseMLP(config)\n self.soft_bypass_mlp = nn.Linear(config.d_model, config.d_model, bias=False)\n self.norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_epsilon)\n\n def forward(self, hidden_states, output_router_logits):\n \"\"\"\n Args:\n hidden_states (`torch.Tensor`) :\n [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.\n output_router_logits (`bool`) :\n output experts router output.\n Returns:\n torch.Tensor[num_groups, tokens_per_group, hidden_dim]\n\n \"\"\"\n forwarded_states, router_tuple = self.mlp(hidden_states)\n forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states))\n output = hidden_states + self.norm(forwarded_states)\n if output_router_logits and router_tuple is not None:\n return (output, router_tuple)\n else:\n return output", "docstring": "Switch Transformers Feed Forward layer module. This is a wrapper around the Mixture of Experts module.\n\nParameters:\n config : ([`GPTSanJapaneseConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights."} +{"repo": "tensorflow", "function": "def is_scalar_batch(self, name='is_scalar_batch'):\n with self._name_scope(name):\n return ops.convert_to_tensor(self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor), name='is_scalar_batch')", "docstring": "Indicates that `batch_shape == []`.\n\nArgs:\n name: Python `str` prepended to names of ops created by this function.\n\nReturns:\n is_scalar_batch: `bool` scalar `Tensor`."} +{"repo": "tensorflow", "function": "def _pop(self, key, indices=None, name=None):\n if name is None:\n name = '%s_get' % self._name\n indices, dtypes = self._get_indices_and_dtypes(indices)\n with ops.colocate_with(self._coloc_op):\n result = self._pop_fn(key, shared_name=self._name, indices=indices, dtypes=dtypes, name=name, capacity=self._capacity, memory_limit=self._memory_limit)\n return (key, self._get_return_value(result, indices))", "docstring": "Remove and return the associated (key, value) from the staging area.\n\nIf the key is not in the staging area, this method will block until\nthe associated (key, value) is inserted.\nArgs:\n key: Key associated with the required data.\n indices: Partial list of tensors to retrieve (optional).\n A list of integer or string indices.\n String indices are only valid if the Staging Area\n has names associated with it.\n name: A name for the operation (optional).\n\nReturns:\n The (key, value) tuple retrieved from the staging area."} +{"repo": "tensorflow", "function": "def register_serializable(package='Custom', name=None, predicate=None):\n\n def decorator(arg):\n \"\"\"Registers a class with the serialization framework.\"\"\"\n nonlocal predicate\n if not tf_inspect.isclass(arg):\n raise TypeError('Registered serializable must be a class: {}'.format(arg))\n class_name = name if name is not None else arg.__name__\n if predicate is None:\n predicate = lambda x: isinstance(x, arg)\n _class_registry.register(package, class_name, predicate, arg)\n return arg\n return decorator", "docstring": "Decorator for registering a serializable class.\n\nTHIS METHOD IS STILL EXPERIMENTAL AND MAY CHANGE AT ANY TIME.\n\nRegistered classes will be saved with a name generated by combining the\n`package` and `name` arguments. When loading a SavedModel, modules saved with\nthis registered name will be created using the `_deserialize_from_proto`\nmethod.\n\nBy default, only direct instances of the registered class will be saved/\nrestored with the `serialize_from_proto`/`deserialize_from_proto` methods. To\nextend the registration to subclasses, use the `predicate` argument:\n\n```python\nclass A(tf.Module):\n pass\n\nregister_serializable(\n package=\"Example\", predicate=lambda obj: isinstance(obj, A))(A)\n```\n\nArgs:\n package: The package that this class belongs to.\n name: The name to serialize this class under in this package. If None, the\n class's name will be used.\n predicate: An optional function that takes a single Trackable argument, and\n determines whether that object should be serialized with this `package`\n and `name`. The default predicate checks whether the object's type exactly\n matches the registered class. Predicates are executed in the reverse order\n in which they are added (later registrations are checked first).\n\nReturns:\n A decorator that registers the decorated class with the passed names and\n predicate."} +{"repo": "mobly", "function": "def kill_test_logger(logger):\n for h in list(logger.handlers):\n logger.removeHandler(h)\n if isinstance(h, logging.FileHandler):\n h.close()", "docstring": "Cleans up a test logger object by removing all of its handlers.\n\nArgs:\n logger: The logging object to clean up."} +{"repo": "keras", "function": "class MeanSquaredError(LossFunctionWrapper):\n\n def __init__(self, reduction='sum_over_batch_size', name='mean_squared_error', dtype=None):\n super().__init__(mean_squared_error, name=name, reduction=reduction, dtype=dtype)\n\n def get_config(self):\n return Loss.get_config(self)", "docstring": "Computes the mean of squares of errors between labels and predictions.\n\nFormula:\n\n```python\nloss = mean(square(y_true - y_pred))\n```\n\nArgs:\n reduction: Type of reduction to apply to the loss. In almost all cases\n this should be `\"sum_over_batch_size\"`. Supported options are\n `\"sum\"`, `\"sum_over_batch_size\"`, `\"mean\"`,\n `\"mean_with_sample_weight\"` or `None`. `\"sum\"` sums the loss,\n `\"sum_over_batch_size\"` and `\"mean\"` sum the loss and divide by the\n sample size, and `\"mean_with_sample_weight\"` sums the loss and\n divides by the sum of the sample weights. `\"none\"` and `None`\n perform no aggregation. Defaults to `\"sum_over_batch_size\"`.\n name: Optional name for the loss instance.\n dtype: The dtype of the loss's computations. Defaults to `None`, which\n means using `keras.backend.floatx()`. `keras.backend.floatx()` is a\n `\"float32\"` unless set to a different value\n (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is\n provided, then the `compute_dtype` will be utilized."} +{"repo": "beam", "function": "def build_estimator(tf_transform_output, config, hidden_units=None):\n transformed_feature_spec = tf_transform_output.transformed_feature_spec().copy()\n transformed_feature_spec.pop(taxi.transformed_name(taxi.LABEL_KEY))\n real_valued_columns = [tf.feature_column.numeric_column(key, shape=()) for key in taxi.transformed_names(taxi.DENSE_FLOAT_FEATURE_KEYS)]\n categorical_columns = [tf.feature_column.categorical_column_with_identity(key, num_buckets=taxi.VOCAB_SIZE + taxi.OOV_SIZE, default_value=0) for key in taxi.transformed_names(taxi.VOCAB_FEATURE_KEYS)]\n categorical_columns += [tf.feature_column.categorical_column_with_identity(key, num_buckets=taxi.FEATURE_BUCKET_COUNT, default_value=0) for key in taxi.transformed_names(taxi.BUCKET_FEATURE_KEYS)]\n categorical_columns += [tf.feature_column.categorical_column_with_identity(key, num_buckets=num_buckets, default_value=0) for key, num_buckets in zip(taxi.transformed_names(taxi.CATEGORICAL_FEATURE_KEYS), taxi.MAX_CATEGORICAL_FEATURE_VALUES)]\n return tf_estimator.DNNLinearCombinedClassifier(config=config, linear_feature_columns=categorical_columns, dnn_feature_columns=real_valued_columns, dnn_hidden_units=hidden_units or [100, 70, 50, 25])", "docstring": "Build an estimator for predicting the tipping behavior of taxi riders.\n\nArgs:\n tf_transform_output: A TFTransformOutput.\n config: tf.contrib.learn.RunConfig defining the runtime environment for the\n estimator (including model_dir).\n hidden_units: [int], the layer sizes of the DNN (input layer first).\n\nReturns:\n Resulting DNNLinearCombinedClassifier."} +{"repo": "tf-quant-finance", "function": "def crank_nicolson_step():\n\n def step_fn(time, next_time, coord_grid, value_grid, boundary_conditions, second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn, inner_second_order_coeff_fn, inner_first_order_coeff_fn, num_steps_performed, dtype=None, name=None):\n \"\"\"Performs the step.\"\"\"\n del num_steps_performed\n name = name or 'crank_nicolson_step'\n return parabolic_equation_step(time, next_time, coord_grid, value_grid, boundary_conditions, second_order_coeff_fn, first_order_coeff_fn, zeroth_order_coeff_fn, inner_second_order_coeff_fn, inner_first_order_coeff_fn, time_marching_scheme=crank_nicolson_scheme, dtype=dtype, name=name)\n return step_fn", "docstring": "Creates a stepper function with Crank-Nicolson time marching scheme.\n\nCrank-Nicolson time marching scheme is one of the most widely used schemes\nfor 1D PDEs.
Given a space-discretized equation\n\n```\ndu/dt = A(t) u(t) + b(t)\n```\n(here `u` is a value vector, `A` and `b` are the matrix and the vector defined\nby the PDE), it approximates the right-hand side as an average of values taken\nbefore and after the time step:\n\n```\n(u(t2) - u(t1)) / (t2 - t1) = (A(t1) u(t1) + b(t1) + A(t2) u(t2) + b(t2)) / 2.\n```\n\nCrank-Nicolson has second order accuracy and is stable.\n\nMore details can be found in `weighted_implicit_explicit.py` describing the\nweighted implicit-explicit scheme - Crank-Nicolson scheme is a special case\nwith `theta = 0.5`.\n\nReturns:\n Callable to be used in finite-difference PDE solvers (see fd_solvers.py)."} +{"repo": "tensorflow", "function": "def should_stop(self):\n if self._check_stop():\n return True\n if self._sess:\n return self._wrapped_is_stoppable and self._sess.should_stop()\n return True", "docstring": "Return True if this session should not be used anymore.\n\nAlways return True if the session was closed.\n\nReturns:\n True if the session should stop, False otherwise."} +{"repo": "transformers", "function": "class EsmForProteinFoldingOutput(ModelOutput):\n frames: Optional[torch.FloatTensor] = None\n sidechain_frames: Optional[torch.FloatTensor] = None\n unnormalized_angles: Optional[torch.FloatTensor] = None\n angles: Optional[torch.FloatTensor] = None\n positions: Optional[torch.FloatTensor] = None\n states: Optional[torch.FloatTensor] = None\n s_s: Optional[torch.FloatTensor] = None\n s_z: Optional[torch.FloatTensor] = None\n distogram_logits: Optional[torch.FloatTensor] = None\n lm_logits: Optional[torch.FloatTensor] = None\n aatype: Optional[torch.FloatTensor] = None\n atom14_atom_exists: Optional[torch.FloatTensor] = None\n residx_atom14_to_atom37: Optional[torch.FloatTensor] = None\n residx_atom37_to_atom14: Optional[torch.FloatTensor] = None\n atom37_atom_exists: Optional[torch.FloatTensor] = None\n residue_index: Optional[torch.FloatTensor] = None\n lddt_head: Optional[torch.FloatTensor] = None\n plddt: Optional[torch.FloatTensor] = None\n ptm_logits: Optional[torch.FloatTensor] = None\n ptm: Optional[torch.FloatTensor] = None\n aligned_confidence_probs: Optional[torch.FloatTensor] = None\n predicted_aligned_error: Optional[torch.FloatTensor] = None\n max_predicted_aligned_error: Optional[torch.FloatTensor] = None", "docstring": "Output type of [`EsmForProteinFolding`].\n\nArgs:\n frames (`torch.FloatTensor`):\n Output frames.\n sidechain_frames (`torch.FloatTensor`):\n Output sidechain frames.\n unnormalized_angles (`torch.FloatTensor`):\n Predicted unnormalized backbone and side chain torsion angles.\n angles (`torch.FloatTensor`):\n Predicted backbone and side chain torsion angles.\n positions (`torch.FloatTensor`):\n Predicted positions of the backbone and side chain atoms.\n states (`torch.FloatTensor`):\n Hidden states from the protein folding trunk.\n s_s (`torch.FloatTensor`):\n Per-residue embeddings derived by concatenating the hidden states of each layer of the ESM-2 LM stem.\n s_z (`torch.FloatTensor`):\n Pairwise residue embeddings.\n distogram_logits (`torch.FloatTensor`):\n Input logits to the distogram used to compute residue distances.\n lm_logits (`torch.FloatTensor`):\n Logits output by the ESM-2 protein language model stem.\n aatype (`torch.FloatTensor`):\n Input amino acids (AlphaFold2 indices).\n atom14_atom_exists (`torch.FloatTensor`):\n Whether each atom exists in the atom14 representation.\n residx_atom14_to_atom37 (`torch.FloatTensor`):\n Mapping between atoms in the
atom14 and atom37 representations.\n residx_atom37_to_atom14 (`torch.FloatTensor`):\n Mapping between atoms in the atom37 and atom14 representations.\n atom37_atom_exists (`torch.FloatTensor`):\n Whether each atom exists in the atom37 representation.\n residue_index (`torch.FloatTensor`):\n The index of each residue in the protein chain. Unless internal padding tokens are used, this will just be\n a sequence of integers from 0 to `sequence_length`.\n lddt_head (`torch.FloatTensor`):\n Raw outputs from the lddt head used to compute plddt.\n plddt (`torch.FloatTensor`):\n Per-residue confidence scores. Regions of low confidence may indicate areas where the model's prediction is\n uncertain, or where the protein structure is disordered.\n ptm_logits (`torch.FloatTensor`):\n Raw logits used for computing ptm.\n ptm (`torch.FloatTensor`):\n TM-score output representing the model's high-level confidence in the overall structure.\n aligned_confidence_probs (`torch.FloatTensor`):\n Per-residue confidence scores for the aligned structure.\n predicted_aligned_error (`torch.FloatTensor`):\n Predicted error between the model's prediction and the ground truth.\n max_predicted_aligned_error (`torch.FloatTensor`):\n Per-sample maximum predicted error."} +{"repo": "tensorflow", "function": "def fulltypes_for_flat_tensors(element_spec):\n specs = _specs_for_flat_tensors(element_spec)\n full_types_lists = [_translate_to_fulltype_for_flat_tensors(s) for s in specs]\n rval = nest.flatten(full_types_lists)\n return rval", "docstring": "Convert the element_spec for a dataset to a list of FullTypeDef.\n\nNote that \"flat\" in this function and in `_flat_tensor_specs` is a nickname\nfor the \"batchable tensor list\" encoding used by datasets and map_fn.\nThe FullTypeDef created corresponds to this encoding (e.g. that uses variants\nand not the FullTypeDef corresponding to the default \"component\" encoding).\n\nThis is intended for temporary internal use and expected to be removed\nwhen type inference support is sufficient. See limitations of\n`_translate_to_fulltype_for_flat_tensors`.\n\nArgs:\n element_spec: A nest of TypeSpec describing the elements of a dataset (or\n map_fn).\n\nReturns:\n A list of FullTypeDef corresponding to ELEMENT_SPEC. The items\n in this list correspond to the items in `_flat_tensor_specs`."} +{"repo": "python-fire", "function": "def NeedsSeparatingHyphenHyphen(self, flag='help'):\n element = self.GetLastHealthyElement()\n component = element.component\n spec = inspectutils.GetFullArgSpec(component)\n return spec.varkw is not None or flag in spec.args or flag in spec.kwonlyargs", "docstring": "Returns whether the trace needs '--' before '--help'.\n\n'--' is needed when the component takes keyword arguments, when the value of\n`flag` matches one of the arguments of the component, or when the component\ntakes keyword-only arguments (e.g. arguments with default values).\n\nArgs:\n flag: The flag available for the trace.\n\nReturns:\n True if '--' is needed, False otherwise."} +{"repo": "tensorflow", "function": "def matrix_transpose(a, name='matrix_transpose', conjugate=False):\n with ops.name_scope(name, values=[a]):\n a = ops.convert_to_tensor(a, name='a')\n a_shape = a.get_shape()\n ndims = a_shape.ndims\n if ndims is not None:\n if ndims < 2:\n raise ValueError(f'Argument `a` should be a (batch) matrix with rank >= 2. Received `a` = {a} with shape: {a_shape}')\n perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]\n else:\n a_rank = rank(a)\n perm = concat((gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)\n return transpose(a, perm=perm, conjugate=conjugate)", "docstring": "Transposes last two dimensions of tensor `a`.\n\nFor example:\n\n```python\nx = tf.constant([[1, 2, 3], [4, 5, 6]])\ntf.linalg.matrix_transpose(x) # [[1, 4],\n # [2, 5],\n # [3, 6]]\n\nx = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],\n [4 + 4j, 5 + 5j, 6 + 6j]])\ntf.linalg.matrix_transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],\n # [2 - 2j, 5 - 5j],\n # [3 - 3j, 6 - 6j]]\n\n# Matrix with two batch dimensions.\n# x.shape is [1, 2, 3, 4]\n# tf.linalg.matrix_transpose(x) is shape [1, 2, 4, 3]\n```\n\nNote that `tf.matmul` provides kwargs allowing for transpose of arguments.\nThis is done with minimal cost, and is preferable to using this function. E.g.\n\n```python\n# Good! Transpose is taken at minimal additional cost.\ntf.matmul(matrix, b, transpose_b=True)\n\n# Inefficient!\ntf.matmul(matrix, tf.linalg.matrix_transpose(b))\n```\n\n@compatibility(numpy)\nIn `numpy` transposes are memory-efficient constant time operations as they\nsimply return a new view of the same data with adjusted `strides`.\n\nTensorFlow does not support strides, `linalg.matrix_transpose` returns a new\ntensor with the items permuted.\n@end_compatibility\n\nArgs:\n a: A `Tensor` with `rank >= 2`.\n name: A name for the operation (optional).\n conjugate: Optional bool. Setting it to `True` is mathematically equivalent\n to tf.math.conj(tf.linalg.matrix_transpose(input)).\n\nReturns:\n A transposed batch matrix `Tensor`.\n\nRaises:\n ValueError: If `a` is determined statically to have `rank < 2`."} +{"repo": "tensorflow", "function": "def build_graph(device, dtype, data_format, input_shape, filter_shape, strides, padding, num_iters, warmup_iters):\n with ops.device('/%s:0' % device):\n inp = variable_v1.VariableV1(random_ops.truncated_normal(input_shape, dtype=dtype))\n filt = variable_v1.VariableV1(random_ops.truncated_normal(filter_shape, dtype=dtype))\n outputs = []\n conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format=data_format)\n outputs.append(conv2d_op)\n for _ in range(1, num_iters):\n with ops.control_dependencies([conv2d_op]):\n conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format=data_format)\n outputs.append(conv2d_op)\n warmup_groups = []\n warmup_conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format=data_format)\n warmup_groups.append(warmup_conv2d_op)\n for _ in range(1, warmup_iters):\n with ops.control_dependencies([warmup_conv2d_op]):\n warmup_conv2d_op = nn_ops.conv2d(inp, filt, strides, padding, data_format=data_format)\n warmup_groups.append(warmup_conv2d_op)\n return (control_flow_ops.group(*warmup_groups), control_flow_ops.group(*outputs))", "docstring": "Builds a graph containing a sequence of conv2d operations.\n\nArgs:\n device: String, the device to run on.\n dtype: Data type for the convolution.\n data_format: A string from: \"NHWC\" or \"NCHW\". Data format for input and\n output data.\n input_shape: Shape of the input tensor.\n filter_shape: Shape of the filter tensor.\n strides: A list of ints. 1-D of length 4. The stride of sliding\n window for each dimension of input.\n padding: A string from: \"SAME\", \"VALID\". The type of padding\n algorithm to use.\n num_iters: number of iterations to run conv2d.\n warmup_iters: number of iterations for warmup runs.\n\nReturns:\n An array of tensors to run()"} +{"repo": "beam", "function": "def read(self, range_tracker):\n raise NotImplementedError", "docstring": "Returns an iterator that reads data from the source.\n\nThe returned set of data must respect the boundaries defined by the given\n``RangeTracker`` object. For example:\n\n * Returned set of data must be for the range\n ``[range_tracker.start_position, range_tracker.stop_position)``. Note\n that a source may decide to return records that start after\n ``range_tracker.stop_position``. See documentation in class\n ``RangeTracker`` for more details. Also, note that the framework might\n invoke ``range_tracker.try_split()`` to perform dynamic split\n operations. range_tracker.stop_position may be updated\n dynamically due to successful dynamic split operations.\n * Method ``range_tracker.try_split()`` must be invoked for every record\n that starts at a split point.\n * Method ``range_tracker.record_current_position()`` may be invoked for\n records that do not start at split points.\n\nArgs:\n range_tracker: a ``RangeTracker`` whose boundaries must be respected\n when reading data from the source. A runner that reads this\n source must pass a ``RangeTracker`` object that is not\n ``None``.\nReturns:\n an iterator of data read by the source."} +{"repo": "tensorflow", "function": "def serialize(optimizer):\n return serialize_keras_object(optimizer)", "docstring": "Serialize the optimizer configuration to JSON compatible python dict.\n\nThe configuration can be used for persistence and to reconstruct the `Optimizer`\ninstance again.\n\n>>> tf.keras.optimizers.serialize(tf.keras.optimizers.SGD())\n{'class_name': 'SGD', 'config': {'name': 'SGD', 'learning_rate': 0.01,\n 'decay': 0.0, 'momentum': 0.0,\n 'nesterov': False}}\n\nArgs:\n optimizer: An `Optimizer` instance to serialize.\n\nReturns:\n Python dict which contains the configuration of the input optimizer."} +{"repo": "tensorflow", "function": "def __init__(self, name: Text, num_replicas: int, pivot: ops.Operation):\n super(TPUReplicateContext, self).__init__()\n self._num_replicas = num_replicas\n self._outer_device_function_stack = None\n self._oc_dev_fn_stack = None\n self._outside_compilation_cluster = None\n self._is_map_outside_compilation = False\n self._outside_compilation_v2_context = None\n self._outside_compilation_counter = 0\n self._in_gradient_colocation = None\n self._gradient_colocation_stack = []\n self._host_compute_core = []\n self._name = name\n self._tpu_replicate_attr = attr_value_pb2.AttrValue(s=compat.as_bytes(self._name))\n self._unsupported_ops = []\n self._pivot = pivot\n self._replicated_vars = {}", "docstring": "Builds a new TPUReplicateContext.\n\nArgs:\n name: a unique name for the context, used to populate the `_tpu_replicate`\n attribute.\n num_replicas: an integer that gives the number of replicas for the\n computation.\n pivot: a pivot node. Nodes in the TPUReplicateContext that do not have any\n inputs will have a control dependency on the pivot node.
This ensures\n that nodes are correctly included in any enclosing control flow\n contexts."} +{"repo": "transformers", "function": "class VitPoseConfig(PretrainedConfig):\n model_type = 'vitpose'\n\n def __init__(self, backbone_config: Optional[PretrainedConfig]=None, backbone: Optional[str]=None, use_pretrained_backbone: bool=False, use_timm_backbone: bool=False, backbone_kwargs: Optional[dict]=None, initializer_range: float=0.02, scale_factor: int=4, use_simple_decoder: bool=True, **kwargs):\n super().__init__(**kwargs)\n if use_pretrained_backbone:\n logger.info('`use_pretrained_backbone` is `True`. For the pure inference purpose of VitPose weight do not set this value.')\n if use_timm_backbone:\n raise ValueError('use_timm_backbone set `True` is not supported at the moment.')\n if backbone_config is None and backbone is None:\n logger.info('`backbone_config` is `None`. Initializing the config with the default `VitPose` backbone.')\n backbone_config = CONFIG_MAPPING['vitpose_backbone'](out_indices=[4])\n elif isinstance(backbone_config, dict):\n backbone_model_type = backbone_config.get('model_type')\n config_class = CONFIG_MAPPING[backbone_model_type]\n backbone_config = config_class.from_dict(backbone_config)\n verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)\n self.backbone_config = backbone_config\n self.backbone = backbone\n self.use_pretrained_backbone = use_pretrained_backbone\n self.use_timm_backbone = use_timm_backbone\n self.backbone_kwargs = backbone_kwargs\n self.initializer_range = initializer_range\n self.scale_factor = scale_factor\n self.use_simple_decoder = use_simple_decoder", "docstring": "This is the configuration class to store the configuration of a [`VitPoseForPoseEstimation`]. It is used to instantiate a\nVitPose model according to the specified arguments, defining the model architecture. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the VitPose\n[usyd-community/vitpose-base-simple](https://huggingface.co/usyd-community/vitpose-base-simple) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `VitPoseBackboneConfig()`):\n The configuration of the backbone model. Currently, only `backbone_config` with `vitpose_backbone` as `model_type` is supported.\n backbone (`str`, *optional*):\n Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this\n will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`\n is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.\n use_pretrained_backbone (`bool`, *optional*, defaults to `False`):\n Whether to use pretrained weights for the backbone.\n use_timm_backbone (`bool`, *optional*, defaults to `False`):\n Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers\n library.\n backbone_kwargs (`dict`, *optional*):\n Keyword arguments to be passed to AutoBackbone when loading from a checkpoint\n e.g. `{'out_indices': (0, 1, 2, 3)}`. 
Cannot be specified if `backbone_config` is set.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n scale_factor (`int`, *optional*, defaults to 4):\n Factor to upscale the feature maps coming from the ViT backbone.\n use_simple_decoder (`bool`, *optional*, defaults to `True`):\n Whether to use a `VitPoseSimpleDecoder` to decode the feature maps from the backbone into heatmaps. Otherwise it uses `VitPoseClassicDecoder`.\n\n\nExample:\n\n```python\n>>> from transformers import VitPoseConfig, VitPoseForPoseEstimation\n\n>>> # Initializing a VitPose configuration\n>>> configuration = VitPoseConfig()\n\n>>> # Initializing a model (with random weights) from the configuration\n>>> model = VitPoseForPoseEstimation(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def __init__(self, job_to_label_mapping=None, tf_server_port=8470, rpc_layer='grpc', override_client=None, executable_location=ExecutableLocation.WITHIN_CLUSTER):\n try:\n from kubernetes import config as k8sconfig\n if not override_client:\n if executable_location == ExecutableLocation.OFF_CLUSTER:\n k8sconfig.load_kube_config()\n elif executable_location == ExecutableLocation.WITHIN_CLUSTER:\n k8sconfig.load_incluster_config()\n else:\n raise ValueError('The executable location provided is invalid.')\n except ImportError:\n if not override_client:\n raise ImportError('The Kubernetes Python client must be installed before using the Kubernetes Cluster Resolver. To install the Kubernetes Python client, run `pip install kubernetes` on your command line.')\n if not job_to_label_mapping:\n job_to_label_mapping = {'worker': ['job-name=tensorflow']}\n self._job_to_label_mapping = job_to_label_mapping\n self._tf_server_port = tf_server_port\n self._override_client = override_client\n self.task_type = None\n self.task_id = None\n self.rpc_layer = rpc_layer", "docstring": "Initializes a new KubernetesClusterResolver.\n\nThis initializes a new Kubernetes ClusterResolver. The ClusterResolver\nwill attempt to talk to the Kubernetes master to retrieve all the instances\nof pods matching a label selector.\n\nArgs:\n job_to_label_mapping: A mapping of TensorFlow jobs to label selectors.\n This allows users to specify many TensorFlow jobs in one Cluster\n Resolver, and each job can have pods belonging to different label\n selectors. For example, a sample mapping might be\n ```\n {'worker': ['job-name=worker-cluster-a', 'job-name=worker-cluster-b'],\n 'ps': ['job-name=ps-1', 'job-name=ps-2']}\n ```\n tf_server_port: The port the TensorFlow server is listening on.\n rpc_layer: (Optional) The RPC layer TensorFlow should use to communicate\n between tasks in Kubernetes. Defaults to 'grpc'.\n override_client: The Kubernetes client (usually automatically retrieved\n using `from kubernetes import client as k8sclient`). 
If you pass this\n in, you are responsible for setting Kubernetes credentials manually and\n calling `k8sconfig.load_kube_config()` or\n `k8sconfig.load_incluster_config()` before using this ClusterResolver.\n executable_location: Parameter that specifies whether or not this\n TensorFlow code is running from within a K8S cluster.\n\nRaises:\n ImportError: If the Kubernetes Python client is not installed and no\n `override_client` is passed in.\n RuntimeError: If autoresolve_task is not a boolean or a callable.\n ValueError: If `executable_location` is not a valid value."} +{"repo": "fhir-py", "function": "def split_if_relative_reference(reference: message.Message) -> None:\n _validate_reference(reference)\n uri_field = reference.DESCRIPTOR.fields_by_name.get('uri')\n if not proto_utils.field_is_set(reference, uri_field):\n return\n uri = proto_utils.get_value_at_field(reference, uri_field)\n internal_match = re.fullmatch(_INTERNAL_REFERENCE_PATTERN, uri.value)\n if internal_match is not None:\n reference_id_field = get_reference_id_field_for_resource(reference, internal_match.group('resource_type'))\n reference_id = proto_utils.create_message_from_descriptor(reference_id_field.message_type)\n populate_typed_reference_id(reference_id, internal_match.group('resource_id'), internal_match.group('version'))\n proto_utils.copy_common_field(uri, reference_id, 'id')\n proto_utils.copy_common_field(uri, reference_id, 'extension')\n proto_utils.set_value_at_field(reference, reference_id_field, reference_id)\n return\n fragment_match = re.fullmatch(_FRAGMENT_REFERENCE_PATTERN, uri.value)\n if fragment_match is not None:\n fragment_field = reference.DESCRIPTOR.fields_by_name['fragment']\n fragment = proto_utils.create_message_from_descriptor(fragment_field.message_type)\n value_field = fragment.DESCRIPTOR.fields_by_name['value']\n proto_utils.set_value_at_field(fragment, value_field, uri.value[1:])\n proto_utils.copy_common_field(uri, fragment, 'id')\n proto_utils.copy_common_field(uri, fragment, 'extension')\n proto_utils.set_value_at_field(reference, fragment_field, fragment)\n return", "docstring": "If possible, parses a `Reference` `uri` into more structured fields.\n\nThis is only possible for two forms of reference uris:\n* Relative references of the form $TYPE/$ID, e.g., \"Patient/1234\"\n In this case, this will be parsed to a proto of the form:\n {patient_id: {value: \"1234\"}}\n* Fragments of the form \"#$FRAGMENT\", e.g., \"#vs1\". In this case, this would\n be parsed into a proto of the form:\n {fragment: {value: \"vs1\"} }\n\nIf the reference URI matches one of these schemas, the `uri` field will be\ncleared, and the appropriate structured fields set. 
Otherwise, the reference\nwill be unchanged.\n\nArgs:\n reference: The FHIR reference to potentially split.\n\nRaises:\n ValueError: If the message is not a valid FHIR Reference proto."} +{"repo": "transformers", "function": "class AltCLIPProcessor(ProcessorMixin):\n attributes = ['image_processor', 'tokenizer']\n image_processor_class = ('CLIPImageProcessor', 'CLIPImageProcessorFast')\n tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')\n\n @deprecate_kwarg(old_name='feature_extractor', version='5.0.0', new_name='image_processor')\n def __init__(self, image_processor=None, tokenizer=None):\n if image_processor is None:\n raise ValueError('You need to specify an `image_processor`.')\n if tokenizer is None:\n raise ValueError('You need to specify a `tokenizer`.')\n super().__init__(image_processor, tokenizer)\n\n def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[AltClipProcessorKwargs]) -> BatchEncoding:\n \"\"\"\n Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`\n and `kwargs` arguments to XLMRobertaTokenizerFast's [`~XLMRobertaTokenizerFast.__call__`] if `text` is not\n `None` to encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to\n CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring\n of the above two methods for more information.\n\n Args:\n\n images (`ImageInput`):\n The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n tensor. Both channels-first and channels-last formats are supported.\n text (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`):\n The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set\n `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors of a particular framework. Acceptable values are:\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return NumPy `np.ndarray` objects.\n - `'jax'`: Return JAX `jnp.ndarray` objects.\n Returns:\n [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:\n\n - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.\n - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when\n `return_attention_mask=True` or if *\"attention_mask\"* is in `self.model_input_names` and if `text` is not\n `None`).\n - **pixel_values** -- Pixel values to be fed to a model. 
Returned when `images` is not `None`.\n        \"\"\"\n        if text is None and images is None:\n            raise ValueError('You must specify either text or images.')\n        output_kwargs = self._merge_kwargs(AltClipProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n        if text is not None:\n            encoding = self.tokenizer(text, **output_kwargs['text_kwargs'])\n        if images is not None:\n            image_features = self.image_processor(images, **output_kwargs['images_kwargs'])\n        return_tensors = output_kwargs['common_kwargs'].pop('return_tensors', None)\n        if text is not None and images is not None:\n            encoding['pixel_values'] = image_features.pixel_values\n            return encoding\n        elif text is not None:\n            return encoding\n        else:\n            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)\n\n    def batch_decode(self, *args, **kwargs):\n        \"\"\"\n        This method forwards all its arguments to XLMRobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`].\n        Please refer to the docstring of this method for more information.\n        \"\"\"\n        return self.tokenizer.batch_decode(*args, **kwargs)\n\n    def decode(self, *args, **kwargs):\n        \"\"\"\n        This method forwards all its arguments to XLMRobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please\n        refer to the docstring of this method for more information.\n        \"\"\"\n        return self.tokenizer.decode(*args, **kwargs)\n\n    @property\n    def model_input_names(self):\n        tokenizer_input_names = self.tokenizer.model_input_names\n        image_processor_input_names = self.image_processor.model_input_names\n        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))", "docstring": "Constructs an AltCLIP processor which wraps a CLIP image processor and an XLM-Roberta tokenizer into a single\nprocessor.\n\n[`AltCLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`XLMRobertaTokenizerFast`]. See\nthe [`~AltCLIPProcessor.__call__`] and [`~AltCLIPProcessor.decode`] for more information.\n\nArgs:\n    image_processor ([`CLIPImageProcessor`], *optional*):\n        The image processor is a required input.\n    tokenizer ([`XLMRobertaTokenizerFast`], *optional*):\n        The tokenizer is a required input."} +{"repo": "tensorflow", "function": "def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):\n    if dilation_rate is None:\n        dilation_rate = [1] * num_spatial_dims\n    elif len(dilation_rate) != num_spatial_dims:\n        raise ValueError(f'`len(dilation_rate)` should be {num_spatial_dims}. Received: dilation_rate={dilation_rate} of length {len(dilation_rate)}')\n    dilation_rate = np.array(dilation_rate, dtype=np.int32)\n    if np.any(dilation_rate < 1):\n        raise ValueError(f'all values of `dilation_rate` must be positive. Received: dilation_rate={dilation_rate}')\n    if strides is None:\n        strides = [1] * num_spatial_dims\n    elif len(strides) != num_spatial_dims:\n        raise ValueError(f'`len(strides)` should be {num_spatial_dims}. Received: strides={strides} of length {len(strides)}')\n    strides = np.array(strides, dtype=np.int32)\n    if np.any(strides < 1):\n        raise ValueError(f'all values of `strides` must be positive. Received: strides={strides}')\n    if np.any(strides > 1) and np.any(dilation_rate > 1):\n        raise ValueError(f'`strides > 1` not supported in conjunction with `dilation_rate > 1`. 
Received: strides={strides} and dilation_rate={dilation_rate}')\n return (strides, dilation_rate)", "docstring": "Helper function for verifying strides and dilation_rate arguments.\n\nThis is used by `convolution` and `pool`.\n\nArgs:\n num_spatial_dims: int\n strides: Optional. List of N ints >= 1. Defaults to `[1]*N`. If any value\n of strides is > 1, then all values of dilation_rate must be 1.\n dilation_rate: Optional. List of N ints >= 1. Defaults to `[1]*N`. If any\n value of dilation_rate is > 1, then all values of strides must be 1.\n\nReturns:\n Normalized (strides, dilation_rate) as int32 numpy arrays of shape\n [num_spatial_dims].\n\nRaises:\n ValueError: if the parameters are invalid."} +{"repo": "pyglove", "function": "def from_schema(cls, schema: class_schema.Schema, module_name: str, name: str, qualname: Optional[str]=None, is_method: bool=True) -> 'Signature':\n arg_names = list(schema.metadata.get('init_arg_list', []))\n if arg_names and arg_names[-1].startswith('*'):\n vararg_name = arg_names[-1][1:]\n arg_names.pop(-1)\n else:\n vararg_name = None\n\n def get_arg_spec(arg_name):\n field = schema.get_field(arg_name)\n if not field:\n raise ValueError(f'Argument {arg_name!r} is not a symbolic field.')\n return field.value\n args = []\n if is_method:\n args.append(Argument.from_annotation('self', Argument.Kind.POSITIONAL_OR_KEYWORD))\n args.extend([Argument(n, Argument.Kind.POSITIONAL_OR_KEYWORD, get_arg_spec(n)) for n in arg_names])\n varargs = None\n if vararg_name:\n varargs = Argument(vararg_name, Argument.Kind.VAR_POSITIONAL, get_arg_spec(vararg_name))\n existing_names = set(arg_names)\n if vararg_name:\n existing_names.add(vararg_name)\n kwonlyargs = []\n varkw = None\n for key, field in schema.fields.items():\n if key not in existing_names and (not field.frozen):\n if key.is_const:\n kwonlyargs.append(Argument(str(key), Argument.Kind.KEYWORD_ONLY, field.value))\n else:\n varkw = Argument(schema.metadata.get('varkw_name', None) or 'kwargs', Argument.Kind.VAR_KEYWORD, class_schema.ValueSpec.DictType(field.value))\n return Signature(callable_type=CallableType.FUNCTION, name=name, module_name=module_name, qualname=qualname, description=schema.description, args=args, kwonlyargs=kwonlyargs, varargs=varargs, varkw=varkw, return_value=schema.metadata.get('returns', None))", "docstring": "Creates a signature from a schema object.\n\nArgs:\n schema: A `pg.typing.Schema` object associated with a `pg.Object`.\n module_name: Module name for the signature.\n name: Function or method name of the signature.\n qualname: Qualname of the signature.\n is_method: If True, `self` will be added in the signature as the first\n argument.\n\nReturns:\n A signature object from the schema."} +{"repo": "tensorflow", "function": "def update_checkpoint_state_internal(save_dir, model_checkpoint_path, all_model_checkpoint_paths=None, latest_filename=None, save_relative_paths=False, all_model_checkpoint_timestamps=None, last_preserved_timestamp=None):\n coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)\n if save_relative_paths:\n if os.path.isabs(model_checkpoint_path):\n rel_model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)\n else:\n rel_model_checkpoint_path = model_checkpoint_path\n rel_all_model_checkpoint_paths = []\n for p in all_model_checkpoint_paths:\n if os.path.isabs(p):\n rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))\n else:\n rel_all_model_checkpoint_paths.append(p)\n ckpt = generate_checkpoint_state_proto(save_dir, 
rel_model_checkpoint_path, all_model_checkpoint_paths=rel_all_model_checkpoint_paths, all_model_checkpoint_timestamps=all_model_checkpoint_timestamps, last_preserved_timestamp=last_preserved_timestamp)\n    else:\n        ckpt = generate_checkpoint_state_proto(save_dir, model_checkpoint_path, all_model_checkpoint_paths=all_model_checkpoint_paths, all_model_checkpoint_timestamps=all_model_checkpoint_timestamps, last_preserved_timestamp=last_preserved_timestamp)\n    if coord_checkpoint_filename == ckpt.model_checkpoint_path:\n        raise RuntimeError(\"Save path '%s' conflicts with path used for checkpoint state. Please use a different save path.\" % model_checkpoint_path)\n    file_io.atomic_write_string_to_file(coord_checkpoint_filename, text_format.MessageToString(ckpt))", "docstring": "Updates the content of the 'checkpoint' file.\n\nThis updates the checkpoint file containing a CheckpointState\nproto.\n\nArgs:\n  save_dir: Directory where the model was saved.\n  model_checkpoint_path: The checkpoint file.\n  all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted\n    checkpoints, sorted from oldest to newest. If this is a non-empty list,\n    the last element must be equal to model_checkpoint_path. These paths\n    are also saved in the CheckpointState proto.\n  latest_filename: Optional name of the checkpoint file. Defaults to\n    'checkpoint'.\n  save_relative_paths: If `True`, will write relative paths to the checkpoint\n    state file.\n  all_model_checkpoint_timestamps: Optional list of timestamps (floats,\n    seconds since the Epoch) indicating when the checkpoints in\n    `all_model_checkpoint_paths` were created.\n  last_preserved_timestamp: A float, indicating the number of seconds since\n    the Epoch when the last preserved checkpoint was written, e.g. due to a\n    `keep_checkpoint_every_n_hours` parameter (see\n    `tf.train.CheckpointManager` for an implementation).\n\nRaises:\n  RuntimeError: If any of the model checkpoint paths conflict with the file\n    containing CheckpointState."} +{"repo": "tensorflow", "function": "def __init__(self, components):\n    global _next_device_number, _next_device_number_lock\n    self.components = tuple((device_util.canonicalize(d) for d in components))\n    if not self.components:\n        raise ValueError('ParallelDevice requires at least one component.')\n    ctx = context.context()\n    with _next_device_number_lock:\n        self._name = '{}/device:CUSTOM:{}'.format(ctx.host_address_space(), _next_device_number)\n        _next_device_number += 1\n    device, device_info = _pywrap_parallel_device.GetParallelDeviceCapsules(self._name, self.components)\n    context.register_custom_device(device, self._name, device_info)\n    self._device_ids = None\n    self._device_scope = None\n    _all_parallel_devices[self._name] = self", "docstring": "Creates a device which executes operations in parallel on `components`.\n\nArgs:\n  components: A list of device names. 
Each operation executed on the\n returned device executes on these component devices.\n\nReturns:\n A string with the name of the newly created device."} +{"repo": "tensorflow", "function": "class LogCoshError(MeanMetricWrapper):\n\n def __init__(self, name='logcosh', dtype=None):\n super(LogCoshError, self).__init__(logcosh, name, dtype=dtype)", "docstring": "Computes the logarithm of the hyperbolic cosine of the prediction error.\n\n`logcosh = log((exp(x) + exp(-x))/2)`, where x is the error (y_pred - y_true)\n\nArgs:\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nStandalone usage:\n\n>>> m = tf.keras.metrics.LogCoshError()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])\n>>> m.result().numpy()\n0.10844523\n\n>>> m.reset_state()\n>>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],\n... sample_weight=[1, 0])\n>>> m.result().numpy()\n0.21689045\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(optimizer='sgd',\n loss='mse',\n metrics=[tf.keras.metrics.LogCoshError()])\n```"} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.model.decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)\n logits = self.lm_head(outputs[0])\n loss = None\n if labels is not None:\n labels = labels.to(logits.device)\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n return CausalLMOutputWithCrossAttentions(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)", "docstring": "cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\nlabels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, BlenderbotSmallForCausalLM\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/blenderbot_small-90M\")\n>>> model = BlenderbotSmallForCausalLM.from_pretrained(\"facebook/blenderbot_small-90M\", add_cross_attention=False)\n>>> assert model.config.is_decoder, f\"{model.__class__} has to be configured as a decoder.\"\n>>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n>>> outputs = model(**inputs)\n\n>>> logits = outputs.logits\n>>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]\n>>> list(logits.shape) == expected_shape\nTrue\n```"} +{"repo": "transformers", "function": "def call(self, input_features=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if encoder_outputs is None:\n encoder_outputs = self.encoder(input_features, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n elif return_dict and (not isinstance(encoder_outputs, TFBaseModelOutput)):\n encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)\n decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n if not return_dict:\n return decoder_outputs + encoder_outputs\n return TFSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)", "docstring": "Returns:\n\nExample:\n\n ```python\n >>> import tensorflow as tf\n >>> from transformers 
import TFWhisperModel, AutoFeatureExtractor\n >>> from datasets import load_dataset\n\n >>> model = TFWhisperModel.from_pretrained(\"openai/whisper-base\")\n >>> feature_extractor = AutoFeatureExtractor.from_pretrained(\"openai/whisper-base\")\n >>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n >>> inputs = feature_extractor(ds[0][\"audio\"][\"array\"], return_tensors=\"tf\")\n >>> input_features = inputs.input_features\n >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id\n >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state\n >>> list(last_hidden_state.shape)\n [1, 2, 512]\n ```"} +{"repo": "pytype", "function": "def _check_signature_compatible(method_signature, base_signature, stack, matcher, ctx):\n\n def is_subtype(this_type, that_type):\n \"\"\"Return True iff this_type is a subclass of that_type.\"\"\"\n if this_type == ctx.convert.never:\n return True\n this_type_instance = this_type.instantiate(ctx.root_node, container=abstract_utils.DUMMY_CONTAINER)\n return matcher.compute_one_match(this_type_instance, that_type).success\n check_result = _check_positional_parameters(method_signature, base_signature, is_subtype, ctx) or _check_keyword_only_parameters(method_signature, base_signature, is_subtype) or _check_default_values(method_signature, base_signature) or _check_return_types(method_signature, base_signature, is_subtype)\n if check_result:\n ctx.errorlog.overriding_signature_mismatch(stack, base_signature, method_signature, details=check_result.message)", "docstring": "Checks if the signatures match for the overridden and overriding methods.\n\nAdds the first error found to the context's error log.\n\nTwo invariants are verified:\n1. Every call that is valid for the overridden method is valid for\n the overriding method.\n2. 
Two calls that are equivalent for the overridden method are equivalent\n   for the overriding method.\n\nThis translates into the following mapping requirements for\noverriding method parameters:\n+----------------------------------------------------------------------------+\n|        Overridden method            |        Overriding method            |\n+----------------------------------------------------------------------------+\n| Positional-only                      | Positional-only or                  |\n|                                      | positional-or-keyword, any name     |\n+--------------------------------------+-------------------------------------+\n| Positional-or-keyword                | Positional-or-keyword, same name    |\n+--------------------------------------+-------------------------------------+\n| Keyword-only                         | Positional-or-keyword               |\n|                                      | or keyword-only, same name          |\n+--------------------------------------+-------------------------------------+\n| Non-default                          | Non-default or default              |\n+--------------------------------------+-------------------------------------+\n| Default                              | Default, same default value         |\n+--------------------------------------+-------------------------------------+\n| Parameter of type T                  | Parameter of supertype of T or      |\n|                                      | no annotation                       |\n+--------------------------------------+-------------------------------------+\n| Parameter without annotation         | Parameter with any type annotation  |\n|                                      | or without annotation               |\n+--------------------------------------+-------------------------------------+\n| Return type T                        | Return type - subtype of T or       |\n|                                      | no annotation                       |\n+--------------------------------------+-------------------------------------+\n| Return type not annotated            | Any return type annotation          |\n|                                      | or no annotation                    |\n+--------------------------------------+-------------------------------------+\nIn addition, default parameters of the overriding method don't have to match\nany parameters of the overridden method.\nSame name requirement is often violated, so we don't treat it as an error\nfor now and only log a warning.\n\nArguments:\n  method_signature: signature of the overriding method.\n  base_signature: signature of the overridden method.\n  stack: the stack to use for mismatch error reporting.\n  matcher: abstract matcher for type comparison.\n  ctx: pytype abstract context."} +{"repo": "starthinker", "function": "def report_build(config, auth, account, body):\n    report = report_get(config, auth, account, name=body['name'])\n    if report is None:\n        account_id, advertiser_ids = parse_account(config, auth, account)\n        is_superuser, profile_id = get_profile_for_api(config, auth, account_id)\n        body['accountId'] = account_id\n        body['ownerProfileId'] = profile_id\n        if advertiser_ids and 'criteria' in body:\n            body['criteria']['dimensionFilters'] = body.get('criteria', {}).get('dimensionFilters', []) + [{'kind': 'dfareporting#dimensionValue', 'dimensionName': 'advertiser', 'id': advertiser_id, 'matchType': 'EXACT'} for advertiser_id in advertiser_ids]\n        if 'schedule' not in body:\n            body['schedule'] = {'active': True, 'repeats': 'DAILY', 'every': 1}\n        if 'startDate' not in body['schedule']:\n            body['schedule']['startDate'] = str(date.today())\n        if 'expirationDate' not in body['schedule']:\n            body['schedule']['expirationDate'] = str(date.today() + timedelta(days=365))\n        kwargs = {'profileId': profile_id, 'accountId': account_id} if is_superuser else {'profileId': profile_id}\n        kwargs['body'] = body\n        report = API_DCM(config, auth, internal=is_superuser).reports().insert(**kwargs).execute()\n        kwargs = {'profileId': profile_id, 'accountId': account_id} if is_superuser else 
{'profileId': profile_id}\n        kwargs['reportId'] = report['id']\n        API_DCM(config, auth, internal=is_superuser).reports().run(**kwargs).execute()\n    elif config.verbose:\n        print('DCM Report Exists:', body['name'])\n    return report", "docstring": "Creates a DCM report given a JSON definition.\n\nBulletproofing:\nhttps://developers.google.com/doubleclick-advertisers/v3.2/reports/insert\n\nThe body JSON provided will have the following fields overridden:\n  * accountId - supplied as a parameter in account token.\n  * ownerProfileId - determined from the current credentials.\n  * advertiser_ids - supplied as a parameter in account token.\n\nArgs:\n  * auth: (string) Either user or service.\n  * account: (string) [account:advertiser@profile] token.\n  * body: (json) As defined in:\n    https://developers.google.com/doubleclick-advertisers/v3.2/reports#resource\n\nReturns:\n  * JSON definition of report created or existing."} +{"repo": "tensorflow", "function": "def _show_tag_sets(saved_model_dir):\n    tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir)\n    print('The given SavedModel contains the following tag-sets:')\n    for tag_set in sorted(tag_sets):\n        print('%r' % ', '.join(sorted(tag_set)))", "docstring": "Prints the tag-sets stored in a SavedModel directory.\n\nPrints all the tag-sets for MetaGraphs stored in a SavedModel directory.\n\nArgs:\n  saved_model_dir: Directory containing the SavedModel to inspect."} +{"repo": "nsscache", "function": "def __init__(self, data=None, _KEY=None, _ATTRS=None):\n    if self.__class__ is MapEntry:\n        raise TypeError('MapEntry is an abstract class.')\n    if data is None:\n        return\n    else:\n        for key in data:\n            setattr(self, key, data[key])\n    self.log = logging.getLogger(__name__)", "docstring": "This is an abstract class.\n\nArgs:\n  data: An optional dict of attribute, value pairs to populate with.\n\nRaises:\n  TypeError: Bad argument, or attempt to instantiate abstract class."} +{"repo": "keras", "function": "def log_softmax(x, axis=-1):\n    return ops.log_softmax(x, axis=axis)", "docstring": "Log-Softmax activation function.\n\nEach input vector is handled independently.\nThe `axis` argument sets which axis of the input the function\nis applied along.\n\nArgs:\n    x: Input tensor.\n    axis: Integer, axis along which the softmax is applied."}