diff --git "a/valid.jsonl" "b/valid.jsonl"
new file mode 100644
--- /dev/null
+++ "b/valid.jsonl"
@@ -0,0 +1,1000 @@
+{"repo": "transformers", "function": "class MobileViTImageProcessor(BaseImageProcessor):\n model_input_names = ['pixel_values']\n\n def __init__(self, do_resize: bool=True, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_center_crop: bool=True, crop_size: Optional[Dict[str, int]]=None, do_flip_channel_order: bool=True, **kwargs) -> None:\n super().__init__(**kwargs)\n size = size if size is not None else {'shortest_edge': 224}\n size = get_size_dict(size, default_to_square=False)\n crop_size = crop_size if crop_size is not None else {'height': 256, 'width': 256}\n crop_size = get_size_dict(crop_size, param_name='crop_size')\n self.do_resize = do_resize\n self.size = size\n self.resample = resample\n self.do_rescale = do_rescale\n self.rescale_factor = rescale_factor\n self.do_center_crop = do_center_crop\n self.crop_size = crop_size\n self.do_flip_channel_order = do_flip_channel_order\n\n def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n \"\"\"\n Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred.\n \"\"\"\n default_to_square = True\n if 'shortest_edge' in size:\n size = size['shortest_edge']\n default_to_square = False\n elif 'height' in size and 'width' in size:\n size = (size['height'], size['width'])\n else:\n raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)\n return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)\n\n def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n \"\"\"\n Flip the color channels from RGB to BGR or vice versa.\n\n Args:\n image (`np.ndarray`):\n The image, represented as a numpy array.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n return flip_channel_order(image, data_format=data_format, input_data_format=input_data_format)\n\n def __call__(self, images, segmentation_maps=None, **kwargs):\n \"\"\"\n Preprocesses a batch of images and optionally segmentation maps.\n\n Overrides the `__call__` method of the `Preprocessor` class so that both images and segmentation maps can be\n passed in as positional arguments.\n \"\"\"\n return super().__call__(images, segmentation_maps=segmentation_maps, **kwargs)\n\n def _preprocess(self, image: ImageInput, do_resize: bool, do_rescale: bool, do_center_crop: bool, do_flip_channel_order: bool, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, rescale_factor: Optional[float]=None, crop_size: Optional[Dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n if do_resize:\n image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n if do_rescale:\n image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n if do_center_crop:\n image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)\n if do_flip_channel_order:\n image = self.flip_channel_order(image, input_data_format=input_data_format)\n return image\n\n def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[Dict[str, int]]=None, do_flip_channel_order: Optional[bool]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n \"\"\"Preprocesses a single image.\"\"\"\n image = to_numpy_array(image)\n if do_rescale and is_scaled_image(image):\n logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n image = self._preprocess(image=image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_center_crop=do_center_crop, crop_size=crop_size, do_flip_channel_order=do_flip_channel_order, input_data_format=input_data_format)\n image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n return image\n\n def _preprocess_mask(self, segmentation_map: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[Dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n \"\"\"Preprocesses a single mask.\"\"\"\n segmentation_map = to_numpy_array(segmentation_map)\n if segmentation_map.ndim == 2:\n added_channel_dim = True\n segmentation_map = segmentation_map[None, ...]\n input_data_format = ChannelDimension.FIRST\n else:\n added_channel_dim = False\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)\n segmentation_map = self._preprocess(image=segmentation_map, do_resize=do_resize, size=size, resample=PILImageResampling.NEAREST, do_rescale=False, do_center_crop=do_center_crop, crop_size=crop_size, do_flip_channel_order=False, input_data_format=input_data_format)\n if added_channel_dim:\n segmentation_map = segmentation_map.squeeze(0)\n segmentation_map = segmentation_map.astype(np.int64)\n return segmentation_map\n\n @filter_out_non_signature_kwargs()\n def preprocess(self, images: ImageInput, segmentation_maps: Optional[ImageInput]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_center_crop: Optional[bool]=None, crop_size: Optional[Dict[str, int]]=None, do_flip_channel_order: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n segmentation_maps (`ImageInput`, *optional*):\n Segmentation map to preprocess.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing.\n resample (`int`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. 
This can be one of the enum `PILImageResampling`, Only\n has an effect if `do_resize` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image by rescale factor.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the center crop if `do_center_crop` is set to `True`.\n do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):\n Whether to flip the channel order of the image.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n resample = resample if resample is not None else self.resample\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n do_flip_channel_order = do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False)\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size, param_name='crop_size')\n images = make_list_of_images(images)\n if segmentation_maps is not None:\n segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)\n images = make_list_of_images(images)\n if not valid_images(images):\n raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n if segmentation_maps is not None and (not valid_images(segmentation_maps)):\n raise ValueError('Invalid segmentation map type. 
Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)\n images = [self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_center_crop=do_center_crop, crop_size=crop_size, do_flip_channel_order=do_flip_channel_order, data_format=data_format, input_data_format=input_data_format) for img in images]\n data = {'pixel_values': images}\n if segmentation_maps is not None:\n segmentation_maps = [self._preprocess_mask(segmentation_map=segmentation_map, do_resize=do_resize, size=size, do_center_crop=do_center_crop, crop_size=crop_size, input_data_format=input_data_format) for segmentation_map in segmentation_maps]\n data['labels'] = segmentation_maps\n return BatchFeature(data=data, tensor_type=return_tensors)\n\n def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]]=None):\n \"\"\"\n Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.\n\n Args:\n outputs ([`MobileViTForSemanticSegmentation`]):\n Raw outputs of the model.\n target_sizes (`List[Tuple]` of length `batch_size`, *optional*):\n List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,\n predictions will not be resized.\n\n Returns:\n semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic\n segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is\n specified). Each entry of each `torch.Tensor` correspond to a semantic class id.\n \"\"\"\n logits = outputs.logits\n if target_sizes is not None:\n if len(logits) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n if is_torch_tensor(target_sizes):\n target_sizes = target_sizes.numpy()\n semantic_segmentation = []\n for idx in range(len(logits)):\n resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n semantic_map = resized_logits[0].argmax(dim=0)\n semantic_segmentation.append(semantic_map)\n else:\n semantic_segmentation = logits.argmax(dim=1)\n semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n return semantic_segmentation", "docstring": "Constructs a MobileViT image processor.\n\nArgs:\n do_resize (`bool`, *optional*, defaults to `True`):\n Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the\n `do_resize` parameter in the `preprocess` method.\n size (`Dict[str, int]` *optional*, defaults to `{\"shortest_edge\": 224}`):\n Controls the size of the output image after resizing. Can be overridden by the `size` parameter in the\n `preprocess` method.\n resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):\n Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter\n in the `preprocess` method.\n do_rescale (`bool`, *optional*, defaults to `True`):\n Whether to rescale the image by the specified scale `rescale_factor`. 
Can be overridden by the `do_rescale`\n parameter in the `preprocess` method.\n rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):\n Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the\n `preprocess` method.\n do_center_crop (`bool`, *optional*, defaults to `True`):\n Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the\n image is padded with 0's and then center cropped. Can be overridden by the `do_center_crop` parameter in\n the `preprocess` method.\n crop_size (`Dict[str, int]`, *optional*, defaults to `{\"height\": 256, \"width\": 256}`):\n Desired output size `(size[\"height\"], size[\"width\"])` when applying center-cropping. Can be overridden by\n the `crop_size` parameter in the `preprocess` method.\n do_flip_channel_order (`bool`, *optional*, defaults to `True`):\n Whether to flip the color channels from RGB to BGR. Can be overridden by the `do_flip_channel_order`\n parameter in the `preprocess` method."} +{"repo": "tensorflow", "function": "def run_user_main(wrapped_test_module):\n tree = ast.parse(tf_inspect.getsource(wrapped_test_module))\n target = ast.dump(ast.parse('if __name__ == \"__main__\": pass').body[0].test)\n for expr in reversed(tree.body):\n if isinstance(expr, ast.If) and ast.dump(expr.test) == target:\n break\n else:\n raise NotImplementedError(f'Could not find `if __name__ == \"main\":` block in {wrapped_test_module.__name__}.')\n new_ast = ast.Module(body=expr.body, type_ignores=[])\n exec(compile(new_ast, '', 'exec'), globals(), wrapped_test_module.__dict__)", "docstring": "Runs the \"if __name__ == '__main__'\" at the bottom of a module.\n\nTensorFlow practice is to have a main if at the bottom of the module which\nmight call an API compat function before calling test.main().\n\nSince this is a statement, not a function, we can't cleanly reference it, but\nwe can inspect it from the user module and run it in the context of that\nmodule so all imports and variables are available to it.\n\nArgs:\n wrapped_test_module: The user-provided test code to run.\n\nRaises:\n NotImplementedError: If main block was not found in module. 
This should not\n be caught, as it is likely an error on the user's part -- absltest is all\n too happy to report a successful status (and zero tests executed) if a\n user forgets to end a class with \"test.main()\"."} +{"repo": "tensorflow", "function": "def summary_writer(self):\n return self._summary_writer", "docstring": "Return the SummaryWriter used by the chief supervisor.\n\nReturns:\n A SummaryWriter."} +{"repo": "starthinker", "function": "def _process_new(self, feed_item):\n return {'name': feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_NAME, None), 'url': feed_item.get(FieldMap.CAMPAIGN_LANDING_PAGE_URL, None), 'advertiserId': feed_item.get(FieldMap.ADVERTISER_ID, None)}", "docstring": "Creates a new landing page DCM object from a feed item representing a landing page from the Bulkdozer feed.\n\nThis function simply creates the object to be inserted later by the BaseDAO\nobject.\n\nArgs:\n feed_item: Feed item representing the landing page from the Bulkdozer\n feed.\n\nReturns:\n An landing page object ready to be inserted in DCM through the API."} +{"repo": "tensorflow", "function": "def _axis_gather(params, indices, axis):\n if axis > 1:\n if not isinstance(params, ragged_tensor.RaggedTensor):\n params = ragged_tensor.RaggedTensor.from_tensor(params, ragged_rank=1, row_splits_dtype=indices.row_splits.dtype)\n return params.with_values(_gather(params.values, indices, axis - 1, 0))\n if indices.shape.rank is None:\n raise ValueError('rank(indices) must be known statically')\n assert axis == 1\n flat_params = _flatten_dims_0_and_1(params)\n adjustments = _row_starts(params, indices.dtype)\n adjustments = _increase_rank_to(adjustments, indices.shape.ndims + 1)\n adjusted_indices = indices + adjustments\n return _gather(flat_params, adjusted_indices, axis - 1, 0)", "docstring": "Helper that implements ragged gather when axis>0 and batch_dims==0.\n\nArgs:\n params: The tensor from which to gather values.\n indices: The indices of values to gather.\n axis: The axis in `params` to gather `indices` from.\n\nReturns:\n A potentially ragged tensor."} +{"repo": "tf-quant-finance", "function": "def _expand_param_on_rank(self, param, expand_rank, axis):\n param_tensor = tf.convert_to_tensor(param, dtype=self._dtype)\n param_expand = param_tensor\n for _ in range(expand_rank):\n param_expand = tf.expand_dims(param_expand, axis)\n return param_expand", "docstring": "Adds dimensions to `param`, not inplace.\n\nArgs:\n param: initial element.\n expand_rank: is amount of dimensions that need to be added.\n axis: is axis where to place these dimensions.\n\nReturns:\n New `Tensor`."} +{"repo": "tensorflow", "function": "def dispatch_for_binary_elementwise_assert_apis(x_type, y_type):\n\n def decorator(handler):\n api_handler_key = (x_type, y_type, _ASSERT_API_TAG)\n if api_handler_key in _ELEMENTWISE_API_HANDLERS:\n raise ValueError(f'A binary elementwise assert dispatch handler ({_ELEMENTWISE_API_HANDLERS[api_handler_key]}) has already been registered for ({x_type}, {y_type}).')\n _ELEMENTWISE_API_HANDLERS[api_handler_key] = handler\n for api in _BINARY_ELEMENTWISE_ASSERT_APIS:\n _add_dispatch_for_binary_elementwise_api(api, x_type, y_type, handler)\n return handler\n return decorator", "docstring": "Decorator to override default implementation for binary elementwise assert APIs.\n\nThe decorated function (known as the \"elementwise assert handler\")\noverrides the default implementation for any binary elementwise assert API\nwhenever the value for the first two arguments (typically named `x` and 
`y`)\nmatch the specified type annotations. The handler is called with two\narguments:\n\n `elementwise_assert_handler(assert_func, x, y)`\n\nWhere `x` and `y` are the first two arguments to the binary elementwise assert\noperation, and `assert_func` is a TensorFlow function that takes two\nparameters and performs the elementwise assert operation (e.g.,\n`tf.debugging.assert_equal`).\n\nThe following example shows how this decorator can be used to update all\nbinary elementwise assert operations to handle a `MaskedTensor` type:\n\n>>> class MaskedTensor(tf.experimental.ExtensionType):\n... values: tf.Tensor\n... mask: tf.Tensor\n>>> @dispatch_for_binary_elementwise_assert_apis(MaskedTensor, MaskedTensor)\n... def binary_elementwise_assert_api_handler(assert_func, x, y):\n... merged_mask = tf.logical_and(x.mask, y.mask)\n... selected_x_values = tf.boolean_mask(x.values, merged_mask)\n... selected_y_values = tf.boolean_mask(y.values, merged_mask)\n... assert_func(selected_x_values, selected_y_values)\n>>> a = MaskedTensor([1, 1, 0, 1, 1], [False, False, True, True, True])\n>>> b = MaskedTensor([2, 2, 0, 2, 2], [True, True, True, False, False])\n>>> tf.debugging.assert_equal(a, b) # assert passed; no exception was thrown\n\n>>> a = MaskedTensor([1, 1, 1, 1, 1], [True, True, True, True, True])\n>>> b = MaskedTensor([0, 0, 0, 0, 2], [True, True, True, True, True])\n>>> tf.debugging.assert_greater(a, b)\nTraceback (most recent call last):\n...\nInvalidArgumentError: Condition x > y did not hold.\n\nArgs:\n x_type: A type annotation indicating when the api handler should be called.\n y_type: A type annotation indicating when the api handler should be called.\n\nReturns:\n A decorator.\n\n#### Registered APIs\n\nThe binary elementwise assert APIs are:\n\n<>"} +{"repo": "starthinker", "function": "def recipe_cm360_segmentology(config, account, auth_read, auth_write, recipe_name, date_range, recipe_slug, advertisers):\n dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug})\n bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}})\n google_api(config, {'auth': 'user', 'api': 'dfareporting', 'version': 'v3.4', 'function': 'accounts.get', 'kwargs': {'id': account, 'fields': 'id,name'}, 'results': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'CM360_Account'}}})\n dcm(config, {'auth': auth_read, 'report': {'filters': {'advertiser': {'values': advertisers}}, 'account': account, 'body': {'name': recipe_name, 'criteria': {'dateRange': {'kind': 'dfareporting#dateRange', 'relativeDateRange': date_range}, 'dimensions': [{'kind': 'dfareporting#sortedDimension', 'name': 'advertiserId'}, {'kind': 'dfareporting#sortedDimension', 'name': 'advertiser'}, {'kind': 'dfareporting#sortedDimension', 'name': 'zipCode'}], 'metricNames': ['impressions', 'clicks', 'totalConversions']}, 'type': 'STANDARD', 'delivery': {'emailOwner': False}, 'format': 'CSV'}}})\n dcm(config, {'auth': auth_read, 'report': {'account': account, 'name': recipe_name}, 'out': {'bigquery': {'auth': auth_write, 'dataset': recipe_slug, 'table': 'CM360_KPI', 'header': True}}})\n bigquery(config, {'auth': auth_write, 'from': {'query': 'SELECT\\n Id AS Partner_Id,\\n Name AS Partner,\\n Advertiser_Id,\\n Advertiser,\\n Zip_Postal_Code AS Zip,\\n SAFE_DIVIDE(Impressions, SUM(Impressions) OVER(PARTITION BY Advertiser_Id)) AS Impression,\\n SAFE_DIVIDE(Clicks, Impressions) AS Click,\\n 
SAFE_DIVIDE(Total_Conversions, Impressions) AS Conversion,\\n Impressions AS Impressions FROM `{dataset}.CM360_KPI` CROSS JOIN `{dataset}.CM360_Account` ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'CM360_KPI_Normalized'}})\n census(config, {'auth': auth_write, 'normalize': {'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}})\n census(config, {'auth': auth_write, 'correlate': {'join': 'Zip', 'pass': ['Partner_Id', 'Partner', 'Advertiser_Id', 'Advertiser'], 'sum': ['Impressions'], 'correlate': ['Impression', 'Click', 'Conversion'], 'dataset': recipe_slug, 'table': 'CM360_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})", "docstring": "CM360 funnel analysis using Census data.\n\nArgs:\n account (string) - NA\n auth_read (authentication) - Credentials used for reading data.\n auth_write (authentication) - Authorization used for writing data.\n recipe_name (string) - Name of report, not needed if ID used.\n date_range (choice) - Timeframe to run report for.\n recipe_slug (string) - Name of Google BigQuery dataset to create.\n advertisers (integer_list) - Comma delimited list of CM360 advertiser ids."} +{"repo": "tensorflow", "function": "def cross(inputs, name=None):\n return _cross_internal(inputs=inputs, hashed_output=False, name=name)", "docstring": "Generates feature cross from a list of tensors.\n\nThe input tensors must have `rank=2`, and must all have the same number of\nrows. The result is a `RaggedTensor` with the same number of rows as the\ninputs, where `result[row]` contains a list of all combinations of values\nformed by taking a single value from each input's corresponding row\n(`inputs[i][row]`). Values are combined by joining their strings with '_X_'.\nE.g.:\n\n>>> tf.ragged.cross([tf.ragged.constant([['a'], ['b', 'c']]),\n... tf.ragged.constant([['d'], ['e']]),\n... 
tf.ragged.constant([['f'], ['g']])])\n\n\nArgs:\n inputs: A list of `RaggedTensor` or `Tensor` or `SparseTensor`.\n name: Optional name for the op.\n\nReturns:\n A 2D `RaggedTensor` of type `string`."} +{"repo": "transformers", "function": "def update(self, key_states: torch.Tensor, value_states: torch.Tensor, layer_idx: int, cache_kwargs: Optional[Dict[str, Any]]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n key_states = key_states.to(self.key_cache[layer_idx].dtype)\n value_states = value_states.to(self.value_cache[layer_idx].dtype)\n if layer_idx == 0:\n self._seen_tokens += key_states.shape[-2]\n k_out = self.key_cache[0]\n v_out = self.value_cache[0]\n else:\n if self._prefetch_stream is not None:\n torch.cuda.default_stream(self.device).wait_stream(self._prefetch_stream)\n k_out = self._device_key_cache[layer_idx & 1]\n v_out = self._device_value_cache[layer_idx & 1]\n self._prefetch_layer(layer_idx + 1)\n cache_position = cache_kwargs.get('cache_position') if cache_kwargs is not None else None\n if cache_position is None:\n k_out.copy_(key_states)\n v_out.copy_(value_states)\n if layer_idx == 0:\n self.key_cache[layer_idx].copy_(key_states.to(self.offload_device))\n self.value_cache[layer_idx].copy_(value_states.to(self.offload_device))\n else:\n try:\n k_out.index_copy_(2, cache_position, key_states)\n v_out.index_copy_(2, cache_position, value_states)\n except NotImplementedError:\n k_out[:, :, cache_position] = key_states\n v_out[:, :, cache_position] = value_states\n if layer_idx != 0:\n cache_position = cache_position.to(self.offload_device)\n key_states = key_states.to(self.offload_device)\n value_states = value_states.to(self.offload_device)\n try:\n self.key_cache[layer_idx].index_copy_(2, cache_position, key_states)\n self.value_cache[layer_idx].index_copy_(2, cache_position, value_states)\n except NotImplementedError:\n self.key_cache[layer_idx][:, :, cache_position] = key_states\n self.value_cache[layer_idx][:, :, cache_position] = value_states\n return (k_out, v_out)", "docstring": "Updates the cache with the new `key_states` and `value_states` for the layer `layer_idx`.\nIt is VERY important to index using a tensor, otherwise you introduce a copy to the device.\n\nParameters:\n key_states (`torch.Tensor`):\n The new key states to cache.\n value_states (`torch.Tensor`):\n The new value states to cache.\n layer_idx (`int`):\n The index of the layer to cache the states for.\n cache_kwargs (`Dict[str, Any]`, *optional*):\n Additional arguments for the cache subclass. 
The `OffloadedStaticCache` needs the\n `cache_position` input to know how where to write in the cache.\n\nReturn:\n A tuple containing the updated key and value states."} +{"repo": "transformers", "function": "def download_wmt_dataset(src_lang='ro', tgt_lang='en', dataset='wmt16', save_dir=None) -> None:\n try:\n import datasets\n except (ModuleNotFoundError, ImportError):\n raise ImportError('run pip install datasets')\n pair = f'{src_lang}-{tgt_lang}'\n print(f'Converting {dataset}-{pair}')\n ds = datasets.load_dataset(dataset, pair)\n if save_dir is None:\n save_dir = f'{dataset}-{pair}'\n save_dir = Path(save_dir)\n save_dir.mkdir(exist_ok=True)\n for split in ds.keys():\n print(f'Splitting {split} with {ds[split].num_rows} records')\n fn = 'val' if split == 'validation' else split\n src_path = save_dir.joinpath(f'{fn}.source')\n tgt_path = save_dir.joinpath(f'{fn}.target')\n src_fp = src_path.open('w+')\n tgt_fp = tgt_path.open('w+')\n for x in tqdm(ds[split]):\n ex = x['translation']\n src_fp.write(ex[src_lang] + '\\n')\n tgt_fp.write(ex[tgt_lang] + '\\n')\n print(f'Saved {dataset} dataset to {save_dir}')", "docstring": "Download a dataset using the datasets package and save it to the format expected by finetune.py\nFormat of save_dir: train.source, train.target, val.source, val.target, test.source, test.target.\n\nArgs:\n src_lang: source language\n tgt_lang: target language\n dataset: wmt16, wmt17, etc. wmt16 is a good start as it's small. To get the full list run `import datasets; print([d.id for d in datasets.list_datasets() if \"wmt\" in d.id])`\n save_dir: , where to save the datasets, defaults to f'{dataset}-{src_lang}-{tgt_lang}'\n\nUsage:\n >>> download_wmt_dataset('ro', 'en', dataset='wmt16') # saves to wmt16-ro-en"} +{"repo": "tensorflow", "function": "def assign_group_v2(group_assignment, device_index, base_key):\n group_size, group_key = gen_collective_ops.collective_assign_group_v2(group_assignment=group_assignment, device_index=device_index, base_key=base_key)\n return (group_size, group_key)", "docstring": "Assign group key based on group_assignment.\n\nArgs:\n group_assignment: a 2 dimensional integer Tensor that encodes which devices\n belong to the same group. The values are indices of the devices within 0\n to number of devices.\n device_index: integer for the index of the current device\n base_key: integer to offset the resulted group_key. The base key shall be\n unique for different values of group_assignment in the same tf.function.\nNotes: The device_index argument must be consistent with the index of the\n device of this Op in the device assignment list. 
The behavior of this Op is\n undefined if they are inconsistent.\n\nReturns:\n group_size, group_key: The group size and group key for the current device."} +{"repo": "tensorflow", "function": "def psnr(a, b, max_val, name=None):\n with ops.name_scope(name, 'PSNR', [a, b]):\n max_val = math_ops.cast(max_val, a.dtype)\n max_val = convert_image_dtype(max_val, dtypes.float32)\n a = convert_image_dtype(a, dtypes.float32)\n b = convert_image_dtype(b, dtypes.float32)\n mse = math_ops.reduce_mean(math_ops.squared_difference(a, b), [-3, -2, -1])\n psnr_val = math_ops.subtract(20 * math_ops.log(max_val) / math_ops.log(10.0), np.float32(10 / np.log(10)) * math_ops.log(mse), name='psnr')\n _, _, checks = _verify_compatible_image_shapes(a, b)\n with ops.control_dependencies(checks):\n return array_ops.identity(psnr_val)", "docstring": "Returns the Peak Signal-to-Noise Ratio between a and b.\n\nThis is intended to be used on signals (or images). Produces a PSNR value for\neach image in batch.\n\nThe last three dimensions of input are expected to be [height, width, depth].\n\nExample:\n\n```python\n # Read images from file.\n im1 = tf.decode_png('path/to/im1.png')\n im2 = tf.decode_png('path/to/im2.png')\n # Compute PSNR over tf.uint8 Tensors.\n psnr1 = tf.image.psnr(im1, im2, max_val=255)\n\n # Compute PSNR over tf.float32 Tensors.\n im1 = tf.image.convert_image_dtype(im1, tf.float32)\n im2 = tf.image.convert_image_dtype(im2, tf.float32)\n psnr2 = tf.image.psnr(im1, im2, max_val=1.0)\n # psnr1 and psnr2 both have type tf.float32 and are almost equal.\n```\n\nArgs:\n a: First set of images.\n b: Second set of images.\n max_val: The dynamic range of the images (i.e., the difference between the\n maximum the and minimum allowed values).\n name: Namespace to embed the computation in.\n\nReturns:\n The scalar PSNR between a and b. The returned tensor has type `tf.float32`\n and shape [batch_size, 1]."} +{"repo": "keras", "function": "class FalsePositives(_ConfusionMatrixConditionCount):\n\n def __init__(self, thresholds=None, name=None, dtype=None):\n super().__init__(confusion_matrix_cond=metrics_utils.ConfusionMatrix.FALSE_POSITIVES, thresholds=thresholds, name=name, dtype=dtype)", "docstring": "Calculates the number of false positives.\n\nIf `sample_weight` is given, calculates the sum of the weights of\nfalse positives. This metric creates one local variable, `accumulator`\nthat is used to keep track of the number of false positives.\n\nIf `sample_weight` is `None`, weights default to 1.\nUse `sample_weight` of 0 to mask values.\n\nArgs:\n thresholds: (Optional) Defaults to `0.5`. A float value, or a Python\n list/tuple of float threshold values in `[0, 1]`. A threshold is\n compared with prediction values to determine the truth value of\n predictions (i.e., above the threshold is `True`, below is `False`).\n If used with a loss function that sets `from_logits=True` (i.e. 
no\n sigmoid applied to predictions), `thresholds` should be set to 0.\n One metric value is generated for each threshold value.\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nExamples:\n\n>>> m = keras.metrics.FalsePositives()\n>>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1])\n>>> m.result()\n2.0\n\n>>> m.reset_state()\n>>> m.update_state([0, 1, 0, 0], [0, 0, 1, 1], sample_weight=[0, 0, 1, 0])\n>>> m.result()\n1.0"} +{"repo": "pytype", "function": "def Lookup(self, name):\n if not self._name2item:\n self._InitCache()\n return self._name2item[name]", "docstring": "Convenience function: Look up a given name in the global namespace.\n\nTries to find a constant, function or class by this name.\n\nArgs:\n name: Name to look up.\n\nReturns:\n A Constant, Function or Class.\n\nRaises:\n KeyError: if this identifier doesn't exist."} +{"repo": "tensorflow", "function": "def summary(self, line_length=None, positions=None, print_fn=None):\n if not self.built:\n raise ValueError('This model has not yet been built. Build the model first by calling `build()` or calling `fit()` with some data, or specify an `input_shape` argument in the first layer(s) for automatic build.')\n layer_utils.print_summary(self, line_length=line_length, positions=positions, print_fn=print_fn)", "docstring": "Prints a string summary of the network.\n\nArgs:\n line_length: Total length of printed lines\n (e.g. set this to adapt the display to different\n terminal window sizes).\n positions: Relative or absolute positions of log elements\n in each line. If not provided,\n defaults to `[.33, .55, .67, 1.]`.\n print_fn: Print function to use. Defaults to `print`.\n It will be called on each line of the summary.\n You can set it to a custom function\n in order to capture the string summary.\n\nRaises:\n ValueError: if `summary()` is called before the model is built."} +{"repo": "genai-processors", "function": "def parallel_part_functions(fns: Sequence[PartFn], match_fns: Sequence[MatchFn] | None=None, with_default_output: bool=False, with_always_output: bool=False) -> PartFn:\n return functools.partial(_parallel_part_functions, _to_tuple_fns(fns, match_fns), with_default_output=with_default_output, with_always_output=with_always_output)", "docstring": "Combine `fns` to execute on _T in parallel across the `fns`.\n\nArgs:\n fns: sequence of part functions to chain.\n match_fns: sequence of functions that return True if the part should be\n processed by the part function. When the part should not be processed, the\n part function will not be called and nothing will be yielded by the part\n function. When match_fns is not provided, all parts are processed by\n default.\n with_default_output: True when the parallel execution should fallback to\n return the input part as is when fns do not return any output part.\n with_always_output: True when the parallel execution should always return\n the input part as is independent of the output of the fns. This is a\n stronger condition than `with_default_output`. When `with_always_output`\n is True, `with_default_output` is basically ignored.\n\nReturns:\n Part function that runs all functions 'fns' in parallel. 
The output stream\n will keep the order of the input parts:\n\n f_0(c) = c00, c01\n f_1(c) = c10, c11, c12, c14\n\n f_0(c) // f_1(c) = c00, c01, c10, c11, c12"} +{"repo": "tensorflow", "function": "def output_types(self):\n return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), self._element_spec)", "docstring": "Returns the type of each component of an element of this iterator.\n\nReturns:\n A (nested) structure of `tf.DType` objects corresponding to each component\n of an element of this dataset."} +{"repo": "beam", "function": "def get_splits(client, query, num_splits):\n if num_splits <= 1:\n raise SplitNotPossibleError('num_splits must be > 1, got: %d' % num_splits)\n validate_split(query)\n splits = []\n client_scatter_keys = _get_scatter_keys(client, query, num_splits)\n last_client_key = None\n for next_client_key in _get_split_key(client_scatter_keys, num_splits):\n splits.append(_create_split(last_client_key, next_client_key, query))\n last_client_key = next_client_key\n splits.append(_create_split(last_client_key, None, query))\n return splits", "docstring": "Returns a list of sharded queries for the given Cloud Datastore query.\n\nThis will create up to the desired number of splits, however it may return\nless splits if the desired number of splits is unavailable. This will happen\nif the number of split points provided by the underlying Datastore is less\nthan the desired number, which will occur if the number of results for the\nquery is too small.\n\nThis implementation of the QuerySplitter uses the __scatter__ property to\ngather random split points for a query.\n\nNote: This implementation is derived from the java query splitter in\nhttps://github.com/GoogleCloudPlatform/google-cloud-datastore/blob/master/java/datastore/src/main/java/com/google/datastore/v1/client/QuerySplitterImpl.java\n\nArgs:\n client: the datastore client.\n query: the query to split.\n num_splits: the desired number of splits.\n\nReturns:\n A list of split queries, of a max length of `num_splits`\n\nRaises:\n QuerySplitterError: if split could not be performed owing to query or split\n parameters."} +{"repo": "tensorflow", "function": "def __init__(self, sv, sess):\n super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)\n self._sv = sv\n self._sess = sess", "docstring": "Create a `SVTimerCheckpointThread`.\n\nArgs:\n sv: A `Supervisor`.\n sess: A `Session`."} +{"repo": "keras", "function": "def add_object(self, object_path, weights):\n if not isinstance(weights, dict):\n raise ValueError(f\"Argument `weights` should be a dict where keys are weight names (usually '0', '1', etc.) and values are NumPy arrays. Received: type(weights)={type(weights)}\")\n if '/' in object_path:\n elements = object_path.split('/')\n partial_path = '/'.join(elements[:-1])\n weights_dict = self.weights_dict\n for e in elements[:-1]:\n if e not in weights_dict:\n raise ValueError(f\"Path '{partial_path}' not found in model.\")\n weights_dict = weights_dict[e]\n weights_dict[elements[-1]] = weights\n else:\n self.weights_dict[object_path] = weights", "docstring": "Add a new object to the file (e.g. a layer).\n\nArgs:\n object_path: String, full path of the\n object to add (e.g. `\"layers/dense_2\"`).\n weights: Dict mapping weight names to weight\n values (arrays),\n e.g. 
`{\"0\": kernel_value, \"1\": bias_value}`."} +{"repo": "etils", "function": "def get_html_content(id_: str) -> str:\n try:\n node = nodes.Node.from_id(id_)\n return node.inner_html\n except Exception as e:\n epy.reraise(e, prefix='`ecolab.inspect` internal error. Please report an issue.\\n')", "docstring": "Returns the inner content of the block id.\n\nIs called the first time a block is expanded.\n\nArgs:\n id_: Id of the block to load\n\nReturns:\n The html to add."} +{"repo": "starthinker", "function": "def recipe_dcm_to_sheets(config, auth_read, account, report_id, report_name, sheet, tab):\n dcm(config, {'auth': auth_read, 'report': {'account': account, 'report_id': report_id, 'name': report_name}, 'out': {'sheets': {'sheet': sheet, 'tab': tab, 'range': 'A1'}}})", "docstring": "Move existing CM report into a Sheet tab.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n account (integer) - NA\n report_id (integer) - NA\n report_name (string) - NA\n sheet (string) - NA\n tab (string) - NA"} +{"repo": "transformers", "function": "def from_text_vision_configs(cls, text_config: CLIPSegTextConfig, vision_config: CLIPSegVisionConfig, **kwargs):\n return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`CLIPSegConfig`] (or a derived class) from clipseg text model configuration and clipseg vision\nmodel configuration.\n\nReturns:\n [`CLIPSegConfig`]: An instance of a configuration object"} +{"repo": "transformers", "function": "def _get_longest_diag_dict(input_matrix, nonzero_idx):\n visited = set()\n diags = {}\n for idx in nonzero_idx:\n start_idx = torch.clone(idx)\n tuple_start_idx = tuple(start_idx.tolist())\n if tuple_start_idx in visited:\n continue\n visited.add(tuple_start_idx)\n cur_diag_len = 1\n start_idx += 1\n while start_idx[0] < input_matrix.shape[0] and start_idx[1] < input_matrix.shape[1]:\n tuple_start_idx = tuple(start_idx.tolist())\n visited.add(tuple_start_idx)\n if input_matrix[start_idx[0], start_idx[1]] == 1:\n cur_diag_len += 1\n start_idx += 1\n else:\n break\n diags[idx] = cur_diag_len\n return diags", "docstring": "Calculates the length of the longest diagonal sequence in a given matrix.\nArgs:\n input_matrix (torch.Tensor): The input matrix.\n nonzero_idx (torch.Tensor): The indices of the non-zero elements in the matrix.\nReturns:\n dict: A dictionary where the keys are the indices of the non-zero elements and the values are the lengths of the longest diagonal sequences starting from those indices."} +{"repo": "tensorflow", "function": "def model(x):\n hidden_act = dense_layer(hidden_weights, x)\n logits_act = dense_layer(output_weights, hidden_act, tf.identity)\n y = tf.nn.softmax(logits_act)\n return y", "docstring": "Feed forward function of the model.\n\nArgs:\n x: a (?, 28*28) tensor consisting of the feature inputs for a batch of\n examples.\n\nReturns:\n A (?, 10) tensor containing the class scores for each example."} +{"repo": "transformers", "function": "def replace_batch_norm(model):\n for name, module in model.named_children():\n if isinstance(module, nn.BatchNorm2d):\n new_module = DeformableDetrFrozenBatchNorm2d(module.num_features)\n if not module.weight.device == torch.device('meta'):\n new_module.weight.data.copy_(module.weight)\n new_module.bias.data.copy_(module.bias)\n new_module.running_mean.data.copy_(module.running_mean)\n new_module.running_var.data.copy_(module.running_var)\n model._modules[name] = new_module\n if len(list(module.children())) > 
0:\n replace_batch_norm(module)", "docstring": "Recursively replace all `torch.nn.BatchNorm2d` with `DeformableDetrFrozenBatchNorm2d`.\n\nArgs:\n model (torch.nn.Module):\n input model"} +{"repo": "tensorflow", "function": "def assert_integer_v2(x, message=None, name=None):\n assert_integer(x=x, message=message, name=name)", "docstring": "Assert that `x` is of integer dtype.\n\nIf `x` has a non-integer type, `message`, as well as the dtype of `x` are\nprinted, and `InvalidArgumentError` is raised.\n\nThis can always be checked statically, so this method returns nothing.\n\nArgs:\n x: A `Tensor`.\n message: A string to prefix to the default message.\n name: A name for this operation (optional). Defaults to \"assert_integer\".\n\nRaises:\n TypeError: If `x.dtype` is not a non-quantized integer type."} +{"repo": "mobly", "function": "def abs_path(path):\n return os.path.abspath(os.path.expanduser(path))", "docstring": "Resolve the '.' and '~' in a path to get the absolute path.\n\nArgs:\n path: The path to expand.\n\nReturns:\n The absolute path of the input path."} +{"repo": "transformers", "function": "def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float=0.5, last_epoch: int=-1):\n lr_lambda = partial(_get_cosine_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles)\n return LambdaLR(optimizer, lr_lambda, last_epoch)", "docstring": "Create a schedule with a learning rate that decreases following the values of the cosine function between the\ninitial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the\ninitial lr set in the optimizer.\n\nArgs:\n optimizer ([`~torch.optim.Optimizer`]):\n The optimizer for which to schedule the learning rate.\n num_warmup_steps (`int`):\n The number of steps for the warmup phase.\n num_training_steps (`int`):\n The total number of training steps.\n num_cycles (`float`, *optional*, defaults to 0.5):\n The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0\n following a half-cosine).\n last_epoch (`int`, *optional*, defaults to -1):\n The index of the last epoch when resuming training.\n\nReturn:\n `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule."} +{"repo": "transformers", "function": "def binary_mask_to_rle(mask):\n if is_torch_tensor(mask):\n mask = mask.numpy()\n pixels = mask.flatten()\n pixels = np.concatenate([[0], pixels, [0]])\n runs = np.where(pixels[1:] != pixels[:-1])[0] + 1\n runs[1::2] -= runs[::2]\n return list(runs)", "docstring": "Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.\n\nArgs:\n mask (`torch.Tensor` or `numpy.array`):\n A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target\n segment_id or class_id.\nReturns:\n `List`: Run-length encoded list of the binary mask. 
Refer to COCO API for more information about the RLE\n format."} +{"repo": "transformers", "function": "def preprocess(self, images: Optional[List[ImageInput]]=None, videos: Optional[List[VideoInput]]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size, param_name='size', default_to_square=False)\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb\n if images is not None:\n images = make_list_of_images(images)\n if images is not None and (not valid_images(images)):\n raise ValueError('Invalid input type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n data = {}\n if videos is not None:\n logger.warning(\"`VideoLlavaImageProcessor` works only with image inputs and doesn't process videos anymore. This is a deprecated behavior and will be removed in v5.0. Your videos should be forwarded to `VideoLlavaVideoProcessor`. \")\n videos = make_batched_videos(videos)\n pixel_values_videos = [[self._preprocess_image(image=frame, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_convert_rgb=do_convert_rgb, data_format=data_format, input_data_format=input_data_format) for frame in video] for video in videos]\n data['pixel_values_videos'] = pixel_values_videos\n if images is not None:\n pixel_values_images = [self._preprocess_image(image=image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_convert_rgb=do_convert_rgb, data_format=data_format, input_data_format=input_data_format) for image in images]\n data['pixel_values_images'] = pixel_values_images\n encoded_outputs = BatchFeature(data, tensor_type=return_tensors)\n return encoded_outputs", "docstring": "Preprocess an image or batch of images.\n\nArgs:\n images (`ImageInput`, *optional*):\n List of images to preprocess. 
Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n videos (`VideoInput`, *optional*):\n List of videos to preprocess. Expects a single or batch of videos with pixel values ranging from 0 to 255. If\n passing in videos with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing. Shortest edge of the image is resized to size[\"shortest_edge\"], with\n the longest edge resized to keep the input aspect ratio.\n resample (`int`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only\n has an effect if `do_resize` is set to `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to\n `True`.\n do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):\n Whether to convert the image to RGB.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format."} +{"repo": "tensorflow", "function": "def get_executor():\n return context().executor", "docstring": "Get the Executor of the current thread.\n\nReturns:\n The Executor of the current thread."} +{"repo": "keras", "function": "def erfinv(x):\n if any_symbolic_tensors((x,)):\n return Erfinv().symbolic_call(x)\n x = backend.convert_to_tensor(x)\n return backend.math.erfinv(x)", "docstring": "Computes the inverse error function of `x`, element-wise.\n\nArgs:\n x: Input tensor.\n\nReturns:\n A tensor with the same dtype as `x`.\n\nExample:\n\n>>> x = np.array([-0.5, -0.2, -0.1, 0.0, 0.3])\n>>> keras.ops.erfinv(x)\narray([-0.47694, -0.17914, -0.08886, 0. , 0.27246], dtype=float32)"} +{"repo": "tf-quant-finance", "function": "def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None) -> types.FloatTensor:\n name = name or self._name + '_price'\n with tf.name_scope(name):\n discount_curve = cashflow_streams.get_discount_curve(self._discount_curve_type, market, self._discount_curve_mask)\n currencies = [cur.currency.value for cur in self._discount_curve_type]\n vol_surface = equity_utils.get_vol_surface(currencies, self._equity, market, self._equity_mask)\n spots = tf.stack(market.spot(currencies, self._equity), axis=0)\n discount_factors = discount_curve.discount_factor(self._expiry_date.expand_dims(axis=-1))\n daycount_convention = discount_curve.daycount_convention\n day_count_fn = market_data_utils.get_daycount_fn(daycount_convention)\n if spots.shape.rank > 0:\n spots = tf.gather(spots, self._equity_mask)\n if self._model == 'BS-LSM':\n vols = vol_surface.volatility(expiry_dates=self._expiry_date.expand_dims(axis=-1), strike=tf.expand_dims(self._strike, axis=-1))\n prices = utils.bs_lsm_price(spots=spots, expiry_times=day_count_fn(start_date=market.date, end_date=self._expiry_date, dtype=self._dtype), strikes=self._strike, volatility=tf.squeeze(vols, axis=-1), discount_factors=tf.squeeze(discount_factors), is_call_option=self._is_call_option, num_samples=self._num_samples, num_exercise_times=self._num_exercise_times, num_calibration_samples=self._num_calibration_samples, seed=self._seed)\n return self._short_position * self._contract_amount * prices\n else:\n raise ValueError('Only BS-LSM model is supported. Supplied {}'.format(self._model))", "docstring": "Returns the present value of the American options.\n\nArgs:\n market: An instance of `ProcessedMarketData`.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: `None` which maps to 'price'.\n\nReturns:\n A `Tensor` of shape `batch_shape` containing the modeled price of each\n American option contract based on the input market data."} +{"repo": "tensorflow", "function": "def deprecated_internal_learning_phase_scope(value):\n global _GRAPH_LEARNING_PHASES\n if value not in {0, 1}:\n raise ValueError('Expected learning phase to be 0 or 1.')\n with ops.init_scope():\n if context.executing_eagerly():\n previous_eager_value = _GRAPH_LEARNING_PHASES.get(_DUMMY_EAGER_GRAPH.key, None)\n previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)\n learning_phase_previously_set = _DUMMY_EAGER_GRAPH.learning_phase_is_set\n try:\n deprecated_internal_set_learning_phase(value)\n yield\n finally:\n if not learning_phase_previously_set:\n _DUMMY_EAGER_GRAPH.learning_phase_is_set = False\n with ops.init_scope():\n if context.executing_eagerly():\n if previous_eager_value is not None:\n _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_eager_value\n elif _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES:\n del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]\n graph = get_graph()\n if previous_graph_value is not None:\n _GRAPH_LEARNING_PHASES[graph] = previous_graph_value\n elif graph in _GRAPH_LEARNING_PHASES:\n del _GRAPH_LEARNING_PHASES[graph]", "docstring": "An internal-only version of `learning_phase_scope`.\n\nUnlike the public method, this method does not raise a deprecation warning.\nThis is needed because saved model saving needs to set learning phase\nto maintain compatibility\nwith code that sets/gets the learning phase, but saved model\nsaving itself shouldn't raise a deprecation warning.\n\nWe can get rid of this method and its usages when the public API is\nremoved.\n\nArgs:\n value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train\n\nYields:\n None.\n\nRaises:\n ValueError: if `value` is neither `0` nor `1`."} +{"repo": "yapf", "function": "def _MatchBrackets(line):\n bracket_stack = []\n for token in line.tokens:\n if token.value in _OPENING_BRACKETS:\n bracket_stack.append(token)\n elif token.value in _CLOSING_BRACKETS:\n bracket_stack[-1].matching_bracket = token\n token.matching_bracket = bracket_stack[-1]\n bracket_stack.pop()\n for bracket in bracket_stack:\n if id(pytree_utils.GetOpeningBracket(token.node)) == id(bracket.node):\n bracket.container_elements.append(token)\n token.container_opening = bracket", "docstring": "Visit the node and match the brackets.\n\nFor every open bracket ('[', '{', or '('), find the associated closing bracket\nand \"match\" them up. 
I.e., save in the token a pointer to its associated open\nor close bracket.\n\nArguments:\n  line: (LogicalLine) A logical line."} +{"repo": "transformers", "function": "class BarthezTokenizerFast(PreTrainedTokenizerFast):\n    vocab_files_names = VOCAB_FILES_NAMES\n    model_input_names = ['input_ids', 'attention_mask']\n    slow_tokenizer_class = BarthezTokenizer\n\n    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):\n        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token\n        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)\n        self.vocab_file = vocab_file\n\n    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n        \"\"\"\n        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and\n        adding special tokens. A BARThez sequence has the following format:\n\n        - single sequence: `<s> X </s>`\n        - pair of sequences: `<s> A </s></s> B </s>`\n\n        Args:\n            token_ids_0 (`List[int]`):\n                List of IDs to which the special tokens will be added.\n            token_ids_1 (`List[int]`, *optional*):\n                Optional second list of IDs for sequence pairs.\n\n        Returns:\n            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n        \"\"\"\n        if token_ids_1 is None:\n            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n        cls = [self.cls_token_id]\n        sep = [self.sep_token_id]\n        return cls + token_ids_0 + sep + sep + token_ids_1 + sep\n\n    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n        \"\"\"\n        Create a mask from the two sequences passed to be used in a sequence-pair classification task.\n\n        Args:\n            token_ids_0 (`List[int]`):\n                List of IDs.\n            token_ids_1 (`List[int]`, *optional*):\n                Optional second list of IDs for sequence pairs.\n\n        Returns:\n            `List[int]`: List of zeros.\n        \"\"\"\n        sep = [self.sep_token_id]\n        cls = [self.cls_token_id]\n        if token_ids_1 is None:\n            return len(cls + token_ids_0 + sep) * [0]\n        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]\n\n    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n        if not self.can_save_slow_tokenizer:\n            raise ValueError('Your fast tokenizer does not have the necessary information to save the vocabulary for a slow tokenizer.')\n        if not os.path.isdir(save_directory):\n            logger.error(f'Vocabulary path ({save_directory}) should be a directory')\n            return\n        out_vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n            copyfile(self.vocab_file, out_vocab_file)\n        return (out_vocab_file,)", "docstring": "Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a \"fast\" BARThez tokenizer. Based on\n[SentencePiece](https://github.com/google/sentencepiece).\n\nThis tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. 
Users should\nrefer to this superclass for more information regarding those methods.\n\nArgs:\n    vocab_file (`str`):\n        [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that\n        contains the vocabulary necessary to instantiate a tokenizer.\n    bos_token (`str`, *optional*, defaults to `\"<s>\"`):\n        The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.\n\n        <Tip>\n\n        When building a sequence using special tokens, this is not the token that is used for the beginning of\n        sequence. The token used is the `cls_token`.\n\n        </Tip>\n\n    eos_token (`str`, *optional*, defaults to `\"</s>\"`):\n        The end of sequence token.\n\n        <Tip>\n\n        When building a sequence using special tokens, this is not the token that is used for the end of sequence.\n        The token used is the `sep_token`.\n\n        </Tip>\n\n    sep_token (`str`, *optional*, defaults to `\"</s>\"`):\n        The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for\n        sequence classification or for a text and a question for question answering. It is also used as the last\n        token of a sequence built with special tokens.\n    cls_token (`str`, *optional*, defaults to `\"<s>\"`):\n        The classifier token which is used when doing sequence classification (classification of the whole sequence\n        instead of per-token classification). It is the first token of the sequence when built with special tokens.\n    unk_token (`str`, *optional*, defaults to `\"<unk>\"`):\n        The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n        token instead.\n    pad_token (`str`, *optional*, defaults to `\"<pad>\"`):\n        The token used for padding, for example when batching sequences of different lengths.\n    mask_token (`str`, *optional*, defaults to `\"<mask>\"`):\n        The token used for masking values. This is the token used when training this model with masked language\n        modeling. This is the token which the model will try to predict.\n    additional_special_tokens (`List[str]`, *optional*, defaults to `[\"<s>NOTUSED\", \"</s>NOTUSED\"]`):\n        Additional special tokens used by the tokenizer."} +{"repo": "tensorflow", "function": "def __init__(self, name, default_name=None, values=None) -> None:\n    self._name_scope = name_scope(name, default_name, values, skip_on_eager=False)\n    self._name = default_name if name is None else name", "docstring": "Initialize the context manager.\n\nArgs:\n  name: The name argument that is passed to the op function.\n  default_name: The default name to use if the `name` argument is `None`.\n  values: The list of `Tensor` arguments that are passed to the op function.\n\nRaises:\n  TypeError: if `default_name` is passed in but not a string."} +{"repo": "tensorflow", "function": "def compute_gradients(self, loss, var_list=None, gate_gradients=optimizer.Optimizer.GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, grad_loss=None):\n    loss = self._scale_loss(loss)\n    grads_and_vars = self._optimizer.compute_gradients(loss=loss, var_list=var_list, gate_gradients=gate_gradients, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops, grad_loss=grad_loss)\n    grads = [g for g, _ in grads_and_vars]\n    variables = [v for _, v in grads_and_vars]\n    unscaled_grads = self._unscale_grads(grads)\n    return list(zip(unscaled_grads, variables))", "docstring": "Compute gradients of `loss` for the variables in `var_list`.\n\nThis adjusts the dynamic range of the gradient evaluation by scaling up\nthe `loss` value. 
The gradient values are then scaled back down by the\nreciprocal of the loss scale. This is useful in reduced precision training\nwhere small gradient values would otherwise underflow the representable\nrange.\n\nArgs:\n  loss: A Tensor containing the value to minimize or a callable taking no\n    arguments which returns the value to minimize. When eager execution is\n    enabled it must be a callable.\n  var_list: Optional list or tuple of `tf.Variable` to update to minimize\n    `loss`. Defaults to the list of variables collected in the graph under\n    the key `GraphKeys.TRAINABLE_VARIABLES`.\n  gate_gradients: How to gate the computation of gradients. Can be\n    `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.\n  aggregation_method: Specifies the method used to combine gradient terms.\n    Valid values are defined in the class `AggregationMethod`.\n  colocate_gradients_with_ops: If True, try colocating gradients with the\n    corresponding op.\n  grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.\n\nReturns:\n  A list of (gradient, variable) pairs. Variable is always present, but\n  gradient can be `None`."} +{"repo": "tensorflow", "function": "def sigmoid(x):\n    return nn.sigmoid(x)", "docstring": "Element-wise sigmoid.\n\nArgs:\n    x: A tensor or variable.\n\nReturns:\n    A tensor."} +{"repo": "beam", "function": "def get_pipeline_options(project: str, job_name: str, mode: str, num_workers: int=cfg.NUM_WORKERS, streaming: bool=True) -> PipelineOptions:\n    job_name = f\"{job_name}-{datetime.now().strftime('%Y%m%d%H%M%S')}\"\n    staging_bucket = f'gs://{cfg.PROJECT_ID}-ml-examples'\n    dataflow_options = {'runner': 'DirectRunner' if mode == 'local' else 'DataflowRunner', 'job_name': job_name, 'project': project, 'region': 'us-central1', 'staging_location': f'{staging_bucket}/dflow-staging', 'temp_location': f'{staging_bucket}/dflow-temp', 'setup_file': './setup.py', 'streaming': streaming}\n    if num_workers:\n        dataflow_options.update({'num_workers': num_workers})\n    return PipelineOptions(flags=[], **dataflow_options)", "docstring": "Function to retrieve the pipeline options.\nArgs:\n    project: GCP project to run on\n    job_name: Name of the Dataflow job\n    mode: Indicator to run local, cloud or template\n    num_workers: Number of workers used to run the job in parallel\n    streaming: Whether to run the pipeline in streaming mode\nReturns:\n    Dataflow pipeline options"} +{"repo": "beam", "function": "def insert_rows(self, project_id, dataset_id, table_id, rows, insert_ids=None, skip_invalid_rows=False, ignore_unknown_values=False):\n    insert_ids = [str(self.unique_row_id) if not insert_ids else insert_ids[i] for i, _ in enumerate(rows)]\n    rows = [fast_json_loads(fast_json_dumps(r, default=default_encoder)) for r in rows]\n    result, errors = self._insert_all_rows(project_id, dataset_id, table_id, rows, insert_ids, skip_invalid_rows=skip_invalid_rows, ignore_unknown_values=ignore_unknown_values)\n    return (result, errors)", "docstring": "Inserts rows into the specified table.\n\nArgs:\n  project_id: The project id owning the table.\n  dataset_id: The dataset id owning the table.\n  table_id: The table id.\n  rows: A list of plain Python dictionaries. Each dictionary is a row and\n    each key in it is the name of a field.\n  skip_invalid_rows: If there are rows with insertion errors, whether they\n    should be skipped, and all others should be inserted successfully.\n  ignore_unknown_values: Set this option to true to ignore unknown column\n    names. 
If the input rows contain columns that are not\n part of the existing table's schema, those columns are ignored, and\n the rows are successfully inserted.\n\nReturns:\n A tuple (bool, errors). If first element is False then the second element\n will be a bigquery.InsertErrorsValueListEntry instance containing\n specific errors."} +{"repo": "temporian", "function": "def from_csv(path: str, timestamps: str='timestamp', indexes: Optional[List[str]]=None, sep: str=',') -> EventSet:\n import pandas as pd\n if indexes is None:\n indexes = []\n df = pd.read_csv(path, sep=sep)\n return from_pandas(df, indexes=indexes, timestamps=timestamps)", "docstring": "Reads an [`EventSet`][temporian.EventSet] from a CSV file.\n\n Example:\n ```python\n >>> # Example CSV\n >>> temp_file = str(tmp_dir / \"temporal_data.csv\")\n >>> _ = open(temp_file, \"w\").write(\n ... \"date,feature_1,feature_2\n\"\n ... \"2023-01-01,10.0,3.0\n\"\n ... \"2023-01-02,20.0,4.0\n\"\n ... \"2023-02-01,30.0,5.0\"\n ... )\n >>> # Load CSV\n >>> evset = tp.from_csv(temp_file, timestamps=\"date\")\n >>> evset\n indexes: []\n features: [('feature_1', float64), ('feature_2', float64)]\n events:\n (3 events):\n timestamps: ['2023-01-01T00:00:00' '2023-01-02T00:00:00'\n '2023-02-01T00:00:00']\n 'feature_1': [10. 20. 30.]\n 'feature_2': [3. 4. 5.]\n ...\n\n ```\n\n Args:\n path: Path to the file.\n timestamps: Name of the column to be used as timestamps for the\n EventSet.\n indexes: Names of the columns to be used as indexes for the EventSet.\n If None, a flat EventSet will be created.\n sep: Separator to use.\n\n Returns:\n EventSet read from file.\n "} +{"repo": "pytype", "function": "def deep_variable_product(variables, limit: int=DEEP_VARIABLE_LIMIT):\n return _deep_values_list_product([v.bindings for v in variables], set(), ComplexityLimit(limit))", "docstring": "Take the deep Cartesian product of a list of Variables.\n\nFor example:\n x1.children = {v2, v3}\n v1 = {x1, x2}\n v2 = {x3}\n v3 = {x4, x5}\n v4 = {x6}\nthen\n deep_variable_product([v1, v4]) will return:\n [[x1, x3, x4, x6],\n [x1, x3, x5, x6],\n [x2, x6]]\n.\nArgs:\n variables: A sequence of Variables.\n limit: How many results we allow before aborting.\n\nReturns:\n A list of lists of Values, where each sublist has one Value from each\n of the corresponding Variables and the Variables of their Values' children.\n\nRaises:\n TooComplexError: If we expanded too many values."} +{"repo": "transformers", "function": "class SamHQVisionConfig(PretrainedConfig):\n base_config_key = 'vision_config'\n model_type = 'sam_hq_vision_model'\n\n def __init__(self, hidden_size=768, output_channels=256, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=1024, patch_size=16, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, mlp_ratio=4.0, use_abs_pos=True, use_rel_pos=True, window_size=14, global_attn_indexes=[2, 5, 8, 11], num_pos_feats=128, mlp_dim=None, **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.output_channels = output_channels\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.image_size = image_size\n self.patch_size = patch_size\n self.hidden_act = hidden_act\n self.layer_norm_eps = layer_norm_eps\n self.attention_dropout = attention_dropout\n self.initializer_range = initializer_range\n self.qkv_bias = qkv_bias\n self.mlp_ratio = mlp_ratio\n self.use_abs_pos = use_abs_pos\n 
self.use_rel_pos = use_rel_pos\n self.window_size = window_size\n self.global_attn_indexes = global_attn_indexes\n self.num_pos_feats = num_pos_feats\n self.mlp_dim = int(hidden_size * mlp_ratio) if mlp_dim is None else mlp_dim", "docstring": "This is the configuration class to store the configuration of a [`SamHQVisionModel`]. It is used to instantiate a SAM_HQ\nvision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration\ndefaults will yield a similar configuration to that of the SAM_HQ ViT-h\n[facebook/sam_hq-vit-huge](https://huggingface.co/facebook/sam_hq-vit-huge) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n output_channels (`int`, *optional*, defaults to 256):\n Dimensionality of the output channels in the Patch Encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_channels (`int`, *optional*, defaults to 3):\n Number of channels in the input image.\n image_size (`int`, *optional*, defaults to 1024):\n Expected resolution. Target size of the resized input image.\n patch_size (`int`, *optional*, defaults to 16):\n Size of the patches to be extracted from the input image.\n hidden_act (`str`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string)\n layer_norm_eps (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 1e-10):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n qkv_bias (`bool`, *optional*, defaults to `True`):\n Whether to add a bias to query, key, value projections.\n mlp_ratio (`float`, *optional*, defaults to 4.0):\n Ratio of mlp hidden dim to embedding dim.\n use_abs_pos (`bool`, *optional*, defaults to `True`):\n Whether to use absolute position embedding.\n use_rel_pos (`bool`, *optional*, defaults to `True`):\n Whether to use relative position embedding.\n window_size (`int`, *optional*, defaults to 14):\n Window size for relative position.\n global_attn_indexes (`List[int]`, *optional*, defaults to `[2, 5, 8, 11]`):\n The indexes of the global attention layers.\n num_pos_feats (`int`, *optional*, defaults to 128):\n The dimensionality of the position embedding.\n mlp_dim (`int`, *optional*):\n The dimensionality of the MLP layer in the Transformer encoder. If `None`, defaults to `mlp_ratio *\n hidden_size`.\n\nExample:\n\n```python\n>>> from transformers import (\n... SamHQVisionConfig,\n... SamHQVisionModel,\n... 
)\n\n>>> # Initializing a SamHQVisionConfig with `\"facebook/sam_hq-vit-huge\"` style configuration\n>>> configuration = SamHQVisionConfig()\n\n>>> # Initializing a SamHQVisionModel (with random weights) from the `\"facebook/sam_hq-vit-huge\"` style configuration\n>>> model = SamHQVisionModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "def forward(self, input_ids: torch.Tensor, input_embeds: Optional[torch.Tensor]=None) -> torch.Tensor:\n if input_ids is not None:\n input_embeds = self.word_embeddings(input_ids)\n seq_length = input_embeds.size(1)\n if hasattr(self, 'position_ids'):\n position_ids = self.position_ids[:, :seq_length]\n else:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n embeddings = input_embeds + position_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings", "docstring": "Parameters:\n input_ids (torch.Tensor):\n torch.tensor(bs, max_seq_length) The token ids to embed.\n input_embeds (*optional*, torch.Tensor):\n The pre-computed word embeddings. Can only be passed if the input ids are `None`.\n\n\nReturns: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type\nembeddings)"} +{"repo": "transformers", "function": "def _v2_get_resized_embeddings(self, old_embeddings: keras.layers.Embedding, new_num_tokens: int) -> keras.layers.Embedding:\n init_range = 0.02\n potential_initialization_variable_names = ['initializer_range', 'initializer_factor', 'init_std']\n for var_name in potential_initialization_variable_names:\n if hasattr(self.config, var_name):\n init_range = getattr(self.config, var_name)\n new_embeddings = keras.layers.Embedding(input_dim=new_num_tokens, output_dim=old_embeddings.output_dim, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=init_range), name=old_embeddings.embeddings.name[:-13])\n new_embeddings(tf.constant([[0]]))\n if old_embeddings.input_dim >= new_num_tokens:\n init_embeddings = old_embeddings.embeddings[:new_num_tokens]\n else:\n init_embeddings = tf.concat([old_embeddings.embeddings, new_embeddings.embeddings[old_embeddings.input_dim:]], axis=0)\n new_embeddings.embeddings.assign(init_embeddings)\n return new_embeddings", "docstring": "Build a resized Embedding layer from a provided Embedding layer. Increasing the size will add newly initialized\nvectors at the end. 
Reducing the size will remove vectors from the end.\n\nArgs:\n old_embeddings (`keras.layers.Embedding`):\n Old embeddings to be resized.\n new_num_tokens (`int`, *optional*):\n New number of tokens in the embedding matrix.\n\nReturn:\n `keras.layers.Embedding`: Resized Embedding layer."} +{"repo": "transformers", "function": "class RwkvOutput(ModelOutput):\n last_hidden_state: Optional[torch.FloatTensor] = None\n state: Optional[List[torch.FloatTensor]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Class for the RWKV model outputs.\n\nArgs:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`):\n The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to\n avoid providing the old `input_ids`.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "sprockets", "function": "def Start(self, port, data_callback, error_callback, websocket_url, index_url, html_url, html_dir):", "docstring": "Starts the web server.\n\nUnderlying implementation can be based on http or websocket, or support\nSSL or not.\n\nArgs:\n port: tcp/ssl port for http or websocket.\n data_callback: a function called on incoming data from GUI clients.\n The arguments are (web_server, client_id, data) where\n web_server: this instance.\n client_id: an opaque ID or object for a GUI client for the incoming\n data or request.\n data: an incoming byte stream data from a GUI client.\n error_callback: a function called on any error from GUI clients which\n made the connection to the GUI client is invalid. 
The arguments are\n    (web_server, client_id, error) where\n      web_server: this instance.\n      client_id: an opaque ID or object for a GUI client for the error.\n      error: an error string.\n  websocket_url: the absolute URL path of websocket or long-live http\n    connection.\n  index_url: the absolute URL path of main index.html.\n  html_url: the absolute URL path of a directory serving static html or\n    javascript resources.\n  html_dir: the absolute file path of a directory corresponding to html_url.\n\nRaises:\n  IOError: Cannot start a web server."} +{"repo": "transformers", "function": "class MyT5Tokenizer(PreTrainedTokenizer):\n    model_input_names = ['input_ids', 'attention_mask']\n    vocab_files_names = VOCAB_FILES_NAMES\n\n    def __init__(self, vocab_file, eos_token='</s>', unk_token='<unk>', pad_token='<pad>', extra_ids=125, additional_special_tokens=None, **kwargs) -> None:\n        if extra_ids > 0 and additional_special_tokens is None:\n            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]\n        elif extra_ids > 0 and additional_special_tokens is not None and (len(additional_special_tokens) > 0):\n            extra_tokens = len(set(filter(lambda x: bool('extra_id' in str(x)), additional_special_tokens)))\n            if extra_tokens != extra_ids:\n                raise ValueError(f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to MyT5Tokenizer. In this case the additional_special_tokens must include the extra_ids tokens')\n        pad_token = AddedToken(pad_token, lstrip=True, rstrip=True) if isinstance(pad_token, str) else pad_token\n        eos_token = AddedToken(eos_token, lstrip=True, rstrip=True) if isinstance(eos_token, str) else eos_token\n        unk_token = AddedToken(unk_token, lstrip=True, rstrip=True) if isinstance(unk_token, str) else unk_token\n        self._added_tokens_decoder = {0: pad_token, 1: eos_token, 2: unk_token}\n        self.offset = len(self._added_tokens_decoder)\n        self._utf_vocab_size = 2 ** 8\n        self.byte_maps = json.load(open(vocab_file, 'r'))\n        self.decompose_rewriter = ByteRewriter(self.byte_maps['decompose_map'])\n        self.merge_rewriter = ByteRewriter(self.byte_maps['merge_map'])\n        super().__init__(eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=0, additional_special_tokens=additional_special_tokens, **kwargs)\n\n    @property\n    def vocab_size(self):\n        return self._utf_vocab_size\n\n    def get_vocab(self):\n        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size + self.offset)}\n        vocab.update(self.added_tokens_encoder)\n        return vocab\n\n    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n        \"\"\"\n        Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is None:\n return [0] * len(token_ids_0) + [1]\n return [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n\n def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:\n \"\"\"Do not add eos again if user already added it.\"\"\"\n if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:\n warnings.warn(f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated eos tokens being added.')\n return token_ids\n else:\n return token_ids + [self.eos_token_id]\n\n def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n \"\"\"\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. MyT5 does not\n make use of token type ids, therefore a list of zeros is returned.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of zeros.\n \"\"\"\n eos = [self.eos_token_id]\n if token_ids_1 is None:\n return len(token_ids_0 + eos) * [0]\n return len(token_ids_0 + eos + token_ids_1 + eos) * [0]\n\n def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. 
A sequence has the following format:\n\n        - single sequence: `X </s>`\n        - pair of sequences: `A </s> B </s>`\n\n        Args:\n            token_ids_0 (`List[int]`):\n                List of IDs to which the special tokens will be added.\n            token_ids_1 (`List[int]`, *optional*):\n                Optional second list of IDs for sequence pairs.\n\n        Returns:\n            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.\n        \"\"\"\n        token_ids_0 = self._add_eos_if_not_present(token_ids_0)\n        if token_ids_1 is None:\n            return token_ids_0\n        else:\n            token_ids_1 = self._add_eos_if_not_present(token_ids_1)\n            return token_ids_0 + token_ids_1\n\n    def _tokenize(self, text: str, **kwargs) -> List[str]:\n        \"\"\"Take as input a string and return a list of strings (tokens) for words/sub-words.\n        Represents tokens in two character hex format\"\"\"\n        tokens = [f'{i:02x}' for i in text.encode('utf-8')]\n        tokens = self.morphological_encode(tokens)\n        return tokens\n\n    def _convert_token_to_id(self, token):\n        \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n        if len(token) != 2:\n            token_id = None\n        else:\n            token_id = int(token, 16) + self.offset\n        return token_id\n\n    def _convert_id_to_token(self, index):\n        \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n        token = f'{index - self.offset:02x}'\n        return token\n\n    def morphological_encode(self, indices: List[str]) -> List[str]:\n        indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=False)\n        indices = self.merge_rewriter.rewrite_bytes(indices, reverse=False)\n        return indices\n\n    def morphological_decode(self, indices: List[str]) -> List[str]:\n        indices = self.merge_rewriter.rewrite_bytes(indices, reverse=True)\n        indices = self.decompose_rewriter.rewrite_bytes(indices, reverse=True)\n        return indices\n\n    def convert_tokens_to_string(self, tokens):\n        \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n        bstring = b''\n        out_tokens = []\n        for token in tokens:\n            if token in self.added_tokens_decoder:\n                out_tokens.append(self.added_tokens_decoder[token])\n            elif token in self.added_tokens_encoder:\n                out_tokens.append(token)\n            else:\n                out_tokens.append(token)\n        out_tokens = self.morphological_decode(out_tokens)\n        _added_tokens = set(self.added_tokens_decoder.values()) | set(self.added_tokens_encoder)\n        for token in out_tokens:\n            if token in _added_tokens:\n                bstring += bytes(token, 'utf-8')\n            else:\n                bstring += bytes.fromhex(token)\n        string = bstring.decode('utf-8', errors='ignore')\n        return string\n\n    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n        if os.path.isdir(save_directory):\n            vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n        else:\n            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory\n        with open(vocab_file, 'w', encoding='utf-8') as writer:\n            writer.write(json.dumps(self.byte_maps, indent=2, ensure_ascii=False))\n        return (vocab_file,)", "docstring": "Construct a MyT5 tokenizer.\n\nThis tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to\nthis superclass for more information regarding those methods.\n\nArgs:\n    vocab_file (`str`): The file containing the byte rewriting rules.\n    eos_token (`str`, *optional*, defaults to `\"</s>\"`):\n        The end of sequence token.\n\n    unk_token (`str`, *optional*, defaults to `\"<unk>\"`):\n        The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n        token instead.\n    pad_token (`str`, *optional*, defaults to `\"<pad>\"`):\n        The token used for padding, for example when batching sequences of different lengths.\n    extra_ids (`int`, *optional*, defaults to 125):\n        Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are\n        accessible as \"<extra_id_{%d}>\" where \"{%d}\" is a number between 0 and extra_ids-1. Extra tokens are\n        indexed from the end of the vocabulary up to beginning (\"<extra_id_0>\" is the last token in the vocabulary\n        like in ByT5 preprocessing see\n        [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117)).\n    additional_special_tokens (`List[str]`, *optional*):\n        Additional special tokens used by the tokenizer."} +{"repo": "transformers", "function": "def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n    with open(json_file_path, 'w', encoding='utf-8') as writer:\n        config_dict = self.to_dict()\n        json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\\n'\n        writer.write(json_string)", "docstring": "Save this instance to a JSON file.\n\nArgs:\n    json_file_path (`str` or `os.PathLike`):\n        Path to the JSON file in which this configuration instance's parameters will be saved."} +{"repo": "keras", "function": "def serialize_keras_object(instance):\n    instance = inspect.unwrap(instance)\n    if instance is None:\n        return None\n    if hasattr(instance, 'get_config'):\n        name = object_registration.get_registered_name(instance.__class__)\n        try:\n            config = instance.get_config()\n        except NotImplementedError as e:\n            if _SKIP_FAILED_SERIALIZATION:\n                return serialize_keras_class_and_config(name, {_LAYER_UNDEFINED_CONFIG_KEY: True})\n            raise e\n        serialization_config = {}\n        for key, item in config.items():\n            if isinstance(item, str):\n                serialization_config[key] = item\n                continue\n            try:\n                serialized_item = serialize_keras_object(item)\n                if isinstance(serialized_item, dict) and (not isinstance(item, dict)):\n                    serialized_item['__passive_serialization__'] = True\n                serialization_config[key] = serialized_item\n            except ValueError:\n                serialization_config[key] = item\n        name = object_registration.get_registered_name(instance.__class__)\n        return serialize_keras_class_and_config(name, serialization_config, instance)\n    if hasattr(instance, '__name__'):\n        return object_registration.get_registered_name(instance)\n    raise ValueError(f\"Cannot serialize {instance} because it doesn't implement `get_config()`.\")", "docstring": "Serialize a Keras object into a JSON-compatible representation.\n\nCalls to `serialize_keras_object` while underneath the\n`SharedObjectSavingScope` context manager will cause any objects re-used\nacross multiple layers to be saved with a special shared object ID. 
This\nallows the network to be re-created properly during deserialization.\n\nArgs:\n instance: The object to serialize.\n\nReturns:\n A dict-like, JSON-compatible representation of the object's config."} +{"repo": "pytype", "function": "def compatible_with(value, logical_value):\n if isinstance(value, abstract.List) and (not value.is_concrete):\n return True\n elif isinstance(value, abstract.Dict) and (not value.is_concrete):\n return not logical_value or bool(value.get_instance_type_parameter(abstract_utils.K).bindings)\n elif isinstance(value, abstract.LazyConcreteDict):\n return value.is_empty() != logical_value\n elif isinstance(value, abstract.PythonConstant):\n return bool(value.pyval) == logical_value\n elif isinstance(value, abstract.Instance):\n name = value.full_name\n if logical_value and name in _CONTAINER_NAMES:\n ret = value.has_instance_type_parameter(abstract_utils.T) and bool(value.get_instance_type_parameter(abstract_utils.T).bindings)\n return ret\n elif name == 'builtins.NoneType':\n return not logical_value\n elif name in NUMERIC:\n return True\n elif isinstance(value.cls, abstract.Class) and (not value.cls.overrides_bool):\n if getattr(value.cls, 'template', None):\n return True\n return logical_value\n return True\n elif isinstance(value, (abstract.Function, abstract.Class)):\n return logical_value\n else:\n return True", "docstring": "Returns the conditions under which the value could be True or False.\n\nArgs:\n value: An abstract value.\n logical_value: Either True or False.\n\nReturns:\n False: If the value could not evaluate to logical_value under any\n circumstance (e.g. value is the empty list and logical_value is True).\n True: If it is possible for the value to evaluate to the logical_value,\n and any ambiguity cannot be resolved by additional bindings."} +{"repo": "transformers", "function": "class BridgeTowerConfig(PretrainedConfig):\n model_type = 'bridgetower'\n sub_configs = {'text_config': BridgeTowerTextConfig, 'vision_config': BridgeTowerVisionConfig}\n\n def __init__(self, share_cross_modal_transformer_layers=True, hidden_act='gelu', hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type='add', num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs):\n _ = kwargs.pop('text_config_dict', None)\n _ = kwargs.pop('vision_config_dict', None)\n super().__init__(**kwargs)\n self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers\n self.hidden_act = hidden_act\n self.hidden_size = hidden_size\n self.initializer_factor = initializer_factor\n self.layer_norm_eps = layer_norm_eps\n self.share_link_tower_layers = share_link_tower_layers\n self.link_tower_type = link_tower_type\n self.num_attention_heads = num_attention_heads\n self.num_hidden_layers = num_hidden_layers\n self.tie_word_embeddings = tie_word_embeddings\n self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder\n if text_config is None:\n text_config = {}\n logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.')\n if vision_config is None:\n vision_config = {}\n logger.info('`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.')\n self.text_config = BridgeTowerTextConfig(**text_config)\n self.vision_config = BridgeTowerVisionConfig(**vision_config)\n\n @classmethod\n def from_text_vision_configs(cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs):\n \"\"\"\n Instantiate a [`BridgeTowerConfig`] (or a derived class) from BridgeTower text model configuration. Returns:\n [`BridgeTowerConfig`]: An instance of a configuration object\n \"\"\"\n return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`BridgeTowerModel`]. It is used to instantiate a\nBridgeTower model according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the bridgetower-base\n[BridgeTower/bridgetower-base](https://huggingface.co/BridgeTower/bridgetower-base/) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n share_cross_modal_transformer_layers (`bool`, *optional*, defaults to `True`):\n Whether cross modal transformer layers are shared.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler.\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n initializer_factor (`float`, *optional*, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n layer_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the layer normalization layers.\n share_link_tower_layers (`bool`, *optional*, defaults to `False`):\n Whether the bride/link tower layers are shared.\n link_tower_type (`str`, *optional*, defaults to `\"add\"`):\n Type of the bridge/link layer.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 6):\n Number of hidden layers in the Transformer encoder.\n tie_word_embeddings (`bool`, *optional*, defaults to `False`):\n Whether to tie input and output embeddings.\n init_layernorm_from_vision_encoder (`bool`, *optional*, defaults to `False`):\n Whether to init LayerNorm from the vision encoder.\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`BridgeTowerTextConfig`].\n vision_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`BridgeTowerVisionConfig`].\n\nExample:\n\n```python\n>>> from transformers import BridgeTowerModel, BridgeTowerConfig\n\n>>> # Initializing a BridgeTower BridgeTower/bridgetower-base style configuration\n>>> configuration = BridgeTowerConfig()\n\n>>> # Initializing a model from the BridgeTower/bridgetower-base style configuration\n>>> model = BridgeTowerModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def enqueue(self, features, weights=None, training: bool=True, name: Optional[Text]=None, device: Optional[Text]=None):\n if not self._using_tpu:\n raise RuntimeError('enqueue is not valid when 
TPUEmbedding object is not created under a TPUStrategy.')\n in_tpu_context = self._raise_error_for_incorrect_control_flow_context()\n nest.assert_same_structure(self._feature_config, features)\n if not self._verify_output_shapes_on_enqueue:\n if not self._output_shapes or not self._built:\n raise ValueError('Configured not to check output shapes on each enqueue() call; please ensure build() was called with output shapes to initialize the TPU for embeddings.')\n else:\n per_replica = device is None\n input_shapes = self._get_input_shapes(features, per_replica, in_tpu_context)\n self._maybe_build(input_shapes)\n self._check_output_shapes(self._get_output_shapes_from_input_shapes(input_shapes))\n flat_inputs = nest.flatten(features)\n flat_weights = [None] * len(flat_inputs)\n if weights is not None:\n nest.assert_same_structure(self._feature_config, weights)\n flat_weights = nest.flatten(weights)\n flat_features = nest.flatten_with_joined_string_paths(self._feature_config)\n flat_paths, _ = zip(*flat_features)\n self._raise_error_for_inputs_not_on_cpu(flat_inputs, flat_paths)\n if in_tpu_context:\n self._raise_error_for_non_direct_inputs(features)\n\n def generate_enqueue_ops():\n \"\"\"Generate enqueue ops for outside compilation.\"\"\"\n mode_override = array_ops.where_v2(training, constant_op.constant('train'), constant_op.constant('inference'))\n enqueue_op = self._generate_enqueue_op(flat_inputs, flat_weights, flat_features, device_ordinal=-1, mode_override=mode_override)\n if name is not None:\n _add_key_attr(enqueue_op, name)\n tpu_replication.outside_compilation(generate_enqueue_ops)\n elif device is None:\n mode_override = 'train' if training else 'inference'\n enqueue_ops = []\n\n def _split_fn(ts, idx):\n if ts is None:\n return None\n elif isinstance(ts, tensor_lib.Tensor):\n return array_ops.split(ts, num_or_size_splits=self._num_cores_per_replica, axis=0)[idx]\n elif isinstance(ts, sparse_tensor.SparseTensor):\n return sparse_ops.sparse_split_v2(sp_input=ts, num_split=self._num_cores_per_replica, axis=0)[idx]\n else:\n raise ValueError('SPMD does not support raggedTensor yet.')\n\n def _maybe_split(ts_inputs, core_id):\n if self._num_cores_per_replica is None:\n return ts_inputs\n else:\n splitter = functools.partial(_split_fn, idx=core_id)\n return nest.map_structure(splitter, ts_inputs)\n for replica_id in range(self._strategy.num_replicas_in_sync):\n replica_inputs = distribute_utils.select_replica(replica_id, flat_inputs)\n replica_weights = distribute_utils.select_replica(replica_id, flat_weights)\n if self._num_cores_per_replica:\n tpu_devices = self._strategy.extended._tpu_devices[replica_id]\n else:\n tpu_devices = [self._strategy.extended.worker_devices[replica_id]]\n for core_id in range(self._num_cores_per_replica or 1):\n tpu_device = tpu_devices[core_id]\n device_ordinal = tf_device.DeviceSpec.from_string(tpu_device).device_index\n with ops.device(device_util.get_host_for_device(tpu_device)):\n enqueue_op = self._generate_enqueue_op(_maybe_split(replica_inputs, core_id), _maybe_split(replica_weights, core_id), flat_features, device_ordinal=device_ordinal, mode_override=mode_override)\n if name is not None:\n _add_key_attr(enqueue_op, name)\n enqueue_ops.append(enqueue_op)\n else:\n mode_override = 'train' if training else 'inference'\n device_spec = tf_device.DeviceSpec.from_string(device)\n if device_spec.device_type != 'TPU':\n raise ValueError('Non-TPU device {} passed to enqueue.'.format(device))\n with ops.device(device_util.get_host_for_device(device)):\n 
enqueue_op = self._generate_enqueue_op(flat_inputs, flat_weights, flat_features, device_ordinal=device_spec.device_index, mode_override=mode_override)\n if name is not None:\n _add_key_attr(enqueue_op, name)", "docstring": "Enqueues id tensors for embedding lookup.\n\nThis function enqueues a structure of features to be looked up in the\nembedding tables. We expect that the input shapes of each of the tensors in\nfeatures matches the output shapes set via FeatureConfig or build method\n(if any). the output shapes will be auto detected based on the input shapes\nwith the max_sequence_length or output shape setting in the FeatureConfig.\nNote that the output shapes is based on per replica batch size.\nIf your input dataset is batched to the global batch size and you use\n`tf.distribute.TPUStrategy`'s `experimental_distribute_dataset`\nor if you use `distribute_datasets_from_function` and batch\nto the per core batch size computed by the context passed to your input\nfunction, the output shapes should match automatically.\n\nThe auto detected the output shapes:\n 1. For dense tensor, if rank 2 or above, make sure the tensor has last\n dimension as 1. The output shape will be the input shape excluding\n the last dimension.\n 2. For sparse tensor, make sure the tensor has rank 2 and above.\n a. If feature config has max_sequence_length equals 0 or output shape\n set (the max_sequence_length setting will be ignored), the\n output shape will be the input shape excluding the last dimension.\n b. Otherwise, if the tensor is rank 2, the output shape will be input\n shape with last dimension set as max_sequence_length. If the\n tensor is above rank 2, the output shape will be the input shape\n excluding the last dimension and the last dimension of the output\n shape will be set to max_sequence_length.\n 3. For ragged tensor, make sure the tensor has rank 2.\n a. If feature config has max_sequence_length equals 0 or output shape\n set (the max_sequence_length setting will be ignored), the\n output shape will be the input shape excluding the last dimension.\n b. Otherwise, the output shape will be the input shape excluding the\n last dimension and the last dimension of the output shape will be\n set to max_sequence_length.\n\n```python\nstrategy = tf.distribute.TPUStrategy(...)\nwith strategy.scope():\n embedding = tf.tpu.experimental.embedding.TPUEmbedding(...)\n\ndistributed_dataset = (\n strategy.distribute_datasets_from_function(\n dataset_fn=...,\n options=tf.distribute.InputOptions(\n experimental_fetch_to_device=False))\ndataset_iterator = iter(distributed_dataset)\n\n@tf.function\ndef training_step():\n def tpu_step(tpu_features):\n with tf.GradientTape() as tape:\n activations = embedding.dequeue()\n tape.watch(activations)\n\n loss = ... # some computation involving activations\n\n embedding_gradients = tape.gradient(loss, activations)\n embedding.apply_gradients(embedding_gradients)\n\n embedding_features, tpu_features = next(dataset_iterator)\n embedding.enqueue(embedding_features, training=True)\n strategy.run(tpu_step, args=(tpu_features,))\n\ntraining_step()\n```\n\nNOTE: You should specify `training=True` when using\n`embedding.apply_gradients` as above and `training=False` when not using\n`embedding.apply_gradients` (e.g. 
for frozen embeddings or when doing\nevaluation).\n\nFor finer grained control, in the above example the line\n\n```\n embedding.enqueue(embedding_features, training=True)\n```\n\nmay be replaced with\n\n```\n per_core_embedding_features = self.strategy.experimental_local_results(\n embedding_features)\n\n def per_core_enqueue(ctx):\n core_id = ctx.replica_id_in_sync_group\n device = strategy.extended.worker_devices[core_id]\n embedding.enqueue(per_core_embedding_features[core_id],\n device=device)\n\n strategy.experimental_distribute_values_from_function(\n per_core_queue_inputs)\n```\n\nArgs:\n features: A nested structure of `tf.Tensor`s, `tf.SparseTensor`s or\n `tf.RaggedTensor`s, with the same structure as `feature_config`. Inputs\n will be downcast to `tf.int32`. Only one type out of `tf.SparseTensor`\n or `tf.RaggedTensor` is supported per call.\n weights: If not `None`, a nested structure of `tf.Tensor`s,\n `tf.SparseTensor`s or `tf.RaggedTensor`s, matching the above, except\n that the tensors should be of float type (and they will be downcast to\n `tf.float32`). For `tf.SparseTensor`s we assume the `indices` are the\n same for the parallel entries from `features` and similarly for\n `tf.RaggedTensor`s we assume the row_splits are the same.\n training: Defaults to `True`. If `False`, enqueue the batch as inference\n batch (forward pass only). Do not call `apply_gradients` when this is\n `False` as this may lead to a deadlock.\n name: A name for the underlying op.\n device: The device name (e.g. '/task:0/device:TPU:2') where this batch\n should be enqueued. This should be set if and only if features is not a\n `tf.distribute.DistributedValues` and enqueue is not being called\n inside a TPU context (e.g. inside `TPUStrategy.run`).\n\nRaises:\n ValueError: When called inside a strategy.run call and input is not\n directly taken from the args of the `strategy.run` call. Also if\n the size of any sequence in `features` does not match corresponding\n sequence in `feature_config`. Similarly for `weights`, if not `None`.\n If input shapes of features is unequal or different from a previous\n call.\n RuntimeError: When called inside a strategy.run call and inside XLA\n control flow. If batch_size is not able to be determined and build was\n not called.\n TypeError: If the type of any sequence in `features` does not match\n corresponding sequence in `feature_config`. 
Similarly for `weights`, if\n not `None`."} +{"repo": "tensorflow", "function": "def update_hash_with_primitive_value(hash_value, value):\n hash_const = np.uint64(11400714819323197440)\n hash_value = np.uint64(hash_value)\n value = np.uint64(value)\n hash_value = np.array([hash_value])\n value = np.array([value])\n hash_value = np.bitwise_xor(hash_value, value + hash_const + np.left_shift(hash_value, 10) + np.right_shift(hash_value, 4))[0]\n return hash_value", "docstring": "Update the hash value using a primitive value.\n\nArgs:\n hash_value (uint64): The current hash value.\n value: The primitive value to incorporate into the hash.\n\nReturns:\n int: The updated hash value."} +{"repo": "mobly", "function": "class AttenuatorDevice:\n\n def __init__(self, path_count=1):\n self.path_count = path_count\n self._telnet_client = telnet_scpi_client.TelnetScpiClient(tx_cmd_separator='\\r\\n', rx_cmd_separator='\\r\\n', prompt='')\n\n @property\n def is_open(self):\n \"\"\"This function returns the state of the telnet connection to the\n underlying AttenuatorDevice.\n\n Returns:\n True if there is a successfully open connection to the\n AttenuatorDevice.\n \"\"\"\n return bool(self._telnet_client.is_open)\n\n def open(self, host, port=23):\n \"\"\"Opens a telnet connection to the desired AttenuatorDevice and\n queries basic information.\n\n Args:\n host: A valid hostname (IP address or DNS-resolvable name) to an\n MC-DAT attenuator instrument.\n port: An optional port number (defaults to telnet default 23)\n \"\"\"\n self._telnet_client.open(host, port)\n config_str = self._telnet_client.cmd('MN?')\n if config_str.startswith('MN='):\n config_str = config_str[len('MN='):]\n self.properties = dict(zip(['model', 'max_freq', 'max_atten'], config_str.split('-', 2)))\n self.max_atten = float(self.properties['max_atten'])\n\n def close(self):\n \"\"\"Closes a telnet connection to the desired attenuator device.\n\n This should be called as part of any teardown procedure prior to the\n attenuator instrument leaving scope.\n \"\"\"\n if self.is_open:\n self._telnet_client.close()\n\n def set_atten(self, idx, value):\n \"\"\"Sets the attenuation value for a particular signal path.\n\n Args:\n idx: Zero-based index int which is the identifier for a particular\n signal path in an instrument. For instruments that only has one\n channel, this is ignored by the device.\n value: A float that is the attenuation value to set.\n\n Raises:\n Error: The underlying telnet connection to the instrument is not\n open.\n IndexError: The index of the attenuator is greater than the maximum\n index of the underlying instrument.\n ValueError: The requested set value is greater than the maximum\n attenuation value.\n \"\"\"\n if not self.is_open:\n raise attenuator.Error('Connection to attenuator at %s is not open!' 
% self._telnet_client.host)\n if idx + 1 > self.path_count:\n raise IndexError('Attenuator index out of range!', self.path_count, idx)\n if value > self.max_atten:\n raise ValueError('Attenuator value out of range!', self.max_atten, value)\n self._telnet_client.cmd('CHAN:%s:SETATT:%s' % (idx + 1, value))\n\n def get_atten(self, idx=0):\n \"\"\"This function returns the current attenuation from an attenuator at a\n given index in the instrument.\n\n Args:\n idx: This zero-based index is the identifier for a particular\n attenuator in an instrument.\n\n Raises:\n Error: The underlying telnet connection to the instrument is not\n open.\n\n Returns:\n A float that is the current attenuation value.\n \"\"\"\n if not self.is_open:\n raise attenuator.Error('Connection to attenuator at %s is not open!' % self._telnet_client.host)\n if idx + 1 > self.path_count or idx < 0:\n raise IndexError('Attenuator index out of range!', self.path_count, idx)\n telnet_cmd = ':ATT?' if self.path_count == 1 else 'CHAN:%s:ATT?' % (idx + 1)\n atten_val_str = self._telnet_client.cmd(telnet_cmd)\n atten_val = float(atten_val_str)\n return atten_val", "docstring": "This provides a specific telnet-controlled implementation of\nAttenuatorDevice for Mini-Circuits RC-DAT attenuators.\n\nAttributes:\n path_count: The number of signal attenuation path this device has."} +{"repo": "tensorflow", "function": "def generate_inputs_outputs(tflite_model_binary, min_value=0, max_value=255):\n interpreter = lite.Interpreter(model_content=tflite_model_binary)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n input_values = {}\n for input_detail in input_details:\n input_value = create_tensor_data(input_detail['dtype'], input_detail['shape'], min_value=min_value, max_value=max_value)\n interpreter.set_tensor(input_detail['index'], input_value)\n input_values.update({_normalize_input_name(input_detail['name']): input_value})\n interpreter.invoke()\n output_details = interpreter.get_output_details()\n output_values = {}\n for output_detail in output_details:\n output_values.update({_normalize_output_name(output_detail['name']): interpreter.get_tensor(output_detail['index'])})\n return (input_values, output_values)", "docstring": "Generate input values and output values of the given tflite model.\n\nArgs:\n tflite_model_binary: A serialized flatbuffer as a string.\n min_value: min value for the input tensor.\n max_value: max value for the input tensor.\n\nReturns:\n (input_values, output_values): Maps of input values and output values\n built."} +{"repo": "tensorflow", "function": "def huber(y_true, y_pred, delta=1.0):\n y_pred = math_ops.cast(y_pred, dtype=backend.floatx())\n y_true = math_ops.cast(y_true, dtype=backend.floatx())\n delta = math_ops.cast(delta, dtype=backend.floatx())\n error = math_ops.subtract(y_pred, y_true)\n abs_error = math_ops.abs(error)\n half = tensor_conversion.convert_to_tensor_v2_with_dispatch(0.5, dtype=abs_error.dtype)\n return backend.mean(array_ops.where_v2(abs_error <= delta, half * math_ops.square(error), delta * abs_error - half * math_ops.square(delta)), axis=-1)", "docstring": "Computes Huber loss value.\n\nFor each value x in `error = y_true - y_pred`:\n\n```\nloss = 0.5 * x^2 if |x| <= d\nloss = d * |x| - 0.5 * d^2 if |x| > d\n```\nwhere d is `delta`. 
See: https://en.wikipedia.org/wiki/Huber_loss\n\nArgs:\n y_true: tensor of true targets.\n y_pred: tensor of predicted targets.\n delta: A float, the point where the Huber loss function changes from a\n quadratic to linear.\n\nReturns:\n Tensor with one scalar loss entry per sample."} +{"repo": "transformers", "function": "def class_predictor(self, image_feats: torch.FloatTensor, query_embeds: Optional[torch.FloatTensor]=None, query_mask: Optional[torch.Tensor]=None) -> Tuple[torch.FloatTensor]:\n pred_logits, image_class_embeds = self.class_head(image_feats, query_embeds, query_mask)\n return (pred_logits, image_class_embeds)", "docstring": "Args:\n image_feats:\n Features extracted from the `image_text_embedder`.\n query_embeds:\n Text query embeddings.\n query_mask:\n Must be provided with query_embeddings. A mask indicating which query embeddings are valid."} +{"repo": "tensorflow", "function": "def _enter_scope_uncached(self):\n if self._auxiliary_name_scope:\n current_name_scope = None\n else:\n name_scope = ops.get_name_scope()\n if name_scope:\n name_scope += '/'\n current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)\n else:\n current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)\n if self._name_or_scope is not None:\n if not isinstance(self._name_or_scope, (VariableScope, str)):\n raise TypeError('VariableScope: name_or_scope must be a string or VariableScope.')\n if isinstance(self._name_or_scope, str):\n name_scope = self._name_or_scope\n else:\n name_scope = self._name_or_scope.name.split('/')[-1]\n if name_scope or current_name_scope:\n current_name_scope = current_name_scope or ops.name_scope(name_scope, skip_on_eager=False)\n try:\n current_name_scope_name = current_name_scope.__enter__()\n except:\n current_name_scope.__exit__(*sys.exc_info())\n raise\n self._current_name_scope = current_name_scope\n if isinstance(self._name_or_scope, str):\n old_name_scope = current_name_scope_name\n else:\n old_name_scope = self._name_or_scope.original_name_scope\n pure_variable_scope = _pure_variable_scope(self._name_or_scope, reuse=self._reuse, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, old_name_scope=old_name_scope, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint)\n try:\n entered_pure_variable_scope = pure_variable_scope.__enter__()\n except:\n pure_variable_scope.__exit__(*sys.exc_info())\n raise\n self._cached_pure_variable_scope = pure_variable_scope\n return entered_pure_variable_scope\n else:\n self._current_name_scope = None\n pure_variable_scope = _pure_variable_scope(self._name_or_scope, reuse=self._reuse, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint)\n try:\n entered_pure_variable_scope = pure_variable_scope.__enter__()\n except:\n pure_variable_scope.__exit__(*sys.exc_info())\n raise\n self._cached_pure_variable_scope = pure_variable_scope\n return entered_pure_variable_scope\n else:\n if self._reuse:\n raise ValueError('reuse=True cannot be used without a name_or_scope')\n current_name_scope = current_name_scope or ops.name_scope(self._default_name, skip_on_eager=False)\n try:\n current_name_scope_name = current_name_scope.__enter__()\n except:\n current_name_scope.__exit__(*sys.exc_info())\n 
raise\n self._current_name_scope = current_name_scope\n unique_default_name = _get_unique_variable_scope(self._default_name)\n pure_variable_scope = _pure_variable_scope(unique_default_name, initializer=self._initializer, regularizer=self._regularizer, caching_device=self._caching_device, partitioner=self._partitioner, custom_getter=self._custom_getter, old_name_scope=current_name_scope_name, dtype=self._dtype, use_resource=self._use_resource, constraint=self._constraint)\n try:\n entered_pure_variable_scope = pure_variable_scope.__enter__()\n except:\n pure_variable_scope.__exit__(*sys.exc_info())\n raise\n self._cached_pure_variable_scope = pure_variable_scope\n return entered_pure_variable_scope", "docstring": "Enters the context manager when there is no cached scope yet.\n\nReturns:\n The entered variable scope.\n\nRaises:\n TypeError: A wrong type is passed as `scope` at __init__().\n ValueError: `reuse` is incorrectly set at __init__()."} +{"repo": "transformers", "function": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is None:\n return [1] + [0] * len(token_ids_0) + [1]\n return [1] + [0] * len(token_ids_0) + [1, 1] + [0] * len(token_ids_1) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token."} +{"repo": "tensorflow", "function": "def as_tensor(self):\n with ops.control_dependencies(None):\n return self._concat()", "docstring": "Returns the overall concatenated value as a `Tensor`.\n\nThe returned tensor will not inherit the control dependencies from the scope\nwhere the value is used, which is similar to getting the value of\n`Variable`.\n\nReturns:\n `Tensor` containing the concatenated value."} +{"repo": "transformers", "function": "class Mamba2Config(PretrainedConfig):\n model_type = 'mamba2'\n\n def __init__(self, num_heads=128, head_dim=64, vocab_size=32768, hidden_size=4096, state_size=128, num_hidden_layers=64, layer_norm_epsilon=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, expand=2, conv_kernel=4, n_groups=8, use_bias=False, use_conv_bias=True, hidden_act='silu', initializer_range=0.1, residual_in_fp32=True, time_step_rank='auto', time_step_min=0.001, time_step_max=0.1, time_step_floor=0.0001, time_step_limit=(0.0, float('inf')), rescale_prenorm_residual=False, use_cache=True, rms_norm=True, chunk_size=256, tie_word_embeddings=False, **kwargs):\n if hidden_size * expand != num_heads * head_dim:\n raise ValueError(f'Inconsistent configuration: hidden_size * expand ({hidden_size * expand}) must equal num_heads * head_dim ({num_heads * head_dim}).')\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.state_size = state_size\n self.num_hidden_layers = num_hidden_layers\n self.layer_norm_epsilon = layer_norm_epsilon\n 
self.conv_kernel = conv_kernel\n self.expand = expand\n self.bos_token_id = bos_token_id\n self.eos_token_id = eos_token_id\n self.pad_token_id = pad_token_id\n self.use_bias = use_bias\n self.use_conv_bias = use_conv_bias\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.time_step_rank = math.ceil(self.hidden_size / 16) if time_step_rank == 'auto' else time_step_rank\n self.time_step_min = time_step_min\n self.time_step_max = time_step_max\n self.time_step_floor = time_step_floor\n self.rescale_prenorm_residual = rescale_prenorm_residual\n self.residual_in_fp32 = residual_in_fp32\n self.use_cache = use_cache\n self.n_groups = n_groups\n self.num_heads = num_heads\n self.head_dim = head_dim\n self.rms_norm = rms_norm\n self.state_size = state_size\n self.chunk_size = chunk_size\n self.time_step_limit = time_step_limit\n self.tie_word_embeddings = tie_word_embeddings\n super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`Mamba2Model`]. It is used to instantiate a MAMBA2\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the MAMBA2\n[state-spaces/mamba2-2.8b](https://huggingface.co/state-spaces/mamba2-2.8b) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n num_heads (`int`, *optional*, defaults to 128):\n Number of heads for the evolution matrices of mamba 2.\n head_dim (`int`, *optional*, defaults to 64):\n Dimension of each head.\n vocab_size (`int`, *optional*, defaults to 32768):\n Vocabulary size of the MAMBA2 model. 
Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`Mamba2Model`].\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimensionality of the embeddings and hidden states.\n state_size (`int`, *optional*, defaults to 128): shape of the state space latents.\n num_hidden_layers (`int`, *optional*, defaults to 64):\n Number of hidden layers in the model.\n layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):\n The epsilon to use in the layer normalization layers.\n pad_token_id (`int`, *optional*, defaults to 1):\n Padding token id.\n bos_token_id (`int`, *optional*, defaults to 0):\n The id of the beginning of sentence token in the vocabulary.\n eos_token_id (`int`, *optional*, defaults to 2):\n The id of the end of sentence token in the vocabulary.\n expand (`int`, *optional*, defaults to 2): Expanding factor used to determine the intermediate size.\n conv_kernel (`int`, *optional*, defaults to 4): Size of the convolution kernel.\n n_groups (`int`, *optional*, defaults to 8):\n Number of groups for the evolution matrices of mamba 2.\n use_bias (`bool`, *optional*, defaults to `False`):\n Whether or not to use bias in [\"in_proj\", \"out_proj\"] of the mixer block\n use_conv_bias (`bool`, *optional*, defaults to `True`):\n Whether or not to use bias in the convolution layer of the mixer block.\n hidden_act (`str`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n initializer_range (`float`, *optional*, defaults to 0.1):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n residual_in_fp32 (`bool`, *optional*, defaults to `True`):\n Whether or not residuals should be in `float32`. If set to `False` residuals will keep the same `dtype` as the rest of the model\n time_step_rank (`Union[int,str]`, *optional*, defaults to `\"auto\"`):\n Rank of the discretization projection matrix. 
`\"auto\"` means that it will default to `math.ceil(self.hidden_size / 16)`\n time_step_min (`float`, *optional*, defaults to 0.001):\n Minimum `time_step` used to bound `dt_proj.bias`.\n time_step_max (`float`, *optional*, defaults to 0.1):\n Maximum `time_step` used to bound `dt_proj.bias`.\n time_step_floor (`float`, *optional*, defaults to 0.0001):\n Minimum clamping value of the `dt_proj.bias` layer initialization.\n time_step_limit (`tuple`, *optional*, defaults to `(0.0, inf)`):\n Accepted range of time step values.\n rescale_prenorm_residual (`bool`, *optional*, defaults to `False`):\n Whether or not to rescale `out_proj` weights when initializing.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the cache should be used.\n rms_norm (`bool`, *optional*, defaults to `True`):\n Whether to use RMS norm or not.\n chunk_size (`int`, *optional*, defaults to 256):\n Size of the chunks that will comprise the sequence.\n tie_word_embeddings (`bool`, *optional*, defaults to `False`):\n Whether to tie word embeddings or not.\n\n\nExample:\n\n```python\n>>> from transformers import Mamba2Config, Mamba2Model\n\n>>> # Initializing a Mamba2 configuration\n>>> configuration = Mamba2Config()\n\n>>> # Initializing a model (with random weights) from the configuration\n>>> model = Mamba2Model(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def _get_profile_data_generator(self):\n node_to_file_path = {}\n node_to_line_number = {}\n node_to_func_name = {}\n node_to_op_type = {}\n for op in self._graph.get_operations():\n for trace_entry in reversed(op.traceback):\n file_path = trace_entry[0]\n line_num = trace_entry[1]\n func_name = trace_entry[2]\n if not source_utils.guess_is_tensorflow_py_library(file_path):\n break\n node_to_file_path[op.name] = file_path\n node_to_line_number[op.name] = line_num\n node_to_func_name[op.name] = func_name\n node_to_op_type[op.name] = op.type\n\n def profile_data_generator(device_step_stats):\n for node_stats in device_step_stats.node_stats:\n if node_stats.node_name == '_SOURCE' or node_stats.node_name == '_SINK':\n continue\n yield profiling.ProfileDatum(device_step_stats.device, node_stats, node_to_file_path.get(node_stats.node_name, ''), node_to_line_number.get(node_stats.node_name, 0), node_to_func_name.get(node_stats.node_name, ''), node_to_op_type.get(node_stats.node_name, ''))\n return profile_data_generator", "docstring": "Get function that generates `ProfileDatum` objects.\n\nReturns:\n A function that generates `ProfileDatum` objects."} +{"repo": "transformers", "function": "def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):\n q_ids = tf.range(query_size, dtype=tf.int32)\n k_ids = tf.range(key_size, dtype=tf.int32)\n rel_pos_ids = q_ids[:, None] - tf.tile(tf.expand_dims(k_ids, axis=0), [shape_list(q_ids)[0], 1])\n if bucket_size > 0 and max_position > 0:\n rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)\n rel_pos_ids = rel_pos_ids[:query_size, :]\n rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)\n return tf.cast(rel_pos_ids, tf.int64)", "docstring": "Build relative position according to the query and key\n\nWe assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key\n\\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -\nP_k\\)\n\nArgs:\n query_size (int): the length of 
query\n key_size (int): the length of key\n bucket_size (int): the size of position bucket\n max_position (int): the maximum allowed absolute position\n\nReturn:\n `tf.Tensor`: A tensor with shape [1, query_size, key_size]"} +{"repo": "transformers", "function": "def mel_filter_bank(num_frequency_bins: int, num_mel_filters: int, min_frequency: float, max_frequency: float, sampling_rate: int, norm: Optional[str]=None, mel_scale: str='htk', triangularize_in_mel_space: bool=False) -> np.ndarray:\n if norm is not None and norm != 'slaney':\n raise ValueError('norm must be one of None or \"slaney\"')\n if num_frequency_bins < 2:\n raise ValueError(f'Require num_frequency_bins: {num_frequency_bins} >= 2')\n if min_frequency > max_frequency:\n raise ValueError(f'Require min_frequency: {min_frequency} <= max_frequency: {max_frequency}')\n mel_min = hertz_to_mel(min_frequency, mel_scale=mel_scale)\n mel_max = hertz_to_mel(max_frequency, mel_scale=mel_scale)\n mel_freqs = np.linspace(mel_min, mel_max, num_mel_filters + 2)\n filter_freqs = mel_to_hertz(mel_freqs, mel_scale=mel_scale)\n if triangularize_in_mel_space:\n fft_bin_width = sampling_rate / ((num_frequency_bins - 1) * 2)\n fft_freqs = hertz_to_mel(fft_bin_width * np.arange(num_frequency_bins), mel_scale=mel_scale)\n filter_freqs = mel_freqs\n else:\n fft_freqs = np.linspace(0, sampling_rate // 2, num_frequency_bins)\n mel_filters = _create_triangular_filter_bank(fft_freqs, filter_freqs)\n if norm is not None and norm == 'slaney':\n enorm = 2.0 / (filter_freqs[2:num_mel_filters + 2] - filter_freqs[:num_mel_filters])\n mel_filters *= np.expand_dims(enorm, 0)\n if (mel_filters.max(axis=0) == 0.0).any():\n warnings.warn(f'At least one mel filter has all zero values. The value for `num_mel_filters` ({num_mel_filters}) may be set too high. Or, the value for `num_frequency_bins` ({num_frequency_bins}) may be set too low.')\n return mel_filters", "docstring": "Creates a frequency bin conversion matrix used to obtain a mel spectrogram. This is called a *mel filter bank*, and\nvarious implementation exist, which differ in the number of filters, the shape of the filters, the way the filters\nare spaced, the bandwidth of the filters, and the manner in which the spectrum is warped. The goal of these\nfeatures is to approximate the non-linear human perception of the variation in pitch with respect to the frequency.\n\nDifferent banks of mel filters were introduced in the literature. The following variations are supported:\n\n- MFCC FB-20: introduced in 1980 by Davis and Mermelstein, it assumes a sampling frequency of 10 kHz and a speech\n bandwidth of `[0, 4600]` Hz.\n- MFCC FB-24 HTK: from the Cambridge HMM Toolkit (HTK) (1995) uses a filter bank of 24 filters for a speech\n bandwidth of `[0, 8000]` Hz. This assumes sampling rate \u2265 16 kHz.\n- MFCC FB-40: from the Auditory Toolbox for MATLAB written by Slaney in 1998, assumes a sampling rate of 16 kHz and\n speech bandwidth of `[133, 6854]` Hz. This version also includes area normalization.\n- HFCC-E FB-29 (Human Factor Cepstral Coefficients) of Skowronski and Harris (2004), assumes a sampling rate of\n 12.5 kHz and speech bandwidth of `[0, 6250]` Hz.\n\nThis code is adapted from *torchaudio* and *librosa*. 
Note that the default parameters of torchaudio's\n`melscale_fbanks` implement the `\"htk\"` filters while librosa uses the `\"slaney\"` implementation.\n\nArgs:\n num_frequency_bins (`int`):\n Number of frequency bins (should be the same as `n_fft // 2 + 1` where `n_fft` is the size of the Fourier Transform used to compute the spectrogram).\n num_mel_filters (`int`):\n Number of mel filters to generate.\n min_frequency (`float`):\n Lowest frequency of interest in Hz.\n max_frequency (`float`):\n Highest frequency of interest in Hz. This should not exceed `sampling_rate / 2`.\n sampling_rate (`int`):\n Sample rate of the audio waveform.\n norm (`str`, *optional*):\n If `\"slaney\"`, divide the triangular mel weights by the width of the mel band (area normalization).\n mel_scale (`str`, *optional*, defaults to `\"htk\"`):\n The mel frequency scale to use, `\"htk\"`, `\"kaldi\"` or `\"slaney\"`.\n triangularize_in_mel_space (`bool`, *optional*, defaults to `False`):\n If this option is enabled, the triangular filter is applied in mel space rather than frequency space. This\n should be set to `true` in order to get the same results as `torchaudio` when computing mel filters.\n\nReturns:\n `np.ndarray` of shape (`num_frequency_bins`, `num_mel_filters`): Triangular filter bank matrix. This is a\n projection matrix to go from a spectrogram to a mel spectrogram."} +{"repo": "beam", "function": "def process(self, element, *args, **kwargs):\n yield {'text': element.data.decode('utf-8'), 'id': element.attributes['id']}", "docstring": "For each element in the input PCollection, retrieve the id and decode the bytes into string\n\nArgs:\n element: The element that is being processed."} +{"repo": "tensorflow", "function": "def __init__(self, num_units, use_peepholes=False, cell_clip=None, initializer=None, num_proj=None, proj_clip=None, num_unit_shards=None, num_proj_shards=None, forget_bias=1.0, state_is_tuple=True, activation=None, reuse=None, name=None, dtype=None, **kwargs):\n warnings.warn('`tf.nn.rnn_cell.LSTMCell` is deprecated and will be removed in a future version. This class is equivalent as `tf.keras.layers.LSTMCell`, and will be replaced by that in Tensorflow 2.0.')\n super(LSTMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype, **kwargs)\n _check_supported_dtypes(self.dtype)\n if not state_is_tuple:\n logging.warning('%s: Using a concatenated state is slower and will soon be deprecated. Use state_is_tuple=True.', self)\n if num_unit_shards is not None or num_proj_shards is not None:\n logging.warning('%s: The num_unit_shards and proj_unit_shards parameters are deprecated and will be removed in Jan 2017. Use a variable scope with a partitioner instead.', self)\n if context.executing_eagerly() and tf_config.list_logical_devices('GPU'):\n logging.warning('%s: Note that this cell is not optimized for performance. 
Please use tf.contrib.cudnn_rnn.CudnnLSTM for better performance on GPU.', self)\n self.input_spec = input_spec.InputSpec(ndim=2)\n self._num_units = num_units\n self._use_peepholes = use_peepholes\n self._cell_clip = cell_clip\n self._initializer = initializers.get(initializer)\n self._num_proj = num_proj\n self._proj_clip = proj_clip\n self._num_unit_shards = num_unit_shards\n self._num_proj_shards = num_proj_shards\n self._forget_bias = forget_bias\n self._state_is_tuple = state_is_tuple\n if activation:\n self._activation = activations.get(activation)\n else:\n self._activation = math_ops.tanh\n if num_proj:\n self._state_size = LSTMStateTuple(num_units, num_proj) if state_is_tuple else num_units + num_proj\n self._output_size = num_proj\n else:\n self._state_size = LSTMStateTuple(num_units, num_units) if state_is_tuple else 2 * num_units\n self._output_size = num_units", "docstring": "Initialize the parameters for an LSTM cell.\n\nArgs:\n num_units: int, The number of units in the LSTM cell.\n use_peepholes: bool, set True to enable diagonal/peephole connections.\n cell_clip: (optional) A float value, if provided the cell state is clipped\n by this value prior to the cell output activation.\n initializer: (optional) The initializer to use for the weight and\n projection matrices.\n num_proj: (optional) int, The output dimensionality for the projection\n matrices. If None, no projection is performed.\n proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is\n provided, then the projected values are clipped elementwise to within\n `[-proj_clip, proj_clip]`.\n num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a\n variable_scope partitioner instead.\n num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a\n variable_scope partitioner instead.\n forget_bias: Biases of the forget gate are initialized by default to 1 in\n order to reduce the scale of forgetting at the beginning of the\n training. Must set it manually to `0.0` when restoring from CudnnLSTM\n trained checkpoints.\n state_is_tuple: If True, accepted and returned states are 2-tuples of the\n `c_state` and `m_state`. If False, they are concatenated along the\n column axis. This latter behavior will soon be deprecated.\n activation: Activation function of the inner states. Default: `tanh`. It\n could also be string that is within Keras activation function names.\n reuse: (optional) Python boolean describing whether to reuse variables in\n an existing scope. If not `True`, and the existing scope already has\n the given variables, an error is raised.\n name: String, the name of the layer. Layers with the same name will share\n weights, but to avoid mistakes we require reuse=True in such cases.\n dtype: Default dtype of the layer (default of `None` means use the type of\n the first input). 
Required when `build` is called before `call`.\n **kwargs: Dict, keyword named properties for common layer attributes, like\n `trainable` etc when constructing the cell from configs of get_config().\n When restoring from CudnnLSTM-trained checkpoints, use\n `CudnnCompatibleLSTMCell` instead."} +{"repo": "tensorflow", "function": "def from_keras_model(cls, model):\n TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.KERAS_MODEL)\n return TFLiteKerasModelConverterV2(model)", "docstring": "Creates a TFLiteConverter object from a Keras model.\n\nArgs:\n model: tf.Keras.Model\n\nReturns:\n TFLiteConverter object."} +{"repo": "tensorflow", "function": "def get_index(uid, i):\n return _SHARED_SEQUENCES[uid][i]", "docstring": "Get the value from the Sequence `uid` at index `i`.\n\nTo allow multiple Sequences to be used at the same time, we use `uid` to\nget a specific one. A single Sequence would cause the validation to\noverwrite the training Sequence.\n\nArgs:\n uid: int, Sequence identifier\n i: index\n\nReturns:\n The value at index `i`."} +{"repo": "beam", "function": "def checksum(self, path):\n try:\n return self._blobstorageIO().checksum(path)\n except Exception as e:\n raise BeamIOError('Checksum operation failed', {path, e})", "docstring": "Fetch checksum metadata of a file on the\n:class:`~apache_beam.io.filesystem.FileSystem`.\n\nArgs:\n path: string path of a file.\n\nReturns: string containing checksum\n\nRaises:\n ``BeamIOError``: if path isn't a file or doesn't exist."} +{"repo": "beam", "function": "def read_gbq(table, dataset=None, project_id=None, use_bqstorage_api=False, **kwargs):\n if table is None:\n raise ValueError('Please specify a BigQuery table to read from.')\n elif len(kwargs) > 0:\n raise ValueError(f'Encountered unsupported parameter(s) in read_gbq: {kwargs.keys()!r}')\n return _ReadGbq(table, dataset, project_id, use_bqstorage_api)", "docstring": "This function reads data from a BigQuery table and produces a\n:class:`~apache_beam.dataframe.frames.DeferredDataFrame`.\n\nArgs:\n table (str): Please specify a table. This can be done in the format\n 'PROJECT:dataset.table' if one would not wish to utilize\n the parameters below.\n dataset (str): Please specify the dataset\n (can omit if table was specified as 'PROJECT:dataset.table').\n project_id (str): Please specify the project ID\n (can omit if table was specified as 'PROJECT:dataset.table').\n use_bqstorage_api (bool): If you would like to utilize\n the BigQuery Storage API in ReadFromBigQuery, please set\n this flag to true. 
Otherwise, please set flag\n to false or leave it unspecified.\n "} +{"repo": "mobly", "function": "def wait_for_boot_completion(self, timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND):\n deadline = time.perf_counter() + timeout\n self.adb.wait_for_device(timeout=timeout)\n while time.perf_counter() < deadline:\n try:\n if self.is_boot_completed():\n return\n except (adb.AdbError, adb.AdbTimeoutError):\n pass\n time.sleep(5)\n raise DeviceError(self, 'Booting process timed out')", "docstring": "Waits for Android framework to broadcast ACTION_BOOT_COMPLETED.\n\nThis function times out after 15 minutes.\n\nArgs:\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect."} +{"repo": "temporian", "function": "def hours(value: Union[int, float]) -> Duration:\n return float(value * 60 * 60)", "docstring": "Converts input value from hours to a `Duration` in seconds.\n\nExample:\n ```python\n >>> timestamps = [tp.duration.hours(i) for i in [1, 2, 10]]\n >>> timestamps\n [3600.0, 7200.0, 36000.0]\n\n >>> # Usage in a window operation\n >>> a = tp.event_set(timestamps=timestamps, features={\"f1\": [1, 5, -5]})\n >>> a.moving_sum(window_length=tp.duration.hours(2))\n indexes: ...\n timestamps: [ 3600. 7200. 36000.]\n 'f1': [ 1 6 -5]\n ...\n\n ```\n\nArgs:\n value: Number of hours.\n\nReturns:\n Equivalent number of seconds."} +{"repo": "tensorflow", "function": "def dynamic_update_slice(operand, update, start_indices):\n operand = tf_np.asarray(operand).data\n update = tf_np.asarray(update).data\n start_indices = tf_np.asarray(start_indices, np.int32).data\n if not update.shape.is_fully_defined():\n raise ValueError(\"update's shape must be fully defined\")\n slice_sizes = update.shape\n idx = _get_dynamic_indices(operand, start_indices, slice_sizes)\n if idx is None:\n return tf_np.asarray(update)\n operand = array_ops.tensor_scatter_nd_update(operand, idx, update)\n return tf_np.asarray(operand)", "docstring": "Updates a dynamic slice.\n\nSee the docstring of `jax.lax.dynamic_update_slice`\n(https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.dynamic_update_slice.html)\nfor details.\n\nArgs:\n operand: an array to slice.\n update: an array containing the new values to write onto `operand`.\n start_indices: a vector of integers, one per dimension. The starts of the\n slice. The vector can be dynamic.\n\nReturns:\n The updated version of `operand`."} +{"repo": "transformers", "function": "def decode(self, token_ids, **kwargs):\n return super().decode(token_ids, **kwargs)", "docstring": "Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special\ntokens and clean up tokenization spaces.\n\nSimilar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.\n\nArgs:\n token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\n List of tokenized input ids. Can be obtained using the `__call__` method.\n skip_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to remove special tokens in the decoding.\n clean_up_tokenization_spaces (`bool`, *optional*):\n Whether or not to clean up the tokenization spaces. 
If `None`, will default to\n `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).\n use_source_tokenizer (`bool`, *optional*, defaults to `False`):\n Whether or not to use the source tokenizer to decode sequences (only applicable in sequence-to-sequence\n problems).\n kwargs (additional keyword arguments, *optional*):\n Will be passed to the underlying model specific decode method.\n\nReturns:\n `str`: The decoded sentence."} +{"repo": "transformers", "function": "def get_prefix_bias_name(self) -> Union[None, str]:\n warnings.warn('The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.', FutureWarning)\n return None", "docstring": "Get the concatenated _prefix name of the bias from the model name to the parent layer\n\nReturn:\n `str`: The _prefix name of the bias."} +{"repo": "transformers", "function": "class GenerationConfig(PushToHubMixin):\n extra_output_flags = ('output_attentions', 'output_hidden_states', 'output_scores', 'output_logits')\n\n def __init__(self, **kwargs):\n self.max_length = kwargs.pop('max_length', 20)\n self.max_new_tokens = kwargs.pop('max_new_tokens', None)\n self.min_length = kwargs.pop('min_length', 0)\n self.min_new_tokens = kwargs.pop('min_new_tokens', None)\n self.early_stopping = kwargs.pop('early_stopping', False)\n self.max_time = kwargs.pop('max_time', None)\n self.stop_strings = kwargs.pop('stop_strings', None)\n self.do_sample = kwargs.pop('do_sample', False)\n self.num_beams = kwargs.pop('num_beams', 1)\n self.num_beam_groups = kwargs.pop('num_beam_groups', 1)\n self.penalty_alpha = kwargs.pop('penalty_alpha', None)\n self.dola_layers = kwargs.pop('dola_layers', None)\n self.use_cache = kwargs.pop('use_cache', True)\n self.cache_implementation = kwargs.pop('cache_implementation', None)\n self.cache_config = kwargs.pop('cache_config', None)\n if self.cache_implementation is not None and self.cache_implementation in CACHE_CONFIG_MAPPING:\n cache_config_class = CACHE_CONFIG_MAPPING[self.cache_implementation]\n if isinstance(self.cache_config, dict):\n self.cache_config = cache_config_class.from_dict(self.cache_config)\n self.return_legacy_cache = kwargs.pop('return_legacy_cache', None)\n self.prefill_chunk_size = kwargs.pop('prefill_chunk_size', None)\n self.temperature = kwargs.pop('temperature', 1.0)\n self.top_k = kwargs.pop('top_k', 50)\n self.top_p = kwargs.pop('top_p', 1.0)\n self.min_p = kwargs.pop('min_p', None)\n self.typical_p = kwargs.pop('typical_p', 1.0)\n self.epsilon_cutoff = kwargs.pop('epsilon_cutoff', 0.0)\n self.eta_cutoff = kwargs.pop('eta_cutoff', 0.0)\n self.diversity_penalty = kwargs.pop('diversity_penalty', 0.0)\n self.repetition_penalty = kwargs.pop('repetition_penalty', 1.0)\n self.encoder_repetition_penalty = kwargs.pop('encoder_repetition_penalty', 1.0)\n self.length_penalty = kwargs.pop('length_penalty', 1.0)\n self.no_repeat_ngram_size = kwargs.pop('no_repeat_ngram_size', 0)\n self.bad_words_ids = kwargs.pop('bad_words_ids', None)\n self.force_words_ids = kwargs.pop('force_words_ids', None)\n self.renormalize_logits = kwargs.pop('renormalize_logits', False)\n self.constraints = kwargs.pop('constraints', None)\n self.forced_bos_token_id = kwargs.pop('forced_bos_token_id', None)\n self.forced_eos_token_id = kwargs.pop('forced_eos_token_id', None)\n self.remove_invalid_values = kwargs.pop('remove_invalid_values', False)\n self.exponential_decay_length_penalty = kwargs.pop('exponential_decay_length_penalty', None)\n self.suppress_tokens = kwargs.pop('suppress_tokens', None)\n 
self.begin_suppress_tokens = kwargs.pop('begin_suppress_tokens', None)\n self.sequence_bias = kwargs.pop('sequence_bias', None)\n self.token_healing = kwargs.pop('token_healing', False)\n self.guidance_scale = kwargs.pop('guidance_scale', None)\n self.low_memory = kwargs.pop('low_memory', None)\n watermarking_config = kwargs.pop('watermarking_config', None)\n if watermarking_config is None:\n self.watermarking_config = None\n elif isinstance(watermarking_config, BaseWatermarkingConfig):\n self.watermarking_config = watermarking_config\n else:\n self.watermarking_config = WatermarkingConfig.from_dict(watermarking_config)\n self.num_return_sequences = kwargs.pop('num_return_sequences', 1)\n self.output_attentions = kwargs.pop('output_attentions', False)\n self.output_hidden_states = kwargs.pop('output_hidden_states', False)\n self.output_scores = kwargs.pop('output_scores', False)\n self.output_logits = kwargs.pop('output_logits', None)\n self.return_dict_in_generate = kwargs.pop('return_dict_in_generate', False)\n self.pad_token_id = kwargs.pop('pad_token_id', None)\n self.bos_token_id = kwargs.pop('bos_token_id', None)\n self.eos_token_id = kwargs.pop('eos_token_id', None)\n self.encoder_no_repeat_ngram_size = kwargs.pop('encoder_no_repeat_ngram_size', 0)\n self.decoder_start_token_id = kwargs.pop('decoder_start_token_id', None)\n self.is_assistant = False\n self.num_assistant_tokens = kwargs.pop('num_assistant_tokens', 20)\n self.num_assistant_tokens_schedule = kwargs.pop('num_assistant_tokens_schedule', 'constant')\n self.assistant_confidence_threshold = kwargs.pop('assistant_confidence_threshold', 0.4)\n self.prompt_lookup_num_tokens = kwargs.pop('prompt_lookup_num_tokens', None)\n self.max_matching_ngram_size = kwargs.pop('max_matching_ngram_size', None)\n self.assistant_early_exit = kwargs.pop('assistant_early_exit', None)\n self.assistant_lookbehind = kwargs.pop('assistant_lookbehind', 10)\n self.target_lookbehind = kwargs.pop('target_lookbehind', 10)\n self.compile_config = kwargs.pop('compile_config', None)\n self.disable_compile = kwargs.pop('disable_compile', False)\n self._from_model_config = kwargs.pop('_from_model_config', False)\n self._commit_hash = kwargs.pop('_commit_hash', None)\n self.transformers_version = kwargs.pop('transformers_version', __version__)\n if not self._from_model_config:\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n self.validate()\n\n def __hash__(self):\n return hash(self.to_json_string(ignore_metadata=True))\n\n def __eq__(self, other):\n if not isinstance(other, GenerationConfig):\n return False\n self_without_metadata = self.to_json_string(use_diff=False, ignore_metadata=True)\n other_without_metadata = other.to_json_string(use_diff=False, ignore_metadata=True)\n return self_without_metadata == other_without_metadata\n\n def __repr__(self):\n return f'{self.__class__.__name__} {self.to_json_string(ignore_metadata=True)}'\n\n def get_generation_mode(self, assistant_model: Optional['PreTrainedModel']=None) -> GenerationMode:\n \"\"\"\n Returns the generation mode triggered by the [`GenerationConfig`] instance.\n\n Arg:\n assistant_model (`PreTrainedModel`, *optional*):\n The assistant model to be used for assisted generation. 
If set, the generation mode will be\n assisted generation.\n\n Returns:\n `GenerationMode`: The generation mode triggered by the instance.\n \"\"\"\n if self.constraints is not None or self.force_words_ids is not None:\n generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH\n elif self.num_beams == 1:\n if self.do_sample is False:\n if self.top_k is not None and self.top_k > 1 and (self.penalty_alpha is not None) and (self.penalty_alpha > 0):\n generation_mode = GenerationMode.CONTRASTIVE_SEARCH\n else:\n generation_mode = GenerationMode.GREEDY_SEARCH\n else:\n generation_mode = GenerationMode.SAMPLE\n elif self.num_beam_groups > 1:\n generation_mode = GenerationMode.GROUP_BEAM_SEARCH\n elif self.do_sample is True:\n generation_mode = GenerationMode.BEAM_SAMPLE\n else:\n generation_mode = GenerationMode.BEAM_SEARCH\n if assistant_model is not None or self.prompt_lookup_num_tokens is not None or self.assistant_early_exit is not None:\n if generation_mode in ('greedy_search', 'sample'):\n generation_mode = GenerationMode.ASSISTED_GENERATION\n else:\n logger.warning(f\"You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate is only supported with Greedy Search and Sample. However, the base decoding mode (based on current flags) is {generation_mode} -- some of the set flags will be ignored.\")\n if self.dola_layers is not None:\n if generation_mode in ('greedy_search', 'sample'):\n generation_mode = GenerationMode.DOLA_GENERATION\n else:\n logger.warning(f\"You've set `dola_layers`, which triggers DoLa generate. Currently, DoLa generate is only supported with Greedy Search and Sample. However, the base decoding mode (based on current flags) is {generation_mode} -- some of the set flags will be ignored.\")\n return generation_mode\n\n @deprecate_kwarg('is_init', version='4.54.0')\n def validate(self, strict=False):\n \"\"\"\n Validates the values of the attributes of the [`GenerationConfig`] instance. Raises exceptions in the presence\n of parameterization that can be detected as incorrect from the configuration instance alone.\n\n Note that some parameters not validated here are best validated at generate runtime, as they may depend on\n other inputs and/or the model, such as parameters related to the generation length.\n\n Args:\n strict (bool): If True, raise an exception for any issues found. If False, only log issues.\n \"\"\"\n minor_issues = {}\n if self.early_stopping not in {True, False, 'never'}:\n raise ValueError(f\"`early_stopping` must be a boolean or 'never', but is {self.early_stopping}.\")\n if self.max_new_tokens is not None and self.max_new_tokens <= 0:\n raise ValueError(f'`max_new_tokens` must be greater than 0, but is {self.max_new_tokens}.')\n if self.pad_token_id is not None and self.pad_token_id < 0:\n minor_issues['pad_token_id'] = f'`pad_token_id` should be positive but got {self.pad_token_id}. This will cause errors when batch generating, if there is padding. Please set `pad_token_id` explicitly as `model.generation_config.pad_token_id=PAD_TOKEN_ID` to avoid errors in generation'\n if self.cache_implementation is not None and self.cache_implementation not in ALL_CACHE_IMPLEMENTATIONS:\n raise ValueError(f'Invalid `cache_implementation` ({self.cache_implementation}). 
Choose one of: {ALL_CACHE_IMPLEMENTATIONS}')\n if self.cache_config is not None:\n cache_class = CACHE_CONFIG_MAPPING.get(self.cache_implementation)\n if cache_class is None:\n raise ValueError(f'You provided a `cache_config` but the cache implementation you are using ({self.cache_implementation}) does not require any config. Make sure to use the correct cache implementation matching your cache config.')\n if not isinstance(self.cache_config, cache_class):\n self.cache_config = cache_class.from_dict(self.cache_config)\n self.cache_config.validate()\n if self.compile_config is not None and (not isinstance(self.compile_config, CompileConfig)):\n raise ValueError(f'You provided `compile_config` as an instance of {type(self.compile_config)}, but it must be an instance of `CompileConfig`.')\n if self.watermarking_config is not None:\n if not (isinstance(self.watermarking_config, WatermarkingConfig) or isinstance(self.watermarking_config, SynthIDTextWatermarkingConfig)):\n minor_issues['watermarking_config'] = '`watermarking_config` as a dict is deprecated and will be removed in v4.54.0. Please construct `watermarking_config` object with `WatermarkingConfig` or `SynthIDTextWatermarkingConfig` class.'\n self.watermarking_config = WatermarkingConfig.from_dict(self.watermarking_config)\n self.watermarking_config.validate()\n if self.do_sample is False:\n greedy_wrong_parameter_msg = '`do_sample` is set to `False`. However, `{flag_name}` is set to `{flag_value}` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `{flag_name}`.'\n if self.temperature is not None and self.temperature != 1.0:\n minor_issues['temperature'] = greedy_wrong_parameter_msg.format(flag_name='temperature', flag_value=self.temperature)\n if self.top_p is not None and self.top_p != 1.0:\n minor_issues['top_p'] = greedy_wrong_parameter_msg.format(flag_name='top_p', flag_value=self.top_p)\n if self.min_p is not None:\n minor_issues['min_p'] = greedy_wrong_parameter_msg.format(flag_name='min_p', flag_value=self.min_p)\n if self.typical_p is not None and self.typical_p != 1.0:\n minor_issues['typical_p'] = greedy_wrong_parameter_msg.format(flag_name='typical_p', flag_value=self.typical_p)\n if self.top_k is not None and self.top_k != 50 and (self.penalty_alpha is None):\n minor_issues['top_k'] = greedy_wrong_parameter_msg.format(flag_name='top_k', flag_value=self.top_k)\n if self.epsilon_cutoff is not None and self.epsilon_cutoff != 0.0:\n minor_issues['epsilon_cutoff'] = greedy_wrong_parameter_msg.format(flag_name='epsilon_cutoff', flag_value=self.epsilon_cutoff)\n if self.eta_cutoff is not None and self.eta_cutoff != 0.0:\n minor_issues['eta_cutoff'] = greedy_wrong_parameter_msg.format(flag_name='eta_cutoff', flag_value=self.eta_cutoff)\n if self.num_beams == 1:\n single_beam_wrong_parameter_msg = '`num_beams` is set to 1. However, `{flag_name}` is set to `{flag_value}` -- this flag is only used in beam-based generation modes. 
You should set `num_beams>1` or unset `{flag_name}`.'\n if self.early_stopping is not False:\n minor_issues['early_stopping'] = single_beam_wrong_parameter_msg.format(flag_name='early_stopping', flag_value=self.early_stopping)\n if self.num_beam_groups is not None and self.num_beam_groups != 1:\n minor_issues['num_beam_groups'] = single_beam_wrong_parameter_msg.format(flag_name='num_beam_groups', flag_value=self.num_beam_groups)\n if self.diversity_penalty is not None and self.diversity_penalty != 0.0:\n minor_issues['diversity_penalty'] = single_beam_wrong_parameter_msg.format(flag_name='diversity_penalty', flag_value=self.diversity_penalty)\n if self.length_penalty is not None and self.length_penalty != 1.0:\n minor_issues['length_penalty'] = single_beam_wrong_parameter_msg.format(flag_name='length_penalty', flag_value=self.length_penalty)\n if self.constraints is not None:\n minor_issues['constraints'] = single_beam_wrong_parameter_msg.format(flag_name='constraints', flag_value=self.constraints)\n if self.dola_layers is not None and (self.repetition_penalty is None or self.repetition_penalty < 1.2):\n minor_issues['repetition_penalty'] = (f'`dola_layers` is set to trigger DoLa decoding, but `repetition_penalty` is set to a value of {self.repetition_penalty}, which could induce unwanted repetition. The recommended value for DoLa decoding is `repetition_penalty>=1.2`.',)\n elif self.constraints is not None or self.force_words_ids is not None:\n constrained_wrong_parameter_msg = 'one of `constraints`, `force_words_ids` is not `None`, triggering constrained beam search. However, `{flag_name}` is set to `{flag_value}`, which is incompatible with this generation mode. Set `constraints` and `force_words_ids` to `None` or unset `{flag_name}` to continue.'\n if self.do_sample is True:\n raise ValueError(constrained_wrong_parameter_msg.format(flag_name='do_sample', flag_value=self.do_sample))\n if self.num_beam_groups is not None and self.num_beam_groups != 1:\n raise ValueError(constrained_wrong_parameter_msg.format(flag_name='num_beam_groups', flag_value=self.num_beam_groups))\n elif self.diversity_penalty != 0.0 or self.num_beam_groups != 1:\n group_error_prefix = '`diversity_penalty` is not 0.0 or `num_beam_groups` is not 1, triggering group beam search. In this generation mode, '\n if self.do_sample is True:\n raise ValueError(group_error_prefix + '`do_sample` must be set to `False`')\n if self.num_beams % self.num_beam_groups != 0:\n raise ValueError(group_error_prefix + '`num_beams` should be divisible by `num_beam_groups`')\n if self.diversity_penalty == 0.0:\n raise ValueError(group_error_prefix + '`diversity_penalty` should be greater than `0.0`, otherwise your groups will be identical.')\n if self.num_return_sequences != 1:\n if self.num_beams == 1:\n if self.do_sample is False:\n raise ValueError(f'Greedy methods without beam search do not support `num_return_sequences` different than 1 (got {self.num_return_sequences}).')\n elif self.num_return_sequences > self.num_beams:\n raise ValueError(f'`num_return_sequences` ({self.num_return_sequences}) has to be smaller or equal to `num_beams` ({self.num_beams}).')\n if self.use_cache is False:\n no_cache_warning = 'You have set `use_cache` to `False`, but {cache_arg} is set to {cache_arg_value}. 
{cache_arg} will have no effect.'\n for arg_name in ('cache_implementation', 'cache_config', 'return_legacy_cache'):\n if getattr(self, arg_name) is not None:\n minor_issues[arg_name] = no_cache_warning.format(cache_arg=arg_name, cache_arg_value=getattr(self, arg_name))\n if self.return_dict_in_generate is not True:\n for extra_output_flag in self.extra_output_flags:\n if getattr(self, extra_output_flag) is True:\n minor_issues[extra_output_flag] = f'`return_dict_in_generate` is NOT set to `True`, but `{extra_output_flag}` is. When `return_dict_in_generate` is not `True`, `{extra_output_flag}` is ignored.'\n generate_arguments = ('logits_processor', 'stopping_criteria', 'prefix_allowed_tokens_fn', 'synced_gpus', 'assistant_model', 'streamer', 'negative_prompt_ids', 'negative_prompt_attention_mask', 'use_model_defaults')\n for arg in generate_arguments:\n if hasattr(self, arg):\n raise ValueError(f'Argument `{arg}` is not a valid argument of `GenerationConfig`. It should be passed to `generate()` (or a pipeline) directly.')\n if len(minor_issues) > 0:\n info_message = []\n for attribute_name, issue_description in minor_issues.items():\n info_message.append(f'- `{attribute_name}`: {issue_description}')\n info_message = '\\n'.join(info_message)\n info_message += \"\\nIf you're using a pretrained model, note that some of these attributes may be set through the model's `generation_config.json` file.\"\n if strict:\n raise ValueError('GenerationConfig is invalid: \\n' + info_message)\n else:\n attributes_with_issues = list(minor_issues.keys())\n warning_message = f'The following generation flags are not valid and may be ignored: {attributes_with_issues}.'\n if logging.get_verbosity() >= logging.WARNING:\n warning_message += ' Set `TRANSFORMERS_VERBOSITY=info` for more details.'\n logger.warning(warning_message)\n logger.info(info_message)\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], config_file_name: Optional[Union[str, os.PathLike]]=None, push_to_hub: bool=False, **kwargs):\n \"\"\"\n Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~GenerationConfig.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n config_file_name (`str` or `os.PathLike`, *optional*, defaults to `\"generation_config.json\"`):\n Name of the generation configuration JSON file to be saved in `save_directory`.\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n try:\n self.validate(strict=True)\n except ValueError as exc:\n raise ValueError(str(exc) + '\\n\\nFix these issues to save the configuration.')\n use_auth_token = kwargs.pop('use_auth_token', None)\n if use_auth_token is not None:\n warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n if kwargs.get('token', None) is not None:\n raise ValueError('`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.')\n kwargs['token'] = use_auth_token\n config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME\n if os.path.isfile(save_directory):\n raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')\n os.makedirs(save_directory, exist_ok=True)\n if push_to_hub:\n commit_message = kwargs.pop('commit_message', None)\n repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])\n repo_id = self._create_repo(repo_id, **kwargs)\n files_timestamps = self._get_files_timestamps(save_directory)\n output_config_file = os.path.join(save_directory, config_file_name)\n self.to_json_file(output_config_file, use_diff=True)\n logger.info(f'Configuration saved in {output_config_file}')\n if push_to_hub:\n self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name: Union[str, os.PathLike], config_file_name: Optional[Union[str, os.PathLike]]=None, cache_dir: Optional[Union[str, os.PathLike]]=None, force_download: bool=False, local_files_only: bool=False, token: Optional[Union[str, bool]]=None, revision: str='main', **kwargs) -> 'GenerationConfig':\n \"\"\"\n Instantiate a [`GenerationConfig`] from a generation configuration file.\n\n Args:\n pretrained_model_name (`str` or `os.PathLike`):\n This can be either:\n\n - a string, the *model id* of a pretrained model configuration hosted inside a model repo on\n huggingface.co.\n - a path to a *directory* containing a configuration file saved using the\n [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.\n config_file_name (`str` or `os.PathLike`, *optional*, defaults to `\"generation_config.json\"`):\n Name of the generation configuration JSON file to be loaded from `pretrained_model_name`.\n cache_dir (`str` or `os.PathLike`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force to (re-)download the configuration files and override the cached versions if\n they exist.\n resume_download:\n Deprecated and ignored. All downloads are now resumed by default when possible.\n Will be removed in v5 of Transformers.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.\n token (`str` or `bool`, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use\n the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n\n \n\n To test a pull request you made on the Hub, you can pass `revision=\"refs/pr/\"`.\n\n \n\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n If `False`, then this function returns just the final configuration object.\n\n If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a\n dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the\n part of `kwargs` which has not been used to update `config` and is otherwise ignored.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can\n specify the folder name here.\n kwargs (`Dict[str, Any]`, *optional*):\n The values in kwargs of any keys which are configuration attributes will be used to override the loaded\n values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled\n by the `return_unused_kwargs` keyword parameter.\n\n Returns:\n [`GenerationConfig`]: The configuration object instantiated from this pretrained model.\n\n Examples:\n\n ```python\n >>> from transformers import GenerationConfig\n\n >>> # Download configuration from huggingface.co and cache.\n >>> generation_config = GenerationConfig.from_pretrained(\"openai-community/gpt2\")\n\n >>> # E.g. config was saved using *save_pretrained('./test/saved_model/')*\n >>> generation_config.save_pretrained(\"./test/saved_model/\")\n >>> generation_config = GenerationConfig.from_pretrained(\"./test/saved_model/\")\n\n >>> # You can also specify configuration names to your generation configuration file\n >>> generation_config.save_pretrained(\"./test/saved_model/\", config_file_name=\"my_configuration.json\")\n >>> generation_config = GenerationConfig.from_pretrained(\"./test/saved_model/\", \"my_configuration.json\")\n\n >>> # If you'd like to try a minor variation to an existing configuration, you can also pass generation\n >>> # arguments to `.from_pretrained()`. Be mindful that typos and unused arguments will be ignored\n >>> generation_config, unused_kwargs = GenerationConfig.from_pretrained(\n ... \"openai-community/gpt2\", top_k=1, foo=False, do_sample=True, return_unused_kwargs=True\n ... )\n >>> generation_config.top_k\n 1\n\n >>> unused_kwargs\n {'foo': False}\n ```\"\"\"\n config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME\n resume_download = kwargs.pop('resume_download', None)\n proxies = kwargs.pop('proxies', None)\n use_auth_token = kwargs.pop('use_auth_token', None)\n subfolder = kwargs.pop('subfolder', '')\n from_pipeline = kwargs.pop('_from_pipeline', None)\n from_auto_class = kwargs.pop('_from_auto', False)\n commit_hash = kwargs.pop('_commit_hash', None)\n if use_auth_token is not None:\n warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n if token is not None:\n raise ValueError('`token` and `use_auth_token` are both specified. 
Please set only the argument `token`.')\n token = use_auth_token\n user_agent = {'file_type': 'config', 'from_auto_class': from_auto_class}\n if from_pipeline is not None:\n user_agent['using_pipeline'] = from_pipeline\n config_path = os.path.join(pretrained_model_name, config_file_name)\n config_path = str(config_path)\n is_local = os.path.exists(config_path)\n if os.path.isfile(os.path.join(subfolder, config_path)):\n resolved_config_file = config_path\n is_local = True\n elif is_remote_url(config_path):\n configuration_file = config_path\n resolved_config_file = download_url(config_path)\n else:\n configuration_file = config_file_name\n try:\n resolved_config_file = cached_file(pretrained_model_name, configuration_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, token=token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=commit_hash)\n commit_hash = extract_commit_hash(resolved_config_file, commit_hash)\n except OSError:\n raise\n except Exception:\n raise OSError(f\"Can't load the configuration of '{pretrained_model_name}'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure '{pretrained_model_name}' is the correct path to a directory containing a {configuration_file} file\")\n try:\n config_dict = cls._dict_from_json_file(resolved_config_file)\n config_dict['_commit_hash'] = commit_hash\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise OSError(f\"It looks like the config file at '{resolved_config_file}' is not a valid JSON file.\")\n if is_local:\n logger.info(f'loading configuration file {resolved_config_file}')\n else:\n logger.info(f'loading configuration file {configuration_file} from cache at {resolved_config_file}')\n if kwargs.get('return_unused_kwargs') is True:\n config, unused_kwargs = cls.from_dict(config_dict, **kwargs)\n config._original_object_hash = hash(config)\n return (config, unused_kwargs)\n else:\n config = cls.from_dict(config_dict, **kwargs)\n config._original_object_hash = hash(config)\n return config\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, 'r', encoding='utf-8') as reader:\n text = reader.read()\n return json.loads(text)\n\n @classmethod\n def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> 'GenerationConfig':\n \"\"\"\n Instantiates a [`GenerationConfig`] from a Python dictionary of parameters.\n\n Args:\n config_dict (`Dict[str, Any]`):\n Dictionary that will be used to instantiate the configuration object.\n kwargs (`Dict[str, Any]`):\n Additional parameters from which to initialize the configuration object.\n\n Returns:\n [`GenerationConfig`]: The configuration object instantiated from those parameters.\n \"\"\"\n return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)\n kwargs.pop('_from_auto', None)\n kwargs.pop('_from_pipeline', None)\n if '_commit_hash' in kwargs and '_commit_hash' in config_dict:\n kwargs['_commit_hash'] = config_dict['_commit_hash']\n config = cls(**{**config_dict, **kwargs})\n unused_kwargs = config.update(**kwargs)\n logger.info(f'Generate config {config}')\n if return_unused_kwargs:\n return (config, unused_kwargs)\n else:\n return config\n\n def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:\n \"\"\"\n Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not 
None,\n converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *\"float32\"*\n string, which can then be stored in the json format.\n \"\"\"\n if d.get('torch_dtype', None) is not None and (not isinstance(d['torch_dtype'], str)):\n d['torch_dtype'] = str(d['torch_dtype']).split('.')[1]\n for value in d.values():\n if isinstance(value, dict):\n self.dict_torch_dtype_to_str(value)\n\n def to_diff_dict(self) -> Dict[str, Any]:\n \"\"\"\n Removes all attributes from config which correspond to the default config attributes for better readability and\n serializes to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n config_dict = self.to_dict()\n default_config_dict = GenerationConfig().to_dict()\n serializable_config_dict = {}\n for key, value in config_dict.items():\n if key not in default_config_dict or key == 'transformers_version' or value != default_config_dict[key]:\n serializable_config_dict[key] = value\n self.dict_torch_dtype_to_str(serializable_config_dict)\n return serializable_config_dict\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n if '_commit_hash' in output:\n del output['_commit_hash']\n if '_original_object_hash' in output:\n del output['_original_object_hash']\n if 'compile_config' in output:\n del output['compile_config']\n output['transformers_version'] = __version__\n self.dict_torch_dtype_to_str(output)\n return output\n\n def to_json_string(self, use_diff: bool=True, ignore_metadata: bool=False) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Args:\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `GenerationConfig()`\n is serialized to JSON string.\n ignore_metadata (`bool`, *optional*, defaults to `False`):\n Whether to ignore the metadata fields present in the instance\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n if use_diff is True:\n config_dict = self.to_diff_dict()\n else:\n config_dict = self.to_dict()\n if ignore_metadata:\n for metadata_field in METADATA_FIELDS:\n config_dict.pop(metadata_field, None)\n\n def convert_keys_to_string(obj):\n if isinstance(obj, dict):\n return {str(key): convert_keys_to_string(value) for key, value in obj.items()}\n elif isinstance(obj, list):\n return [convert_keys_to_string(item) for item in obj]\n else:\n return obj\n\n def convert_dataclass_to_dict(obj):\n if isinstance(obj, dict):\n return {key: convert_dataclass_to_dict(value) for key, value in obj.items()}\n elif is_dataclass(obj):\n return obj.to_dict()\n else:\n return obj\n config_dict = convert_keys_to_string(config_dict)\n config_dict = convert_dataclass_to_dict(config_dict)\n return json.dumps(config_dict, indent=2, sort_keys=True) + '\\n'\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool=True):\n \"\"\"\n Save this instance to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the 
default `GenerationConfig()`\n is serialized to JSON file.\n \"\"\"\n with open(json_file_path, 'w', encoding='utf-8') as writer:\n writer.write(self.to_json_string(use_diff=use_diff))\n\n @classmethod\n def from_model_config(cls, model_config: PretrainedConfig) -> 'GenerationConfig':\n \"\"\"\n Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy\n [`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`].\n\n Args:\n model_config (`PretrainedConfig`):\n The model config that will be used to instantiate the generation config.\n\n Returns:\n [`GenerationConfig`]: The configuration object instantiated from those parameters.\n \"\"\"\n config_dict = model_config.to_dict()\n config_dict.pop('_from_model_config', None)\n config_dict = {key: value for key, value in config_dict.items() if value is not None}\n generation_config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True)\n decoder_config = model_config.get_text_config(decoder=True)\n if decoder_config is not model_config:\n default_generation_config = GenerationConfig()\n decoder_config_dict = decoder_config.to_dict()\n for attr in generation_config.to_dict().keys():\n is_unset = getattr(generation_config, attr) == getattr(default_generation_config, attr)\n if attr in decoder_config_dict and is_unset:\n setattr(generation_config, attr, decoder_config_dict[attr])\n if generation_config.return_dict_in_generate is False:\n if any((getattr(generation_config, extra_output_flag, False) for extra_output_flag in generation_config.extra_output_flags)):\n generation_config.return_dict_in_generate = True\n generation_config._original_object_hash = hash(generation_config)\n return generation_config\n\n def update(self, **kwargs):\n \"\"\"\n Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,\n returning all the unused kwargs.\n\n Args:\n kwargs (`Dict[str, Any]`):\n Dictionary of attributes to tentatively update this class.\n\n Returns:\n `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.\n \"\"\"\n to_remove = []\n for key, value in kwargs.items():\n if hasattr(self, key):\n setattr(self, key, value)\n to_remove.append(key)\n self.validate()\n unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}\n return unused_kwargs", "docstring": "Class that holds a configuration for a generation task. 
A `generate` call supports the following generation methods\nfor text-decoder, text-to-text, speech-to-text, and vision-to-text models:\n\n - *greedy decoding* if `num_beams=1` and `do_sample=False`\n - *contrastive search* if `penalty_alpha>0.` and `top_k>1`\n - *multinomial sampling* if `num_beams=1` and `do_sample=True`\n - *beam-search decoding* if `num_beams>1` and `do_sample=False`\n - *beam-search multinomial sampling* if `num_beams>1` and `do_sample=True`\n - *diverse beam-search decoding* if `num_beams>1` and `num_beam_groups>1`\n - *constrained beam-search decoding* if `constraints!=None` or `force_words_ids!=None`\n - *assisted decoding* if `assistant_model` or `prompt_lookup_num_tokens` is passed to `.generate()`\n - *dola decoding* if `dola_layers` is passed to `.generate()`\n\nTo learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).\n\n\n\nA large number of these flags control the logits or the stopping criteria of the generation. Make sure you check\nthe [generate-related classes](https://huggingface.co/docs/transformers/internal/generation_utils) for a full\ndescription of the possible manipulations, as well as examples of their usage.\n\n\n\nArgs:\n > Parameters that control the length of the output\n\n max_length (`int`, *optional*, defaults to 20):\n The maximum length the generated tokens can have. Corresponds to the length of the input prompt +\n `max_new_tokens`. Its effect is overridden by `max_new_tokens`, if also set.\n max_new_tokens (`int`, *optional*):\n The maximum number of tokens to generate, ignoring the number of tokens in the prompt.\n min_length (`int`, *optional*, defaults to 0):\n The minimum length of the sequence to be generated. Corresponds to the length of the input prompt +\n `min_new_tokens`. Its effect is overridden by `min_new_tokens`, if also set.\n min_new_tokens (`int`, *optional*):\n The minimum number of tokens to generate, ignoring the number of tokens in the prompt.\n early_stopping (`bool` or `str`, *optional*, defaults to `False`):\n Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:\n `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a\n heuristic is applied and the generation stops when it is very unlikely to find better candidates;\n `\"never\"`, where the beam search procedure only stops when there cannot be better candidates (canonical\n beam search algorithm).\n max_time (`float`, *optional*):\n The maximum amount of time you allow the computation to run for in seconds. Generation will still finish\n the current pass after the allocated time has passed.\n stop_strings (`str` or `List[str]`, *optional*):\n A string or a list of strings that should terminate generation if the model outputs them.\n\n > Parameters that control the generation strategy used\n\n do_sample (`bool`, *optional*, defaults to `False`):\n Whether or not to use sampling; use greedy decoding otherwise.\n num_beams (`int`, *optional*, defaults to 1):\n Number of beams for beam search. 
1 means no beam search.\n num_beam_groups (`int`, *optional*, defaults to 1):\n Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.\n See [this paper](https://huggingface.co/papers/1610.02424) for more details.\n penalty_alpha (`float`, *optional*):\n The value balances the model confidence and the degeneration penalty in contrastive search decoding.\n dola_layers (`str` or `List[int]`, *optional*):\n The layers to use for DoLa decoding. If `None`, DoLa decoding is not used. If a string, it must\n be one of \"low\" or \"high\", which means using the lower part or higher part of the model layers, respectively.\n \"low\" means the first half of the layers up to the first 20 layers, and \"high\" means the last half of the\n layers up to the last 20 layers.\n If a list of integers, it must contain the indices of the layers to use for candidate premature layers in DoLa.\n The 0-th layer is the word embedding layer of the model. Set to `'low'` to improve long-answer reasoning tasks,\n `'high'` to improve short-answer tasks. Check the [documentation](https://github.com/huggingface/transformers/blob/main/docs/source/en/generation_strategies.md)\n or [the paper](https://huggingface.co/papers/2309.03883) for more details.\n\n > Parameters that control the cache\n\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should use the past key/values attentions (if applicable to the model) to\n speed up decoding.\n cache_implementation (`str`, *optional*, defaults to `None`):\n Name of the cache class that will be instantiated in `generate`, for faster decoding. Possible values are:\n\n - `\"dynamic\"`: [`DynamicCache`]\n - `\"static\"`: [`StaticCache`]\n - `\"offloaded_static\"`: [`OffloadedStaticCache`]\n - `\"sliding_window\"`: [`SlidingWindowCache`]\n - `\"hybrid\"`: [`HybridCache`]\n - `\"mamba\"`: [`MambaCache`]\n - `\"quantized\"`: [`QuantizedCache`]\n\n If none is specified, we will use the default cache for the model (which is often [`DynamicCache`]). See\n our [cache documentation](https://huggingface.co/docs/transformers/en/kv_cache) for further information.\n cache_config (`CacheConfig` or `dict`, *optional*, defaults to `None`):\n Arguments used in the key-value cache class can be passed in `cache_config`. Can be passed as a `Dict` and\n it will be converted to its respective `CacheConfig` internally.\n Otherwise can be passed as a `CacheConfig` class matching the indicated `cache_implementation`.\n return_legacy_cache (`bool`, *optional*, defaults to `True`):\n Whether to return the legacy or new format of the cache when `DynamicCache` is used by default.\n\n > Parameters for manipulation of the model output logits\n\n temperature (`float`, *optional*, defaults to 1.0):\n The value used to modulate the next token probabilities. This value is set in a model's `generation_config.json` file. If it isn't set, the default value is 1.0.\n top_k (`int`, *optional*, defaults to 50):\n The number of highest probability vocabulary tokens to keep for top-k-filtering. This value is set in a model's `generation_config.json` file. If it isn't set, the default value is 50.\n top_p (`float`, *optional*, defaults to 1.0):\n If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to\n `top_p` or higher are kept for generation. This value is set in a model's `generation_config.json` file. 
If it isn't set, the default value is 1.0.\n min_p (`float`, *optional*):\n Minimum token probability, which will be scaled by the probability of the most likely token. It must be a\n value between 0 and 1. Typical values are in the 0.01-0.2 range, comparably selective as setting `top_p` in\n the 0.99-0.8 range (use the opposite of normal `top_p` values).\n typical_p (`float`, *optional*, defaults to 1.0):\n Local typicality measures how similar the conditional probability of predicting a target token next is to\n the expected conditional probability of predicting a random token next, given the partial text already\n generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that\n add up to `typical_p` or higher are kept for generation. See [this\n paper](https://huggingface.co/papers/2202.00666) for more details.\n epsilon_cutoff (`float`, *optional*, defaults to 0.0):\n If set to float strictly between 0 and 1, only tokens with a conditional probability greater than\n `epsilon_cutoff` will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the\n size of the model. See [Truncation Sampling as Language Model\n Desmoothing](https://huggingface.co/papers/2210.15191) for more details.\n eta_cutoff (`float`, *optional*, defaults to 0.0):\n Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between\n 0 and 1, a token is only considered if it is greater than either `eta_cutoff` or `sqrt(eta_cutoff) *\n exp(-entropy(softmax(next_token_logits)))`. The latter term is intuitively the expected next token\n probability, scaled by `sqrt(eta_cutoff)`. In the paper, suggested values range from 3e-4 to 2e-3,\n depending on the size of the model. See [Truncation Sampling as Language Model\n Desmoothing](https://huggingface.co/papers/2210.15191) for more details.\n diversity_penalty (`float`, *optional*, defaults to 0.0):\n This value is subtracted from a beam's score if it generates the same token as any beam from another group at a\n particular time. Note that `diversity_penalty` is only effective if `group beam search` is enabled.\n repetition_penalty (`float`, *optional*, defaults to 1.0):\n The parameter for repetition penalty. 1.0 means no penalty. See [this\n paper](https://huggingface.co/papers/1909.05858) for more details.\n encoder_repetition_penalty (`float`, *optional*, defaults to 1.0):\n The parameter for encoder_repetition_penalty. An exponential penalty on sequences that are not in the\n original input. 1.0 means no penalty.\n length_penalty (`float`, *optional*, defaults to 1.0):\n Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to\n the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log\n likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while\n `length_penalty` < 0.0 encourages shorter sequences.\n no_repeat_ngram_size (`int`, *optional*, defaults to 0):\n If set to int > 0, all ngrams of that size can only occur once.\n bad_words_ids (`List[List[int]]`, *optional*):\n List of list of token ids that are not allowed to be generated. Check\n [`~generation.NoBadWordsLogitsProcessor`] for further documentation and examples.\n force_words_ids (`List[List[int]]` or `List[List[List[int]]]`, *optional*):\n List of token ids that must be generated. 
If given a `List[List[int]]`, this is treated as a simple list of\n words that must be included, the opposite of `bad_words_ids`. If given `List[List[List[int]]]`, this\n triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one\n can allow different forms of each word.\n renormalize_logits (`bool`, *optional*, defaults to `False`):\n Whether to renormalize the logits after applying all the logits processors (including the custom\n ones). It's highly recommended to set this flag to `True` as the search algorithms assume the score logits\n are normalized but some logit processors break the normalization.\n constraints (`List[Constraint]`, *optional*):\n Custom constraints that can be added to the generation to ensure that the output will contain the use of\n certain tokens as defined by `Constraint` objects, in the most sensible way possible.\n forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`):\n The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for\n multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target\n language token.\n forced_eos_token_id (`int` or `List[int]`, *optional*, defaults to `model.config.forced_eos_token_id`):\n The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a\n list to set multiple *end-of-sequence* tokens.\n remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`):\n Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method from crashing.\n Note that using `remove_invalid_values` can slow down generation.\n exponential_decay_length_penalty (`tuple(int, float)`, *optional*):\n This tuple adds an exponentially increasing length penalty, after a certain number of tokens have been\n generated. The tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where the\n penalty starts and `decay_factor` represents the factor of exponential decay.\n suppress_tokens (`List[int]`, *optional*):\n A list of tokens that will be suppressed at generation. The `SuppressTokens` logit processor will set their\n log probs to `-inf` so that they are not sampled.\n begin_suppress_tokens (`List[int]`, *optional*):\n A list of tokens that will be suppressed at the beginning of the generation. The `SuppressBeginTokens` logit\n processor will set their log probs to `-inf` so that they are not sampled.\n sequence_bias (`Dict[Tuple[int], float]`, *optional*):\n Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the\n sequence being selected, while negative biases do the opposite. Check\n [`~generation.SequenceBiasLogitsProcessor`] for further documentation and examples.\n token_healing (`bool`, *optional*, defaults to `False`):\n Heal tail tokens of prompts by replacing them with their appropriate extensions.\n This enhances the quality of completions for prompts affected by greedy tokenization bias.\n guidance_scale (`float`, *optional*):\n The guidance scale for classifier free guidance (CFG). 
CFG is enabled by setting `guidance_scale > 1`.\n A higher guidance scale encourages the model to generate samples that are more closely linked to the input\n prompt, usually at the expense of poorer quality.\n low_memory (`bool`, *optional*):\n Switch to sequential beam search and sequential topk for contrastive search to reduce peak memory.\n Used with beam search and contrastive search.\n watermarking_config (`BaseWatermarkingConfig` or `dict`, *optional*):\n Arguments used to watermark the model outputs by adding a small bias to a randomly selected set of \"green\"\n tokens. See the docs of [`SynthIDTextWatermarkingConfig`] and [`WatermarkingConfig`] for more\n details. If passed as `Dict`, it will be converted to a `WatermarkingConfig` internally.\n\n > Parameters that define the output variables of generate\n\n num_return_sequences (`int`, *optional*, defaults to 1):\n The number of independently computed returned sequences for each element in the batch.\n output_attentions (`bool`, *optional*, defaults to `False`):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more details.\n output_hidden_states (`bool`, *optional*, defaults to `False`):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more details.\n output_scores (`bool`, *optional*, defaults to `False`):\n Whether or not to return the prediction scores. See `scores` under returned tensors for more details.\n output_logits (`bool`, *optional*):\n Whether or not to return the unprocessed prediction logit scores. See `logits` under returned tensors for\n more details.\n return_dict_in_generate (`bool`, *optional*, defaults to `False`):\n Whether or not to return a [`~utils.ModelOutput`], as opposed to returning exclusively the generated\n sequence. This flag must be set to `True` to return the generation cache (when `use_cache` is `True`)\n or optional outputs (see flags starting with `output_`).\n\n > Special tokens that can be used at generation time\n\n pad_token_id (`int`, *optional*):\n The id of the *padding* token.\n bos_token_id (`int`, *optional*):\n The id of the *beginning-of-sequence* token.\n eos_token_id (`Union[int, List[int]]`, *optional*):\n The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.\n\n > Generation parameters exclusive to encoder-decoder models\n\n encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):\n If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the\n `decoder_input_ids`.\n decoder_start_token_id (`int` or `List[int]`, *optional*):\n If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token or a list of length\n `batch_size`. Passing a list enables different start ids for each element in the batch\n (e.g. multilingual models with different target languages in one batch).\n\n > Generation parameters exclusive to assistant generation\n\n is_assistant (`bool`, *optional*, defaults to `False`):\n Whether the model is an assistant (draft) model.\n num_assistant_tokens (`int`, *optional*, defaults to 20):\n Defines the number of _speculative tokens_ that shall be generated by the assistant model before being\n checked by the target model at each iteration. 
Higher values for `num_assistant_tokens` make the generation\n more _speculative_: if the assistant model is performant, larger speed-ups can be reached; if the assistant\n model requires lots of corrections, lower speed-ups are reached.\n num_assistant_tokens_schedule (`str`, *optional*, defaults to `\"constant\"`):\n Defines the schedule at which max assistant tokens shall be changed during inference.\n - `\"heuristic\"`: When all speculative tokens are correct, increase `num_assistant_tokens` by 2, else\n reduce by 1. The `num_assistant_tokens` value is persistent over multiple generation calls with the same assistant model.\n - `\"heuristic_transient\"`: Same as `\"heuristic\"` but `num_assistant_tokens` is reset to its initial value after each generation call.\n - `\"constant\"`: `num_assistant_tokens` stays unchanged during generation.\n assistant_confidence_threshold (`float`, *optional*, defaults to 0.4):\n The confidence threshold for the assistant model. If the assistant model's confidence in its prediction for the current token is lower\n than this threshold, the assistant model stops the current token generation iteration, even if the number of _speculative tokens_\n (defined by `num_assistant_tokens`) is not yet reached. The assistant's confidence threshold is adjusted throughout the speculative iterations to reduce the number of unnecessary draft and target forward passes, biased towards avoiding false negatives.\n The `assistant_confidence_threshold` value is persistent over multiple generation calls with the same assistant model.\n It is an unsupervised version of the dynamic speculation lookahead\n from Dynamic Speculation Lookahead Accelerates Speculative Decoding of Large Language Models.\n prompt_lookup_num_tokens (`int`, *optional*):\n The number of tokens to be output as candidate tokens.\n max_matching_ngram_size (`int`, *optional*):\n The maximum ngram size to be considered for matching in the prompt. Defaults to 2 if not provided.\n assistant_early_exit (`int`, *optional*):\n If set to a positive integer, early exit of the model will be used as an assistant. Can only be used with\n models that support early exit (i.e. models where logits from intermediate layers can be interpreted by the LM head).\n assistant_lookbehind (`int`, *optional*, defaults to 10):\n If set to a positive integer, the re-encoding process will additionally consider the last `assistant_lookbehind` assistant tokens\n to correctly align tokens. Can only be used with different tokenizers in speculative decoding.\n See this [blog](https://huggingface.co/blog/universal_assisted_generation) for more details.\n target_lookbehind (`int`, *optional*, defaults to 10):\n If set to a positive integer, the re-encoding process will additionally consider the last `target_lookbehind` target tokens\n to correctly align tokens. Can only be used with different tokenizers in speculative decoding.\n See this [blog](https://huggingface.co/blog/universal_assisted_generation) for more details.\n\n > Parameters related to performance and compilation\n\n compile_config (`CompileConfig`, *optional*):\n If using a compilable cache, this controls how `generate` will `compile` the forward pass for faster\n inference.\n disable_compile (`bool`, *optional*):\n Whether to disable the automatic compilation of the forward pass. Automatic compilation happens when\n specific criteria are met, including using a compilable cache. 
Please open an issue if you find the\n need to use this flag."} +{"repo": "tensorflow", "function": "def internal_convert_to_tensor_or_composite(value, dtype=None, name=None, as_ref=False) -> Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor]:\n if isinstance(value, composite_tensor.CompositeTensor):\n value_dtype = getattr(value, 'dtype', None)\n if dtype and (not dtypes.as_dtype(dtype).is_compatible_with(value_dtype)):\n raise ValueError(f'Tensor conversion dtype mismatch. Requested dtype is {dtypes.as_dtype(dtype).name}, Tensor has dtype {value.dtype.name}: {value!r}')\n return value\n else:\n return convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref, accepted_result_types=(tensor_lib.Tensor, composite_tensor.CompositeTensor))", "docstring": "Converts the given object to a `Tensor` or `CompositeTensor`.\n\nIf `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it\nis converted to a `Tensor` using `convert_to_tensor()`.\n\nArgs:\n value: A `CompositeTensor`, or an object that can be consumed by\n `convert_to_tensor()`.\n dtype: (Optional.) The required `DType` of the returned `Tensor` or\n `CompositeTensor`.\n name: (Optional.) A name to use if a new `Tensor` is created.\n as_ref: True if the caller wants the results as ref tensors.\n\nReturns:\n A `Tensor` or `CompositeTensor`, based on `value`.\n\nRaises:\n ValueError: If `dtype` does not match the element type of `value`."} +{"repo": "transformers", "function": "def tf_shard_checkpoint(weights, max_shard_size='10GB', weights_name: str=TF2_WEIGHTS_NAME):\n max_shard_size = convert_file_size_to_int(max_shard_size)\n sharded_state_dicts = []\n current_block = []\n current_block_size = 0\n total_size = 0\n for item in weights:\n weight_size = item.numpy().size * item.dtype.size\n if current_block_size + weight_size > max_shard_size:\n sharded_state_dicts.append(current_block)\n current_block = []\n current_block_size = 0\n current_block.append(item)\n current_block_size += weight_size\n total_size += weight_size\n sharded_state_dicts.append(current_block)\n if len(sharded_state_dicts) == 1:\n return ({weights_name: sharded_state_dicts[0]}, None)\n weight_map = {}\n shards = {}\n for idx, shard in enumerate(sharded_state_dicts):\n shard_file = weights_name.replace('.h5', f'-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.h5')\n shard_file = shard_file.replace('.safetensors', f'-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors')\n shards[shard_file] = shard\n for weight in shard:\n weight_name = weight.name\n weight_map[weight_name] = shard_file\n metadata = {'total_size': total_size}\n index = {'metadata': metadata, 'weight_map': weight_map}\n return (shards, index)", "docstring": "Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a\ngiven size.\n\nThe sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no\noptimization made to make each sub-checkpoint as close as possible to the maximum size passed. 
For example, if the\nlimit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],\n[6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].\n\n\n\nIf one of the model's weight is bigger that `max_shard_size`, it will end up in its own sub-checkpoint which will\nhave a size greater than `max_shard_size`.\n\n\n\nArgs:\n weights (`Dict[str, tf.RessourceVariable]`): The list of tf.RessourceVariable of a model to save.\n max_shard_size (`int` or `str`, *optional*, defaults to `\"10GB\"`):\n The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit\n (like `\"5MB\"`)."} +{"repo": "tensorflow", "function": "def __init__(self, value):\n if not (isinstance(value, tensor.Tensor) and value.dtype.is_floating):\n raise ValueError('Regression output value must be a float32 Tensor; got {}'.format(value))\n self._value = value", "docstring": "Constructor for `RegressionOutput`.\n\nArgs:\n value: a float `Tensor` giving the predicted values. Required.\n\nRaises:\n ValueError: if the value is not a `Tensor` with dtype tf.float32."} +{"repo": "transformers", "function": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache,\n to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`torch.Tensor`):\n Batch size."} +{"repo": "pytype", "function": "def _compute_template(val: BaseValue) -> Sequence[BaseValue]:\n if isinstance(val, _abstract.PyTDClass):\n return [val.ctx.convert.constant_to_value(itm.type_param) for itm in val.pytd_cls.template]\n elif not isinstance(val, _abstract.InterpreterClass):\n return ()\n bases = [abstract_utils.get_atomic_value(base, 
default=val.ctx.convert.unsolvable) for base in val.bases()]\n template = []\n for base in bases:\n if base.full_name == 'typing.Generic':\n if isinstance(base, _abstract.PyTDClass):\n raise abstract_utils.GenericTypeError(val, 'Cannot inherit from plain Generic')\n if template:\n raise abstract_utils.GenericTypeError(val, 'Cannot inherit from Generic[...] multiple times')\n for item in base.template:\n param = base.formal_type_parameters.get(item.name)\n template.append(param.with_scope(val.full_name))\n if template:\n for base in bases:\n if base.full_name != 'typing.Generic':\n if isinstance(base, _abstract.ParameterizedClass):\n for item in base.template:\n param = base.formal_type_parameters.get(item.name)\n if isinstance(base, _abstract.TypeParameter):\n t = param.with_scope(val.full_name)\n if t not in template:\n raise abstract_utils.GenericTypeError(val, 'Generic should contain all the type variables')\n else:\n seqs = []\n for base in bases:\n if isinstance(base, _abstract.ParameterizedClass):\n seq = []\n for item in base.template:\n param = base.formal_type_parameters.get(item.name)\n if isinstance(param, _abstract.TypeParameter):\n seq.append(param.with_scope(val.full_name))\n seqs.append(seq)\n try:\n template.extend(mro.MergeSequences(seqs))\n except ValueError as e:\n raise abstract_utils.GenericTypeError(val, f'Illegal type parameter order in class {val.name}') from e\n return template", "docstring": "Compute the precedence list of template parameters according to C3.\n\n1. For the base class list, if it contains `typing.Generic`, then all the\ntype parameters should be provided. That means we don't need to parse extra\nbase classes and then we can get all the type parameters.\n2. If there is no `typing.Generic`, parse the precedence list according to\nC3 based on all the base classes.\n3. If `typing.Generic` exists, it must contain at least one type parameter.\nAnd there is at most one `typing.Generic` in the base classes. Report an error\nif the check fails.\n\nArgs:\n val: The abstract.BaseValue to compute a template for.\n\nReturns:\n The parsed type parameters.\n\nRaises:\n GenericTypeError: if the type annotation for a generic type is incorrect."} +{"repo": "tensorflow", "function": "def zero_fraction(value, name=None):\n with ops.name_scope(name, 'zero_fraction', [value]):\n value = ops.convert_to_tensor(value, name='value')\n size = array_ops.size(value, out_type=dtypes.int64)\n num_nonzero = tf_cond.cond(size <= dtypes.int32.max, true_fn=lambda: math_ops.cast(_count_nonzero(value, dtype=dtypes.int32), dtype=dtypes.int64), false_fn=lambda: _count_nonzero(value, dtype=dtypes.int64))\n with ops.name_scope('counts_to_fraction'):\n num_zero = size - num_nonzero\n num_zero_float32 = math_ops.cast(num_zero, dtype=dtypes.float32)\n size_float32 = math_ops.cast(size, dtype=dtypes.float32)\n zero_fraction_float32 = num_zero_float32 / size_float32\n return array_ops.identity(zero_fraction_float32, 'fraction')", "docstring": "Returns the fraction of zeros in `value`.\n\nIf `value` is empty, the result is `nan`.\n\nThis is useful in summaries to measure and report sparsity. 
For example,\n\n```python\n z = tf.nn.relu(...)\n summ = tf.compat.v1.summary.scalar('sparsity', tf.nn.zero_fraction(z))\n```\n\nArgs:\n value: A tensor of numeric type.\n name: A name for the operation (optional).\n\nReturns:\n The fraction of zeros in `value`, with type `float32`."} +{"repo": "beam", "function": "def get_num_bytes(self, batch: Sequence[str]) -> int:\n return sum((sys.getsizeof(element) for element in batch))", "docstring": "Returns:\n The number of bytes of input batch elements."} +{"repo": "tensorflow", "function": "def _evaluate_once(checkpoint_path, master='', scaffold=None, eval_ops=None, feed_dict=None, final_ops=None, final_ops_feed_dict=None, hooks=None, config=None):\n eval_step = _get_or_create_eval_step()\n hooks = list(hooks or [])\n if eval_ops is not None:\n if any((isinstance(h, _MultiStepStopAfterNEvalsHook) for h in hooks)):\n steps_per_run_variable = basic_session_run_hooks.get_or_create_steps_per_run_variable()\n update_eval_step = state_ops.assign_add(eval_step, math_ops.cast(steps_per_run_variable, dtype=eval_step.dtype), use_locking=True)\n else:\n update_eval_step = state_ops.assign_add(eval_step, 1, use_locking=True)\n if isinstance(eval_ops, dict):\n eval_ops['update_eval_step'] = update_eval_step\n elif isinstance(eval_ops, (tuple, list)):\n eval_ops = list(eval_ops) + [update_eval_step]\n else:\n eval_ops = [eval_ops, update_eval_step]\n eval_step_value = _get_latest_eval_step_value(eval_ops)\n for h in hooks:\n if isinstance(h, (_StopAfterNEvalsHook, _MultiStepStopAfterNEvalsHook)):\n h._set_evals_completed_tensor(eval_step_value)\n logging.info('Starting evaluation at ' + time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime()))\n start = time.time()\n session_creator = monitored_session.ChiefSessionCreator(scaffold=scaffold, checkpoint_filename_with_path=checkpoint_path, master=master, config=config)\n final_ops_hook = basic_session_run_hooks.FinalOpsHook(final_ops, final_ops_feed_dict)\n hooks.append(final_ops_hook)\n with monitored_session.MonitoredSession(session_creator=session_creator, hooks=hooks) as session:\n if eval_ops is not None:\n while not session.should_stop():\n session.run(eval_ops, feed_dict)\n logging.info('Inference Time : {:0.5f}s'.format(time.time() - start))\n logging.info('Finished evaluation at ' + time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()))\n return final_ops_hook.final_ops_values", "docstring": "Evaluates the model at the given checkpoint path.\n\nDuring a single evaluation, the `eval_ops` is run until the session is\ninterrupted or requested to finish. This is typically requested via a\n`tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running\nthe requested number of times.\n\nOptionally, a user can pass in `final_ops`, a single `Tensor`, a list of\n`Tensors` or a dictionary from names to `Tensors`. The `final_ops` is\nevaluated a single time after `eval_ops` has finished running and the fetched\nvalues of `final_ops` are returned. If `final_ops` is left as `None`, then\n`None` is returned.\n\nOne may also consider using a `tf.contrib.training.SummaryAtEndHook` to record\nsummaries after the `eval_ops` have run. 
If `eval_ops` is `None`, the\nsummaries run immediately after the model checkpoint has been restored.\n\nNote that `evaluate_once` creates a local variable used to track the number of\nevaluations run via `tf.contrib.training.get_or_create_eval_step`.\nConsequently, if a custom local init op is provided via a `scaffold`, the\ncaller should ensure that the local init op also initializes the eval step.\n\nArgs:\n checkpoint_path: The path to a checkpoint to use for evaluation.\n master: The BNS address of the TensorFlow master.\n scaffold: A tf.compat.v1.train.Scaffold instance for initializing variables\n and restoring variables. Note that `scaffold.init_fn` is used by the\n function to restore the checkpoint. If you supply a custom init_fn, then\n it must also take care of restoring the model from its checkpoint.\n eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names to\n `Tensors`, which is run until the session is requested to stop, commonly\n done by a `tf.contrib.training.StopAfterNEvalsHook`.\n feed_dict: The feed dictionary to use when executing the `eval_ops`.\n final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names\n to `Tensors`.\n final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.\n hooks: List of `tf.estimator.SessionRunHook` callbacks which are run inside\n the evaluation loop.\n config: An instance of `tf.compat.v1.ConfigProto` that will be used to\n configure the `Session`. If left as `None`, the default will be used.\n\nReturns:\n The fetched values of `final_ops` or `None` if `final_ops` is `None`."} +{"repo": "starthinker", "function": "def get_rows(self, reportId: int=None, timeout: int=60 * 3) -> typing.Iterator[dict]:\n if reportId is None:\n reportId = self.reportId\n while timeout > 0:\n report = API_SearchAds(self.config, self.auth).reports().get(reportId=reportId).execute()\n if report['isReportReady']:\n for fragment in range(len(report['files'])):\n rows = csv_to_rows(API_SearchAds(self.config, self.auth).reports().getFile(reportId=reportId, reportFragment=fragment).execute())\n if fragment > 0:\n next(rows)\n yield from rows\n break\n else:\n if self.config.verbose:\n print('.', end='')\n sleep(60)\n timeout -= 1", "docstring": "Return each row of data from a report as a generator.\n\nWait up to 3 hours with 1-minute poll intervals for the report to finish.\nHandle fragmented downloads.\n\nArgs:\n reportId - optional, if not given uses prior value from request(...) call.\n timeout - optional, number of minutes to wait for the report to complete.\n\nReturns:\n Generator with lists of column values."} +{"repo": "transformers", "function": "def get_image_tokens(self, pixel_values: torch.FloatTensor, image_sizes: torch.LongTensor):\n image_tokens_list = self.vqmodel.encode(pixel_values, image_sizes)\n bpe_tokens_list = [self.vocabulary_mapping.convert_img2bpe(tokens).flatten() for tokens in image_tokens_list]\n bpe_tokens = torch.cat(bpe_tokens_list)\n return bpe_tokens", "docstring": "Tokenizes images into discrete tokens with the VQGAN module. 
Converts\nobtained image tokens into BPE tokens and wraps with \"boi\" and \"eoi\"\nspecial tokens.\n\nArgs:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\n The tensors corresponding to the input images.\n image_sizes (`torch.LongTensor` of shape `(batch_size, 2)`):\n The sizes of the images in the batch, being (height, width) for each image."} +{"repo": "tensorflow", "function": "def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n self._validate_kwargs(kwargs)\n dtype = _assert_float_dtype(dtype)\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n return self._random_generator.truncated_normal(shape, self.mean, self.stddev, dtype)", "docstring": "Returns a tensor object initialized as specified by the initializer.\n\nArgs:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported.\n **kwargs: Additional keyword arguments.\n\nRaises:\n ValueError: If the dtype is not floating point."} +{"repo": "tensorflow", "function": "def do_not_doc_inheritable(obj: T) -> T:\n setattr(obj, _DO_NOT_DOC_INHERITABLE, None)\n return obj", "docstring": "A decorator: Do not generate docs for this method.\n\nThis version of the decorator is \"inherited\" by subclasses. No docs will be\ngenerated for the decorated method in any subclass. Even if the sub-class\noverrides the method.\n\nFor example, to ensure that `method1` is **never documented** use this\ndecorator on the base-class:\n\n```\nclass Parent(object):\n @do_not_doc_inheritable\n def method1(self):\n pass\n def method2(self):\n pass\n\nclass Child(Parent):\n def method1(self):\n pass\n def method2(self):\n pass\n```\nThis will produce the following docs:\n\n```\n/Parent.md\n # method2\n/Child.md\n # method2\n```\n\nWhen generating docs for a class's attributes, the `__mro__` is searched and\nthe attribute will be skipped if this decorator is detected on the attribute\non any class in the `__mro__`.\n\nNote: This is implemented by adding a hidden attribute on the object, so it\ncannot be used on objects which do not allow new attributes to be added. So\nthis decorator must go *below* `@property`, `@classmethod`,\nor `@staticmethod`:\n\n```\nclass Example(object):\n @property\n @do_not_doc_inheritable\n def x(self):\n return self._x\n```\n\nArgs:\n obj: The class-attribute to hide from the generated docs.\n\nReturns:\n obj"} +{"repo": "tensorflow", "function": "def assert_input_compatibility(input_spec, inputs, layer_name):\n if not input_spec:\n return\n input_spec = nest.flatten(input_spec)\n if isinstance(inputs, dict):\n names = [spec.name for spec in input_spec]\n if all(names):\n list_inputs = []\n for name in names:\n if name not in inputs:\n raise ValueError('Missing data for input \"%s\". You passed a data dictionary with keys %s. Expected the following keys: %s' % (name, list(inputs.keys()), names))\n list_inputs.append(inputs[name])\n inputs = list_inputs\n inputs = nest.flatten(inputs)\n for x in inputs:\n if not hasattr(x, 'shape'):\n raise TypeError('Inputs to a layer should be tensors. Got: %s' % (x,))\n if len(inputs) != len(input_spec):\n raise ValueError('Layer ' + layer_name + ' expects ' + str(len(input_spec)) + ' input(s), but it received ' + str(len(inputs)) + ' input tensors. 
Inputs received: ' + str(inputs))\n for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):\n if spec is None:\n continue\n shape = tensor_shape.TensorShape(x.shape)\n if shape.rank is None:\n return\n if spec.ndim is not None and (not spec.allow_last_axis_squeeze):\n ndim = shape.rank\n if ndim != spec.ndim:\n raise ValueError('Input ' + str(input_index) + ' of layer ' + layer_name + ' is incompatible with the layer: expected ndim=' + str(spec.ndim) + ', found ndim=' + str(ndim) + '. Full shape received: ' + str(tuple(shape)))\n if spec.max_ndim is not None:\n ndim = x.shape.rank\n if ndim is not None and ndim > spec.max_ndim:\n raise ValueError('Input ' + str(input_index) + ' of layer ' + layer_name + ' is incompatible with the layer: expected max_ndim=' + str(spec.max_ndim) + ', found ndim=' + str(ndim))\n if spec.min_ndim is not None:\n ndim = x.shape.rank\n if ndim is not None and ndim < spec.min_ndim:\n raise ValueError('Input ' + str(input_index) + ' of layer ' + layer_name + ' is incompatible with the layer: : expected min_ndim=' + str(spec.min_ndim) + ', found ndim=' + str(ndim) + '. Full shape received: ' + str(tuple(shape)))\n if spec.dtype is not None:\n if x.dtype.name != spec.dtype:\n raise ValueError('Input ' + str(input_index) + ' of layer ' + layer_name + ' is incompatible with the layer: expected dtype=' + str(spec.dtype) + ', found dtype=' + str(x.dtype))\n shape_as_list = shape.as_list()\n if spec.axes:\n for axis, value in spec.axes.items():\n if hasattr(value, 'value'):\n value = value.value\n if value is not None and shape_as_list[int(axis)] not in {value, None}:\n raise ValueError('Input ' + str(input_index) + ' of layer ' + layer_name + ' is incompatible with the layer: expected axis ' + str(axis) + ' of input shape to have value ' + str(value) + ' but received input with shape ' + display_shape(x.shape))\n if spec.shape is not None and shape.rank is not None:\n spec_shape = spec.shape\n if spec.allow_last_axis_squeeze:\n if shape_as_list and shape_as_list[-1] == 1:\n shape_as_list = shape_as_list[:-1]\n if spec_shape and spec_shape[-1] == 1:\n spec_shape = spec_shape[:-1]\n for spec_dim, dim in zip(spec_shape, shape_as_list):\n if spec_dim is not None and dim is not None:\n if spec_dim != dim:\n raise ValueError('Input ' + str(input_index) + ' is incompatible with layer ' + layer_name + ': expected shape=' + str(spec.shape) + ', found shape=' + display_shape(x.shape))", "docstring": "Checks compatibility between the layer and provided inputs.\n\nThis checks that the tensor(s) `inputs` verify the input assumptions\nof a layer (if any). 
If not, a clear and actionable exception gets raised.\n\nArgs:\n input_spec: An InputSpec instance, list of InputSpec instances, a nested\n structure of InputSpec instances, or None.\n inputs: Input tensor, list of input tensors, or a nested structure of\n input tensors.\n layer_name: String, name of the layer (for error message formatting).\n\nRaises:\n ValueError: in case of mismatch between\n the provided inputs and the expectations of the layer."} +{"repo": "tensorflow", "function": "def GetTestConfigs():\n test_configs = [('NHWC', False), ('NHWC', True)]\n if test.is_gpu_available(cuda_only=True):\n test_configs += [('NCHW', True)]\n return test_configs", "docstring": "Get all the valid test configs to run.\n\nReturns:\n all the valid test configs as tuples of data_format and use_gpu."} +{"repo": "transformers", "function": "def load_attributes_from_hdf5_group(group, name):\n if name in group.attrs:\n data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]\n else:\n data = []\n chunk_id = 0\n while '%s%d' % (name, chunk_id) in group.attrs:\n data.extend([n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs['%s%d' % (name, chunk_id)]])\n chunk_id += 1\n return data", "docstring": "Loads attributes of the specified name from the HDF5 group.\n\nThis method deals with an inherent problem of the HDF5 file format, which is not able to store data larger than\nHDF5_OBJECT_HEADER_LIMIT bytes.\n\nArgs:\n group: A pointer to a HDF5 group.\n name: A name of the attributes to load.\n\nReturns:\n data: Attributes data.\n\nCopied from Keras to Transformers to avoid versioning issues."} +{"repo": "pytype", "function": "def _cmp_rel(self, state, op_name, x, y):\n ret = self.ctx.program.NewVariable()\n leftover_x = self.ctx.program.NewVariable()\n leftover_y = self.ctx.program.NewVariable()\n op_not_eq = op_name not in ('EQ', 'NE')\n reported = False\n for b1 in x.bindings:\n for b2 in y.bindings:\n op = getattr(slots, op_name)\n try:\n err = False\n val = compare.cmp_rel(self.ctx, op, b1.data, b2.data)\n except compare.CmpTypeError:\n val = None\n if state.node.HasCombination([b1, b2]):\n err = True\n reported = True\n self.ctx.errorlog.unsupported_operands(self.frames, op, x, y)\n if val is None:\n if op_not_eq and isinstance(b1.data, abstract.Class) and err:\n ret.AddBinding(self.ctx.convert.unsolvable, {b1, b2}, state.node)\n elif isinstance(b1.data, abstract.SequenceLength):\n ret.AddBinding(self.ctx.convert.bool_values[val], {b1, b2}, state.node)\n else:\n leftover_x.PasteBinding(b1, state.node)\n leftover_y.PasteBinding(b2, state.node)\n else:\n ret.AddBinding(self.ctx.convert.bool_values[val], {b1, b2}, state.node)\n if leftover_x.bindings:\n op = f'__{op_name.lower()}__'\n report_errors = op_not_eq and (not bool(ret.bindings)) and (not reported)\n state, leftover_ret = vm_utils.call_binary_operator(state, op, leftover_x, leftover_y, report_errors=report_errors, ctx=self.ctx)\n ret.PasteVariable(leftover_ret, state.node)\n return (state, ret)", "docstring": "Implementation of relational operators CMP_(LT|LE|EQ|NE|GE|GT).\n\nArgs:\n state: Initial FrameState.\n op_name: An operator name, e.g., \"EQ\".\n x: A variable of the lhs value.\n y: A variable of the rhs value.\n\nReturns:\n A tuple of the new FrameState and the return variable."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return self.prefix_tokens + token_ids_0 + 
self.suffix_tokens\n return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\nadding special tokens. An MBART-50 sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `[src_lang_code] X [eos]`\n- `labels`: (for decoder) `[tgt_lang_code] X [eos]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "tensorflow", "function": "def device_coordinates(self):\n return self._device_coordinates", "docstring": "Describes the mapping from TPU devices to topology coordinates.\n\nReturns:\n A rank 3 int32 array with shape `[tasks, devices, axis]`.\n `tasks` is the number of tasks in the TPU cluster, `devices` is the number\n of TPU devices per task, and `axis` is the number of axes in the TPU\n cluster topology. Each entry gives the `axis`-th coordinate in the\n topology of a task/device pair. TPU topologies are 4-dimensional, with\n dimensions `(x, y, z, core number)`."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=False, cache_position: Optional[torch.LongTensor]=None, output_router_logits: Optional[bool]=False, position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]]=None, **kwargs) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n residual = hidden_states\n hidden_states = self.input_layernorm(hidden_states)\n if self.mamba is not None:\n hidden_states = self.mamba(hidden_states=hidden_states, cache_position=cache_position, cache_params=past_key_value, attention_mask=attention_mask)\n self_attn_weights = None\n else:\n hidden_states, self_attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs)\n hidden_states = residual + hidden_states * self.residual_multiplier\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n moe_hidden_states, router_logits = self.block_sparse_moe(hidden_states)\n hidden_states = moe_hidden_states + self.shared_mlp(hidden_states)\n hidden_states = residual + hidden_states * self.residual_multiplier\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n if use_cache:\n outputs += (past_key_value,)\n if output_router_logits:\n outputs += (router_logits,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`, *optional*):\n attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,\n query_sequence_length, key_sequence_length)` if default attention is used.\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection 
states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence.\n output_router_logits (`bool`, *optional*):\n Whether or not to return the logits of all the routers. They are useful for computing the router loss, and\n should not be returned during inference.\n position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):\n Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,\n with `head_dim` being the embedding dimension of each attention head.\n kwargs (`dict`, *optional*):\n Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code\n into the model."} +{"repo": "transformers", "function": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]]=None):\n logits = outputs.logits\n if target_sizes is not None:\n if len(logits) != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n if is_torch_tensor(target_sizes):\n target_sizes = target_sizes.numpy()\n semantic_segmentation = []\n for idx in range(len(logits)):\n resized_logits = torch.nn.functional.interpolate(logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n semantic_map = resized_logits[0].argmax(dim=0)\n semantic_segmentation.append(semantic_map)\n else:\n semantic_segmentation = logits.argmax(dim=1)\n semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n return semantic_segmentation", "docstring": "Converts the output of [`MobileNetV2ForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.\n\nArgs:\n outputs ([`MobileNetV2ForSemanticSegmentation`]):\n Raw outputs of the model.\n target_sizes (`List[Tuple]` of length `batch_size`, *optional*):\n List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,\n predictions will not be resized.\n\nReturns:\n semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic\n segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is\n specified). Each entry of each `torch.Tensor` corresponds to a semantic class id."} +{"repo": "tensorflow", "function": "def searchsorted(sorted_sequence, values, side='left', out_type=dtypes.int32, name=None):\n sequence_size = shape_internal(sorted_sequence)[-1]\n values_size = shape_internal(values)[-1]\n sorted_sequence_2d = reshape(sorted_sequence, [-1, sequence_size])\n values_2d = reshape(values, [-1, values_size])\n if side == 'right':\n output = gen_array_ops.upper_bound(sorted_sequence_2d, values_2d, out_type, name)\n elif side == 'left':\n output = gen_array_ops.lower_bound(sorted_sequence_2d, values_2d, out_type, name)\n else:\n raise ValueError(f\"Argument `side` must be either 'right' or 'left'. 
Received: `side` = '{side}'.\")\n return reshape(output, shape_internal(values))", "docstring": "Searches for where a value would go in a sorted sequence.\n\nThis is not a method for checking containment (like python `in`).\n\nThe typical use case for this operation is \"binning\", \"bucketing\", or\n\"discretizing\". The `values` are assigned to bucket-indices based on the\n**edges** listed in `sorted_sequence`. This operation\nreturns the bucket-index for each value.\n\n>>> edges = [-1, 3.3, 9.1, 10.0]\n>>> values = [0.0, 4.1, 12.0]\n>>> tf.searchsorted(edges, values).numpy()\narray([1, 2, 4], dtype=int32)\n\nThe `side` argument controls which index is returned if a value lands exactly\non an edge:\n\n>>> seq = [0, 3, 9, 10, 10]\n>>> values = [0, 4, 10]\n>>> tf.searchsorted(seq, values).numpy()\narray([0, 2, 3], dtype=int32)\n>>> tf.searchsorted(seq, values, side=\"right\").numpy()\narray([1, 2, 5], dtype=int32)\n\nThe `axis` is not settable for this operation. It always operates on the\ninnermost dimension (`axis=-1`). The operation will accept any number of\nouter dimensions. Here it is applied to the rows of a matrix:\n\n>>> sorted_sequence = [[0., 3., 8., 9., 10.],\n... [1., 2., 3., 4., 5.]]\n>>> values = [[9.8, 2.1, 4.3],\n... [0.1, 6.6, 4.5, ]]\n>>> tf.searchsorted(sorted_sequence, values).numpy()\narray([[4, 1, 2],\n [0, 5, 4]], dtype=int32)\n\nNote: This operation assumes that `sorted_sequence` **is sorted** along the\ninnermost axis, maybe using `tf.sort(..., axis=-1)`. **If the sequence is not\nsorted, no error is raised** and the content of the returned tensor is not well\ndefined.\n\nArgs:\n sorted_sequence: N-D `Tensor` containing a sorted sequence.\n values: N-D `Tensor` containing the search values.\n side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to\n upper_bound.\n out_type: The output type (`int32` or `int64`). Default is `tf.int32`.\n name: Optional name for the operation.\n\nReturns:\n An N-D `Tensor` the size of `values` containing the result of applying\n either lower_bound or upper_bound (depending on side) to each value. 
The\n result is not a global index to the entire `Tensor`, but the index in the\n last dimension.\n\nRaises:\n ValueError: If the last dimension of `sorted_sequence >= 2^31-1` elements.\n If the total size of `values` exceeds `2^31 - 1` elements.\n If the first `N-1` dimensions of the two tensors don't match."} +{"repo": "transformers", "function": "def to_tensor_7(self) -> torch.Tensor:\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n return tensor", "docstring": "Converts a transformation to a tensor with 7 final columns, four for the quaternion followed by three for the\ntranslation.\n\nReturns:\n A [*, 7] tensor representation of the transformation"} +{"repo": "transformers", "function": "def preprocess(self, videos: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, patch_size: Optional[List[int]]=None, num_frames: Optional[int]=None, resample: PILImageResampling=None, do_center_crop: Optional[bool]=None, crop_size: Optional[Dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, is_mixed: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, data_format: ChannelDimension=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> BatchFeature:\n do_resize = do_resize if do_resize is not None else self.do_resize\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False)\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size, param_name='crop_size')\n patch_size = patch_size if patch_size is not None else self.patch_size\n num_frames = num_frames if patch_size is not None else self.num_frames\n validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)\n if not valid_images(videos):\n raise ValueError('Invalid image or video type. 
Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n videos = make_batched(videos)\n for video in videos:\n if len(video) > self.num_frames:\n raise ValueError(f'number of frames must not be greater than the maximum frames of the model {self.num_frames}.')\n max_num_frames = max([len(video) for video in videos])\n num_patches_per_image = (size['shortest_edge'] // patch_size[0]) ** 2\n video_masks = np.array([len(video) * num_patches_per_image * [1] + (max_num_frames - len(video)) * num_patches_per_image * [0] for video in videos])\n videos = [[self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for img in video] for video in videos]\n if is_mixed:\n data = {'pixel_values_mixed': videos, 'pixel_mask_mixed': video_masks}\n else:\n data = {'pixel_values': videos, 'pixel_mask': video_masks}\n return BatchFeature(data=data, tensor_type=return_tensors)", "docstring": "Preprocess an videos or image or batch of videos or images.\n\nArgs:\n videos (`ImageInput`):\n Images or videos to preprocess. Expects a single or batch of frames with pixel values ranging from 0 to\n 255. If passing in frames with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after applying resize.\n patch_size (`List[int]` *optional*, defaults to self.patch_size):\n The patch size of image patch embedding.\n num_frames (`int` *optional*, defaults to self.num_frames):\n The maximum number of video frames.\n resample (`PILImageResampling`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only\n has an effect if `do_resize` is set to `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_centre_crop`):\n Whether to centre crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the image after applying the centre crop.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation.\n is_mixed (`bool`, *optional*):\n If the input video has negative samples.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. 
Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the inferred channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n\nReturns:\n [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n\n - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height,\n width).\n\n - **pixel_mask** -- Pixel masks to be fed to a model, of shape (batch_size, num_pixel_patches).\n\n - **pixel_values_mixed** -- Pixel values with both positive or negative to be fed to a model, of shape\n (batch_size, num_channels, height, width).\n\n - **pixel_mask_mixed** -- Pixel masks with both positive or negative to be fed to a model, of shape\n (batch_size, num_pixel_patches)."} +{"repo": "temporian", "function": "def tick_calendar(self: EventSetOrNode, second: Optional[Union[int, Literal['*']]]=None, minute: Optional[Union[int, Literal['*']]]=None, hour: Optional[Union[int, Literal['*']]]=None, mday: Optional[Union[int, Literal['*']]]=None, month: Optional[Union[int, Literal['*']]]=None, wday: Optional[Union[int, Literal['*']]]=None, after_last: bool=True, before_first: bool=False) -> EventSetOrNode:\n from temporian.core.operators.tick_calendar import tick_calendar\n return tick_calendar(self, second=second, minute=minute, hour=hour, mday=mday, month=month, wday=wday, after_last=after_last, before_first=before_first)", "docstring": "Generates events periodically at fixed times or dates e.g. each month.\n\nEvents are generated in the range of the input\n[`EventSet`][temporian.EventSet] independently for each index.\n\nThe usability is inspired in the crontab format, where arguments can\ntake a value of `'*'` to tick at all values, or a fixed integer to\ntick only at that precise value.\n\nNon-specified values (`None`), are set to `'*'` if a finer\nresolution argument is specified, or fixed to the first valid value if\na lower resolution is specified. 
For example, setting only\n`tick_calendar(hour='*')`\nis equivalent to:\n`tick_calendar(second=0, minute=0, hour='*', mday='*', month='*')`\n, resulting in one tick at every exact hour of every day/month/year in\nthe input guide range.\n\nThe datetime timezone is always assumed to be UTC.\n\nExamples:\n ```python\n >>> # Every day (at 00:00:00) in the period (exactly one year)\n >>> a = tp.event_set(timestamps=[\"2021-01-01\", \"2021-12-31 23:59:59\"])\n >>> b = a.tick_calendar(hour=0)\n >>> b\n indexes: ...\n events:\n (366 events):\n timestamps: [...]\n ...\n\n\n >>> # Every day at 2:30am\n >>> b = a.tick_calendar(hour=2, minute=30)\n >>> tp.glue(b.calendar_hour(), b.calendar_minute())\n indexes: ...\n events:\n (366 events):\n timestamps: [...]\n 'calendar_hour': [2 2 2 ... 2 2 2]\n 'calendar_minute': [30 30 30 ... 30 30 30]\n ...\n\n\n >>> # Day 5 of every month (at 00:00)\n >>> b = a.tick_calendar(mday=5)\n >>> b.calendar_day_of_month()\n indexes: ...\n events:\n (13 events):\n timestamps: [...]\n 'calendar_day_of_month': [5 5 5 ... 5 5 5]\n ...\n\n\n >>> # 1st of February of every year\n >>> a = tp.event_set(timestamps=[\"2020-01-01\", \"2021-12-31\"])\n >>> b = a.tick_calendar(month=2)\n >>> tp.glue(b.calendar_day_of_month(), b.calendar_month())\n indexes: ...\n events:\n (3 events):\n timestamps: [...]\n 'calendar_day_of_month': [1 1 1]\n 'calendar_month': [2 2 2]\n ...\n\n >>> # Every second in the period (2 hours -> 7200 seconds)\n >>> a = tp.event_set(timestamps=[\"2020-01-01 00:00:00\",\n ... \"2020-01-01 01:59:59\"])\n >>> b = a.tick_calendar(second='*')\n >>> b\n indexes: ...\n events:\n (7200 events):\n timestamps: [...]\n ...\n\n >>> # Every second of the minute 30 of every hour (00:30 and 01:30)\n >>> a = tp.event_set(timestamps=[\"2020-01-01 00:00\",\n ... \"2020-01-01 02:00\"])\n >>> b = a.tick_calendar(second='*', minute=30)\n >>> b\n indexes: ...\n events:\n (121 events):\n timestamps: [...]\n ...\n\n >>> # Not allowed: intermediate arguments (minute, hour) not specified\n >>> b = a.tick_calendar(second=1, mday=1) # ambiguous meaning\n Traceback (most recent call last):\n ...\n ValueError: Can't set argument to None because previous and\n following arguments were specified. Set to '*' or an integer ...\n\n >>> # not after_last\n >>> a = tp.event_set(timestamps=[\"2020-02-01\", \"2020-04-01\"])\n >>> b = a.tick_calendar(mday=10, after_last=False)\n >>> tp.glue(b.calendar_day_of_month(), b.calendar_month())\n indexes: ...\n events:\n (2 events):\n timestamps: [...]\n 'calendar_day_of_month': [10 10]\n 'calendar_month': [2 3]\n ...\n\n >>> # before_first\n >>> a = tp.event_set(timestamps=[\"2020-02-01\", \"2020-04-01\"])\n >>> b = a.tick_calendar(mday=10, before_first=True)\n >>> tp.glue(b.calendar_day_of_month(), b.calendar_month())\n indexes: ...\n events:\n (4 events):\n timestamps: [...]\n 'calendar_day_of_month': [10 10 10 10]\n 'calendar_month': [1 2 3 4]\n ...\n\n ```\n\nArgs:\n second: '*' (any second), None (auto) or number in range `[0-59]`\n to tick at specific second of each minute.\n minute: '*' (any minute), None (auto) or number in range `[0-59]`\n to tick at specific minute of each hour.\n hour: '*' (any hour), None (auto), or number in range `[0-23]` to\n tick at specific hour of each day.\n mday: '*' (any day), None (auto) or number in range `[1-31]`\n to tick at specific day of each month. 
Note that months\n without some particular day may not have any tick\n (e.g: day 31 on February).\n month: '*' (any month), None (auto) or number in range `[1-12]` to\n tick at one particular month of each year.\n wday: '*' (any day), None (auto) or number in range `[0-6]`\n (Sun-Sat) to tick at particular day of week. Can only be\n specified if `day_of_month` is `None`.\n after_last: If True, a tick after the last timestamp is included.\n Useful for window operations where you want the timestamps\n to be included in the range of the ticks.\n before_first: If True, a tick before the first timestamp is\n included.\n Useful for window operations where you want the timestamps\n to be included in the range of the ticks.\n\nReturns:\n A feature-less EventSet with timestamps at specified interval."} +{"repo": "transformers", "function": "class DonutSwinImageClassifierOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "DonutSwin outputs for image classification.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of\n shape `(batch_size, hidden_size, height, width)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to\n include the spatial dimensions."} +{"repo": "transformers", "function": "class DataCollatorForSeq2Seq:\n tokenizer: PreTrainedTokenizerBase\n model: Optional[Any] = None\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n label_pad_token_id: int = -100\n return_tensors: str = 'pt'\n\n def __call__(self, features, return_tensors=None):\n if return_tensors is None:\n return_tensors = self.return_tensors\n label_name = 'label' if 'label' in features[0].keys() else 'labels'\n labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None\n if labels is not None and all((label is None for label in labels)):\n labels = None\n 
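        # (added note, not in the original source) tokenizer.pad() cannot pad the label sequences, so the labels are stripped out here and padded manually to max_label_length further below.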
non_labels_features = [{k: v for k, v in feature.items() if k != label_name} for feature in features]\n batch = pad_without_fast_tokenizer_warning(self.tokenizer, non_labels_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=return_tensors)\n no_padding = self.padding is False or self.padding == PaddingStrategy.DO_NOT_PAD\n if labels is not None:\n if no_padding:\n if isinstance(features[0][label_name], list):\n batch['labels'] = list(labels)\n else:\n batch['labels'] = [np.concatenate([label, []]) for label in labels]\n else:\n max_padding = self.padding == PaddingStrategy.MAX_LENGTH and self.max_length is not None\n max_label_length = max((len(l) for l in labels)) if not max_padding else self.max_length\n if self.pad_to_multiple_of is not None:\n max_label_length = (max_label_length + self.pad_to_multiple_of - 1) // self.pad_to_multiple_of * self.pad_to_multiple_of\n padding_side = self.tokenizer.padding_side\n if isinstance(features[0][label_name], list):\n batch['labels'] = [label + [self.label_pad_token_id] * (max_label_length - len(label)) if padding_side == 'right' else [self.label_pad_token_id] * (max_label_length - len(label)) + label for label in labels]\n else:\n batch['labels'] = [np.concatenate([label, np.array([self.label_pad_token_id] * (max_label_length - len(label)), dtype=np.int64)]) if padding_side == 'right' else np.concatenate([np.array([self.label_pad_token_id] * (max_label_length - len(label)), dtype=np.int64), label]) for label in labels]\n if batch.get('labels', None) is not None:\n if return_tensors == 'pt':\n import torch\n batch['labels'] = torch.tensor(batch['labels'], dtype=torch.int64)\n elif return_tensors == 'tf':\n import tensorflow as tf\n batch['labels'] = tf.constant(batch['labels'], dtype=tf.int64)\n else:\n batch['labels'] = np.array(batch['labels'], dtype=np.int64)\n else:\n batch['labels'] = None\n if labels is not None and self.model is not None and hasattr(self.model, 'prepare_decoder_input_ids_from_labels'):\n decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=batch['labels'])\n batch['decoder_input_ids'] = decoder_input_ids\n return batch", "docstring": "Data collator that will dynamically pad the inputs received, as well as the labels.\n\nArgs:\n tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):\n The tokenizer used for encoding the data.\n model ([`PreTrainedModel`], *optional*):\n The model that is being trained. 
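A hedged usage sketch for this collator (the checkpoint name and token ids are placeholders; `-100` entries mark padded label positions that PyTorch loss functions ignore):

```python
from transformers import AutoTokenizer, DataCollatorForSeq2Seq

tokenizer = AutoTokenizer.from_pretrained("t5-small")  # placeholder checkpoint
collator = DataCollatorForSeq2Seq(tokenizer, label_pad_token_id=-100, return_tensors="pt")

features = [
    {"input_ids": [37, 423, 12, 1], "labels": [99, 1]},
    {"input_ids": [37, 423, 1], "labels": [99, 14, 5, 1]},
]
batch = collator(features)
# input_ids are padded with tokenizer.pad_token_id; labels with -100:
# batch["labels"] == tensor([[99, 1, -100, -100], [99, 14, 5, 1]])
```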
If set and has the *prepare_decoder_input_ids_from_labels*, use it to\n prepare the *decoder_input_ids*\n\n This is useful when using *label_smoothing* to avoid calculating loss twice.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single\n sequence is provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (`int`, *optional*):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.0 (Volta).\n label_pad_token_id (`int`, *optional*, defaults to -100):\n The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).\n return_tensors (`str`, *optional*, defaults to `\"pt\"`):\n The type of Tensor to return. Allowable values are \"np\", \"pt\" and \"tf\"."} +{"repo": "starthinker", "function": "def recipe_cm360_to_dv360(config, auth_dv, auth_cm, auth_sheet, auth_bigquery, recipe_name, recipe_slug, command):\n dataset(config, {'__comment__': 'Ensure dataset exists.', 'auth': auth_bigquery, 'dataset': recipe_slug})\n drive(config, {'__comment__': 'Copy the default template to sheet with the recipe name', 'auth': auth_sheet, 'copy': {'source': 'https://docs.google.com/spreadsheets/d/1XjEHq-nEFMW8RVmCNJ-TVGvVcVBEADzjbvhmAvF04iQ/edit#gid=594912061', 'destination': recipe_name}})\n cm_to_dv(config, {'__comment': 'Depending on users choice, execute a different part of the solution.', 'auth_dv': auth_dv, 'auth_cm': auth_cm, 'auth_sheets': auth_sheet, 'auth_bigquery': auth_bigquery, 'sheet': recipe_name, 'dataset': recipe_slug, 'command': command})", "docstring": "Allows bulk creating DV360 Insertion Orders and Line Items from CM360.\n\nArgs:\n auth_dv (authentication) - Credentials used for dv.\n auth_cm (authentication) - Credentials used for dv.\n auth_sheet (authentication) - Credentials used for sheet.\n auth_bigquery (authentication) - Credentials used for bigquery.\n recipe_name (string) - Name of Google Sheet to create.\n recipe_slug (string) - Name of Google BigQuery dataset to create.\n command (choice) - Action to take."} +{"repo": "tensorflow", "function": "def _source_file_paths_outside_tensorflow_py_library(code_defs, id_to_string):\n file_ids = set()\n for code_def in code_defs:\n for trace in code_def.traces:\n file_ids.add(trace.file_id)\n non_tf_files = (id_to_string[file_id] for file_id in file_ids)\n non_tf_files = (f for f in non_tf_files if not source_utils.guess_is_tensorflow_py_library(f) and gfile.Exists(f))\n return non_tf_files", "docstring": "Extract source file paths outside TensorFlow Python library.\n\nArgs:\n code_defs: An iterable of `CodeDef` protos, i.e., an iterable of stack\n traces.\n id_to_string: A proto map from integer ids to strings.\n\nReturns:\n An iterable of source file paths outside the TensorFlow Python library."} +{"repo": "beam", "function": "def Count3(pcoll, 
factor=1):\n return pcoll | 'PairWithOne' >> beam.Map(lambda v: (v, factor)) | beam.CombinePerKey(sum)", "docstring": "Count as a decorated function with a side input.\n\nArgs:\n pcoll: the PCollection passed in from the previous transform\n factor: the amount by which to count\n\nReturns:\n A PCollection counting the number of times each unique element occurs."} +{"repo": "tensorflow", "function": "def _assertOpOutputMatchesExpected(self, op, axis, output_type, op_input, expected):\n with self.session() as session:\n with self.test_scope():\n pinp = array_ops.placeholder(dtypes.as_dtype(op_input.dtype), op_input.shape, name='a')\n output = op(pinp, axis=axis, output_type=output_type)\n result = session.run(output, {pinp: op_input})\n self.assertAllEqual(result, expected)", "docstring": "Verifies that 'op' produces 'expected' when fed input 'op_input' .\n\nArgs:\n op: argmin or argmax operator to test.\n axis: integer axis to reduce across.\n output_type: numpy datatype of the output to produce.\n op_input: numpy input array to use as input to 'op'.\n expected: numpy array representing the expected output of 'op'."} +{"repo": "transformers", "function": "def call(self, input_values: tf.Tensor, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:\n output_hidden_states = output_hidden_states if output_hidden_states else self.config.output_hidden_states\n output_attentions = output_attentions if output_attentions else self.config.output_attentions\n return_dict = return_dict if return_dict else self.config.return_dict\n outputs = self.hubert(input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoProcessor, TFHubertModel\n>>> from datasets import load_dataset\n>>> import soundfile as sf\n\n>>> processor = AutoProcessor.from_pretrained(\"facebook/hubert-large-ls960-ft\")\n>>> model = TFHubertModel.from_pretrained(\"facebook/hubert-large-ls960-ft\")\n\n\n>>> def map_to_array(batch):\n... speech, _ = sf.read(batch[\"file\"])\n... batch[\"speech\"] = speech\n... 
return batch\n\n\n>>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n>>> ds = ds.map(map_to_array)\n\n>>> input_values = processor(ds[\"speech\"][0], return_tensors=\"tf\").input_values # Batch size 1\n>>> hidden_states = model(input_values).last_hidden_state\n```"} +{"repo": "transformers", "function": "class DebugUnderflowOverflow:\n\n def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):\n self.model = model\n self.trace_batch_nums = trace_batch_nums\n self.abort_after_batch_num = abort_after_batch_num\n self.frames = collections.deque([], max_frames_to_save)\n self.frame = []\n self.batch_number = 0\n self.total_calls = 0\n self.detected_overflow = False\n self.prefix = ' '\n self.analyse_model()\n self.register_forward_hook()\n\n def save_frame(self, frame=None):\n if frame is not None:\n self.expand_frame(frame)\n self.frames.append('\\n'.join(self.frame))\n self.frame = []\n\n def expand_frame(self, line):\n self.frame.append(line)\n\n def trace_frames(self):\n print('\\n'.join(self.frames))\n self.frames = []\n\n def reset_saved_frames(self):\n self.frames = []\n\n def dump_saved_frames(self):\n print(f'\\nDetected inf/nan during batch_number={self.batch_number}')\n print(f'Last {len(self.frames)} forward frames:')\n print(f'{'abs min':8} {'abs max':8} metadata')\n print('\\n'.join(self.frames))\n print('\\n\\n')\n self.frames = []\n\n def analyse_model(self):\n self.module_names = {m: name for name, m in self.model.named_modules()}\n\n def analyse_variable(self, var, ctx):\n if torch.is_tensor(var):\n self.expand_frame(get_abs_min_max(var, ctx))\n if detect_overflow(var, ctx):\n self.detected_overflow = True\n elif var is None:\n self.expand_frame(f'{'None':>17} {ctx}')\n else:\n self.expand_frame(f'{'not a tensor':>17} {ctx}')\n\n def batch_start_frame(self):\n self.expand_frame(f'\\n\\n{self.prefix} *** Starting batch number={self.batch_number} ***')\n self.expand_frame(f'{'abs min':8} {'abs max':8} metadata')\n\n def batch_end_frame(self):\n self.expand_frame(f'{self.prefix} *** Finished batch number={self.batch_number - 1} ***\\n\\n')\n\n def create_frame(self, module, input, output):\n self.expand_frame(f'{self.prefix} {self.module_names[module]} {module.__class__.__name__}')\n for name, p in module.named_parameters(recurse=False):\n self.analyse_variable(p, name)\n if isinstance(input, tuple):\n for i, x in enumerate(input):\n self.analyse_variable(x, f'input[{i}]')\n else:\n self.analyse_variable(input, 'input')\n if isinstance(output, tuple):\n for i, x in enumerate(output):\n if isinstance(x, tuple):\n for j, y in enumerate(x):\n self.analyse_variable(y, f'output[{i}][{j}]')\n else:\n self.analyse_variable(x, f'output[{i}]')\n else:\n self.analyse_variable(output, 'output')\n self.save_frame()\n\n def register_forward_hook(self):\n self.model.apply(self._register_forward_hook)\n\n def _register_forward_hook(self, module):\n module.register_forward_hook(self.forward_hook)\n\n def forward_hook(self, module, input, output):\n last_frame_of_batch = False\n trace_mode = True if self.batch_number in self.trace_batch_nums else False\n if trace_mode:\n self.reset_saved_frames()\n if self.total_calls == 0:\n self.batch_start_frame()\n self.total_calls += 1\n if module == self.model:\n self.batch_number += 1\n last_frame_of_batch = True\n self.create_frame(module, input, output)\n if trace_mode:\n self.trace_frames()\n if last_frame_of_batch:\n self.batch_start_frame()\n if 
self.detected_overflow and (not trace_mode):\n self.dump_saved_frames()\n raise ValueError('DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. Please scroll up above this traceback to see the activation values prior to this event.')\n if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num:\n raise ValueError(f'DebugUnderflowOverflow: aborting after {self.batch_number} batches due to `abort_after_batch_num={self.abort_after_batch_num}` arg')", "docstring": "This debug class helps detect and understand where the model starts getting very large or very small, and more\nimportantly `nan` or `inf` weight and activation elements.\n\nThere are 2 working modes:\n\n1. Underflow/overflow detection (default)\n2. Specific batch absolute min/max tracing without detection\n\nMode 1: Underflow/overflow detection\n\nTo activate the underflow/overflow detection, initialize the object with the model :\n\n```python\ndebug_overflow = DebugUnderflowOverflow(model)\n```\n\nthen run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or output\nelements this module will throw an exception and will print `max_frames_to_save` frames that lead to this event,\neach frame reporting\n\n1. the fully qualified module name plus the class name whose `forward` was run\n2. the absolute min and max value of all elements for each module weights, and the inputs and output\n\nFor example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16\nmixed precision :\n\n```\nDetected inf/nan during batch_number=0\nLast 21 forward frames:\nabs min abs max metadata\n[...]\n encoder.block.2.layer.1.DenseReluDense.wi_0 Linear\n2.17e-07 4.50e+00 weight\n1.79e-06 4.65e+00 input[0]\n2.68e-06 3.70e+01 output\n encoder.block.2.layer.1.DenseReluDense.wi_1 Linear\n8.08e-07 2.66e+01 weight\n1.79e-06 4.65e+00 input[0]\n1.27e-04 2.37e+02 output\n encoder.block.2.layer.1.DenseReluDense.wo Linear\n1.01e-06 6.44e+00 weight\n0.00e+00 9.74e+03 input[0]\n3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense\n1.79e-06 4.65e+00 input[0]\n3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.dropout Dropout\n3.18e-04 6.27e+04 input[0]\n0.00e+00 inf output\n```\n\nYou can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value was\naround 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which\nrenormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than\n64K, and we get an overflow.\n\nAs you can see it's the previous frames that we need to look into when the numbers start going into very large for\nfp16 numbers.\n\nThe tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.\n\nBy default the last 21 frames are printed. You can change the default to adjust for your needs. For example :\n\n```python\ndebug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)\n```\n\n To validate that you have set up this debugging feature correctly, and you intend to use it in a training that\n may take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in\n the next section.\n\n\n Mode 2. 
Specific batch absolute min/max tracing without detection\n\n    The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.\n\n    Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a\ngiven batch, and only do that for batches 1 and 3. Then you instantiate this class as:\n\n```python\ndebug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3])\n```\n\nAnd now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.\n\nThis is helpful if you know that the program starts misbehaving after a certain batch number, so you can\nfast-forward right to that area.\n\n\nEarly stopping:\n\nYou can also specify the batch number after which to stop the training, with:\n\n```python\ndebug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1, 3], abort_after_batch_num=3)\n```\n\nThis feature is mainly useful in the tracing mode, but you can use it for any mode.\n\n\n**Performance**:\n\nAs this module measures absolute `min`/`max` of each weight of the model on every forward it'll slow the training\ndown. Therefore remember to turn it off once the debugging needs have been met.\n\nArgs:\n    model (`nn.Module`):\n        The model to debug.\n    max_frames_to_save (`int`, *optional*, defaults to 21):\n        How many frames back to record.\n    trace_batch_nums (`List[int]`, *optional*, defaults to `[]`):\n        Which batch numbers to trace (turns detection off).\n    abort_after_batch_num (`int`, *optional*):\n        The batch number after which to abort the training."} +{"repo": "tensorflow", "function": "def join(self) -> None:\n    self._is_thread_joined = True\n    self._thread.join()\n    if self._exception is not None:\n        self._testcase.fail('Error in checkedThread: %s' % str(self._exception))", "docstring": "Blocks until the thread terminates.\n\nRaises:\n    self._testcase.failureException: If the thread terminates due to\n    an exception."} +{"repo": "budoux", "function": "def parse_args(test: typing.Optional[typing.List[str]]=None) -> argparse.Namespace:\n    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)\n    parser.add_argument('train_data', help='File path for the encoded training data.')\n    parser.add_argument('base_model', help='File path for the base model file.')\n    parser.add_argument('-o', '--output', help=f'File path for the output weights. (default: {DEFAULT_OUTPUT_NAME})', type=str, default=DEFAULT_OUTPUT_NAME)\n    parser.add_argument('--val-data', help='File path for the encoded validation data.', type=str)\n    parser.add_argument('--iters', help=f'Number of iterations for training. (default: {DEFAULT_NUM_ITERS})', type=int, default=DEFAULT_NUM_ITERS)\n    parser.add_argument('--log-span', help=f'Iteration span to print metrics. (default: {DEFAULT_LOG_SPAN})', type=int, default=DEFAULT_LOG_SPAN)\n    parser.add_argument('--learning-rate', help=f'Learning rate. (default: {DEFAULT_LEARNING_RATE})', type=float, default=DEFAULT_LEARNING_RATE)\n    if test is None:\n        return parser.parse_args()\n    else:\n        return parser.parse_args(test)", "docstring": "Parses commandline arguments.\n\nArgs:\n    test (typing.Optional[typing.List[str]], optional): Commandline args for\n        testing. 
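A small usage sketch for the parser above (file names are placeholders; passing a list via `test` substitutes for `sys.argv`, which is what makes the function testable):

```python
# Parse a synthetic argv list instead of sys.argv (placeholder file names).
args = parse_args(["encoded_train.txt", "base_model.json", "--iters", "100"])
print(args.train_data, args.base_model, args.iters)  # encoded_train.txt base_model.json 100
```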
Defaults to None.\n\nReturns:\n Parsed arguments (argparse.Namespace)."} +{"repo": "tensorflow", "function": "def compute_stats(array):\n q1 = np.percentile(array, 25)\n q3 = np.percentile(array, 75)\n low = q1 - 1.5 * (q3 - q1)\n high = q3 + 1.5 * (q3 - q1)\n filtered_array = list(filter(lambda x: low <= x and x <= high, array))\n mean = np.mean(filtered_array)\n min_val = np.min(filtered_array)\n max_val = np.max(filtered_array)\n max_diff = max(max_val - mean, mean - min_val)\n diff = max_diff / mean * 100.0\n return (mean, diff)", "docstring": "Reports mean and \u00b1 range for the given array.\n\nThe range computation follows benchstat's.\n\nArgs:\n array: The array to compute stats for.\n\nReturns:\n mean and \u00b1 %diff range."} +{"repo": "transformers", "function": "def correct_tables(self, generation: str) -> str:\n for l in generation.split('\\n'):\n if l.count('\\\\begin{tabular}') > 15 or l.count('\\\\multicolumn') > 60 or l.count('&') > 400:\n generation = generation.replace(l, '')\n generation = generation.replace('\\\\begin{table} \\\\begin{tabular}', '\\\\begin{table}\\n\\\\begin{tabular}')\n generation = generation.replace('\\\\end{tabular} \\\\end{table}', '\\\\end{tabular}\\n\\\\end{table}')\n generation = generation.replace('\\\\end{table} Tab', '\\\\end{table}\\nTab')\n generation = re.sub('(^.+)\\\\\\\\begin{tab', '\\\\1\\\\n\\\\\\\\begin{tab', generation, flags=re.M)\n generation = generation.replace('\\\\begin{tabular}{l l} & \\\\\\\\ \\\\end{tabular}', '')\n generation = generation.replace('\\\\begin{tabular}{}\\n\\n\\\\end{tabular}', '')\n return generation", "docstring": " Takes a generated string and fixes tables/tabulars to make them match the markdown format needed.\n\n Args:\n generation (str): The generated text to be postprocessed.\n\n Returns:\n str: The postprocessed text.\n\n Example:\n\n ```python\n correct_tables(\"\\begin{table} \\begin{tabular}{l l} & \\ \\end{tabular} \\end{table}\")\n \"\\begin{table}\n\\begin{tabular}{l l} & \\ \\end{tabular}\n\\end{table}\"\n ```\n "} +{"repo": "keras", "function": "class Conv1D(BaseConv):\n\n def __init__(self, filters, kernel_size, strides=1, padding='valid', data_format=None, dilation_rate=1, groups=1, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs):\n super().__init__(rank=1, filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, groups=groups, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, **kwargs)\n\n def _compute_causal_padding(self):\n left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)\n if self.data_format == 'channels_last':\n causal_padding = [[0, 0], [left_pad, 0], [0, 0]]\n else:\n causal_padding = [[0, 0], [0, 0], [left_pad, 0]]\n return causal_padding\n\n def call(self, inputs):\n padding = self.padding\n if self.padding == 'causal':\n inputs = ops.pad(inputs, self._compute_causal_padding())\n padding = 'valid'\n outputs = ops.conv(inputs, self.kernel, strides=list(self.strides), padding=padding, dilation_rate=self.dilation_rate, data_format=self.data_format)\n if self.use_bias:\n if 
self.data_format == 'channels_last':\n bias_shape = (1,) * (self.rank + 1) + (self.filters,)\n else:\n bias_shape = (1, self.filters) + (1,) * self.rank\n bias = ops.reshape(self.bias, bias_shape)\n outputs = ops.add(outputs, bias)\n if self.activation is not None:\n return self.activation(outputs)\n return outputs", "docstring": "1D convolution layer (e.g. temporal convolution).\n\nThis layer creates a convolution kernel that is convolved with the layer\ninput over a single spatial (or temporal) dimension to produce a tensor of\noutputs. If `use_bias` is True, a bias vector is created and added to the\noutputs. Finally, if `activation` is not `None`, it is applied to the\noutputs as well.\n\nArgs:\n filters: int, the dimension of the output space (the number of filters\n in the convolution).\n kernel_size: int or tuple/list of 1 integer, specifying the size of the\n convolution window.\n strides: int or tuple/list of 1 integer, specifying the stride length\n of the convolution. `strides > 1` is incompatible with\n `dilation_rate > 1`.\n padding: string, `\"valid\"`, `\"same\"` or `\"causal\"`(case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input. When `padding=\"same\"` and\n `strides=1`, the output has the same size as the input.\n `\"causal\"` results in causal(dilated) convolutions, e.g. `output[t]`\n does not depend on`input[t+1:]`. Useful when modeling temporal data\n where the model should not violate the temporal order.\n See [WaveNet: A Generative Model for Raw Audio, section2.1](\n https://arxiv.org/abs/1609.03499).\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, steps, features)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, features, steps)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.\n dilation_rate: int or tuple/list of 1 integers, specifying the dilation\n rate to use for dilated convolution.\n groups: A positive int specifying the number of groups in which the\n input is split along the channel axis. Each group is convolved\n separately with `filters // groups` filters. The output is the\n concatenation of all the `groups` results along the channel axis.\n Input channels and `filters` must both be divisible by `groups`.\n activation: Activation function. If `None`, no activation is applied.\n use_bias: bool, if `True`, bias will be added to the output.\n kernel_initializer: Initializer for the convolution kernel. If `None`,\n the default initializer (`\"glorot_uniform\"`) will be used.\n bias_initializer: Initializer for the bias vector. If `None`, the\n default initializer (`\"zeros\"`) will be used.\n kernel_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n kernel_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The\n function must take as input the unprojected variable and must return\n the projected variable (which must have the same shape). 
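To make the `"causal"` padding option described in this docstring concrete, a minimal sketch (assumes a working `keras` install; shapes follow the docstring):

```python
import numpy as np
import keras

x = np.random.rand(4, 10, 128)  # (batch, steps, features)
# Causal padding pads only on the left, so output[t] never depends on
# input[t+1:], and with stride 1 the number of steps is preserved.
y = keras.layers.Conv1D(32, 3, padding="causal")(x)
print(y.shape)  # (4, 10, 32)
```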
Constraints\n are not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n\nInput shape:\n\n- If `data_format=\"channels_last\"`:\n A 3D tensor with shape: `(batch_shape, steps, channels)`\n- If `data_format=\"channels_first\"`:\n A 3D tensor with shape: `(batch_shape, channels, steps)`\n\nOutput shape:\n\n- If `data_format=\"channels_last\"`:\n A 3D tensor with shape: `(batch_shape, new_steps, filters)`\n- If `data_format=\"channels_first\"`:\n A 3D tensor with shape: `(batch_shape, filters, new_steps)`\n\nReturns:\n A 3D tensor representing `activation(conv1d(inputs, kernel) + bias)`.\n\nRaises:\n ValueError: when both `strides > 1` and `dilation_rate > 1`.\n\nExample:\n\n>>> # The inputs are 128-length vectors with 10 timesteps, and the\n>>> # batch size is 4.\n>>> x = np.random.rand(4, 10, 128)\n>>> y = keras.layers.Conv1D(32, 3, activation='relu')(x)\n>>> print(y.shape)\n(4, 8, 32)"} +{"repo": "transformers", "function": "def stringify_default(default: Any) -> str:\n if isinstance(default, bool):\n return f'`{default}`'\n elif isinstance(default, enum.Enum):\n return f'`{str(default)}`'\n elif isinstance(default, int):\n return str(default)\n elif isinstance(default, float):\n result = str(default)\n return str(round(default, 2)) if len(result) > 6 else result\n elif isinstance(default, str):\n return str(default) if default.isnumeric() else f'`\"{default}\"`'\n elif isinstance(default, type):\n return f'`{default.__name__}`'\n else:\n return f'`{default}`'", "docstring": "Returns the string representation of a default value, as used in docstring: numbers are left as is, all other\nobjects are in backtiks.\n\nArgs:\n default (`Any`): The default value to process\n\nReturns:\n `str`: The string representation of that default."} +{"repo": "transformers", "function": "def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.LongTensor):\n image_tokens = self.get_image_tokens(pixel_values, image_sizes)\n split_sizes = [height // self.vqmodel.vision_spatial_factor * (width // self.vqmodel.vision_spatial_factor + 1) for height, width in image_sizes]\n image_features = self.get_input_embeddings()(image_tokens)\n image_features = torch.split(image_features, split_sizes)\n return image_features", "docstring": "Tokenizes images into discrete tokens with VQGAN module and embeds\nthem with text embeddings layer\n\nArgs:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)):\n The tensors corresponding to the input images."} +{"repo": "temporian", "function": "def to_csv(pipe: BeamEventSet, file_path_prefix: str, schema: Schema, timestamp_key: str='timestamp', **wargs):\n header_values = [timestamp_key] + schema.index_names() + schema.feature_names()\n header_string = io.StringIO()\n header_writer = csv.writer(header_string)\n header_writer.writerow(header_values)\n return add_feature_idx_and_flatten(pipe) | 'Group by features' >> beam.GroupByKey() | 'Convert to csv' >> beam.Map(_convert_to_csv) | 'Write csv' >> beam.io.textio.WriteToText(file_path_prefix=file_path_prefix, header=header_string.getvalue(), append_trailing_newlines=False, **wargs)", "docstring": "Writes a Beam EventSet to a file or set of csv files.\n\nLimitation: Timestamps are always stored as numerical values.\nTODO: Support datetime timestamps.\n\nUsage example:\n\n```\ninput_node: tp.EventSetNode = ...\n( p\n | tpb.from_csv(\"/input.csv\", 
input_node.schema)\n | ... # processing\n | tpb.to_csv(\"/output.csv\", output_node.schema)\n)\n```\n\nArgs:\n pipe: Beam pipe containing an EventSet.\n file_path_prefix: Path or path matching expression compatible with\n WriteToText.\n schema: Schema of the data. If you have a Temporian node, the schema is\n available with `node.schema`.\n timestamp_key: Key containing the timestamps.\n **wargs: Arguments passed to `beam.io.textio.WriteToText`."} +{"repo": "beam", "function": "class BigQueryVectorSearchParameters:\n project: str\n table_name: str\n embedding_column: str\n columns: List[str]\n neighbor_count: int\n metadata_restriction_template: Optional[Union[str, Callable[[Chunk], str]]] = None\n distance_type: Optional[str] = None\n options: Optional[Dict[str, Any]] = None\n include_distance: bool = False\n\n def _format_restrict(self, chunk: Chunk) -> str:\n assert self.metadata_restriction_template is not None, 'metadata_restriction_template cannot be None when formatting. This indicates a logical error in the code.'\n if callable(self.metadata_restriction_template):\n return self.metadata_restriction_template(chunk)\n return self.metadata_restriction_template.format(**chunk.metadata)\n\n def format_query(self, chunks: List[Chunk]) -> str:\n \"\"\"Format the vector search query template.\"\"\"\n base_columns_str = ', '.join((f'base.{col}' for col in self.columns))\n columns_str = ', '.join(self.columns)\n distance_clause = f\", distance_type => '{self.distance_type}'\" if self.distance_type else ''\n options_clause = f', options => {self.options}' if self.options else ''\n metadata_fn = '\\n CREATE TEMP FUNCTION check_metadata(\\n metadata ARRAY>, \\n search_key STRING, \\n search_value STRING\\n ) \\n AS ((\\n SELECT COUNT(*) > 0 \\n FROM UNNEST(metadata) \\n WHERE key = search_key AND value = search_value\\n ));\\n ' if self.metadata_restriction_template else ''\n condition_groups = defaultdict(list)\n if self.metadata_restriction_template:\n for chunk in chunks:\n condition = self._format_restrict(chunk)\n condition_groups[condition].append(chunk)\n else:\n condition_groups[''] = chunks\n vector_searches = []\n for condition, group_chunks in condition_groups.items():\n embedding_unions = []\n for chunk in group_chunks:\n if chunk.embedding is None or chunk.embedding.dense_embedding is None:\n raise ValueError(f'Chunk {chunk.id} missing embedding')\n embedding_str = f\"SELECT '{chunk.id}' as id, {[float(x) for x in chunk.embedding.dense_embedding]} as embedding\"\n embedding_unions.append(embedding_str)\n group_embeddings = ' UNION ALL '.join(embedding_unions)\n where_clause = f'WHERE {condition}' if condition else ''\n vector_search = f\"\\n SELECT \\n query.id,\\n ARRAY_AGG(\\n STRUCT({('distance, ' if self.include_distance else '')} {base_columns_str})\\n ) as chunks\\n FROM VECTOR_SEARCH(\\n (SELECT {columns_str}, {self.embedding_column} \\n FROM `{self.table_name}`\\n {where_clause}),\\n '{self.embedding_column}',\\n (SELECT * FROM ({group_embeddings})),\\n top_k => {self.neighbor_count}\\n {distance_clause}\\n {options_clause}\\n )\\n GROUP BY query.id\\n \"\n vector_searches.append(vector_search)\n combined_searches = ' UNION ALL '.join(vector_searches)\n return f'\\n {metadata_fn}\\n\\n {combined_searches}\\n '", "docstring": "Parameters for configuring BigQuery vector similarity search.\n\nThis class is used by BigQueryVectorSearchEnrichmentHandler to perform\nvector similarity search using BigQuery's VECTOR_SEARCH function. 
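As a sketch of how these parameters render the VECTOR_SEARCH SQL via `format_query` (the `Chunk`/`Content`/`Embedding` constructors and their defaults are assumptions about `apache_beam.ml.rag.types`, which is not shown in this record; table and project names are placeholders):

```python
from apache_beam.ml.rag.types import Chunk, Content, Embedding

params = BigQueryVectorSearchParameters(
    project="my-project",                     # placeholder
    table_name="project.dataset.embeddings",  # placeholder
    embedding_column="embedding",
    columns=["content"],
    neighbor_count=5,
)
chunk = Chunk(content=Content(text="hello"),
              embedding=Embedding(dense_embedding=[0.1, 0.2, 0.3]))
print(params.format_query([chunk]))  # the VECTOR_SEARCH SQL for this one chunk
```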
It\nprocesses :class:`~apache_beam.ml.rag.types.Chunk` objects that contain\n:class:`~apache_beam.ml.rag.types.Embedding` and returns similar vectors\nfrom a BigQuery table.\n\nBigQueryVectorSearchEnrichmentHandler is used with\n:class:`~apache_beam.transforms.enrichment.Enrichment` transform to enrich\nChunks with similar content from a vector database. For example:\n\n>>> # Create search parameters\n>>> params = BigQueryVectorSearchParameters(\n...     table_name='project.dataset.embeddings',\n...     embedding_column='embedding',\n...     columns=['content'],\n...     neighbor_count=5\n... )\n>>> # Use in pipeline\n>>> enriched = (\n...     chunks\n...     | \"Generate Embeddings\" >> MLTransform(...)\n...     | \"Find Similar\" >> Enrichment(\n...         BigQueryVectorSearchEnrichmentHandler(\n...             project='my-project',\n...             vector_search_parameters=params\n...         )\n...     )\n... )\n\nBigQueryVectorSearchParameters encapsulates the configuration needed to\nperform vector similarity search using BigQuery's VECTOR_SEARCH function.\nIt handles formatting the query with proper embedding vectors and metadata\nrestrictions.\n\nExample with flattened metadata column:\n\nTable schema::\n\n    embedding: ARRAY<FLOAT64>  # Vector embedding\n    content: STRING  # Document content\n    language: STRING  # Direct metadata column\n\nCode::\n\n    >>> params = BigQueryVectorSearchParameters(\n    ...     table_name='project.dataset.embeddings',\n    ...     embedding_column='embedding',\n    ...     columns=['content', 'language'],\n    ...     neighbor_count=5,\n    ...     # For column 'language', value comes from \n    ...     # chunk.metadata['language']\n    ...     metadata_restriction_template=\"language = '{language}'\"\n    ... )\n    >>> # When processing a chunk with metadata={'language': 'en'},\n    >>> # generates: WHERE language = 'en'\n\nExample with nested repeated metadata:\n\nTable schema::\n\n    embedding: ARRAY<FLOAT64>  # Vector embedding\n    content: STRING  # Document content\n    metadata: ARRAY<STRUCT<  # Nested repeated metadata\n        key: STRING,\n        value: STRING\n    >>\n\nCode::\n\n    >>> params = BigQueryVectorSearchParameters(\n    ...     table_name='project.dataset.embeddings',\n    ...     embedding_column='embedding',\n    ...     columns=['content', 'metadata'],\n    ...     neighbor_count=5,\n    ...     # check_metadata(field_name, key_to_search, value_from_chunk)\n    ...     metadata_restriction_template=(\n    ...         \"check_metadata(metadata, 'language', '{language}')\"\n    ...     )\n    ... )\n    >>> # When processing a chunk with metadata={'language': 'en'},\n    >>> # generates: WHERE check_metadata(metadata, 'language', 'en')\n    >>> # Searches for {key: 'language', value: 'en'} in metadata array\n\nArgs:\n    project: GCP project ID containing the BigQuery dataset.\n    table_name: Fully qualified BigQuery table name containing vectors.\n    embedding_column: Column name containing the embedding vectors.\n    columns: List of columns to retrieve from matched vectors.\n    neighbor_count: Number of similar vectors to return (top-k).\n    metadata_restriction_template: Template string or callable for filtering\n      vectors. Template string supports two formats:\n      \n      1. For flattened metadata columns: \n         ``column_name = '{metadata_key}'`` where column_name is the \n         BigQuery column and metadata_key is used to get the value from \n         chunk.metadata[metadata_key].\n      2. For nested repeated metadata (ARRAY<STRUCT<key STRING, value STRING>>):\n         ``check_metadata(field_name, 'key_to_match', '{metadata_key}')``\n         where field_name is the ARRAY<STRUCT> column in BigQuery,\n         key_to_match is the literal key to search for in the array, and\n         metadata_key is used to get value from\n         chunk.metadata[metadata_key].\n      \n      Multiple conditions can be combined using AND/OR operators. For\n      example::\n      \n          >>> # Combine metadata check with column filter\n          >>> template = (\n          ...     \"check_metadata(metadata, 'language', '{language}') \"\n          ...     \"AND source = '{source}'\"\n          ... )\n          >>> # When chunk.metadata = {'language': 'en', 'source': 'web'}\n          >>> # Generates: WHERE \n          >>> #   check_metadata(metadata, 'language', 'en')\n          >>> #   AND source = 'web'\n      \n    distance_type: Optional distance metric to use. Supported values:\n      COSINE (default), EUCLIDEAN, or DOT_PRODUCT.\n    options: Optional dictionary of additional VECTOR_SEARCH options.\n    include_distance: Returns the vector search similarity score if True."} +{"repo": "tf-quant-finance", "function": "def make_basket_put_payoff(strike_price, dtype=None, name=None):\n    strike_price = tf.convert_to_tensor(strike_price, dtype=dtype, name='strike_price')\n    put_valuer = functools.partial(_put_valuer, strike_price=strike_price, dtype=dtype, name=name)\n    return put_valuer", "docstring": "Produces a callable from samples to payoff of a simple basket put option.\n\nArgs:\n  strike_price: A `Tensor` of `dtype` consistent with `samples` and shape\n    `[num_samples, num_strikes]`.\n  dtype: Optional `dtype`. Either `tf.float32` or `tf.float64`. The `dtype`,\n    if supplied, represents the `dtype` for the 'strike_price' as well as\n    for the input argument of the output payoff callable.\n    Default value: `None`, which means that the `dtype` inferred by TensorFlow\n    is used.\n  name: Python `str` name prefixed to Ops created by the callable created\n    by this function.\n    Default value: `None`, which is mapped to the default name 'put_valuer'.\n\nReturns:\n  A callable from `Tensor` of shape `[num_samples, num_exercise_times, dim]`\n  and a scalar `Tensor` representing current time to a `Tensor` of shape\n  `[num_samples, num_strikes]`."} +{"repo": "transformers", "function": "def get_image_features(self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: Optional[Union[int, List[int]]]=None, **kwargs):\n    vision_feature_layer = vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer\n    kwargs = {k: v for k, v in kwargs.items() if v is not None}\n    image_outputs = self.vision_tower(pixel_values, image_sizes=image_sizes, output_hidden_states=True, **kwargs)\n    if isinstance(vision_feature_layer, int):\n        selected_image_feature = image_outputs.hidden_states[vision_feature_layer]\n    else:\n        hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]\n        selected_image_feature = torch.cat(hs_pool, dim=-1)\n    image_features = self.multi_modal_projector(selected_image_feature.squeeze(0), image_sizes)\n    downsample_ratio = self.vision_tower.patch_size * self.config.spatial_merge_size\n    split_sizes = [height // downsample_ratio * (width // downsample_ratio) for height, width in image_sizes]\n    image_features = torch.split(image_features.squeeze(0), split_sizes)\n    return image_features", "docstring": "Obtains image last hidden states from the vision tower and applies multimodal projection.\n\nArgs:\n    pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):\n        The tensors corresponding to the input images.\n    
vision_feature_layer (`Union[int, List[int]]`, *optional*):\n The index of the layer to select the vision feature. If multiple indices are provided,\n the vision feature of the corresponding indices will be concatenated to form the\n vision features.\n image_sizes (`torch.Tensor`, *optional*):\n Tensor containing the image sizes as returned by the processor.\nReturns:\n image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`)."} +{"repo": "transformers", "function": "class AcceleratorConfig:\n split_batches: bool = field(default=False, metadata={'help': 'Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set in your script multiplied by the number of processes.'})\n dispatch_batches: Optional[bool] = field(default=None, metadata={'help': 'If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose underlying dataset is an `IterableDataslet`, `False` otherwise.'})\n even_batches: bool = field(default=True, metadata={'help': 'If set to `True`, in cases where the total batch size across all processes does not exactly divide the dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among all workers.'})\n use_seedable_sampler: bool = field(default=True, metadata={'help': 'Whether or not use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]).Ensures training results are fully reproducible using a different sampling technique. While seed-to-seed results may differ, on average the differences are negligible when usingmultiple different seeds to compare. Should also be ran with [`~utils.set_seed`] for the best results.'})\n non_blocking: Optional[bool] = field(default=False, metadata={'help': 'Whether to use non-blocking CUDA calls to help minimize synchronization during distributed training with prepared `DataLoader` inputs being moved to device. Best if used with `pin_memory=True` in the `TrainingArguments`. Requires accelerate v0.30.0.'})\n gradient_accumulation_kwargs: Optional[dict] = field(default=None, metadata={'help': 'Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`]. Any of the following (optional) keys are acceptable: num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if the latter is set to 1, otherwise an exception will be raised. adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`]. The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`. sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch. The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`.'})\n use_configured_state: bool = field(default=False, metadata={'help': 'Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined before calling `TrainingArguments`.If `True`, an `Accelerator` or `PartialState` must be initialized. 
May lead to issues using sweeps or hyperparameter tuning.'})\n\n    @classmethod\n    def from_json_file(cls, json_file):\n        open_file = io.open if os.path.exists(json_file) else open\n        with open_file(json_file, 'r', encoding='utf-8') as f:\n            config_dict = json.load(f)\n        extra_keys = sorted((key for key in config_dict.keys() if key not in cls.__dataclass_fields__.keys()))\n        if len(extra_keys) > 0:\n            raise ValueError(f'The config file at {json_file} had unknown keys ({extra_keys}), please try upgrading your `transformers` version or fix (and potentially remove these keys) from your config file.')\n        return cls(**config_dict)\n\n    def to_dict(self):\n        return copy.deepcopy(self.__dict__)\n\n    def pop(self, key, default=None):\n        return self.__dict__.pop(key, default)", "docstring": "A subset of arguments relating to the underlying [`accelerate.Accelerator`]\nimplementation utilized in the `Trainer` that can be customized.\nMostly relating to data.\n\nParameters:\n    split_batches (`bool`, *optional*, defaults to `False`):\n        Whether or not the accelerator should split the batches yielded by the dataloaders across the devices. If\n        `True` the actual batch size used will be the same on any kind of distributed processes, but it must be a\n        round multiple of the `num_processes` you are using. If `False`, actual batch size used will be the one set\n        in your script multiplied by the number of processes.\n    dispatch_batches (`bool`, *optional*):\n        If set to `True`, the dataloader prepared by the Accelerator is only iterated through on the main process\n        and then the batches are split and broadcast to each process. Will default to `True` for `DataLoader` whose\n        underlying dataset is an `IterableDataset`, `False` otherwise.\n    even_batches (`bool`, *optional*, defaults to `True`):\n        If set to `True`, in cases where the total batch size across all processes does not exactly divide the\n        dataset, samples at the start of the dataset will be duplicated so the batch can be divided equally among\n        all workers.\n    use_seedable_sampler (`bool`, *optional*, defaults to `True`):\n        Whether or not to use a fully seedable random sampler ([`accelerate.data_loader.SeedableRandomSampler`]). Ensures\n        training results are fully reproducible using a different sampling technique. While seed-to-seed results\n        may differ, on average the differences are negligible when using multiple different seeds to compare. 
Should\n        also be run with [`~utils.set_seed`] for the best results.\n    gradient_accumulation_kwargs (`dict`, *optional*):\n        Additional kwargs to configure gradient accumulation, see [`accelerate.utils.GradientAccumulationPlugin`].\n        Any of the following (optional) keys are acceptable:\n          num_steps (`int`): Will take precedence over [`~.TrainingArguments.gradient_accumulation_steps`] if\n            the latter is set to 1, otherwise an exception will be raised.\n          adjust_scheduler (`bool`): Whether to adjust the scheduler steps to account for [`~.TrainingArguments.gradient_accumulation_steps`].\n            The [`accelerate.utils.GradientAccumulationPlugin`] default is `True`.\n          sync_each_batch (`bool`): Whether to synchronize the gradients at each data batch.\n            The [`accelerate.utils.GradientAccumulationPlugin`] default is `False`.\n    non_blocking (`bool`, *optional*, defaults to `False`):\n        Whether to use non-blocking CUDA calls to help minimize synchronization during\n        distributed training with prepared `DataLoader` inputs being moved to device.\n        Best if used with `pin_memory=True` in the `TrainingArguments`.\n    use_configured_state (`bool`, *optional*, defaults to `False`):\n        Whether or not to use a pre-configured `AcceleratorState` or `PartialState` defined\n        before calling `TrainingArguments`. If `True`, an `Accelerator` or `PartialState`\n        must be initialized. May lead to issues using sweeps or hyperparameter tuning."} +{"repo": "transformers", "function": "def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    encoder_hidden_states = encoder_outputs[0]\n    batch_size, sequence_length = decoder_input_ids.shape\n    if decoder_position_ids is None:\n        if past_key_values is not None:\n            raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.')\n        if decoder_attention_mask is not None:\n            decoder_position_ids = decoder_attention_mask.cumsum(-1) * decoder_attention_mask - 1\n        else:\n            decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n    if decoder_attention_mask is None:\n        decoder_attention_mask = jnp.ones((batch_size, sequence_length), dtype='i4')\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n    inputs = {'params': params or self.params}\n    if past_key_values:\n        inputs['cache'] = past_key_values\n        mutable = ['cache']\n    else:\n        mutable = False\n\n    def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):\n        decoder_module = module._get_decoder_module()\n        outputs = decoder_module(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs)\n        hidden_states = outputs[0]\n        if self.config.tie_word_embeddings:\n            shared_embedding = module.model.decoder.embed_tokens.variables['params']['embedding']\n            lm_logits = 
module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states)\n else:\n lm_logits = module.lm_head(hidden_states)\n return (lm_logits, outputs)\n outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward)\n if past_key_values is None:\n lm_logits, decoder_outputs = outputs\n else:\n (lm_logits, decoder_outputs), past = outputs\n if return_dict:\n outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions)\n else:\n outputs = (lm_logits,) + decoder_outputs[1:]\n if past_key_values is not None and return_dict:\n outputs['past_key_values'] = unfreeze(past['cache'])\n return outputs\n elif past_key_values is not None and (not return_dict):\n outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:]\n return outputs", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration\n>>> from datasets import load_dataset\n\n>>> processor = WhisperProcessor.from_pretrained(\"openai/whisper-tiny.en\")\n>>> model = FlaxWhisperForConditionalGeneration.from_pretrained(\"openai/whisper-tiny.en\", from_pt=True)\n>>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n>>> inputs = processor(ds[0][\"audio\"][\"array\"], return_tensors=\"np\")\n>>> input_features = inputs.input_features\n>>> encoder_outputs = model.encode(input_features=input_features)\n>>> decoder_start_token_id = model.config.decoder_start_token_id\n\n>>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype=\"i4\") * decoder_start_token_id\n\n>>> outputs = model.decode(decoder_input_ids, encoder_outputs)\n>>> last_decoder_hidden_states = outputs.last_hidden_state\n```"} +{"repo": "tensorflow", "function": "def matrices_to_flat_transforms(transform_matrices):\n with ops.name_scope('matrices_to_flat_transforms'):\n transform_matrices = ops.convert_to_tensor(transform_matrices, name='transform_matrices')\n if transform_matrices.shape.ndims not in (2, 3):\n raise ValueError('Matrices should be 2D or 3D, got: %s' % transform_matrices)\n transforms = array_ops.reshape(transform_matrices, constant_op.constant([-1, 9]))\n transforms /= transforms[:, 8:9]\n return transforms[:, :8]", "docstring": "Converts affine matrices to `tf.contrib.image` projective transforms.\n\nNote that we expect matrices that map output coordinates to input coordinates.\nTo convert forward transformation matrices, call `tf.linalg.inv` on the\nmatrices and use the result here.\n\nArgs:\n transform_matrices: One or more affine transformation matrices, for the\n reverse transformation in homogeneous coordinates. 
Shape `(3, 3)` or `(N,\n    3, 3)`.\n\nReturns:\n  2D tensor of flat transforms with shape `(N, 8)`, which may be passed into\n  `tf.contrib.image.transform`.\n\nRaises:\n  ValueError: If `transform_matrices` have an invalid shape."} +{"repo": "transformers", "function": "class BatchFeature(BaseBatchFeature):", "docstring": "Holds the output of the image processor specific `__call__` methods.\n\nThis class is derived from a python dictionary and can be used as a dictionary.\n\nArgs:\n    data (`dict`):\n        Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.).\n    tensor_type (`Union[None, str, TensorType]`, *optional*):\n        You can give a tensor_type here to convert the lists of integers into PyTorch/TensorFlow/Numpy tensors at\n        initialization."} +{"repo": "fhir-py", "function": "def copy_code(source: message.Message, target: message.Message) -> None:\n    if not fhir_types.is_type_or_profile_of_code(source.DESCRIPTOR):\n        raise fhir_errors.InvalidFhirError(f'Source: {source.DESCRIPTOR.full_name} is not type or profile of Code.')\n    if not fhir_types.is_type_or_profile_of_code(target.DESCRIPTOR):\n        raise fhir_errors.InvalidFhirError(f'Target: {target.DESCRIPTOR.full_name} is not type or profile of Code.')\n    if proto_utils.are_same_message_type(source.DESCRIPTOR, target.DESCRIPTOR):\n        target.CopyFrom(source)\n        return\n    source_value_field = source.DESCRIPTOR.fields_by_name.get('value')\n    target_value_field = target.DESCRIPTOR.fields_by_name.get('value')\n    if source_value_field is None or target_value_field is None:\n        raise fhir_errors.InvalidFhirError(f'Unable to copy code from {source.DESCRIPTOR.full_name} to {target.DESCRIPTOR.full_name}.')\n    proto_utils.copy_common_field(source, target, 'id')\n    proto_utils.copy_common_field(source, target, 'extension')\n    if source_value_field.type not in _CODE_TYPES or target_value_field.type not in _CODE_TYPES:\n        raise ValueError(f'Unable to copy from {source.DESCRIPTOR.full_name} to {target.DESCRIPTOR.full_name}. Must have a field of TYPE_ENUM or TYPE_STRING.')\n    source_value = proto_utils.get_value_at_field(source, source_value_field)\n    if source_value_field.type == target_value_field.type:\n        proto_utils.set_value_at_field(target, target_value_field, source_value)\n    elif source_value_field.type == descriptor.FieldDescriptor.TYPE_STRING:\n        source_enum_value = code_string_to_enum_value_descriptor(source_value, target_value_field.enum_type)\n        proto_utils.set_value_at_field(target, target_value_field, source_enum_value.number)\n    elif source_value_field.type == descriptor.FieldDescriptor.TYPE_ENUM:\n        source_string_value = enum_value_descriptor_to_code_string(source_value_field.enum_type.values_by_number[source_value])\n        proto_utils.set_value_at_field(target, target_value_field, source_string_value)\n    else:\n        raise ValueError(f'Unexpected generic value field type: {source_value_field.type}. 
Must be a field of TYPE_ENUM or TYPE_STRING in order to copy.')", "docstring": "Adds all fields from source to target.\n\nArgs:\n  source: The FHIR Code instance to copy from.\n  target: The target FHIR Code instance to copy to."} +{"repo": "pyglove", "function": "def __call__(self, inputs: List[Any], global_state: Optional[pg.geno.AttributeDict]=None, step: int=0) -> List[Any]:\n    if self.input_element_type is not None:\n        elem_type = self.input_element_type\n        for i, elem in enumerate(inputs):\n            if not isinstance(elem, elem_type):\n                raise TypeError(f'The input is expected to be a list of {elem_type!r} but {elem!r} is encountered at position {i}.')\n    if global_state is None:\n        global_state = pg.geno.AttributeDict()\n    self._on_input(inputs)\n    outputs = self._operate(inputs, global_state=global_state, step=step)\n    if self.output_element_type is not None:\n        elem_type = self.output_element_type\n        for i, elem in enumerate(outputs):\n            if not isinstance(elem, elem_type):\n                raise TypeError(f'The output is expected to be a list of {elem_type!r} but {elem!r} is encountered at position {i}.')\n    return outputs", "docstring": "Transform a list of input values to a list of output values.\n\nArgs:\n  inputs: A list of values as inputs.\n  global_state: An `AttributeDict` object (dictionary that provides\n    attribute access) as the global state container, which is\n    readable/writable during the operation.\n  step: Number of examples historically proposed, which can be used for\n    determining a crossover schedule.\n\nReturns:\n  A list of values as output of current operation."} +{"repo": "transformers", "function": "def image_guided_detection(self, pixel_values: torch.FloatTensor, query_pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Owlv2ImageGuidedObjectDetectionOutput:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.return_dict\n    query_feature_map = self.image_embedder(pixel_values=query_pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)[0]\n    feature_map, vision_outputs = self.image_embedder(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding)\n    batch_size, num_patches_height, num_patches_width, hidden_dim = feature_map.shape\n    image_feats = torch.reshape(feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))\n    batch_size, num_patches_height, num_patches_width, hidden_dim = query_feature_map.shape\n    query_image_feats = torch.reshape(query_feature_map, (batch_size, num_patches_height * num_patches_width, hidden_dim))\n    query_embeds, best_box_indices, query_pred_boxes = self.embed_image_query(query_image_feats, query_feature_map, interpolate_pos_encoding)\n    pred_logits, class_embeds = self.class_predictor(image_feats=image_feats, query_embeds=query_embeds)\n    target_pred_boxes = self.box_predictor(image_feats, feature_map, interpolate_pos_encoding)\n    if not return_dict:\n        output = (feature_map, query_feature_map, target_pred_boxes, query_pred_boxes, pred_logits, class_embeds, vision_outputs.to_tuple())\n        output = tuple((x for x in output if x is not None))\n        return 
output\n return Owlv2ImageGuidedObjectDetectionOutput(image_embeds=feature_map, query_image_embeds=query_feature_map, target_pred_boxes=target_pred_boxes, query_pred_boxes=query_pred_boxes, logits=pred_logits, class_embeds=class_embeds, text_model_output=None, vision_model_output=vision_outputs)", "docstring": "query_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values of query image(s) to be detected. Pass in one query image per target image.\n\nExamples:\n```python\n>>> import requests\n>>> from PIL import Image\n>>> import torch\n>>> from transformers import AutoProcessor, Owlv2ForObjectDetection\n\n>>> processor = AutoProcessor.from_pretrained(\"google/owlv2-base-patch16-ensemble\")\n>>> model = Owlv2ForObjectDetection.from_pretrained(\"google/owlv2-base-patch16-ensemble\")\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n>>> query_url = \"http://images.cocodataset.org/val2017/000000001675.jpg\"\n>>> query_image = Image.open(requests.get(query_url, stream=True).raw)\n>>> inputs = processor(images=image, query_images=query_image, return_tensors=\"pt\")\n\n>>> # forward pass\n>>> with torch.no_grad():\n... outputs = model.image_guided_detection(**inputs)\n\n>>> target_sizes = torch.Tensor([image.size[::-1]])\n\n>>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)\n>>> results = processor.post_process_image_guided_detection(\n... outputs=outputs, threshold=0.9, nms_threshold=0.3, target_sizes=target_sizes\n... )\n>>> i = 0 # Retrieve predictions for the first image\n>>> boxes, scores = results[i][\"boxes\"], results[i][\"scores\"]\n>>> for box, score in zip(boxes, scores):\n... box = [round(i, 2) for i in box.tolist()]\n... 
print(f\"Detected similar object with confidence {round(score.item(), 3)} at location {box}\")\nDetected similar object with confidence 0.938 at location [327.31, 54.94, 547.39, 268.06]\nDetected similar object with confidence 0.959 at location [5.78, 360.65, 619.12, 366.39]\nDetected similar object with confidence 0.902 at location [2.85, 360.01, 627.63, 380.8]\nDetected similar object with confidence 0.985 at location [176.98, -29.45, 672.69, 182.83]\nDetected similar object with confidence 1.0 at location [6.53, 14.35, 624.87, 470.82]\nDetected similar object with confidence 0.998 at location [579.98, 29.14, 615.49, 489.05]\nDetected similar object with confidence 0.985 at location [206.15, 10.53, 247.74, 466.01]\nDetected similar object with confidence 0.947 at location [18.62, 429.72, 646.5, 457.72]\nDetected similar object with confidence 0.996 at location [523.88, 20.69, 586.84, 483.18]\nDetected similar object with confidence 0.998 at location [3.39, 360.59, 617.29, 499.21]\nDetected similar object with confidence 0.969 at location [4.47, 449.05, 614.5, 474.76]\nDetected similar object with confidence 0.966 at location [31.44, 463.65, 654.66, 471.07]\nDetected similar object with confidence 0.924 at location [30.93, 468.07, 635.35, 475.39]\n```"} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor]:\n residual = hidden_states\n hidden_states = self.layer_norm1(hidden_states)\n hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.layer_norm2(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (attn_weights,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`):\n Input to the layer of shape `(batch, seq_len, embed_dim)`.\n attention_mask (`torch.FloatTensor`):\n Attention mask of shape `(batch, 1, q_len, k_v_seq_len)` where padding elements are indicated by very large negative values.\n output_attentions (`bool`, *optional*, defaults to `False`):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail."} +{"repo": "beam", "function": "def get(self):\n raise NotImplementedError()", "docstring": "Get the current tracking value.\n\nReturns:\n The current tracked value, the type of which depends on the specific\n tracker implementation."} +{"repo": "tensorflow", "function": "def create_base_for_fuse_batchnorm(self, pattern_match_mode='MATCH_ALL'):\n with self.cached_session() as sess:\n data_format = 'NHWC'\n if pattern_match_mode == 'MISMATCH_FORMAT':\n data_format = 'NCHW'\n inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]\n input_op = constant_op.constant(np.array(inputs), shape=[1, 1, 6, 2] if data_format == 'NHWC' else [1, 2, 1, 6], dtype=dtypes.float32)\n weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]\n weights_op = constant_op.constant(np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)\n conv_op = nn_ops.conv2d(input_op, weights_op, [1, 1, 1, 1], data_format=data_format, padding='SAME', name='conv_op')\n const_op_1 = None\n const_op_2 = constant_op.constant(1e-05, dtype=dtypes.float32)\n const_op_3 = None\n const_op_4 = None\n const_op_5 = None\n const_op_6 = None\n if data_format == 'NHWC':\n const_op_1 = constant_op.constant(np.array([0.25, 0.5]), shape=[2], dtype=dtypes.float32)\n const_op_3 = constant_op.constant(np.array([10, 20]), shape=[2], dtype=dtypes.float32)\n const_op_4 = constant_op.constant(np.array([0.1, 0.6]), shape=[2], dtype=dtypes.float32)\n const_op_5 = constant_op.constant(np.array([1.0, 2.0]), shape=[2], dtype=dtypes.float32)\n const_op_6 = constant_op.constant(np.array([0.2, 0.5]), shape=[2], dtype=dtypes.float32)\n else:\n const_op_1 = constant_op.constant(np.array([0.25, 0.5, 0.6, 0.7, 0.8, 0.9]), shape=[6], dtype=dtypes.float32)\n const_op_3 = constant_op.constant(np.array([10, 20, 30, 40, 50, 60]), shape=[6], dtype=dtypes.float32)\n const_op_4 = constant_op.constant(np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6]), shape=[6], dtype=dtypes.float32)\n const_op_5 = constant_op.constant(np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), shape=[6], dtype=dtypes.float32)\n const_op_6 = constant_op.constant(np.array([0.2, 0.4, 0.5, 0.6, 0.7, 0.8]), shape=[6], dtype=dtypes.float32)\n add_op_1 = gen_math_ops.add(const_op_1, const_op_2)\n rsqrt_op = math_ops.rsqrt(add_op_1)\n variable_op = None\n if pattern_match_mode == 'MATCH_NO_GAMMA':\n variable_op = rsqrt_op\n else:\n variable_op = math_ops.multiply(rsqrt_op, const_op_5)\n mul_op_1 = math_ops.multiply(conv_op, variable_op)\n mul_op_2 = None\n if pattern_match_mode == 'MISMATCH_PATTERN':\n mul_op_2 = math_ops.multiply(const_op_3, const_op_6)\n else:\n mul_op_2 = math_ops.multiply(const_op_3, variable_op)\n sub_op = math_ops.subtract(const_op_4, mul_op_2)\n if pattern_match_mode == 'MATCH_SWITCH_ORDER':\n gen_math_ops.add(sub_op, mul_op_1, name='output')\n else:\n gen_math_ops.add(mul_op_1, sub_op, name='output')\n test_util.set_producer_version(ops.get_default_graph(), 8)\n original_graph = sess.graph_def\n original_result = sess.run(['output:0'])\n return (original_graph, original_result)", "docstring": "Create testing graph and compute the result from original graph.\n\nArgs:\n pattern_match_mode: A label string to indicate which batchnorm composition\n pattern to create in the resulting graph. \"MATCH_ALL\" - Create a graph\n matching the decomposed batchnorm pattern with full set of primitive\n ops. 
\"MATCH_NO_GAMMA\" - Create a graph matching the decomposed batchnorm\n pattern when gamma factor is 1 and multiplication with gamma is omitted.\n \"MATCH_SWITCH_ORDER\" - Create a graph matching the decomposed batchnorm\n pattern with a different order of inputs to the root Add node.\n \"MISMATCH_PATTERN\" - Create a graph with same set of primitive ops which\n makes up the decomposed batchnorm, but not matching the pattern.\n \"MISMATCH_FORMAT\" - Create a graph with NCHW format as input.\n\nReturns:\n A GraphDef as original graph to run the decomposed batchnorm test cases.\n Computation result from executing the original graph defined by GraphDef."} +{"repo": "transformers", "function": "def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n default_to_square = True\n if 'shortest_edge' in size:\n size = size['shortest_edge']\n default_to_square = False\n elif 'height' in size and 'width' in size:\n size = (size['height'], size['width'])\n else:\n raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)\n return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)", "docstring": "Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\nresized to keep the input aspect ratio.\n\nArgs:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred."} +{"repo": "transformers", "function": "def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):\n if timeout is None:\n timeout = int(os.environ.get('PYTEST_TIMEOUT', 600))\n start_methohd = 'spawn'\n ctx = multiprocessing.get_context(start_methohd)\n input_queue = ctx.Queue(1)\n output_queue = ctx.JoinableQueue(1)\n input_queue.put(inputs, timeout=timeout)\n process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))\n process.start()\n try:\n results = output_queue.get(timeout=timeout)\n output_queue.task_done()\n except Exception as e:\n process.terminate()\n test_case.fail(e)\n process.join(timeout=timeout)\n if results['error'] is not None:\n test_case.fail(f'{results['error']}')", "docstring": "To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.\n\nArgs:\n test_case (`unittest.TestCase`):\n The test that will run `target_func`.\n target_func (`Callable`):\n The function implementing the actual testing logic.\n inputs (`dict`, *optional*, defaults to `None`):\n The inputs that will be passed to `target_func` through an (input) queue.\n timeout (`int`, *optional*, defaults to `None`):\n The timeout (in seconds) that will be passed to the input and output queues. 
If not specified, the env.\n        variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`."} +{"repo": "beam", "function": "class RunOfflineDetector(beam.PTransform[beam.PCollection[NestedKeyedInputT], beam.PCollection[NestedKeyedOutputT]]):\n\n    def __init__(self, offline_detector: OfflineDetector):\n        self._offline_detector = offline_detector\n\n    def _restore_and_convert(self, elem: tuple[tuple[Any, Any, beam.Row], Any]) -> NestedKeyedOutputT:\n        \"\"\"Converts the model output to AnomalyResult.\n\n        Args:\n          elem: A tuple containing the combined key (original key, temp key, row)\n            and the output from RunInference.\n\n        Returns:\n          A tuple containing the keyed AnomalyResult.\n        \"\"\"\n        (orig_key, temp_key, row), prediction = elem\n        assert isinstance(prediction, AnomalyPrediction), 'Wrong model handler output type. ' + f\"Expected: 'AnomalyPrediction', but got '{type(prediction).__name__}'. \" + 'Consider adding a post-processing function via `with_postprocess_fn` ' + f\"to convert from '{type(prediction).__name__}' to 'AnomalyPrediction', \" + 'or use `score_prediction_adapter` or `label_prediction_adapter` to ' + 'perform the conversion.'\n        result = AnomalyResult(example=row, predictions=[dataclasses.replace(prediction, model_id=self._offline_detector._model_id)])\n        return (orig_key, (temp_key, result))\n\n    def _select_features(self, elem: tuple[Any, beam.Row]) -> tuple[Any, beam.Row]:\n        assert self._offline_detector._features is not None\n        k, v = elem\n        row_dict = v._asdict()\n        return (k, beam.Row(**{k: row_dict[k] for k in self._offline_detector._features}))\n\n    def expand(self, input: beam.PCollection[NestedKeyedInputT]) -> beam.PCollection[NestedKeyedOutputT]:\n        model_uuid = f'{self._offline_detector._model_id}:{uuid.uuid4().hex[:6]}'\n        run_inference = RunInference(self._offline_detector._keyed_model_handler, **self._offline_detector._run_inference_args)\n        rekeyed_model_input = input | 'Rekey' >> beam.Map(lambda x: ((x[0], x[1][0], x[1][1]), x[1][1]))\n        if self._offline_detector._features is not None:\n            rekeyed_model_input = rekeyed_model_input | 'Select Features' >> beam.Map(self._select_features)\n        rekeyed_model_output = rekeyed_model_input | f'Call RunInference ({model_uuid})' >> run_inference\n        ret = rekeyed_model_output | 'Restore keys and convert model output' >> beam.Map(self._restore_and_convert)\n        if self._offline_detector._threshold_criterion:\n            ret = ret | f'Run Threshold Criterion ({model_uuid})' >> RunThresholdCriterion(self._offline_detector._threshold_criterion)\n        return ret", "docstring": "Runs an offline anomaly detector on a PCollection of data.\n\nThis PTransform applies an `OfflineDetector` to the input data, handling\ncustom input/output conversion and inference.\n\nArgs:\n  offline_detector: The `OfflineDetector` to run."} +{"repo": "weather-tools", "function": "class Manifest(abc.ABC):\n    location: Location\n    prev_stage_precise_start_time: t.Optional[str] = None\n    status: t.Optional[DownloadStatus] = None\n\n    def __post_init__(self):\n        \"\"\"Initialize the manifest.\"\"\"\n        pass\n\n    def schedule(self, config_name: str, dataset: str, selection: t.Dict, location: str, user: str) -> None:\n        \"\"\"Indicate that a job has been scheduled for download.\n\n        'scheduled' jobs occur before 'in-progress', 'success' or 'finished'.\n        \"\"\"\n        scheduled_time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat(timespec='seconds')\n        self.status = DownloadStatus(config_name=config_name, dataset=dataset if dataset else None, 
selection=selection, location=location, area=fetch_geo_polygon(selection.get('area', GLOBAL_COVERAGE_AREA)), username=user, stage=None, status=Status.SCHEDULED, error=None, size=None, scheduled_time=scheduled_time, retrieve_start_time=None, retrieve_end_time=None, fetch_start_time=None, fetch_end_time=None, download_start_time=None, download_end_time=None, upload_start_time=None, upload_end_time=None)\n self._update(self.status)\n\n def skip(self, config_name: str, dataset: str, selection: t.Dict, location: str, user: str) -> None:\n \"\"\"Updates the manifest to mark the shards that were skipped in the current job\n as 'upload' stage and 'success' status, indicating that they have already been downloaded.\n \"\"\"\n old_status = self._read(location)\n if old_status.location != location or old_status.stage != Stage.UPLOAD or old_status.status != Status.SUCCESS:\n current_utc_time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat(timespec='seconds')\n size = get_file_size(location)\n status = DownloadStatus(config_name=config_name, dataset=dataset if dataset else None, selection=selection, location=location, area=fetch_geo_polygon(selection.get('area', GLOBAL_COVERAGE_AREA)), username=user, stage=Stage.UPLOAD, status=Status.SUCCESS, error=None, size=size, scheduled_time=None, retrieve_start_time=None, retrieve_end_time=None, fetch_start_time=None, fetch_end_time=None, download_start_time=None, download_end_time=None, upload_start_time=current_utc_time, upload_end_time=current_utc_time)\n self._update(status)\n logger.info(f'Manifest updated for skipped shard: {location!r} -- {DownloadStatus.to_dict(status)!r}.')\n\n def _set_for_transaction(self, config_name: str, dataset: str, selection: t.Dict, location: str, user: str) -> None:\n \"\"\"Reset Manifest state in preparation for a new transaction.\"\"\"\n self.status = dataclasses.replace(self._read(location))\n self.status.config_name = config_name\n self.status.dataset = dataset if dataset else None\n self.status.selection = selection\n self.status.location = location\n self.status.username = user\n\n def __enter__(self) -> None:\n pass\n\n def __exit__(self, exc_type, exc_inst, exc_tb) -> None:\n \"\"\"Record end status of a transaction as either 'success' or 'failure'.\"\"\"\n if exc_type is None:\n status = Status.SUCCESS\n error = None\n else:\n status = Status.FAILURE\n error = '\\n'.join(traceback.format_exception(exc_type, exc_inst, exc_tb))\n new_status = dataclasses.replace(self.status)\n new_status.error = error\n new_status.status = status\n current_utc_time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat(timespec='seconds')\n if new_status.stage == Stage.FETCH:\n new_status.fetch_start_time = self.prev_stage_precise_start_time\n new_status.fetch_end_time = current_utc_time\n elif new_status.stage == Stage.RETRIEVE:\n new_status.retrieve_start_time = self.prev_stage_precise_start_time\n new_status.retrieve_end_time = current_utc_time\n elif new_status.stage == Stage.DOWNLOAD:\n new_status.download_start_time = self.prev_stage_precise_start_time\n new_status.download_end_time = current_utc_time\n else:\n new_status.upload_start_time = self.prev_stage_precise_start_time\n new_status.upload_end_time = current_utc_time\n new_status.size = get_file_size(new_status.location)\n self.status = new_status\n self._update(self.status)\n\n def transact(self, config_name: str, dataset: str, selection: t.Dict, location: str, user: str) -> 'Manifest':\n \"\"\"Create a download 
transaction.\"\"\"\n self._set_for_transaction(config_name, dataset, selection, location, user)\n return self\n\n def set_stage(self, stage: Stage) -> None:\n \"\"\"Sets the current stage in manifest.\"\"\"\n prev_stage = self.status.stage\n new_status = dataclasses.replace(self.status)\n new_status.stage = stage\n new_status.status = Status.IN_PROGRESS\n current_utc_time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat(timespec='seconds')\n if stage == Stage.FETCH:\n new_status.fetch_start_time = current_utc_time\n new_status.fetch_end_time = None\n new_status.download_start_time = None\n new_status.download_end_time = None\n elif stage == Stage.RETRIEVE:\n new_status.retrieve_start_time = current_utc_time\n elif stage == Stage.DOWNLOAD:\n new_status.fetch_start_time = self.prev_stage_precise_start_time\n new_status.fetch_end_time = current_utc_time\n new_status.download_start_time = current_utc_time\n else:\n if prev_stage == Stage.DOWNLOAD:\n new_status.download_start_time = self.prev_stage_precise_start_time\n new_status.download_end_time = current_utc_time\n else:\n new_status.retrieve_start_time = self.prev_stage_precise_start_time\n new_status.retrieve_end_time = current_utc_time\n new_status.upload_start_time = current_utc_time\n self.status = new_status\n self._update(self.status)\n\n @abc.abstractmethod\n def _read(self, location: str) -> DownloadStatus:\n pass\n\n @abc.abstractmethod\n def _update(self, download_status: DownloadStatus) -> None:\n pass", "docstring": "Abstract manifest of download statuses.\n\nUpdate download statuses to some storage medium.\n\nThis class lets one indicate that a download is `scheduled` or in a transaction process.\nIn the event of a transaction, a download will be updated with an `in-progress`, `success`\nor `failure` status (with accompanying metadata).\n\nExample:\n ```\n my_manifest = parse_manifest_location(Location('fs://some-firestore-collection'))\n\n # Schedule data for download\n my_manifest.schedule({'some': 'metadata'}, 'path/to/downloaded/file', 'my-username')\n\n # ...\n\n # Initiate a transaction \u2013 it will record that the download is `in-progess`\n with my_manifest.transact({'some': 'metadata'}, 'path/to/downloaded/file', 'my-username') as tx:\n # download logic here\n pass\n\n # ...\n\n # on error, will record the download as a `failure` before propagating the error. 
By default, it will\n   # record download as a `success`.\n   ```\n\nAttributes:\n    location: An implementation-specific manifest URI.\n    status: The current `DownloadStatus` of the Manifest."} +{"repo": "transformers", "function": "class AriaSharedExpertsMLP(LlamaMLP):\n\n    def __init__(self, config: AriaTextConfig):\n        super().__init__(config)\n        self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts", "docstring": "Shared Expert MLP for shared experts.\n\nUnlike routed experts, shared experts process all tokens without routing.\nThis class reconfigures the intermediate size in comparison to the LlamaMLP.\n\nArgs:\n    config (`AriaTextConfig`): Configuration object for the Aria language model."} +{"repo": "transformers", "function": "def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:\n    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n    return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n    text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n    pooled_output = text_outputs[1] if return_dict is not None else text_outputs.pooler_output\n    text_features = self.text_projection(pooled_output)\n    text_features = F.normalize(text_features, dim=-1)\n    return text_features", "docstring": "Returns:\n    text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by\n    applying the projection layer to the pooled output of [`ClapTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, ClapModel\n\n>>> model = ClapModel.from_pretrained(\"laion/clap-htsat-unfused\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"laion/clap-htsat-unfused\")\n\n>>> inputs = tokenizer([\"the sound of a cat\", \"the sound of a dog\"], padding=True, return_tensors=\"pt\")\n>>> text_features = model.get_text_features(**inputs)\n```"} +{"repo": "tensorflow", "function": "def sparse_add_v2(a, b, threshold=0):\n    sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)\n    if not any((isinstance(inp, sparse_classes) for inp in [a, b])):\n        raise TypeError('At least one input should be SparseTensor; do you mean to use tf.add()?')\n    if all((isinstance(inp, sparse_classes) for inp in [a, b])):\n        a = _convert_to_sparse_tensor(a)\n        b = _convert_to_sparse_tensor(b)\n        threshold = ops.convert_to_tensor(threshold, dtype=a.values.dtype.real_dtype.base_dtype, name='threshold')\n        output_ind, output_val, output_shape = gen_sparse_ops.sparse_add(a.indices, a.values, a.dense_shape, b.indices, b.values, b.dense_shape, threshold)\n        a.get_shape().assert_is_compatible_with(b.get_shape())\n        static_shape = array_ops.broadcast_static_shape(a.get_shape(), b.get_shape())\n        if static_shape.is_fully_defined():\n            output_shape = static_shape.as_list()\n        return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)\n    else:\n        if isinstance(b, sparse_classes):\n            a, b = (b, a)\n        return gen_sparse_ops.sparse_tensor_dense_add(a.indices, a.values, 
a.dense_shape, b)", "docstring": "Adds two tensors, at least one of each is a `SparseTensor`.\n\nIf one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If\nboth arguments are `SparseTensor`s, this returns a `SparseTensor`. The order\nof arguments does not matter. Use vanilla `tf.add()` for adding two dense\n`Tensor`s.\n\nThe shapes of the two operands must match: broadcasting is not supported.\n\nThe indices of any input `SparseTensor` are assumed ordered in standard\nlexicographic order. If this is not the case, before this step run\n`SparseReorder` to restore index ordering.\n\nIf both arguments are sparse, we perform \"clipping\" as follows. By default,\nif two values sum to zero at some index, the output `SparseTensor` would still\ninclude that particular location in its index, storing a zero in the\ncorresponding value slot. To override this, callers can specify `threshold`,\nindicating that if the sum has a magnitude strictly smaller than `threshold`,\nits corresponding value and index would then not be included. In particular,\n`threshold == 0.0` (default) means everything is kept and actual thresholding\nhappens only for a positive value.\n\nFor example, suppose the logical sum of two sparse operands is (densified):\n\n [ 2]\n [.1 0]\n [ 6 -.2]\n\nThen,\n\n* `threshold == 0` (the default): all 5 index/value pairs will be\n returned.\n* `threshold == 0.11`: only .1 and 0 will vanish, and the remaining three\n index/value pairs will be returned.\n* `threshold == 0.21`: .1, 0, and -.2 will vanish.\n\nArgs:\n a: The first operand; `SparseTensor` or `Tensor`.\n b: The second operand; `SparseTensor` or `Tensor`. At least one operand\n must be sparse.\n threshold: A 0-D `Tensor`. The magnitude threshold that determines if an\n output value/index pair takes space. Its dtype should match that of the\n values if they are real; if the latter are complex64/complex128, then the\n dtype should be float32/float64, correspondingly.\n\nReturns:\n A `SparseTensor` or a `Tensor`, representing the sum.\n\nRaises:\n TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead."} +{"repo": "tensorflow", "function": "class VarianceScaling(Initializer):\n\n def __init__(self, scale=1.0, mode='fan_in', distribution='truncated_normal', seed=None):\n if scale <= 0.0:\n raise ValueError(f'Argument `scale` must be a positive float. Received: {scale}')\n if mode not in {'fan_in', 'fan_out', 'fan_avg'}:\n raise ValueError(f\"Argument `mode` should be one of ('fan_in', 'fan_out', 'fan_avg'). Received: {mode}\")\n distribution = distribution.lower()\n if distribution == 'normal':\n distribution = 'truncated_normal'\n if distribution not in {'uniform', 'truncated_normal', 'untruncated_normal'}:\n raise ValueError(f\"Argument `distribution` should be one of ('uniform', 'truncated_normal', 'untruncated_normal'). Received: {distribution}\")\n self.scale = scale\n self.mode = mode\n self.distribution = distribution\n self.seed = seed\n self._random_generator = _RandomGenerator(seed)\n\n def __call__(self, shape, dtype=dtypes.float32, **kwargs):\n \"\"\"Returns a tensor object initialized as specified by the initializer.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. 
Only floating point types are\n supported.\n **kwargs: Additional keyword arguments.\n\n Raises:\n ValueError: If the dtype is not floating point\n \"\"\"\n self._validate_kwargs(kwargs)\n dtype = _assert_float_dtype(dtype)\n scale = self.scale\n fan_in, fan_out = _compute_fans(shape)\n if _PARTITION_SHAPE in kwargs:\n shape = kwargs[_PARTITION_SHAPE]\n if self.mode == 'fan_in':\n scale /= max(1.0, fan_in)\n elif self.mode == 'fan_out':\n scale /= max(1.0, fan_out)\n else:\n scale /= max(1.0, (fan_in + fan_out) / 2.0)\n if self.distribution == 'truncated_normal':\n stddev = math.sqrt(scale) / 0.8796256610342398\n return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)\n elif self.distribution == 'untruncated_normal':\n stddev = math.sqrt(scale)\n return self._random_generator.random_normal(shape, 0.0, stddev, dtype)\n else:\n limit = math.sqrt(3.0 * scale)\n return self._random_generator.random_uniform(shape, -limit, limit, dtype)\n\n def get_config(self):\n return {'scale': self.scale, 'mode': self.mode, 'distribution': self.distribution, 'seed': self.seed}", "docstring": "Initializer capable of adapting its scale to the shape of weights tensors.\n\nInitializers allow you to pre-specify an initialization strategy, encoded in\nthe Initializer object, without knowing the shape and dtype of the variable\nbeing initialized.\n\nWith `distribution=\"truncated_normal\" or \"untruncated_normal\"`, samples are\ndrawn from a truncated/untruncated normal distribution with a mean of zero and\na standard deviation (after truncation, if used) `stddev = sqrt(scale / n)`\nwhere n is:\n\n - number of input units in the weight tensor, if mode = \"fan_in\"\n - number of output units, if mode = \"fan_out\"\n - average of the numbers of input and output units, if mode = \"fan_avg\"\n\nWith `distribution=\"uniform\"`, samples are drawn from a uniform distribution\nwithin [-limit, limit], with `limit = sqrt(3 * scale / n)`.\n\nExamples:\n\n>>> def make_variables(k, initializer):\n... return (tf.Variable(initializer(shape=[k], dtype=tf.float32)),\n... 
tf.Variable(initializer(shape=[k, k], dtype=tf.float32)))\n>>> v1, v2 = make_variables(3, tf.initializers.VarianceScaling(scale=1.))\n>>> v1\n<tf.Variable ... shape=(3,) dtype=float32 ...>\n>>> v2\n<tf.Variable ... shape=(3, 3) dtype=float32 ...>\n>>> make_variables(4, tf.initializers.VarianceScaling(distribution='uniform'))\n(<tf.Variable ... shape=(4,) dtype=float32 ...>, <tf.Variable ... shape=(4, 4) dtype=float32 ...>)\n... if len(value) > 50:\n        value = value[:47] + '...'\n      self.assertFalse(output_value, f'Unexpected output to {output_type}: {value!r}')", "docstring": "Check that the output state matches expectations.\n\nIf, for example, you expect the program to print something to stdout and\nnothing to stderr before exiting with an error code, you would write\nassertOutputStateMatches(stdout=True, stderr=False, returncode=True).\n\nArgs:\n  **has_output: Whether each output type should have output."} +{"repo": "tensorflow", "function": "def on_execution(self, execution_index, execution):", "docstring": "Monitor method for top-level execution events.\n\nReturn values (if any) are ignored by the associated DebugDataReader.\n\nArgs:\n  execution_index: The index of the top-level execution event, as an int.\n  execution: An Execution data object, for a top-level op or function\n    execution event."} +{"repo": "tensorflow", "function": "def _NthElementGrad(op: ops.Operation, grad):\n    input = op.inputs[0]\n    output = op.outputs[0]\n    indicators = math_ops.cast(math_ops.equal(array_ops.expand_dims(output, -1), input), grad.dtype)\n    grad = array_ops.expand_dims(grad, -1)\n    num_selected = array_ops.expand_dims(math_ops.reduce_sum(indicators, -1), -1)\n    return [math_ops.divide(indicators, num_selected) * grad, None]", "docstring": "Return the gradients for NthElement.\n\nArgs:\n  op: The NthElementOp for which we need to generate gradients.\n  grad: Tensor. The gradients passed to the NthElementOp\n\nReturns:\n  A list of two tensors, the first being the gradient w.r.t. the input,\n  the second being the gradient w.r.t. the N (None)."} +{"repo": "keras", "function": "def get_current_remat_mode():\n    remat_scope_stack = global_state.get_global_attribute('remat_scope_stack')\n    if not remat_scope_stack:\n        return None\n    active_scope = remat_scope_stack[-1]\n    return RematMode(active_scope.mode, active_scope.output_size_threshold, active_scope.layer_names)", "docstring": "Get the current rematerialization mode and associated settings.\n\nReturns:\n    RematMode or None: The current rematerialization mode, or None if not\n    set."} +{"repo": "tensorflow", "function": "class _Entrypoint:\n    module: str\n    name: str\n    exported_symbol: exported_api.ExportedSymbol\n\n    def get_import(self, file_prefixes_to_strip: Sequence[str], module_prefix: str, use_lazy_loading: bool) -> str:\n        \"\"\"Returns the import statement for this entrypoint.\n\n        Args:\n          file_prefixes_to_strip: List of prefixes to strip from the file name.\n          module_prefix: A prefix to add to the import.\n          use_lazy_loading: Whether to use lazy loading or not.\n        \"\"\"\n        module_import_path = _get_import_path(self.exported_symbol.file_name, file_prefixes_to_strip, module_prefix)\n        alias = ''\n        symbol_name = self.exported_symbol.symbol_name\n        if self.name != symbol_name:\n            alias = f' as {self.name}'\n        if not use_lazy_loading:\n            return f'from {module_import_path} import {symbol_name}{alias} # line: {self.exported_symbol.line_no}'\n        else:\n            return f\"  '{self.name}': ('{module_import_path}', '{symbol_name}'), # line: {self.exported_symbol.line_no}\"", "docstring": "An entrypoint that was exposed by the use of a decorator.\n\nAttributes:\n  module: The public module that the symbol was exposed to. For example:\n    tensorflow.io.\n  name: The name the symbol was exported as. 
For example: decode_png.\n  exported_symbol: The symbol that this entrypoint refers back to."} +{"repo": "tensorflow", "function": "def placeholder_value(self, placeholder_context) -> Any:", "docstring": "Creates a placeholder for tracing.\n\ntf.function traces with the placeholder value rather than the actual value.\nFor example, a placeholder value can represent multiple different\nactual values. This means that the trace generated with that placeholder\nvalue is more general and reusable which saves expensive retracing.\n\nArgs:\n  placeholder_context: A context reserved for internal/future usage.\nFor the `Fruit` example shared above, implementing:\n\n```python\nclass FruitTraceType:\n  def placeholder_value(self, placeholder_context):\n    return Fruit()\n```\ninstructs tf.function to trace with the `Fruit()` objects\ninstead of the actual `Apple()` and `Mango()` objects when it receives a\ncall to `get_mixed_flavor(Apple(), Mango())`. For example, Tensor arguments\nare replaced with Tensors of similar shape and dtype, output from\na tf.Placeholder op.\n\nMore generally, placeholder values are the arguments of a tf.function,\nas seen from the function's body:\n```python\n@tf.function\ndef foo(x):\n  # Here `x` is the placeholder value\n  ...\n\nfoo(x) # Here `x` is the actual value\n```"} +{"repo": "transformers", "function": "def _get_model_info(func, parent_class):\n    from transformers.models import auto as auto_module\n    if parent_class is not None:\n        model_name_lowercase = get_model_name(parent_class)\n    else:\n        model_name_lowercase = get_model_name(func)\n    if model_name_lowercase and model_name_lowercase not in getattr(getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE['config_class'][0]), PLACEHOLDER_TO_AUTO_MODULE['config_class'][1]):\n        model_name_lowercase = model_name_lowercase.replace('_', '-')\n    class_name = func.__qualname__.split('.')[0]\n    if model_name_lowercase is None:\n        config_class = None\n    else:\n        try:\n            config_class = getattr(getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE['config_class'][0]), PLACEHOLDER_TO_AUTO_MODULE['config_class'][1])[model_name_lowercase]\n        except KeyError:\n            if model_name_lowercase in HARDCODED_CONFIG_FOR_MODELS:\n                config_class = HARDCODED_CONFIG_FOR_MODELS[model_name_lowercase]\n            else:\n                config_class = 'ModelConfig'\n                print(f'\ud83d\udea8 Config not found for {model_name_lowercase}. 
You can manually add it to HARDCODED_CONFIG_FOR_MODELS in utils/args_doc.py')\n    return (model_name_lowercase, class_name, config_class)", "docstring": "Extract model information from a function or its parent class.\n\nArgs:\n    func (`function`): The function to extract information from\n    parent_class (`class`): Optional parent class of the function"} +{"repo": "transformers", "function": "class FlaxCLIPTextModelOutput(ModelOutput):\n    text_embeds: jnp.ndarray = None\n    last_hidden_state: jnp.ndarray = None\n    hidden_states: Optional[Tuple[jnp.ndarray, ...]] = None\n    attentions: Optional[Tuple[jnp.ndarray, ...]] = None", "docstring": "Base class for text model's outputs that also contains a pooling of the last hidden states.\n\nArgs:\n    text_embeds (`jnp.ndarray` of shape `(batch_size, output_dim)`):\n        The text embeddings obtained by applying the projection layer to the pooled output of\n        [`FlaxCLIPTextModel`].\n    last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):\n        Sequence of hidden-states at the output of the last layer of the model.\n    hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n        Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape\n        `(batch_size, sequence_length, hidden_size)`.\n\n        Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n    attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n        Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n        sequence_length)`.\n\n        Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n        heads."} +{"repo": "tf-quant-finance", "function": "def get_shape(x: tf.Tensor, name: Optional[str]=None) -> Union[tf.TensorShape, types.IntTensor]:\n    name = 'get_shape' if name is None else name\n    with tf.name_scope(name):\n        x = tf.convert_to_tensor(x)\n        is_fully_defined = x.shape.is_fully_defined()\n        if is_fully_defined:\n            return x.shape\n        return tf.shape(x)", "docstring": "Returns static shape of `x` if it is fully defined, or dynamic, otherwise.\n\n#### Example\n```python\nimport tensorflow as tf\nimport tf_quant_finance as tff\n\nx = tf.zeros([5, 2])\nget_shape(x)\n# Expected: [5, 2]\n```\n\nArgs:\n  x: A tensor of any shape and `dtype`\n  name: Python string. The name to give to the ops created by this function.\n    Default value: `None` which maps to the default name\n    `get_shape`.\n\nReturns:\n  The shape of `x`, which is a `TensorShape` if the shape is fully defined, or a `Tensor`\n  for dynamically shaped `x`."} +{"repo": "transformers", "function": "class TensorBoardCallback(TrainerCallback):\n\n    def __init__(self, tb_writer=None):\n        has_tensorboard = is_tensorboard_available()\n        if not has_tensorboard:\n            raise RuntimeError('TensorBoardCallback requires tensorboard to be installed. 
Either update your PyTorch version or install tensorboardX.')\n if has_tensorboard:\n try:\n from torch.utils.tensorboard import SummaryWriter\n self._SummaryWriter = SummaryWriter\n except ImportError:\n try:\n from tensorboardX import SummaryWriter\n self._SummaryWriter = SummaryWriter\n except ImportError:\n self._SummaryWriter = None\n else:\n self._SummaryWriter = None\n self.tb_writer = tb_writer\n\n def _init_summary_writer(self, args, log_dir=None):\n log_dir = log_dir or args.logging_dir\n if self._SummaryWriter is not None:\n self.tb_writer = self._SummaryWriter(log_dir=log_dir)\n\n def on_train_begin(self, args, state, control, **kwargs):\n if not state.is_world_process_zero:\n return\n log_dir = None\n if state.is_hyper_param_search:\n trial_name = state.trial_name\n if trial_name is not None:\n log_dir = os.path.join(args.logging_dir, trial_name)\n if self.tb_writer is None:\n self._init_summary_writer(args, log_dir)\n if self.tb_writer is not None:\n self.tb_writer.add_text('args', args.to_json_string())\n if 'model' in kwargs:\n model = kwargs['model']\n if hasattr(model, 'config') and model.config is not None:\n model_config_json = model.config.to_json_string()\n self.tb_writer.add_text('model_config', model_config_json)\n\n def on_log(self, args, state, control, logs=None, **kwargs):\n if not state.is_world_process_zero:\n return\n if self.tb_writer is None:\n self._init_summary_writer(args)\n if self.tb_writer is not None:\n logs = rewrite_logs(logs)\n for k, v in logs.items():\n if isinstance(v, (int, float)):\n self.tb_writer.add_scalar(k, v, state.global_step)\n elif isinstance(v, str):\n self.tb_writer.add_text(k, v, state.global_step)\n else:\n logger.warning(f'''Trainer is attempting to log a value of \"{v}\" of type {type(v)} for key \"{k}\" as a scalar. This invocation of Tensorboard's writer.add_scalar() is incorrect so we dropped this attribute.''')\n self.tb_writer.flush()\n\n def on_train_end(self, args, state, control, **kwargs):\n if self.tb_writer:\n self.tb_writer.close()\n self.tb_writer = None", "docstring": "A [`TrainerCallback`] that sends the logs to [TensorBoard](https://www.tensorflow.org/tensorboard).\n\nArgs:\n tb_writer (`SummaryWriter`, *optional*):\n The writer to use. Will instantiate one if not set."} +{"repo": "tensorflow", "function": "def from_config(cls, config):\n return cls(**config)", "docstring": "Instantiates an initializer from a configuration dictionary.\n\nExample:\n\n```python\ninitializer = RandomUniform(-1, 1)\nconfig = initializer.get_config()\ninitializer = RandomUniform.from_config(config)\n```\n\nArgs:\n config: A Python dictionary. It will typically be the output of\n `get_config`.\n\nReturns:\n An Initializer instance."} +{"repo": "transformers", "function": "def format_image_annotations_as_coco(image_id: str, categories: list[int], areas: list[float], bboxes: list[tuple[float]]) -> dict:\n annotations = []\n for category, area, bbox in zip(categories, areas, bboxes):\n formatted_annotation = {'image_id': image_id, 'category_id': category, 'iscrowd': 0, 'area': area, 'bbox': list(bbox)}\n annotations.append(formatted_annotation)\n return {'image_id': image_id, 'annotations': annotations}", "docstring": "Format one set of image annotations to the COCO format\n\nArgs:\n image_id (str): image id. e.g. 
\"0001\"\n categories (List[int]): list of categories/class labels corresponding to provided bounding boxes\n areas (List[float]): list of corresponding areas to provided bounding boxes\n bboxes (List[Tuple[float]]): list of bounding boxes provided in COCO format\n ([center_x, center_y, width, height] in absolute coordinates)\n\nReturns:\n dict: {\n \"image_id\": image id,\n \"annotations\": list of formatted annotations\n }"} +{"repo": "tensorflow", "function": "def real(input, name=None):\n with ops.name_scope(name, 'Real', [input]) as name:\n input = ops.convert_to_tensor(input, name='input')\n if input.dtype.is_complex:\n real_dtype = input.dtype.real_dtype\n return gen_math_ops.real(input, Tout=real_dtype, name=name)\n elif input.dtype.is_numeric:\n return input\n else:\n raise TypeError('input must be a numeric tensor, but got tensor with dtype {}'.format(input.dtype))", "docstring": "Returns the real part of a complex (or real) tensor.\n\nGiven a tensor `input`, this operation returns a tensor of type `float` that\nis the real part of each element in `input` considered as a complex number.\n\nFor example:\n\n```python\nx = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\ntf.math.real(x) # [-2.25, 3.25]\n```\n\nIf `input` is already real, it is returned unchanged.\n\nArgs:\n input: A `Tensor`. Must have numeric type.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` of type `float32` or `float64`."} +{"repo": "transformers", "function": "def tokenize(self, text, never_split=None):\n never_split = self.never_split.union(set(never_split)) if never_split else self.never_split\n text = self._clean_text(text)\n if self.tokenize_chinese_chars:\n text = self._tokenize_chinese_chars(text)\n unicode_normalized_text = unicodedata.normalize('NFC', text)\n orig_tokens = whitespace_tokenize(unicode_normalized_text)\n split_tokens = []\n for token in orig_tokens:\n if token not in never_split:\n if self.do_lower_case:\n token = token.lower()\n if self.strip_accents is not False:\n token = self._run_strip_accents(token)\n elif self.strip_accents:\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token, never_split))\n output_tokens = whitespace_tokenize(' '.join(split_tokens))\n return output_tokens", "docstring": "Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.\n\nArgs:\n never_split (`List[str]`, *optional*)\n Kept for backward compatibility purposes. Now implemented directly at the base class level (see\n [`PreTrainedTokenizer.tokenize`]) List of token not to split."} +{"repo": "pytruth", "function": "def _GetActualMessage(self):\n if six.PY2:\n return self._actual.message\n return self._actual.args[0] if self._actual.args else ''", "docstring": "Returns the \"message\" portion of an exception.\n\nMany Python 2 exceptions have a \"message\" attribute, so return that directly\nin Python 2. 
However, this attribute is never present in Python 3, so return\nthe first argument passed to the exception instance as the message.\n\nReturns:\n  String"} +{"repo": "tensorflow", "function": "def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):\n    if ref.dtype._is_ref_dtype:\n        return gen_state_ops.scatter_nd_update(ref, indices, updates, use_locking, name)\n    return ref._lazy_read(gen_state_ops.resource_scatter_nd_update(ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype), name=name))", "docstring": "Applies sparse `updates` to individual values or slices in a Variable.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be an integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is a `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nFor example, say we want to update 4 scattered elements of a rank-1 tensor with\n8 elements. In Python, that update would look like this:\n\n```python\n    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n    indices = tf.constant([[4], [3], [1], [7]])\n    updates = tf.constant([9, 10, 11, 12])\n    update = tf.compat.v1.scatter_nd_update(ref, indices, updates)\n    with tf.compat.v1.Session() as sess:\n      print(sess.run(update))\n```\n\nThe resulting update to ref would look like this:\n\n    [1, 11, 3, 10, 9, 6, 7, 12]\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.\n\nArgs:\n  ref: A Variable.\n  indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n    A tensor of indices into ref.\n  updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated\n    values to add to ref.\n  use_locking: An optional `bool`. Defaults to `True`. If `True`, the assignment will\n    be protected by a lock; otherwise the behavior is undefined,\n    but may exhibit less contention.\n  name: A name for the operation (optional).\n\nReturns:\n  The value of the variable after the update."} +{"repo": "tensorflow", "function": "def register(self, candidate, name=None):\n    if not name:\n        name = candidate.__name__\n    if name in self._registry:\n        frame = self._registry[name][_LOCATION_TAG]\n        raise KeyError(\"Registering two %s with name '%s'! 
(Previous registration was in %s %s:%d)\" % (self._name, name, frame.name, frame.filename, frame.lineno))\n logging.vlog(1, 'Registering %s (%s) in %s.', name, candidate, self._name)\n stack = traceback.extract_stack(limit=3)\n stack_index = min(2, len(stack) - 1)\n if stack_index >= 0:\n location_tag = stack[stack_index]\n else:\n location_tag = ('UNKNOWN', 'UNKNOWN', 'UNKNOWN', 'UNKNOWN', 'UNKNOWN')\n self._registry[name] = {_TYPE_TAG: candidate, _LOCATION_TAG: location_tag}", "docstring": "Registers a Python object \"candidate\" for the given \"name\".\n\nArgs:\n candidate: The candidate object to add to the registry.\n name: An optional string specifying the registry key for the candidate.\n If None, candidate.__name__ will be used.\nRaises:\n KeyError: If same name is used twice."} +{"repo": "tensorflow", "function": "def get_reachable_from_inputs(inputs, targets=None):\n inputs = nest.flatten(inputs, expand_composites=True)\n reachable = object_identity.ObjectIdentitySet(inputs)\n if targets:\n remaining_targets = object_identity.ObjectIdentitySet(nest.flatten(targets))\n queue = collections.deque(inputs)\n while queue:\n x = queue.pop()\n if isinstance(x, tuple(_user_convertible_tensor_types)):\n continue\n if isinstance(x, ops.Operation):\n outputs = x.outputs[:] or []\n outputs += x._control_outputs\n elif isinstance(x, variables.Variable):\n try:\n outputs = [x.op]\n except AttributeError:\n outputs = []\n elif tensor_util.is_tf_type(x):\n outputs = x.consumers()\n else:\n raise TypeError('Expected Operation, Variable, or Tensor, got ' + str(x))\n for y in outputs:\n if y not in reachable:\n reachable.add(y)\n if targets:\n remaining_targets.discard(y)\n queue.appendleft(y)\n if targets and (not remaining_targets):\n return reachable\n return reachable", "docstring": "Returns the set of tensors/ops reachable from `inputs`.\n\nStops if all targets have been found (target is optional).\n\nOnly valid in Symbolic mode, not Eager mode.\n\nArgs:\n inputs: List of tensors.\n targets: List of tensors.\n\nReturns:\n A set of tensors reachable from the inputs (includes the inputs themselves)."} +{"repo": "fhir-py", "function": "def __new__(cls, *args) -> 'InvokeExpressionNode':\n if not args:\n return super().__new__(cls)\n _, identifier, parent_node = args\n if identifier == 'reference' and isinstance(parent_node.return_type, _fhir_path_data_types.ReferenceStructureDataType):\n return super().__new__(InvokeReferenceNode)\n return super().__new__(InvokeExpressionNode)", "docstring": "Creates a new InvokeExpressionNode node or one of its subclasses.\n\nCreates either an InvokeExpressionNode or InvokeReferenceNode, a subclass of\nInvokeExpressionNode. The InvokeReferenceNode is returned when a field named\n'reference' is invoked against a FHIR Reference resource. Database backends\nhave special behavior for reference nodes. 
This reference-specific node type\nallows them to define visitors to implement their reference-specific logic.\n\nArgs:\n *args: The args passed to `__init__`.\n\nReturns:\n A new InvokeExpressionNode of the appropriate type."} +{"repo": "fhir-py", "function": "def is_slice_on_extension(element_definition: message.Message) -> bool:\n type_codes = element_type_codes(element_definition)\n return 'Extension' in type_codes and _SLICE_ON_EXTENSION_ID_RE.search(cast(Any, element_definition).id.value) is not None", "docstring": "Returns `True` if the given element is describing a slice on an extension.\n\nMore information about extensions:\nhttp://hl7.org/fhir/defining-extensions.html.\n\nArgs:\n element_definition: The element definition (element) that we are checking."} +{"repo": "keras", "function": "def floor_divide(x1, x2):\n if any_symbolic_tensors((x1, x2)):\n return FloorDivide().symbolic_call(x1, x2)\n return backend.numpy.floor_divide(x1, x2)", "docstring": "Returns the largest integer smaller or equal to the division of inputs.\n\nArgs:\n x1: Numerator.\n x2: Denominator.\n\nReturns:\n Output tensor, `y = floor(x1/x2)`"} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, Blip2TextModelOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n query_embeds = self.embeddings(input_ids=input_ids, position_ids=position_ids)\n text_outputs = self.qformer(query_embeds=query_embeds, query_length=0, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n pooled_output = text_outputs[0] if not return_dict else text_outputs.last_hidden_state\n pooled_output = pooled_output.to(dtype=self.text_projection.weight.dtype)\n text_embeds = self.text_projection(pooled_output)\n text_embeds = nn.functional.normalize(text_embeds, dim=-1)\n if not return_dict:\n outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]\n return tuple((output for output in outputs if output is not None))\n return Blip2TextModelOutput(text_embeds=text_embeds, last_hidden_state=text_outputs.last_hidden_state, hidden_states=text_outputs.hidden_states, attentions=text_outputs.attentions)", "docstring": "Examples:\n\n```python\n>>> import torch\n>>> from transformers import AutoProcessor, Blip2TextModelWithProjection\n\n>>> device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n>>> model = Blip2TextModelWithProjection.from_pretrained(\n... \"Salesforce/blip2-itm-vit-g\", torch_dtype=torch.float16\n... )\n\n>>> model.to(device) # doctest: +IGNORE_RESULT\n\n>>> processor = AutoProcessor.from_pretrained(\"Salesforce/blip2-itm-vit-g\")\n\n>>> inputs = processor(text=[\"a photo of a cat\", \"a photo of a dog\"], return_tensors=\"pt\").to(device)\n\n>>> outputs = model(**inputs)\n>>> text_embeds = outputs.text_embeds\n>>> print(text_embeds.shape)\ntorch.Size([2, 7, 256])\n```"} +{"repo": "tensorflow", "function": "def on_train_end(self, logs=None):", "docstring": "Called at the end of training.\n\nSubclasses should override for any actions to run.\n\nArgs:\n logs: Dict. 
Currently the output of the last call to `on_epoch_end()`\n is passed to this argument for this method but that may change in\n the future."} +{"repo": "tensorflow", "function": "def _clone_functional_model(model, input_tensors=None, layer_fn=_clone_layer):\n if not isinstance(model, Model):\n raise ValueError('Expected `model` argument to be a `Model` instance, got ', model)\n if isinstance(model, Sequential):\n raise ValueError('Expected `model` argument to be a functional `Model` instance, got a `Sequential` instance instead:', model)\n if not model._is_graph_network:\n raise ValueError('Expected `model` argument to be a functional `Model` instance, but got a subclass model instead.')\n new_input_layers = {}\n if input_tensors is not None:\n input_tensors = nest.flatten(input_tensors)\n for i, input_tensor in enumerate(input_tensors):\n original_input_layer = model._input_layers[i]\n if not backend.is_keras_tensor(input_tensor):\n name = original_input_layer.name\n input_tensor = Input(tensor=input_tensor, name='input_wrapper_for_' + name)\n newly_created_input_layer = input_tensor._keras_history.layer\n new_input_layers[original_input_layer] = newly_created_input_layer\n else:\n new_input_layers[original_input_layer] = original_input_layer\n if not callable(layer_fn):\n raise ValueError('Expected `layer_fn` argument to be a callable.')\n model_configs, created_layers = _clone_layers_and_model_config(model, new_input_layers, layer_fn)\n input_tensors, output_tensors, created_layers = functional.reconstruct_from_config(model_configs, created_layers=created_layers)\n metrics_names = model.metrics_names\n model = Model(input_tensors, output_tensors, name=model.name)\n ancillary_layers = [layer for layer in created_layers.values() if layer not in model.layers]\n if ancillary_layers:\n new_nodes = nest.flatten([layer.inbound_nodes[1:] if functional._should_skip_first_node(layer) else layer.inbound_nodes for layer in created_layers.values()])\n _insert_ancillary_layers(model, ancillary_layers, metrics_names, new_nodes)\n return model", "docstring": "Clone a functional `Model` instance.\n\nModel cloning is similar to calling a model on new inputs,\nexcept that it creates new layers (and thus new weights) instead\nof sharing the weights of the existing layers.\n\nInput layers are always cloned.\n\nArgs:\n model: Instance of `Model`.\n input_tensors: optional list of input tensors\n to build the model upon. If not provided,\n placeholders will be created.\n layer_fn: callable to be applied on non-input layers in the model. By\n default it clones the layer. Another example is to preserve the layer\n to share the weights. 
This is required when we create a per-replica\n copy of the model with distribution strategy; we want the weights to\n be shared but still feed inputs separately so we create new input\n layers.\n\nReturns:\n An instance of `Model` reproducing the behavior\n of the original model, on top of new inputs tensors,\n using newly instantiated weights.\n\nRaises:\n ValueError: in case of invalid `model` argument value or `layer_fn`\n argument value."} +{"repo": "tensorflow", "function": "def imag(input, name=None):\n with ops.name_scope(name, 'Imag', [input]) as name:\n input = ops.convert_to_tensor(input, name='input')\n if input.dtype.is_complex:\n return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)\n else:\n return array_ops.zeros_like(input)", "docstring": "Returns the imaginary part of a complex (or real) tensor.\n\nGiven a tensor `input`, this operation returns a tensor of type `float` that\nis the imaginary part of each element in `input` considered as a complex\nnumber. If `input` is real, a tensor of all zeros is returned.\n\nFor example:\n\n```python\nx = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])\ntf.math.imag(x) # [4.75, 5.75]\n```\n\nArgs:\n input: A `Tensor`. Must be one of the following types: `float`, `double`,\n `complex64`, `complex128`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` of type `float32` or `float64`."} +{"repo": "transformers", "function": "def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[List[dict]]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **loss_kwargs) -> Union[Tuple[torch.FloatTensor], RTDetrV2ObjectDetectionOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.model(pixel_values, pixel_mask=pixel_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n denoising_meta_values = outputs.denoising_meta_values if return_dict else outputs[-1] if self.training else None\n outputs_class = outputs.intermediate_logits if return_dict else outputs[2]\n outputs_coord = outputs.intermediate_reference_points if return_dict else outputs[3]\n predicted_corners = outputs.intermediate_predicted_corners if return_dict else outputs[4]\n initial_reference_points = outputs.initial_reference_points if return_dict else outputs[5]\n logits = outputs_class[:, -1]\n pred_boxes = outputs_coord[:, -1]\n loss, loss_dict, auxiliary_outputs, enc_topk_logits, enc_topk_bboxes = (None, None, None, None, None)\n if labels is not None:\n enc_topk_logits = outputs.enc_topk_logits if return_dict else outputs[-5]\n enc_topk_bboxes = outputs.enc_topk_bboxes if return_dict else outputs[-4]\n loss, loss_dict, auxiliary_outputs = self.loss_function(logits, labels, self.device, pred_boxes, self.config, outputs_class, outputs_coord, enc_topk_logits=enc_topk_logits, enc_topk_bboxes=enc_topk_bboxes, 
denoising_meta_values=denoising_meta_values, predicted_corners=predicted_corners, initial_reference_points=initial_reference_points, **loss_kwargs)\n if not return_dict:\n if auxiliary_outputs is not None:\n output = (logits, pred_boxes) + (auxiliary_outputs,) + outputs\n else:\n output = (logits, pred_boxes) + outputs\n return (loss, loss_dict) + output if loss is not None else output\n return RTDetrV2ObjectDetectionOutput(loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, intermediate_hidden_states=outputs.intermediate_hidden_states, intermediate_logits=outputs.intermediate_logits, intermediate_reference_points=outputs.intermediate_reference_points, intermediate_predicted_corners=outputs.intermediate_predicted_corners, initial_reference_points=outputs.initial_reference_points, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, init_reference_points=outputs.init_reference_points, enc_topk_logits=outputs.enc_topk_logits, enc_topk_bboxes=outputs.enc_topk_bboxes, enc_outputs_class=outputs.enc_outputs_class, enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, denoising_meta_values=outputs.denoising_meta_values)", "docstring": "inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you\n can choose to directly pass a flattened representation of an image.\ndecoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):\n Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an\n embedded representation.\nlabels (`List[Dict]` of len `(batch_size,)`, *optional*):\n Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the\n following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch\n respectively). 
The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes\n in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`.\n\nExamples:\n\n```python\n>>> from transformers import RTDetrV2ImageProcessor, RTDetrV2ForObjectDetection\n>>> from PIL import Image\n>>> import requests\n>>> import torch\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> image_processor = RTDetrV2ImageProcessor.from_pretrained(\"PekingU/RTDetrV2_r50vd\")\n>>> model = RTDetrV2ForObjectDetection.from_pretrained(\"PekingU/RTDetrV2_r50vd\")\n\n>>> # prepare image for the model\n>>> inputs = image_processor(images=image, return_tensors=\"pt\")\n\n>>> # forward pass\n>>> outputs = model(**inputs)\n\n>>> logits = outputs.logits\n>>> list(logits.shape)\n[1, 300, 80]\n\n>>> boxes = outputs.pred_boxes\n>>> list(boxes.shape)\n[1, 300, 4]\n\n>>> # convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)\n>>> target_sizes = torch.tensor([image.size[::-1]])\n>>> results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[\n... 0\n... ]\n\n>>> for score, label, box in zip(results[\"scores\"], results[\"labels\"], results[\"boxes\"]):\n... box = [round(i, 2) for i in box.tolist()]\n... print(\n... f\"Detected {model.config.id2label[label.item()]} with confidence \"\n... f\"{round(score.item(), 3)} at location {box}\"\n... )\nDetected sofa with confidence 0.97 at location [0.14, 0.38, 640.13, 476.21]\nDetected cat with confidence 0.96 at location [343.38, 24.28, 640.14, 371.5]\nDetected cat with confidence 0.958 at location [13.23, 54.18, 318.98, 472.22]\nDetected remote with confidence 0.951 at location [40.11, 73.44, 175.96, 118.48]\nDetected remote with confidence 0.924 at location [333.73, 76.58, 369.97, 186.99]\n```"} +{"repo": "tensorflow", "function": "def deserialize(self, stamp_token, serialized_proto):\n return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token, serialized_proto)", "docstring": "Deserialize the input proto and resets the ensemble from it.\n\nArgs:\n stamp_token: int64 scalar Tensor to denote the stamp of the resource.\n serialized_proto: string scalar Tensor of the serialized proto.\n\nReturns:\n Operation (for dependencies)."} +{"repo": "tensorflow", "function": "def _current_graph(op_input_list, graph=None):\n current_default_graph = ops.get_default_graph()\n if current_default_graph.building_function:\n return current_default_graph\n op_input_list = tuple(op_input_list)\n if graph and (not isinstance(graph, ops.Graph)):\n raise TypeError('Input graph needs to be a Graph: %s' % (graph,))\n original_graph_element = None\n for op_input in op_input_list:\n if isinstance(op_input, (ops.Operation, tensor_lib.Tensor, composite_tensor.CompositeTensor)) and (not isinstance(op_input, tensor_lib.Tensor) or type(op_input) == tensor_lib.Tensor):\n graph_element = op_input\n else:\n graph_element = _as_graph_element(op_input)\n if graph_element is not None:\n if not graph:\n original_graph_element = graph_element\n graph = getattr(graph_element, 'graph', None)\n elif original_graph_element is not None:\n _assert_same_graph(original_graph_element, graph_element)\n elif graph_element.graph is not graph:\n raise ValueError('%s is not from the passed-in graph.' 
% graph_element)\n return graph or current_default_graph", "docstring": "Returns the appropriate graph to use for the given inputs.\n\nThis library method provides a consistent algorithm for choosing the graph\nin which an Operation should be constructed:\n\n1. If the default graph is being used to construct a function, we\n use the default graph.\n2. If the \"graph\" is specified explicitly, we validate that all of the inputs\n in \"op_input_list\" are compatible with that graph.\n3. Otherwise, we attempt to select a graph from the first Operation-\n or Tensor-valued input in \"op_input_list\", and validate that all other\n such inputs are in the same graph.\n4. If the graph was not specified and it could not be inferred from\n \"op_input_list\", we attempt to use the default graph.\n\nArgs:\n op_input_list: A list of inputs to an operation, which may include `Tensor`,\n `Operation`, and other objects that may be converted to a graph element.\n graph: (Optional) The explicit graph to use.\n\nRaises:\n TypeError: If op_input_list is not a list or tuple, or if graph is not a\n Graph.\n ValueError: If a graph is explicitly passed and not all inputs are from it,\n or if the inputs are from multiple graphs, or we could not find a graph\n and there was no default graph.\n\nReturns:\n The appropriate graph to use for the given inputs."} +{"repo": "keras", "function": "def plot_model(model, to_file='model.png', show_shapes=False, show_dtype=False, show_layer_names=False, rankdir='TB', expand_nested=False, dpi=200, show_layer_activations=False, show_trainable=False, **kwargs):\n if not model.built:\n raise ValueError('This model has not yet been built. Build the model first by calling `build()` or by calling the model on a batch of data.')\n if not check_pydot():\n message = 'You must install pydot (`pip install pydot`) for `plot_model` to work.'\n if 'IPython.core.magics.namespace' in sys.modules:\n io_utils.print_msg(message)\n return\n else:\n raise ImportError(message)\n if not check_graphviz():\n message = 'You must install graphviz (see instructions at https://graphviz.gitlab.io/download/) for `plot_model` to work.'\n if 'IPython.core.magics.namespace' in sys.modules:\n io_utils.print_msg(message)\n return\n else:\n raise ImportError(message)\n if kwargs.pop('layer_range', None) is not None:\n raise ValueError('Argument `layer_range` is no longer supported.')\n if kwargs:\n raise ValueError(f'Unrecognized keyword arguments: {kwargs}')\n dot = model_to_dot(model, show_shapes=show_shapes, show_dtype=show_dtype, show_layer_names=show_layer_names, rankdir=rankdir, expand_nested=expand_nested, dpi=dpi, show_layer_activations=show_layer_activations, show_trainable=show_trainable)\n to_file = str(to_file)\n if dot is None:\n return\n _, extension = os.path.splitext(to_file)\n if not extension:\n extension = 'png'\n else:\n extension = extension[1:]\n dot.write(to_file, format=extension)\n if extension != 'pdf':\n try:\n from IPython import display\n return display.Image(filename=to_file)\n except ImportError:\n pass", "docstring": "Converts a Keras model to dot format and save to a file.\n\nExample:\n\n```python\ninputs = ...\noutputs = ...\nmodel = keras.Model(inputs=inputs, outputs=outputs)\n\ndot_img_file = '/tmp/model_1.png'\nkeras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)\n```\n\nArgs:\n model: A Keras model instance\n to_file: File name of the plot image.\n show_shapes: whether to display shape information.\n show_dtype: whether to display layer dtypes.\n 
show_layer_names: whether to display layer names.\n rankdir: `rankdir` argument passed to PyDot,\n a string specifying the format of the plot: `\"TB\"`\n creates a vertical plot; `\"LR\"` creates a horizontal plot.\n expand_nested: whether to expand nested Functional models\n into clusters.\n dpi: Image resolution in dots per inch.\n show_layer_activations: Display layer activations (only for layers that\n have an `activation` property).\n show_trainable: whether to display if a layer is trainable.\n\nReturns:\n A Jupyter notebook Image object if Jupyter is installed.\n This enables in-line display of the model plots in notebooks."} +{"repo": "transformers", "function": "class AriaSharedExpertsMLP(nn.Module):\n\n def __init__(self, config: AriaTextConfig):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.intermediate_size = config.intermediate_size * config.moe_num_shared_experts\n self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)\n self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)\n self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)\n self.act_fn = ACT2FN[config.hidden_act]\n\n def forward(self, x):\n down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n return down_proj", "docstring": "Shared Expert MLP for shared experts.\n\nUnlike routed experts, shared experts process all tokens without routing.\nThis class reconfigures the intermediate size in comparison to the LlamaMLP.\n\nArgs:\n config (`AriaTextConfig`): Configuration object for the Aria language model."} +{"repo": "tensorflow", "function": "def _check_params(window_length, dtype):\n if not dtype.is_floating:\n raise ValueError('dtype must be a floating point type. Found %s' % dtype)\n window_length = ops.convert_to_tensor(window_length, dtype=dtypes.int32)\n window_length.shape.assert_has_rank(0)\n return window_length", "docstring": "Check window_length and dtype params.\n\nArgs:\n window_length: A scalar value or `Tensor`.\n dtype: The data type to produce. 
Must be a floating point type.\n\nReturns:\n window_length converted to a tensor of type int32.\n\nRaises:\n ValueError: If `dtype` is not a floating point type or window_length is not\n a scalar."} +{"repo": "tensorflow", "function": "def _prefix_output_keys(self, output_dict, output_name):\n new_outputs = {}\n for key, val in output_dict.items():\n key = self._prefix_key(key, output_name)\n new_outputs[key] = val\n return new_outputs", "docstring": "Prepend output_name to the output_dict keys if it doesn't exist.\n\nThis produces predictable prefixes for the pre-determined outputs\nof SupervisedOutput.\n\nArgs:\n output_dict: dict of string to Tensor, assumed valid.\n output_name: prefix string to prepend to existing keys.\n\nReturns:\n dict with updated keys and existing values."} +{"repo": "tensorflow", "function": "def is_chief(cluster_spec=None, task_type=None, task_id=None):\n if has_worker_context():\n return dc_context.get_current_worker_context().is_chief\n _validate_cluster_spec(cluster_spec, task_type, task_id)\n cluster_spec = normalize_cluster_spec(cluster_spec).as_dict()\n if task_type == 'chief' or task_type == 'evaluator':\n return True\n if 'chief' not in cluster_spec and task_type == 'worker' and (task_id == 0):\n return True\n return False", "docstring": "Returns whether the given task is chief in the cluster.\n\nSince there is at most one evaluator and the evaluator itself should be\nindependent of the training cluster, the evaluator job is also a chief job on\nits own.\n\nIf this is currently running under a `_WorkerContext` of distribute\ncoordinator, the arguments can be omitted as the result is already available.\n\nArgs:\n cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object specifying the\n cluster configurations.\n task_type: the task type in the cluster.\n task_id: the task id in the cluster.\n\nReturns:\n a boolean indicating whether the given task is chief.\n\nRaises:\n ValueError: if `task_type` is not in the `cluster_spec` or `task_id` exceeds\n the maximum id of the `task_type`."} +{"repo": "transformers", "function": "def tokenize(self, text, never_split=None):\n never_split = self.never_split.union(set(never_split)) if never_split else self.never_split\n text = self._clean_text(text)\n if self.tokenize_chinese_chars:\n text = self._tokenize_chinese_chars(text)\n unicode_normalized_text = unicodedata.normalize('NFC', text)\n orig_tokens = whitespace_tokenize(unicode_normalized_text)\n split_tokens = []\n for token in orig_tokens:\n if token not in never_split:\n if self.do_lower_case:\n token = token.lower()\n if self.strip_accents is not False:\n token = self._run_strip_accents(token)\n elif self.strip_accents:\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token, never_split))\n output_tokens = whitespace_tokenize(' '.join(split_tokens))\n return output_tokens", "docstring": "Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.\n\nArgs:\n never_split (`List[str]`, *optional*)\n Kept for backward compatibility purposes. 
Now implemented directly at the base class level (see\n        [`PreTrainedTokenizer.tokenize`]) List of tokens not to split."} +{"repo": "fhir-py", "function": "def _validate_fhir_constraints(msg: message.Message, base_name: str, primitive_handler_: primitive_handler.PrimitiveHandler) -> None:\n    if annotation_utils.is_primitive_type(msg):\n        _ = primitive_handler_.primitive_wrapper_from_primitive(msg)\n        return\n    if proto_utils.is_message_type(msg, any_pb2.Any):\n        return\n    for field in msg.DESCRIPTOR.fields:\n        field_name = f'{base_name}.{proto_utils.json_field_name(field)}'\n        _validate_field(msg, field, field_name, primitive_handler_)\n    for oneof in msg.DESCRIPTOR.oneofs:\n        if msg.WhichOneof(oneof.name) is None and (not oneof.GetOptions().HasExtension(annotations_pb2.fhir_oneof_is_optional)):\n            raise fhir_errors.InvalidFhirError(f'Empty oneof: `{oneof.full_name}`.')", "docstring": "Iterates over fields of the provided message and validates constraints.\n\nArgs:\n  msg: The message to validate.\n  base_name: The root message name for recursive validation of nested message\n    fields.\n  primitive_handler_: Responsible for returning PrimitiveWrappers.\n\nRaises:\n  fhir_errors.InvalidFhirError: In the event that a field is found to be\n  violating FHIR constraints or a required oneof is not set."} +{"repo": "tensorflow", "function": "def _maybe_partial_apply_variables(fn, args, kwargs):\n\n    def is_distributed_var(x):\n        flat = nest.flatten(x)\n        return flat and isinstance(flat[0], values.DistributedVariable)\n    var_kwargs = {}\n    nonvar_kwargs = {}\n    if kwargs:\n        var_kwargs = {k: v for k, v in kwargs.items() if is_distributed_var(v)}\n    if var_kwargs:\n        nonvar_kwargs = {k: v for k, v in kwargs.items() if not is_distributed_var(v)}\n    positional_args = []\n    index_of_star_args = None\n    for i, p in enumerate(tf_inspect.signature(fn).parameters.values()):\n        if i == 0 and p.name == 'self':\n            continue\n        if p.kind == tf_inspect.Parameter.POSITIONAL_OR_KEYWORD:\n            positional_args.append(p.name)\n        elif p.kind == tf_inspect.Parameter.VAR_POSITIONAL:\n            index_of_star_args = i\n        elif p.kind == tf_inspect.Parameter.POSITIONAL_ONLY:\n            if var_kwargs or any((is_distributed_var(a) for a in args)):\n                raise ValueError(f'Mixing Variables and positional-only parameters not supported by TPUStrategy. Received {len(var_kwargs)} DistributedVariables in **kwargs and {sum((is_distributed_var(a) for a in args))} in *args, expected zero for both.')\n            return (fn, args, kwargs)\n    star_args = []\n    have_seen_var_arg = False\n    for i, a in enumerate(args):\n        if is_distributed_var(a):\n            if index_of_star_args is not None and i >= index_of_star_args:\n                raise ValueError('TPUStrategy.run() cannot handle Variables passed to *args. Either name the function argument, or capture the Variable implicitly.')\n            if len(positional_args) <= i:\n                raise ValueError('Too many positional arguments passed to call to TPUStrategy.run().')\n            var_kwargs[positional_args[i]] = a\n            have_seen_var_arg = True\n        else:\n            if index_of_star_args is not None and i >= index_of_star_args:\n                if have_seen_var_arg:\n                    raise ValueError('TPUStrategy.run() cannot handle both Variables and a mix of positional args and *args. 
Either remove the *args, or capture the Variable implicitly.')\n                else:\n                    star_args.append(a)\n                    continue\n            if len(positional_args) <= i:\n                raise ValueError('Too many positional arguments passed to call to TPUStrategy.run().')\n            nonvar_kwargs[positional_args[i]] = a\n    if var_kwargs:\n        return (functools.partial(fn, **var_kwargs), star_args, nonvar_kwargs)\n    return (fn, args, kwargs)", "docstring": "Inspects arguments to partially apply any DistributedVariable.\n\nThis avoids an automatic cast of the current variable value to tensor.\n\nNote that a variable may be captured implicitly with Python scope instead of\npassing it to run(), but supporting run() keeps behavior consistent\nwith MirroredStrategy.\n\nSince positional arguments must be applied from left to right, this function\ndoes some tricky function inspection to move variable positional arguments\ninto kwargs. As a result of this, we can't support passing Variables as *args,\nnor as args to functions which combine both explicit positional arguments and\n*args.\n\nArgs:\n  fn: The function to run, as passed to run().\n  args: Positional arguments to fn, as passed to run().\n  kwargs: Keyword arguments to fn, as passed to run().\n\nReturns:\n  A tuple of the function (possibly wrapped), args, kwargs (both\n  possibly filtered, with members of args possibly moved to kwargs).\n  If no variables are found, this function is a noop.\n\nRaises:\n  ValueError: If the function signature makes unsupported use of *args, or if\n    too many arguments are passed."} +{"repo": "tf-quant-finance", "function": "def _prepare_grid(self, times, grid_step):\n    grid = tf.range(0.0, times[-1], grid_step, dtype=self._dtype)\n    all_times = tf.concat([grid, times], axis=0)\n    mask = tf.concat([tf.zeros_like(grid, dtype=tf.bool), tf.ones_like(times, dtype=tf.bool)], axis=0)\n    perm = tf.argsort(all_times, stable=True)\n    all_times = tf.gather(all_times, perm)\n    mask = tf.gather(mask, perm)\n    return (all_times, mask)", "docstring": "Prepares grid of times for path generation.\n\nArgs:\n  times: Rank 1 `Tensor` of increasing positive real values. The times at\n    which the path points are to be evaluated.\n  grid_step: Rank 0 real `Tensor`. Maximal distance between points in\n    resulting grid.\n\nReturns:\n  Tuple `(all_times, mask)`.\n  `all_times` is a 1-D real `Tensor` containing all points from `times` and\n  whose intervals are at most `grid_step`.\n  `mask` is a boolean 1-D tensor of the same shape as `all_times`, showing\n  which elements of `all_times` correspond to values from `times`.\n  Guarantees that `all_times[0]=0` and `mask[0]=False`.\n  `all_times` is sorted ascending and may contain duplicates."} +{"repo": "tensorflow", "function": "def advise(graph=None, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):\n    if not graph and (not context.executing_eagerly()):\n        graph = ops.get_default_graph()\n    if options == _DEFAULT_ADVISE_OPTIONS:\n        options = ALL_ADVICE.copy()\n    op_log = tfprof_logger.merge_default_with_oplog(graph, None, run_meta, add_trace=True)\n    run_meta_str = run_meta.SerializeToString() if run_meta else b''\n    opts = _build_advisor_options(options)\n    ret = tfprof_output_pb2.AdviceProto()\n    ret.ParseFromString(print_mdl.PrintModelAnalysis(_graph_string(graph), run_meta_str, op_log.SerializeToString(), 'advise'.encode('utf-8'), opts.SerializeToString()))\n    return ret", "docstring": "Auto profile and advise.\n\n  Builds profiles and automatically checks anomalies of various\n  aspects. 
For more details:\n https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md\n\nArgs:\n graph: tf.Graph. If None and eager execution is not enabled, use default\n graph.\n run_meta: optional tensorflow.RunMetadata proto. It is necessary to\n support run time information profiling, such as time and memory.\n options: see ALL_ADVICE example above. Default checks everything.\n\nReturns:\n Returns AdviceProto proto"} +{"repo": "transformers", "function": "def flatten(index, name='segmented_flatten'):\n batch_size = torch.prod(torch.tensor(list(index.batch_shape())))\n offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments\n offset = offset.view(index.batch_shape())\n for _ in range(index.batch_dims, len(index.indices.size())):\n offset = offset.unsqueeze(-1)\n indices = offset + index.indices\n return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0)", "docstring": "Flattens a batched index map (which is typically of shape batch_size, seq_length) to a 1d index map. This operation\nrelabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by\n*num_segments* * (k - 1). The result is a tensor with *num_segments* multiplied by the number of elements in the\nbatch.\n\nArgs:\n index (`IndexMap`):\n IndexMap to flatten.\n name (`str`, *optional*, defaults to 'segmented_flatten'):\n Name for the operation. Currently not used\n\nReturns:\n (`IndexMap`): The flattened IndexMap."} +{"repo": "tensorflow", "function": "def build(self):\n\n def _create_per_worker_dataset():\n dataset = self._dataset_fn()\n return dataset\n per_worker_dataset = self._coordinator._create_per_worker_resources(_create_per_worker_dataset)\n dataset_fn_output_type_spec = self._dataset_fn.structured_outputs._type_spec\n for dataset_remote_value in per_worker_dataset._values:\n dataset_remote_value._type_spec = dataset_fn_output_type_spec\n return per_worker_dataset", "docstring": "Trigger dataset creation on workers without creating an iterator.\n\nReturns:\n A PerWorkerValues object containing a tuple of RemoteValues, themselves\n containing the built Dataset for each worker"} +{"repo": "temporian", "function": "def infer_graph(inputs: Optional[Set[EventSetNode]], outputs: Set[EventSetNode]) -> Graph:\n graph = Graph()\n graph.outputs.update(outputs)\n pending_nodes: Set[EventSetNode] = outputs.copy()\n done_nodes: Set[EventSetNode] = set()\n missing_nodes: Set[EventSetNode] = set()\n while pending_nodes:\n node = next(iter(pending_nodes))\n pending_nodes.remove(node)\n assert node not in done_nodes\n graph.add_node(node)\n if inputs is not None and node in inputs:\n graph.inputs.add(node)\n continue\n if node.creator is None:\n if inputs is not None:\n missing_nodes.add(node)\n else:\n graph.inputs.add(node)\n continue\n graph.add_operator(node.creator)\n for input_node in node.creator.inputs.values():\n if input_node in done_nodes:\n continue\n pending_nodes.add(input_node)\n for output_node in node.creator.outputs.values():\n graph.add_node(output_node)\n if missing_nodes:\n raise ValueError(f'The following input nodes are required but not provided as input:\\n{missing_nodes}')\n for e in graph.nodes:\n graph.add_sampling(e.sampling_node)\n for f in e.feature_nodes:\n graph.add_feature(f)\n return graph", "docstring": "Extracts the nodes in between the output and input nodes.\n\nIf inputs is set, fails if outputs cannot be computed from `inputs`.\nIf inputs is 
not set, infers the required set of inputs.\n\nArgs:\n inputs: Set of available input nodes. If None, inputs are inferred.\n outputs: Set of expected output nodes.\n\nReturns:\n The inferred graph.\n\nRaises:\n ValueError: If there are repeated nodes in the `inputs`; an\n unexpected type of input is provided; an unnamed node is inferred\n as input; or some nodes are required but not provided."} +{"repo": "keras", "function": "def sparse_intersection_indices_and_values(x1, x2):\n ones1 = tf.sparse.map_values(ones_like_int8, x1)\n ones2 = tf.sparse.map_values(ones_like_int8, x2)\n intersection_extra_dim = tf.sets.intersection(tf.sparse.expand_dims(ones1, axis=-1), tf.sparse.expand_dims(ones2, axis=-1))\n\n def empty_intersection():\n return (tf.zeros((0, x1.shape.rank), dtype=tf.int64), tf.zeros((0,), dtype=x1.values.dtype), tf.zeros((0,), dtype=x2.values.dtype))\n\n def non_empty_intersection():\n intersection = tf.sparse.reshape(intersection_extra_dim, x1.dense_shape)\n zeros1 = tf.sparse.map_values(zeros_like_int8, x1)\n zeros2 = tf.sparse.map_values(zeros_like_int8, x2)\n mask1 = tf.sparse.add(zeros1, intersection)\n mask2 = tf.sparse.add(zeros2, intersection)\n return (intersection.indices, tf.sparse.retain(x1, tf.cast(mask1.values, tf.bool)).values, tf.sparse.retain(x2, tf.cast(mask2.values, tf.bool)).values)\n return tf.cond(tf.equal(tf.size(intersection_extra_dim), 0), empty_intersection, non_empty_intersection)", "docstring": "Compute the indices for the intersection of two `tf.SparseTensor`s and\nmodify the values for these indices.\n\nArgs:\n x1: the first `tf.SparseTensor`.\n x2: the second `tf.SparseTensor`.\nReturns: A tuple containing:\n - the indices for the intersection\n - `x1` values for the intersection indices (some values were removed)\n - `x2` values for the intersection indices (some values were removed)"} +{"repo": "beam", "function": "def __init__(self, host: str, port: int, time_to_live: Union[int, timedelta], *, kwargs: Optional[Dict[str, Any]]=None, request_coder: Optional[coders.Coder], response_coder: Optional[coders.Coder], source_caller: Optional[Caller[RequestT, ResponseT]]=None):\n self.request_coder = request_coder\n self.response_coder = response_coder\n self.redis_caller = _RedisCaller(host, port, time_to_live, request_coder=self.request_coder, response_coder=self.response_coder, kwargs=kwargs, source_caller=source_caller, mode=_RedisMode.WRITE)", "docstring": "Args:\n host (str): The hostname or IP address of the Redis server.\n port (int): The port number of the Redis server.\n time_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for\n records stored in Redis. Provide an integer (in seconds) or a\n `datetime.timedelta` object.\n kwargs: Optional(Dict[str, Any]) additional keyword arguments that\n are required to connect to your redis server. 
Same as `redis.Redis()`.\n request_coder: (Optional[`coders.Coder`]) coder for requests stored\n in Redis.\n response_coder: (Optional[`coders.Coder`]) coder for decoding responses\n received from Redis.\n source_caller: (Optional[`Caller`]): The source caller using this Redis\n cache in case of fetching the cache request to store in Redis.\n "} +{"repo": "tensorflow", "function": "def from_spec(cls, spec, name=None):\n return cls(spec.shape, spec.dtype, name or spec.name)", "docstring": "Returns a `TensorSpec` with the same shape and dtype as `spec`.\n\n>>> spec = tf.TensorSpec(shape=[8, 3], dtype=tf.int32, name=\"OriginalName\")\n>>> tf.TensorSpec.from_spec(spec, \"NewName\")\nTensorSpec(shape=(8, 3), dtype=tf.int32, name='NewName')\n\nArgs:\n spec: The `TypeSpec` used to create the new `TensorSpec`.\n name: The name for the new `TensorSpec`. Defaults to `spec.name`."} +{"repo": "transformers", "function": "class PerceiverProjectionPostprocessor(nn.Module):\n\n def __init__(self, in_channels: int, out_channels: int) -> None:\n super().__init__()\n self.classifier = nn.Linear(in_channels, out_channels)\n\n def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor]=None, modality_sizes=None) -> torch.Tensor:\n logits = self.classifier(inputs)\n return logits", "docstring": "Projection postprocessing for Perceiver. Can be used to project the channels of the decoder output to a lower\ndimension.\n\nArgs:\n in_channels (`int`):\n Number of channels in the input.\n out_channels (`int`):\n Number of channels in the output."} +{"repo": "transformers", "function": "class QuantLinear(nn.Module):\n\n def __init__(self, in_features, out_features, bias=True, weight_bit=8, bias_bit=32, per_channel=False, quant_mode=False):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = nn.Parameter(torch.zeros([out_features, in_features]))\n self.register_buffer('weight_integer', torch.zeros_like(self.weight))\n self.register_buffer('fc_scaling_factor', torch.zeros(self.out_features))\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_features))\n self.register_buffer('bias_integer', torch.zeros_like(self.bias))\n self.weight_bit = weight_bit\n self.quant_mode = quant_mode\n self.per_channel = per_channel\n self.bias_bit = bias_bit\n self.quant_mode = quant_mode\n self.percentile_mode = False\n self.weight_function = SymmetricQuantFunction.apply\n\n def __repr__(self):\n s = super().__repr__()\n s = f'({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})'\n return s\n\n def forward(self, x, prev_act_scaling_factor=None):\n if not self.quant_mode:\n return (nn.functional.linear(x, weight=self.weight, bias=self.bias), None)\n assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), 'Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. 
Please add a QuantAct layer with `per_channel = True` before this QuantAct layer'\n        w = self.weight\n        w_transform = w.data.detach()\n        if self.per_channel:\n            w_min, _ = torch.min(w_transform, dim=1, out=None)\n            w_max, _ = torch.max(w_transform, dim=1, out=None)\n        else:\n            w_min = w_transform.min().expand(1)\n            w_max = w_transform.max().expand(1)\n        self.fc_scaling_factor = symmetric_linear_quantization_params(self.weight_bit, w_min, w_max, self.per_channel)\n        self.weight_integer = self.weight_function(self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor)\n        bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor\n        if self.bias is not None:\n            self.bias_integer = self.weight_function(self.bias, self.bias_bit, False, bias_scaling_factor)\n        prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)\n        x_int = x / prev_act_scaling_factor\n        return (nn.functional.linear(x_int, weight=self.weight_integer, bias=self.bias_integer) * bias_scaling_factor, bias_scaling_factor)", "docstring": "Quantized version of `torch.nn.Linear`. Adds quantization-specific arguments on top of `torch.nn.Linear`.\n\nArgs:\n    weight_bit (`int`, *optional*, defaults to `8`):\n        Bitwidth for the quantized weight.\n    bias_bit (`int`, *optional*, defaults to `32`):\n        Bitwidth for the quantized bias.\n    per_channel (`bool`, *optional*, defaults to `False`):\n        Whether or not to use channel-wise quantization.\n    quant_mode (`bool`, *optional*, defaults to `False`):\n        Whether or not the layer is quantized."} +{"repo": "beam", "function": "def List(self, request, global_params=None):\n    config = self.GetMethodConfig('List')\n    return self._RunMethod(config, request, global_params=global_params)", "docstring": "List the jobs of a project. To list the jobs of a project in a region, we recommend using `projects.locations.jobs.list` with a [regional endpoint](https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). To list all jobs across all regions, use `projects.jobs.aggregated`. Using `projects.jobs.list` is not recommended, because you can only get the list of jobs that are running in `us-central1`. `projects.locations.jobs.list` and `projects.jobs.list` support filtering the list of jobs by name. 
Filtering by name isn't supported by `projects.jobs.aggregated`.\n\nArgs:\n request: (DataflowProjectsLocationsJobsListRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (ListJobsResponse) The response message."} +{"repo": "transformers", "function": "def range_index_map(batch_shape, num_segments, name='range_index_map'):\n device = num_segments.device if torch.is_tensor(num_segments) else 'cpu'\n batch_shape = torch.as_tensor(batch_shape, dtype=torch.long, device=device)\n assert len(batch_shape.size()) == 1\n num_segments = torch.as_tensor(num_segments, device=device)\n assert len(num_segments.size()) == 0\n indices = torch.arange(start=0, end=num_segments, device=num_segments.device)\n new_tensor = torch.cat([torch.ones_like(batch_shape, dtype=torch.long, device=num_segments.device), num_segments.unsqueeze(dim=0)], dim=0)\n new_shape = [int(x) for x in new_tensor.tolist()]\n indices = indices.view(new_shape)\n multiples = torch.cat([batch_shape, torch.as_tensor([1], device=device)], dim=0)\n indices = indices.repeat(multiples.tolist())\n return IndexMap(indices=indices, num_segments=num_segments, batch_dims=list(batch_shape.size())[0])", "docstring": "Constructs an index map equal to range(num_segments).\n\nArgs:\n batch_shape (`torch.Size`):\n Batch shape\n num_segments (`int`):\n Number of segments\n name (`str`, *optional*, defaults to 'range_index_map'):\n Name for the operation. Currently not used\n\nReturns:\n (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments)."} +{"repo": "temporian", "function": "def __invert__(self: EventSetOrNode) -> EventSetOrNode:\n from temporian.core.operators.unary import invert\n return invert(input=self)", "docstring": "Inverts a boolean [`EventSet`][temporian.EventSet] element-wise.\n\nSwaps False <-> True.\n\nDoes not work on integers, they should be cast to\n[`tp.bool_`][temporian.bool_] beforehand, using\n[`EventSet.cast()`][temporian.EventSet.cast].\n\nExample:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2],\n ... features={\"M\": [1, 5], \"N\": [1.0, 5.5]},\n ... 
)\n    >>> # Boolean EventSet\n    >>> b = a < 2\n    >>> b\n    indexes: ...\n            'M': [ True False]\n            'N': [ True False]\n    ...\n\n    >>> # Inverted EventSet\n    >>> c = ~b\n    >>> c\n    indexes: ...\n            'M': [False  True]\n            'N': [False  True]\n    ...\n\n    ```\n\nReturns:\n    Inverted EventSet."} +{"repo": "transformers", "function": "class TFOpenAIGPTDoubleHeadsModelOutput(ModelOutput):\n    logits: Optional[tf.Tensor] = None\n    mc_logits: Optional[tf.Tensor] = None\n    hidden_states: Tuple[tf.Tensor] | None = None\n    attentions: Tuple[tf.Tensor] | None = None", "docstring": "Base class for outputs of models predicting if two sentences are consecutive or not.\n\nArgs:\n    logits (`tf.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):\n        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n    mc_logits (`tf.Tensor` of shape `(batch_size, num_choices)`):\n        Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).\n    hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n        Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n        `(batch_size, sequence_length, hidden_size)`.\n\n        Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n    attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n        Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n        sequence_length)`.\n\n        Attention weights after the attention softmax, used to compute the weighted average in the self-attention\n        heads."} +{"repo": "starthinker", "function": "def recipe_dataset(config, auth_write, dataset_dataset, dataset_emails, dataset_groups):\n    dataset(config, {'auth': auth_write, 'dataset': dataset_dataset, 'emails': dataset_emails, 'groups': dataset_groups})", "docstring": "Create and permission a dataset in BigQuery.\n\nArgs:\n  auth_write (authentication) - Credentials used for writing data.\n  dataset_dataset (string) - Name of Google BigQuery dataset to create.\n  dataset_emails (string_list) - Comma separated emails.\n  dataset_groups (string_list) - Comma separated groups."} +{"repo": "tensorflow", "function": "def max_pool(value, ksize, strides, padding, data_format='NHWC', name=None, input=None):\n    value = deprecation.deprecated_argument_lookup('input', input, 'value', value)\n    with ops.name_scope(name, 'MaxPool', [value]) as name:\n        if data_format is None:\n            data_format = 'NHWC'\n        channel_index = 1 if data_format.startswith('NC') else 3\n        ksize = _get_sequence(ksize, 2, channel_index, 'ksize')\n        strides = _get_sequence(strides, 2, channel_index, 'strides')\n        if isinstance(padding, (list, tuple)) and data_format == 'NCHW_VECT_C':\n            raise ValueError(f\"`data_format='NCHW_VECT_C'` is not supported with explicit padding. Received: padding={padding}\")\n        padding, explicit_paddings = convert_padding(padding)\n        if np.isscalar(ksize) and ksize == 0 or (isinstance(ksize, (list, tuple, np.ndarray)) and any((v == 0 for v in ksize))):\n            raise ValueError(f'`ksize` cannot be zero. 
Received: ksize={ksize}')\n        return gen_nn_ops.max_pool(value, ksize=ksize, strides=strides, padding=padding, explicit_paddings=explicit_paddings, data_format=data_format, name=name)", "docstring": "Performs the max pooling on the input.\n\nArgs:\n  value: A 4-D `Tensor` of the format specified by `data_format`.\n  ksize: An int or list of `ints` that has length `1`, `2` or `4`.\n    The size of the window for each dimension of the input tensor.\n  strides: An int or list of `ints` that has length `1`, `2` or `4`.\n    The stride of the sliding window for each dimension of the input tensor.\n  padding: Either the `string` `\"SAME\"` or `\"VALID\"` indicating the type of\n    padding algorithm to use, or a list indicating the explicit paddings at\n    the start and end of each dimension. When explicit padding is used and\n    data_format is `\"NHWC\"`, this should be in the form `[[0, 0], [pad_top,\n    pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding is used\n    and data_format is `\"NCHW\"`, this should be in the form `[[0, 0], [0, 0],\n    [pad_top, pad_bottom], [pad_left, pad_right]]`. When using explicit\n    padding, the size of the paddings cannot be greater than the sliding\n    window size.\n  data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.\n  name: Optional name for the operation.\n  input: Alias for value.\n\nReturns:\n  A `Tensor` of format specified by `data_format`.\n  The max pooled output tensor."} +{"repo": "transformers", "function": "def forward(self, hidden_state, output_hidden_states: bool=False):\n    all_hidden_states = []\n    embedding = hidden_state\n    for mod in self.mixers:\n        embedding = mod(embedding)\n        if output_hidden_states:\n            all_hidden_states.append(embedding)\n    if output_hidden_states:\n        return (embedding, all_hidden_states)\n    else:\n        return (embedding, None)", "docstring": "Args:\n    hidden_state (`torch.Tensor`): The input tensor.\n    output_hidden_states (`bool`, *optional*, defaults to `False`):\n        Whether to output the hidden states as well.\n\nReturns:\n    `torch.Tensor`: The embedding. `list`: List of all hidden states if `output_hidden_states` is set to\n    `True`."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, padding_mask: Optional[torch.Tensor]=False):\n    batch_size, sequence_length, hidden_dim = hidden_states.shape\n    top_1_mask, router_probs = self.router(hidden_states, padding_mask)\n    router_mask = router_probs.bool()\n    hidden_states = hidden_states.reshape(batch_size * sequence_length, hidden_dim)\n    masked_hidden_states = torch.einsum('bm,be->ebm', hidden_states, router_mask)\n    for idx, expert in enumerate(self.experts.values()):\n        token_indices = router_mask[:, idx]\n        combining_weights = router_probs[token_indices, idx]\n        expert_output = expert(masked_hidden_states[idx, token_indices])\n        if self.moe_token_dropout > 0:\n            if self.training:\n                expert_output = self.token_dropout(expert_output)\n            else:\n                expert_output *= 1 - self.moe_token_dropout\n        masked_hidden_states[idx, token_indices] = torch.einsum('b,be->be', combining_weights, expert_output)\n    hidden_states = masked_hidden_states.sum(dim=0).reshape(batch_size, sequence_length, hidden_dim)\n    top_1_expert_index = torch.argmax(top_1_mask, dim=-1)\n    return (hidden_states, (router_probs, top_1_expert_index))", "docstring": "The goal of this forward pass is to have the same number of operations as the equivalent `NllbMoeDenseActDense`\n(mlp) layer. This means that all of the hidden states should be processed at most twice (since we are using a\ntop_2 gating mechanism). 
This means that we keep the complexity to O(batch_size x sequence_length x hidden_dim)\ninstead of O(num_experts x batch_size x sequence_length x hidden_dim).\n\n1- Get the `router_probs` from the `router`. The shape of the `router_mask` is `(batch_size X sequence_length,\nnum_expert)` and corresponds to the boolean version of the `router_probs`. The inputs are masked using the\n`router_mask`.\n\n2- Dispatch the hidden_states to their associated experts. The router probabilities are used to weight the\ncontribution of each expert when updating the masked hidden states.\n\nArgs:\n hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`):\n The hidden states\n padding_mask (`torch.Tensor`, *optional*, defaults to `False`):\n Attention mask. Can be in the causal form or not.\n\nReturns:\n hidden_states (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_dim)`):\n Updated hidden states\n router_logits (`torch.Tensor` of shape `(batch_size, sequence_length, num_experts)`):\n Needed for computing the loss"} +{"repo": "tensorflow", "function": "def _string_to_components(spec=None):\n cached_result = _STRING_TO_COMPONENTS_CACHE.get(spec)\n if cached_result is not None:\n return cached_result\n raw_spec = spec\n job, replica, task, device_type, device_index = (None, None, None, None, None)\n spec = spec or ''\n splits = [x.split(':') for x in spec.split('/')]\n valid_device_types = DeviceSpecV2._get_valid_device_types()\n for y in splits:\n ly = len(y)\n if y:\n if ly == 2 and y[0] == 'job':\n job = y[1]\n elif ly == 2 and y[0] == 'replica':\n replica = y[1]\n elif ly == 2 and y[0] == 'task':\n task = y[1]\n elif (ly == 1 or ly == 2) and y[0].upper() in valid_device_types:\n if device_type is not None:\n raise ValueError(f'Multiple device types are not allowed while parsing the device spec: {spec}.')\n device_type = y[0].upper()\n if ly == 2 and y[1] != '*':\n device_index = int(y[1])\n elif ly == 3 and y[0] == 'device':\n if device_type is not None:\n raise ValueError(f'Multiple device types are not allowed while parsing the device spec: {spec}.')\n device_type = y[1]\n if y[2] != '*':\n device_index = int(y[2])\n elif ly and y[0] != '':\n raise ValueError(f\"Unknown attribute '{y[0]}' is encountered while parsing the device spec: '{spec}'.\")\n output = (job, replica, task, device_type, device_index)\n _STRING_TO_COMPONENTS_CACHE[raw_spec] = output\n return output", "docstring": "Stateless portion of device spec string parsing.\n\nArgs:\n spec: An optional string specifying a device specification.\n\nReturns:\n The parsed components of `spec`. Note that the result of this function\n must go through attribute setters of DeviceSpec, and should therefore NOT\n be used directly."} +{"repo": "pyglove", "function": "def __init__(self, min_value: int=0, max_value: Optional[int]=None):\n super().__init__()\n self._min_value = min_value\n self._max_value = max_value", "docstring": "Constructor.\n\nArgs:\n min_value: Min value that is acceptable for the list index.\n max_value: Max value that is acceptable for the list index. 
If None, there\n is no upper bound for list index."} +{"repo": "tensorflow", "function": "def lu_solve(lower_upper, perm, rhs, validate_args=False, name=None):\n with ops.name_scope(name or 'lu_solve'):\n lower_upper = ops.convert_to_tensor(lower_upper, dtype_hint=dtypes.float32, name='lower_upper')\n perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')\n rhs = ops.convert_to_tensor(rhs, dtype_hint=lower_upper.dtype, name='rhs')\n assertions = _lu_solve_assertions(lower_upper, perm, rhs, validate_args)\n if assertions:\n with ops.control_dependencies(assertions):\n lower_upper = array_ops.identity(lower_upper)\n perm = array_ops.identity(perm)\n rhs = array_ops.identity(rhs)\n if rhs.shape.rank == 2 and perm.shape.rank == 1:\n permuted_rhs = array_ops.gather(rhs, perm, axis=-2)\n else:\n rhs_shape = array_ops.shape(rhs)\n broadcast_batch_shape = array_ops.broadcast_dynamic_shape(rhs_shape[:-2], array_ops.shape(perm)[:-1])\n d, m = (rhs_shape[-2], rhs_shape[-1])\n rhs_broadcast_shape = array_ops.concat([broadcast_batch_shape, [d, m]], axis=0)\n broadcast_rhs = array_ops.broadcast_to(rhs, rhs_broadcast_shape)\n broadcast_rhs = array_ops.reshape(broadcast_rhs, [-1, d, m])\n broadcast_perm = array_ops.broadcast_to(perm, rhs_broadcast_shape[:-1])\n broadcast_perm = array_ops.reshape(broadcast_perm, [-1, d])\n broadcast_batch_size = math_ops.reduce_prod(broadcast_batch_shape)\n broadcast_batch_indices = array_ops.broadcast_to(math_ops.range(broadcast_batch_size)[:, array_ops.newaxis], [broadcast_batch_size, d])\n broadcast_perm = array_ops_stack.stack([broadcast_batch_indices, broadcast_perm], axis=-1)\n permuted_rhs = array_ops.gather_nd(broadcast_rhs, broadcast_perm)\n permuted_rhs = array_ops.reshape(permuted_rhs, rhs_broadcast_shape)\n lower = set_diag(band_part(lower_upper, num_lower=-1, num_upper=0), array_ops.ones(array_ops.shape(lower_upper)[:-1], dtype=lower_upper.dtype))\n return triangular_solve(lower_upper, triangular_solve(lower, permuted_rhs), lower=False)", "docstring": "Solves systems of linear eqns `A X = RHS`, given LU factorizations.\n\nNote: this function does not verify the implied matrix is actually invertible\nnor is this condition checked even when `validate_args=True`.\n\nArgs:\n lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,\n matmul(L, U)) = X` then `lower_upper = L + U - eye`.\n perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =\n X` then `perm = argmax(P)`.\n rhs: Matrix-shaped float `Tensor` representing targets for which to solve;\n `A X = RHS`. To handle vector cases, use: `lu_solve(..., rhs[...,\n tf.newaxis])[..., 0]`.\n validate_args: Python `bool` indicating whether arguments should be checked\n for correctness. 
Note: this function does not verify the implied matrix is\n actually invertible, even when `validate_args=True`.\n Default value: `False` (i.e., don't validate arguments).\n name: Python `str` name given to ops managed by this object.\n Default value: `None` (i.e., 'lu_solve').\n\nReturns:\n x: The `X` in `A @ X = RHS`.\n\n#### Examples\n\n```python\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nx = [[[1., 2],\n [3, 4]],\n [[7, 8],\n [3, 4]]]\ninv_x = tf.linalg.lu_solve(*tf.linalg.lu(x), rhs=tf.eye(2))\ntf.assert_near(tf.matrix_inverse(x), inv_x)\n# ==> True\n```"} +{"repo": "mobly", "function": "def set_key(self, structure_prefix, key_line):\n self._empty = False\n key_value = self._remove_structure_prefix(structure_prefix, key_line)\n if '=' in key_value:\n key, value = key_value.split('=', 1)\n self.current_key = key\n if key in self.known_keys:\n self.known_keys[key].append(value)\n else:\n self.unknown_keys[key].append(key_value)", "docstring": "Sets the current key for the instrumentation block.\n\nFor unknown keys, the key is added to the value list in order to\nbetter contextualize the value in the output.\n\nArgs:\n structure_prefix: string, the structure prefix that was matched\n and that needs to be removed.\n key_line: string, the raw instrumentation output line that contains\n the key-value pair."} +{"repo": "tensorflow", "function": "def range(*args, **kwargs) -> 'DatasetV2':\n from tensorflow.python.data.ops import range_op\n return range_op._range(*args, **kwargs)", "docstring": "Creates a `Dataset` of a step-separated range of values.\n\n>>> ds = Dataset.range(5)\n>>> [a.item() for a in ds.as_numpy_iterator()]\n[0, 1, 2, 3, 4]\n>>> ds = Dataset.range(2, 5)\n>>> [a.item() for a in ds.as_numpy_iterator()]\n[2, 3, 4]\n>>> ds = Dataset.range(1, 5, 2)\n>>> [a.item() for a in ds.as_numpy_iterator()]\n[1, 3]\n>>> ds = Dataset.range(1, 5, -2)\n>>> [a.item() for a in ds.as_numpy_iterator()]\n[]\n>>> ds = Dataset.range(5, 1)\n>>> [a.item() for a in ds.as_numpy_iterator()]\n[]\n>>> ds = Dataset.range(5, 1, -2)\n>>> [a.item() for a in ds.as_numpy_iterator()]\n[5, 3]\n>>> ds = Dataset.range(2, 5, output_type=tf.int32)\n>>> [a.item() for a in ds.as_numpy_iterator()]\n[2, 3, 4]\n>>> ds = Dataset.range(1, 5, 2, output_type=tf.float32)\n>>> [a.item() for a in ds.as_numpy_iterator()]\n[1.0, 3.0]\n\nArgs:\n *args: follows the same semantics as python's range.\n len(args) == 1 -> start = 0, stop = args[0], step = 1.\n len(args) == 2 -> start = args[0], stop = args[1], step = 1.\n len(args) == 3 -> start = args[0], stop = args[1], step = args[2].\n **kwargs:\n - output_type: Its expected dtype. (Optional, default: `tf.int64`).\n - name: (Optional.) 
A name for the tf.data operation.\n\nReturns:\n Dataset: A `RangeDataset`.\n\nRaises:\n ValueError: if len(args) == 0."} +{"repo": "transformers", "function": "def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVideoVisionConfig, qformer_config: InstructBlipVideoQFormerConfig, text_config: PretrainedConfig, **kwargs):\n return cls(vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`InstructBlipVideoConfig`] (or a derived class) from a InstructBlipVideo vision model, Q-Former and\nlanguage model configurations.\n\nReturns:\n [`InstructBlipVideoConfig`]: An instance of a configuration object"} +{"repo": "data-quality-monitor", "function": "class Buffer(Generic[T]):\n queue: List[T]\n max_size: int\n flusher: Union[FlushFunction, NoReturn]\n\n def __init__(self, initlist: List[T], max_size: int, flusher: FlushFunction) -> None:\n self.queue = initlist\n self.max_size = max_size\n self.flusher = flusher\n\n def flush(self, force: bool=False) -> bool | Any:\n \"\"\"\n Empty and consume queue items, if force or max_size reached\n\n Args:\n * force: If True, force queue to flush\n\n Returns:\n * True, if flushed with no errors\n * False, if not flushed\n * Error value from consumer, if flushed with errors\n \"\"\"\n if force or len(self.queue) > self.max_size:\n result = self.flusher(self.queue)\n self.queue.clear()\n return result or True\n else:\n return False\n\n def push(self, item: T) -> bool | Any:\n \"\"\"\n Add item to queue and attempt a flush.\n\n Args:\n * item: Item to add to queue\n\n Returns:\n * True, if flushed with no errors\n * False, if not flushed\n * Error value from consumer, if flushed with errors\n \"\"\"\n self.queue.append(item)\n return self.flush()", "docstring": "Representation of a Buffer (FIFO queue) with the ability to\nconsume the current queue into a flush function when max_size is reached.\n\nIt can queue any list of items, e.g. 
logs, rows, and API calls.\n\nArgs:\n * initlist: Initial list of items\n * max_size: Maximum queue size\n * flusher: Function to be called with list of items"} +{"repo": "tensorflow", "function": "def merge_with(self, other):\n other = as_shape(other)\n if self.dims is None:\n return other\n if other.dims is None:\n return self\n else:\n try:\n self.assert_same_rank(other)\n new_dims = [dim.merge_with(other_dim) for dim, other_dim in zip(self.dims, other.dims)]\n return TensorShape(new_dims)\n except ValueError:\n raise ValueError('Shapes %s and %s are not compatible' % (self, other))", "docstring": "Returns a `TensorShape` combining the information in `self` and `other`.\n\nThe dimensions in `self` and `other` are merged element-wise,\naccording to the rules below:\n\n```python\nDimension(n).merge_with(Dimension(None)) == Dimension(n)\nDimension(None).merge_with(Dimension(n)) == Dimension(n)\nDimension(None).merge_with(Dimension(None)) == Dimension(None)\n# raises ValueError for n != m\nDimension(n).merge_with(Dimension(m))\n```\n>>> ts = tf.TensorShape([1,2])\n>>> ot1 = tf.TensorShape([1,2])\n>>> ts.merge_with(ot1).as_list()\n[1,2]\n\n>>> ot2 = tf.TensorShape([1,None])\n>>> ts.merge_with(ot2).as_list()\n[1,2]\n\n>>> ot3 = tf.TensorShape([None, None])\n>>> ot3.merge_with(ot2).as_list()\n[1, None]\n\nArgs:\n other: Another `TensorShape`.\n\nReturns:\n A `TensorShape` containing the combined information of `self` and\n `other`.\n\nRaises:\n ValueError: If `self` and `other` are not compatible."} +{"repo": "tensorflow", "function": "def _use_temp_cache(self):\n if self._use_tensor_buffer():\n return False\n if self._use_tensor_values_cache():\n return self._parameters.use_temp_cache_var\n else:\n return False", "docstring": "Returns true if the intermediate values should be stacked instead of being stored in a tf.Variable.\n\nReturns:\n A boolean, denoting whether to use a temporary cache or not."} +{"repo": "starthinker", "function": "def google_api_initilaize(config, api_call, alias=None):\n if api_call['function'].endswith('list') or alias == 'list':\n api_call['iterate'] = True\n if api_call['api'] == 'dfareporting':\n if not api_call['function'].startswith('userProfiles'):\n is_superuser, profile_id = get_profile_for_api(config, api_call['auth'], api_call['kwargs']['id'] if api_call['function'] == 'accounts.get' else api_call['kwargs']['accountId'])\n api_call['kwargs']['profileId'] = profile_id\n if is_superuser:\n api_call['version'] = 'prerelease'\n elif 'accountId' in api_call['kwargs']:\n del api_call['kwargs']['accountId']", "docstring": "Some Google API calls require a lookup or pre-call; add it here.\n\nModifies the API call before actual execution with any data\nspecifically required by an endpoint. 
Currently:\n\n > dfa-reporting - look up user profile and add to call.\n\nArgs:\n api_call (dict): the JSON for the API call as defined in recipe.\n alias (string): mostly used to signal a list behavior (change to iterate in future?)\n\nReturns (dict):\n A modified JSON with additional API values added.\n Currently mostly used by dfareporting API to add profile and account.\n\nRaises:\n ValueError: If a required key in the recipe is missing."} +{"repo": "starthinker", "function": "def m_to_mile(value):\n if value is None:\n return None\n return value / 1609", "docstring": "Converts distance in meters to miles\n\nArgs:\n value: floating point representing the distance in meters\nReturns: distance in miles"} +{"repo": "transformers", "function": "class GraniteMoeMoE(nn.Module):\n\n def __init__(self, config: GraniteMoeConfig):\n super(GraniteMoeMoE, self).__init__()\n self.input_size = config.hidden_size\n self.hidden_size = config.intermediate_size\n self.activation = ACT2FN[config.hidden_act]\n self.input_linear = GraniteMoeParallelExperts(config.num_local_experts, self.input_size, self.hidden_size * 2)\n self.output_linear = GraniteMoeParallelExperts(config.num_local_experts, self.hidden_size, self.input_size)\n self.router = GraniteMoeTopKGating(input_size=self.input_size, num_experts=config.num_local_experts, top_k=config.num_experts_per_tok)\n\n def forward(self, layer_input):\n \"\"\"\n Forward pass of the mixture of experts layer.\n\n Args:\n layer_input (Tensor):\n Input tensor.\n\n Returns:\n Tensor:\n Output tensor.\n Tensor:\n Router logits.\n \"\"\"\n bsz, length, emb_size = layer_input.size()\n layer_input = layer_input.reshape(-1, emb_size)\n _, batch_index, batch_gates, expert_size, router_logits = self.router(layer_input)\n expert_inputs = layer_input[batch_index]\n hidden_states = self.input_linear(expert_inputs, expert_size)\n chunked_hidden_states = hidden_states.chunk(2, dim=-1)\n hidden_states = self.activation(chunked_hidden_states[0]) * chunked_hidden_states[1]\n expert_outputs = self.output_linear(hidden_states, expert_size)\n expert_outputs = expert_outputs * batch_gates[:, None]\n zeros = torch.zeros((bsz * length, self.input_size), dtype=expert_outputs.dtype, device=expert_outputs.device)\n layer_output = zeros.index_add(0, batch_index, expert_outputs)\n layer_output = layer_output.view(bsz, length, self.input_size)\n return (layer_output, router_logits)", "docstring": "A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.\n\nArgs:\n config:\n Configuration object with model hyperparameters."} +{"repo": "tensorflow", "function": "def connect_to_cluster(cluster_spec_or_resolver, job_name='localhost', task_index=0, protocol=None, make_master_device_default=True, cluster_device_filters=None):\n if not context.executing_eagerly():\n raise ValueError('`tf.config.experimental_connect_to_cluster` can only be called in eager mode.')\n protocol = protocol or remote_utils.get_default_communication_protocol()\n if isinstance(cluster_spec_or_resolver, server_lib.ClusterSpec):\n cluster_spec = cluster_spec_or_resolver\n elif isinstance(cluster_spec_or_resolver, cluster_resolver.ClusterResolver):\n if cluster_spec_or_resolver.master() in _LOCAL_MASTERS:\n return\n cluster_spec = cluster_spec_or_resolver.cluster_spec()\n else:\n raise ValueError('`cluster_spec_or_resolver` must be a `ClusterSpec` or a `ClusterResolver`.')\n cluster_def = copy.deepcopy(cluster_spec.as_cluster_def())\n if cluster_device_filters:\n if 
isinstance(cluster_device_filters, server_lib.ClusterDeviceFilters):\n cluster_device_filters = copy.deepcopy(cluster_device_filters._as_cluster_device_filters())\n else:\n raise ValueError('`cluster_device_filters` must be an instance of `tf.train.experimental.ClusterDeviceFilters`.')\n is_server_def_changed = False\n current_server_def = context.get_server_def()\n if current_server_def and job_name not in cluster_spec.jobs:\n for i, job in enumerate(current_server_def.cluster.job):\n if job.name == job_name:\n del current_server_def.cluster.job[i]\n if current_server_def is None or current_server_def.cluster != cluster_def or current_server_def.job_name != job_name or (current_server_def.task_index != task_index):\n is_server_def_changed = True\n if job_name not in cluster_spec.jobs:\n local_port = pywrap_tfe.TF_PickUnusedPortOrDie()\n job_def = cluster_def.job.add()\n job_def.name = job_name\n job_def.tasks[0] = 'localhost:{}'.format(local_port)\n if context.context().coordination_service is None:\n service_type = remote_utils.coordination_service_type(protocol)\n service_leader = ''\n if isinstance(cluster_spec_or_resolver, cluster_resolver.ClusterResolver) and hasattr(cluster_spec_or_resolver, 'tpu_hardware_feature'):\n service_leader = cluster_spec_or_resolver.get_coordination_service_leader()\n if cluster_spec_or_resolver.environment == 'google':\n is_uptc_sess = '.uptc-worker.' in cluster_spec_or_resolver.master()\n service_type = remote_utils.coordination_service_type(protocol, is_uptc_sess)\n else:\n service_type = 'standalone'\n if service_type:\n context.context().configure_coordination_service(service_type=service_type, service_leader=service_leader, enable_health_check=False)\n default_session_config = copy.deepcopy(context.context().config)\n for name in cluster_spec.jobs:\n if name == job_name:\n continue\n default_session_config.experimental.collective_group_leader = f'/job:{name}/replica:0/task:0'\n logging.info('default session config: %s', default_session_config)\n server_def = ServerDef(cluster=cluster_def, job_name=job_name, task_index=task_index, protocol=protocol, default_session_config=default_session_config, cluster_device_filters=cluster_device_filters)\n if is_server_def_changed:\n context.set_server_def(server_def)\n else:\n context.update_server_def(server_def)\n if make_master_device_default and isinstance(cluster_spec_or_resolver, cluster_resolver.ClusterResolver) and cluster_spec_or_resolver.master():\n master = cluster_spec_or_resolver.master()\n master_job_name = None\n master_task_id = None\n for job_name in cluster_spec.jobs:\n for task_id in cluster_spec.task_indices(job_name):\n task_address = cluster_spec.task_address(job_name, task_id)\n if master in task_address or task_address in master:\n master_job_name = job_name\n master_task_id = task_id\n break\n if not master_job_name:\n raise ValueError('`make_master_device_default` is set to True but cannot find master %s in the cluster' % master)\n master_device = '/job:{}/replica:0/task:{}'.format(master_job_name, master_task_id)\n master_device = device_util.canonicalize(master_device)\n current_device = device_util.current()\n if current_device:\n current_device = device_util.canonicalize(current_device)\n if current_device and current_device != master_device:\n raise ValueError('`connect_to_cluster` is called inside existing device scope %s, which is different from the master device scope %s to enter. This is not allowed.' 
% (current_device, master_device))\n if not current_device:\n logging.info('Entering into master device scope: %s', master_device)\n ops.device(master_device).__enter__()", "docstring": "Connects to the given cluster.\n\nWill make devices on the cluster available to use. Note that calling this more\nthan once will work, but will invalidate any tensor handles on the old remote\ndevices.\n\nIf the given local job name is not present in the cluster specification, it\nwill be automatically added, using an unused port on the localhost.\n\nDevice filters can be specified to isolate groups of remote tasks to avoid\nundesired accesses between workers. Workers accessing resources or launching\nops / functions on filtered remote devices will result in errors (unknown\ndevices). For any remote task, if no device filter is present, all cluster\ndevices will be visible; if any device filter is specified, it can only\nsee devices matching at least one filter. Devices on the task itself are\nalways visible. Device filters can be partially specified.\n\nFor example, for a cluster set up for parameter server training, the following\ndevice filters might be specified:\n\n```python\ncdf = tf.config.experimental.ClusterDeviceFilters()\n# For any worker, only the devices on PS nodes and itself are visible\nfor i in range(num_workers):\n cdf.set_device_filters('worker', i, ['/job:ps'])\n# Similarly for any ps, only the devices on workers and itself are visible\nfor i in range(num_ps):\n cdf.set_device_filters('ps', i, ['/job:worker'])\n\ntf.config.experimental_connect_to_cluster(cluster_def,\n cluster_device_filters=cdf)\n```\n\nArgs:\n cluster_spec_or_resolver: A `ClusterSpec` or `ClusterResolver` describing\n the cluster.\n job_name: The name of the local job.\n task_index: The local task index.\n protocol: The communication protocol, such as `\"grpc\"`. If unspecified, will\n use the default from `python/platform/remote_utils.py`.\n make_master_device_default: If True and a cluster resolver is passed, will\n automatically enter the master task device scope, which indicates the\n master becomes the default device to run ops. It won't do anything if\n a cluster spec is passed. 
Will throw an error if the caller is currently\n already in some device scope.\n cluster_device_filters: an instance of\n `tf.train.experimental.ClusterDeviceFilters` that specifies device filters\n for the remote tasks in the cluster."} +{"repo": "tensorflow", "function": "def convert_graphdef_with_arrays(input_data, input_arrays_with_shape, output_arrays, control_output_arrays, **kwargs):\n model_flags = build_model_flags(**kwargs)\n conversion_flags = build_conversion_flags(**kwargs)\n quantized_input_stats = kwargs.get('quantized_input_stats', None)\n for idx, (name, shape) in enumerate(input_arrays_with_shape):\n input_array = model_flags.input_arrays.add()\n if _is_quantized_input_stats_required(conversion_flags):\n if quantized_input_stats:\n input_array.mean_value, input_array.std_value = quantized_input_stats[idx]\n else:\n raise ValueError('The `quantized_input_stats` flag must be defined when either `inference_type` flag or `inference_input_type` flag is set to tf.int8 or tf.uint8.')\n input_array.name = name\n input_array.shape.dims.extend(list(map(int, shape)))\n if output_arrays:\n for name in output_arrays:\n model_flags.output_arrays.append(name)\n if control_output_arrays:\n for name in control_output_arrays:\n model_flags.control_output_arrays.append(name)\n data = convert(model_flags, conversion_flags, input_data.SerializeToString(), debug_info_str=None)\n return data", "docstring": "Convert a frozen GraphDef that can't be loaded in TF.\n\nConversion can be customized by providing arguments that are forwarded to\n`build_model_flags` and `build_conversion_flags` (see documentation).\n\nArgs:\n input_data: Input data (i.e. often `sess.graph_def`).\n input_arrays_with_shape: Tuple of strings representing input tensor names\n and list of integers representing input shapes (e.g., [(\"foo\" : [1, 16,\n 16, 3])]). Use only when graph cannot be loaded into TensorFlow and when\n `input_tensors` is None.\n output_arrays: List of output tensors to freeze graph with. Use only when\n graph cannot be loaded into TensorFlow and when `output_tensors` is None.\n control_output_arrays: Control output node names. This is used when\n converting a Graph with no output tensors. For example, if the graph's\n last operation is a Print op, just specify that op's name in this field.\n This can be used together with the `output_arrays` parameter.\n **kwargs: See `build_model_flags` and `build_conversion_flags`.\n\nReturns:\n The converted data. 
For example if TFLite was the destination, then\n this will be a tflite flatbuffer in a bytes array.\n\nRaises:\n Defined in `build_conversion_flags`."} +{"repo": "pyglove", "function": "def evaluate(code: str, *, global_vars: Optional[Dict[str, Any]]=None, permission: Optional[permissions.CodePermission]=None, returns_stdout: bool=False, outputs_intermediate: bool=False) -> Union[Any, Dict[str, Any]]:\n permission = permission or permissions.get_permission()\n ctx = dict(get_context())\n if global_vars:\n ctx.update(global_vars)\n code_block = parsing.parse(code, permission)\n global_vars, orig_global_vars = (ctx, ctx.copy())\n if not code_block.body:\n return {} if outputs_intermediate else None\n stdout = io.StringIO()\n with contextlib.redirect_stdout(stdout):\n if hasattr(code_block.body[-1], 'value'):\n last_expr = code_block.body.pop()\n result_vars = [RESULT_KEY]\n if isinstance(last_expr, ast.Assign):\n for name_node in last_expr.targets:\n if isinstance(name_node, ast.Name):\n result_vars.append(name_node.id)\n last_expr = ast.Expression(last_expr.value)\n try:\n exec(compile(code_block, '', mode='exec'), global_vars)\n result = eval(compile(last_expr, '', mode='eval'), global_vars)\n except BaseException as e:\n raise errors.CodeError(code, e) from e\n for result_var in result_vars:\n global_vars[result_var] = result\n else:\n try:\n exec(compile(code_block, '', mode='exec'), global_vars)\n except BaseException as e:\n raise errors.CodeError(code, e) from e\n global_vars[RESULT_KEY] = list(global_vars.values())[-1]\n if returns_stdout:\n return stdout.getvalue()\n if outputs_intermediate:\n outputs = {}\n for k, v in global_vars.items():\n if k == '__builtins__':\n continue\n if k not in orig_global_vars or v is not orig_global_vars[k]:\n outputs[k] = v\n outputs[STDOUT_KEY] = stdout.getvalue()\n return outputs\n return global_vars[RESULT_KEY]", "docstring": "Executes Python code.\n\nFeatures:\n * Fine-grained execution policy for limiting what APIs could be executed.\n This eliminates the need for sandboxing.\n * It exposes both the final results and intermediate results (variables).\n\nArgs:\n code: Python code to run.\n global_vars: An optional dict as the globals that could be referenced by the\n code.\n permission: Permission for the Python code to run.\n returns_stdout: If True, the stdout (a str) will be returned.\n outputs_intermediate: Applicable when returns_stdout is False. If True,\n intermediate output will be outputted as a dict, with the last line's\n value accessible by key '__result__' and the std output accessible by\n key '__stdout__'. Otherwise the value of the last line will be returned.\n\nReturns:\n The value of the last line of the code block. Or a dict of variable\n names of all locals to their evaluated values as the output of the code to\n run. The value for the last line can be accessed by key '__result__'. 
Or the\n stdout as a str."} +{"repo": "beam", "function": "def run_change_point_analysis(test_config_container: TestConfigContainer, big_query_metrics_fetcher: MetricsFetcher, change_point_config: ChangePointConfig=ChangePointConfig(), save_alert_metadata: bool=False):\n logging.info('Running change point analysis for test ID :%s on metric: % s' % (test_config_container.test_id, test_config_container.metric_name))\n test_name = test_config_container.test_name\n min_runs_between_change_points = change_point_config.min_runs_between_change_points\n num_runs_in_change_point_window = change_point_config.num_runs_in_change_point_window\n metric_container = big_query_metrics_fetcher.fetch_metric_data(test_config=test_config_container)\n metric_container.sort_by_timestamp()\n metric_values = metric_container.values\n timestamps = metric_container.timestamps\n change_point_index = find_latest_change_point_index(metric_values=metric_values)\n if not change_point_index:\n logging.info('Change point is not detected for the test ID %s' % test_config_container.test_id)\n return False\n latest_change_point_run = len(timestamps) - 1 - change_point_index\n if not is_change_point_in_valid_window(num_runs_in_change_point_window, latest_change_point_run):\n logging.info('Performance regression/improvement found for the test ID: %s. on metric %s. Since the change point run %s lies outside the num_runs_in_change_point_window distance: %s, alert is not raised.' % (test_config_container.test_id, test_config_container.metric_name, latest_change_point_run + 1, num_runs_in_change_point_window))\n return False\n is_valid_change_point = True\n last_reported_issue_number = None\n issue_metadata_table_name = f'{test_config_container.metrics_table}_{test_config_container.metric_name}'\n if test_config_container.test_name:\n issue_metadata_table_name = f'{issue_metadata_table_name}_{test_config_container.test_name}'\n existing_issue_data = get_existing_issues_data(table_name=issue_metadata_table_name)\n if existing_issue_data is not None:\n existing_issue_timestamps = existing_issue_data[constants._CHANGE_POINT_TIMESTAMP_LABEL].tolist()\n last_reported_issue_number = existing_issue_data[constants._ISSUE_NUMBER].tolist()[0]\n if not isinstance(last_reported_issue_number, int):\n last_reported_issue_number = last_reported_issue_number.item()\n is_valid_change_point = is_sibling_change_point(previous_change_point_timestamps=existing_issue_timestamps, change_point_index=change_point_index, timestamps=timestamps, min_runs_between_change_points=min_runs_between_change_points, test_id=test_config_container.test_id)\n if is_valid_change_point and save_alert_metadata:\n issue_number, issue_url = create_performance_alert(test_config_container=test_config_container, metric_container=metric_container, change_point_index=change_point_index, existing_issue_number=last_reported_issue_number)\n issue_metadata = GitHubIssueMetaData(issue_timestamp=pd.Timestamp(datetime.now().replace(tzinfo=timezone.utc)), test_id=test_config_container.test_id.replace('.', '_'), test_name=test_name or uuid.uuid4().hex, metric_name=test_config_container.metric_name, change_point=metric_values[change_point_index], issue_number=issue_number, issue_url=issue_url, change_point_timestamp=timestamps[change_point_index])\n publish_issue_metadata_to_big_query(issue_metadata=issue_metadata, table_name=issue_metadata_table_name, project=test_config_container.project)\n return is_valid_change_point", "docstring": "Args:\n test_config_container: TestConfigContainer 
containing test metadata for\n fetching data and running change point analysis.\n big_query_metrics_fetcher: BigQuery metrics fetcher used to fetch data for\n change point analysis.\n change_point_config: ChangePointConfig containing parameters to run\n change point analysis.\n save_alert_metadata: bool indicating if issue metadata\n should be published to BigQuery table.\nReturns:\n bool indicating if a change point is observed and alerted on GitHub."} +{"repo": "transformers", "function": "def get_doctest_files(diff_with_last_commit: bool=False) -> List[str]:\n repo = Repo(PATH_TO_REPO)\n test_files_to_run = []\n if not diff_with_last_commit:\n print(f'main is at {repo.refs.main.commit}')\n print(f'Current head is at {repo.head.commit}')\n branching_commits = repo.merge_base(repo.refs.main, repo.head)\n for commit in branching_commits:\n print(f'Branching commit: {commit}')\n test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, branching_commits)\n else:\n print(f'main is at {repo.head.commit}')\n parent_commits = repo.head.commit.parents\n for commit in parent_commits:\n print(f'Parent commit: {commit}')\n test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, parent_commits)\n all_test_files_to_run = get_all_doctest_files()\n new_test_files = get_new_doctest_files(repo, repo.head.commit, repo.refs.main.commit)\n test_files_to_run = list(set(test_files_to_run + new_test_files))\n with open('utils/slow_documentation_tests.txt') as fp:\n slow_documentation_tests = set(fp.read().strip().split('\\n'))\n test_files_to_run = [x for x in test_files_to_run if x in all_test_files_to_run and x not in slow_documentation_tests]\n test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()]\n return sorted(test_files_to_run)", "docstring": "Return a list of python and Markdown files where doc example have been modified between:\n\n- the current head and the main branch if `diff_with_last_commit=False` (default)\n- the current head and its parent commit otherwise.\n\nReturns:\n `List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files\n modified are returned if the diff in the file is only in doctest examples)."} +{"repo": "python-fire", "function": "def GetRawKeyFunction():\n for get_raw_key_function in (_GetRawKeyFunctionPosix, _GetRawKeyFunctionWindows):\n try:\n return get_raw_key_function()\n except:\n pass\n return lambda: None", "docstring": "Returns a function that reads one keypress from stdin with no echo.\n\nReturns:\n A function that reads one keypress from stdin with no echo or a function\n that always returns None if stdin does not support it."} +{"repo": "tensorflow", "function": "def assert_type(tensor, tf_type, message=None, name=None):\n tf_type = dtypes.as_dtype(tf_type)\n with ops.name_scope(name, 'assert_type', [tensor]):\n if not isinstance(tensor, sparse_tensor.SparseTensor):\n tensor = ops.convert_to_tensor(tensor, name='tensor')\n if tensor.dtype != tf_type:\n raise TypeError(f'{_message_prefix(message)}{getattr(tensor, 'name', 'tensor')} must be of type {tf_type!r}; got {tensor.dtype!r}')\n return control_flow_ops.no_op('statically_determined_correct_type')", "docstring": "Statically asserts that the given `Tensor` is of the specified type.\n\nArgs:\n tensor: A `Tensor` or `SparseTensor`.\n tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,\n etc).\n message: A string to prefix to the default message.\n name: A name to give this `Op`. 
Defaults to \"assert_type\"\n\nRaises:\n TypeError: If the tensors data type doesn't match `tf_type`.\n\nReturns:\n A `no_op` that does nothing. Type can be determined statically."} +{"repo": "tensorflow", "function": "def _slot_dict(self, slot_name):\n named_slots = self._slots.get(slot_name, None)\n if named_slots is None:\n named_slots = {}\n self._slots[slot_name] = named_slots\n return named_slots", "docstring": "Returns a dict for caching slots created under the given name.\n\nArgs:\n slot_name: Name for the slot.\n\nReturns:\n A dict that maps primary `Variable` objects to the slot created\n for that variable, under the given slot name."} +{"repo": "beam", "function": "def Get(self, request, global_params=None):\n config = self.GetMethodConfig('Get')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Gets information about a snapshot.\n\nArgs:\n request: (DataflowProjectsSnapshotsGetRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (Snapshot) The response message."} +{"repo": "mobly", "function": "def assert_raises(expected_exception, extras=None, *args, **kwargs):\n context = _AssertRaisesContext(expected_exception, extras=extras)\n return context", "docstring": "Assert that an exception is raised when a function is called.\n\nIf no exception is raised, test fail. If an exception is raised but not\nof the expected type, the exception is let through.\n\nThis should only be used as a context manager:\n with assert_raises(Exception):\n func()\n\nArgs:\n expected_exception: An exception class that is expected to be\n raised.\n extras: An optional field for extra information to be included in\n test result."} +{"repo": "keras", "function": "class CategoricalFocalCrossentropy(LossFunctionWrapper):\n\n def __init__(self, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1, reduction='sum_over_batch_size', name='categorical_focal_crossentropy', dtype=None):\n \"\"\"Initializes `CategoricalFocalCrossentropy` instance.\"\"\"\n super().__init__(categorical_focal_crossentropy, name=name, reduction=reduction, dtype=dtype, alpha=alpha, gamma=gamma, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis)\n self.from_logits = from_logits\n self.label_smoothing = label_smoothing\n self.axis = axis\n self.alpha = alpha\n self.gamma = gamma\n\n def get_config(self):\n config = Loss.get_config(self)\n config.update({'from_logits': self.from_logits, 'label_smoothing': self.label_smoothing, 'axis': self.axis, 'alpha': self.alpha, 'gamma': self.gamma})\n return config", "docstring": "Computes the alpha balanced focal crossentropy loss.\n\nUse this crossentropy loss function when there are two or more label\nclasses and if you want to handle class imbalance without using\n`class_weights`. We expect labels to be provided in a `one_hot`\nrepresentation.\n\nAccording to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it\nhelps to apply a focal factor to down-weight easy examples and focus more on\nhard examples. The general formula for the focal loss (FL)\nis as follows:\n\n`FL(p_t) = (1 - p_t) ** gamma * log(p_t)`\n\nwhere `p_t` is defined as follows:\n`p_t = output if y_true == 1, else 1 - output`\n\n`(1 - p_t) ** gamma` is the `modulating_factor`, where `gamma` is a focusing\nparameter. 
When `gamma` = 0, there is no focal effect on the cross entropy.\n`gamma` reduces the importance given to simple examples in a smooth manner.\n\nThe authors use the alpha-balanced variant of focal loss (FL) in the paper:\n`FL(p_t) = -alpha * (1 - p_t) ** gamma * log(p_t)`\n\nwhere `alpha` is the weight factor for the classes. If `alpha` = 1, the\nloss won't be able to handle class imbalance properly as all\nclasses will have the same weight. This can be a constant or a list of\nconstants. If alpha is a list, it must have the same length as the number\nof classes.\n\nThe formula above can be generalized to:\n`FL(p_t) = alpha * (1 - p_t) ** gamma * CrossEntropy(y_true, y_pred)`\n\nwhere minus comes from `CrossEntropy(y_true, y_pred)` (CE).\n\nExtending this to the multi-class case is straightforward:\n`FL(p_t) = alpha * (1 - p_t) ** gamma * CategoricalCE(y_true, y_pred)`\n\nIn the snippet below, there are `num_classes` floating point values per\nexample. The shapes of both `y_pred` and `y_true` are\n`(batch_size, num_classes)`.\n\nArgs:\n alpha: A weight balancing factor for all classes, default is `0.25` as\n mentioned in the reference. It can be a list of floats or a scalar.\n In the multi-class case, alpha may be set by inverse class\n frequency by using `compute_class_weight` from `sklearn.utils`.\n gamma: A focusing parameter, default is `2.0` as mentioned in the\n reference. It helps to gradually reduce the importance given to\n simple (easy) examples in a smooth manner.\n from_logits: Whether `output` is expected to be a logits tensor. By\n default, we consider that `output` encodes a probability\n distribution.\n label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,\n meaning the confidence on label values are relaxed. For example, if\n `0.1`, use `0.1 / num_classes` for non-target labels and\n `0.9 + 0.1 / num_classes` for target labels.\n axis: The axis along which to compute crossentropy (the features\n axis). Defaults to `-1`.\n reduction: Type of reduction to apply to the loss. In almost all cases\n this should be `\"sum_over_batch_size\"`. Supported options are\n `\"sum\"`, `\"sum_over_batch_size\"`, `\"mean\"`,\n `\"mean_with_sample_weight\"` or `None`. `\"sum\"` sums the loss,\n `\"sum_over_batch_size\"` and `\"mean\"` sum the loss and divide by the\n sample size, and `\"mean_with_sample_weight\"` sums the loss and\n divides by the sum of the sample weights. `\"none\"` and `None`\n perform no aggregation. Defaults to `\"sum_over_batch_size\"`.\n name: Optional name for the loss instance.\n dtype: The dtype of the loss's computations. Defaults to `None`, which\n means using `keras.backend.floatx()`. `keras.backend.floatx()` is a\n `\"float32\"` unless set to different value\n (via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is\n provided, then the `compute_dtype` will be utilized.\n\nExamples:\n\nStandalone usage:\n\n>>> y_true = [[0., 1., 0.], [0., 0., 1.]]\n>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]\n>>> # Using 'auto'/'sum_over_batch_size' reduction type.\n>>> cce = keras.losses.CategoricalFocalCrossentropy()\n>>> cce(y_true, y_pred)\n0.23315276\n\n>>> # Calling with 'sample_weight'.\n>>> cce(y_true, y_pred, sample_weight=np.array([0.3, 0.7]))\n0.1632\n\n>>> # Using 'sum' reduction type.\n>>> cce = keras.losses.CategoricalFocalCrossentropy(\n... reduction=\"sum\")\n>>> cce(y_true, y_pred)\n0.46631\n\n>>> # Using 'none' reduction type.\n>>> cce = keras.losses.CategoricalFocalCrossentropy(\n... 
reduction=None)\n>>> cce(y_true, y_pred)\narray([3.2058331e-05, 4.6627346e-01], dtype=float32)\n\nUsage with the `compile()` API:\n\n```python\nmodel.compile(optimizer='adam',\n loss=keras.losses.CategoricalFocalCrossentropy())\n```"} +{"repo": "tensorflow", "function": "def _prep_debug_cli_for_run_end(self, debug_dump, tf_error, passed_filter, passed_filter_exclude_node_names):\n if tf_error:\n help_intro = cli_shared.get_error_intro(tf_error)\n self._init_command = 'help'\n self._title_color = 'red_on_white'\n else:\n help_intro = None\n self._init_command = 'lt'\n self._title_color = 'black_on_white'\n if passed_filter is not None:\n self._init_command = 'lt -f %s' % passed_filter\n if passed_filter_exclude_node_names:\n self._init_command += ' --filter_exclude_node_names %s' % passed_filter_exclude_node_names\n self._title_color = 'red_on_white'\n self._run_cli = analyzer_cli.create_analyzer_ui(debug_dump, self._tensor_filters, ui_type=self._ui_type, on_ui_exit=self._remove_dump_root, config=self._config)\n dumped_tensor_names = []\n for datum in debug_dump.dumped_tensor_data:\n dumped_tensor_names.append('%s:%d' % (datum.node_name, datum.output_slot))\n self._run_cli.register_tab_comp_context(['print_tensor', 'pt'], dumped_tensor_names)\n self._run_cli.register_tab_comp_context(['node_info', 'ni', 'list_inputs', 'li', 'list_outputs', 'lo'], [str(node_name) for node_name in debug_dump.nodes()])\n self._title = 'run-end: ' + self._run_description\n if help_intro:\n self._run_cli.set_help_intro(help_intro)", "docstring": "Prepare (but not launch) CLI for run-end, with debug dump from the run.\n\nArgs:\n debug_dump: (debug_data.DebugDumpDir) The debug dump directory from this\n run.\n tf_error: (None or OpError) OpError that happened during the run() call\n (if any).\n passed_filter: (None or str) Name of the tensor filter that just passed\n and caused the preparation of this run-end CLI (if any).\n passed_filter_exclude_node_names: (None or str) Regular expression used\n with the tensor filter to exclude ops with names matching the regular\n expression."} +{"repo": "tensorflow", "function": "def set_random_seed(seed):\n if context.executing_eagerly():\n context.set_global_seed(seed)\n else:\n ops.get_default_graph().seed = seed", "docstring": "Sets the graph-level random seed for the default graph.\n\nOperations that rely on a random seed actually derive it from two seeds:\nthe graph-level and operation-level seeds. This sets the graph-level seed.\n\nIts interactions with operation-level seeds is as follows:\n\n 1. If neither the graph-level nor the operation seed is set:\n A random seed is used for this op.\n 2. If the graph-level seed is set, but the operation seed is not:\n The system deterministically picks an operation seed in conjunction with\n the graph-level seed so that it gets a unique random sequence. Within the\n same version of tensorflow and user code, this sequence is deterministic.\n However across different versions, this sequence might change. If the\n code depends on particular seeds to work, specify both graph-level\n and operation-level seeds explicitly.\n 3. If the graph-level seed is not set, but the operation seed is set:\n A default graph-level seed and the specified operation seed are used to\n determine the random sequence.\n 4. 
If both the graph-level and the operation seed are set:\n Both seeds are used in conjunction to determine the random sequence.\n\nTo illustrate the user-visible effects, consider these examples:\n\nTo generate different sequences across sessions, set neither\ngraph-level nor op-level seeds:\n\n```python\na = tf.random.uniform([1])\nb = tf.random.normal([1])\n\nprint(\"Session 1\")\nwith tf.compat.v1.Session() as sess1:\n print(sess1.run(a)) # generates 'A1'\n print(sess1.run(a)) # generates 'A2'\n print(sess1.run(b)) # generates 'B1'\n print(sess1.run(b)) # generates 'B2'\n\nprint(\"Session 2\")\nwith tf.compat.v1.Session() as sess2:\n print(sess2.run(a)) # generates 'A3'\n print(sess2.run(a)) # generates 'A4'\n print(sess2.run(b)) # generates 'B3'\n print(sess2.run(b)) # generates 'B4'\n```\n\nTo generate the same repeatable sequence for an op across sessions, set the\nseed for the op:\n\n```python\na = tf.random.uniform([1], seed=1)\nb = tf.random.normal([1])\n\n# Repeatedly running this block with the same graph will generate the same\n# sequence of values for 'a', but different sequences of values for 'b'.\nprint(\"Session 1\")\nwith tf.compat.v1.Session() as sess1:\n print(sess1.run(a)) # generates 'A1'\n print(sess1.run(a)) # generates 'A2'\n print(sess1.run(b)) # generates 'B1'\n print(sess1.run(b)) # generates 'B2'\n\nprint(\"Session 2\")\nwith tf.compat.v1.Session() as sess2:\n print(sess2.run(a)) # generates 'A1'\n print(sess2.run(a)) # generates 'A2'\n print(sess2.run(b)) # generates 'B3'\n print(sess2.run(b)) # generates 'B4'\n```\n\nTo make the random sequences generated by all ops be repeatable across\nsessions, set a graph-level seed:\n\n```python\ntf.compat.v1.random.set_random_seed(1234)\na = tf.random.uniform([1])\nb = tf.random.normal([1])\n\n# Repeatedly running this block with the same graph will generate the same\n# sequences of 'a' and 'b'.\nprint(\"Session 1\")\nwith tf.compat.v1.Session() as sess1:\n print(sess1.run(a)) # generates 'A1'\n print(sess1.run(a)) # generates 'A2'\n print(sess1.run(b)) # generates 'B1'\n print(sess1.run(b)) # generates 'B2'\n\nprint(\"Session 2\")\nwith tf.compat.v1.Session() as sess2:\n print(sess2.run(a)) # generates 'A1'\n print(sess2.run(a)) # generates 'A2'\n print(sess2.run(b)) # generates 'B1'\n print(sess2.run(b)) # generates 'B2'\n```\n\n@compatibility(TF2)\n'tf.compat.v1.set_random_seed' is compatible with eager mode. However,\nin eager mode this API will set the global seed instead of the\ngraph-level seed of the default graph. In TF2 this API is changed to\n[tf.random.set_seed]\n(https://www.tensorflow.org/api_docs/python/tf/random/set_seed).\n@end_compatibility\n\nArgs:\n seed: integer."} +{"repo": "beam", "function": "def run_inference(self, batch, model, inference_args=None):\n _validate_inference_args(inference_args)\n vectorized_batch = np.vstack(batch)\n predictions = hdbscan.approximate_predict(model, vectorized_batch)\n return [PredictionResult(x, y) for x, y in zip(batch, predictions)]", "docstring": "Runs inferences on a batch of numpy arrays.\n\nArgs:\n batch: A sequence of examples as numpy arrays. They should\n be single examples.\n model: A numpy model or pipeline. 
Must implement predict(X).\n Where the parameter X is a numpy array.\n inference_args: Any additional arguments for an inference.\n\nReturns:\n An Iterable of type PredictionResult."} +{"repo": "beam", "function": "def _check_state_for_finalize_write(self, writer_results, num_shards):\n if not writer_results:\n return ([], [], [], 0)\n src_glob = FileSystems.join(FileSystems.split(writer_results[0])[0], '*')\n dst_glob = self._get_final_name_glob(num_shards)\n src_glob_files = set((file_metadata.path for mr in FileSystems.match([src_glob]) for file_metadata in mr.metadata_list))\n dst_glob_files = set((file_metadata.path for mr in FileSystems.match([dst_glob]) for file_metadata in mr.metadata_list))\n src_files = []\n dst_files = []\n delete_files = []\n num_skipped = 0\n for shard_num, src in enumerate(writer_results):\n final_name = self._get_final_name(shard_num, num_shards)\n dst = final_name\n src_exists = src in src_glob_files\n dst_exists = dst in dst_glob_files\n if not src_exists and (not dst_exists):\n raise BeamIOError('src and dst files do not exist. src: %s, dst: %s' % (src, dst))\n if not src_exists and dst_exists:\n _LOGGER.debug('src: %s -> dst: %s already renamed, skipping', src, dst)\n num_skipped += 1\n continue\n if src_exists and dst_exists and (FileSystems.checksum(src) == FileSystems.checksum(dst)):\n _LOGGER.debug('src: %s == dst: %s, deleting src', src, dst)\n delete_files.append(src)\n continue\n src_files.append(src)\n dst_files.append(dst)\n self._report_sink_lineage(dst_glob, dst_files)\n return (src_files, dst_files, delete_files, num_skipped)", "docstring": "Checks writer output files' states.\n\nReturns:\n src_files, dst_files: Lists of files to rename. For each i, finalize_write\n should rename(src_files[i], dst_files[i]).\n delete_files: Src files to delete. These could be leftovers from an\n incomplete (non-atomic) rename operation.\n num_skipped: Tally of writer results files already renamed, such as from\n a previous run of finalize_write()."} +{"repo": "transformers", "function": "class SiglipVisionConfig(PretrainedConfig):\n model_type = 'siglip_vision_model'\n base_config_key = 'vision_config'\n\n def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act='gelu_pytorch_tanh', layer_norm_eps=1e-06, attention_dropout=0.0, **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.patch_size = patch_size\n self.image_size = image_size\n self.attention_dropout = attention_dropout\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act", "docstring": "This is the configuration class to store the configuration of a [`SiglipVisionModel`]. It is used to instantiate a\nSiglip vision encoder according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the vision encoder of the Siglip\n[google/siglip-base-patch16-224](https://huggingface.co/google/siglip-base-patch16-224) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. 
Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_channels (`int`, *optional*, defaults to 3):\n Number of channels in the input images.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 16):\n The size (resolution) of each patch.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu_pytorch_tanh\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` `\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n\nExample:\n\n```python\n>>> from transformers import SiglipVisionConfig, SiglipVisionModel\n\n>>> # Initializing a SiglipVisionConfig with google/siglip-base-patch16-224 style configuration\n>>> configuration = SiglipVisionConfig()\n\n>>> # Initializing a SiglipVisionModel (with random weights) from the google/siglip-base-patch16-224 style configuration\n>>> model = SiglipVisionModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "genai-processors", "function": "def __init__(self, api_key: str, config: interfaces.Config | None=None):\n self._config = config or interfaces.Config()\n self._genai_processor = genai_model.GenaiModel(api_key=api_key, model_name=self._config.topic_researcher_model_name, generate_content_config=types.GenerateContentConfig(tools=self._config.enabled_research_tools))\n p_preamble = preamble.Preamble(content=[ProcessorPart(prompts.TOPIC_RESEARCH_PREAMBLE), ProcessorPart('Topic to research: ')])\n p_verbalizer = topic_verbalizer.TopicVerbalizer(config=self._config)\n p_suffix = preamble.Suffix(content=[ProcessorPart('Your research: ')])\n self._pipeline = p_verbalizer + p_preamble + p_suffix + self._genai_processor", "docstring": "Initializes the TopicResearcher.\n\nArgs:\n api_key: The API key to use for the GenAI API.\n config: The agent configuration."} +{"repo": "beam", "function": "def has_ontime_pane(self):\n pass", "docstring": "Whether this trigger creates an empty pane even if there are no elements.\n\nReturns:\n True if this trigger guarantees that there will always be an ON_TIME pane\n even if there are no elements in that pane."} +{"repo": "beam", "function": "def parse_arguments(argv):\n parser = argparse.ArgumentParser(description='online-clustering')\n parser.add_argument('-m', '--mode', help='Mode to run pipeline in.', choices=['local', 'cloud'], default='local')\n parser.add_argument('-p', '--project', help='GCP project to run pipeline on.', default=cfg.PROJECT_ID)\n args, _ = parser.parse_known_args(args=argv)\n return args", "docstring": "It parses the arguments passed to the command line and returns them as an object\n\nArgs:\n argv: The 
arguments passed to the command line.\n\nReturns:\n The arguments that are being passed in."} +{"repo": "transformers", "function": "class TFDebertaV2XSoftmax(keras.layers.Layer):\n\n def __init__(self, axis=-1, **kwargs):\n super().__init__(**kwargs)\n self.axis = axis\n\n def call(self, inputs: tf.Tensor, mask: tf.Tensor):\n rmask = tf.logical_not(tf.cast(mask, tf.bool))\n output = tf.where(rmask, tf.cast(float('-inf'), dtype=self.compute_dtype), inputs)\n output = stable_softmax(tf.cast(output, dtype=tf.float32), self.axis)\n output = tf.where(rmask, 0.0, output)\n return output", "docstring": "Masked Softmax which is optimized for saving memory\n\nArgs:\n input (`tf.Tensor`): The input tensor that will apply softmax.\n mask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.\n dim (int): The dimension that will apply softmax"} +{"repo": "tensorflow", "function": "def read(self, index, name=None):\n return self._implementation.read(index, name=name)", "docstring": "Read the value at location `index` in the TensorArray.\n\nArgs:\n index: 0-D. int32 tensor with the index to read from.\n name: A name for the operation (optional).\n\nReturns:\n The tensor at index `index`."} +{"repo": "pyglove", "function": "def download_and_prep_data() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n mnist_dataset = tf.keras.datasets.mnist\n (tr_x, tr_y), (te_x, te_y) = mnist_dataset.load_data()\n tr_x = tr_x / 255.0\n te_x = te_x / 255.0\n return (tr_x, tr_y, te_x, te_y)", "docstring": "Download dataset and scale to [0, 1].\n\nReturns:\n tr_x: Training data.\n tr_y: Training labels.\n te_x: Testing data.\n te_y: Testing labels."} +{"repo": "beam", "function": "def seek(self, offset, whence=os.SEEK_SET):\n self._checkClosed()\n if whence == os.SEEK_SET:\n self._position = offset\n elif whence == os.SEEK_CUR:\n self._position += offset\n elif whence == os.SEEK_END:\n self._position = self._downloader.size + offset\n else:\n raise ValueError('Whence mode %r is invalid.' % whence)\n self._position = min(self._position, self._downloader.size)\n self._position = max(self._position, 0)\n return self._position", "docstring": "Set the stream's current offset.\n\nNote if the new offset is out of bound, it is adjusted to either 0 or EOF.\n\nArgs:\n offset: seek offset as number.\n whence: seek mode. Supported modes are os.SEEK_SET (absolute seek),\n os.SEEK_CUR (seek relative to the current position), and os.SEEK_END\n (seek relative to the end, offset should be negative).\n\nRaises:\n ``ValueError``: When this stream is closed or if whence is invalid."} +{"repo": "pyglove", "function": "def recombine(self, parents: List[pg.DNA], global_state: pg.geno.AttributeDict, step: int) -> List[pg.DNA]:", "docstring": "Generate a list of child DNA based on the list of parents given.\n\nUser should override this method with optional keyword arguments\n'global_state' and 'step'.\n\nThe parents DNA contains a metadata field 'generation', which is the\ngeneration of the parent DNA. 
If the Recombinator does not assign this\nfield for the new child DNA, the child DNA will have the maximum generation\nfrom the parents plus 1.\n\nArgs:\n parents: Parent trials.\n global_state: An `AttributeDict` object as the global state container,\n which is readable/writable during the operation.\n step: Number of examples historically proposed, which can be used for\n determining a cross over schedule.\n\nReturns:\n A list of generated child DNA."} +{"repo": "tensorflow", "function": "def __init__(self, exit_node: tensor_lib.Tensor, pfor_ops: List[ops.Operation], fallback_to_while_loop: bool, pfor_config: 'PForConfig'):\n self._fallback_to_while_loop = fallback_to_while_loop\n self._pfor_config = pfor_config\n self._pfor_ops = set(pfor_ops)\n self._pfor_op_ids = set((x._id for x in pfor_ops))\n assert isinstance(exit_node, tensor_lib.Tensor)\n self._while_context = exit_node.op._get_control_flow_context()\n assert isinstance(self._while_context, control_flow_ops.WhileContext)\n self._context_name = self._while_context.name\n self._condition = self._while_context.pivot.op.inputs[0]\n self._is_inside_loop = self.op_is_inside_loop(self._condition.op)\n if self._is_inside_loop:\n for e in self._while_context.loop_exits:\n assert self.op_is_inside_loop(e.op)\n self._exit_switches = []\n self._body_outputs = []\n self._next_iter_control_inputs = []\n self._enter_merges = []\n self._outputs = []\n self._enters = []\n self._direct_enters = []\n for e in self._while_context.loop_exits:\n self._outputs.append(e.op.outputs[0])\n switch = e.op.inputs[0].op\n assert switch.type == 'Switch', switch\n self._exit_switches.append(switch)\n merge = switch.inputs[0].op\n assert merge.type == 'Merge', merge\n self._enter_merges.append(merge)\n enter = merge.inputs[0].op\n assert enter.type == 'Enter', enter\n self._enters.append(enter.outputs[0])\n next_iter = merge.inputs[1].op\n assert next_iter.type == 'NextIteration', next_iter\n self._body_outputs.append(next_iter.inputs[0])\n self._next_iter_control_inputs.append(next_iter.control_inputs)\n self._is_stateful = False\n for op in ops.get_default_graph().get_operations():\n control_flow_context = op._get_control_flow_context()\n if control_flow_context is None:\n continue\n if control_flow_context.name == self._context_name:\n self._is_stateful |= _is_stateful_pfor_op(op)\n if op.type == 'Enter':\n output = op.outputs[0]\n if output not in self._enters:\n if output.dtype in (dtypes.resource, dtypes.variant):\n if output not in self._direct_enters:\n self._direct_enters.append(output)\n else:\n self._enters.append(output)", "docstring": "Initializer.\n\nArgs:\n exit_node: A tensor output from the while_loop.\n pfor_ops: list of ops inside the current pfor loop.\n fallback_to_while_loop: If True, fallback to while loop when conversion of\n an op is not supported\n pfor_config: PForConfig object used while constructing loop body."} +{"repo": "transformers", "function": "def get_image_features(self, pixel_values: TFModelInputType | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> tf.Tensor:\n image_features = self.groupvit.get_image_features(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n return image_features", "docstring": "Returns:\n image_features (`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying\n the 
projection layer to the pooled output of [`TFGroupViTVisionModel`].\n\nExamples:\n\n```python\n>>> from PIL import Image\n>>> import requests\n>>> from transformers import AutoProcessor, TFGroupViTModel\n\n>>> model = TFGroupViTModel.from_pretrained(\"nvidia/groupvit-gcc-yfcc\")\n>>> processor = AutoProcessor.from_pretrained(\"nvidia/groupvit-gcc-yfcc\")\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> inputs = processor(images=image, return_tensors=\"tf\")\n\n>>> image_features = model.get_image_features(**inputs)\n```"} +{"repo": "tensorflow", "function": "def _warm_start_var_with_vocab(var, current_vocab_path, current_vocab_size, prev_ckpt, prev_vocab_path, previous_vocab_size=-1, current_oov_buckets=0, prev_tensor_name=None, initializer=None, axis=0):\n if not (current_vocab_path and current_vocab_size and prev_ckpt and prev_vocab_path):\n raise ValueError('Invalid args: Must provide all of [current_vocab_path, current_vocab_size, prev_ckpt, prev_vocab_path}.')\n if checkpoint_utils._is_variable(var):\n var = [var]\n elif isinstance(var, list) and all((checkpoint_utils._is_variable(v) for v in var)):\n var = var\n elif isinstance(var, variables_lib.PartitionedVariable):\n var = var._get_variable_list()\n else:\n raise TypeError('var MUST be one of the following: a Variable, list of Variable or PartitionedVariable, but is {}'.format(type(var)))\n if not prev_tensor_name:\n prev_tensor_name = _infer_var_name(var)\n total_v_first_axis = sum((v.get_shape().as_list()[0] for v in var))\n for v in var:\n v_shape = v.get_shape().as_list()\n slice_info = v._get_save_slice_info()\n partition_info = None\n if slice_info:\n partition_info = variable_scope._PartitionInfo(full_shape=slice_info.full_shape, var_offset=slice_info.var_offset)\n if axis == 0:\n new_row_vocab_size = current_vocab_size\n new_col_vocab_size = v_shape[1]\n old_row_vocab_size = previous_vocab_size\n old_row_vocab_file = prev_vocab_path\n new_row_vocab_file = current_vocab_path\n old_col_vocab_file = None\n new_col_vocab_file = None\n num_row_oov_buckets = current_oov_buckets\n num_col_oov_buckets = 0\n elif axis == 1:\n new_row_vocab_size = total_v_first_axis\n new_col_vocab_size = current_vocab_size\n old_row_vocab_size = -1\n old_row_vocab_file = None\n new_row_vocab_file = None\n old_col_vocab_file = prev_vocab_path\n new_col_vocab_file = current_vocab_path\n num_row_oov_buckets = 0\n num_col_oov_buckets = current_oov_buckets\n else:\n raise ValueError('The only supported values for the axis argument are 0 and 1. Provided axis: {}'.format(axis))\n init = checkpoint_ops._load_and_remap_matrix_initializer(ckpt_path=checkpoint_utils._get_checkpoint_filename(prev_ckpt), old_tensor_name=prev_tensor_name, new_row_vocab_size=new_row_vocab_size, new_col_vocab_size=new_col_vocab_size, old_row_vocab_size=old_row_vocab_size, old_row_vocab_file=old_row_vocab_file, new_row_vocab_file=new_row_vocab_file, old_col_vocab_file=old_col_vocab_file, new_col_vocab_file=new_col_vocab_file, num_row_oov_buckets=num_row_oov_buckets, num_col_oov_buckets=num_col_oov_buckets, initializer=initializer)\n new_init_val = ops.convert_to_tensor(init(shape=v_shape, partition_info=partition_info))\n v._initializer_op = state_ops.assign(v, new_init_val)", "docstring": "Warm-starts given variable from `prev_tensor_name` tensor in `prev_ckpt`.\n\nUse this method when the `var` is backed by vocabulary. 
This method stitches\nthe given `var` such that values corresponding to individual features in the\nvocabulary remain consistent irrespective of changing order of the features\nbetween old and new vocabularies.\n\nArgs:\n var: Current graph's variable that needs to be warm-started (initialized).\n Can be either of the following:\n (i) `Variable`\n (ii) `ResourceVariable`\n (iii) list of `Variable`: The list must contain slices of the same larger\n variable.\n (iv) `PartitionedVariable`\n current_vocab_path: Path to the vocab file used for the given `var`.\n current_vocab_size: An `int` specifying the number of entries in the current\n vocab.\n prev_ckpt: A string specifying the directory with checkpoint file(s) or path\n to checkpoint. The given checkpoint must have tensor with name\n `prev_tensor_name` (if not None) or tensor with name same as given `var`.\n prev_vocab_path: Path to the vocab file used for the tensor in `prev_ckpt`.\n previous_vocab_size: If provided, will constrain previous vocab to the first\n `previous_vocab_size` entries. -1 means use the entire previous vocab.\n current_oov_buckets: An `int` specifying the number of out-of-vocabulary\n buckets used for given `var`.\n prev_tensor_name: Name of the tensor to lookup in provided `prev_ckpt`. If\n None, we lookup tensor with same name as given `var`.\n initializer: Variable initializer to be used for missing entries. If None,\n missing entries will be zero-initialized.\n axis: Axis of the variable that the provided vocabulary corresponds to.\n\nRaises:\n ValueError: If required args are not provided."} +{"repo": "tensorflow", "function": "def _has_valid_tensors(self):\n return self._input_tensors is not None and self._output_tensors", "docstring": "Checks if the input and output tensors have been initialized.\n\nReturns:\n Bool."} +{"repo": "tf-quant-finance", "function": "def integrate(func: Callable[[types.FloatTensor], types.FloatTensor], lower: types.FloatTensor, upper: types.FloatTensor, method: IntegrationMethod=IntegrationMethod.COMPOSITE_SIMPSONS_RULE, dtype: Optional[tf.DType]=None, name: Optional[str]=None, **kwargs) -> types.FloatTensor:\n with tf.compat.v1.name_scope(name, default_name='integrate', values=[lower, upper]):\n if method == IntegrationMethod.COMPOSITE_SIMPSONS_RULE:\n return simpson(func, lower, upper, dtype=dtype, **kwargs)\n elif method == IntegrationMethod.GAUSS_LEGENDRE:\n return gauss_legendre(func, lower, upper, dtype=dtype, **kwargs)\n else:\n raise ValueError('Unknown method: %s.' % method)", "docstring": "Evaluates definite integral.\n\n#### Example\n```python\n f = lambda x: x*x\n a = tf.constant(0.0)\n b = tf.constant(3.0)\n integrate(f, a, b) # 9.0\n```\n\nArgs:\n func: Represents a function to be integrated. It must be a callable of a\n single `Tensor` parameter and return a `Tensor` of the same shape and\n dtype as its input. It will be called with a `Tensor` of shape\n `lower.shape + [n]` (where n is integer number of points) and of the same\n `dtype` as `lower`.\n lower: Represents the lower limits of integration. `func` will be integrated\n between each pair of points defined by `lower` and `upper`.\n upper: Same shape and dtype as `lower` representing the upper limits of\n integration.\n method: Integration method. Instance of IntegrationMethod enum. Default is\n IntegrationMethod.COMPOSITE_SIMPSONS_RULE.\n dtype: Dtype of result. Must be real dtype. 
Defaults to dtype of `lower`.\n name: The name to give to the ops created by this function.\n Default value: None which maps to 'integrate'.\n **kwargs: Additional parameters for specific integration method.\n\nReturns:\n `Tensor` of the same shape and dtype as `lower`, containing the value of the\n definite integral.\n\nRaises: ValueError if `method` was not recognized."} +{"repo": "tensorflow", "function": "def build_hlo_module(root: testlib_base.HloInstruction, *instructions: testlib_base.HloInstruction, extra_computations: Sequence[testlib_base.HloComputation] | None=None) -> tuple[testlib_base.HloModule, testlib_base.BufferAssignment]:\n hlo_module = testlib_base.HloModule(root.name())\n hlo_module.add_entry_computation(testlib_base.build_hlo_computation(root, *instructions))\n if extra_computations is not None:\n for computation in extra_computations:\n hlo_module.add_computation(computation)\n return annotate_hlo_module(hlo_module)", "docstring": "Builds an HLO module from a root instruction and its dependencies.\n\nArgs:\n root: The root instruction of the module.\n *instructions: The instructions that are dependencies of the root\n instruction.\n extra_computations: Any extra computations that should be added to the\n module.\n\nReturns:\n A tuple containing the HLO module and its buffer assignment."} +{"repo": "tensorflow", "function": "def take_grad(self, num_required, name=None):\n out = gen_data_flow_ops.resource_accumulator_take_gradient(self._accumulator_ref, num_required, dtype=self._dtype, name=name)\n out.set_shape(self._shape)\n return out", "docstring": "Attempts to extract the average gradient from the accumulator.\n\nThe operation blocks until sufficient number of gradients have been\nsuccessfully applied to the accumulator.\n\nOnce successful, the following actions are also triggered:\n\n- Counter of accumulated gradients is reset to 0.\n- Aggregated gradient is reset to 0 tensor.\n- Accumulator's internal time step is incremented by 1.\n\nArgs:\n num_required: Number of gradients that needs to have been aggregated\n name: Optional name for the operation\n\nReturns:\n A tensor holding the value of the average gradient.\n\nRaises:\n InvalidArgumentError: If num_required < 1"} +{"repo": "tensorflow", "function": "def _GetBcastSubshape(subscripts):\n start = subscripts.find(ellipsis)\n if start == -1:\n return (0, 0)\n remaining = len(subscripts) - (start + len(ellipsis))\n end = -remaining if remaining > 0 else None\n return (start, end)", "docstring": "Returns a tuple denoting the slice mapping to ellipsis.\n\nFor a given subscript, returns a tuple (start, end) denoting the start\naxis index and the (negative) end axis index respectively. For any input\nTensor `x` described by the subscript, `x[start:end]` would be the slice\nrepresented by the ellipsis. E.g. 
For `ab...cd` returns `[1, -2]`.\n\nIf ellipsis is not present in `subscripts`, returns `(0, 0)`.\n\nArgs:\n subscripts: A string denoting the einsum subscript."} +{"repo": "keras", "function": "class TargetReshaper(TransformerMixin, BaseEstimator):\n\n def fit(self, y):\n \"\"\"Fit the transformer to a target y.\n\n Returns:\n TargetReshaper\n A reference to the current instance of TargetReshaper.\n \"\"\"\n self.ndim_ = y.ndim\n return self\n\n def transform(self, y):\n \"\"\"Makes 1D y 2D.\n\n Args:\n y : np.ndarray\n Target y to be transformed.\n\n Returns:\n np.ndarray\n A numpy array, of dimension at least 2.\n \"\"\"\n if y.ndim == 1:\n return y.reshape(-1, 1)\n return y\n\n def inverse_transform(self, y):\n \"\"\"Revert the transformation of transform.\n\n Args:\n y: np.ndarray\n Transformed numpy array.\n\n Returns:\n np.ndarray\n If the transformer was fit to a 1D numpy array,\n and a 2D numpy array with a singleton second dimension\n is passed, it will be squeezed back to 1D. Otherwise, it\n will be left untouched.\n \"\"\"\n sklearn.base.check_is_fitted(self)\n xp, _ = sklearn.utils._array_api.get_namespace(y)\n if self.ndim_ == 1 and y.ndim == 2:\n return xp.squeeze(y, axis=1)\n return y", "docstring": "Convert 1D targets to 2D and back.\n\nFor use in pipelines with transformers that only accept\n2D inputs, like OneHotEncoder and OrdinalEncoder.\n\nAttributes:\n ndim_ : int\n Dimensions of y that the transformer was trained on."} +{"repo": "transformers", "function": "def checkout_commit(repo: Repo, commit_id: str):\n current_head = repo.head.commit if repo.head.is_detached else repo.head.ref\n try:\n repo.git.checkout(commit_id)\n yield\n finally:\n repo.git.checkout(current_head)", "docstring": "Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.\nArgs:\n repo (`git.Repo`): A git repository (for instance the Transformers repo).\n commit_id (`str`): The commit reference to checkout inside the context manager."} +{"repo": "tensorflow", "function": "def get_initialized_tpu_systems():\n return _INITIALIZED_TPU_SYSTEMS.copy()", "docstring": "Returns all currently initialized tpu systems.\n\nReturns:\n A dictionary, with tpu name as the key and the tpu topology as the value."} +{"repo": "transformers", "function": "def center_crop(self, image: 'torch.Tensor', size: dict[str, int], **kwargs) -> 'torch.Tensor':\n if size.height is None or size.width is None:\n raise ValueError(f\"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}\")\n return F.center_crop(image, (size['height'], size['width']))", "docstring": "Center crop an image to `(size[\"height\"], size[\"width\"])`. 
If the input size is smaller than `crop_size` along\nany edge, the image is padded with 0's and then center cropped.\n\nArgs:\n image (`\"torch.Tensor\"`):\n Image to center crop.\n size (`Dict[str, int]`):\n Size of the output image.\n\nReturns:\n `torch.Tensor`: The center cropped image."} +{"repo": "transformers", "function": "def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True):\n if attention_mask is not None:\n attention_mask = invert_mask(attention_mask)\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')\n elif input_ids is not None:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n embed_pos = self.embed_positions(input_ids)\n elif inputs_embeds is not None:\n inputs_embeds = inputs_embeds * self.embed_scale\n position_ids = inputs_embeds[:, :, 0].masked_fill(inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx)\n embed_pos = self.embed_positions(position_ids)\n else:\n raise ValueError('You have to specify either input_ids or inputs_embeds')\n x = inputs_embeds + embed_pos\n x = nn.functional.dropout(x, p=self.dropout, training=self.training)\n x = x.transpose(0, 1)\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n if head_mask is not None:\n assert head_mask.size()[0] == len(self.layers), f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.'\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n x = x.transpose(0, 1)\n encoder_states += (x,)\n x = x.transpose(0, 1)\n dropout_probability = torch.rand([])\n if self.training and dropout_probability < self.layerdrop:\n attn = None\n else:\n x, attn = encoder_layer(x, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions)\n if output_attentions:\n all_attentions = all_attentions + (attn,)\n x = x.transpose(0, 1)\n if output_hidden_states:\n encoder_states += (x,)\n if not return_dict:\n return tuple((v for v in [x, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\n input_ids (`torch.LongTensor`): tokens in the source language of shape\n *(batch, src_len)*\n attention_mask (`torch.LongTensor`): indicating which indices are padding tokens\n inputs_embeds (`torch.FloatTensor`):\n embedding vectors of shape *(batch, src_len, embed_dim)*\n head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\nReturns:\n BaseModelOutput or Tuple comprised of:\n\n - **x** (`torch.Tensor`): the last encoder layer's output of shape *(src_len, batch, embed_dim)*\n - **encoder_states** (`Tuple(torch.FloatTensor)`): all intermediate hidden states of shape *(src_len,\n batch, embed_dim)*. 
Only populated if *output_hidden_states:* is True.\n - **all_attentions** (`Tuple(torch.FloatTensor)`): Attention weights for each layer.\n During training might not be of length n_layers because of layer dropout."} +{"repo": "transformers", "function": "class FlaxQuestionAnsweringModelOutput(ModelOutput):\n start_logits: Optional[jnp.ndarray] = None\n end_logits: Optional[jnp.ndarray] = None\n hidden_states: Optional[Tuple[jnp.ndarray]] = None\n attentions: Optional[Tuple[jnp.ndarray]] = None", "docstring": "Base class for outputs of question answering models.\n\nArgs:\n start_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Span-start scores (before SoftMax).\n end_logits (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Span-end scores (before SoftMax).\n hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attention weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "tensorflow", "function": "def experimental_set_type(self, type_proto) -> None:\n with self.graph._c_graph.get() as c_graph:\n if type_proto.type_id not in (full_type_pb2.TFT_UNSET, full_type_pb2.TFT_PRODUCT):\n raise ValueError('error setting the type of ', self.name, ': expected TFT_UNSET or TFT_PRODUCT, got ', type_proto.type_id)\n with c_api_util.tf_buffer(type_proto.SerializeToString()) as serialized:\n pywrap_tf_session.SetFullType(c_graph, self._c_op, serialized)", "docstring": "Sets the corresponding node's `experimental_type` field.\n\nSee the description of `NodeDef.experimental_type` for more info.\n\nArgs:\n type_proto: A FullTypeDef proto message. The root type_id of this object\n must be `TFT_PRODUCT`, even for ops which only have a single return\n value."} +{"repo": "transformers", "function": "def __call__(self, *args, **kwargs):\n return super().__call__(*args, **kwargs)", "docstring": "Translate the text(s) given as inputs.\n\nArgs:\n args (`str` or `List[str]`):\n Texts to be translated.\n return_tensors (`bool`, *optional*, defaults to `False`):\n Whether or not to include the tensors of predictions (as token indices) in the outputs.\n return_text (`bool`, *optional*, defaults to `True`):\n Whether or not to include the decoded texts in the outputs.\n clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):\n Whether or not to clean up the potential extra spaces in the text output.\n src_lang (`str`, *optional*):\n The language of the input. Might be required for multilingual models. Will not have any effect for\n single pair translation models\n tgt_lang (`str`, *optional*):\n The language of the desired output. Might be required for multilingual models. 
Will not have any effect\n for single pair translation models\n generate_kwargs:\n Additional keyword arguments to pass along to the generate method of the model (see the generate method\n corresponding to your framework [here](./text_generation)).\n\nReturn:\n A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:\n\n - **translation_text** (`str`, present when `return_text=True`) -- The translation.\n - **translation_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The\n token ids of the translation."} +{"repo": "keras", "function": "def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'https://storage.googleapis.com/tensorflow/tf-keras-datasets/'\n files = ['train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz']\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n return ((x_train, y_train), (x_test, y_test))", "docstring": "Loads the Fashion-MNIST dataset.\n\nThis is a dataset of 60,000 28x28 grayscale images of 10 fashion categories,\nalong with a test set of 10,000 images. This dataset can be used as\na drop-in replacement for MNIST.\n\nThe classes are:\n\n| Label | Description |\n|:-----:|-------------|\n| 0 | T-shirt/top |\n| 1 | Trouser |\n| 2 | Pullover |\n| 3 | Dress |\n| 4 | Coat |\n| 5 | Sandal |\n| 6 | Shirt |\n| 7 | Sneaker |\n| 8 | Bag |\n| 9 | Ankle boot |\n\nReturns:\n\nTuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.\n\n**`x_train`**: `uint8` NumPy array of grayscale image data with shapes\n `(60000, 28, 28)`, containing the training data.\n\n**`y_train`**: `uint8` NumPy array of labels (integers in range 0-9)\n with shape `(60000,)` for the training data.\n\n**`x_test`**: `uint8` NumPy array of grayscale image data with shapes\n (10000, 28, 28), containing the test data.\n\n**`y_test`**: `uint8` NumPy array of labels (integers in range 0-9)\n with shape `(10000,)` for the test data.\n\nExample:\n\n```python\n(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\nassert x_train.shape == (60000, 28, 28)\nassert x_test.shape == (10000, 28, 28)\nassert y_train.shape == (60000,)\nassert y_test.shape == (10000,)\n```\n\nLicense:\n\nThe copyright for Fashion-MNIST is held by Zalando SE.\nFashion-MNIST is licensed under the [MIT license](\n https://github.com/zalandoresearch/fashion-mnist/blob/master/LICENSE)."} +{"repo": "tensorflow", "function": "def from_fields_and_rank(cls, fields: Mapping[str, _FieldValue], rank: int, validate: bool=False, dtype: Optional[dtypes.DType]=None) -> 'StructuredTensor':\n if not fields:\n raise ValueError('Must provide at least one field')\n if not isinstance(rank, int):\n raise ValueError('rank must be an integer')\n if rank < 0:\n raise ValueError('rank must be nonnegative')\n fields = {k: _convert_to_structured_field_value(v) for k, v in fields.items()}\n if dtype is None:\n dtype = _find_shape_dtype(fields, None, None)\n 
fields = _fields_with_dtype(fields, dtype)\n shape = _shape_from_fields(fields, rank, dtype)\n if rank > 1:\n shape = shape._with_num_row_partitions(rank - 1)\n new_rp = shape._row_partitions\n fields = {k: _replace_row_partitions(v, new_rp) for k, v in fields.items()}\n return StructuredTensor(fields=fields, ragged_shape=shape)", "docstring": "Creates a `StructuredTensor` from a nonempty dictionary of fields.\n\nNote that if the shape dtype is not specified, the shape dtype will be\ninferred from any fields that have a shape dtype. If fields differ, then\nint64 will be preferred to int32, because coercing from int32 to int64 is\nsafer than coercing from int64 to int32.\n\nIf there are no ragged fields, then it will be int64 by default, but this\nwill be changed to int32 in the future.\n\nArgs:\n fields: A dictionary mapping from string to `Tensor`, `RaggedTensor`, or\n `StructuredTensor`, providing the values for individual fields in each\n structure. If `rank > 0`, then every tensor in `fields` must have the\n same shape in the first `rank` dimensions. Cannot be empty.\n rank: The rank of the resulting structured tensor.\n validate: If true, then add runtime validation ops that check that the\n field values all have compatible shapes in the outer `rank` dimensions.\n dtype: If specified, then forces dtype of the shape to be this.\n\nReturns:\n A `StructuredTensor`.\nExamples:\n >>> tf.experimental.StructuredTensor.from_fields_and_rank(\n ... {'x': 1, 'y': [1, 2, 3]}, 0)\n \n >>> StructuredTensor.from_fields_and_rank({'foo': [1, 2], 'bar': [3, 4]},\n ... 1)\n "} +{"repo": "keras", "function": "def rgb_to_hsv(images, data_format=None):\n if any_symbolic_tensors((images,)):\n return RGBToHSV(data_format=data_format).symbolic_call(images)\n return backend.image.rgb_to_hsv(images, data_format=data_format)", "docstring": "Convert RGB images to HSV.\n\n`images` must be of float dtype, and the output is only well defined if the\nvalues in `images` are in `[0, 1]`.\n\nAll HSV values are in `[0, 1]`. A hue of `0` corresponds to pure red, `1/3`\nis pure green, and `2/3` is pure blue.\n\nArgs:\n images: Input image or batch of images. 
Must be 3D or 4D.\n data_format: A string specifying the data format of the input tensor.\n It can be either `\"channels_last\"` or `\"channels_first\"`.\n `\"channels_last\"` corresponds to inputs with shape\n `(batch, height, width, channels)`, while `\"channels_first\"`\n corresponds to inputs with shape `(batch, channels, height, width)`.\n If not specified, the value will default to\n `keras.config.image_data_format`.\n\nReturns:\n HSV image or batch of HSV images.\n\nExamples:\n\n>>> import numpy as np\n>>> from keras import ops\n>>> x = np.random.random((2, 4, 4, 3))\n>>> y = ops.image.rgb_to_hsv(x)\n>>> y.shape\n(2, 4, 4, 3)\n\n>>> x = np.random.random((4, 4, 3)) # Single RGB image\n>>> y = ops.image.rgb_to_hsv(x)\n>>> y.shape\n(4, 4, 3)\n\n>>> x = np.random.random((2, 3, 4, 4))\n>>> y = ops.image.rgb_to_hsv(x, data_format=\"channels_first\")\n>>> y.shape\n(2, 3, 4, 4)"} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.FloatTensor, attention_mask: torch.FloatTensor, layer_head_mask: torch.FloatTensor, output_attentions: Optional[bool]=False) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:\n residual = hidden_states\n hidden_states, attn_weights, _ = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n if hidden_states.dtype == torch.float16 and (torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()):\n clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (attn_weights,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail."} +{"repo": "qhbm-library", "function": "def seed(self, initial_seed: Union[None, tf.Tensor]):\n if initial_seed is None:\n self._update_seed.assign(True)\n else:\n self._update_seed.assign(False)\n self._seed.assign(tfp.random.sanitize_seed(initial_seed))", "docstring": "Sets a new value of the random seed.\n\nArgs:\n initial_seed: see `self.seed` for details."} +{"repo": "beam", "function": "class SqlChain:\n nodes: Dict[str, SqlNode] = None\n root: Optional[SqlNode] = None\n current: Optional[SqlNode] = None\n user_pipeline: Optional[beam.Pipeline] = None\n\n def __post_init__(self):\n if not self.nodes:\n self.nodes = {}\n\n @progress_indicated\n def to_pipeline(self) -> beam.Pipeline:\n \"\"\"Converts the chain into a beam pipeline.\"\"\"\n pipeline_to_execute = self.root.to_pipeline(self.user_pipeline)\n pipeline_to_execute.contains_external_transforms = True\n return pipeline_to_execute\n\n def append(self, node: SqlNode) -> 'SqlChain':\n \"\"\"Appends a node to the chain.\"\"\"\n if self.current:\n self.current.next = node\n else:\n self.root = node\n self.current = node\n self.nodes[node.output_name] = node\n return self\n\n def get(self, output_name: str) -> Optional[SqlNode]:\n \"\"\"Gets a node from the chain based on the given output_name.\"\"\"\n return self.nodes.get(output_name, None)", "docstring": "A chain of SqlNodes.\n\nAttributes:\n nodes: all nodes by their output_names.\n root: the first SqlNode applied chronologically.\n current: the last node applied.\n user_pipeline: the user defined pipeline this chain originates from. If\n None, the whole chain just computes from raw values in queries.\n Otherwise, at least some of the nodes in chain has queried against\n PCollections."} +{"repo": "transformers", "function": "class Speech2TextTokenizer(PreTrainedTokenizer):\n vocab_files_names = VOCAB_FILES_NAMES\n model_input_names = ['input_ids', 'attention_mask']\n prefix_tokens: List[int] = []\n\n def __init__(self, vocab_file, spm_file, bos_token='', eos_token='', pad_token='', unk_token='', do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, additional_special_tokens=None, sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None:\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n self.do_upper_case = do_upper_case\n self.do_lower_case = do_lower_case\n self.encoder = load_json(vocab_file)\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.spm_file = spm_file\n self.sp_model = load_spm(spm_file, self.sp_model_kwargs)\n if lang_codes is not None:\n self.lang_codes = lang_codes\n self.langs = LANGUAGES[lang_codes]\n self.lang_tokens = [f'' for lang in self.langs]\n self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'') for lang in self.langs}\n if additional_special_tokens is not None:\n additional_special_tokens = self.lang_tokens + additional_special_tokens\n else:\n additional_special_tokens = self.lang_tokens\n self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]\n self.set_tgt_lang_special_tokens(self._tgt_lang)\n else:\n self.lang_code_to_id = {}\n super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, additional_special_tokens=additional_special_tokens, **kwargs)\n\n @property\n def vocab_size(self) -> int:\n return len(self.encoder)\n\n def 
get_vocab(self) -> Dict:\n vocab = self.encoder.copy()\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n @property\n def tgt_lang(self) -> str:\n return self._tgt_lang\n\n @tgt_lang.setter\n def tgt_lang(self, new_tgt_lang) -> None:\n self._tgt_lang = new_tgt_lang\n self.set_tgt_lang_special_tokens(new_tgt_lang)\n\n def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:\n \"\"\"Reset the special tokens to the target language setting. prefix=[eos, tgt_lang_code] and suffix=[eos].\"\"\"\n lang_code_id = self.lang_code_to_id[tgt_lang]\n self.prefix_tokens = [lang_code_id]\n\n def _tokenize(self, text: str) -> List[str]:\n return self.sp_model.encode(text, out_type=str)\n\n def _convert_token_to_id(self, token):\n return self.encoder.get(token, self.encoder[self.unk_token])\n\n def _convert_id_to_token(self, index: int) -> str:\n \"\"\"Converts an index (integer) in a token (str) using the decoder.\"\"\"\n return self.decoder.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens: List[str]) -> str:\n \"\"\"Converts a sequence of tokens (strings for sub-words) in a single string.\"\"\"\n current_sub_tokens = []\n out_string = ''\n for token in tokens:\n if token in self.all_special_tokens:\n decoded = self.sp_model.decode(current_sub_tokens)\n out_string += (decoded.upper() if self.do_upper_case else decoded) + token + ' '\n current_sub_tokens = []\n else:\n current_sub_tokens.append(token)\n decoded = self.sp_model.decode(current_sub_tokens)\n out_string += decoded.upper() if self.do_upper_case else decoded\n return out_string.strip()\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:\n \"\"\"Build model inputs from a sequence by appending eos_token_id.\"\"\"\n if token_ids_1 is None:\n return self.prefix_tokens + token_ids_0 + [self.eos_token_id]\n return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]\n\n def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n prefix_ones = [1] * len(self.prefix_tokens)\n suffix_ones = [1]\n if token_ids_1 is None:\n return prefix_ones + [0] * len(token_ids_0) + suffix_ones\n return prefix_ones + [0] * len(token_ids_0) + [0] * len(token_ids_1) + suffix_ones\n\n def __getstate__(self) -> Dict:\n state = self.__dict__.copy()\n state['sp_model'] = None\n return state\n\n def __setstate__(self, d: Dict) -> None:\n self.__dict__ = d\n if not hasattr(self, 'sp_model_kwargs'):\n self.sp_model_kwargs = {}\n self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n save_dir = Path(save_directory)\n assert save_dir.is_dir(), f'{save_directory} should be a directory'\n vocab_save_path = save_dir / ((filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'])\n spm_save_path = save_dir / ((filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'])\n save_json(self.encoder, vocab_save_path)\n if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):\n copyfile(self.spm_file, spm_save_path)\n elif not os.path.isfile(self.spm_file):\n with open(spm_save_path, 'wb') as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n return (str(vocab_save_path), str(spm_save_path))", "docstring": "Construct a Speech2Text tokenizer.\n\nThis tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to\nthe superclass for more information regarding such methods.\n\nArgs:\n vocab_file (`str`):\n File containing the vocabulary.\n spm_file (`str`):\n Path to the [SentencePiece](https://github.com/google/sentencepiece) model file\n bos_token (`str`, *optional*, defaults to `\"\"`):\n The beginning of sentence token.\n eos_token (`str`, *optional*, defaults to `\"\"`):\n The end of sentence token.\n unk_token (`str`, *optional*, defaults to `\"\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n pad_token (`str`, *optional*, defaults to `\"\"`):\n The token used for padding, for example when batching sequences of different lengths.\n do_upper_case (`bool`, *optional*, defaults to `False`):\n Whether or not to uppercase the output when decoding.\n do_lower_case (`bool`, *optional*, defaults to `False`):\n Whether or not to lowercase the input when tokenizing.\n tgt_lang (`str`, *optional*):\n A string representing the target language.\n sp_model_kwargs (`dict`, *optional*):\n Will be passed to the `SentencePieceProcessor.__init__()` method. 
The [Python wrapper for\n SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,\n to set:\n\n - `enable_sampling`: Enable subword regularization.\n - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.\n\n - `nbest_size = {0,1}`: No sampling is performed.\n - `nbest_size > 1`: samples from the nbest_size results.\n - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)\n using forward-filtering-and-backward-sampling algorithm.\n\n - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for\n BPE-dropout.\n\n **kwargs\n Additional keyword arguments passed along to [`PreTrainedTokenizer`]"} +{"repo": "transformers", "function": "class Wav2Vec2ProcessorWithLM(ProcessorMixin):\n feature_extractor_class = 'AutoFeatureExtractor'\n tokenizer_class = 'Wav2Vec2CTCTokenizer'\n\n def __init__(self, feature_extractor: 'FeatureExtractionMixin', tokenizer: 'PreTrainedTokenizerBase', decoder: 'BeamSearchDecoderCTC'):\n from pyctcdecode import BeamSearchDecoderCTC\n super().__init__(feature_extractor, tokenizer)\n if not isinstance(decoder, BeamSearchDecoderCTC):\n raise TypeError(f'`decoder` has to be of type {BeamSearchDecoderCTC.__class__}, but is {type(decoder)}')\n if feature_extractor.__class__.__name__ not in ['Wav2Vec2FeatureExtractor', 'SeamlessM4TFeatureExtractor']:\n raise ValueError(f'`feature_extractor` has to be of type `Wav2Vec2FeatureExtractor` or `SeamlessM4TFeatureExtractor`, but is {type(feature_extractor)}')\n missing_decoder_tokens = self.get_missing_alphabet_tokens(decoder, tokenizer)\n if len(missing_decoder_tokens) > 0:\n raise ValueError(f\"The tokens {missing_decoder_tokens} are defined in the tokenizer's vocabulary, but not in the decoder's alphabet. 
Make sure to include {missing_decoder_tokens} in the decoder's alphabet.\")\n self.decoder = decoder\n self.current_processor = self.feature_extractor\n self._in_target_context_manager = False\n\n def save_pretrained(self, save_directory):\n super().save_pretrained(save_directory)\n self.decoder.save_to_dir(save_directory)\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):\n \"\"\"\n Instantiate a [`Wav2Vec2ProcessorWithLM`] from a pretrained Wav2Vec2 processor.\n\n \n\n This class method is simply calling the feature extractor's\n [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`], Wav2Vec2CTCTokenizer's\n [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], and\n [`pyctcdecode.BeamSearchDecoderCTC.load_from_hf_hub`].\n\n Please refer to the docstrings of the methods above for more information.\n\n \n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n This can be either:\n\n - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on\n huggingface.co.\n - a path to a *directory* containing a feature extractor file saved using the\n [`~SequenceFeatureExtractor.save_pretrained`] method, e.g., `./my_model_directory/`.\n - a path or url to a saved feature extractor JSON *file*, e.g.,\n `./my_model_directory/preprocessor_config.json`.\n **kwargs\n Additional keyword arguments passed along to both [`SequenceFeatureExtractor`] and\n [`PreTrainedTokenizer`]\n \"\"\"\n requires_backends(cls, 'pyctcdecode')\n from pyctcdecode import BeamSearchDecoderCTC\n feature_extractor, tokenizer = super()._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)\n if os.path.isdir(pretrained_model_name_or_path) or os.path.isfile(pretrained_model_name_or_path):\n unigram_encoding = kwargs.get('unigram_encoding', 'utf-8')\n decoder = BeamSearchDecoderCTC.load_from_dir(pretrained_model_name_or_path, unigram_encoding)\n else:\n kwargs.pop('_from_auto', None)\n kwargs.pop('trust_remote_code', None)\n language_model_filenames = os.path.join(BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, '*')\n alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME\n allow_patterns = [language_model_filenames, alphabet_filename]\n decoder = BeamSearchDecoderCTC.load_from_hf_hub(pretrained_model_name_or_path, allow_patterns=allow_patterns, **kwargs)\n for attribute in ['alpha', 'beta', 'unk_score_offset', 'score_boundary']:\n value = kwargs.pop(attribute, None)\n if value is not None:\n cls._set_language_model_attribute(decoder, attribute, value)\n missing_decoder_tokens = cls.get_missing_alphabet_tokens(decoder, tokenizer)\n if len(missing_decoder_tokens) > 0:\n raise ValueError(f\"The tokens {missing_decoder_tokens} are defined in the tokenizer's vocabulary, but not in the decoder's alphabet. 
Make sure to include {missing_decoder_tokens} in the decoder's alphabet.\")\n return cls(feature_extractor=feature_extractor, tokenizer=tokenizer, decoder=decoder)\n\n @staticmethod\n def _set_language_model_attribute(decoder: 'BeamSearchDecoderCTC', attribute: str, value: float):\n setattr(decoder.model_container[decoder._model_key], attribute, value)\n\n @property\n def language_model(self):\n return self.decoder.model_container[self.decoder._model_key]\n\n @staticmethod\n def get_missing_alphabet_tokens(decoder, tokenizer):\n from pyctcdecode.alphabet import BLANK_TOKEN_PTN, UNK_TOKEN, UNK_TOKEN_PTN\n tokenizer_vocab_list = list(tokenizer.get_vocab().keys())\n for i, token in enumerate(tokenizer_vocab_list):\n if BLANK_TOKEN_PTN.match(token):\n tokenizer_vocab_list[i] = ''\n if token == tokenizer.word_delimiter_token:\n tokenizer_vocab_list[i] = ' '\n if UNK_TOKEN_PTN.match(token):\n tokenizer_vocab_list[i] = UNK_TOKEN\n missing_tokens = set(tokenizer_vocab_list) - set(decoder._alphabet.labels)\n return missing_tokens\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n When used in normal mode, this method forwards all its arguments to the feature extractor's\n [`~FeatureExtractionMixin.__call__`] and returns its output. If used in the context\n [`~Wav2Vec2ProcessorWithLM.as_target_processor`] this method forwards all its arguments to\n Wav2Vec2CTCTokenizer's [`~Wav2Vec2CTCTokenizer.__call__`]. Please refer to the docstring of the above two\n methods for more information.\n \"\"\"\n if self._in_target_context_manager:\n return self.current_processor(*args, **kwargs)\n if 'raw_speech' in kwargs:\n warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')\n audio = kwargs.pop('raw_speech')\n else:\n audio = kwargs.pop('audio', None)\n sampling_rate = kwargs.pop('sampling_rate', None)\n text = kwargs.pop('text', None)\n if len(args) > 0:\n audio = args[0]\n args = args[1:]\n if audio is None and text is None:\n raise ValueError('You need to specify either an `audio` or `text` input to process.')\n if audio is not None:\n inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)\n if text is not None:\n encodings = self.tokenizer(text, **kwargs)\n if text is None:\n return inputs\n elif audio is None:\n return encodings\n else:\n inputs['labels'] = encodings['input_ids']\n return inputs\n\n def pad(self, *args, **kwargs):\n \"\"\"\n When used in normal mode, this method forwards all its arguments to the feature extractor's\n [`~FeatureExtractionMixin.pad`] and returns its output. If used in the context\n [`~Wav2Vec2ProcessorWithLM.as_target_processor`] this method forwards all its arguments to\n Wav2Vec2CTCTokenizer's [`~Wav2Vec2CTCTokenizer.pad`]. 
Please refer to the docstring of the above two methods\n for more information.\n \"\"\"\n if self._in_target_context_manager:\n return self.current_processor.pad(*args, **kwargs)\n input_features = kwargs.pop('input_features', None)\n labels = kwargs.pop('labels', None)\n if len(args) > 0:\n input_features = args[0]\n args = args[1:]\n if input_features is not None:\n input_features = self.feature_extractor.pad(input_features, *args, **kwargs)\n if labels is not None:\n labels = self.tokenizer.pad(labels, **kwargs)\n if labels is None:\n return input_features\n elif input_features is None:\n return labels\n else:\n input_features['labels'] = labels['input_ids']\n return input_features\n\n def batch_decode(self, logits: np.ndarray, pool: Optional[Pool]=None, num_processes: Optional[int]=None, beam_width: Optional[int]=None, beam_prune_logp: Optional[float]=None, token_min_logp: Optional[float]=None, hotwords: Optional[Iterable[str]]=None, hotword_weight: Optional[float]=None, alpha: Optional[float]=None, beta: Optional[float]=None, unk_score_offset: Optional[float]=None, lm_score_boundary: Optional[bool]=None, output_word_offsets: bool=False, n_best: int=1):\n \"\"\"\n Batch decode output logits to audio transcription with language model support.\n\n \n\n This function makes use of Python's multiprocessing. Currently, multiprocessing is available only on Unix\n systems (see this [issue](https://github.com/kensho-technologies/pyctcdecode/issues/65)).\n\n If you are decoding multiple batches, consider creating a `Pool` and passing it to `batch_decode`. Otherwise,\n `batch_decode` will be very slow since it will create a fresh `Pool` for each call. See usage example below.\n\n \n\n Args:\n logits (`np.ndarray`):\n The logits output vector of the model representing the log probabilities for each token.\n pool (`multiprocessing.Pool`, *optional*):\n An optional user-managed pool. If not set, one will be automatically created and closed. The pool\n should be instantiated *after* `Wav2Vec2ProcessorWithLM`. Otherwise, the LM won't be available to the\n pool's sub-processes.\n\n \n\n Currently, only pools created with a 'fork' context can be used. If a 'spawn' pool is passed, it will\n be ignored and sequential decoding will be used instead.\n\n \n\n num_processes (`int`, *optional*):\n If `pool` is not set, number of processes on which the function should be parallelized over. Defaults\n to the number of available CPUs.\n beam_width (`int`, *optional*):\n Maximum number of beams at each step in decoding. 
Defaults to pyctcdecode's DEFAULT_BEAM_WIDTH.\n beam_prune_logp (`int`, *optional*):\n Beams that are much worse than best beam will be pruned. Defaults to pyctcdecode's DEFAULT_PRUNE_LOGP.\n token_min_logp (`int`, *optional*):\n Tokens below this logp are skipped unless they are argmax of frame. Defaults to pyctcdecode's\n DEFAULT_MIN_TOKEN_LOGP.\n hotwords (`List[str]`, *optional*):\n List of words with extra importance, can be OOV for LM\n hotword_weight (`int`, *optional*):\n Weight factor for hotword importance. Defaults to pyctcdecode's DEFAULT_HOTWORD_WEIGHT.\n alpha (`float`, *optional*):\n Weight for language model during shallow fusion\n beta (`float`, *optional*):\n Weight for length score adjustment during scoring\n unk_score_offset (`float`, *optional*):\n Amount of log score offset for unknown tokens\n lm_score_boundary (`bool`, *optional*):\n Whether to have kenlm respect boundaries when scoring\n output_word_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate\n and model downsampling rate to compute the time-stamps of transcribed words.\n n_best (`int`, *optional*, defaults to `1`):\n Number of best hypotheses to return. If `n_best` is greater than 1, the returned `text` will be a list\n of lists of strings, `logit_score` will be a list of lists of floats, and `lm_score` will be a list of\n lists of floats, where the length of the outer list will correspond to the batch size and the length of\n the inner list will correspond to the number of returned hypotheses. The value should be >= 1.\n\n \n\n Please take a look at the Example of [`~Wav2Vec2ProcessorWithLM.decode`] to better understand how to\n make use of `output_word_offsets`. [`~Wav2Vec2ProcessorWithLM.batch_decode`] works the same way with\n batched output.\n\n \n\n Returns:\n [`~models.wav2vec2.Wav2Vec2DecoderWithLMOutput`].\n\n Example:\n See [Decoding multiple audios](#decoding-multiple-audios).\n \"\"\"\n from pyctcdecode.constants import DEFAULT_BEAM_WIDTH, DEFAULT_HOTWORD_WEIGHT, DEFAULT_MIN_TOKEN_LOGP, DEFAULT_PRUNE_LOGP\n beam_width = beam_width if beam_width is not None else DEFAULT_BEAM_WIDTH\n beam_prune_logp = beam_prune_logp if beam_prune_logp is not None else DEFAULT_PRUNE_LOGP\n token_min_logp = token_min_logp if token_min_logp is not None else DEFAULT_MIN_TOKEN_LOGP\n hotword_weight = hotword_weight if hotword_weight is not None else DEFAULT_HOTWORD_WEIGHT\n self.decoder.reset_params(alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary)\n logits_list = [array[(array != -100.0).all(axis=-1)] for array in logits]\n if pool is None:\n default_context = get_start_method()\n if default_context == 'fork':\n cm = pool = get_context().Pool(num_processes)\n else:\n logger.warning('Parallel batch decoding is not currently supported in this platform. 
Falling back to sequential decoding.')\n cm = nullcontext()\n else:\n cm = nullcontext()\n if num_processes is not None:\n logger.warning('Parameter `num_process` was passed, but it will be ignored since `pool` was also specified.')\n with cm:\n decoded_beams = self.decoder.decode_beams_batch(pool=pool, logits_list=logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, hotwords=hotwords, hotword_weight=hotword_weight)\n batch_texts, logit_scores, lm_scores, word_offsets = ([], [], [], [])\n for d in decoded_beams:\n batch_texts.append([beam[0] for beam in d])\n logit_scores.append([beam[-2] for beam in d])\n lm_scores.append([beam[-1] for beam in d])\n word_offsets.append([[{'word': word, 'start_offset': start_offset, 'end_offset': end_offset} for word, (start_offset, end_offset) in beam[1]] for beam in d])\n word_offsets = word_offsets if output_word_offsets else None\n if n_best == 1:\n return Wav2Vec2DecoderWithLMOutput(text=[hyps[0] for hyps in batch_texts], logit_score=[hyps[0] for hyps in logit_scores], lm_score=[hyps[0] for hyps in lm_scores], word_offsets=[hyps[0] for hyps in word_offsets] if word_offsets is not None else None)\n else:\n return Wav2Vec2DecoderWithLMOutput(text=[hyps[:n_best] for hyps in batch_texts], logit_score=[hyps[:n_best] for hyps in logit_scores], lm_score=[hyps[:n_best] for hyps in lm_scores], word_offsets=[hyps[:n_best] for hyps in word_offsets] if word_offsets is not None else None)\n\n def decode(self, logits: np.ndarray, beam_width: Optional[int]=None, beam_prune_logp: Optional[float]=None, token_min_logp: Optional[float]=None, hotwords: Optional[Iterable[str]]=None, hotword_weight: Optional[float]=None, alpha: Optional[float]=None, beta: Optional[float]=None, unk_score_offset: Optional[float]=None, lm_score_boundary: Optional[bool]=None, output_word_offsets: bool=False, n_best: int=1):\n \"\"\"\n Decode output logits to audio transcription with language model support.\n\n Args:\n logits (`np.ndarray`):\n The logits output vector of the model representing the log probabilities for each token.\n beam_width (`int`, *optional*):\n Maximum number of beams at each step in decoding. Defaults to pyctcdecode's DEFAULT_BEAM_WIDTH.\n beam_prune_logp (`int`, *optional*):\n A threshold to prune beams with log-probs less than best_beam_logp + beam_prune_logp. The value should\n be <= 0. Defaults to pyctcdecode's DEFAULT_PRUNE_LOGP.\n token_min_logp (`int`, *optional*):\n Tokens with log-probs below token_min_logp are skipped unless they have the maximum log-prob for an\n utterance. Defaults to pyctcdecode's DEFAULT_MIN_TOKEN_LOGP.\n hotwords (`List[str]`, *optional*):\n List of words with extra importance which can be missing from the LM's vocabulary, e.g. [\"huggingface\"]\n hotword_weight (`int`, *optional*):\n Weight multiplier that boosts hotword scores. Defaults to pyctcdecode's DEFAULT_HOTWORD_WEIGHT.\n alpha (`float`, *optional*):\n Weight for language model during shallow fusion\n beta (`float`, *optional*):\n Weight for length score adjustment during scoring\n unk_score_offset (`float`, *optional*):\n Amount of log score offset for unknown tokens\n lm_score_boundary (`bool`, *optional*):\n Whether to have kenlm respect boundaries when scoring\n output_word_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output word offsets. 
Word offsets can be used in combination with the sampling rate\n and model downsampling rate to compute the time-stamps of transcribed words.\n n_best (`int`, *optional*, defaults to `1`):\n Number of best hypotheses to return. If `n_best` is greater than 1, the returned `text` will be a list\n of strings, `logit_score` will be a list of floats, and `lm_score` will be a list of floats, where the\n length of these lists will correspond to the number of returned hypotheses. The value should be >= 1.\n\n \n\n Please take a look at the example below to better understand how to make use of `output_word_offsets`.\n\n \n\n Returns:\n [`~models.wav2vec2.Wav2Vec2DecoderWithLMOutput`].\n\n Example:\n\n ```python\n >>> # Let's see how to retrieve time steps for a model\n >>> from transformers import AutoTokenizer, AutoProcessor, AutoModelForCTC\n >>> from datasets import load_dataset\n >>> import datasets\n >>> import torch\n\n >>> # import model, feature extractor, tokenizer\n >>> model = AutoModelForCTC.from_pretrained(\"patrickvonplaten/wav2vec2-base-100h-with-lm\")\n >>> processor = AutoProcessor.from_pretrained(\"patrickvonplaten/wav2vec2-base-100h-with-lm\")\n\n >>> # load first sample of English common_voice\n >>> dataset = load_dataset(\"mozilla-foundation/common_voice_11_0\", \"en\", split=\"train\", streaming=True, trust_remote_code=True)\n >>> dataset = dataset.cast_column(\"audio\", datasets.Audio(sampling_rate=16_000))\n >>> dataset_iter = iter(dataset)\n >>> sample = next(dataset_iter)\n\n >>> # forward sample through model to get greedily predicted transcription ids\n >>> input_values = processor(sample[\"audio\"][\"array\"], return_tensors=\"pt\").input_values\n >>> with torch.no_grad():\n ... logits = model(input_values).logits[0].cpu().numpy()\n\n >>> # retrieve word stamps (analogous commands for `output_char_offsets`)\n >>> outputs = processor.decode(logits, output_word_offsets=True)\n >>> # compute `time_offset` in seconds as product of downsampling ratio and sampling_rate\n >>> time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate\n\n >>> word_offsets = [\n ... {\n ... \"word\": d[\"word\"],\n ... \"start_time\": round(d[\"start_offset\"] * time_offset, 2),\n ... \"end_time\": round(d[\"end_offset\"] * time_offset, 2),\n ... }\n ... for d in outputs.word_offsets\n ... 
]\n >>> # compare word offsets with audio `en_train_0/common_voice_en_19121553.mp3` online on the dataset viewer:\n >>> # https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/en\n >>> word_offsets[:4]\n [{'word': 'THE', 'start_time': 0.68, 'end_time': 0.78}, {'word': 'TRACK', 'start_time': 0.88, 'end_time': 1.1}, {'word': 'APPEARS', 'start_time': 1.18, 'end_time': 1.66}, {'word': 'ON', 'start_time': 1.86, 'end_time': 1.92}]\n ```\"\"\"\n from pyctcdecode.constants import DEFAULT_BEAM_WIDTH, DEFAULT_HOTWORD_WEIGHT, DEFAULT_MIN_TOKEN_LOGP, DEFAULT_PRUNE_LOGP\n beam_width = beam_width if beam_width is not None else DEFAULT_BEAM_WIDTH\n beam_prune_logp = beam_prune_logp if beam_prune_logp is not None else DEFAULT_PRUNE_LOGP\n token_min_logp = token_min_logp if token_min_logp is not None else DEFAULT_MIN_TOKEN_LOGP\n hotword_weight = hotword_weight if hotword_weight is not None else DEFAULT_HOTWORD_WEIGHT\n self.decoder.reset_params(alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary)\n decoded_beams = self.decoder.decode_beams(logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp, hotwords=hotwords, hotword_weight=hotword_weight)\n word_offsets = None\n if output_word_offsets:\n word_offsets = [[{'word': word, 'start_offset': start_offset, 'end_offset': end_offset} for word, (start_offset, end_offset) in beam[2]] for beam in decoded_beams]\n logit_scores = [beam[-2] for beam in decoded_beams]\n lm_scores = [beam[-1] for beam in decoded_beams]\n hypotheses = [beam[0] for beam in decoded_beams]\n if n_best > len(decoded_beams):\n logger.info('N-best size is larger than the number of generated hypotheses, all hypotheses will be returned.')\n if n_best == 1:\n return Wav2Vec2DecoderWithLMOutput(text=hypotheses[0], logit_score=logit_scores[0], lm_score=lm_scores[0], word_offsets=word_offsets[0] if word_offsets is not None else None)\n else:\n return Wav2Vec2DecoderWithLMOutput(text=hypotheses[:n_best], logit_score=logit_scores[:n_best], lm_score=lm_scores[:n_best], word_offsets=word_offsets[:n_best] if word_offsets is not None else None)\n\n @contextmanager\n def as_target_processor(self):\n \"\"\"\n Temporarily sets the processor for processing the target. Useful for encoding the labels when fine-tuning\n Wav2Vec2.\n \"\"\"\n warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.')\n self._in_target_context_manager = True\n self.current_processor = self.tokenizer\n yield\n self.current_processor = self.feature_extractor\n self._in_target_context_manager = False", "docstring": "Constructs a Wav2Vec2 processor which wraps a Wav2Vec2 feature extractor, a Wav2Vec2 CTC tokenizer and a decoder\nwith language model support into a single processor for language model boosted speech recognition decoding.\n\nArgs:\n feature_extractor ([`Wav2Vec2FeatureExtractor`] or [`SeamlessM4TFeatureExtractor`]):\n An instance of [`Wav2Vec2FeatureExtractor`] or [`SeamlessM4TFeatureExtractor`]. The feature extractor is a required input.\n tokenizer ([`Wav2Vec2CTCTokenizer`]):\n An instance of [`Wav2Vec2CTCTokenizer`]. The tokenizer is a required input.\n decoder (`pyctcdecode.BeamSearchDecoderCTC`):\n An instance of [`pyctcdecode.BeamSearchDecoderCTC`]. 
The decoder is a required input."} +{"repo": "tensorflow", "function": "def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):\n summed_grad, unique_indices = _deduplicate_indexed_slices(values=grad, indices=indices)\n return self._resource_apply_sparse(summed_grad, handle, unique_indices)", "docstring": "Add ops to apply sparse gradients to `handle`, with repeated indices.\n\nOptimizers which override this method must deal with repeated indices. See\nthe docstring of `_apply_sparse_duplicate_indices` for details. By default\nthe correct behavior, to sum non-unique indices and their associated\ngradients, is enforced by first pre-processing `grad` and `indices` and\npassing them on to `_resource_apply_sparse`. Optimizers which deal correctly\nwith duplicate indices may instead override this method to avoid the\noverhead of summing.\n\nArgs:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable\n to be updated.\n indices: a `Tensor` of integral type representing the indices for\n which the gradient is nonzero. Indices may be repeated.\n\nReturns:\n An `Operation` which updates the value of the variable."} +{"repo": "tensorflow", "function": "def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):\n with ops.name_scope(name, 'normalize', [counts, mean_ss, variance_ss, shift]):\n divisor = math_ops.reciprocal(counts, name='divisor')\n if shift is not None:\n shifted_mean = math_ops.multiply(mean_ss, divisor, name='shifted_mean')\n mean = math_ops.add(shifted_mean, shift, name='mean')\n else:\n shifted_mean = math_ops.multiply(mean_ss, divisor, name='mean')\n mean = shifted_mean\n variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor), math_ops.square(shifted_mean), name='variance')\n return (mean, variance)", "docstring": "Calculate the mean and variance based on the sufficient statistics.\n\nArgs:\n counts: A `Tensor` containing the total count of the data (one value).\n mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly\n shifted) sum of the elements to average over.\n variance_ss: A `Tensor` containing the variance sufficient statistics: the\n (possibly shifted) squared sum of the data to compute the variance over.\n shift: A `Tensor` containing the value by which the data is shifted for\n numerical stability, or `None` if no shift was performed.\n name: Name used to scope the operations that compute the moments.\n\nReturns:\n Two `Tensor` objects: `mean` and `variance`."} +{"repo": "yapf", "function": "def ParseCode(unformatted_source, filename=''):\n if not unformatted_source.endswith(os.linesep):\n unformatted_source += os.linesep\n try:\n ast_tree = ast.parse(unformatted_source, filename)\n ast.fix_missing_locations(ast_tree)\n readline = StringIO(unformatted_source).readline\n tokens = tokenize.generate_tokens(readline)\n except Exception:\n raise\n logical_lines = _CreateLogicalLines(tokens)\n split_penalty_visitor.SplitPenalty(logical_lines).visit(ast_tree)\n return logical_lines", "docstring": "Parse a string of Python code into logical lines.\n\nThis provides an alternative entry point to YAPF.\n\nArguments:\n unformatted_source: (unicode) The code to format.\n filename: (unicode) The name of the file being reformatted.\n\nReturns:\n A list of LogicalLines.\n\nRaises:\n An exception is raised if there's an error during AST parsing."} +{"repo": "beam", "function": "def __init__(self, model_name: str, *, max_seq_length: 
Optional[int]=None, **kwargs):\n if not SentenceTransformer:\n raise ImportError('sentence-transformers is required to use HuggingfaceTextEmbeddings. Please install it using `pip install sentence-transformers`.')\n super().__init__(type_adapter=create_rag_adapter(), **kwargs)\n self.model_name = model_name\n self.max_seq_length = max_seq_length\n self.model_class = SentenceTransformer", "docstring": "Utilizes huggingface SentenceTransformer embeddings for RAG pipeline.\n\nArgs:\n model_name: Name of the sentence-transformers model to use\n max_seq_length: Maximum sequence length for the model\n **kwargs: Additional arguments passed to \n :class:`~apache_beam.ml.transforms.base.EmbeddingsManager`\n constructor including ModelHandler arguments"} +{"repo": "transformers", "function": "def replace_with_aqlm_linear(model, quantization_config=None, linear_weights_not_to_quantize=None, current_key_name=None, has_been_replaced=False):\n if not is_aqlm_available():\n raise ValueError('AQLM is not available. Please install it with `pip install aqlm[cpu,gpu]`')\n if not is_accelerate_available():\n raise ValueError(f\"AQLM requires Accelerate to be installed: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`\")\n if linear_weights_not_to_quantize is None:\n linear_weights_not_to_quantize = []\n from accelerate import init_empty_weights\n from aqlm import QuantizedLinear\n for name, module in model.named_children():\n if current_key_name is None:\n current_key_name = []\n current_key_name.append(name)\n if isinstance(module, nn.Linear):\n if '.'.join(current_key_name) + '.weight' not in linear_weights_not_to_quantize:\n with init_empty_weights():\n in_features = module.in_features\n out_features = module.out_features\n model._modules[name] = QuantizedLinear(in_features, out_features, bias=module.bias is not None, in_group_size=quantization_config.in_group_size, out_group_size=quantization_config.out_group_size, num_codebooks=quantization_config.num_codebooks, nbits_per_codebook=quantization_config.nbits_per_codebook)\n has_been_replaced = True\n model._modules[name].source_cls = type(module)\n model._modules[name].requires_grad_(False)\n if len(list(module.children())) > 0:\n _, has_been_replaced = replace_with_aqlm_linear(module, quantization_config=quantization_config, linear_weights_not_to_quantize=linear_weights_not_to_quantize, current_key_name=current_key_name, has_been_replaced=has_been_replaced)\n current_key_name.pop(-1)\n return (model, has_been_replaced)", "docstring": "Public method that recursively replaces the Linear layers of the given model with AQLM quantized layers.\n`accelerate` is needed to use this method. Returns the converted model and a boolean that indicates if the\nconversion has been successful or not.\n\nArgs:\n model (`torch.nn.Module`):\n The model to convert, can be any `torch.nn.Module` instance.\n quantization_config (`AqlmConfig`):\n The quantization config object that contains the quantization parameters.\n linear_weights_not_to_quantize (`list[str]`, *optional*):\n A list of nn.Linear weights to not convert. If a parameter path is in the list (e.g. `lm_head.weight`), the corresponding module will not be\n converted.\n current_key_name (`list`, *optional*):\n A list that contains the current key name. This is used for recursion and should not be passed by the user.\n has_been_replaced (`bool`, *optional*):\n A boolean that indicates if the conversion has been successful or not. 
This is used for recursion and\n should not be passed by the user."} +{"repo": "keras", "function": "def get_config(self):\n return {}", "docstring": "Returns a Python dict of the object config.\n\nA constraint config is a Python dictionary (JSON-serializable) that can\nbe used to reinstantiate the same object.\n\nReturns:\n Python dict containing the configuration of the constraint object."} +{"repo": "tensorflow", "function": "def convert_nested_bidirectional(weights):\n num_weights_per_layer = len(weights) // 2\n forward_weights = preprocess_weights_for_loading(layer.forward_layer, weights[:num_weights_per_layer], original_keras_version, original_backend)\n backward_weights = preprocess_weights_for_loading(layer.backward_layer, weights[num_weights_per_layer:], original_keras_version, original_backend)\n return forward_weights + backward_weights", "docstring": "Converts layers nested in `Bidirectional` wrapper.\n\nThis function uses `preprocess_weights_for_loading()` for converting\nlayers.\n\nArgs:\n weights: List of weights values (Numpy arrays).\n\nReturns:\n A list of weights values (Numpy arrays)."} +{"repo": "transformers", "function": "class GotOcr2VisionConfig(PretrainedConfig):\n base_config_key = 'vision_config'\n\n def __init__(self, hidden_size=768, output_channels=256, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=1024, patch_size=16, hidden_act='gelu', layer_norm_eps=1e-06, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, use_abs_pos=True, use_rel_pos=True, window_size=14, global_attn_indexes=[2, 5, 8, 11], mlp_dim=3072, **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.output_channels = output_channels\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.image_size = image_size\n self.patch_size = patch_size\n self.hidden_act = hidden_act\n self.layer_norm_eps = layer_norm_eps\n self.attention_dropout = attention_dropout\n self.initializer_range = initializer_range\n self.qkv_bias = qkv_bias\n self.use_abs_pos = use_abs_pos\n self.use_rel_pos = use_rel_pos\n self.window_size = window_size\n self.global_attn_indexes = global_attn_indexes\n self.mlp_dim = mlp_dim", "docstring": "This is the configuration class to store the configuration of a [`GotOcr2VisionModel`]. It is used to instantiate a GOT_OCR2\nvision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the SAM ViT-h\n[facebook/sam-vit-huge](https://huggingface.co/facebook/sam-vit-huge) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n output_channels (`int`, *optional*, defaults to 256):\n Dimensionality of the output channels in the Patch Encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_channels (`int`, *optional*, defaults to 3):\n Number of channels in the input image.\n image_size (`int`, *optional*, defaults to 1024):\n Expected resolution. 
Target size of the resized input image.\n patch_size (`int`, *optional*, defaults to 16):\n Size of the patches to be extracted from the input image.\n hidden_act (`str`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string)\n layer_norm_eps (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 1e-10):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n qkv_bias (`bool`, *optional*, defaults to `True`):\n Whether to add a bias to query, key, value projections.\n use_abs_pos (`bool`, *optional*, defaults to `True`):\n Whether to use absolute position embedding.\n use_rel_pos (`bool`, *optional*, defaults to `True`):\n Whether to use relative position embedding.\n window_size (`int`, *optional*, defaults to 14):\n Window size for relative position.\n global_attn_indexes (`List[int]`, *optional*, defaults to `[2, 5, 8, 11]`):\n The indexes of the global attention layers.\n mlp_dim (`int`, *optional*, defaults to 3072):\n The dimensionality of the MLP layer in the Transformer encoder."} +{"repo": "keras", "function": "def _build_attention_equation(rank, attn_axes):\n target_notation = ''\n for i in range(rank):\n target_notation += _index_to_einsum_variable(i)\n batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))\n letter_offset = rank\n source_notation = ''\n for i in range(rank):\n if i in batch_dims or i == rank - 1:\n source_notation += target_notation[i]\n else:\n source_notation += _index_to_einsum_variable(letter_offset)\n letter_offset += 1\n product_notation = ''.join([target_notation[i] for i in batch_dims] + [target_notation[i] for i in attn_axes] + [source_notation[i] for i in attn_axes])\n dot_product_equation = '%s,%s->%s' % (source_notation, target_notation, product_notation)\n attn_scores_rank = len(product_notation)\n combine_equation = '%s,%s->%s' % (product_notation, source_notation, target_notation)\n return (dot_product_equation, combine_equation, attn_scores_rank)", "docstring": "Builds einsum equations for the attention computation.\n\nQuery, key, value inputs after projection are expected to have the shape as:\n`(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.\n`bs` and `<non-attention dims>` are treated as `<batch dims>`.\n\nThe attention operations can be generalized:\n1. Query-key dot product:\n (<batch dims>, <query attention dims>, num_heads, channels),\n (<batch dims>, <key attention dims>, num_heads, channels) ->\n (<batch dims>, num_heads, <query attention dims>, <key attention dims>)\n2. Combination:\n (<batch dims>, num_heads, <query attention dims>, <key attention dims>),\n (<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>, <query attention dims>, num_heads, channels)\n\nArgs:\n rank: Rank of query, key, value tensors.\n attn_axes: List/tuple of axes, `[-1, rank)`,\n that attention will be applied to.\n\nReturns:\n Einsum equations."} +{"repo": "tensorflow", "function": "def __init__(self, target='', graph=None, config=None):\n if not config:\n gpu_options = config_pb2.GPUOptions(allow_growth=True)\n config = config_pb2.ConfigProto(gpu_options=gpu_options)\n config.graph_options.place_pruned_graph = True\n super(InteractiveSession, self).__init__(target, graph, config)\n with InteractiveSession._count_lock:\n if InteractiveSession._active_session_count > 0:\n logging.error('An interactive session is already active. This can cause out-of-memory errors or some other unexpected errors (due to the unpredictable timing of garbage collection) in some cases. 
You must explicitly call `InteractiveSession.close()` to release resources held by the other session(s). Please use `tf.Session()` if you intend to productionize.')\n InteractiveSession._active_session_count += 1\n self._explicitly_closed = False\n self._default_session = self.as_default()\n self._default_session.enforce_nesting = False\n self._default_session.__enter__()\n self._explicit_graph = graph\n if self._explicit_graph is not None:\n self._default_graph = graph.as_default()\n self._default_graph.enforce_nesting = False\n self._default_graph.__enter__()", "docstring": "Creates a new interactive TensorFlow session.\n\nIf no `graph` argument is specified when constructing the session,\nthe default graph will be launched in the session. If you are\nusing more than one graph (created with `tf.Graph()`) in the same\nprocess, you will have to use different sessions for each graph,\nbut each graph can be used in multiple sessions. In this case, it\nis often clearer to pass the graph to be launched explicitly to\nthe session constructor.\n\nArgs:\n target: (Optional.) The execution engine to connect to. Defaults to using\n an in-process engine.\n graph: (Optional.) The `Graph` to be launched (described above).\n config: (Optional) `ConfigProto` proto used to configure the session."} +{"repo": "tensorflow", "function": "def __call__(self, *args, **kwargs):\n self._assert_built_as_v1()\n if not hasattr(self, '_thread_local'):\n raise RuntimeError('You must call `super().__init__()` in the layer constructor.')\n if args:\n inputs = args[0]\n args = args[1:]\n elif self._call_fn_args[0] in kwargs:\n inputs = kwargs.pop(self._call_fn_args[0])\n else:\n raise ValueError('The first argument to `Layer.call` must always be passed.')\n call_context = base_layer_utils.call_context()\n input_list = nest.flatten(inputs)\n build_graph = tf_utils.are_all_symbolic_tensors(input_list)\n if any((isinstance(x, (np.ndarray, float, int)) for x in input_list)):\n\n def _convert_non_tensor(x):\n if isinstance(x, (np.ndarray, float, int)):\n return tensor_conversion.convert_to_tensor_v2_with_dispatch(x)\n return x\n inputs = nest.map_structure(_convert_non_tensor, inputs)\n input_list = nest.flatten(inputs)\n mask_arg_passed_by_framework = False\n input_masks = self._collect_input_masks(inputs, args, kwargs)\n if self._expects_mask_arg and input_masks is not None and (not self._call_arg_was_passed('mask', args, kwargs)):\n mask_arg_passed_by_framework = True\n kwargs['mask'] = input_masks\n training_value = None\n training_arg_passed_by_framework = False\n if self._call_arg_was_passed('training', args, kwargs):\n training_value = self._get_call_arg_value('training', args, kwargs)\n if not self._expects_training_arg:\n kwargs.pop('training')\n if training_value is None:\n if call_context.training is not None:\n training_value = call_context.training\n elif backend.global_learning_phase_is_set():\n training_value = backend.learning_phase()\n elif build_graph:\n with backend.get_graph().as_default():\n if base_layer_utils.is_in_keras_graph():\n training_value = backend.learning_phase()\n if self._expects_training_arg and training_value is not None:\n if tensor_util.is_tf_type(training_value):\n training_value = math_ops.cast(training_value, dtypes.bool)\n else:\n training_value = bool(training_value)\n args, kwargs = self._set_call_arg_value('training', training_value, args, kwargs)\n training_arg_passed_by_framework = True\n if build_graph and base_layer_utils.needs_keras_history(inputs):\n 
base_layer_utils.create_keras_history(inputs)\n with call_context.enter(self, inputs, build_graph, training_value):\n if build_graph:\n input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)\n graph = backend.get_graph()\n with graph.as_default(), backend.name_scope(self._name_scope()):\n self._maybe_build(inputs)\n cast_inputs = self._maybe_cast_inputs(inputs)\n if base_layer_utils.is_subclassed(self) and (not base_layer_utils.from_saved_model(self)):\n call_fn = autograph.tf_convert(self.call, ag_ctx.control_status_ctx())\n else:\n call_fn = self.call\n if not self.dynamic:\n try:\n with autocast_variable.enable_auto_cast_variables(self._compute_dtype_object):\n outputs = call_fn(cast_inputs, *args, **kwargs)\n except errors.OperatorNotAllowedInGraphError as e:\n raise TypeError('You are attempting to use Python control flow in a layer that was not declared to be dynamic. Pass `dynamic=True` to the class constructor.\\nEncountered error:\\n\"\"\"\\n' + str(e) + '\\n\"\"\"')\n else:\n outputs = self._symbolic_call(inputs)\n if outputs is None:\n raise ValueError(\"A layer's `call` method should return a Tensor or a list of Tensors, not None (layer: \" + self.name + ').')\n if base_layer_utils.have_all_keras_metadata(inputs):\n if training_arg_passed_by_framework:\n args, kwargs = self._set_call_arg_value('training', None, args, kwargs, pop_kwarg_if_none=True)\n if mask_arg_passed_by_framework:\n kwargs.pop('mask')\n outputs = self._set_connectivity_metadata((inputs,) + args, kwargs, outputs)\n self._handle_activity_regularization(inputs, outputs)\n self._set_mask_metadata(inputs, outputs, input_masks)\n if hasattr(self, '_set_inputs') and (not self.inputs):\n self._set_inputs(inputs, outputs)\n else:\n with backend.name_scope(self._name_scope()):\n self._maybe_build(inputs)\n cast_inputs = self._maybe_cast_inputs(inputs)\n with autocast_variable.enable_auto_cast_variables(self._compute_dtype_object):\n outputs = self.call(cast_inputs, *args, **kwargs)\n self._handle_activity_regularization(inputs, outputs)\n self._set_mask_metadata(inputs, outputs, input_masks)\n return outputs", "docstring": "Wraps `call`, applying pre- and post-processing steps.\n\nArgs:\n *args: Positional arguments to be passed to `self.call`.\n **kwargs: Keyword arguments to be passed to `self.call`.\n\nReturns:\n Output tensor(s).\n\nNote:\n - The following optional keyword arguments are reserved for specific uses:\n * `training`: Boolean scalar tensor of Python boolean indicating\n whether the `call` is meant for training or inference.\n * `mask`: Boolean input mask.\n - If the layer's `call` method takes a `mask` argument (as some Keras\n layers do), its default value will be set to the mask generated\n for `inputs` by the previous layer (if `input` did come from\n a layer that generated a corresponding mask, i.e. if it came from\n a Keras layer with masking support.\n\nRaises:\n ValueError: if the layer's `call` method returns None (an invalid value).\n RuntimeError: if `super().__init__()` was not called in the constructor."} +{"repo": "tensorflow", "function": "def _shape_invariant_to_type_spec(self, shape):\n raise NotImplementedError(f'{type(self).__name__}._shape_invariant_to_type_spec')", "docstring": "Returns a TypeSpec given a shape invariant (used by `tf.while_loop`).\n\nArgs:\n shape: A `tf.TensorShape` object. 
The shape invariant for this\n `CompositeTensor`, or `None` if a default shape invariant should be used\n (based on the value of this `CompositeTensor`).\n\nReturns:\n A nested structure whose values are `tf.TensorShape` objects, specifying\n the shape invariants for the tensors that comprise this `CompositeTensor`."} +{"repo": "tensorflow", "function": "def _process_single_batch(model, inputs, targets, output_loss_metrics=None, sample_weights=None, training=False):\n with backend.eager_learning_phase_scope(1 if training else 0), training_utils.RespectCompiledTrainableState(model):\n with GradientTape() as tape:\n outs, total_loss, output_losses, masks = _model_loss(model, inputs, targets, output_loss_metrics=output_loss_metrics, sample_weights=sample_weights, training=training)\n if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer):\n scaled_total_loss = model.optimizer.get_scaled_loss(total_loss)\n else:\n scaled_total_loss = total_loss\n if training:\n trainable_weights = model.trainable_weights\n if trainable_weights:\n if hasattr(model, '_backwards'):\n model._backwards(tape, scaled_total_loss)\n else:\n grads = tape.gradient(scaled_total_loss, trainable_weights)\n if isinstance(model.optimizer, loss_scale_optimizer.LossScaleOptimizer):\n grads = model.optimizer.get_unscaled_gradients(grads)\n model.optimizer.apply_gradients(zip(grads, trainable_weights))\n else:\n logging.warning('The list of trainable weights is empty. Make sure that you are not setting model.trainable to False before compiling the model.')\n return (outs, total_loss, output_losses, masks)", "docstring": "Calculate the loss and gradient for one input batch.\n\n The model weights are updated if training is set to True.\n\nArgs:\n model: Model whose loss has to be calculated.\n inputs: List of input arrays.\n targets: List of target arrays.\n output_loss_metrics: List of metrics that are used to aggregated output\n loss values.\n sample_weights: Optional list of sample weight arrays.\n training: The boolean represents if the weights of the model are updated.\n 'fit' methods will set this to True while 'evaluate' methods will\n set this to False.\n\nReturns:\n output of the model, total loss, the loss and the mask\n associated with each output.\n\nRaises:\n ValueError: If the model has no loss to optimize."} +{"repo": "tf-quant-finance", "function": "def asset_or_nothing_price(*, volatilities: types.RealTensor, strikes: types.RealTensor, expiries: types.RealTensor, spots: types.RealTensor=None, forwards: types.RealTensor=None, discount_rates: types.RealTensor=None, dividend_rates: types.RealTensor=None, discount_factors: types.RealTensor=None, is_call_options: types.BoolTensor=None, is_normal_volatility: bool=False, dtype: tf.DType=None, name: str=None) -> types.RealTensor:\n if (spots is None) == (forwards is None):\n raise ValueError('Either spots or forwards must be supplied but not both.')\n if discount_rates is not None and discount_factors is not None:\n raise ValueError('At most one of discount_rates and discount_factors may be supplied')\n with tf.name_scope(name or 'asset_or_nothing_price'):\n strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')\n dtype = strikes.dtype\n volatilities = tf.convert_to_tensor(volatilities, dtype=dtype, name='volatilities')\n expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')\n if discount_rates is not None:\n discount_rates = tf.convert_to_tensor(discount_rates, dtype=dtype, name='discount_rates')\n discount_factors = 
tf.exp(-discount_rates * expiries)\n elif discount_factors is not None:\n discount_factors = tf.convert_to_tensor(discount_factors, dtype=dtype, name='discount_factors')\n discount_rates = -tf.math.log(discount_factors) / expiries\n else:\n discount_rates = tf.convert_to_tensor(0.0, dtype=dtype, name='discount_rates')\n discount_factors = tf.convert_to_tensor(1.0, dtype=dtype, name='discount_factors')\n if dividend_rates is None:\n dividend_rates = tf.convert_to_tensor(0.0, dtype=dtype, name='dividend_rates')\n if forwards is not None:\n forwards = tf.convert_to_tensor(forwards, dtype=dtype, name='forwards')\n else:\n spots = tf.convert_to_tensor(spots, dtype=dtype, name='spots')\n forwards = spots * tf.exp((discount_rates - dividend_rates) * expiries)\n sqrt_var = volatilities * tf.math.sqrt(expiries)\n if not is_normal_volatility:\n d1 = tf.math.divide_no_nan(tf.math.log(forwards / strikes), sqrt_var) + sqrt_var / 2\n undiscounted_calls = tf.where(sqrt_var > 0, forwards * _ncdf(d1), tf.where(forwards > strikes, forwards, 0.0))\n else:\n d1 = tf.math.divide_no_nan(forwards - strikes, sqrt_var)\n undiscounted_calls = tf.where(sqrt_var > 0.0, forwards * _ncdf(d1) + sqrt_var * tf.math.exp(-0.5 * d1 ** 2) / np.sqrt(2 * np.pi), tf.where(forwards > strikes, forwards, 0.0))\n if is_call_options is None:\n return discount_factors * undiscounted_calls\n undiscounted_puts = forwards - undiscounted_calls\n predicate = tf.broadcast_to(is_call_options, tf.shape(undiscounted_calls))\n return discount_factors * tf.where(predicate, undiscounted_calls, undiscounted_puts)", "docstring": "Computes the Black Scholes price for a batch of asset-or-nothing options.\n\nThe asset-or-nothing call (resp. put) pays out one unit of the underlying\nasset if the spot is above (resp. below) the strike at maturity.\n\n#### Example\n\n```python\n # Price a batch of 5 asset_or_nothing call and put options.\n volatilities = np.array([0.0001, 102.0, 2.0, 0.1, 0.4])\n forwards = np.array([1.0, 2.0, 3.0, 4.0, 5.0])\n # Strikes will automatically be broadcasted to shape [5].\n strikes = np.array([3.0])\n # Expiries will be broadcast to shape [5], i.e. each option has strike=3\n # and expiry = 1.\n expiries = 1.0\n computed_prices = tff.black_scholes.asset_or_nothing_price(\n volatilities=volatilities,\n strikes=strikes,\n expiries=expiries,\n forwards=forwards)\n# Expected print output of prices:\n# [0., 2., 2.52403424, 3.99315108, 4.65085383]\n```\n\n#### References:\n\n[1] Hull, John C., Options, Futures and Other Derivatives. Pearson, 2018.\n[2] https://en.wikipedia.org/wiki/Binary_option#Asset-or-nothing_call\n\nArgs:\n volatilities: Real `Tensor` of any shape and dtype. The volatilities to\n expiry of the options to price.\n strikes: A real `Tensor` of the same dtype and compatible shape as\n `volatilities`. The strikes of the options to be priced.\n expiries: A real `Tensor` of same dtype and compatible shape as\n `volatilities`. The expiry of each option.\n spots: A real `Tensor` of any shape that broadcasts to the shape of the\n `volatilities`. The current spot price of the underlying. Either this\n argument or the `forwards` (but not both) must be supplied.\n forwards: A real `Tensor` of any shape that broadcasts to the shape of\n `volatilities`. The forwards to maturity. Either this argument or the\n `spots` must be supplied but both must not be supplied.\n discount_rates: An optional real `Tensor` of same dtype as the\n `volatilities` and of the shape that broadcasts with `volatilities`. 
If\n not `None`, discount factors are calculated as e^(-rT), where r are the\n discount rates, or risk free rates. At most one of discount_rates and\n discount_factors can be supplied.\n Default value: `None`, equivalent to r = 0 and discount factors = 1 when\n discount_factors also not given.\n dividend_rates: An optional real `Tensor` of same dtype as the\n `volatilities` and of the shape that broadcasts with `volatilities`.\n Default value: `None`, equivalent to q = 0.\n discount_factors: An optional real `Tensor` of same dtype as the\n `volatilities`. If not `None`, these are the discount factors to expiry\n (i.e. e^(-rT)). Mutually exclusive with discount_rates. If neither is\n given, no discounting is applied (i.e. the undiscounted option price is\n returned). If `spots` is supplied and `discount_factors` is not `None`\n then this is also used to compute the forwards to expiry. At most one of\n `discount_rates` and `discount_factors` can be supplied.\n Default value: `None`, which maps to e^(-rT) calculated from\n discount_rates.\n is_call_options: A boolean `Tensor` of a shape compatible with\n `volatilities`. Indicates whether the option is a call (if True) or a put\n (if False). If not supplied, call options are assumed.\n is_normal_volatility: An optional Python boolean specifying whether the\n `volatilities` correspond to lognormal Black volatility (if False) or\n normal Black volatility (if True).\n Default value: False, which corresponds to lognormal volatility.\n dtype: Optional `tf.DType`. If supplied, the dtype to be used for conversion\n of any supplied non-`Tensor` arguments to `Tensor`.\n Default value: `None` which maps to the default dtype inferred by\n TensorFlow.\n name: str. The name for the ops created by this function.\n Default value: `None`, which is mapped to the default name\n `asset_or_nothing_price`.\n\nReturns:\n option_prices: A `Tensor` of the same shape as `forwards`. The Black\n Scholes price of the options.\n\nRaises:\n ValueError: If both `forwards` and `spots` are supplied or if neither is\n supplied.\n ValueError: If both `discount_rates` and `discount_factors` is supplied."} +{"repo": "tensorflow", "function": "def he_uniform(seed=None):\n return VarianceScaling(scale=2.0, mode='fan_in', distribution='uniform', seed=seed)", "docstring": "He uniform variance scaling initializer.\n\nIt draws samples from a uniform distribution within [-limit, limit]\nwhere `limit` is `sqrt(6 / fan_in)`\nwhere `fan_in` is the number of input units in the weight tensor.\n\nArgs:\n seed: A Python integer. Used to seed the random generator.\n\nReturns:\n An initializer.\n\nReferences:\n [He et al., 2015]\n (https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)\n # pylint: disable=line-too-long\n ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))"} +{"repo": "tensorflow", "function": "class XLAOptions(collections.namedtuple('XLAOptions', ['use_spmd_for_xla_partitioning', 'enable_xla_dynamic_padder'])):\n\n def __new__(cls, use_spmd_for_xla_partitioning=True, enable_xla_dynamic_padder=True):\n return super(XLAOptions, cls).__new__(cls, use_spmd_for_xla_partitioning, enable_xla_dynamic_padder)", "docstring": "XLA compilation options.\n\nAttributes:\n use_spmd_for_xla_partitioning: Boolean. Whether to use XLA's SPMD\n partitioner instead of MPMD partitioner when compiler partitioning is\n requested.\n enable_xla_dynamic_padder: Boolean. 
Whether to enable XLA dynamic padder\n infrastructure to handle dynamic shapes inputs inside XLA. True by\n default. Disabling this may cause correctness issues with dynamic shapes\n inputs, as XLA will just assume the inputs are with padded shapes. However\n users can optionally set it to False to improve device time if masking is\n already handled in the user side."} +{"repo": "tensorflow", "function": "def size(self, name=None):\n with ops.name_scope(name, '%s_Size' % self.name, [self.resource_handle]):\n return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)", "docstring": "Compute the number of elements in this table.\n\nArgs:\n name: A name for the operation (optional).\n\nReturns:\n A scalar tensor containing the number of elements in this table."} +{"repo": "tensorflow", "function": "def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None, input_data=None, expected_output=None, expected_output_dtype=None, expected_output_shape=None, validate_training=True, adapt_data=None, custom_objects=None, test_harness=None, supports_masking=None):\n if input_data is None:\n if input_shape is None:\n raise ValueError('input_shape is None')\n if not input_dtype:\n input_dtype = 'float32'\n input_data_shape = list(input_shape)\n for i, e in enumerate(input_data_shape):\n if e is None:\n input_data_shape[i] = np.random.randint(1, 4)\n input_data = 10 * np.random.random(input_data_shape)\n if input_dtype[:5] == 'float':\n input_data -= 0.5\n input_data = input_data.astype(input_dtype)\n elif input_shape is None:\n input_shape = input_data.shape\n if input_dtype is None:\n input_dtype = input_data.dtype\n if expected_output_dtype is None:\n expected_output_dtype = input_dtype\n if dtypes.as_dtype(expected_output_dtype) == dtypes.string:\n if test_harness:\n assert_equal = test_harness.assertAllEqual\n else:\n assert_equal = string_test\n elif test_harness:\n assert_equal = test_harness.assertAllClose\n else:\n assert_equal = numeric_test\n kwargs = kwargs or {}\n layer = layer_cls(**kwargs)\n if supports_masking is not None and layer.supports_masking != supports_masking:\n raise AssertionError('When testing layer %s, the `supports_masking` property is %rbut expected to be %r.\\nFull kwargs: %s' % (layer_cls.__name__, layer.supports_masking, supports_masking, kwargs))\n if adapt_data is not None:\n layer.adapt(adapt_data)\n weights = layer.get_weights()\n layer.set_weights(weights)\n if 'weights' in tf_inspect.getargspec(layer_cls.__init__):\n kwargs['weights'] = weights\n layer = layer_cls(**kwargs)\n x = layers.Input(shape=input_shape[1:], dtype=input_dtype)\n y = layer(x)\n if backend.dtype(y) != expected_output_dtype:\n raise AssertionError('When testing layer %s, for input %s, found output dtype=%s but expected to find %s.\\nFull kwargs: %s' % (layer_cls.__name__, x, backend.dtype(y), expected_output_dtype, kwargs))\n\n def assert_shapes_equal(expected, actual):\n \"\"\"Asserts that the output shape from the layer matches the actual shape.\"\"\"\n if len(expected) != len(actual):\n raise AssertionError('When testing layer %s, for input %s, found output_shape=%s but expected to find %s.\\nFull kwargs: %s' % (layer_cls.__name__, x, actual, expected, kwargs))\n for expected_dim, actual_dim in zip(expected, actual):\n if isinstance(expected_dim, tensor_shape.Dimension):\n expected_dim = expected_dim.value\n if isinstance(actual_dim, tensor_shape.Dimension):\n actual_dim = actual_dim.value\n if expected_dim is not None and expected_dim != actual_dim:\n raise 
AssertionError('When testing layer %s, for input %s, found output_shape=%s but expected to find %s.\\nFull kwargs: %s' % (layer_cls.__name__, x, actual, expected, kwargs))\n if expected_output_shape is not None:\n assert_shapes_equal(tensor_shape.TensorShape(expected_output_shape), y.shape)\n model = models.Model(x, y)\n computed_output_shape = tuple(layer.compute_output_shape(tensor_shape.TensorShape(input_shape)).as_list())\n computed_output_signature = layer.compute_output_signature(tensor_spec.TensorSpec(shape=input_shape, dtype=input_dtype))\n actual_output = model.predict(input_data)\n actual_output_shape = actual_output.shape\n assert_shapes_equal(computed_output_shape, actual_output_shape)\n assert_shapes_equal(computed_output_signature.shape, actual_output_shape)\n if computed_output_signature.dtype != actual_output.dtype:\n raise AssertionError('When testing layer %s, for input %s, found output_dtype=%s but expected to find %s.\\nFull kwargs: %s' % (layer_cls.__name__, x, actual_output.dtype, computed_output_signature.dtype, kwargs))\n if expected_output is not None:\n assert_equal(actual_output, expected_output)\n model_config = model.get_config()\n recovered_model = models.Model.from_config(model_config, custom_objects)\n if model.weights:\n weights = model.get_weights()\n recovered_model.set_weights(weights)\n output = recovered_model.predict(input_data)\n assert_equal(output, actual_output)\n layer_weights = layer.get_weights()\n if validate_training:\n model = models.Model(x, layer(x))\n if _thread_local_data.run_eagerly is not None:\n model.compile('rmsprop', 'mse', weighted_metrics=['acc'], run_eagerly=should_run_eagerly())\n else:\n model.compile('rmsprop', 'mse', weighted_metrics=['acc'])\n model.train_on_batch(input_data, actual_output)\n layer_config = layer.get_config()\n layer_config['batch_input_shape'] = input_shape\n layer = layer.__class__.from_config(layer_config)\n if adapt_data is not None:\n layer.adapt(adapt_data)\n model = models.Sequential()\n model.add(layers.Input(shape=input_shape[1:], dtype=input_dtype))\n model.add(layer)\n layer.set_weights(layer_weights)\n actual_output = model.predict(input_data)\n actual_output_shape = actual_output.shape\n for expected_dim, actual_dim in zip(computed_output_shape, actual_output_shape):\n if expected_dim is not None:\n if expected_dim != actual_dim:\n raise AssertionError('When testing layer %s **after deserialization**, for input %s, found output_shape=%s but expected to find inferred shape %s.\\nFull kwargs: %s' % (layer_cls.__name__, x, actual_output_shape, computed_output_shape, kwargs))\n if expected_output is not None:\n assert_equal(actual_output, expected_output)\n model_config = model.get_config()\n recovered_model = models.Sequential.from_config(model_config, custom_objects)\n if model.weights:\n weights = model.get_weights()\n recovered_model.set_weights(weights)\n output = recovered_model.predict(input_data)\n assert_equal(output, actual_output)\n return actual_output", "docstring": "Test routine for a layer with a single input and single output.\n\nArgs:\n layer_cls: Layer class object.\n kwargs: Optional dictionary of keyword arguments for instantiating the\n layer.\n input_shape: Input shape tuple.\n input_dtype: Data type of the input data.\n input_data: Numpy array of input data.\n expected_output: Numpy array of the expected output.\n expected_output_dtype: Data type expected for the output.\n expected_output_shape: Shape tuple for the expected shape of the output.\n validate_training: Whether to 
attempt to validate training on this layer.\n This might be set to False for non-differentiable layers that output\n string or integer values.\n adapt_data: Optional data for an 'adapt' call. If None, adapt() will not\n be tested for this layer. This is only relevant for PreprocessingLayers.\n custom_objects: Optional dictionary mapping name strings to custom objects\n in the layer class. This is helpful for testing custom layers.\n test_harness: The Tensorflow test, if any, that this function is being\n called in.\n supports_masking: Optional boolean to check the `supports_masking` property\n of the layer. If None, the check will not be performed.\n\nReturns:\n The output data (Numpy array) returned by the layer, for additional\n checks to be done by the calling code.\n\nRaises:\n ValueError: if `input_shape is None`."} +{"repo": "beam", "function": "class BufferedQuantileTracker(WindowedTracker, QuantileTracker):\n\n def __init__(self, window_mode, q, **kwargs):\n super().__init__(window_mode, **kwargs)\n QuantileTracker.__init__(self, q)\n self._sorted_items = SortedList()\n\n def push(self, x):\n \"\"\"Pushes a new value, maintains the sorted list, and manages the window.\n\n Args:\n x: The new value to be pushed.\n \"\"\"\n if not math.isnan(x):\n self._sorted_items.add(x)\n if self._window_mode == WindowMode.SLIDING:\n if len(self._queue) >= self._window_size and (not math.isnan((old_x := self.pop()))):\n self._sorted_items.discard(old_x)\n super().push(x)\n\n @staticmethod\n def _get_helper(sorted_items, q):\n n = len(sorted_items)\n if n < 1:\n return float('nan')\n pos = q * (n - 1)\n lo = math.floor(pos)\n lo_value = typing.cast(float, sorted_items[lo])\n hi = min(lo + 1, n - 1)\n hi_value: float = typing.cast(float, sorted_items[hi])\n return lo_value + (hi_value - lo_value) * (pos - lo)\n\n def get(self):\n \"\"\"Returns the current quantile value using the sorted list.\n\n Calculates the quantile using linear interpolation on the sorted values.\n\n Returns:\n float: The calculated quantile value. Returns NaN if the window is empty.\n \"\"\"\n return self._get_helper(self._sorted_items, self._q)", "docstring": "Abstract base class for buffered quantile trackers.\n\nWarning:\n Buffered quantile trackers are NOT truly incremental in the sense that they\n don't update the quantile in constant time per new data point. 
They maintain\n a sorted list of all values in the window.\n\nArgs:\n window_mode: A `WindowMode` enum specifying whether the window is `LANDMARK`\n or `SLIDING`.\n q: The quantile to calculate, a float between 0 and 1 (inclusive).\n **kwargs: Keyword arguments passed to the parent class constructor."} +{"repo": "tensorflow", "function": "def _maybe_colocate_with(self, value):\n if not self._colocate_with_first_write_call:\n yield\n else:\n if not self._colocate_with:\n self._colocate_with.append(value)\n with ops.colocate_with(self._colocate_with[0]):\n yield", "docstring": "Colocate operations with an internal colocation group or `value`.\n\nArgs:\n value: `Tensor`, the tensor to try to colocate with.\n\nYields:\n Does not yield anything, but the new context is a colocation context.\n\nIf no internal colocation group is set, colocate with `value` and set\nthe internal colocation group to be value."} +{"repo": "tensorflow", "function": "class MaxNorm(Constraint):\n\n def __init__(self, max_value=2, axis=0):\n self.max_value = max_value\n self.axis = axis\n\n @doc_controls.do_not_generate_docs\n def __call__(self, w):\n norms = backend.sqrt(math_ops.reduce_sum(math_ops.square(w), axis=self.axis, keepdims=True))\n desired = backend.clip(norms, 0, self.max_value)\n return w * (desired / (backend.epsilon() + norms))\n\n @doc_controls.do_not_generate_docs\n def get_config(self):\n return {'max_value': self.max_value, 'axis': self.axis}", "docstring": "MaxNorm weight constraint.\n\nConstrains the weights incident to each hidden unit\nto have a norm less than or equal to a desired value.\n\nAlso available via the shortcut function `tf.keras.constraints.max_norm`.\n\nArgs:\n max_value: the maximum norm value for the incoming weights.\n axis: integer, axis along which to calculate weight norms.\n For instance, in a `Dense` layer the weight matrix\n has shape `(input_dim, output_dim)`,\n set `axis` to `0` to constrain each weight vector\n of length `(input_dim,)`.\n In a `Conv2D` layer with `data_format=\"channels_last\"`,\n the weight tensor has shape\n `(rows, cols, input_depth, output_depth)`,\n set `axis` to `[0, 1, 2]`\n to constrain the weights of each filter tensor of size\n `(rows, cols, input_depth)`."} +{"repo": "tensorflow", "function": "def multiplex(cond, a, b, name=None):\n return examples_multiplex_dense(cond=cond, a=a, b=b, name=name)", "docstring": "Return elements chosen from `a` or `b` depending on `cond`.\n\nThis is similar to `np.where` and `tf.where`, but simplified to only handle\nthe case of dense tensors, no optional parameters, no broadcasting, etc.\n\n>>> multiplex([True, False, False, True], [1,2,3,4], [100,200,300,400])\n<tf.Tensor: shape=(4,), dtype=int32, numpy=array([  1, 200, 300,   4], dtype=int32)>\n\nArgs:\n cond: tf.Tensor of type bool. 
Where True, yield `a`, otherwise yield `b`.\n a: tf.Tensor with the same type and shape as `b`.\n b: tf.Tensor with the same type and shape as `a`.\n name: An optional name for the op.\n\nReturns:\n A tf.Tensor with elements from `a` where `cond` is True, and elements\n from `b` elsewhere."} +{"repo": "beam", "function": "def get(self):\n if self._n < 1:\n return float('nan')\n return self._mean", "docstring": "Returns the current incremental mean.\n\nReturns:\n float: The current incremental mean value.\n Returns NaN if no valid (non-NaN) values have been pushed."} +{"repo": "transformers", "function": "def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool=False, **kwargs):\n use_auth_token = kwargs.pop('use_auth_token', None)\n if use_auth_token is not None:\n warnings.warn('The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.', FutureWarning)\n if kwargs.get('token', None) is not None:\n raise ValueError('`token` and `use_auth_token` are both specified. Please set only the argument `token`.')\n kwargs['token'] = use_auth_token\n if os.path.isfile(save_directory):\n raise AssertionError(f'Provided path ({save_directory}) should be a directory, not a file')\n os.makedirs(save_directory, exist_ok=True)\n if push_to_hub:\n commit_message = kwargs.pop('commit_message', None)\n repo_id = kwargs.pop('repo_id', save_directory.split(os.path.sep)[-1])\n repo_id = self._create_repo(repo_id, **kwargs)\n files_timestamps = self._get_files_timestamps(save_directory)\n if self._auto_class is not None:\n custom_object_save(self, save_directory, config=self)\n output_video_processor_file = os.path.join(save_directory, VIDEO_PROCESSOR_NAME)\n self.to_json_file(output_video_processor_file)\n logger.info(f'Video processor saved in {output_video_processor_file}')\n if push_to_hub:\n self._upload_modified_files(save_directory, repo_id, files_timestamps, commit_message=commit_message, token=kwargs.get('token'))\n return [output_video_processor_file]", "docstring": "Save a video processor object to the directory `save_directory`, so that it can be re-loaded using the\n[`~video_processing_utils.VideoProcessorBase.from_pretrained`] class method.\n\nArgs:\n save_directory (`str` or `os.PathLike`):\n Directory where the video processor JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method."} +{"repo": "beam", "function": "def __init__(self, project, sub_name, expected_msg=None, expected_msg_len=None, timeout=DEFAULT_TIMEOUT, with_attributes=False, strip_attributes=None, sleep_time=DEFAULT_SLEEP_TIME, max_messages_in_one_pull=DEFAULT_MAX_MESSAGES_IN_ONE_PULL, pull_timeout=DEFAULT_PULL_TIMEOUT):\n if pubsub is None:\n raise ImportError('PubSub dependencies are not installed.')\n if not project:\n raise ValueError('Invalid project %s.' % project)\n if not sub_name:\n raise ValueError('Invalid subscription %s.' 
% sub_name)\n if not expected_msg_len and (not expected_msg):\n raise ValueError('Required expected_msg: {} or expected_msg_len: {}.'.format(expected_msg, expected_msg_len))\n if expected_msg and (not isinstance(expected_msg, list)):\n raise ValueError('Invalid expected messages %s.' % expected_msg)\n if expected_msg_len and (not isinstance(expected_msg_len, int)):\n raise ValueError('Invalid expected messages %s.' % expected_msg_len)\n self.project = project\n self.sub_name = sub_name\n self.expected_msg = expected_msg\n self.expected_msg_len = expected_msg_len or len(self.expected_msg)\n self.timeout = timeout\n self.messages = None\n self.messages_all_details = None\n self.with_attributes = with_attributes\n self.strip_attributes = strip_attributes\n self.sleep_time = sleep_time\n self.max_messages_in_one_pull = max_messages_in_one_pull\n self.pull_timeout = pull_timeout", "docstring": "Initialize PubSubMessageMatcher object.\n\nArgs:\n project: A name string of project.\n sub_name: A name string of subscription which is attached to output.\n expected_msg: A string list that contains expected message data pulled\n from the subscription. See also: with_attributes.\n expected_msg_len: Number of expected messages pulled from the\n subscription.\n timeout: Timeout in seconds to wait for all expected messages appears.\n with_attributes: If True, will match against both message data and\n attributes. If True, expected_msg should be a list of ``PubsubMessage``\n objects. Otherwise, it should be a list of ``bytes``.\n strip_attributes: List of strings. If with_attributes==True, strip the\n attributes keyed by these values from incoming messages.\n If a key is missing, will add an attribute with an error message as\n value to prevent a successful match.\n sleep_time: Time in seconds between which the pulls from pubsub are done.\n max_messages_in_one_pull: Maximum number of messages pulled from pubsub\n at once.\n pull_timeout: Time in seconds after which the pull from pubsub is repeated"} +{"repo": "transformers", "function": "class Speech2Text2Decoder(Speech2Text2PreTrainedModel):\n\n def __init__(self, config: Speech2Text2Config):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n self.padding_idx = config.pad_token_id\n self.max_target_positions = config.max_target_positions\n self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)\n self.embed_positions = Speech2Text2SinusoidalPositionalEmbedding(self.max_target_positions, config.d_model, self.padding_idx)\n self.layers = nn.ModuleList([Speech2Text2DecoderLayer(config) for _ in range(config.decoder_layers)])\n self.gradient_checkpointing = False\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, value):\n self.embed_tokens = value\n\n def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n \"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using [`Speech2Text2Tokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values\n selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention\n on hidden heads. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of\n shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the\n cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those\n that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of\n all `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length)\n if encoder_hidden_states is not None and encoder_attention_mask is not None:\n encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])\n positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)\n hidden_states = inputs_embeds + positions\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once('`use_cache = True` is incompatible with gradient checkpointing. 
Setting `use_cache = False`...')\n use_cache = False\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None\n next_decoder_cache = () if use_cache else None\n for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):\n if attn_mask is not None:\n if attn_mask.size()[0] != len(self.layers):\n raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')\n for idx, decoder_layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n continue\n past_key_value = past_key_values[idx] if past_key_values is not None else None\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None)\n else:\n layer_outputs = decoder_layer(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache)\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_outputs[2],)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n next_cache = next_decoder_cache if use_cache else None\n if not return_dict:\n return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))\n return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)", "docstring": "Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`Speech2Text2DecoderLayer`]\n\nArgs:\n config: Speech2Text2Config\n embed_tokens (nn.Embedding): output embedding"} +{"repo": "transformers", "function": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, next_sentence_label: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFBertForPreTrainingOutput, Tuple[tf.Tensor]]:\n outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n sequence_output, pooled_output = outputs[:2]\n prediction_scores = self.mlm(sequence_output=sequence_output, training=training)\n seq_relationship_score = self.nsp(pooled_output=pooled_output)\n total_loss = None\n if labels is not None and next_sentence_label is not None:\n d_labels = {'labels': labels}\n d_labels['next_sentence_label'] = next_sentence_label\n total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score))\n if not return_dict:\n output = (prediction_scores, seq_relationship_score) + outputs[2:]\n return (total_loss,) + output if total_loss is not None else output\n return TFBertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the\n loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\nnext_sentence_label (`tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair\n (see `input_ids` docstring) Indices should be in `[0, 1]`:\n\n - 0 indicates sequence B is a continuation of sequence A,\n - 1 indicates sequence B is a random sequence.\nkwargs (`Dict[str, any]`, *optional*, defaults to `{}`):\n Used to hide legacy arguments that have been deprecated.\n\nReturn:\n\nExamples:\n\n```python\n>>> import tensorflow as tf\n>>> from transformers import AutoTokenizer, TFBertForPreTraining\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google-bert/bert-base-uncased\")\n>>> model = TFBertForPreTraining.from_pretrained(\"google-bert/bert-base-uncased\")\n>>> input_ids = tokenizer(\"Hello, my dog is cute\", add_special_tokens=True, return_tensors=\"tf\")\n>>> # Batch size 1\n\n>>> outputs = model(input_ids)\n>>> prediction_logits, seq_relationship_logits = outputs[:2]\n```"} +{"repo": "tensorflow", "function": "def _matmul_2d(a, b, **kwargs):\n ragged_err = 'The matrices in `a` and `b` may not be ragged in their innermost dimension.'\n checks = []\n if isinstance(a, ragged_tensor.RaggedTensor):\n original_size = array_ops.size(a.flat_values)\n a = a.to_tensor()\n checks.append(check_ops.assert_equal(original_size, array_ops.size(a), message=ragged_err))\n if isinstance(b, ragged_tensor.RaggedTensor):\n original_size = array_ops.size(b.flat_values)\n b = b.to_tensor()\n checks.append(check_ops.assert_equal(original_size, array_ops.size(b), message=ragged_err))\n with ops.control_dependencies(checks):\n return math_ops.matmul(a, b, **kwargs)", "docstring": "Multiplies potentially ragged 2D tensors.\n\nArgs:\n a: A 2D Tensor or RaggedTensor with `shape=[I, J]`\n b: A 2D Tensor or RaggedTensor with `shape=[J, K]`\n **kwargs: Additional arguments for `tf.matmul` (e.g. transpose_a).\n\nReturns:\n A 2D Tensor with `shape=[I, K]`."} +{"repo": "mobly", "function": "def on_skip(self, record):", "docstring": "A function that is executed upon a test being skipped.\n\nImplementation is optional.\n\nArgs:\n record: records.TestResultRecord, a copy of the test record for\n this test, containing all information of the test execution\n including exception objects."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, encoder_hidden_states: Optional[torch.FloatTensor]=None, encoder_attention_mask: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n use_cache = False\n outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = outputs[0]\n prediction_scores = 
self.lm_head(sequence_output)\n lm_loss = None\n if labels is not None:\n lm_loss = self.loss_function(prediction_scores, labels, vocab_size=self.config.vocab_size, **kwargs)\n if not return_dict:\n output = (prediction_scores,) + outputs[2:]\n return (lm_loss,) + output if lm_loss is not None else output\n return CausalLMOutputWithCrossAttentions(loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are\n ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, RobertaForCausalLM, RobertaConfig\n>>> import torch\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"FacebookAI/roberta-base\")\n>>> config = RobertaConfig.from_pretrained(\"FacebookAI/roberta-base\")\n>>> config.is_decoder = True\n>>> model = RobertaForCausalLM.from_pretrained(\"FacebookAI/roberta-base\", config=config)\n>>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n>>> outputs = model(**inputs)\n>>> prediction_logits = outputs.logits\n```"} +{"repo": "transformers", "function": "def pad(self, images: List[np.ndarray], annotations: Optional[Union[AnnotationType, List[AnnotationType]]]=None, constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, update_bboxes: bool=True, pad_size: Optional[Dict[str, int]]=None) -> BatchFeature:\n pad_size = pad_size if pad_size is not None else self.pad_size\n if pad_size is not None:\n padded_size = (pad_size['height'], pad_size['width'])\n else:\n padded_size = get_max_height_width(images, input_data_format=input_data_format)\n annotation_list = annotations if annotations is not None else [None] * len(images)\n padded_images = []\n padded_annotations = []\n for image, annotation in zip(images, annotation_list):\n padded_image, padded_annotation = self._pad_image(image, padded_size, annotation, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format, update_bboxes=update_bboxes)\n padded_images.append(padded_image)\n padded_annotations.append(padded_annotation)\n data = {'pixel_values': padded_images}\n if return_pixel_mask:\n masks = [make_pixel_mask(image=image, output_size=padded_size, input_data_format=input_data_format) for image in images]\n data['pixel_mask'] = masks\n encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)\n if annotations is not None:\n encoded_inputs['labels'] = [BatchFeature(annotation, tensor_type=return_tensors) for annotation in padded_annotations]\n return encoded_inputs", "docstring": "Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width\nin the batch and optionally returns their corresponding pixel mask.\n\nArgs:\n images (List[`np.ndarray`]):\n Images to pad.\n annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):\n Annotations to transform according to the padding that is 
applied to the images.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n return_pixel_mask (`bool`, *optional*, defaults to `True`):\n Whether to return a pixel mask.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred.\n update_bboxes (`bool`, *optional*, defaults to `True`):\n Whether to update the bounding boxes in the annotations to match the padded images. If the\n bounding boxes have not been converted to relative coordinates and `(centre_x, centre_y, width, height)`\n format, the bounding boxes will not be updated.\n pad_size (`Dict[str, int]`, *optional*):\n The size `{\"height\": int, \"width\" int}` to pad the images to. Must be larger than any image size\n provided for preprocessing. If `pad_size` is not provided, images will be padded to the largest\n height and width in the batch."} +{"repo": "tensorflow", "function": "def __init__(self, cluster_spec, initializer=None, share_gpu=True):\n _active_pool_runners.add(self)\n self._cluster_spec = cluster_spec\n self._initializer = initializer\n self._share_gpu = share_gpu\n self._conn = {}\n self._runner = None", "docstring": "Creates a multi-process pool runner.\n\nArgs:\n cluster_spec: Dict for cluster spec. The following is an example of\n cluster with three workers.\n {\"worker\": [\"worker0.example.com:2222\",\n \"worker1.example.com:2222\",\n \"worker2.example.com:2222\"]}\n initializer: a callable to called at the startup of worker processes.\n share_gpu: Whether to share GPUs among workers. 
If False, each worker is\n assigned different GPUs in a roundrobin fashion.\n\nRaises:\n RuntimeError: if `multi_process_runner.test_main()` is not called.\n ValueError: if there are more than one chief in the `cluster_spec`."} +{"repo": "transformers", "function": "def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):\n exp_blocked_to_pad = torch.cat([to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2)\n band_mask = torch.einsum('blq,blk->blqk', from_blocked_mask[:, 2:-2], exp_blocked_to_pad)\n band_mask.unsqueeze_(1)\n return band_mask", "docstring": "Create 3D attention mask from a 2D tensor mask.\n\nArgs:\n from_blocked_mask: 2D Tensor of shape [batch_size,\n from_seq_length//from_block_size, from_block_size].\n to_blocked_mask: int32 Tensor of shape [batch_size,\n to_seq_length//to_block_size, to_block_size].\n\nReturns:\n float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,\n 3*to_block_size]."} +{"repo": "tensorflow", "function": "def _convert_from_saved_model(self, graph_def):\n self._save_conversion_params_metric(graph_def)\n quant_mode = QuantizationMode(self.optimizations, self.target_spec, self.representative_dataset, graph_def, self._experimental_disable_per_channel, self.experimental_new_dynamic_range_quantizer, self._experimental_low_bit_qat, self._experimental_full_integer_quantization_bias_type, self._experimental_variable_quantization, self._experimental_strict_qdq)\n self._validate_inference_input_output_types(quant_mode)\n converter_kwargs = {'enable_tflite_resource_variables': self.experimental_enable_resource_variables}\n converter_kwargs.update(self._get_base_converter_args())\n converter_kwargs.update(quant_mode.converter_flags())\n result = _convert_saved_model(**converter_kwargs)\n return self._optimize_tflite_model(result, quant_mode, _build_conversion_flags(**converter_kwargs).debug_options, quant_io=self.experimental_new_quantizer)", "docstring": "Helper method that converts saved model.\n\nArgs:\n graph_def: GraphDef object for the model, used only for stats.\n\nReturns:\n The converted TFLite model."} +{"repo": "sprockets", "function": "def __init__(self, error_name, error_id, error_msg, stack_patterns):\n self.error_name = error_name\n self.error_id = error_id\n self.error_msg = error_msg\n self._stack_patterns = stack_patterns", "docstring": "Create a ParserError that matches against any of the |stack_patterns|.\n\nArgs:\n error_name: A short, human readable name for the error,\n using lowercase-with-dashes-format.\n error_id: An integer to identify a specific error:\n 100s: Lexer errors.\n 200s: Low level parsing errors.\n 300s: High level parsing errors.\n error_msg: A message to display with this error that describes\n clearly what caused the error.\n stack_patterns: A list of \"stack patterns\", where each stack pattern\n is a list of strings corresponding to symbols on the parser's symbol\n stack at the time it errored out. 
The string values for the symbols\n can match essentially any terminal or non-terminal symbol used in the\n grammar from parser.py.\n Examples: ['TRANSITION', 'NAME', 'params', '=']\n (or None to match against any symbol stack).\n\nReturns:\n ParserError that matches against |stack_patterns|."} +{"repo": "tensorflow", "function": "def compute(self, batch_values, accumulator=None):\n pass", "docstring": "Compute a step in this computation, returning a new accumulator.\n\nThis method computes a step of the computation described by this Combiner.\nIf an accumulator is passed, the data in that accumulator is also used; so\ncompute(batch_values) results in f(batch_values), while\ncompute(batch_values, accumulator) results in\nmerge(f(batch_values), accumulator).\n\nArgs:\n batch_values: A list of ndarrays representing the values of the inputs for\n this step of the computation.\n accumulator: the current accumulator. Can be None.\n\nReturns:\n An accumulator that includes the passed batch of inputs."} +{"repo": "tensorflow", "function": "def _parse_interval(interval_str):\n interval_str = interval_str.strip()\n if interval_str.startswith('<='):\n if _NUMBER_PATTERN.match(interval_str[2:].strip()):\n return Interval(start=None, start_included=False, end=interval_str[2:].strip(), end_included=True)\n else:\n raise ValueError(\"Invalid value string after <= in '%s'\" % interval_str)\n if interval_str.startswith('<'):\n if _NUMBER_PATTERN.match(interval_str[1:].strip()):\n return Interval(start=None, start_included=False, end=interval_str[1:].strip(), end_included=False)\n else:\n raise ValueError(\"Invalid value string after < in '%s'\" % interval_str)\n if interval_str.startswith('>='):\n if _NUMBER_PATTERN.match(interval_str[2:].strip()):\n return Interval(start=interval_str[2:].strip(), start_included=True, end=None, end_included=False)\n else:\n raise ValueError(\"Invalid value string after >= in '%s'\" % interval_str)\n if interval_str.startswith('>'):\n if _NUMBER_PATTERN.match(interval_str[1:].strip()):\n return Interval(start=interval_str[1:].strip(), start_included=False, end=None, end_included=False)\n else:\n raise ValueError(\"Invalid value string after > in '%s'\" % interval_str)\n if not interval_str.startswith(('[', '(')) or not interval_str.endswith((']', ')')):\n raise ValueError('Invalid interval format: %s. Valid formats are: [min, max], (min, max), min' % interval_str)\n interval = interval_str[1:-1].split(',')\n if len(interval) != 2:\n raise ValueError('Incorrect interval format: %s. Interval should specify two values: [min, max] or (min, max).' % interval_str)\n start_item = interval[0].strip()\n if not _NUMBER_PATTERN.match(start_item):\n raise ValueError(\"Invalid first item in interval: '%s'\" % start_item)\n end_item = interval[1].strip()\n if not _NUMBER_PATTERN.match(end_item):\n raise ValueError(\"Invalid second item in interval: '%s'\" % end_item)\n return Interval(start=start_item, start_included=interval_str[0] == '[', end=end_item, end_included=interval_str[-1] == ']')", "docstring": "Convert a human-readable interval to a tuple of start and end value.\n\nArgs:\n interval_str: (`str`) A human-readable str representing an interval\n (e.g., \"[1M, 2M]\", \"<100k\", \">100ms\"). 
The items following the \">\", \"<\",\n \">=\" and \"<=\" signs have to start with a number (e.g., 3.0, -2, .98).\n The same requirement applies to the items in the parentheses or brackets.\n\nReturns:\n Interval object where start or end can be None\n if the range is specified as \"N\" respectively.\n\nRaises:\n ValueError: if the input is not valid."} +{"repo": "transformers", "function": "def prediction_step(self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]]=None) -> tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n inputs = self._prepare_inputs(inputs)\n gen_kwargs = {'max_length': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, 'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams}\n if self.args.predict_with_generate and (not self.args.prediction_loss_only):\n generated_tokens = self.model.generate(inputs['input_ids'], attention_mask=inputs['attention_mask'], **gen_kwargs)\n if generated_tokens.shape[-1] < gen_kwargs['max_length']:\n generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['max_length'])\n labels = inputs.pop('labels')\n with torch.no_grad():\n loss, logits = self._compute_loss(model, inputs, labels)\n loss = loss.mean().detach()\n if self.args.prediction_loss_only:\n return (loss, None, None)\n logits = generated_tokens if self.args.predict_with_generate else logits\n if labels.shape[-1] < gen_kwargs['max_length']:\n labels = self._pad_tensors_to_max_len(labels, gen_kwargs['max_length'])\n return (loss, logits, labels)", "docstring": "Perform an evaluation step on :obj:`model` using obj:`inputs`.\n\nSubclass and override to inject custom behavior.\n\nArgs:\n model (:obj:`nn.Module`):\n The model to evaluate.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. Check your model's documentation for all accepted arguments.\n prediction_loss_only (:obj:`bool`):\n Whether or not to return the loss only.\n\nReturn:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n A tuple with the loss, logits and labels (each being optional)."} +{"repo": "tensorflow", "function": "def isanytargetmethod(object):\n decorators, target = tf_decorator.unwrap(object)\n for decorator in decorators:\n if _inspect.ismethod(decorator.decorated_target):\n return True\n while isinstance(target, functools.partial):\n target = target.func\n return callable(target) and (not _inspect.isfunction(target))", "docstring": "Checks if `object` or a TF Decorator wrapped target contains self or cls.\n\nThis function could be used along with `tf_inspect.getfullargspec` to\ndetermine if the first argument of `object` argspec is self or cls. 
If the\nfirst argument is self or cls, it needs to be excluded from argspec when we\ncompare the argspec to the input arguments and, if provided, the tf.function\ninput_signature.\n\nLike `tf_inspect.getfullargspec` and python `inspect.getfullargspec`, it\ndoes not unwrap python decorators.\n\nArgs:\n  obj: A method, function, or functools.partial, possibly decorated by\n    TFDecorator.\n\nReturns:\n  A bool indicating whether `object` or any target along the chain of TF decorators\n  is a method."} +{"repo": "transformers", "function": "def forward(self, hidden_states: List[torch.Tensor], patch_height: Optional[int]=None, patch_width: Optional[int]=None, prompt_depth: Optional[torch.Tensor]=None) -> List[torch.Tensor]:\n    if not isinstance(hidden_states, (tuple, list)):\n        raise TypeError('hidden_states should be a tuple or list of tensors')\n    if len(hidden_states) != len(self.config.neck_hidden_sizes):\n        raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')\n    hidden_states = self.reassemble_stage(hidden_states, patch_height, patch_width)\n    features = [self.convs[i](feature) for i, feature in enumerate(hidden_states)]\n    output = self.fusion_stage(features, prompt_depth=prompt_depth)\n    return output", "docstring": "Args:\n    hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`):\n        List of hidden states from the backbone."} +{"repo": "tensorflow", "function": "def constant(value, dtype=None, shape=None, name='Const') -> Union[ops.Operation, ops._EagerTensorBase]:\n    return _constant_impl(value, dtype, shape, name, verify_shape=False, allow_broadcast=True)", "docstring": "Creates a constant tensor from a tensor-like object.\n\nNote: All eager `tf.Tensor` values are immutable (in contrast to\n`tf.Variable`). There is nothing especially _constant_ about the value\nreturned from `tf.constant`. This function is not fundamentally different from\n`tf.convert_to_tensor`. The name `tf.constant` comes from the `value` being\nembedded in a `Const` node in the `tf.Graph`. `tf.constant` is useful\nfor asserting that the value can be embedded that way.\n\nIf the argument `dtype` is not specified, then the type is inferred from\nthe type of `value`.\n\n>>> # Constant 1-D Tensor from a python list.\n>>> tf.constant([1, 2, 3, 4, 5, 6])\n\n>>> # Or a numpy array\n>>> a = np.array([[1, 2, 3], [4, 5, 6]])\n>>> tf.constant(a)\n\n\nIf `dtype` is specified, the resulting tensor values are cast to the requested\n`dtype`.\n\n>>> tf.constant([1, 2, 3, 4, 5, 6], dtype=tf.float64)\n\n\nIf `shape` is set, the `value` is reshaped to match. Scalars are expanded to\nfill the `shape`:\n\n>>> tf.constant(0, shape=(2, 3))\n \n>>> tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])\n\n\n`tf.constant` has no effect if an eager Tensor is passed as the `value`, it\neven transmits gradients:\n\n>>> v = tf.Variable([0.0])\n>>> with tf.GradientTape() as g:\n...     loss = tf.constant(v + v)\n>>> g.gradient(loss, v).numpy()\narray([2.], dtype=float32)\n\nBut, since `tf.constant` embeds the value in the `tf.Graph` this fails for\nsymbolic tensors:\n\n>>> with tf.compat.v1.Graph().as_default():\n...     i = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.float32)\n...     t = tf.constant(i)\nTraceback (most recent call last):\n...\nTypeError: ...\n\n`tf.constant` will create tensors on the current device. 
Inputs which are\nalready tensors maintain their placements unchanged.\n\nRelated Ops:\n\n* `tf.convert_to_tensor` is similar but:\n * It has no `shape` argument.\n * Symbolic tensors are allowed to pass through.\n\n >>> with tf.compat.v1.Graph().as_default():\n ... i = tf.compat.v1.placeholder(shape=[None, None], dtype=tf.float32)\n ... t = tf.convert_to_tensor(i)\n\n* `tf.fill`: differs in a few ways:\n * `tf.constant` supports arbitrary constants, not just uniform scalar\n Tensors like `tf.fill`.\n * `tf.fill` creates an Op in the graph that is expanded at runtime, so it\n can efficiently represent large tensors.\n * Since `tf.fill` does not embed the value, it can produce dynamically\n sized outputs.\n\nArgs:\n value: A constant value (or list) of output type `dtype`.\n dtype: The type of the elements of the resulting tensor.\n shape: Optional dimensions of resulting tensor.\n name: Optional name for the tensor.\n\nReturns:\n A Constant Tensor.\n\nRaises:\n TypeError: if shape is incorrectly specified or unsupported.\n ValueError: if called on a symbolic tensor."} +{"repo": "tensorflow", "function": "def _remove_one_redundant_stack_unstack(in_graph_def):\n name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(in_graph_def)\n del name_to_seq_num\n do_generic_pack_unpack = True\n out = _graph_pb2.GraphDef()\n out.library.CopyFrom(in_graph_def.library)\n out.versions.CopyFrom(in_graph_def.versions)\n for n in in_graph_def.node:\n node_name = _tensor_name_base(n.name)\n if not node_name.startswith('OpHintStack') and (not n.op.startswith('Pack')):\n continue\n next_to_visit = [node_name]\n visited = set()\n unpack_nodes = set()\n pack_node = node_name\n matches_pattern = True\n is_hint_created_stack = False\n while next_to_visit:\n current_node_name = next_to_visit[0]\n visited.add(current_node_name)\n del next_to_visit[0]\n node = name_to_node[current_node_name]\n is_op_hint_stack = node.name.startswith('OpHintStack')\n is_op_hint_unstack = node.name.startswith('OpHintUnstack')\n if node.op == 'Identity' or is_op_hint_stack or (do_generic_pack_unpack and node.op == 'Pack'):\n is_hint_created_stack |= is_op_hint_stack\n next_to_visit += [input_node for input_node in name_to_input_name[current_node_name] if input_node not in visited]\n elif is_op_hint_unstack or (do_generic_pack_unpack and node.op == 'Unpack'):\n unpack_nodes.add(node.name)\n is_hint_created_stack &= is_op_hint_unstack\n else:\n matches_pattern = False\n break\n visited.add(node.name)\n if matches_pattern and len(unpack_nodes) == 1:\n pack_node = node_name\n no_external_dependency = True\n for other_n in in_graph_def.node:\n if other_n.name in visited:\n continue\n for input_tensor in name_to_input_name[other_n.name]:\n input_op = _tensor_name_base(input_tensor)\n if input_op in visited and input_op != pack_node:\n no_external_dependency = False\n if is_hint_created_stack or no_external_dependency:\n end = unpack_nodes.pop()\n end_input = name_to_node[end].input[0]\n for other_n in in_graph_def.node:\n node_name = _tensor_name_base(other_n.name)\n if node_name not in visited:\n new_node = _copy.deepcopy(other_n)\n new_node.input[:] = [end_input if stripped == pack_node else non_stripped for stripped, non_stripped in zip(name_to_input_name[node_name], new_node.input[:])]\n out.node.extend([new_node])\n return (out, True)\n return (in_graph_def, False)", "docstring": "Removes a stack->unstack pattern from in_graph_def in a returned graph.\n\nArgs:\n in_graph_def: Graph def to use as input.\n\nReturns:\n 
Simplified tuple (graph_def, changed_something) where changed_something\n is true if anything was done."} +{"repo": "transformers", "function": "def __call__(self, images: ImageInput=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, audio=None, videos=None, **kwargs: Unpack[ColQwen2ProcessorKwargs]) -> BatchFeature:\n output_kwargs = self._merge_kwargs(ColQwen2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n suffix = output_kwargs['text_kwargs'].pop('suffix', None)\n return_token_type_ids = True if suffix is not None else False\n if text is None and images is None:\n raise ValueError('Either text or images must be provided')\n if text is not None and images is not None:\n raise ValueError('Only one of text or images can be processed at a time')\n if images is not None:\n if is_valid_image(images):\n images = [images]\n elif isinstance(images, list) and is_valid_image(images[0]):\n pass\n elif not (isinstance(images, list) and isinstance(images[0], list) and is_valid_image(images[0][0])):\n raise ValueError('images must be an image, list of images or list of list of images')\n texts_doc = [self.visual_prompt_prefix] * len(images)\n image_inputs = self.image_processor(images=images, **output_kwargs['images_kwargs'])\n image_grid_thw = image_inputs['image_grid_thw']\n if image_grid_thw is not None:\n merge_length = self.image_processor.merge_size ** 2\n index = 0\n for i in range(len(texts_doc)):\n while self.image_token in texts_doc[i]:\n texts_doc[i] = texts_doc[i].replace(self.image_token, '<|placeholder|>' * (image_grid_thw[index].prod() // merge_length), 1)\n index += 1\n texts_doc[i] = texts_doc[i].replace('<|placeholder|>', self.image_token)\n text_inputs = self.tokenizer(texts_doc, return_token_type_ids=False, **output_kwargs['text_kwargs'])\n return_data = BatchFeature(data={**text_inputs, **image_inputs})\n offsets = return_data['image_grid_thw'][:, 1] * return_data['image_grid_thw'][:, 2]\n pixel_values = list(torch.split(return_data['pixel_values'], offsets.tolist()))\n return_data['pixel_values'] = torch.nn.utils.rnn.pad_sequence(pixel_values, batch_first=True)\n if return_token_type_ids:\n labels = return_data['input_ids'].masked_fill(return_data['token_type_ids'] == 0, -100)\n return_data.update({'labels': labels})\n return return_data\n elif text is not None:\n if isinstance(text, str):\n text = [text]\n elif not (isinstance(text, list) and isinstance(text[0], str)):\n raise ValueError('Text must be a string or a list of strings')\n if suffix is None:\n suffix = self.query_augmentation_token * 10\n texts_query: List[str] = []\n for query in text:\n augmented_query = self.query_prefix + query + suffix\n texts_query.append(augmented_query)\n batch_query = self.tokenizer(texts_query, return_token_type_ids=False, **output_kwargs['text_kwargs'])\n return batch_query", "docstring": "Main method to prepare for the model either (1) one or several texts, either (2) one or several image(s). This method is a custom\nwrapper around the Qwen2VLProcessor's [`~Qwen2VLProcessor.__call__`] method adapted for the ColQwen2 model. 
It cannot process\nboth text and images at the same time.\n\nWhen preparing the text(s), this method forwards the `text` and `kwargs` arguments to Qwen2TokenizerFast's\n[`~Qwen2TokenizerFast.__call__`].\nWhen preparing the image(s), this method forwards the `images` and `kwargs` arguments to Qwen2VLImageProcessor's\n[`~Qwen2VLImageProcessor.__call__`].\nPlease refer to the docstring of the above two methods for more information.\n\nArgs:\n    images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):\n        The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n        tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a\n        number of channels, H and W are image height and width.\n    text (`str`, `List[str]`, `List[List[str]]`):\n        The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n        (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set\n        `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n    return_tensors (`str` or [`~utils.TensorType`], *optional*):\n        If set, will return tensors of a particular framework. Acceptable values are:\n\n        - `'tf'`: Return TensorFlow `tf.constant` objects.\n        - `'pt'`: Return PyTorch `torch.Tensor` objects.\n        - `'np'`: Return NumPy `np.ndarray` objects.\n        - `'jax'`: Return JAX `jnp.ndarray` objects.\n\nReturns:\n    [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n\n    - **input_ids** -- List of token ids to be fed to a model.\n    - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when\n      `return_attention_mask=True` or if *\"attention_mask\"* is in `self.model_input_names` and if `text` is not\n      `None`).\n    - **pixel_values** -- Pixel values to be fed to a model. 
Returned when `images` is not `None`."} +{"repo": "tensorflow", "function": "def _merge_with(self, other: 'DynamicRaggedShape') -> 'DynamicRaggedShape':\n max_num_row_partitions = max(self.num_row_partitions, other.num_row_partitions)\n a = self._with_num_row_partitions(max_num_row_partitions)\n b = other._with_num_row_partitions(max_num_row_partitions)\n new_row_partitions = [rp_a._merge_precomputed_encodings(rp_b) for rp_a, rp_b in zip(a._row_partitions, b._row_partitions)]\n new_dtype = b.dtype if a.dtype == dtypes.int32 else dtypes.int64\n new_static_inner_shape = a._static_inner_shape.merge_with(b._static_inner_shape)\n new_inner_shape = a._inner_shape\n return DynamicRaggedShape(new_row_partitions, new_inner_shape, new_dtype, True, new_static_inner_shape)", "docstring": "Merge two shapes that are equal modulo num_row_partitions.\n\nThe resulting num_row_partitions is the maximum of the two\nnum_row_partitions.\n\nArgs:\n other: a DynamicRaggedShape representing the same shape with a possibly\n different number of row partitions.\n\nReturns:\n A DynamicRaggedShape with the same shape and the maximum of the\n num_row_partitions of the two shapes."} +{"repo": "transformers", "function": "def get_diff(repo: Repo, base_commit: str, commits: List[str]) -> List[str]:\n print('\\n### DIFF ###\\n')\n code_diff = []\n for commit in commits:\n for diff_obj in commit.diff(base_commit):\n if diff_obj.change_type == 'A' and diff_obj.b_path.endswith('.py'):\n code_diff.append(diff_obj.b_path)\n elif diff_obj.change_type == 'D' and diff_obj.a_path.endswith('.py'):\n code_diff.append(diff_obj.a_path)\n elif diff_obj.change_type in ['M', 'R'] and diff_obj.b_path.endswith('.py'):\n if diff_obj.a_path != diff_obj.b_path:\n code_diff.extend([diff_obj.a_path, diff_obj.b_path])\n elif diff_is_docstring_only(repo, commit, diff_obj.b_path):\n print(f'Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.')\n else:\n code_diff.append(diff_obj.a_path)\n return code_diff", "docstring": "Get the diff between a base commit and one or several commits.\n\nArgs:\n repo (`git.Repo`):\n A git repository (for instance the Transformers repo).\n base_commit (`str`):\n The commit reference of where to compare for the diff. This is the current commit, not the branching point!\n commits (`List[str]`):\n The list of commits with which to compare the repo at `base_commit` (so the branching point).\n\nReturns:\n `List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files\n modified are returned if the diff in the file is not only in docstrings or comments, see\n `diff_is_docstring_only`)."} +{"repo": "temporian", "function": "def set_index(self: EventSetOrNode, indexes: Union[str, List[str]]) -> EventSetOrNode:\n from temporian.core.operators.add_index import set_index\n return set_index(self, indexes=indexes)", "docstring": "Replaces the index in an [`EventSet`][temporian.EventSet].\n\nUsage example:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2, 1, 0, 1, 1],\n ... features={\n ... \"f1\": [1, 1, 1, 2, 2, 2],\n ... \"f2\": [1, 1, 2, 1, 1, 2],\n ... \"f3\": [1, 1, 1, 1, 1, 1]\n ... },\n ... indexes=[\"f1\"],\n ... )\n\n >>> # \"f1\" is the current index\n >>> a\n indexes: [('f1', int64)]\n features: [('f2', int64), ('f3', int64)]\n events:\n f1=1 (3 events):\n timestamps: [1. 1. 2.]\n 'f2': [1 2 1]\n 'f3': [1 1 1]\n f1=2 (3 events):\n timestamps: [0. 1. 
1.]\n 'f2': [1 1 2]\n 'f3': [1 1 1]\n ...\n\n >>> # Set \"f2\" as the only index, remove \"f1\"\n >>> b = a.set_index(\"f2\")\n >>> b\n indexes: [('f2', int64)]\n features: [('f3', int64), ('f1', int64)]\n events:\n f2=1 (4 events):\n timestamps: [0. 1. 1. 2.]\n 'f3': [1 1 1 1]\n 'f1': [2 1 2 1]\n f2=2 (2 events):\n timestamps: [1. 1.]\n 'f3': [1 1]\n 'f1': [1 2]\n ...\n\n >>> # Set both \"f1\" and \"f2\" as indices\n >>> b = a.set_index([\"f1\", \"f2\"])\n >>> b\n indexes: [('f1', int64), ('f2', int64)]\n features: [('f3', int64)]\n events:\n f1=1 f2=1 (2 events):\n timestamps: [1. 2.]\n 'f3': [1 1]\n f1=1 f2=2 (1 events):\n timestamps: [1.]\n 'f3': [1]\n f1=2 f2=1 (2 events):\n timestamps: [0. 1.]\n 'f3': [1 1]\n f1=2 f2=2 (1 events):\n timestamps: [1.]\n 'f3': [1]\n ...\n\n ```\n\nArgs:\n indexes: List of index / feature names (strings) used as\n the new indexes. These names should be either indexes or\n features in the input.\n\nReturns:\n EventSet with the updated indexes.\n\nRaises:\n KeyError: If any of the specified `indexes` are not found in the\n input."} +{"repo": "tensorflow", "function": "def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):\n if init_op is Supervisor.USE_DEFAULT:\n init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)\n if init_op is None:\n init_op = variables.global_variables_initializer()\n ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)\n self._init_op = init_op\n self._init_feed_dict = init_feed_dict", "docstring": "Initializes init_op.\n\nArgs:\n init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,\n create an op that initializes all variables and tables.\n init_feed_dict: A dictionary that maps `Tensor` objects to feed values.\n This feed dictionary will be used when `init_op` is evaluated."} +{"repo": "transformers", "function": "class BaseModelOutput(ModelOutput):\n last_hidden_state: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Base class for model's outputs, with potential hidden states and attentions.\n\nArgs:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "transformers", "function": "class PromptDepthAnythingConfig(PretrainedConfig):\n model_type = 'prompt_depth_anything'\n\n def __init__(self, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, backbone_kwargs=None, patch_size=14, initializer_range=0.02, reassemble_hidden_size=384, reassemble_factors=[4, 2, 1, 
0.5], neck_hidden_sizes=[48, 96, 192, 384], fusion_hidden_size=64, head_in_index=-1, head_hidden_size=32, depth_estimation_type='relative', max_depth=None, **kwargs):\n super().__init__(**kwargs)\n if backbone_config is None and backbone is None:\n logger.info('`backbone_config` is `None`. Initializing the config with the default `Dinov2` backbone.')\n backbone_config = CONFIG_MAPPING['dinov2'](image_size=518, hidden_size=384, num_attention_heads=6, out_indices=[9, 10, 11, 12], apply_layernorm=True, reshape_hidden_states=False)\n elif isinstance(backbone_config, dict):\n backbone_model_type = backbone_config.get('model_type')\n config_class = CONFIG_MAPPING[backbone_model_type]\n backbone_config = config_class.from_dict(backbone_config)\n verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)\n self.backbone_config = backbone_config\n self.backbone = backbone\n self.use_pretrained_backbone = use_pretrained_backbone\n self.use_timm_backbone = use_timm_backbone\n self.backbone_kwargs = backbone_kwargs\n self.reassemble_hidden_size = reassemble_hidden_size\n self.patch_size = patch_size\n self.initializer_range = initializer_range\n self.reassemble_factors = reassemble_factors\n self.neck_hidden_sizes = neck_hidden_sizes\n self.fusion_hidden_size = fusion_hidden_size\n self.head_in_index = head_in_index\n self.head_hidden_size = head_hidden_size\n if depth_estimation_type not in ['relative', 'metric']:\n raise ValueError(\"depth_estimation_type must be one of ['relative', 'metric']\")\n self.depth_estimation_type = depth_estimation_type\n self.max_depth = max_depth if max_depth else 1\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns:\n `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n if output['backbone_config'] is not None:\n output['backbone_config'] = self.backbone_config.to_dict()\n output['model_type'] = self.__class__.model_type\n return output", "docstring": "This is the configuration class to store the configuration of a [`PromptDepthAnythingModel`]. It is used to instantiate a PromptDepthAnything\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the PromptDepthAnything\n[LiheYoung/depth-anything-small-hf](https://huggingface.co/LiheYoung/depth-anything-small-hf) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n backbone_config (`Union[Dict[str, Any], PretrainedConfig]`, *optional*):\n The configuration of the backbone model. Only used in case `is_hybrid` is `True` or in case you want to\n leverage the [`AutoBackbone`] API.\n backbone (`str`, *optional*):\n Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this\n will load the corresponding pretrained weights from the timm or transformers library. 
If `use_pretrained_backbone`\n is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.\n use_pretrained_backbone (`bool`, *optional*, defaults to `False`):\n Whether to use pretrained weights for the backbone.\n use_timm_backbone (`bool`, *optional*, defaults to `False`):\n Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]\n API.\n backbone_kwargs (`dict`, *optional*):\n Keyword arguments to be passed to AutoBackbone when loading from a checkpoint\n e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.\n patch_size (`int`, *optional*, defaults to 14):\n The size of the patches to extract from the backbone features.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n reassemble_hidden_size (`int`, *optional*, defaults to 384):\n The number of input channels of the reassemble layers.\n reassemble_factors (`List[int]`, *optional*, defaults to `[4, 2, 1, 0.5]`):\n The up/downsampling factors of the reassemble layers.\n neck_hidden_sizes (`List[str]`, *optional*, defaults to `[48, 96, 192, 384]`):\n The hidden sizes to project to for the feature maps of the backbone.\n fusion_hidden_size (`int`, *optional*, defaults to 64):\n The number of channels before fusion.\n head_in_index (`int`, *optional*, defaults to -1):\n The index of the features to use in the depth estimation head.\n head_hidden_size (`int`, *optional*, defaults to 32):\n The number of output channels in the second convolution of the depth estimation head.\n depth_estimation_type (`str`, *optional*, defaults to `\"relative\"`):\n The type of depth estimation to use. Can be one of `[\"relative\", \"metric\"]`.\n max_depth (`float`, *optional*):\n The maximum depth to use for the \"metric\" depth estimation head. 20 should be used for indoor models\n and 80 for outdoor models. For \"relative\" depth estimation, this value is ignored.\n\nExample:\n\n```python\n>>> from transformers import PromptDepthAnythingConfig, PromptDepthAnythingForDepthEstimation\n\n>>> # Initializing a PromptDepthAnything small style configuration\n>>> configuration = PromptDepthAnythingConfig()\n\n>>> # Initializing a model from the PromptDepthAnything small style configuration\n>>> model = PromptDepthAnythingForDepthEstimation(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "class VanConfig(PretrainedConfig):\n model_type = 'van'\n\n def __init__(self, image_size=224, num_channels=3, patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3], mlp_ratios=[8, 8, 4, 4], hidden_act='gelu', initializer_range=0.02, layer_norm_eps=1e-06, layer_scale_init_value=0.01, drop_path_rate=0.0, dropout_rate=0.0, **kwargs):\n super().__init__(**kwargs)\n self.image_size = image_size\n self.num_channels = num_channels\n self.patch_sizes = patch_sizes\n self.strides = strides\n self.hidden_sizes = hidden_sizes\n self.depths = depths\n self.mlp_ratios = mlp_ratios\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.layer_scale_init_value = layer_scale_init_value\n self.drop_path_rate = drop_path_rate\n self.dropout_rate = dropout_rate", "docstring": "This is the configuration class to store the configuration of a [`VanModel`]. 
It is used to instantiate a VAN model\naccording to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the VAN\n[Visual-Attention-Network/van-base](https://huggingface.co/Visual-Attention-Network/van-base) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n num_channels (`int`, *optional*, defaults to 3):\n The number of input channels.\n patch_sizes (`List[int]`, *optional*, defaults to `[7, 3, 3, 3]`):\n Patch size to use in each stage's embedding layer.\n strides (`List[int]`, *optional*, defaults to `[4, 2, 2, 2]`):\n Stride size to use in each stage's embedding layer to downsample the input.\n hidden_sizes (`List[int]`, *optional*, defaults to `[64, 128, 320, 512]`):\n Dimensionality (hidden size) at each stage.\n depths (`List[int]`, *optional*, defaults to `[3, 3, 12, 3]`):\n Depth (number of layers) for each stage.\n mlp_ratios (`List[int]`, *optional*, defaults to `[8, 8, 4, 4]`):\n The expansion ratio for mlp layer at each stage.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in each layer. If string, `\"gelu\"`, `\"relu\"`,\n `\"selu\"` and `\"gelu_new\"` are supported.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the layer normalization layers.\n layer_scale_init_value (`float`, *optional*, defaults to 0.01):\n The initial value for layer scaling.\n drop_path_rate (`float`, *optional*, defaults to 0.0):\n The dropout probability for stochastic depth.\n dropout_rate (`float`, *optional*, defaults to 0.0):\n The dropout probability for dropout.\n\nExample:\n```python\n>>> from transformers import VanModel, VanConfig\n\n>>> # Initializing a VAN van-base style configuration\n>>> configuration = VanConfig()\n>>> # Initializing a model from the van-base style configuration\n>>> model = VanModel(configuration)\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "keras", "function": "class SKLearnClassifier(ClassifierMixin, SKLBase):\n\n def _process_target(self, y, reset=False):\n \"\"\"Classifiers do OHE.\"\"\"\n target_type = type_of_target(y, raise_unknown=True)\n if target_type not in ['binary', 'multiclass']:\n raise ValueError(f'Only binary and multiclass target types are supported. Target type: {target_type}')\n if reset:\n self._target_encoder = sklearn.pipeline.make_pipeline(TargetReshaper(), sklearn.preprocessing.OneHotEncoder(sparse_output=False)).fit(y)\n self.classes_ = np.unique(y)\n if len(self.classes_) == 1:\n raise ValueError(\"Classifier can't train when only one class is present.\")\n return self._target_encoder.transform(y)\n\n def _more_tags(self):\n return {'poor_score': True}\n\n def __sklearn_tags__(self):\n tags = super().__sklearn_tags__()\n tags.classifier_tags.poor_score = True\n return tags", "docstring": "scikit-learn compatible classifier wrapper for Keras models.\n\nNote that there are sources of randomness in model initialization and\ntraining. 
Refer to [Reproducibility in Keras Models](\nhttps://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to\ncontrol randomness.\n\nArgs:\n model: `Model`.\n An instance of `Model`, or a callable returning such an object.\n Note that if input is a `Model`, it will be cloned using\n `keras.models.clone_model` before being fitted, unless\n `warm_start=True`.\n The `Model` instance needs to be passed as already compiled.\n If callable, it must accept at least `X` and `y` as keyword\n arguments. Other arguments must be accepted if passed as\n `model_kwargs` by the user.\n warm_start: bool, defaults to `False`.\n Whether to reuse the model weights from the previous fit. If `True`,\n the given model won't be cloned and the weights from the previous\n fit will be reused.\n model_kwargs: dict, defaults to `None`.\n Keyword arguments passed to `model`, if `model` is callable.\n fit_kwargs: dict, defaults to `None`.\n Keyword arguments passed to `model.fit`. These can also be passed\n directly to the `fit` method of the scikit-learn wrapper. The\n values passed directly to the `fit` method take precedence over\n these.\n\nAttributes:\n model_ : `Model`\n The fitted model.\n history_ : dict\n The history of the fit, returned by `model.fit`.\n classes_ : array-like, shape=(n_classes,)\n The classes labels.\n\nExample:\nHere we use a function which creates a basic MLP model dynamically\nchoosing the input and output shapes. We will use this to create our\nscikit-learn model.\n\n``` python\nfrom keras.src.layers import Dense, Input, Model\n\ndef dynamic_model(X, y, loss, layers=[10]):\n # Creates a basic MLP model dynamically choosing the input and\n # output shapes.\n n_features_in = X.shape[1]\n inp = Input(shape=(n_features_in,))\n\n hidden = inp\n for layer_size in layers:\n hidden = Dense(layer_size, activation=\"relu\")(hidden)\n\n n_outputs = y.shape[1] if len(y.shape) > 1 else 1\n out = [Dense(n_outputs, activation=\"softmax\")(hidden)]\n model = Model(inp, out)\n model.compile(loss=loss, optimizer=\"rmsprop\")\n\n return model\n```\n\nYou can then use this function to create a scikit-learn compatible model\nand fit it on some data.\n\n``` python\nfrom sklearn.datasets import make_classification\nfrom keras.wrappers import SKLearnClassifier\n\nX, y = make_classification(n_samples=1000, n_features=10, n_classes=3)\nest = SKLearnClassifier(\n model=dynamic_model,\n model_kwargs={\n \"loss\": \"categorical_crossentropy\",\n \"layers\": [20, 20, 20],\n },\n)\n\nest.fit(X, y, epochs=5)\n```"} +{"repo": "tensorflow", "function": "class TrueNegatives(_ConfusionMatrixConditionCount):\n\n def __init__(self, thresholds=None, name=None, dtype=None):\n super(TrueNegatives, self).__init__(confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_NEGATIVES, thresholds=thresholds, name=name, dtype=dtype)", "docstring": "Calculates the number of true negatives.\n\nIf `sample_weight` is given, calculates the sum of the weights of\ntrue negatives. This metric creates one local variable, `accumulator`\nthat is used to keep track of the number of true negatives.\n\nIf `sample_weight` is `None`, weights default to 1.\nUse `sample_weight` of 0 to mask values.\n\nArgs:\n thresholds: (Optional) Defaults to 0.5. A float value or a python\n list/tuple of float threshold values in [0, 1]. A threshold is compared\n with prediction values to determine the truth value of predictions\n (i.e., above the threshold is `true`, below is `false`). 
One metric\n value is generated for each threshold value.\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nStandalone usage:\n\n>>> m = tf.keras.metrics.TrueNegatives()\n>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0])\n>>> m.result().numpy()\n2.0\n\n>>> m.reset_state()\n>>> m.update_state([0, 1, 0, 0], [1, 1, 0, 0], sample_weight=[0, 0, 1, 0])\n>>> m.result().numpy()\n1.0\n\nUsage with `compile()` API:\n\n```python\nmodel.compile(optimizer='sgd',\n loss='mse',\n metrics=[tf.keras.metrics.TrueNegatives()])\n```"} +{"repo": "tensorflow", "function": "def enqueue_tpu_embedding_sparse_tensor_batch(sample_indices, embedding_indices, aggregation_weights, table_ids, device_ordinal, max_sequence_lengths=None, num_features=None, combiners=None, mode_override=None, name=None):\n if mode_override is None:\n mode_override = 'unspecified'\n return gen_tpu_ops.enqueue_tpu_embedding_sparse_tensor_batch(sample_indices=sample_indices, embedding_indices=embedding_indices, aggregation_weights=aggregation_weights, table_ids=table_ids, device_ordinal=device_ordinal, max_sequence_lengths=max_sequence_lengths, combiners=combiners, mode_override=mode_override, num_features=num_features, name=name)", "docstring": "A placeholder op for enqueueing embedding IDs to the TPU.\n\nArgs:\n sample_indices: A list of rank 2 Tensors specifying the training example to\n which the corresponding embedding_indices and aggregation_weights values\n belong. It corresponds to sp_ids.indices in embedding_lookup_sparse(). If\n the size of its first dimension is 0, we assume each embedding_indices\n belongs to a different sample. Both int32 and int64 are allowed and will\n be converted to int32 internally.\n embedding_indices: A list of rank 1 Tensors, indices into the embedding\n tables. It corresponds to sp_ids.values in embedding_lookup_sparse(). Both\n int32 and int64 are allowed and will be converted to int32 internally.\n aggregation_weights: A list of rank 1 Tensors containing per training\n example aggregation weights. It corresponds to sp_weights.values in\n embedding_lookup_sparse(). If the size of its first dimension is 0, we\n assume all weights are 1. Both float32 and float64 are allowed and will be\n converted to float32 internally.\n table_ids: A list of integers specifying the identifier of the embedding\n table (offset of TableDescriptor in the TPUEmbeddingConfiguration) to\n lookup the corresponding input. The ith input is looked up using\n table_ids[i]. The size of the table_ids list must be equal to that of\n sample_indices, embedding_indices and aggregation_weights.\n device_ordinal: The TPU device to use. Should be >= 0 and less than the\n number of TPU cores in the task on which the node is placed.\n max_sequence_lengths: A list of integers, the size of which is equal to\n sample_indices. If equal to 0, the corresponding feature is considered to\n be a non-sequence feature, If greater than 0, the corresponding feature is\n a sequence feature with the given maximal length. If None, then we assume\n a list of all zeroes.\n num_features: A list of integers, the size of which is equal to\n sample_indices. If non-empty, entries in this list must be at least 1. For\n each batch element, we will take num_features rows of the input tensor for\n embedding lookup. 
E.g., when sample_indices is empty, the embedding\n indices must be of shape (batch_size*num_features).\n combiners: A list of string scalars, one for each embedding table that\n specify how to normalize the embedding activations after weighted\n summation. Supported combiners are 'mean', 'sum', or 'sqrtn'. It is\n invalid to have the sum of the weights be 0 for 'mean' or the sum of the\n squared weights be 0 for 'sqrtn'. If combiners isn't passed, the default\n is to use 'sum' for all tables (optional).\n mode_override: A string input that overrides the mode specified in the\n TPUEmbeddingConfiguration. Supported values are {'unspecified',\n 'inference', 'train', 'backward_pass_only'}. When set to 'unspecified',\n the mode set in TPUEmbeddingConfiguration is used, otherwise mode_override\n is used (optional).\n name: A name for the operation (optional).\n\nReturns:\n An EnqueueTPUEmbeddingSparseTensorBatch operation."} +{"repo": "transformers", "function": "def get_text_features(self, input_ids: Optional[torch.LongTensor]=None, text_encoder_inputs_embeds: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.LongTensor]=None) -> torch.FloatTensor:\n outputs = self.text_encoder_model(input_ids=input_ids, inputs_embeds=text_encoder_inputs_embeds, attention_mask=attention_mask)\n return outputs[0]", "docstring": "This method can be used to extract text_embeds from a text. The text embeddings obtained by applying the\nprojection layer to the pooled output of the CLVP text encoder model.\n\nArgs:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n [What are input IDs?](../glossary#input-ids)\n text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):\n inputs_embeds for the text encoder model passed in place of `input_ids`.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. 
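For context, a minimal sketch of how such a padding mask is typically built from `input_ids` (the `pad_token_id` value here is hypothetical; in practice the processor returns this mask for you):

```python
import torch

pad_token_id = 0  # hypothetical; depends on the tokenizer
input_ids = torch.tensor([[101, 42, 17, pad_token_id, pad_token_id]])

# 1 = attend to this token, 0 = padding to be ignored.
attention_mask = (input_ids != pad_token_id).long()
print(attention_mask)  # tensor([[1, 1, 1, 0, 0]])
```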
Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n\nReturns:\n    `torch.FloatTensor` of shape `(batch_size, output_dim)`:\n        The text embeddings obtained by applying the projection layer to the pooled output of the CLVP Text\n        Model.\n\nExamples:\n\n```python\n>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration\n\n>>> # Define the Text\n>>> text = \"This is an example text.\"\n\n>>> # Define processor and model\n>>> processor = ClvpProcessor.from_pretrained(\"susnato/clvp_dev\")\n>>> model = ClvpModelForConditionalGeneration.from_pretrained(\"susnato/clvp_dev\")\n\n>>> # Generate processor output and text embeds\n>>> processor_output = processor(text=text, return_tensors=\"pt\")\n>>> text_embeds = model.get_text_features(input_ids=processor_output[\"input_ids\"])\n```"} +{"repo": "transformers", "function": "def get_resize_output_image_size(image, resolution_max_side: int, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> Tuple[int, int]:\n    height, width = get_image_size(image, channel_dim=input_data_format)\n    height, width = _resize_output_size_rescale_to_max_len(height, width, max_len=resolution_max_side)\n    height, width = _resize_output_size_scale_below_upper_bound(height, width, max_len=MAX_IMAGE_SIZE)\n    return (height, width)", "docstring": "Get the output size of the image after resizing, given the maximum allowed size for the longest edge.\nArgs:\n    image (`np.ndarray`):\n        Image to resize.\n    resolution_max_side (`int`):\n        The longest edge of the image will be resized to this value. The shortest edge will be resized to keep the\n        input aspect ratio.\n    input_data_format (`ChannelDimension` or `str`):\n        The channel dimension format of the input image.\nReturns:\n    The output size of the image after resizing."} +{"repo": "tensorflow", "function": "def _try_put(self, item):\n    try:\n        self._event_queue.put(item)\n    except QueueClosedError:\n        self._internal_close()\n        if self._worker.failure_exc_info:\n            _, exception, _ = self._worker.failure_exc_info\n            raise exception from None", "docstring": "Attempts to enqueue an item to the event queue.\n\nIf the queue is closed, this will close the EventFileWriter and reraise the\nexception that caused the queue closure, if one exists.\n\nArgs:\n    item: the item to enqueue"} +{"repo": "keras", "function": "def update_state(self, y_true, y_pred, sample_weight=None):\n    y_true = ops.convert_to_tensor(y_true, dtype=self.dtype)\n    y_pred = ops.convert_to_tensor(y_pred, dtype='float32')\n    y_pred = ops.cast(y_pred >= self.threshold, self.dtype)\n    return super().update_state(y_true, y_pred, sample_weight)", "docstring": "Accumulates the confusion matrix statistics.\n\nBefore the confusion matrix is updated, the predicted values are\nthresholded to be:\n    0 for values that are smaller than the `threshold`\n    1 for values that are larger or equal to the `threshold`\n\nArgs:\n    y_true: The ground truth values.\n    y_pred: The predicted values.\n    sample_weight: Optional weighting of each example. Can\n        be a `Tensor` whose rank is either 0, or the same as `y_true`,\n        and must be broadcastable to `y_true`. 
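The resize rule documented for `get_resize_output_image_size` above reduces to proportional scaling; a rough sketch (the exact rounding and the `MAX_IMAGE_SIZE` clamp of the private helpers are simplified here):

```python
# Approximate sketch of the documented rule: scale so the longest edge
# equals max_len while preserving the aspect ratio.
def rescale_to_max_side(height, width, max_len):
    scale = max_len / max(height, width)
    return max(1, round(height * scale)), max(1, round(width * scale))

print(rescale_to_max_side(480, 640, 320))  # (240, 320)
```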
Defaults to `1`.\n\nReturns:\n Update op."} +{"repo": "tensorflow", "function": "def _create_or_get_tensor_values_cache(self, cache_name, graph, shape=None, dtype=dtypes.float32):\n if graph is None:\n raise ValueError('Invalid graph.')\n graph_cache_var = self._cache_variable_for_graph(graph)\n if cache_name not in graph_cache_var:\n if shape is None:\n raise ValueError('shape must be provided at cache creation.')\n if dtype.is_integer:\n init_val = int(_COMPACT_TRACE_ENTRY_INIT_VALUE)\n else:\n init_val = _COMPACT_TRACE_ENTRY_INIT_VALUE\n with graph.as_default() as g, g.name_scope(None):\n graph_cache_var[cache_name] = variable_scope.get_variable(_TT_SNAPSHOT + '_' + self._escape_namescopes(cache_name), shape=shape, dtype=dtype, initializer=init_ops.constant_initializer(init_val), trainable=False, use_resource=True, collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])\n return graph_cache_var[cache_name]", "docstring": "Creates a variable as the cache to store intermediate tensor values.\n\nArgs:\n cache_name: Name to be given to the cache (an instance of tf.variable).\n graph: Tensorflow graph.\n shape: A list of dimensions.\n dtype: Data type of created cache.\nReturns:\n A ref to newly created or existing cache with the given dimensions.\nRaises:\n ValueError:\n (1) If graph is None, or\n (2) shape is None when a new cache needs to be created."} +{"repo": "beam", "function": "def __init__(self, base: ModelHandler[ExampleT, PredictionT, ModelT]):\n self._base = base\n self._env_vars = getattr(base, '_env_vars', {})", "docstring": "A ModelHandler that skips batching in RunInference.\n\nArgs:\n base: An implementation of the underlying model handler."} +{"repo": "tensorflow", "function": "def get_min_max_value(statistics: calib_stats_pb2.CalibrationStatistics, calib_opts: stablehlo_quant_config_pb2.CalibrationOptions) -> tuple[float, float]:\n calib_method = calib_opts.calibration_method\n if calib_method not in _REGISTRY:\n raise ValueError(f'Unsupported calibration method: {calib_method}')\n calibration_algorithm = _REGISTRY[calib_method](statistics, calib_opts)\n return calibration_algorithm.get_min_max_value()", "docstring": "Calculates min and max from statistics using calibration options.\n\nArgs:\n statistics: Collected calibration statistics.\n calib_opts: Calibration options used for calculating min and max.\n\nReturns:\n (min_value, max_value): Min and max calculated using calib_opts.\n\nRaises:\n ValueError: Unsupported calibration method is given."} +{"repo": "tensorflow", "function": "def _from_tensor_shape(cls, shape: Any, num_row_partitions: int, dtype: dtypes.DType) -> 'DynamicRaggedShape.Spec':\n if dtype != dtypes.int32 and dtype != dtypes.int64:\n raise ValueError('dtype must be tf.int32 or tf.int64')\n shape = tensor_shape.as_shape(shape)\n if shape.rank is None:\n row_partitions = [RowPartitionSpec(dtype=dtype) for _ in range(num_row_partitions)]\n return DynamicRaggedShape.Spec(row_partitions=row_partitions, static_inner_shape=tensor_shape.TensorShape(None), dtype=dtype)\n if shape.rank <= 1:\n if num_row_partitions:\n raise ValueError('num_row_partitions should be zero ' + 'if shape is a scalar or vector.')\n return DynamicRaggedShape.Spec(row_partitions=[], static_inner_shape=shape, dtype=dtype)\n if shape.rank <= num_row_partitions:\n raise ValueError('num_row_partitions must be less than rank')\n num_elements_so_far = tensor_shape.dimension_value(shape[0])\n rp_specs = []\n for i in range(num_row_partitions):\n current_dim = 
tensor_shape.dimension_value(shape[i + 1])\n            if current_dim is None or num_elements_so_far is None:\n                nvals = None\n            else:\n                nvals = num_elements_so_far * current_dim\n            rp_specs.append(RowPartitionSpec(nrows=num_elements_so_far, nvals=nvals, uniform_row_length=current_dim, dtype=dtype))\n            num_elements_so_far = nvals\n        static_inner_shape = tensor_shape.TensorShape([num_elements_so_far]) + shape[num_row_partitions + 1:]\n        return DynamicRaggedShape.Spec(row_partitions=rp_specs, static_inner_shape=static_inner_shape, dtype=dtype)", "docstring": "Creates a `DynamicRaggedShape.Spec` corresponding to a `tf.TensorShape`.\n\nIt is assumed that this is a `tf.TensorShape` coming from a\n`tf.TensorSpec`, not from `RaggedTensor.shape`.\n\nIn addition to the shape, we need to know the number of row partitions,\nand the dtype used in the shape (tf.int32 or tf.int64).\n\nWithin the dimensions that are partitioned, all dimensions are assumed\nto be uniform.\n\nArgs:\n    shape: a TensorShape.\n    num_row_partitions: the ragged rank of the RaggedShape.\n    dtype: the dtype of the shape (not the tensor); tf.int64 or tf.int32.\n\nReturns:\n    a DynamicRaggedShape.Spec representing a TensorShape."} +{"repo": "tensorflow", "function": "def container(self, container_name) -> Iterator[str]:\n    original_container = self._container\n    self._container = container_name\n    try:\n        yield self._container\n    finally:\n        self._container = original_container", "docstring": "Returns a context manager that specifies the resource container to use.\n\nStateful operations, such as variables and queues, can maintain their\nstates on devices so that they can be shared by multiple processes.\nA resource container is a string name under which these stateful\noperations are tracked. These resources can be released or cleared\nwith `tf.Session.reset()`.\n\nFor example:\n\n```python\nwith g.container('experiment0'):\n  # All stateful Operations constructed in this context will be placed\n  # in resource container \"experiment0\".\n  v1 = tf.Variable([1.0])\n  v2 = tf.Variable([2.0])\n  with g.container(\"experiment1\"):\n    # All stateful Operations constructed in this context will be\n    # placed in resource container \"experiment1\".\n    v3 = tf.Variable([3.0])\n    q1 = tf.queue.FIFOQueue(10, tf.float32)\n  # All stateful Operations constructed in this context will be\n  # created in resource container \"experiment0\".\n  v4 = tf.Variable([4.0])\n  q1 = tf.queue.FIFOQueue(20, tf.float32)\n  with g.container(\"\"):\n    # All stateful Operations constructed in this context will be\n    # placed in the default resource container.\n    v5 = tf.Variable([5.0])\n    q3 = tf.queue.FIFOQueue(30, tf.float32)\n\n# Resets container \"experiment0\", after which the state of v1, v2, v4, q1\n# will become undefined (such as uninitialized).\ntf.Session.reset(target, [\"experiment0\"])\n```\n\nArgs:\n    container_name: container name string.\n\nReturns:\n    A context manager for defining resource containers for stateful ops,\n    yields the container name."} +{"repo": "transformers", "function": "def crop_image_to_patches(self, images: np.ndarray, min_patches: int, max_patches: int, use_thumbnail: bool=True, patch_size: Optional[Union[Tuple, int, dict]]=None, data_format: ChannelDimension=None):\n    if data_format is None:\n        data_format = infer_channel_dimension_format(images)\n    images = to_channel_dimension_format(images, ChannelDimension.FIRST, data_format)\n    patch_size_height, patch_size_width = (patch_size['height'], patch_size['width'])\n    original_height, original_width = images.shape[-2:]\n    num_columns, 
num_rows = get_optimal_tiled_canvas((original_height, original_width), (patch_size_height, patch_size_width), min_patches, max_patches)\n target_width = patch_size_width * num_columns\n target_height = patch_size_height * num_rows\n num_blocks = num_columns * num_rows\n resized_image = self.resize(images, {'height': target_height, 'width': target_width}, data_format=ChannelDimension.FIRST, input_data_format=ChannelDimension.FIRST)\n processed_images = []\n for i in range(num_blocks):\n column = i % num_columns\n row = i // num_columns\n box = (column * patch_size_width, row * patch_size_height, (column + 1) * patch_size_width, (row + 1) * patch_size_height)\n patch_image = resized_image[..., box[1]:box[3], box[0]:box[2]]\n patch_image = to_channel_dimension_format(patch_image, data_format, ChannelDimension.FIRST)\n processed_images.append(patch_image)\n if use_thumbnail and len(processed_images) != 1:\n thumbnail_img = self.resize(images, patch_size, data_format=data_format, input_data_format=ChannelDimension.FIRST)\n processed_images.append(thumbnail_img)\n return processed_images", "docstring": "Crop the image to patches and return a list of cropped images.\nThe number of patches and their grid arrangement are determined by the original image size,\nthe target patch size and the minimum and maximum number of patches.\nThe aspect ratio of the patches grid is chosen to be the closest to the original image aspect ratio.\n\nArgs:\n images (`np.ndarray`):\n The image to be cropped.\n min_patches (`int`):\n The minimum number of patches to be extracted from the image.\n max_patches (`int`):\n The maximum number of patches to be extracted from the image.\n use_thumbnail (`bool`, *optional*, defaults to `True`):\n Whether to add a thumbnail image to the list of cropped patches.\n patch_size (`int`, `Tuple[int, int]`, `dict`, *optional*):\n The size of the output patches.\n data_format (`ChannelDimension`, *optional*):\n The format of the image data. If `None`, the format is inferred from the input image.\n\nReturns:\n List[`PIL.Image.Image`] or List[np.ndarray]: The list of cropped images."} +{"repo": "tf-quant-finance", "function": "def actual_actual_isda(*, start_date, end_date, schedule_info=None, dtype=None, name=None):\n del schedule_info\n with tf.name_scope(name or 'actual_actual_isda'):\n end_date = dt.convert_to_date_tensor(end_date)\n start_date = dt.convert_to_date_tensor(start_date)\n dtype = dtype or tf.float32\n days_in_leap_years, days_in_nonleap_years = du.days_in_leap_and_nonleap_years_between(start_date, end_date)\n days_in_leap_years = tf.cast(days_in_leap_years, dtype=dtype)\n days_in_nonleap_years = tf.cast(days_in_nonleap_years, dtype=dtype)\n return days_in_leap_years / 366 + days_in_nonleap_years / 365", "docstring": "Computes the year fraction between the specified dates.\n\nComputes the year fraction between the dates by dividing the actual number of\ndays in a leap year by 366 and the actual number of days in a standard year by\n365.\n\nWhen determining whether a leap day is contained in the date range,\n'start_date' is excluded and 'end_date' is included.\n\nNote that the schedule info is not needed for this convention and is ignored\nif supplied.\n\nhttps://en.wikipedia.org/wiki/Day_count_convention#Actual/Actual_ISDA\n\nArgs:\n start_date: A `DateTensor` object of any shape.\n end_date: A `DateTensor` object of compatible shape with `start_date`.\n schedule_info: The schedule info. Ignored for this convention.\n dtype: The dtype of the result. 
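The grid selection that `crop_image_to_patches` above delegates to `get_optimal_tiled_canvas` can be approximated from the rule its docstring states; a simplified stand-in, not the actual implementation:

```python
# Simplified sketch of the documented rule: among all (columns, rows) grids
# whose patch count lies in [min_patches, max_patches], pick the grid whose
# aspect ratio is closest to the image's aspect ratio.
def pick_grid(height, width, min_patches, max_patches):
    image_ratio = width / height
    candidates = [
        (c, r)
        for c in range(1, max_patches + 1)
        for r in range(1, max_patches + 1)
        if min_patches <= c * r <= max_patches
    ]
    return min(candidates, key=lambda cr: abs(cr[0] / cr[1] - image_ratio))

# A 2:1 landscape image with at most 6 patches -> a wide (2, 1) grid.
print(pick_grid(500, 1000, 1, 6))
```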
Either `tf.float32` or `tf.float64`. If not\n supplied, `tf.float32` is returned.\n name: Python `str` name prefixed to ops created by this function. If not\n supplied, `actual_actual_isda` is used.\n\nReturns:\n A real `Tensor` of supplied `dtype` and shape of `start_date`. The year\n fraction between the start and end date as computed by Actual/Actual ISDA\n convention."} +{"repo": "keras", "function": "def weight_memory_size(weights):\n unique_weights = {id(w): w for w in weights}.values()\n total_memory_size = 0\n for w in unique_weights:\n total_memory_size += _compute_memory_size(w.shape, w.dtype)\n return total_memory_size / 8", "docstring": "Compute the memory footprint for weights based on their dtypes.\n\nArgs:\n weights: An iterable contains the weights to compute weight size.\n\nReturns:\n The total memory size (in Bytes) of the weights."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')\n elif input_ids is not None:\n input = input_ids\n input_shape = input.shape\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input = inputs_embeds[:, :, -1]\n else:\n raise ValueError('You have to specify either input_ids or inputs_embeds')\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids)\n embed_pos = self.embed_positions(input)\n hidden_states = inputs_embeds + embed_pos.to(inputs_embeds.device)\n hidden_states = self.layernorm_embedding(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n attention_mask = self._update_full_mask(attention_mask, inputs_embeds)\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n if head_mask is not None:\n if head_mask.size()[0] != len(self.layers):\n raise ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n to_drop = False\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n to_drop = True\n if to_drop:\n layer_outputs = (None, None)\n else:\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, output_attentions)\n else:\n layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + 
(layer_outputs[1],)\n hidden_states = self.layer_norm(hidden_states)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple."} +{"repo": "tensorflow", "function": "def _num_present(losses, weights, per_batch=False):\n if isinstance(weights, float) and weights != 0.0 or (context.executing_eagerly() and weights._rank() == 0 and (not math_ops.equal(weights, 0.0))):\n return _num_elements(losses)\n with ops.name_scope(None, 'num_present', (losses, weights)) as scope:\n weights = math_ops.cast(weights, dtype=dtypes.float32)\n present = array_ops.where(math_ops.equal(weights, 0.0), array_ops.zeros_like(weights), array_ops.ones_like(weights))\n present = weights_broadcast_ops.broadcast_weights(present, losses)\n if per_batch:\n return math_ops.reduce_sum(present, axis=math_ops.range(1, array_ops.rank(present)), keepdims=True, name=scope)\n return math_ops.reduce_sum(present, name=scope)", "docstring": "Computes the number of elements in the loss function induced by `weights`.\n\nA given weights tensor induces different numbers of usable elements in the\n`losses` tensor. The `weights` tensor is broadcast across `losses` for all\npossible dimensions. For example, if `losses` is a tensor of dimension\n`[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,\nin effect, tiled to match the shape of `losses`. 
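The `[4, 5, 6, 3]` / `[4, 5]` example above can be re-enacted with plain NumPy to make the effective tiling concrete (illustrative only; the real op uses `weights_broadcast_ops`):

```python
import numpy as np

# A [4, 5] weights tensor is, in effect, tiled across the trailing
# dimensions of a [4, 5, 6, 3] losses tensor, so each non-zero weight
# marks 6 * 3 = 18 loss elements as "present".
losses = np.ones((4, 5, 6, 3))
weights = np.zeros((4, 5))
weights[0, :3] = 2.0  # three non-zero weights

present = np.broadcast_to(
    (weights != 0.0).astype(np.float32)[:, :, None, None], losses.shape
)
print(present.sum())  # 3 weights * 18 elements each = 54.0
```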
Following this effective\ntile, the total number of present elements is the number of non-zero weights.\n\nArgs:\n losses: `Tensor` of shape `[batch_size, d1, ... dN]`.\n weights: `Tensor` of shape `[]`, `[batch_size]` or\n `[batch_size, d1, ... dK]`, where K < N.\n per_batch: Whether to return the number of elements per batch or as a sum\n total.\n\nReturns:\n The number of present (non-zero) elements in the losses tensor. If\n `per_batch` is `True`, the value is returned as a tensor of size\n `[batch_size]`. Otherwise, a single scalar tensor is returned."} +{"repo": "tensorflow", "function": "def _to_tf_type(dtype):\n return dtypes.as_dtype(dtype)", "docstring": "Converts a native python or numpy type to TF DType.\n\nArgs:\n dtype: Could be a python type, a numpy type or a TF DType.\n\nReturns:\n A tensorflow `DType`."} +{"repo": "transformers", "function": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache,\n to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`torch.Tensor`):\n Batch size."} +{"repo": "tensorflow", "function": "def inverse_stft(stfts, frame_length, frame_step, fft_length=None, window_fn=window_ops.hann_window, name=None):\n with ops.name_scope(name, 'inverse_stft', [stfts]):\n stfts = ops.convert_to_tensor(stfts, name='stfts')\n stfts.shape.with_rank_at_least(2)\n frame_length = ops.convert_to_tensor(frame_length, name='frame_length')\n frame_length.shape.assert_has_rank(0)\n frame_step = ops.convert_to_tensor(frame_step, name='frame_step')\n frame_step.shape.assert_has_rank(0)\n if fft_length is None:\n fft_length = _enclosing_power_of_two(frame_length)\n else:\n fft_length = 
ops.convert_to_tensor(fft_length, name='fft_length')\n fft_length.shape.assert_has_rank(0)\n real_frames = fft_ops.irfft(stfts, [fft_length])\n frame_length_static = tensor_util.constant_value(frame_length)\n if frame_length_static is None or real_frames.shape.ndims is None or real_frames.shape.as_list()[-1] is None:\n real_frames = real_frames[..., :frame_length]\n real_frames_rank = array_ops.rank(real_frames)\n real_frames_shape = array_ops.shape(real_frames)\n paddings = array_ops.concat([array_ops.zeros([real_frames_rank - 1, 2], dtype=frame_length.dtype), [[0, math_ops.maximum(0, frame_length - real_frames_shape[-1])]]], 0)\n real_frames = array_ops.pad(real_frames, paddings)\n elif real_frames.shape.as_list()[-1] > frame_length_static:\n real_frames = real_frames[..., :frame_length_static]\n elif real_frames.shape.as_list()[-1] < frame_length_static:\n pad_amount = frame_length_static - real_frames.shape.as_list()[-1]\n real_frames = array_ops.pad(real_frames, [[0, 0]] * (real_frames.shape.ndims - 1) + [[0, pad_amount]])\n if frame_length_static is not None and real_frames.shape.ndims is not None:\n real_frames.set_shape([None] * (real_frames.shape.ndims - 1) + [frame_length_static])\n if window_fn is not None:\n window = window_fn(frame_length, dtype=stfts.dtype.real_dtype)\n real_frames *= window\n return reconstruction_ops.overlap_and_add(real_frames, frame_step)", "docstring": "Computes the inverse [Short-time Fourier Transform][stft] of `stfts`.\n\nTo reconstruct an original waveform, a complementary window function should\nbe used with `inverse_stft`. Such a window function can be constructed with\n`tf.signal.inverse_stft_window_fn`.\nExample:\n\n```python\nframe_length = 400\nframe_step = 160\nwaveform = tf.random.normal(dtype=tf.float32, shape=[1000])\nstft = tf.signal.stft(waveform, frame_length, frame_step)\ninverse_stft = tf.signal.inverse_stft(\n stft, frame_length, frame_step,\n window_fn=tf.signal.inverse_stft_window_fn(frame_step))\n```\n\nIf a custom `window_fn` is used with `tf.signal.stft`, it must be passed to\n`tf.signal.inverse_stft_window_fn`:\n\n```python\nframe_length = 400\nframe_step = 160\nwindow_fn = tf.signal.hamming_window\nwaveform = tf.random.normal(dtype=tf.float32, shape=[1000])\nstft = tf.signal.stft(\n waveform, frame_length, frame_step, window_fn=window_fn)\ninverse_stft = tf.signal.inverse_stft(\n stft, frame_length, frame_step,\n window_fn=tf.signal.inverse_stft_window_fn(\n frame_step, forward_window_fn=window_fn))\n```\n\nImplemented with TPU/GPU-compatible ops and supports gradients.\n\nArgs:\n stfts: A `complex64`/`complex128` `[..., frames, fft_unique_bins]`\n `Tensor` of STFT bins representing a batch of `fft_length`-point STFTs\n where `fft_unique_bins` is `fft_length // 2 + 1`\n frame_length: An integer scalar `Tensor`. The window length in samples.\n frame_step: An integer scalar `Tensor`. The number of samples to step.\n fft_length: An integer scalar `Tensor`. The size of the FFT that produced\n `stfts`. If not provided, uses the smallest power of 2 enclosing\n `frame_length`.\n window_fn: A callable that takes a window length and a `dtype` keyword\n argument and returns a `[window_length]` `Tensor` of samples in the\n provided datatype. 
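A round-trip check assembled from the documented pieces (frame sizes are arbitrary; the signal edges reconstruct imperfectly because fewer frames overlap there):

```python
import tensorflow as tf

frame_length, frame_step = 256, 64
waveform = tf.random.normal([4096])
stft = tf.signal.stft(waveform, frame_length, frame_step)
recon = tf.signal.inverse_stft(
    stft, frame_length, frame_step,
    window_fn=tf.signal.inverse_stft_window_fn(frame_step))

# Compare only the interior, where the overlap-add fully covers the signal.
err = tf.reduce_max(tf.abs(waveform[frame_length:-frame_length]
                           - recon[frame_length:-frame_length]))
print(float(err))  # close to 0 in the interior
```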
If set to `None`, no windowing is used.\n name: An optional name for the operation.\n\nReturns:\n A `[..., samples]` `Tensor` of `float32`/`float64` signals representing\n the inverse STFT for each input STFT in `stfts`.\n\nRaises:\n ValueError: If `stfts` is not at least rank 2, `frame_length` is not scalar,\n `frame_step` is not scalar, or `fft_length` is not scalar.\n\n[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform"} +{"repo": "tensorflow", "function": "def min_max_variable_partitioner(max_partitions=1, axis=0, min_slice_size=256 << 10, bytes_per_string_element=16):\n\n def _partitioner(shape, dtype):\n \"\"\"Partitioner that partitions list for a variable of given shape and type.\n\n Ex: Consider partitioning a variable of type float32 with\n shape=[1024, 1024].\n If `max_partitions` >= 16, this function would return\n [(1024 * 1024 * 4) / (256 * 1024), 1] = [16, 1].\n If `max_partitions` < 16, this function would return\n [`max_partitions`, 1].\n\n Args:\n shape: Shape of the variable.\n dtype: Type of the variable.\n\n Returns:\n List of partitions for each axis (currently only one axis can be\n partitioned).\n\n Raises:\n ValueError: If axis to partition along does not exist for the variable.\n \"\"\"\n if axis >= len(shape):\n raise ValueError(f'Cannot partition variable along axis {axis} when shape is only {shape}')\n dtype = dtypes.as_dtype(dtype)\n if dtype.base_dtype == dtypes.string:\n bytes_per_element = bytes_per_string_element\n else:\n bytes_per_element = dtype.size\n total_size_bytes = shape.num_elements() * bytes_per_element\n partitions = total_size_bytes / min_slice_size\n partitions_list = [1] * len(shape)\n partitions_list[axis] = max(1, min(shape.dims[axis].value, max_partitions, int(math.ceil(partitions))))\n return partitions_list\n return _partitioner", "docstring": "Partitioner to allocate minimum size per slice.\n\nReturns a partitioner that partitions the variable of given shape and dtype\nsuch that each partition has a minimum of `min_slice_size` slice of the\nvariable. The maximum number of such partitions (upper bound) is given by\n`max_partitions`.\n\nArgs:\n max_partitions: Upper bound on the number of partitions. Defaults to 1.\n axis: Axis along which to partition the variable. Defaults to 0.\n min_slice_size: Minimum size of the variable slice per partition. Defaults\n to 256K.\n bytes_per_string_element: If the `Variable` is of type string, this provides\n an estimate of how large each scalar in the `Variable` is.\n\nReturns:\n A partition function usable as the `partitioner` argument to\n `variable_scope` and `get_variable`."} +{"repo": "transformers", "function": "def sigmoid_cross_entropy_loss(inputs: torch.Tensor, labels: torch.Tensor, num_masks: int) -> torch.Tensor:\n criterion = nn.BCEWithLogitsLoss(reduction='none')\n cross_entropy_loss = criterion(inputs, labels)\n loss = cross_entropy_loss.mean(1).sum() / num_masks\n return loss", "docstring": "Args:\n inputs (`torch.Tensor`):\n A float tensor of arbitrary shape.\n labels (`torch.Tensor`):\n A tensor with the same shape as inputs. 
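The `[16, 1]` example in the inner docstring of `min_max_variable_partitioner` above is easy to re-derive by hand:

```python
import math

# A float32 variable of shape [1024, 1024] with min_slice_size = 256KB.
shape = [1024, 1024]
bytes_per_element = 4  # float32
min_slice_size = 256 << 10
total_bytes = shape[0] * shape[1] * bytes_per_element
partitions = math.ceil(total_bytes / min_slice_size)
print(partitions)  # 16, so with max_partitions >= 16 the result is [16, 1]
```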
Stores the binary classification labels for each element in inputs\n (0 for the negative class and 1 for the positive class).\n\nReturns:\n loss (`torch.Tensor`): The computed loss."} +{"repo": "tensorflow", "function": "def _create_grad_indexed_slices_init(grad_output_slices, forward_input):\n assert isinstance(grad_output_slices, indexed_slices.IndexedSlices)\n assert isinstance(forward_input, tensor.Tensor)\n values_out = grad_output_slices.values\n indices_out = grad_output_slices.indices\n if values_out.shape.is_fully_defined():\n values_shape = tensor_shape.TensorShape([0] + values_out.shape.as_list()[1:])\n values = array_ops.zeros(values_shape, dtype=values_out.dtype, name='values_init')\n else:\n if forward_input.dtype == dtypes.resource:\n forward_shape = gen_resource_variable_ops.variable_shape(forward_input)\n else:\n forward_shape = array_ops.shape(forward_input)\n values_shape = array_ops.concat([[0], forward_shape[1:]], 0)\n values = array_ops.zeros(values_shape, dtype=values_out.dtype, name='values_init')\n indices = constant_op.constant([], indices_out.dtype, name='indices_init')\n if forward_input.dtype == dtypes.resource:\n shape = gen_resource_variable_ops.variable_shape(forward_input, name='shape_init')\n else:\n shape = array_ops.shape(forward_input, name='shape_init')\n return indexed_slices.IndexedSlices(values=values, indices=indices, dense_shape=shape)", "docstring": "Creates an IndexedSlices to pass as input to the while grad function.\n\nArgs:\n grad_output_slices: IndexedSlices. The corresponding while grad function\n output.\n forward_input: Tensor. The corresponding input to the forward while op.\n\nReturns:\n Zeros IndexedSlices, created in current Graph."} +{"repo": "starthinker", "function": "def commandline_parser(parser=None, arguments=None):\n if parser is None:\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent(' Command line to execute all tasks in a recipe once. ( Common Entry Point )\\n\\n This script dispatches all the tasks in a JSON recipe to handlers in sequence.\\n For each task, it calls a subprocess to execute the JSON instructions, waits\\n for the process to complete and dispatches the next task, until all tasks are\\n complete or a critical failure ( exception ) is raised.\\n\\n If an exception is raised in any task, all following tasks are not executed by design.\\n\\n Example: python run.py [path to recipe file]\\n Caution: This script does NOT check if the last job finished, potentially causing overruns.\\n Notes:\\n - To avoid running the entire script when debugging a single task, the command line\\n can easily replace \"all\" with the name of any \"task\" in the json. 
For example\n            python tool/recipe.py scripts/say_hello.json\n\n        - Can be easily replaced with the following to run only the \"hello\" task:\n            python task/hello/run.py scripts/say_hello.json\n\n        - Or specified further to run only the second hello task:\n            python task/hello/run.py scripts/say_hello.json -i 2\n\n    '))\n    if arguments is None:\n        parser.add_argument('json', help='Path to recipe json file to load.')\n    elif '-j' in arguments:\n        parser.add_argument('--json', '-j', help='Path to recipe json file to load.')\n    if arguments is None or '-p' in arguments:\n        parser.add_argument('--project', '-p', help='Cloud ID of Google Cloud Project.', default=None)\n    if arguments is None or '-k' in arguments:\n        parser.add_argument('--key', '-k', help='API Key of Google Cloud Project.', default=None)\n    if arguments is None or '-u' in arguments:\n        parser.add_argument('--user', '-u', help='Path to USER credentials json file.', default=None)\n    if arguments is None or '-s' in arguments:\n        parser.add_argument('--service', '-s', help='Path to SERVICE credentials json file.', default=None)\n    if arguments is None or '-c' in arguments:\n        parser.add_argument('--client', '-c', help='Path to CLIENT credentials json file.', default=None)\n    if arguments is None or '-t' in arguments:\n        parser.add_argument('--task', '-t', help='Task number of the task to run starting at 1.', default=None, type=int)\n    if arguments is None or '-v' in arguments:\n        parser.add_argument('--verbose', '-v', help='Print all the steps as they happen.', action='store_true')\n    if arguments is None or '-f' in arguments:\n        parser.add_argument('--force', '-force', help='Not used but included for compatibility with another script.', action='store_true')\n    if arguments is None or '-tp' in arguments:\n        parser.add_argument('--trace_print', '-tp', help='Execution trace written to stdout.', action='store_true')\n    if arguments is None or '-tf' in arguments:\n        parser.add_argument('--trace_file', '-tf', help='Execution trace written to file.', action='store_true')\n    if arguments is None or '-ni' in arguments:\n        parser.add_argument('--no_input', '-ni', help='Raise exception if fields requiring input are in recipe.', action='store_true')\n    return parser", "docstring": "Used in StarThinker scripts as entry point for command line calls.\n\nDefines standard parameters used by almost every entry point.\n\nUsage example:\n\n```\nimport argparse\nfrom starthinker.util.configuration import commandline_parser\n\nif __name__ == \"__main__\":\n\n  # custom parameters\n  parser = argparse.ArgumentParser()\n  parser.add_argument('custom', help='custom parameter to be added.')\n\n  # initialize project\n  parser = commandline_parser(parser=parser, arguments=['-c', '-u'])\n\n  # access arguments\n  args = parser.parse_args()\n  print(args.client)\n```\n\nArgs:\n    * parser: (ArgumentParser) optional custom argument parser\n    * arguments: (list) optional list of parameters to use when invoking, all set if None\n\nReturns:\n    ArgumentParser - parser with added parameters"} +{"repo": "transformers", "function": "def create_column_token_type_ids_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]:\n    table_column_ids = list(zip(*table_values))[1] if table_values else []\n    return [0] * (1 + len(query_ids) + 1) + list(table_column_ids)", "docstring": "Creates the column token type IDs according to the query token IDs and a list of table values.\n\nArgs:\n    query_ids (`List[int]`): list of token IDs corresponding to the query.\n    table_values (`List[TableValue]`): list of table values, which are named 
tuples containing the\n token value, the column ID and the row ID of said token.\n\nReturns:\n `List[int]`: List of ints containing the column token type IDs values."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, image_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=True, return_dict: Optional[bool]=None, return_loss: Optional[bool]=None) -> Union[BridgeTowerContrastiveOutput, Tuple[torch.FloatTensor]]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.bridgetower(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, pixel_values=pixel_values, pixel_mask=pixel_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, image_embeds=image_embeds, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict)\n pooler_output = outputs.pooler_output if return_dict else outputs[2]\n hidden_states_txt, hidden_states_img, hidden_states_cross_modal = outputs.hidden_states if return_dict else outputs[3]\n text_embeds = hidden_states_txt[-1]\n image_embeds = hidden_states_img[-1]\n image_embeds_with_ln = self.bridgetower.vision_model.visual.forward_post(image_embeds)\n image_token_type_embeddings = self.bridgetower.token_type_embeddings(torch.full((1,), 1, dtype=torch.long, device=self.bridgetower.token_type_embeddings.weight.device)).expand_as(image_embeds_with_ln)\n image_embeds = self.bridgetower.cross_modal_image_transform(image_embeds_with_ln) + image_token_type_embeddings\n text_embeds = nn.functional.normalize(self.itc_text_head(text_embeds[:, 0, :]), dim=-1, p=2)\n image_embeds = nn.functional.normalize(self.itc_image_head(image_embeds[:, 0, :]), dim=-1, p=2).to(device=text_embeds.device)\n cross_embeds = nn.functional.normalize(self.itc_cross_modal_head(pooler_output), dim=-1, p=2).to(device=text_embeds.device)\n logits = torch.stack([text_embeds, image_embeds, cross_embeds], dim=-2)\n logit_scale = self.logit_scale.exp().to(device=text_embeds.device)\n logits_text_to_image = torch.matmul(text_embeds, image_embeds.t()) * logit_scale\n logits_text_to_cross = torch.matmul(text_embeds, cross_embeds.t()) * logit_scale\n logits_image_to_cross = torch.matmul(image_embeds, cross_embeds.t()) * logit_scale\n itc_loss = None\n if return_loss:\n labels = torch.arange(len(logits), device=logits.device)\n text_to_image_loss = nn.functional.cross_entropy(logits_text_to_image, labels)\n text_to_cross_loss = nn.functional.cross_entropy(logits_text_to_cross, labels)\n image_to_cross_loss = nn.functional.cross_entropy(logits_image_to_cross, labels)\n itc_loss = (text_to_image_loss + text_to_cross_loss + image_to_cross_loss) / 3.0\n if not return_dict:\n output = (logits, text_embeds, image_embeds, cross_embeds) + outputs[3:]\n return (itc_loss,) + output if itc_loss is not None else output\n return BridgeTowerContrastiveOutput(loss=itc_loss, logits=logits, text_embeds=text_embeds, image_embeds=image_embeds, cross_embeds=cross_embeds, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, 
*optional*):\n Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `pixel_values` into patch embeddings.\nreturn_loss (`bool`, *optional*):\n Whether or not to return the contrastive loss.\n\nExamples:\n\n```python\n>>> from transformers import BridgeTowerProcessor, BridgeTowerForContrastiveLearning\n>>> import requests\n>>> from PIL import Image\n>>> import torch\n\n>>> image_urls = [\n... \"https://farm4.staticflickr.com/3395/3428278415_81c3e27f15_z.jpg\",\n... \"http://images.cocodataset.org/val2017/000000039769.jpg\",\n... ]\n>>> texts = [\"two dogs in a car\", \"two cats sleeping on a couch\"]\n>>> images = [Image.open(requests.get(url, stream=True).raw) for url in image_urls]\n\n>>> processor = BridgeTowerProcessor.from_pretrained(\"BridgeTower/bridgetower-large-itm-mlm-itc\")\n>>> model = BridgeTowerForContrastiveLearning.from_pretrained(\"BridgeTower/bridgetower-large-itm-mlm-itc\")\n\n>>> inputs = processor(images, texts, padding=True, return_tensors=\"pt\")\n>>> loss = model(**inputs, return_loss=True).loss\n\n>>> inputs = processor(images, texts[::-1], padding=True, return_tensors=\"pt\")\n>>> loss_swapped = model(**inputs, return_loss=True).loss\n\n>>> print(\"Loss\", round(loss.item(), 4))\nLoss 0.0019\n\n>>> print(\"Loss with swapped images\", round(loss_swapped.item(), 4))\nLoss with swapped images 2.126\n```"} +{"repo": "keras", "function": "def _step(time, output_ta_t, prev_output, *states):\n current_input = tuple((ta[time] for ta in input_ta))\n current_input = tree.pack_sequence_as(inputs, current_input)\n mask_t = masking_fn(time)\n output, new_states = step_function(current_input, tuple(states) + tuple(constants))\n flat_output = tree.flatten(output)\n flat_mask_output = flat_zero_output if zero_output_for_mask else tree.flatten(prev_output)\n flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output)\n flat_state = tree.flatten(states)\n flat_new_state = tree.flatten(new_states)\n flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state)\n new_states = tree.pack_sequence_as(new_states, flat_final_state)\n ta_index_to_write = time if return_all_outputs else 0\n for ta, out in zip(output_ta_t, flat_new_output):\n ta[ta_index_to_write] = out\n return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states)", "docstring": "RNN step function.\n\nArgs:\n time: Current timestep value.\n output_ta_t: TensorArray.\n prev_output: tuple of outputs from time - 1.\n *states: List of states.\n\nReturns:\n Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`"} +{"repo": "transformers", "function": "class ProphetNetDecoderModelOutput(ModelOutput):\n last_hidden_state: torch.FloatTensor\n last_hidden_state_ngram: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Tuple[torch.FloatTensor]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\nArgs:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`):\n Sequence of main stream hidden-states at the output of the last layer of 
the decoder of the model.\n\n        If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n        hidden_size)` is output.\n    last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`):\n        Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model.\n    past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n        List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,\n        num_attn_heads, decoder_sequence_length, embed_size_per_head)`.\n\n        Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be\n        used (see `past_key_values` input) to speed up sequential decoding.\n    hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n        shape `(batch_size, decoder_sequence_length, hidden_size)`.\n\n        Hidden-states of the main stream of the decoder at the output of each layer plus the initial embedding outputs.\n    ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n        shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`.\n\n        Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding\n        outputs.\n    attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,\n        decoder_sequence_length, decoder_sequence_length)`.\n\n        Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the\n        self-attention heads.\n    ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,\n        decoder_sequence_length, decoder_sequence_length)`.\n\n        Attention weights of the predict stream of the decoder, after the attention softmax, used to compute the\n        weighted average in the self-attention heads.\n    cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads,\n        encoder_sequence_length, decoder_sequence_length)`.\n\n        Attention weights of the cross-attention layer of the decoder, after the attention softmax, used to\n        compute the weighted average in the cross-attention heads."} +{"repo": "transformers", "function": "def _preprocess_mask_arguments(config: PretrainedConfig, input_embeds: torch.Tensor, attention_mask: Optional[Union[torch.Tensor, BlockMask]], cache_position: torch.Tensor, past_key_values: Optional[Cache], layer_idx: Optional[int]) -> tuple[bool, Optional[Union[torch.Tensor, BlockMask]], int, int]:\n    if isinstance(attention_mask, (torch.Tensor, BlockMask)) and len(attention_mask.shape) == 4:\n        return (True, attention_mask, None, None)\n    if 
config._attn_implementation not in ALL_MASK_ATTENTION_FUNCTIONS._global_mapping:\n return (True, None, None, None)\n if attention_mask is not None and attention_mask.ndim == 2:\n attention_mask = attention_mask.to(device=cache_position.device, dtype=torch.bool)\n if past_key_values is not None:\n kv_length, kv_offset = past_key_values.get_mask_sizes(cache_position, layer_idx)\n else:\n kv_length, kv_offset = (input_embeds.shape[1], 0)\n return (False, attention_mask, kv_length, kv_offset)", "docstring": "Perform some common pre-processing of the mask arguments we get from the modeling code. Mostly determine the\nkey-value length and offsets, and if we should early exit or not.\n\nArgs:\n config (`PretrainedConfig`):\n The model config.\n input_embeds (`torch.Tensor`):\n The input embeddings of shape (batch_size, query_length, hidden_dim). This is used only to infer the\n batch size, query length and dtype.\n attention_mask (`torch.Tensor`, optional):\n The 2D attention mask corresponding to padded tokens of shape (batch_size, number_of_seen_tokens+q_length).\n It can also be an already prepared 4D mask, in which case it is returned as-is.\n cache_position (`torch.Tensor`):\n A tensor of shape (query_length,) indicating the current indices of the input sequence elements.\n past_key_values (`Cache`, optional):\n The past key values, if we use a cache.\n layer_idx (`int`, optional):\n If `past_key_values` is not None, this is the layer index of the cache from which to get the key-value\n length and offset. Indeed, for hybrid caches, different layers may return different lengths.\n\nReturns:\n early_exit (`bool`):\n Whether we should early exit mask creation, and return the mask as-is.\n attention_mask (`torch.Tensor` or `BlockMask` or `None`):\n The attention mask to either return immediately, or to use in downstream mask creation.\n kv_length (`int`):\n The size that the key and value states will have during the attention computation.\n kv_offset (`int`):\n An offset to indicate at which first position the key and values states will refer to."} +{"repo": "transformers", "function": "def call(self, input_ids: TFModelInputType | None=None, bbox: np.ndarray | tf.Tensor | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:\n outputs = self.layoutlm(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n pooled_output = outputs[1]\n pooled_output = self.dropout(inputs=pooled_output, training=training)\n logits = self.classifier(inputs=pooled_output)\n loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels 
(`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\nReturns:\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, TFLayoutLMForSequenceClassification\n>>> import tensorflow as tf\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"microsoft/layoutlm-base-uncased\")\n>>> model = TFLayoutLMForSequenceClassification.from_pretrained(\"microsoft/layoutlm-base-uncased\")\n\n>>> words = [\"Hello\", \"world\"]\n>>> normalized_word_boxes = [637, 773, 693, 782], [698, 773, 733, 782]\n\n>>> token_boxes = []\n>>> for word, box in zip(words, normalized_word_boxes):\n... word_tokens = tokenizer.tokenize(word)\n... token_boxes.extend([box] * len(word_tokens))\n>>> # add bounding boxes of cls + sep tokens\n>>> token_boxes = [[0, 0, 0, 0]] + token_boxes + [[1000, 1000, 1000, 1000]]\n\n>>> encoding = tokenizer(\" \".join(words), return_tensors=\"tf\")\n>>> input_ids = encoding[\"input_ids\"]\n>>> attention_mask = encoding[\"attention_mask\"]\n>>> token_type_ids = encoding[\"token_type_ids\"]\n>>> bbox = tf.convert_to_tensor([token_boxes])\n>>> sequence_label = tf.convert_to_tensor([1])\n\n>>> outputs = model(\n... input_ids=input_ids,\n... bbox=bbox,\n... attention_mask=attention_mask,\n... token_type_ids=token_type_ids,\n... labels=sequence_label,\n... )\n\n>>> loss = outputs.loss\n>>> logits = outputs.logits\n```"} +{"repo": "fhir-py", "function": "def field_content_length(msg: message.Message, field: Union[descriptor.FieldDescriptor, str]) -> int:\n if isinstance(field, str):\n field = _field_descriptor_for_name(msg, field)\n if field_is_repeated(field):\n return len(getattr(msg, field.name))\n return 1 if msg.HasField(field.name) else 0", "docstring": "Returns the size of the field.\n\nArgs:\n msg: The Message whose fields to examine.\n field: The FieldDescriptor or name of the field to examine.\n\nReturns:\n The number of elements at the provided field. If field describes a singular\n protobuf field, this will return 1. 
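The counting rule described for `field_content_length` maps directly onto plain protobuf calls; a small sketch using a stock proto2 message (not fhir-py itself, just the calls the helper builds on):

```python
from google.protobuf import descriptor_pb2

# Repeated fields report their length; singular fields report 1 if set,
# 0 otherwise.
msg = descriptor_pb2.FileDescriptorProto()
msg.name = "example.proto"
msg.dependency.extend(["a.proto", "b.proto"])

print(len(msg.dependency))           # repeated field -> 2
print(int(msg.HasField("name")))     # set singular field -> 1
print(int(msg.HasField("package")))  # unset singular field -> 0
```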
If the field is not set, returns 0."} +{"repo": "tensorflow", "function": "def _get_layer_broadcasters_from_rps(zero_broadcaster, source_rps, target_rps):\n if not isinstance(zero_broadcaster, _LayerBroadcaster):\n raise TypeError('Not a _LayerBroadcaster: ' + str(zero_broadcaster))\n assert len(source_rps) == len(target_rps)\n if not source_rps:\n return [zero_broadcaster]\n next_broadcaster = zero_broadcaster.next_layer(source_rps[0], target_rps[0])\n tail_broadcasters = _get_layer_broadcasters_from_rps(next_broadcaster, source_rps[1:], target_rps[1:])\n return [zero_broadcaster] + tail_broadcasters", "docstring": "Get LayerBroadcasters from RowPartitions.\n\n *--zero_broadcaster->*\n | |\n source_rps[0] target_rps[0]\n | |\n V V\n *---result[1]------->*\n | |\n source_rps[1] target_rps[1]\n | |\n V V\n *---result[2]------->*\n .\n .\n .\n *---result[k-1]----->*\n | |\n source_rps[k] target_rps[k]\n | |\n V V\n *---result[k]------->*\n\nNote: result[0] = zero_broadcaster\n\nArgs:\n zero_broadcaster: a broadcaster between the source and target row\n partitions' rows, and equal to result[0].\n source_rps: source row partitions.\n target_rps: target row partitions (same length as source_rps).\n\nReturns:\n result: a list of LayerBroadcasters."} +{"repo": "tensorflow", "function": "def run(self, fn, args=(), kwargs=None, options=None):\n validate_run_function(fn)\n fn, args, kwargs = _maybe_partial_apply_variables(fn, args, kwargs)\n fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())\n options = options or distribute_lib.RunOptions()\n return self.extended.tpu_run(fn, args, kwargs, options)", "docstring": "Run `fn` on each replica, with the given arguments.\n\nExecutes ops specified by `fn` on each replica. If `args` or `kwargs` have\n\"per-replica\" values, such as those produced by a \"distributed `Dataset`\",\nwhen `fn` is executed on a particular replica, it will be executed with the\ncomponent of those \"per-replica\" values that correspond to that replica.\n\n`fn` may call `tf.distribute.get_replica_context()` to access members such\nas `all_reduce`.\n\nAll arguments in `args` or `kwargs` should either be nest of tensors or\nper-replica objects containing tensors or composite tensors.\n\nUsers can pass strategy specific options to `options` argument. An example\nto enable bucketizing dynamic shapes in `TPUStrategy.run`\nis:\n\n>>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')\n>>> tf.config.experimental_connect_to_cluster(resolver)\n>>> tf.tpu.experimental.initialize_tpu_system(resolver)\n>>> strategy = tf.distribute.experimental.TPUStrategy(resolver)\n\n>>> options = tf.distribute.RunOptions(\n... experimental_bucketizing_dynamic_shape=True)\n\n>>> dataset = tf.data.Dataset.range(\n... strategy.num_replicas_in_sync, output_type=dtypes.float32).batch(\n... strategy.num_replicas_in_sync, drop_remainder=True)\n>>> input_iterator = iter(strategy.experimental_distribute_dataset(dataset))\n\n>>> @tf.function()\n... def step_fn(inputs):\n... output = tf.reduce_sum(inputs)\n... return output\n\n>>> strategy.run(step_fn, args=(next(input_iterator),), options=options)\n\nArgs:\n fn: The function to run. The output must be a `tf.nest` of `Tensor`s.\n args: (Optional) Positional arguments to `fn`.\n kwargs: (Optional) Keyword arguments to `fn`.\n options: (Optional) An instance of `tf.distribute.RunOptions` specifying\n the options to run `fn`.\n\nReturns:\n Merged return value of `fn` across replicas. 
The structure of the return\n value is the same as the return value from `fn`. Each element in the\n structure can either be \"per-replica\" `Tensor` objects or `Tensor`s\n (for example, if running on a single replica)."} +{"repo": "starthinker", "function": "def report_to_rows(report):\n if type(report) is GeneratorType:\n leftovers = ''\n for chunk in report:\n data, extra = chunk.rsplit('\\n', 1)\n for row in csv_to_rows(leftovers + data):\n yield row\n leftovers = extra\n else:\n for row in csv_to_rows(report):\n yield row", "docstring": "Helper to convert DCM files into iterator of rows, memory efficient.\n\nUsage example:\n\n```\nfilename, report = report_file(...)\nrows = report_to_rows(report)\n```\n\nArgs:\n * report: (iterator or file) Either an iterator or file that will be\n converted to rows.\n\nReturns:\n * Iterator of lists representing each row."} +{"repo": "tensorflow", "function": "def process_update(x):\n if callable(x):\n update = lambda: process_update(x())\n return update()\n elif isinstance(x, ops.Operation):\n update = x\n elif hasattr(x, 'op'):\n update = x.op\n else:\n update = tensor_conversion.convert_to_tensor_v2_with_dispatch(x)\n reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])\n update._unconditional_update = update not in reachable\n return update", "docstring": "Standardize update ops.\n\nArgs:\n x: Tensor, op, or callable.\n\nReturns:\n An update op."} +{"repo": "keras", "function": "def get(self, path):\n if not path:\n parsed_path = '/vars'\n else:\n parsed_path = path\n weight_map = self.sharding_config['weight_map']\n filenames = weight_map.get(parsed_path) or weight_map.get('/' + parsed_path + '/vars')\n if filenames is not None:\n if not isinstance(filenames, list):\n filenames = [filenames]\n self.current_shard_filenames = filenames\n filename = filenames[0]\n else:\n self.current_shard_filenames = []\n filename = None\n if filename is not None and filename != self.current_shard_path.name:\n self.close()\n self.h5_file = self._get_h5_file(self.path.with_name(filename))\n return super().get(path)", "docstring": "Get the H5 entry group.\n\nThis method is only available in read mode. If the path is not found in\nthe current shard, it will switch to the correct shard.\n\nArgs:\n path: `str`. 
The variable path."} +{"repo": "tensorflow", "function": "def _make_source_table(self, source_list, is_tf_py_library):\n path_head = 'Source file path'\n num_nodes_head = '#(nodes)'\n num_tensors_head = '#(tensors)'\n num_dumps_head = '#(tensor dumps)'\n if is_tf_py_library:\n color = cli_shared.COLOR_GRAY\n lines = [RL('TensorFlow Python library file(s):', color)]\n else:\n color = cli_shared.COLOR_WHITE\n lines = [RL('File(s) outside TensorFlow Python library:', color)]\n if not source_list:\n lines.append(RL('[No files.]'))\n lines.append(RL())\n return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)\n path_column_width = max(max((len(item[0]) for item in source_list)), len(path_head)) + 1\n num_nodes_column_width = max(max((len(str(item[2])) for item in source_list)), len(num_nodes_head)) + 1\n num_tensors_column_width = max(max((len(str(item[3])) for item in source_list)), len(num_tensors_head)) + 1\n head = RL(path_head + ' ' * (path_column_width - len(path_head)), color)\n head += RL(num_nodes_head + ' ' * (num_nodes_column_width - len(num_nodes_head)), color)\n head += RL(num_tensors_head + ' ' * (num_tensors_column_width - len(num_tensors_head)), color)\n head += RL(num_dumps_head, color)\n lines.append(head)\n for file_path, _, num_nodes, num_tensors, num_dumps, first_line_num in source_list:\n path_attributes = [color]\n if source_utils.is_extension_uncompiled_python_source(file_path):\n path_attributes.append(debugger_cli_common.MenuItem(None, 'ps %s -b %d' % (file_path, first_line_num)))\n line = RL(file_path, path_attributes)\n line += ' ' * (path_column_width - len(line))\n line += RL(str(num_nodes) + ' ' * (num_nodes_column_width - len(str(num_nodes))), color)\n line += RL(str(num_tensors) + ' ' * (num_tensors_column_width - len(str(num_tensors))), color)\n line += RL(str(num_dumps), color)\n lines.append(line)\n lines.append(RL())\n return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)", "docstring": "Make a table summarizing the source files that create nodes and tensors.\n\nArgs:\n source_list: List of source files and related information as a list of\n tuples (file_path, is_tf_library, num_nodes, num_tensors, num_dumps,\n first_line).\n is_tf_py_library: (`bool`) whether this table is for files that belong\n to the TensorFlow Python library.\n\nReturns:\n The table as a `debugger_cli_common.RichTextLines` object."} +{"repo": "tensorflow", "function": "def _channel_flatten_input(x, data_format):\n graph = ops.get_default_graph()\n cache_key = (graph, x.ref(), data_format)\n if cache_key not in _channel_flatten_input_cache:\n x_shape = array_ops.shape(x)\n neg_ones = constant_op.constant([-1], dtype=x_shape.dtype)\n if data_format == b'NCHW':\n order = [1, 0, 2, 3, 4]\n shape = array_ops.concat([x_shape[1:2], neg_ones, x_shape[3:]], axis=0)\n reverse_order = order\n else:\n order = [1, 2, 3, 0, 4]\n shape = array_ops.concat([x_shape[1:4], neg_ones], axis=0)\n reverse_order = [3, 0, 1, 2, 4]\n x = array_ops.transpose(x, order)\n reverse_shape = array_ops.shape(x)\n x = array_ops.reshape(x, shape)\n outputs = (x, reverse_order, reverse_shape)\n _channel_flatten_input_cache[cache_key] = outputs\n else:\n outputs = _channel_flatten_input_cache[cache_key]\n return outputs", "docstring": "Merge the stack dimension with the channel dimension.\n\nIf S is pfor's stacking dimension, then,\n - for SNCHW, we transpose to NSCHW. 
If N dimension has size 1, the transpose\n should be cheap.\n - for SNHWC, we transpose to NHWSC.\nWe then merge the S and C dimension.\n\nArgs:\n x: tensor_lib.Tensor to transform.\n data_format: \"NCHW\" or \"NHWC\".\n\nReturns:\n A 3-element tuple with the transformed value, along with the shape for\n reshape and order for transpose required to transform back."} +{"repo": "transformers", "function": "class DPRContextEncoderOutput(ModelOutput):\n pooler_output: torch.FloatTensor\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Class for outputs of [`DPRContextEncoder`].\n\nArgs:\n pooler_output (`torch.FloatTensor` of shape `(batch_size, embeddings_size)`):\n The DPR encoder outputs the *pooler_output* that corresponds to the context representation. Last layer\n hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.\n This output is to be used to embed contexts for nearest neighbors queries with question embeddings.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "tensorflow", "function": "def _prepare_skip_target_masks(self):\n return [l is None for l in self.loss_functions]", "docstring": "Boolean mask for whether the target in the output list should be skipped.\n\nIf the loss function corresponding to a model output is None, then this\noutput will be skipped during total loss calculation and feed targets\npreparation.\n\nReturns:\n A boolean list for whether the corresponding target in the output list\n should be skipped during loss calculation."} +{"repo": "tensorflow", "function": "def _dimension_sizes(x):\n dynamic_shape = array_ops.shape(x)\n rank = x.get_shape().rank\n rank_is_known = rank is not None\n if rank_is_known and rank == 0:\n return (1,)\n if rank_is_known and rank > 0:\n static_shape = x.get_shape().as_list()\n sizes = [int(size) if size is not None else dynamic_shape[i] for i, size in enumerate(static_shape)]\n return sizes\n has_rank_zero = math_ops.equal(array_ops.rank(x), 0)\n return cond.cond(has_rank_zero, lambda: array_ops.constant([1]), lambda: dynamic_shape)", "docstring": "Gets the dimension sizes of a tensor `x`.\n\nIf a size can be determined statically it is returned as an integer,\notherwise as a tensor.\n\nIf `x` is a scalar it is treated as rank 1 size 1.\n\nArgs:\n x: A `Tensor`.\n\nReturns:\n Dimension sizes."} +{"repo": "tensorflow", "function": "def __init__(self, dataset_id, processing_mode, address, element_spec, protocol, data_transfer_protocol, job_name=None, consumer_index=None, num_consumers=None, max_outstanding_requests=None, task_refresh_interval_hint_ms=None, cross_trainer_cache=None, target_workers='AUTO'):\n if (consumer_index is None) != (
num_consumers is None):\n raise ValueError('Must either set both `consumer_index` and `num_consumers`, or neither. ', f'consumer_index={consumer_index}, num_consumers={num_consumers}')\n if num_consumers is not None and job_name is None:\n raise ValueError(f'`job_name` must be set when setting `num_consumers`. num_consumers was set to {num_consumers}.')\n processing_mode_def = data_service_pb2.ProcessingModeDef(sharding_policy=_get_validated_sharding_policy(processing_mode)._to_proto())\n if job_name is None:\n job_name = ''\n if max_outstanding_requests is None:\n max_outstanding_requests = dataset_ops.AUTOTUNE\n if task_refresh_interval_hint_ms is None:\n task_refresh_interval_hint_ms = dataset_ops.AUTOTUNE\n self._dataset_id = _to_tensor(dataset_id)\n self._processing_mode = ops.convert_to_tensor(processing_mode_def.SerializeToString(), dtype=dtypes.string, name='processing_mode')\n self._address = ops.convert_to_tensor(address, dtype=dtypes.string, name='address')\n self._protocol = ops.convert_to_tensor(protocol, dtype=dtypes.string, name='protocol')\n self._job_name = ops.convert_to_tensor(job_name, dtype=dtypes.string, name='job_name')\n self._consumer_index = ops.convert_to_tensor(-1 if consumer_index is None else consumer_index, dtype=dtypes.int64, name='consumer_index')\n self._num_consumers = ops.convert_to_tensor(-1 if num_consumers is None else num_consumers, dtype=dtypes.int64, name='num_consumers')\n self._max_outstanding_requests = ops.convert_to_tensor(max_outstanding_requests, dtype=dtypes.int64, name='max_outstanding_requests')\n self._element_spec = element_spec\n uncompress_func = structured_function.StructuredFunctionWrapper(lambda x: compression_ops.uncompress(x, output_spec=element_spec), transformation_name='DataServiceDataset.uncompress()', input_structure=tensor.TensorSpec(shape=(), dtype=dtypes.variant))\n cross_trainer_cache_options = cross_trainer_cache._to_proto().SerializeToString() if cross_trainer_cache else None\n compat_kwargs = {}\n if data_transfer_protocol is not None:\n compat_kwargs['data_transfer_protocol'] = data_transfer_protocol\n uncompress = True\n variant_tensor = gen_experimental_dataset_ops.data_service_dataset_v4(dataset_id=self._dataset_id, processing_mode=self._processing_mode, address=self._address, protocol=self._protocol, job_name=self._job_name, consumer_index=self._consumer_index, num_consumers=self._num_consumers, max_outstanding_requests=self._max_outstanding_requests, task_refresh_interval_hint_ms=task_refresh_interval_hint_ms, iteration_counter=gen_experimental_dataset_ops.dummy_iteration_counter(), target_workers=target_workers, uncompress=uncompress, uncompress_fn=uncompress_func.function, cross_trainer_cache_options=cross_trainer_cache_options, **compat_kwargs, **self._flat_structure)\n super(_DataServiceDatasetV2, self).__init__(variant_tensor)", "docstring": "Constructs a _DataServiceDatasetV2.\n\nArgs:\n dataset_id: The dataset id for the dataset to read from.\n processing_mode: A `tf.data.experimental.service.ShardingPolicy`\n specifying how to shard the dataset among tf.data workers. See\n `tf.data.experimental.service.ShardingPolicy` for details. For backwards\n compatibility, `processing_mode` may also be set to the strings\n `\"parallel_epochs\"` or `\"distributed_epoch\"`, which are respectively\n equivalent to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.\n address: The tf.data service address, e.g. 
\"localhost:5000\".\n element_spec: The dataset element spec for the dataset to read from.\n protocol: The protocol to use for communicating with the tf.data service,\n e.g. \"grpc\".\n data_transfer_protocol: (Optional.) The protocol to use for transferring\n data with the tf.data service. If not provided, a protocol is determined\n at runtime.\n job_name: (Optional.) The name of the job. If provided, it must be a\n non-empty string or Tensor. This argument makes it possible for multiple\n datasets to share the same job. The default behavior is that the dataset\n creates anonymous, exclusively owned jobs.\n consumer_index: (Optional.) The index of the consumer in the range from\n `0` to `num_consumers`. Must be specified alongside `num_consumers`.\n When specified, consumers will read from the job in a strict round-robin\n order, instead of the default first-come-first-served order.\n num_consumers: (Optional.) The number of consumers which will consume from\n the job. Must be specified alongside `consumer_index`. When specified,\n consumers will read from the job in a strict round-robin order, instead\n of the default first-come-first-served order. When `num_consumers` is\n specified, the dataset must have infinite cardinality to prevent a\n producer from running out of data early and causing consumers to go out\n of sync.\n max_outstanding_requests: (Optional.) A limit on how many elements may be\n requested at the same time. You can use this option to control the\n amount of memory used, since `distribute` won't use more than\n `element_size` * `max_outstanding_requests` of memory.\n task_refresh_interval_hint_ms: (Optional.) A hint for how often to query\n the dispatcher for task changes.\n cross_trainer_cache: (Optional.) If a `CrossTrainerCache` object is\n provided, dataset iteration will be shared across concurrently running\n trainers. See\n https://www.tensorflow.org/api_docs/python/tf/data/experimental/service#sharing_tfdata_service_with_concurrent_trainers\n for details.\n target_workers: (Optional.) Which workers to read from. If `\"AUTO\"`,\n tf.data runtime decides which workers to read from. If `\"ANY\"`, reads\n from any tf.data service workers. If `\"LOCAL\"`, only reads from local\n in-process tf.data service workers. `\"AUTO\"` works well for most cases,\n while users can specify other targets. For example, `\"LOCAL\"` helps\n avoid RPCs and data copy if every TF worker colocates with a tf.data\n service worker. Consumers of a shared job must use the same\n `target_workers`. Defaults to `\"AUTO\"`."} +{"repo": "tensorflow", "function": "def assert_rank_in(x, ranks, data=None, summarize=None, message=None, name=None):\n with ops.name_scope(name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):\n if not isinstance(x, sparse_tensor.SparseTensor):\n x = ops.convert_to_tensor(x, name='x')\n ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])\n message = _message_prefix(message)\n if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):\n name = ''\n else:\n name = x.name\n if data is None:\n data = [message, 'Tensor %s must have rank in' % name] + list(ranks) + ['Received shape: ', array_ops.shape(x)]\n try:\n assert_op = _assert_ranks_condition(x, ranks, _static_rank_in, _dynamic_rank_in, data, summarize)\n except ValueError as e:\n if e.args[0] == 'Static rank condition failed':\n raise ValueError('%sTensor %s must have rank in %s. 
Received rank %d, shape %s' % (message, name, tuple((r.item() for r in e.args[2])), e.args[1], x.get_shape()))\n else:\n raise\n return assert_op", "docstring": "Assert `x` has rank in `ranks`.\n\nExample of adding a dependency to an operation:\n\n```python\nwith tf.control_dependencies([tf.compat.v1.assert_rank_in(x, (2, 4))]):\n output = tf.reduce_sum(x)\n```\n\nArgs:\n x: Numeric `Tensor`.\n ranks: Iterable of scalar `Tensor` objects.\n data: The tensors to print out if the condition is False. Defaults to\n error message and first few entries of `x`.\n summarize: Print this many entries of each tensor.\n message: A string to prefix to the default message.\n name: A name for this operation (optional).\n Defaults to \"assert_rank_in\".\n\nReturns:\n Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.\n If static checks determine `x` has matching rank, a `no_op` is returned.\n\nRaises:\n ValueError: If static checks determine `x` has mismatched rank."} +{"repo": "keras", "function": "def view_as_real(x):\n if any_symbolic_tensors((x,)):\n return ViewAsReal().symbolic_call(x)\n x = backend.convert_to_tensor(x)\n real_part = backend.numpy.real(x)\n imag_part = backend.numpy.imag(x)\n return backend.numpy.stack((real_part, imag_part), axis=-1)", "docstring": "Converts a complex tensor to a real tensor with shape `(..., 2)`,\nwhere the last dimension represents the real and imaginary components.\n\nArgs:\n x: A complex tensor.\n\nReturns:\n A real tensor where the last dimension contains the\n real and imaginary parts.\n\nExample:\n```\n>>> import numpy as np\n>>> from keras import ops\n\n>>> complex_tensor = np.array([1 + 2j, 3 + 4j])\n>>> real = ops.view_as_real(complex_tensor)\n>>> real\narray([[1., 2.],\n [3., 4.]])\n```"} +{"repo": "tf-quant-finance", "function": "def expected_exercise_fn(design, continuation_value, exercise_value):\n batch_design = tf.broadcast_to(tf.expand_dims(design, -1), design.shape + [continuation_value.shape[-1]])\n mask = tf.cast(exercise_value > 0, design.dtype)\n masked = tf.transpose(batch_design * mask, perm=(2, 1, 0))\n lhs = tf.matmul(masked, masked, transpose_a=True)\n lhs_pinv = tf.linalg.pinv(lhs)\n rhs = tf.matmul(masked, tf.expand_dims(tf.transpose(continuation_value), -1), transpose_a=True)\n beta = tf.linalg.matmul(lhs_pinv, rhs)\n continuation = tf.matmul(tf.transpose(batch_design, perm=(2, 1, 0)), beta)\n return tf.maximum(tf.transpose(tf.squeeze(continuation, -1)), 0.0)", "docstring": "Returns the expected continuation value for each path.\n\nArgs:\n design: A real `Tensor` of shape `[basis_size, num_samples]`.\n continuation_value: A `Tensor` of shape `[num_samples, payoff_dim]` and of\n the same dtype as `design`. The optimal value of the option conditional on\n not exercising now or earlier, taking future information into account.\n exercise_value: A `Tensor` of the same shape and dtype as\n `continuation_value`. 
Value of the option if exercised immediately at\n the current time.\n\nReturns:\n A `Tensor` of the same shape and dtype as `continuation_value` whose\n `(n, v)`-th entry represents the expected continuation value of sample path\n `n` under the `v`-th payoff scheme."} +{"repo": "tensorflow", "function": "def _is_framework_filename(filename):\n for pattern in _EXTERNAL_FILENAME_PATTERNS:\n if pattern.search(filename):\n return False\n for pattern in _FRAMEWORK_FILENAME_PATTERNS:\n if pattern.search(filename):\n return True\n for prefix in _FRAMEWORK_PATH_PREFIXES:\n if filename.startswith(prefix):\n return True\n return False", "docstring": "Returns whether a filename should be considered a part of the framework.\n\nA file is part of the framework if it does not match a pattern in\n_EXTERNAL_FILENAME_PATTERNS and it either matches a pattern in\n_FRAMEWORK_FILENAME_PATTERNS or starts with a _FRAMEWORK_PATH_PREFIXES prefix.\n\nArgs:\n filename: A filename string.\n\nReturns:\n Whether the filename should be considered to be internal to the\n TensorFlow framework for the purposes of reporting errors."} +{"repo": "beam", "function": "def GetMetrics(self, request, global_params=None):\n config = self.GetMethodConfig('GetMetrics')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Request the job status. To request the status of a job, we recommend using `projects.locations.jobs.getMetrics` with a [regional endpoint] (https://cloud.google.com/dataflow/docs/concepts/regional-endpoints). Using `projects.jobs.getMetrics` is not recommended, as you can only request the status of jobs that are running in `us-central1`.\n\nArgs:\n request: (DataflowProjectsJobsGetMetricsRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (JobMetrics) The response message."} +{"repo": "transformers", "function": "def get_unconditional_inputs(self, num_samples=1):\n last_hidden_state = torch.zeros((num_samples, 1, self.config.text_encoder.hidden_size), device=self.device, dtype=self.dtype)\n attention_mask = torch.zeros((num_samples, 1), device=self.device, dtype=torch.long)\n return MusicgenUnconditionalInput(encoder_outputs=(last_hidden_state,), attention_mask=attention_mask, guidance_scale=1.0)", "docstring": "Helper function to get null inputs for unconditional generation, enabling the model to be used without the\nfeature extractor or tokenizer.\n\nArgs:\n num_samples (int, *optional*):\n Number of audio samples to unconditionally generate.\n max_new_tokens (int, *optional*):\n Number of tokens to generate for each sample. 
More tokens means longer audio samples, at the expense of\n longer inference (since more audio tokens need to be generated per sample).\n\nExample:\n```python\n>>> from transformers import MusicgenForConditionalGeneration\n\n>>> model = MusicgenForConditionalGeneration.from_pretrained(\"facebook/musicgen-small\")\n\n>>> # get the unconditional (or 'null') inputs for the model\n>>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1)\n>>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)\n```"} +{"repo": "yapf", "function": "class ParameterListState(object):\n\n def __init__(self, opening_bracket, newline, opening_column):\n self.opening_bracket = opening_bracket\n self.has_split_before_first_param = newline\n self.opening_column = opening_column\n self.parameters = opening_bracket.parameters\n self.split_before_closing_bracket = False\n\n @property\n def closing_bracket(self):\n return self.opening_bracket.matching_bracket\n\n @property\n def has_typed_return(self):\n return self.closing_bracket.next_token.value == '->'\n\n @property\n @lru_cache()\n def has_default_values(self):\n return any((param.has_default_value for param in self.parameters))\n\n @property\n @lru_cache()\n def ends_in_comma(self):\n if not self.parameters:\n return False\n return self.parameters[-1].last_token.next_token.value == ','\n\n @property\n @lru_cache()\n def last_token(self):\n token = self.opening_bracket.matching_bracket\n while not token.is_comment and token.next_token:\n token = token.next_token\n return token\n\n @lru_cache()\n def LastParamFitsOnLine(self, indent):\n \"\"\"Return true if the last parameter fits on a single line.\"\"\"\n if not self.has_typed_return:\n return False\n if not self.parameters:\n return True\n total_length = self.last_token.total_length\n last_param = self.parameters[-1].first_token\n total_length -= last_param.total_length - len(last_param.value)\n return total_length + indent <= style.Get('COLUMN_LIMIT')\n\n @lru_cache()\n def SplitBeforeClosingBracket(self, indent):\n \"\"\"Return true if there's a split before the closing bracket.\"\"\"\n if style.Get('DEDENT_CLOSING_BRACKETS'):\n return True\n if self.ends_in_comma:\n return True\n if not self.parameters:\n return False\n total_length = self.last_token.total_length\n last_param = self.parameters[-1].first_token\n total_length -= last_param.total_length - len(last_param.value)\n return total_length + indent > style.Get('COLUMN_LIMIT')\n\n def Clone(self):\n clone = ParameterListState(self.opening_bracket, self.has_split_before_first_param, self.opening_column)\n clone.split_before_closing_bracket = self.split_before_closing_bracket\n clone.parameters = [param.Clone() for param in self.parameters]\n return clone\n\n def __repr__(self):\n return '[opening_bracket::%s, has_split_before_first_param::%s, opening_column::%d]' % (self.opening_bracket, self.has_split_before_first_param, self.opening_column)\n\n def __eq__(self, other):\n return hash(self) == hash(other)\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self, *args, **kwargs):\n return hash((self.opening_bracket, self.has_split_before_first_param, self.opening_column, (hash(param) for param in self.parameters)))", "docstring": "Maintains the state of function parameter list formatting decisions.\n\nAttributes:\n opening_bracket: The opening bracket of the parameter list.\n closing_bracket: The closing bracket of the parameter list.\n has_typed_return: True if the function definition has a typed 
return.\n ends_in_comma: True if the parameter list ends in a comma.\n last_token: Returns the last token of the function declaration.\n has_default_values: True if the parameters have default values.\n has_split_before_first_param: Whether there is a newline before the first\n parameter.\n opening_column: The position of the opening parameter before a newline.\n parameters: A list of parameter objects (Parameter).\n split_before_closing_bracket: Split before the closing bracket. Sometimes\n needed if the indentation would collide."} +{"repo": "tf-quant-finance", "function": "class CalibrationResult:\n alpha: types.RealTensor\n beta: types.RealTensor\n volvol: types.RealTensor\n rho: types.RealTensor", "docstring": "Collection of calibrated SABR parameters.\n\nFor a review of the SABR model and the conventions used, please see the\ndocstring for `SABRModel`, or for `calibration` below.\n\nAttributes:\n alpha: Rank-1 `Tensor` specifying the initial volatility levels.\n beta: Rank-1 `Tensor` specifying the exponents.\n volvol: Rank-1 `Tensor` specifying the vol-vol parameters.\n rho: Rank-1 `Tensor` specifying the correlations between the forward and\n the stochastic volatility."} +{"repo": "tensorflow", "function": "def extract_tensors_from_dataset(dataset):\n iterator = get_iterator(dataset)\n inputs, targets, sample_weight = unpack_iterator_input(iterator)\n return (inputs, targets, sample_weight)", "docstring": "Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.\n\nArgs:\n dataset: Dataset instance.\n\nReturns:\n Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None."} +{"repo": "tensorflow", "function": "def get_tensor(self, tensor_index, subgraph_index=0):\n return self._interpreter.GetTensor(tensor_index, subgraph_index)", "docstring": "Gets the value of the output tensor (get a copy).\n\nIf you wish to avoid the copy, use `tensor()`. This function cannot be used\nto read intermediate results.\n\nArgs:\n tensor_index: Tensor index of tensor to get. This value can be gotten from\n the 'index' field in get_output_details.\n subgraph_index: Index of the subgraph to fetch the tensor. 
Default value\n is 0, which means to fetch from the primary subgraph.\n\nReturns:\n a numpy array."} +{"repo": "transformers", "function": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_attention_mask = (attention_mask[:, None, None, :] == attention_mask[:, None, :, None])[:, :, -sequence_length:, :].to(dtype)\n padding_mask = causal_mask[:, :, :, :mask_length] + padding_attention_mask\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache,\n to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`torch.Tensor`):\n Batch size."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, visual_embeds: Optional[torch.FloatTensor]=None, visual_attention_mask: Optional[torch.LongTensor]=None, visual_token_type_ids: Optional[torch.LongTensor]=None, image_text_alignment: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: Optional[torch.LongTensor]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n index_to_gather = attention_mask.sum(1) - 2\n outputs = self.visual_bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, visual_embeds=visual_embeds, visual_attention_mask=visual_attention_mask, visual_token_type_ids=visual_token_type_ids, image_text_alignment=image_text_alignment, 
output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = outputs[0]\n index_to_gather = index_to_gather.unsqueeze(-1).unsqueeze(-1).expand(index_to_gather.size(0), 1, sequence_output.size(-1))\n pooled_output = torch.gather(sequence_output, 1, index_to_gather)\n pooled_output = self.dropout(pooled_output)\n logits = self.cls(pooled_output)\n reshaped_logits = logits.view(-1, self.num_labels)\n loss = None\n if labels is not None:\n loss_fct = nn.KLDivLoss(reduction='batchmean')\n log_softmax = nn.LogSoftmax(dim=-1)\n reshaped_logits = log_softmax(reshaped_logits)\n loss = loss_fct(reshaped_logits, labels.contiguous())\n if not return_dict:\n output = (reshaped_logits,) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return SequenceClassifierOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "visual_embeds (`torch.FloatTensor` of shape `(batch_size, visual_seq_length, visual_embedding_dim)`, *optional*):\n The embedded representation of the visual inputs, generally derived using an object detector.\nvisual_attention_mask (`torch.FloatTensor` of shape `(batch_size, visual_seq_length)`, *optional*):\n Mask to avoid performing attention on visual embeddings. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\nvisual_token_type_ids (`torch.LongTensor` of shape `(batch_size, visual_seq_length)`, *optional*):\n Segment token indices to indicate different portions of the visual embeds.\n\n [What are token type IDs?](../glossary#token-type-ids) The authors of VisualBERT set the\n *visual_token_type_ids* to *1* for all tokens.\nimage_text_alignment (`torch.LongTensor` of shape `(batch_size, visual_seq_length, alignment_number)`, *optional*):\n Image-Text alignment used to decide the position IDs of the visual embeddings.\nlabels (`torch.LongTensor` of shape `(batch_size, total_sequence_length)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
A KLDivLoss is computed between the labels and the returned logits.\n\nExample:\n\n```python\n# Assumption: *get_visual_embeddings(image)* gets the visual embeddings of the image in the batch.\nfrom transformers import AutoTokenizer, VisualBertForQuestionAnswering\nimport torch\n\ntokenizer = AutoTokenizer.from_pretrained(\"google-bert/bert-base-uncased\")\nmodel = VisualBertForQuestionAnswering.from_pretrained(\"uclanlp/visualbert-vqa\")\n\ntext = \"Who is eating the apple?\"\ninputs = tokenizer(text, return_tensors=\"pt\")\nvisual_embeds = get_visual_embeddings(image).unsqueeze(0)\nvisual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)\nvisual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)\n\ninputs.update(\n {\n \"visual_embeds\": visual_embeds,\n \"visual_token_type_ids\": visual_token_type_ids,\n \"visual_attention_mask\": visual_attention_mask,\n }\n)\n\nlabels = torch.tensor([[0.0, 1.0]]).unsqueeze(0) # Batch size 1, Num labels 2\n\noutputs = model(**inputs, labels=labels)\nloss = outputs.loss\nscores = outputs.logits\n```"} +{"repo": "fhir-py", "function": "def where(self, criteria: 'Builder') -> 'Builder':\n param_nodes = self._function_args_to_nodes(self.node, [criteria], unnested=True)\n return self._to_builder(_evaluation.WhereFunction(self.node.context, self.node, param_nodes))", "docstring": "The FHIRPath where() function.\n\nFilters the collection of FHIR elements to meet criteria defined by a\nbuilder expression.\n\nHere is an example use:\n\n>>> pat = \n>>> home_addresses = pat.address.where(pat.address.use == 'home')\n\nArgs:\n criteria: An expression builder containing the filtering logic.\n\nReturns:\n An expression that contains the items that match the given criteria."} +{"repo": "tensorflow", "function": "def register_dispatchable_type(cls):\n _api_dispatcher.register_dispatchable_type(cls)\n return cls", "docstring": "Class decorator that registers a type for use with type-based dispatch.\n\nShould *not* be used with subclasses of `CompositeTensor` or `ExtensionType`\n(which are automatically registered).\n\nNote: this function is intended to support internal legacy use cases (such\nas RaggedTensorValue), and will probably not be exposed as a public API.\n\nArgs:\n cls: The class to register.\n\nReturns:\n `cls`."} +{"repo": "tensorflow", "function": "def _recreate(self, proto, node_id, nodes):\n registered_class = registration.get_registered_class(proto.registered_name)\n if registered_class is None:\n registered_class = _BUILT_IN_REGISTRATIONS.get(proto.WhichOneof('kind'))\n dependencies = {}\n for key, dep_node_id in self._get_node_dependencies(proto).items():\n dependencies[key] = nodes[dep_node_id]\n if registered_class:\n obj = registered_class._deserialize_from_proto(proto=proto.serialized_user_proto, object_proto=proto, dependencies=dependencies, export_dir=self._export_dir, asset_file_def=self._asset_file_def, operation_attributes=self._operation_attributes)\n if isinstance(obj, base.Trackable):\n setter = type(obj)._add_trackable_child\n else:\n setter = setattr\n return (obj, setter)\n else:\n return self._recreate_default(proto, node_id, dependencies)", "docstring": "Creates a Python object from a SavedObject protocol buffer.\n\nArgs:\n proto: a SavedObject proto\n node_id: int, the index of this object in the SavedObjectGraph node list.\n nodes: dict mapping int node_ids -> created objects.\n\nReturns:\n The recreated object, and the set-attribute function for reconnecting\n the trackable children."} 
+{"repo": "tensorflow", "function": "def he_normal(seed=None):\n return VarianceScaling(scale=2.0, mode='fan_in', distribution='truncated_normal', seed=seed)", "docstring": "He normal initializer.\n\nInitializers allow you to pre-specify an initialization strategy, encoded in\nthe Initializer object, without knowing the shape and dtype of the variable\nbeing initialized.\n\nIt draws samples from a truncated normal distribution centered on 0 with\n`stddev = sqrt(2 / fan_in)` where `fan_in` is the number of input units in the\nweight tensor.\n\nExamples:\n\n>>> def make_variables(k, initializer):\n... return (tf.Variable(initializer(shape=[k, k], dtype=tf.float32)),\n... tf.Variable(initializer(shape=[k, k, k], dtype=tf.float32)))\n>>> v1, v2 = make_variables(3, tf.initializers.he_normal())\n>>> v1\n>> v2\n>> make_variables(4, tf.initializers.RandomNormal())\n( TypeSpec:\n spec = _type_spec_from_value(value)\n if spec is not None:\n return spec\n try:\n tensor = tensor_conversion_registry.convert(value)\n spec = _type_spec_from_value(tensor)\n if spec is not None:\n return spec\n except (ValueError, TypeError) as e:\n logging.vlog(3, 'Failed to convert %r to tensor: %s' % (type(value).__name__, e))\n raise TypeError(f'Could not build a TypeSpec for {value} of unsupported type {type(value)}.')", "docstring": "Returns a `tf.TypeSpec` that represents the given `value`.\n\nExamples:\n\n >>> tf.type_spec_from_value(tf.constant([1, 2, 3]))\n TensorSpec(shape=(3,), dtype=tf.int32, name=None)\n >>> tf.type_spec_from_value(np.array([4.0, 5.0], np.float64))\n TensorSpec(shape=(2,), dtype=tf.float64, name=None)\n >>> tf.type_spec_from_value(tf.ragged.constant([[1, 2], [3, 4, 5]]))\n RaggedTensorSpec(TensorShape([2, None]), tf.int32, 1, tf.int64)\n\n >>> example_input = tf.ragged.constant([[1, 2], [3]])\n >>> @tf.function(input_signature=[tf.type_spec_from_value(example_input)])\n ... def f(x):\n ... return tf.reduce_sum(x, axis=1)\n\nArgs:\n value: A value that can be accepted or returned by TensorFlow APIs. Accepted\n types for `value` include `tf.Tensor`, any value that can be converted to\n `tf.Tensor` using `tf.convert_to_tensor`, and any subclass of\n `CompositeTensor` (such as `tf.RaggedTensor`).\n\nReturns:\n A `TypeSpec` that is compatible with `value`.\n\nRaises:\n TypeError: If a TypeSpec cannot be built for `value`, because its type\n is not supported."} +{"repo": "tensorflow", "function": "def get_unpartitioned_shape(self, shape):\n shape = tensor_shape.as_shape(shape)\n dims = shape.as_list()\n if self._shard_dimension is None or self._number_of_partitions is None or (not dims):\n return None\n if dims[self._shard_dimension] is None:\n raise ValueError(f'Shape {shape.as_list()} must have a fixed size for dimension {self._shard_dimension} that is known. 
')\n if self._number_of_partitions > 1:\n dims[self._shard_dimension] *= self._number_of_partitions\n return tensor_shape.as_shape(dims)", "docstring": "Returns the shape of an unpartitioned Tensor.\n\nWhen given the shape of a 'sharded-size' Tensor, returns the full shape\nof its unpartitioned Tensor.\n\nArgs:\n shape: The shape of the sharded Tensor.\n\nReturns:\n The shape of the unpartitioned version of the Tensor.\n\nRaises:\n ValueError: if shape has an unknown sharded dimension."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor) -> Tuple:\n router_probs, router_logits = self._compute_router_probabilities(hidden_states)\n expert_index = torch.argmax(router_probs, dim=-1)\n expert_index = torch.nn.functional.one_hot(expert_index, num_classes=self.num_experts)\n token_priority = torch.cumsum(expert_index, dim=-2)\n expert_capacity_mask = token_priority <= self.expert_capacity\n expert_index = expert_index * expert_capacity_mask\n router_probs = torch.max(router_probs, dim=-1).values.unsqueeze(-1)\n return (expert_index, router_probs, router_logits)", "docstring": "Generic forward function for every Router class. Each Router expects to have the same input hidden states\n(`hidden_states`) corresponding to the hidden states for each token, and the `expert_capacity` corresponding to the\nnumber of tokens the Router will send to each expert; some Routers can send up to a few tokens to each expert.\n\nEach Router works as follows: it expects the hidden states for each token, and gets the `router_probs` and\n`router_logits` from the `router_weights`. This assigns, for each token, the raw probability of being assigned\nto an expert. Then each Router class has to define its own `_compute_routing_instructions`.\n\nArgs:\n hidden_states (`torch.Tensor`) :\n [num_groups, tokens_per_group, hidden_dim] inputs to send to experts.\nReturns:\n Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`] Tuple containing the expert index, the router probs\n and the router logits. 
The router probabilities and logits are required to compute the loss."} +{"repo": "transformers", "function": "def forward(self, inputs, expert_size):\n input_list = inputs.split(expert_size, dim=0)\n output_list = []\n for i in range(self.num_experts):\n output_list.append(F.linear(input_list[i], self.weight[i]))\n results = torch.cat(output_list, dim=0)\n return results", "docstring": "Forward pass of the GraniteMoeParallelExperts module.\n\nArgs:\n inputs (Tensor):\n Input tensor.\n expert_size:\n Expert size information.\n\nReturns:\n Tensor: Output tensor."} +{"repo": "tensorflow", "function": "def convert_graph_def(graph_def, pass_pipeline='tf-standard-pipeline', show_debug_info=False):\n return pywrap_mlir.import_graphdef(graph_def, pass_pipeline, show_debug_info)", "docstring": "Import a GraphDef and convert it to a textual MLIR module.\n\nThis API is only intended for inspecting the internals of TensorFlow and the\nstring returned is at the moment intended for debugging purposes.\n\nArgs:\n graph_def: An object of type graph_pb2.GraphDef or a textual proto\n representation of a valid GraphDef.\n pass_pipeline: A textual description of an MLIR Pass Pipeline to run on the\n module, see MLIR documentation for the [textual pass pipeline\n syntax](https://mlir.llvm.org/docs/PassManagement/#textual-pass-pipeline-specification).\n show_debug_info: Whether to include locations in the emitted textual form.\n\nReturns:\n A textual representation of the MLIR module corresponding to the graphdef.\n\nRaises:\n InvalidArgumentError: if graph_def is invalid or cannot be converted to\n MLIR."} +{"repo": "tensorflow", "function": "def _SparseTensorDenseMatMulGrad(op: ops.Operation, grad):\n a_indices, a_values, a_shape = op.inputs[:3]\n b = op.inputs[3]\n adj_a = op.get_attr('adjoint_a')\n adj_b = op.get_attr('adjoint_b')\n a_type = a_values.dtype.base_dtype\n b_type = b.dtype.base_dtype\n if a_type != b_type:\n raise TypeError(f'SparseTensorDenseMatMul op received operands with different types: `{a_type}` and `{b_type}`.')\n b_grad = gen_sparse_ops.sparse_tensor_dense_mat_mul(a_indices, a_values, a_shape, grad, adjoint_a=not adj_a)\n if adj_b:\n b_grad = array_ops.matrix_transpose(b_grad, conjugate=True)\n rows = a_indices[:, 0]\n cols = a_indices[:, 1]\n parts_a = array_ops.gather(grad, rows if not adj_a else cols)\n parts_b = array_ops.gather(b if not adj_b else array_ops.transpose(b), cols if not adj_a else rows)\n if not adj_a and (not adj_b):\n a_values_grad = math_ops.matmul(array_ops.expand_dims(parts_a, -2), array_ops.expand_dims(parts_b, -2), adjoint_b=True)\n elif adj_a and (not adj_b):\n a_values_grad = math_ops.matmul(array_ops.expand_dims(parts_a, -1), array_ops.expand_dims(parts_b, -1), adjoint_a=True)\n elif not adj_a and adj_b:\n a_values_grad = math_ops.matmul(array_ops.expand_dims(parts_a, -2), array_ops.expand_dims(parts_b, -1))\n elif adj_a and adj_b:\n a_values_grad = math_ops.matmul(array_ops.expand_dims(parts_a, -1), array_ops.expand_dims(parts_b, -2), adjoint_a=True, adjoint_b=True)\n return (None, array_ops.squeeze(a_values_grad, axis=[-2, -1]), None, b_grad)", "docstring": "Gradients for the dense tensor in the SparseTensorDenseMatMul op.\n\nArgs:\n op: the SparseTensorDenseMatMul op\n grad: the incoming gradient\n\nReturns:\n Gradient for each of the 4 input tensors:\n (sparse_indices, sparse_values, sparse_shape, dense_tensor)\n The gradients for indices and shape are None.\n\nRaises:\n TypeError: When the two operands don't have the same type."} +{"repo": "beam", 
"function": "def _is_valid_cloud_label_value(label_value):\n return _VALID_CLOUD_LABEL_PATTERN.match(label_value)", "docstring": "Returns true if label_value is a valid cloud label string.\n\nThis function can return false in cases where the label value is valid.\nHowever, it will not return true in a case where the lavel value is invalid.\nThis is because a stricter set of allowed characters is used in this\nvalidator, because foreign language characters are not accepted.\nThus, this should not be used as a generic validator for all cloud labels.\n\nSee Also:\n https://cloud.google.com/compute/docs/labeling-resources\n\nArgs:\n label_value: The label value to validate.\n\nReturns:\n True if the label value is a valid"} +{"repo": "tensorflow", "function": "def next_sample(uid):\n return next(_SHARED_SEQUENCES[uid])", "docstring": "Gets the next value from the generator `uid`.\n\nTo allow multiple generators to be used at the same time, we use `uid` to\nget a specific one. A single generator would cause the validation to\noverwrite the training generator.\n\nArgs:\n uid: int, generator identifier\n\nReturns:\n The next value of generator `uid`."} +{"repo": "tensorflow", "function": "def _wrap_and_check_metrics(self, metrics):\n if not isinstance(metrics, dict):\n metrics = {self.METRICS_NAME: metrics}\n outputs = {}\n for key, value in metrics.items():\n if isinstance(value, tuple):\n metric_val, metric_op = value\n else:\n metric_val = value.result()\n assert len(value.updates) == 1\n metric_op = value.updates[0]\n key = self._check_output_key(key, self.METRICS_NAME)\n key = self._prefix_key(key, self.METRICS_NAME)\n val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX\n op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX\n if not isinstance(metric_val, tensor.Tensor):\n raise ValueError('{} output value must be a Tensor; got {}.'.format(key, metric_val))\n if not (tensor_util.is_tensor(metric_op) or isinstance(metric_op, ops.Operation)):\n raise ValueError('{} update_op must be a Tensor or Operation; got {}.'.format(key, metric_op))\n metric_op_tensor = metric_op\n if not isinstance(metric_op, tensor.Tensor):\n with ops.control_dependencies([metric_op]):\n metric_op_tensor = constant_op.constant([], name='metric_op_wrapper')\n outputs[val_name] = metric_val\n outputs[op_name] = metric_op_tensor\n return outputs", "docstring": "Handle the saving of metrics.\n\nMetrics is either a tuple of (value, update_op), or a dict of such tuples.\nHere, we separate out the tuples and create a dict with names to tensors.\n\nArgs:\n metrics: Dict of metric results keyed by name.\n The values of the dict can be one of the following:\n (1) instance of `Metric` class.\n (2) (metric_value, update_op) tuples, or a single tuple.\n metric_value must be a Tensor, and update_op must be a Tensor or Op.\n\nReturns:\n dict of output_names to tensors\n\nRaises:\n ValueError: if the dict key is not a string, or the metric values or ops\n are not tensors."} +{"repo": "tensorflow", "function": "def _composition_must_be_self_adjoint(operators):\n if len(operators) == 1 and operators[0].is_self_adjoint:\n return True\n if linear_operator_util.is_aat_form(operators):\n return True\n return False", "docstring": "Runs some checks to see if composition operators must be SA.\n\nArgs:\n operators: List of LinearOperators.\n\nReturns:\n True if the composition must be SA. 
False if it is not SA OR if we did not\n determine whether the composition is SA."} +{"repo": "keras", "function": "def call(self, x):\n return ops.rms_normalization(x, scale=self.scale, axis=self.axis, epsilon=self.epsilon)", "docstring": "Applies RMS normalization to the input tensor.\n\nArgs:\n x: Input tensor of shape (batch_size, input_dim).\n\nReturns:\n The RMS-normalized tensor of the same shape (batch_size, input_dim),\n scaled by the learned `scale` parameter."} +{"repo": "tensorflow", "function": "def put(self, item):\n with self._not_full:\n if self._closed:\n raise QueueClosedError()\n if self._maxsize > 0:\n while len(self._queue) == self._maxsize:\n self._not_full.wait()\n if self._closed:\n raise QueueClosedError()\n self._queue.append(item)\n self._not_empty.notify()", "docstring": "Put an item into the queue.\n\nIf the queue is closed, fails immediately.\n\nIf the queue is full, blocks until space is available or until the queue\nis closed by a call to close(), at which point this call fails.\n\nArgs:\n item: an item to add to the queue\n\nRaises:\n QueueClosedError: if insertion failed because the queue is closed"} +{"repo": "tensorflow", "function": "def CreateShapeFromDtypeAndTuple(dtype, shape_tuple):\n element_type = types_.MAP_DTYPE_TO_RECORD[str(dtype)].primitive_type\n return Shape(element_type, shape_tuple)", "docstring": "Create a shape from a Numpy dtype and a sequence of nonnegative integers.\n\nArgs:\n dtype: a numpy dtype, e.g. np.dtype('int32').\n shape_tuple: a sequence of nonnegative integers.\n\nReturns:\n A Shape object."} +{"repo": "pytype", "function": "def start_range(self, line, membership):\n last = self._transitions[-1] if self._transitions else -1\n if line < last:\n raise ValueError('Line number less than previous start_range() call.')\n previous = len(self._transitions) % 2 == 1\n if membership == previous:\n return\n elif line == last:\n self._transitions.pop()\n else:\n self._transitions.append(line)", "docstring": "Start a range of lines that are either included/excluded from the set.\n\nArgs:\n line: A line number.\n membership: If True, lines >= line are included in the set (starting a\n range), otherwise they are excluded (ending a range).\n\nRaises:\n ValueError: if line is less than that of a previous call to start_range()."} +{"repo": "transformers", "function": "def unsqueeze(self, dim: int) -> Rigid:\n if dim >= len(self.shape):\n raise ValueError('Invalid dimension')\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n return Rigid(rots, trans)", "docstring": "Analogous to torch.unsqueeze. 
The dimension is relative to the shared dimensions of the rotation/translation.\n\nArgs:\n dim: A positive or negative dimension index.\nReturns:\n The unsqueezed transformation."} +{"repo": "transformers", "function": "class PerceiverMaskedLMOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Base class for Perceiver's masked language model outputs.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Masked language modeling (MLM) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer\n plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_latents,\n num_latents)`. Attentions weights after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,\n used to compute the weighted average in the cross-attention heads."} +{"repo": "transformers", "function": "class AlignConfig(PretrainedConfig):\n model_type = 'align'\n sub_configs = {'text_config': AlignTextConfig, 'vision_config': AlignVisionConfig}\n\n def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs):\n super().__init__(**kwargs)\n if text_config is None:\n text_config = {}\n logger.info('text_config is None. Initializing the AlignTextConfig with default values.')\n if vision_config is None:\n vision_config = {}\n logger.info('vision_config is None. 
Initializing the AlignVisionConfig with default values.')\n self.text_config = AlignTextConfig(**text_config)\n self.vision_config = AlignVisionConfig(**vision_config)\n self.projection_dim = projection_dim\n self.temperature_init_value = temperature_init_value\n self.initializer_range = initializer_range\n\n @classmethod\n def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):\n \"\"\"\n Instantiate a [`AlignConfig`] (or a derived class) from align text model configuration and align vision model\n configuration.\n\n Returns:\n [`AlignConfig`]: An instance of a configuration object\n \"\"\"\n return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "[`AlignConfig`] is the configuration class to store the configuration of a [`AlignModel`]. It is used to\ninstantiate a ALIGN model according to the specified arguments, defining the text model and vision model configs.\nInstantiating a configuration with the defaults will yield a similar configuration to that of the ALIGN\n[kakaobrain/align-base](https://huggingface.co/kakaobrain/align-base) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`AlignTextConfig`].\n vision_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`AlignVisionConfig`].\n projection_dim (`int`, *optional*, defaults to 640):\n Dimensionality of text and vision projection layers.\n temperature_init_value (`float`, *optional*, defaults to 1.0):\n The initial value of the *temperature* parameter. 
Default is used as per the original ALIGN implementation.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n kwargs (*optional*):\n Dictionary of keyword arguments.\n\nExample:\n\n```python\n>>> from transformers import AlignConfig, AlignModel\n\n>>> # Initializing a AlignConfig with kakaobrain/align-base style configuration\n>>> configuration = AlignConfig()\n\n>>> # Initializing a AlignModel (with random weights) from the kakaobrain/align-base style configuration\n>>> model = AlignModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n\n>>> # We can also initialize a AlignConfig from a AlignTextConfig and a AlignVisionConfig\n>>> from transformers import AlignTextConfig, AlignVisionConfig\n\n>>> # Initializing ALIGN Text and Vision configurations\n>>> config_text = AlignTextConfig()\n>>> config_vision = AlignVisionConfig()\n\n>>> config = AlignConfig.from_text_vision_configs(config_text, config_vision)\n```"} +{"repo": "transformers", "function": "def forward(self, pixel_values: torch.FloatTensor, pixel_mask: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], DetrModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n batch_size, num_channels, height, width = pixel_values.shape\n device = pixel_values.device\n if pixel_mask is None:\n pixel_mask = torch.ones((batch_size, height, width), device=device)\n features, object_queries_list = self.backbone(pixel_values, pixel_mask)\n feature_map, mask = features[-1]\n if mask is None:\n raise ValueError('Backbone does not return downsampled pixel mask')\n projected_feature_map = self.input_projection(feature_map)\n flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)\n object_queries = object_queries_list[-1].flatten(2).permute(0, 2, 1)\n flattened_mask = mask.flatten(1)\n if encoder_outputs is None:\n encoder_outputs = self.encoder(inputs_embeds=flattened_features, attention_mask=flattened_mask, object_queries=object_queries, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):\n encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)\n query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)\n queries = torch.zeros_like(query_position_embeddings)\n decoder_outputs = self.decoder(inputs_embeds=queries, attention_mask=None, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict)\n if not return_dict:\n return decoder_outputs + encoder_outputs\n return DetrModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states)", "docstring": "decoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, num_queries)`, *optional*):\n Not used by default. Can be used to mask object queries.\ninputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you\n can choose to directly pass a flattened representation of an image.\ndecoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):\n Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an\n embedded representation.\n\nExamples:\n\n```python\n>>> from transformers import AutoImageProcessor, DetrModel\n>>> from PIL import Image\n>>> import requests\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> image_processor = AutoImageProcessor.from_pretrained(\"facebook/detr-resnet-50\")\n>>> model = DetrModel.from_pretrained(\"facebook/detr-resnet-50\")\n\n>>> # prepare image for the model\n>>> inputs = image_processor(images=image, return_tensors=\"pt\")\n\n>>> # forward pass\n>>> outputs = model(**inputs)\n\n>>> # the last hidden states are the final query embeddings of the Transformer decoder\n>>> # these are of shape (batch_size, num_queries, hidden_size)\n>>> last_hidden_states = outputs.last_hidden_state\n>>> list(last_hidden_states.shape)\n[1, 100, 256]\n```"} +{"repo": "yapf", "function": "def DumpPyTree(tree, target_stream=sys.stdout):\n dumper = PyTreeDumper(target_stream)\n dumper.Visit(tree)", "docstring": "Convenience function for dumping a given pytree.\n\nThis function presents a very minimal interface. For more configurability (for\nexample, controlling how specific node types are displayed), use PyTreeDumper\ndirectly.\n\nArguments:\n tree: the tree to dump.\n target_stream: the stream to dump the tree to. A file-like object. 
By\n default will dump into stdout."} +{"repo": "beam", "function": "def apply(self, transform, pvalueish=None, label=None):\n if isinstance(transform, ptransform._NamedPTransform):\n return self.apply(transform.transform, pvalueish, label or transform.label)\n if not isinstance(transform, ptransform.PTransform):\n raise TypeError('Expected a PTransform object, got %s' % transform)\n if label:\n old_label, transform.label = (transform.label, label)\n try:\n return self.apply(transform, pvalueish)\n finally:\n transform.label = old_label\n if self._current_transform() is self._root_transform():\n alter_label_if_ipython(transform, pvalueish)\n full_label = '/'.join([self._current_transform().full_label, transform.label]).lstrip('/')\n if full_label in self.applied_labels:\n auto_unique_labels = self._options.view_as(StandardOptions).auto_unique_labels\n if auto_unique_labels:\n logging.warning('Using --auto_unique_labels could cause data loss when updating a pipeline or reloading the job state. This is not recommended for streaming jobs.')\n unique_label = self._generate_unique_label(transform)\n return self.apply(transform, pvalueish, unique_label)\n else:\n raise RuntimeError('A transform with label \"%s\" already exists in the pipeline. To apply a transform with a specified label, write pvalue | \"label\" >> transform or use the option \"auto_unique_labels\" to automatically generate unique transform labels. Note \"auto_unique_labels\" could cause data loss when updating a pipeline or reloading the job state. This is not recommended for streaming jobs.' % full_label)\n self.applied_labels.add(full_label)\n if pvalueish is None:\n full_label = self._current_transform().full_label\n raise TypeCheckError(f'Transform \"{full_label}\" was applied to the output of an object of type None.')\n pvalueish, inputs = transform._extract_input_pvalues(pvalueish)\n try:\n if not isinstance(inputs, dict):\n inputs = {str(ix): input for ix, input in enumerate(inputs)}\n except TypeError:\n raise NotImplementedError('Unable to extract PValue inputs from %s; either %s does not accept inputs of this format, or it does not properly override _extract_input_pvalues' % (pvalueish, transform))\n for t, leaf_input in inputs.items():\n if not isinstance(leaf_input, pvalue.PValue) or not isinstance(t, str):\n raise NotImplementedError('%s does not properly override _extract_input_pvalues, returned %s from %s' % (transform, inputs, pvalueish))\n current = AppliedPTransform(self._current_transform(), transform, full_label, inputs, None, annotations=self._current_annotations())\n self._current_transform().add_part(current)\n try:\n self.transforms_stack.append(current)\n type_options = self._options.view_as(TypeOptions)\n if type_options.pipeline_type_check:\n transform.type_check_inputs(pvalueish)\n if isinstance(pvalueish, pvalue.PBegin) and isinstance(transform, ParDo):\n full_label = self._current_transform().full_label\n raise TypeCheckError(f\"Transform '{full_label}' expects a PCollection as input. 
Got a PBegin/Pipeline instead.\")\n self._assert_not_applying_PDone(pvalueish, transform)\n pvalueish_result = self.runner.apply(transform, pvalueish, self._options)\n if type_options is not None and type_options.pipeline_type_check:\n transform.type_check_outputs(pvalueish_result)\n for tag, result in ptransform.get_named_nested_pvalues(pvalueish_result):\n assert isinstance(result, (pvalue.PValue, pvalue.DoOutputsTuple))\n if result.producer is None:\n result.producer = current\n self._infer_result_type(transform, tuple(inputs.values()), result)\n assert isinstance(result.producer.inputs, tuple)\n if isinstance(result, pvalue.DoOutputsTuple):\n current.add_output(result, result._main_tag)\n continue\n base = tag\n counter = 0\n while tag in current.outputs:\n counter += 1\n tag = '%s_%d' % (base, counter)\n current.add_output(result, tag)\n if type_options is not None and type_options.type_check_strictness == 'ALL_REQUIRED' and (transform.get_type_hints().output_types is None):\n ptransform_name = '%s(%s)' % (transform.__class__.__name__, full_label)\n raise TypeCheckError('Pipeline type checking is enabled, however no output type-hint was found for the PTransform %s' % ptransform_name)\n finally:\n self.transforms_stack.pop()\n return pvalueish_result", "docstring": "Applies a custom transform using the pvalueish specified.\n\nArgs:\n transform (~apache_beam.transforms.ptransform.PTransform): the\n :class:`~apache_beam.transforms.ptransform.PTransform` to apply.\n pvalueish (~apache_beam.pvalue.PCollection): the input for the\n :class:`~apache_beam.transforms.ptransform.PTransform` (typically a\n :class:`~apache_beam.pvalue.PCollection`).\n label (str): label of the\n :class:`~apache_beam.transforms.ptransform.PTransform`.\n\nRaises:\n TypeError: if the transform object extracted from the\n argument list is not a\n :class:`~apache_beam.transforms.ptransform.PTransform`.\n RuntimeError: if the transform object was already applied to\n this pipeline and needs to be cloned in order to apply again."} +{"repo": "tensorflow", "function": "def _get_metrics_from_layers(layers):\n metrics = []\n layers = layer_utils.filter_empty_layer_containers(layers)\n for layer in layers:\n if isinstance(layer, Model):\n metrics.extend(layer._metrics)\n metrics.extend(_get_metrics_from_layers(layer.layers))\n else:\n metrics.extend(layer.metrics)\n return metrics", "docstring": "Returns list of metrics from the given layers.\n\nThis will not include the `compile` metrics of a model layer.\n\nArgs:\n layers: List of layers.\n\nReturns:\n List of metrics."} +{"repo": "tensorflow", "function": "def get_canonical_import(import_set):\n import_list = sorted(import_set, key=lambda imp_and_priority: (-imp_and_priority[1], imp_and_priority[0]))\n return import_list[0][0]", "docstring": "Obtain one single import from a set of possible sources of a symbol.\n\nOne symbol might come from multiple places as it is being imported and\nreexported. To simplify API changes, we always use the same import for the\nsame module, and give preference based on higher priority and alphabetical\nordering.\n\nArgs:\n import_set: (set) Imports providing the same symbol. This is a set of tuples\n in the form (import, priority). 
We want to pick an import with highest\n priority.\n\nReturns:\n A module name to import"} +{"repo": "starthinker", "function": "def _convert_date(self, value):\n try:\n result = parser.parse(value)\n if not result.tzinfo:\n result = ZoneInfo(self._timezone).localize(result)\n if ':' in value:\n return result.strftime('%Y-%m-%dT%H:%M:%S.000%z')\n else:\n return result.strftime('%Y-%m-%d')\n except:\n return None", "docstring": "Converts dates into a Bulkdozer specific format to be written back to the Feed.\n\nArgs:\n value: String representation of the date.\n\nReturns:\n Bulkdozer string representation of a date. Returns null if the value\n cannot be parsed into a date."} +{"repo": "temporian", "function": "def assign(self: EventSetOrNode, **others: EventSetOrNode) -> EventSetOrNode:\n from temporian.core.operators.glue import assign\n return assign(self, **others)", "docstring": "Assign new features to an [EventSet][temporian.EventSet].\n\nIf the name provided already exists on the EventSet, the feature is\noverriden.\n\nUsage example:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[1, 2],\n ... features={'A': [1, 2]},\n ... )\n >>> b = tp.event_set(\n ... timestamps=[1, 2],\n ... features={'B': [3, 4]},\n ... same_sampling_as=a,\n ... )\n >>> ab = a.assign(new_name=b)\n >>> ab\n indexes: []\n features: [('A', int64), ('new_name', int64)]\n events:\n (2 events):\n timestamps: [1. 2.]\n 'A': [1 2]\n 'new_name': [3 4]\n ...\n >>> ab = a.assign(B=b, B2=b['B'] * 2)\n >>> ab\n indexes: []\n features: [('A', int64), ('B', int64), ('B2', int64)]\n events:\n (2 events):\n timestamps: [1. 2.]\n 'A': [1 2]\n 'B': [3 4]\n 'B2': [6 8]\n ...\n\n ```\n\nArgs:\n **others: The argument name is going to be used as the new feature\n name. The EventSets need to have a single feature\n\nReturns:\n EventSet with the added feature."} +{"repo": "pyglove", "function": "def metadata(self) -> Dict[str, Any]:\n return self._metadata", "docstring": "Metadata of this field.\n\nMetadata is defined as a dict type, so we can add multiple annotations\nto a field.\n\n userdata = field.metadata.get('userdata', None):\n\nReturns:\n Metadata of this field as a dict."} +{"repo": "transformers", "function": "def resize_for_vision_encoder(self, image: np.ndarray, vision_encoder_max_size: int, resample: PILImageResampling=PILImageResampling.LANCZOS, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n height, width = get_image_size(image, channel_dim=input_data_format)\n aspect_ratio = width / height\n if width >= height:\n width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size\n height = int(width / aspect_ratio)\n height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size\n elif height > width:\n height = math.ceil(height / vision_encoder_max_size) * vision_encoder_max_size\n width = int(height * aspect_ratio)\n width = math.ceil(width / vision_encoder_max_size) * vision_encoder_max_size\n new_size = {'height': height, 'width': width}\n return self.resize(image, size=new_size, resample=resample, input_data_format=input_data_format, data_format=data_format)", "docstring": "Resize images to be multiples of `vision_encoder_max_size` while preserving the aspect ratio.\nArgs:\n image (`np.ndarray`):\n Images to resize.\n vision_encoder_max_size (`int`):\n Maximum size of the output image. 
If the image is larger than this size, it will be split into\n patches of this size, and the original image will be concatenated with the patches, resized to max_size.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.LANCZOS`):\n Resampling filter to use when resizing the image.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the output image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred"} +{"repo": "keras", "function": "def stack_residual_blocks_v2(x, filters, blocks, stride1=2, name=None):\n x = residual_block_v2(x, filters, conv_shortcut=True, name=name + '_block1')\n for i in range(2, blocks):\n x = residual_block_v2(x, filters, name=name + '_block' + str(i))\n x = residual_block_v2(x, filters, stride=stride1, name=name + '_block' + str(blocks))\n return x", "docstring": "A set of stacked residual blocks.\n\nArgs:\n x: Input tensor.\n filters: Number of filters in the bottleneck layer in a block.\n blocks: Number of blocks in the stacked blocks.\n stride1: Stride of the first layer in the first block. Defaults to `2`.\n name: Stack label.\n\nReturns:\n Output tensor for the stacked blocks."} +{"repo": "mobly", "function": "def _parse_logline_timestamp(t):\n date, time = t.split(' ')\n month, day = date.split('-')\n h, m, s = time.split(':')\n s, ms = s.split('.')\n return (month, day, h, m, s, ms)", "docstring": "Parses a logline timestamp into a tuple.\n\nArgs:\n t: Timestamp in logline format.\n\nReturns:\n An iterable of date and time elements in the order of month, day, hour,\n minute, second, microsecond."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, object_queries: Optional[torch.Tensor]=None, output_attentions: bool=False):\n residual = hidden_states\n hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask, object_queries=object_queries, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n residual = hidden_states\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n if self.training:\n if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():\n clamp_value = torch.finfo(hidden_states.dtype).max - 1000\n hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (attn_weights,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative\n values.\n object_queries (`torch.FloatTensor`, *optional*):\n Object queries (also called content embeddings), to be added to the hidden 
states.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail."} +{"repo": "tensorflow", "function": "def _prefix_output_keys(self, output_dict, output_name):\n new_outputs = {}\n for key, val in output_dict.items():\n key = self._prefix_key(key, output_name)\n new_outputs[key] = val\n return new_outputs", "docstring": "Prepend output_name to the output_dict keys if it doesn't exist.\n\nThis produces predictable prefixes for the pre-determined outputs\nof SupervisedOutput.\n\nArgs:\n output_dict: dict of string to Tensor, assumed valid.\n output_name: prefix string to prepend to existing keys.\n\nReturns:\n dict with updated keys and existing values."} +{"repo": "tensorflow", "function": "def If(cond, inputs, then_branch, else_branch, name=None):\n if isinstance(then_branch, function._DefinedFunction):\n tlist = [_.type for _ in then_branch.definition.signature.output_arg]\n return gen_functional_ops._if(cond, inputs, tlist, then_branch, else_branch, name=name)\n then_out = then_branch.structured_outputs\n else_out = else_branch.structured_outputs\n nest.assert_same_structure(then_out, else_out, expand_composites=True)\n tlist = nest.flatten(then_branch.output_dtypes)\n ret = gen_functional_ops._if(cond, inputs, tlist, then_branch, else_branch, name=name)\n return nest.pack_sequence_as(then_out, ret, expand_composites=True)", "docstring": "output = Cond(inputs) ?\n\nthen_branch(inputs) : else_branch(inputs).\n\nArgs:\n cond: A `Tensor`. A scalar. If the scalar is not a boolean, the scalar is\n converted to a boolean according to the following rule: if the scalar is a\n numerical value, non-zero means True and zero means False; if the scalar\n is a string, non-empty means True and empty means False.\n inputs: A list of input tensors.\n then_branch: A function takes 'inputs' and returns a list of tensors, whose\n types are the same as what else_branch returns.\n else_branch: A function takes 'inputs' and returns a list of tensors. whose\n types are the same as what then_branch returns.\n name: A name for the operation (optional).\n\nReturns:\n A list of tensors returned by either then_branch(inputs)\n or else_branch(inputs)."} +{"repo": "transformers", "function": "def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]\n return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]", "docstring": "Create a mask from the two sequences passed to be used in a sequence-pair classification task. 
A Funnel\nTransformer sequence pair mask has the following format:\n\n```\n2 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n| first sequence | second sequence |\n```\n\nIf `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s)."} +{"repo": "tf-quant-finance", "function": "def swaption_price(*, expiries: types.RealTensor, floating_leg_start_times: types.RealTensor, floating_leg_end_times: types.RealTensor, fixed_leg_payment_times: types.RealTensor, floating_leg_daycount_fractions: types.RealTensor, fixed_leg_daycount_fractions: types.RealTensor, fixed_leg_coupon: types.RealTensor, reference_rate_fn: Callable[..., types.RealTensor], mean_reversion: Union[types.RealTensor, Callable[..., types.RealTensor]], volatility: Union[types.RealTensor, Callable[..., types.RealTensor]], notional: types.RealTensor=None, is_payer_swaption: types.BoolTensor=True, use_analytic_pricing: bool=True, num_samples: types.IntTensor=100, random_type: random.RandomType=None, seed: types.IntTensor=None, skip: types.IntTensor=0, time_step: types.RealTensor=None, dtype: tf.DType=None, name: str=None) -> types.RealTensor:\n name = name or 'hw_swaption_price'\n del floating_leg_daycount_fractions\n with tf.name_scope(name):\n expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')\n dtype = dtype or expiries.dtype\n float_leg_start_times = tf.convert_to_tensor(floating_leg_start_times, dtype=dtype, name='float_leg_start_times')\n float_leg_end_times = tf.convert_to_tensor(floating_leg_end_times, dtype=dtype, name='float_leg_end_times')\n fixed_leg_payment_times = tf.convert_to_tensor(fixed_leg_payment_times, dtype=dtype, name='fixed_leg_payment_times')\n fixed_leg_daycount_fractions = tf.convert_to_tensor(fixed_leg_daycount_fractions, dtype=dtype, name='fixed_leg_daycount_fractions')\n fixed_leg_coupon = tf.convert_to_tensor(fixed_leg_coupon, dtype=dtype, name='fixed_leg_coupon')\n notional = tf.convert_to_tensor(notional, dtype=dtype, name='notional')\n is_payer_swaption = tf.convert_to_tensor(is_payer_swaption, dtype=tf.bool, name='is_payer_swaption')\n if expiries.shape.rank < fixed_leg_payment_times.shape.rank - 1:\n raise ValueError('Swaption expiries not specified for all swaptions in the batch. 
Expected rank {} but received {}.'.format(fixed_leg_payment_times.shape.rank - 1, expiries.shape.rank))\n expiries = tf.expand_dims(expiries, axis=-1)\n expiries = tf.repeat(expiries, tf.shape(fixed_leg_payment_times)[-1], axis=-1)\n if use_analytic_pricing:\n return _analytic_valuation(expiries, float_leg_start_times, float_leg_end_times, fixed_leg_payment_times, fixed_leg_daycount_fractions, fixed_leg_coupon, reference_rate_fn, mean_reversion, volatility, notional, is_payer_swaption, dtype, name + '_analytic_valuation')\n if time_step is None:\n raise ValueError('`time_step` must be provided for simulation based bond option valuation.')\n model = one_factor.HullWhiteModel1F(mean_reversion, volatility, initial_discount_rate_fn=reference_rate_fn, dtype=dtype)\n\n def _sample_discount_curve_path_fn(times, curve_times, num_samples):\n p_t_tau, r_t = model.sample_discount_curve_paths(times=times, curve_times=curve_times, num_samples=num_samples, random_type=random_type, seed=seed, skip=skip)\n return (p_t_tau, r_t, None)\n sim_times, _ = tf.unique(tf.reshape(expiries, shape=[-1]))\n longest_expiry = tf.reduce_max(sim_times)\n sim_times = tf.concat([sim_times, tf.range(time_step, longest_expiry, time_step)], axis=0)\n sim_times = tf.sort(sim_times, name='sort_sim_times')\n payoff_discount_factors, payoff_bond_price = swaption_util.discount_factors_and_bond_prices_from_samples(expiries=expiries, payment_times=fixed_leg_payment_times, sample_discount_curve_paths_fn=_sample_discount_curve_path_fn, num_samples=num_samples, times=sim_times, dtype=dtype)\n fixed_leg_pv = fixed_leg_coupon * fixed_leg_daycount_fractions * tf.squeeze(payoff_bond_price, axis=-1)\n fixed_leg_pv = tf.math.reduce_sum(fixed_leg_pv, axis=-1)\n float_leg_pv = 1.0 - tf.squeeze(payoff_bond_price, axis=-1)[..., -1]\n payoff_swap = tf.squeeze(payoff_discount_factors, axis=-1)[..., -1] * (float_leg_pv - fixed_leg_pv)\n payoff_swap = tf.where(is_payer_swaption, payoff_swap, -1.0 * payoff_swap)\n payoff_swaption = tf.math.maximum(payoff_swap, 0.0)\n option_value = tf.math.reduce_mean(payoff_swaption, axis=0)\n return notional * option_value", "docstring": "Calculates the price of European Swaptions using the Hull-White model.\n\nA European Swaption is a contract that gives the holder an option to enter a\nswap contract at a future date at a prespecified fixed rate. A swaption that\ngrants the holder to pay fixed rate and receive floating rate is called a\npayer swaption while the swaption that grants the holder to receive fixed and\npay floating payments is called the receiver swaption. Typically the start\ndate (or the inception date) of the swap concides with the expiry of the\nswaption. Mid-curve swaptions are currently not supported (b/160061740).\n\nAnalytic pricing of swaptions is performed using the Jamshidian decomposition\n[1].\n\n#### References:\n [1]: D. Brigo, F. Mercurio. Interest Rate Models-Theory and Practice.\n Second Edition. 
2007.\n\n#### Example\nThe example shows how value a batch of 1y x 1y and 1y x 2y swaptions using the\nHull-White model.\n\n````python\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport tf_quant_finance as tff\n\ndtype = tf.float64\n\nexpiries = [1.0, 1.0]\nfloat_leg_start_times = [[1.0, 1.25, 1.5, 1.75, 2.0, 2.0, 2.0, 2.0],\n [1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75]]\nfloat_leg_end_times = [[1.25, 1.5, 1.75, 2.0, 2.0, 2.0, 2.0, 2.0],\n [1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]]\nfixed_leg_payment_times = [[1.25, 1.5, 1.75, 2.0, 2.0, 2.0, 2.0, 2.0],\n [1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]]\nfloat_leg_daycount_fractions = [[0.25, 0.25, 0.25, 0.25, 0.0, 0.0, 0.0, 0.0],\n [0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25]]\nfixed_leg_daycount_fractions = [[0.25, 0.25, 0.25, 0.25, 0.0, 0.0, 0.0, 0.0],\n [0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25]]\nfixed_leg_coupon = [[0.011, 0.011, 0.011, 0.011, 0.0, 0.0, 0.0, 0.0],\n [0.011, 0.011, 0.011, 0.011, 0.011, 0.011, 0.011, 0.011]]\nzero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\nprice = tff.models.hull_white.swaption_price(\n expiries=expiries,\n floating_leg_start_times=float_leg_start_times,\n floating_leg_end_times=float_leg_end_times,\n fixed_leg_payment_times=fixed_leg_payment_times,\n floating_leg_daycount_fractions=float_leg_daycount_fractions,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n reference_rate_fn=zero_rate_fn,\n notional=100.,\n dim=1,\n mean_reversion=[0.03],\n volatility=[0.02],\n dtype=dtype)\n# Expected value: [0.7163243383624043, 1.4031415262337608] # shape = (2,1)\n````\n\nArgs:\n expiries: A real `Tensor` of any shape and dtype. The time to\n expiration of the swaptions. The shape of this input determines the number\n (and shape) of swaptions to be priced and the shape of the output.\n floating_leg_start_times: A real `Tensor` of the same dtype as `expiries`.\n The times when accrual begins for each payment in the floating leg. The\n shape of this input should be `expiries.shape + [m]` where `m` denotes\n the number of floating payments in each leg.\n floating_leg_end_times: A real `Tensor` of the same dtype as `expiries`.\n The times when accrual ends for each payment in the floating leg. The\n shape of this input should be `expiries.shape + [m]` where `m` denotes\n the number of floating payments in each leg.\n fixed_leg_payment_times: A real `Tensor` of the same dtype as `expiries`.\n The payment times for each payment in the fixed leg. The shape of this\n input should be `expiries.shape + [n]` where `n` denotes the number of\n fixed payments in each leg.\n floating_leg_daycount_fractions: A real `Tensor` of the same dtype and\n compatible shape as `floating_leg_start_times`. The daycount fractions\n for each payment in the floating leg.\n fixed_leg_daycount_fractions: A real `Tensor` of the same dtype and\n compatible shape as `fixed_leg_payment_times`. The daycount fractions\n for each payment in the fixed leg.\n fixed_leg_coupon: A real `Tensor` of the same dtype and compatible shape\n as `fixed_leg_payment_times`. The fixed rate for each payment in the\n fixed leg.\n reference_rate_fn: A Python callable that accepts expiry time as a real\n `Tensor` and returns a `Tensor` of either shape `input_shape` or\n `input_shape`. Returns the continuously compounded zero rate at\n the present time for the input expiry time.\n mean_reversion: A real positive scalar `Tensor` or a Python callable. 
The\n callable can be one of the following:\n (a) A left-continuous piecewise constant object (e.g.,\n `tff.math.piecewise.PiecewiseConstantFunc`) that has a property\n `is_piecewise_constant` set to `True`. In this case the object should\n have a method `jump_locations(self)` that returns a `Tensor` of shape\n `[num_jumps]`. The return value of `mean_reversion(t)` should return a\n `Tensor` of shape `t.shape`, `t` is a rank 1 `Tensor` of the same `dtype`\n as the output. See example in the class docstring.\n (b) A callable that accepts scalars (stands for time `t`) and returns a\n scalar `Tensor` of the same `dtype` as `strikes`.\n Corresponds to the mean reversion rate.\n volatility: A real positive `Tensor` of the same `dtype` as\n `mean_reversion` or a callable with the same specs as above.\n Corresponds to the long run price variance.\n notional: An optional `Tensor` of same dtype and compatible shape as\n `strikes`specifying the notional amount for the underlying swap.\n Default value: None in which case the notional is set to 1.\n is_payer_swaption: A boolean `Tensor` of a shape compatible with `expiries`.\n Indicates whether the swaption is a payer (if True) or a receiver\n (if False) swaption. If not supplied, payer swaptions are assumed.\n use_analytic_pricing: A Python boolean specifying if analytic valuation\n should be performed. Analytic valuation is only supported for constant\n `mean_reversion` and piecewise constant `volatility`. If the input is\n `False`, then valuation using Monte-Carlo simulations is performed.\n Default value: The default value is `True`.\n num_samples: Positive scalar `int32` `Tensor`. The number of simulation\n paths during Monte-Carlo valuation. This input is ignored during analytic\n valuation.\n Default value: The default value is 1.\n random_type: Enum value of `RandomType`. The type of (quasi)-random\n number generator to use to generate the simulation paths. This input is\n relevant only for Monte-Carlo valuation and ignored during analytic\n valuation.\n Default value: `None` which maps to the standard pseudo-random numbers.\n seed: Seed for the random number generator. The seed is only relevant if\n `random_type` is one of\n `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,\n STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and\n `HALTON_RANDOMIZED` the seed should be an Python integer. For\n `STATELESS` and `STATELESS_ANTITHETIC `must be supplied as an integer\n `Tensor` of shape `[2]`. This input is relevant only for Monte-Carlo\n valuation and ignored during analytic valuation.\n Default value: `None` which means no seed is set.\n skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or\n Halton sequence to skip. Used only when `random_type` is 'SOBOL',\n 'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.\n Default value: `0`.\n time_step: Scalar real `Tensor`. Maximal distance between time grid points\n in Euler scheme. Relevant when Euler scheme is used for simulation. This\n input is ignored during analytic valuation.\n Default value: `None`.\n dtype: The default dtype to use when converting values to `Tensor`s.\n Default value: `None` which means that default dtypes inferred by\n TensorFlow are used.\n name: Python string. The name to give to the ops created by this function.\n Default value: `None` which maps to the default name\n `hw_swaption_price`.\n\nReturns:\n A `Tensor` of real dtype and shape `expiries.shape` containing the\n computed swaption prices. For swaptions that have. 
reset in the past\n (expiries<0), the function sets the corresponding option prices to 0.0."} +{"repo": "tensorflow", "function": "def map_to_output_names(y_pred, output_names, struct):\n single_output = not nest.is_nested(y_pred)\n outputs_are_flat_list = not single_output and isinstance(y_pred, (list, tuple)) and (not any((nest.is_nested(y_p) for y_p in y_pred)))\n if (single_output or outputs_are_flat_list) and isinstance(struct, dict):\n output_names = output_names or create_pseudo_output_names(y_pred)\n struct = copy.copy(struct)\n new_struct = [struct.pop(name, None) for name in output_names]\n if struct:\n raise ValueError('Found unexpected keys that do not correspond to any Model output: {}. Expected: {}'.format(struct.keys(), output_names))\n if len(new_struct) == 1:\n return new_struct[0]\n return new_struct\n else:\n return struct", "docstring": "Maps a dict to a list using `output_names` as keys.\n\nThis is a convenience feature only. When a `Model`'s outputs\nare a list, you can specify per-output losses and metrics as\na dict, where the keys are the output names. If you specify\nper-output losses and metrics via the same structure as the\n`Model`'s outputs (recommended), no mapping is performed.\n\nFor the Functional API, the output names are the names of the\nlast layer of each output. For the Subclass API, the output names\nare determined by `create_pseudo_output_names` (For example:\n`['output_1', 'output_2']` for a list of outputs).\n\nThis mapping preserves backwards compatibility for `compile` and\n`fit`.\n\nArgs:\n y_pred: Sample outputs of the Model, to determine if this convenience\n feature should be applied (`struct` is returned unmodified if `y_pred`\n isn't a flat list).\n output_names: List. The names of the outputs of the Model.\n struct: The structure to map.\n\nReturns:\n `struct` mapped to a list in same order as `output_names`."} +{"repo": "tensorflow", "function": "def assert_no_garbage_created(f: _F) -> _F:\n\n def decorator(self: 'TensorFlowTestCase', **kwargs):\n \"\"\"Sets DEBUG_SAVEALL, runs the test, and checks for new garbage.\"\"\"\n gc.disable()\n previous_debug_flags = gc.get_debug()\n gc.set_debug(gc.DEBUG_UNCOLLECTABLE)\n gc.collect()\n previous_garbage = len(gc.garbage)\n result = f(self, **kwargs)\n gc.collect()\n new_garbage = len(gc.garbage)\n if new_garbage > previous_garbage:\n for i, obj in enumerate(gc.garbage[previous_garbage:]):\n if getattr(obj, '__module__', '') == 'ast':\n new_garbage -= 3\n if new_garbage > previous_garbage:\n logging.error(\"The decorated test created work for Python's garbage collector, likely due to a reference cycle. 
New objects in cycle(s):\")\n for i, obj in enumerate(gc.garbage[previous_garbage:]):\n try:\n logging.error('Object %d of %d', i, len(gc.garbage) - previous_garbage)\n\n def _safe_object_str(obj) -> str:\n return '<%s %d>' % (obj.__class__.__name__, id(obj))\n logging.error(' Object type: %s', _safe_object_str(obj))\n logging.error(' Referrer types: %s', ', '.join([_safe_object_str(ref) for ref in gc.get_referrers(obj)]))\n logging.error(' Referent types: %s', ', '.join([_safe_object_str(ref) for ref in gc.get_referents(obj)]))\n logging.error(' Object attribute names: %s', dir(obj))\n logging.error(' Object __str__:')\n logging.error(obj)\n logging.error(' Object __repr__:')\n logging.error(repr(obj))\n except Exception:\n logging.error('(Exception while printing object)')\n if new_garbage > previous_garbage:\n for i in range(previous_garbage, new_garbage):\n if _find_reference_cycle(gc.garbage, i):\n break\n self.assertEqual(previous_garbage, new_garbage)\n gc.set_debug(previous_debug_flags)\n gc.enable()\n return result\n return decorator", "docstring": "Test method decorator to assert that no garbage has been created.\n\nNote that this decorator sets DEBUG_SAVEALL, which in some Python interpreters\ncannot be un-set (i.e. will disable garbage collection for any other unit\ntests in the same file/shard).\n\nArgs:\n f: The function to decorate.\n\nReturns:\n The decorated function."} +{"repo": "tensorflow", "function": "def _recursive_apply(tensors, apply_fn):\n tensors_type = type(tensors)\n if isinstance(tensors, tensor_lib.Tensor):\n return apply_fn(tensors)\n elif isinstance(tensors, variables.Variable):\n return apply_fn(tensors.value())\n elif isinstance(tensors, (list, tuple)):\n tensors = [_recursive_apply(t, apply_fn) for t in tensors]\n if tensors_type is list:\n return list(tensors)\n elif tensors_type is tuple:\n return tuple(tensors)\n return tensors_type(*tensors)\n elif tensors_type is dict:\n return dict(((k, _recursive_apply(v, apply_fn)) for k, v in tensors.items()))\n else:\n raise TypeError(f'_recursive_apply argument {tensors!r} has invalid type {tensors_type!r}')", "docstring": "Helper method to recursively apply a function to structure of tensors.\n\nThe structure of the tensors should take the form similar to fetches in\n`tf.compat.v1.Session` and includes single `Tensor`, `list`, nested `list`,\n`tuple`,\n`namedtuple`, or `dict`.\n\nArgs:\n tensors: Single `Tensor`, `list`, nested `list, `tuple`, `namedtuple`, or\n `dict`.\n apply_fn: Function to apply to each `Tensor` and should return a `Tensor`.\n\nReturns:\n Returns the modified tensors with the same structure.\nRaises:\n `TypeError` if undefined type in the tensors structure."} +{"repo": "tensorflow", "function": "def add_notice_to_docstring(doc, instructions, no_doc_str, suffix_str, notice, notice_type='Warning'):\n allowed_notice_types = ['Deprecated', 'Warning', 'Caution', 'Important', 'Note']\n if notice_type not in allowed_notice_types:\n raise ValueError(f'Unrecognized notice type. 
Should be one of: {allowed_notice_types}')\n if not doc:\n lines = [no_doc_str]\n else:\n lines = _normalize_docstring(doc).splitlines()\n lines[0] += ' ' + suffix_str\n if not notice:\n raise ValueError('The `notice` arg must not be empty.')\n notice[0] = f'{notice_type}: {notice[0]}'\n notice = [''] + notice + ([instructions] if instructions else [])\n if len(lines) > 1:\n if lines[1].strip():\n notice.append('')\n lines[1:1] = notice\n else:\n lines += notice\n return '\\n'.join(lines)", "docstring": "Adds a deprecation notice to a docstring.\n\nArgs:\n doc: The original docstring.\n instructions: A string, describing how to fix the problem.\n no_doc_str: The default value to use for `doc` if `doc` is empty.\n suffix_str: Is added to the end of the first line.\n notice: A list of strings. The main notice warning body.\n notice_type: The type of notice to use. Should be one of `[Caution,\n Deprecated, Important, Note, Warning]`\n\nReturns:\n A new docstring, with the notice attached.\n\nRaises:\n ValueError: If `notice` is empty."} +{"repo": "transformers", "function": "class PatchTSMixerForPretraining(PatchTSMixerPreTrainedModel):\n\n def __init__(self, config: PatchTSMixerConfig):\n super().__init__(config)\n self.model = PatchTSMixerModel(config, mask_input=True)\n self.head = PatchTSMixerPretrainHead(config=config)\n self.masked_loss = config.masked_loss\n self.use_return_dict = config.use_return_dict\n if config.post_init:\n self.post_init()\n\n @auto_docstring\n def forward(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForPreTrainingOutput:\n \"\"\"\n past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):\n Context values of the time series. For a pretraining task, this denotes the input time series to predict\n the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly,\n for classification or regression tasks, it denotes the appropriate context values of the time series.\n\n For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is\n greater than 1.\n observed_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):\n Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected\n in `[0, 1]`:\n - 1 for values that are **observed**,\n - 0 for values that are **missing** (i.e. 
NaNs that were replaced by zeros).\n return_loss (`bool`, *optional*):\n Whether to return the loss in the `forward` call.\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.use_return_dict\n if self.masked_loss is True:\n loss = torch.nn.MSELoss(reduction='none')\n else:\n loss = torch.nn.MSELoss(reduction='mean')\n model_output = self.model(past_values, observed_mask=observed_mask, output_hidden_states=output_hidden_states, return_dict=return_dict)\n if isinstance(model_output, tuple):\n model_output = PatchTSMixerModelOutput(*model_output)\n x_hat = self.head(model_output.last_hidden_state)\n if return_loss is True:\n loss_val = loss(x_hat, model_output.patch_input)\n else:\n loss_val = None\n if self.masked_loss is True and loss_val is not None:\n loss_val = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10)\n if not return_dict:\n return tuple((v for v in [loss_val, x_hat, model_output.last_hidden_state, model_output.hidden_states]))\n return PatchTSMixerForPreTrainingOutput(loss=loss_val, prediction_outputs=x_hat, last_hidden_state=model_output.last_hidden_state, hidden_states=model_output.hidden_states)", "docstring": "`PatchTSMixer` for mask pretraining.\n\nArgs:\n config (`PatchTSMixerConfig`):\n Configuration.\n\nReturns:\n `None`."} +{"repo": "pytype", "function": "class Signature(abc.ABC):\n name: str\n param_names: tuple[str, ...]\n posonly_count: int\n varargs_name: str | None\n kwonly_params: tuple[str, ...]\n kwargs_name: str | None\n\n @property\n def posonly_params(self):\n return self.param_names[:self.posonly_count]\n\n @abc.abstractmethod\n def has_default(self, name):\n \"\"\"Whether the named arg has a default value.\"\"\"\n\n @abc.abstractmethod\n def insert_varargs_and_kwargs(self, args):\n \"\"\"Insert varargs and kwargs from args into the signature.\"\"\"\n\n @abc.abstractmethod\n def iter_args(self, args):\n \"\"\"Iterates through the given args, attaching names and expected types.\"\"\"", "docstring": "Representation of a Python function signature.\n\nAttributes:\n name: Name of the function.\n param_names: A tuple of positional parameter names. This DOES include\n positional-only parameters and does NOT include keyword-only parameters.\n posonly_count: Number of positional-only parameters. (Python 3.8)\n varargs_name: Name of the varargs parameter. (The \"args\" in *args)\n kwonly_params: Tuple of keyword-only parameters. (Python 3) E.g. (\"x\", \"y\")\n for \"def f(a, *, x, y=2)\". These do NOT appear in param_names. Ordered\n like in the source file.\n kwargs_name: Name of the kwargs parameter. 
(The \"kwargs\" in **kwargs)\n posonly_params: Tuple of positional-only parameters"} +{"repo": "mobly", "function": "class AndroidDevice:\n\n def __init__(self, serial=''):\n self._serial = str(serial)\n _log_path_base = utils.abs_path(getattr(logging, 'log_path', '/tmp/logs'))\n self._log_path = os.path.join(_log_path_base, 'AndroidDevice%s' % self._normalized_serial)\n self._debug_tag = self._serial\n self.log = AndroidDeviceLoggerAdapter(logging.getLogger(), {'tag': self.debug_tag})\n self._build_info = None\n self._is_rebooting = False\n self.adb = adb.AdbProxy(serial)\n self.fastboot = fastboot.FastbootProxy(serial)\n if self.is_rootable:\n self.root_adb()\n self.services = service_manager.ServiceManager(self)\n self.services.register(SERVICE_NAME_LOGCAT, logcat.Logcat, start_service=False)\n self.services.register('snippets', snippet_management_service.SnippetManagementService)\n self._user_added_device_info = {}\n\n def __repr__(self):\n return '' % self.debug_tag\n\n @property\n def adb_logcat_file_path(self):\n if self.services.has_service_by_name(SERVICE_NAME_LOGCAT):\n return self.services.logcat.adb_logcat_file_path\n\n @property\n def _normalized_serial(self):\n \"\"\"Normalized serial name for usage in log filename.\n\n Some Android emulators use ip:port as their serial names, while on\n Windows `:` is not valid in filename, it should be sanitized first.\n \"\"\"\n if self._serial is None:\n return None\n return mobly_logger.sanitize_filename(self._serial)\n\n @property\n def device_info(self):\n \"\"\"Information to be pulled into controller info.\n\n The latest serial, model, and build_info are included. Additional info\n can be added via `add_device_info`.\n \"\"\"\n info = {'serial': self.serial, 'model': self.model, 'build_info': self.build_info, 'user_added_info': self._user_added_device_info}\n return info\n\n def add_device_info(self, name, info):\n \"\"\"Add information of the device to be pulled into controller info.\n\n Adding the same info name the second time will override existing info.\n\n Args:\n name: string, name of this info.\n info: serializable, content of the info.\n \"\"\"\n self._user_added_device_info.update({name: info})\n\n @property\n def debug_tag(self):\n \"\"\"A string that represents a device object in debug info. 
Default value\n is the device serial.\n\n This will be used as part of the prefix of debugging messages emitted by\n this device object, like log lines and the message of DeviceError.\n \"\"\"\n return self._debug_tag\n\n @debug_tag.setter\n def debug_tag(self, tag):\n \"\"\"Setter for the debug tag.\n\n By default, the tag is the serial of the device, but sometimes it may\n be more descriptive to use a different tag of the user's choice.\n\n Changing debug tag changes part of the prefix of debug info emitted by\n this object, like log lines and the message of DeviceError.\n\n Example:\n By default, the device's serial number is used:\n 'INFO [AndroidDevice|abcdefg12345] One pending call ringing.'\n The tag can be customized with `ad.debug_tag = 'Caller'`:\n 'INFO [AndroidDevice|Caller] One pending call ringing.'\n \"\"\"\n self.log.info('Logging debug tag set to \"%s\"', tag)\n self._debug_tag = tag\n self.log.extra['tag'] = tag\n\n @property\n def has_active_service(self):\n \"\"\"True if any service is running on the device.\n\n A service can be a snippet or logcat collection.\n \"\"\"\n return self.services.is_any_alive\n\n @property\n def log_path(self):\n \"\"\"A string that is the path for all logs collected from this device.\"\"\"\n if not os.path.exists(self._log_path):\n utils.create_dir(self._log_path)\n return self._log_path\n\n @log_path.setter\n def log_path(self, new_path):\n \"\"\"Setter for `log_path`, use with caution.\"\"\"\n if self.has_active_service:\n raise DeviceError(self, 'Cannot change `log_path` when there is service running.')\n old_path = self._log_path\n if new_path == old_path:\n return\n if os.listdir(new_path):\n raise DeviceError(self, 'Logs already exist at %s, cannot override.' % new_path)\n if os.path.exists(old_path):\n shutil.rmtree(new_path, ignore_errors=True)\n shutil.copytree(old_path, new_path)\n shutil.rmtree(old_path, ignore_errors=True)\n self._log_path = new_path\n\n @property\n def serial(self):\n \"\"\"The serial number used to identify a device.\n\n This is essentially the value used for adb's `-s` arg, which means it\n can be a network address or USB bus number.\n \"\"\"\n return self._serial\n\n def update_serial(self, new_serial):\n \"\"\"Updates the serial number of a device.\n\n The \"serial number\" used with adb's `-s` arg is not necessarily the\n actual serial number. For remote devices, it could be a combination of\n host names and port numbers.\n\n This is used for when such identifier of remote devices changes during\n a test. 
For example, when a remote device reboots, it may come back\n with a different serial number.\n\n This is NOT meant for switching the object to represent another device.\n\n We intentionally did not make it a regular setter of the serial\n property so people don't accidentally call this without understanding\n the consequences.\n\n Args:\n new_serial: string, the new serial number for the same device.\n\n Raises:\n DeviceError: tries to update serial when any service is running.\n \"\"\"\n new_serial = str(new_serial)\n if self.has_active_service:\n raise DeviceError(self, 'Cannot change device serial number when there is service running.')\n if self._debug_tag == self.serial:\n self._debug_tag = new_serial\n self._serial = new_serial\n self.adb.serial = new_serial\n self.fastboot.serial = new_serial\n\n @contextlib.contextmanager\n def handle_reboot(self):\n \"\"\"Properly manage the service life cycle when the device needs to\n temporarily disconnect.\n\n The device can temporarily lose adb connection due to user-triggered\n reboot. Use this function to make sure the services\n started by Mobly are properly stopped and restored afterwards.\n\n For sample usage, see self.reboot().\n \"\"\"\n live_service_names = self.services.list_live_services()\n self.services.stop_all()\n self._is_rebooting = True\n try:\n yield\n finally:\n self.wait_for_boot_completion()\n self._build_info = None\n self._is_rebooting = False\n if self.is_rootable:\n self.root_adb()\n self.services.start_services(live_service_names)\n\n @contextlib.contextmanager\n def handle_usb_disconnect(self):\n \"\"\"Properly manage the service life cycle when USB is disconnected.\n\n The device can temporarily lose adb connection due to user-triggered\n USB disconnection, e.g. the following cases can be handled by this\n method:\n\n * Power measurement: Using Monsoon device to measure battery consumption\n would potentially disconnect USB.\n * Unplug USB so device loses connection.\n * ADB connection over WiFi and WiFi got disconnected.\n * Any other type of USB disconnection, as long as snippet session can\n be kept alive while USB disconnected (reboot caused USB\n disconnection is not one of these cases because snippet session\n cannot survive reboot.\n Use handle_reboot() instead).\n\n Use this function to make sure the services started by Mobly are\n properly reconnected afterwards.\n\n Just like the usage of self.handle_reboot(), this method does not\n automatically detect if the disconnection is because of a reboot or USB\n disconnect. Users of this function should make sure the right handle_*\n function is used to handle the correct type of disconnection.\n\n This method also reconnects snippet event client. Therefore, the\n callback objects created (by calling Async RPC methods) before\n disconnection would still be valid and can be used to retrieve RPC\n execution result after device got reconnected.\n\n Example Usage:\n\n .. 
code-block:: python\n\n with ad.handle_usb_disconnect():\n try:\n # User action that triggers USB disconnect, could throw\n # exceptions.\n do_something()\n finally:\n # User action that triggers USB reconnect\n action_that_reconnects_usb()\n # Make sure device is reconnected before returning from this\n # context\n ad.adb.wait_for_device(timeout=SOME_TIMEOUT)\n \"\"\"\n live_service_names = self.services.list_live_services()\n self.services.pause_all()\n try:\n yield\n finally:\n self.services.resume_services(live_service_names)\n\n @property\n def build_info(self):\n \"\"\"Gets the build info of this Android device, including build id and type.\n\n This is not available if the device is in bootloader mode.\n\n Returns:\n A dict with the build info of this Android device, or None if the\n device is in bootloader mode.\n \"\"\"\n if self.is_bootloader:\n self.log.error('Device is in fastboot mode, could not get build info.')\n return\n if self._build_info is None or self._is_rebooting:\n info = {}\n build_info = self.adb.getprops(CACHED_SYSTEM_PROPS)\n for build_info_constant in BuildInfoConstants:\n info[build_info_constant.build_info_key] = build_info.get(build_info_constant.system_prop_key, '')\n self._build_info = info\n return info\n return self._build_info\n\n @property\n def is_bootloader(self):\n \"\"\"True if the device is in bootloader mode.\"\"\"\n return self.serial in list_fastboot_devices()\n\n @property\n def is_adb_root(self):\n \"\"\"True if adb is running as root for this device.\"\"\"\n try:\n return '0' == self.adb.shell('id -u').decode('utf-8').strip()\n except adb.AdbError:\n time.sleep(0.2)\n return '0' == self.adb.shell('id -u').decode('utf-8').strip()\n\n @property\n def is_rootable(self):\n return self.is_adb_detectable() and self.build_info['debuggable'] == '1'\n\n @functools.cached_property\n def model(self):\n \"\"\"The Android code name for the device.\"\"\"\n if self.is_bootloader:\n out = self.fastboot.getvar('product').strip()\n lines = out.decode('utf-8').split('\\n', 1)\n if lines:\n tokens = lines[0].split(' ')\n if len(tokens) > 1:\n return tokens[1].lower()\n return None\n model = self.build_info['build_product'].lower()\n if model == 'sprout':\n return model\n return self.build_info['product_name'].lower()\n\n @property\n def is_emulator(self):\n \"\"\"Whether this device is probably an emulator.\n\n Returns:\n True if this is probably an emulator.\n \"\"\"\n if EMULATOR_SERIAL_REGEX.match(self.serial):\n return True\n elif self.build_info['build_characteristics'] == 'emulator':\n return True\n elif self.build_info['hardware'] in ['ranchu', 'goldfish', 'cutf_cvm']:\n return True\n else:\n return False\n\n def load_config(self, config):\n \"\"\"Add attributes to the AndroidDevice object based on config.\n\n Args:\n config: A dictionary representing the configs.\n\n Raises:\n Error: The config is trying to overwrite an existing attribute.\n \"\"\"\n for k, v in config.items():\n if hasattr(self, k) and k not in _ANDROID_DEVICE_SETTABLE_PROPS:\n raise DeviceError(self, 'Attribute %s already exists with value %s, cannot set again.' 
% (k, getattr(self, k)))\n setattr(self, k, v)\n self.add_device_info(k, v)\n\n def root_adb(self):\n \"\"\"Change adb to root mode for this device if allowed.\n\n If executed on a production build, adb will not be switched to root\n mode per security restrictions.\n \"\"\"\n self.adb.root()\n self.adb.wait_for_device(timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND)\n\n def load_snippet(self, name, package, config=None):\n \"\"\"Starts the snippet apk with the given package name and connects.\n\n Examples:\n\n .. code-block:: python\n\n ad.load_snippet(\n name='maps', package='com.google.maps.snippets')\n ad.maps.activateZoom('3')\n\n Args:\n name: string, the attribute name to which to attach the snippet\n client. E.g. `name='maps'` attaches the snippet client to\n `ad.maps`.\n package: string, the package name of the snippet apk to connect to.\n config: snippet_client_v2.Config, the configuration object for\n controlling the snippet behaviors. See the docstring of the `Config`\n class for supported configurations.\n\n Raises:\n SnippetError: Illegal load operations are attempted.\n \"\"\"\n if hasattr(self, name):\n raise SnippetError(self, 'Attribute \"%s\" already exists, please use a different name.' % name)\n self.services.snippets.add_snippet_client(name, package, config=config)\n\n def unload_snippet(self, name):\n \"\"\"Stops a snippet apk.\n\n Args:\n name: The attribute name the snippet server is attached with.\n\n Raises:\n SnippetError: The given snippet name is not registered.\n \"\"\"\n self.services.snippets.remove_snippet_client(name)\n\n def generate_filename(self, file_type, time_identifier=None, extension_name=None):\n \"\"\"Generates a name for an output file related to this device.\n\n The name follows the pattern:\n\n {file type},{debug_tag},{serial},{model},{time identifier}.{ext}\n\n \"debug_tag\" is only added if it's different from the serial. \"ext\" is\n added if specified by user.\n\n Args:\n file_type: string, type of this file, like \"logcat\" etc.\n time_identifier: string or RuntimeTestInfo. If a `RuntimeTestInfo`\n is passed in, the `signature` of the test case will be used. If\n a string is passed in, the string itself will be used.\n Otherwise the current timestamp will be used.\n extension_name: string, the extension name of the file.\n\n Returns:\n String, the filename generated.\n \"\"\"\n time_str = time_identifier\n if time_identifier is None:\n time_str = mobly_logger.get_log_file_timestamp()\n elif isinstance(time_identifier, runtime_test_info.RuntimeTestInfo):\n time_str = time_identifier.signature\n filename_tokens = [file_type]\n if self.debug_tag != self.serial:\n filename_tokens.append(self.debug_tag)\n filename_tokens.extend([self.serial, self.model, time_str])\n filename_str = ','.join(filename_tokens)\n if extension_name is not None:\n filename_str = '%s.%s' % (filename_str, extension_name)\n filename_str = mobly_logger.sanitize_filename(filename_str)\n self.log.debug('Generated filename: %s', filename_str)\n return filename_str\n\n def take_bug_report(self, test_name=None, begin_time=None, timeout=300, destination=None):\n \"\"\"Takes a bug report on the device and stores it in a file.\n\n Args:\n test_name: Name of the test method that triggered this bug report.\n begin_time: Timestamp of when the test started. 
If not set, then\n this will default to the current time.\n timeout: float, the number of seconds to wait for bugreport to\n complete, default is 5min.\n destination: string, path to the directory where the bugreport\n should be saved.\n\n Returns:\n A string that is the absolute path to the bug report on the host.\n \"\"\"\n prefix = DEFAULT_BUG_REPORT_NAME\n if test_name:\n prefix = '%s,%s' % (DEFAULT_BUG_REPORT_NAME, test_name)\n if begin_time is None:\n begin_time = mobly_logger.get_log_file_timestamp()\n new_br = True\n try:\n stdout = self.adb.shell('bugreportz -v').decode('utf-8')\n if 'not found' in stdout:\n new_br = False\n except adb.AdbError:\n new_br = False\n if destination is None:\n destination = os.path.join(self.log_path, 'BugReports')\n br_path = utils.abs_path(destination)\n utils.create_dir(br_path)\n filename = self.generate_filename(prefix, str(begin_time), 'txt')\n if new_br:\n filename = filename.replace('.txt', '.zip')\n full_out_path = os.path.join(br_path, filename)\n self.wait_for_boot_completion()\n self.log.debug('Start taking bugreport.')\n if new_br:\n out = self.adb.shell('bugreportz', timeout=timeout).decode('utf-8')\n if not out.startswith('OK'):\n raise DeviceError(self, 'Failed to take bugreport: %s' % out)\n br_out_path = out.split(':')[1].strip()\n self.adb.pull([br_out_path, full_out_path])\n self.adb.shell(['rm', br_out_path])\n else:\n self.adb.bugreport(' > \"%s\"' % full_out_path, shell=True, timeout=timeout)\n self.log.debug('Bugreport taken at %s.', full_out_path)\n return full_out_path\n\n def take_screenshot(self, destination, prefix='screenshot', all_displays=False):\n \"\"\"Takes a screenshot of the device.\n\n Args:\n destination: string, full path to the directory to save in.\n prefix: string, prefix file name of the screenshot.\n all_displays: bool, if True will take a screenshot on all connected\n displays, if False will take a screenshot on the default display.\n\n Returns:\n string, full path to the screenshot file on the host, or\n list[str], when all_displays is True, the full paths to the screenshot\n files on the host.\n \"\"\"\n filename = self.generate_filename(prefix, extension_name='png')\n filename_no_extension, _ = os.path.splitext(filename)\n device_path = os.path.join('/storage/emulated/0/', filename)\n self.adb.shell(['screencap', '-p', '-a' if all_displays else '', device_path], timeout=TAKE_SCREENSHOT_TIMEOUT_SECOND)\n utils.create_dir(destination)\n if all_displays:\n pic_paths = []\n png_files = self.adb.shell('ls /storage/emulated/0/*.png').decode('utf-8').split('\\n')\n for device_path in png_files:\n if device_path.find(filename_no_extension) < 0:\n continue\n self.adb.pull([device_path, destination])\n pic_paths.append(os.path.join(destination, os.path.basename(device_path)))\n self.log.debug('Screenshot taken, saved on the host: %s', pic_paths[-1])\n self.adb.shell(['rm', device_path])\n return pic_paths\n self.adb.pull([device_path, destination])\n pic_path = os.path.join(destination, filename)\n self.log.debug('Screenshot taken, saved on the host: %s', pic_path)\n self.adb.shell(['rm', device_path])\n return pic_path\n\n def run_iperf_client(self, server_host, extra_args=''):\n \"\"\"Starts an iperf client on the device.\n\n Returns status as True if the iperf client started successfully,\n and the data flow information as results.\n\n Args:\n server_host: Address of the iperf server.\n extra_args: A string representing extra arguments for the iperf client,\n e.g. 
'-i 1 -t 30'.\n\n Returns:\n status: True if the iperf client started successfully.\n results: a list of strings containing the data flow information.\n \"\"\"\n out = self.adb.shell('iperf3 -c %s %s' % (server_host, extra_args))\n clean_out = str(out, 'utf-8').strip().split('\\n')\n if 'error' in clean_out[0].lower():\n return (False, clean_out)\n return (True, clean_out)\n\n def wait_for_boot_completion(self, timeout=DEFAULT_TIMEOUT_BOOT_COMPLETION_SECOND):\n \"\"\"Waits for Android framework to broadcast ACTION_BOOT_COMPLETED.\n\n This function times out after 15 minutes.\n\n Args:\n timeout: float, the number of seconds to wait before timing out.\n If not specified, defaults to 15 minutes.\n \"\"\"\n deadline = time.perf_counter() + timeout\n self.adb.wait_for_device(timeout=timeout)\n while time.perf_counter() < deadline:\n try:\n if self.is_boot_completed():\n return\n except (adb.AdbError, adb.AdbTimeoutError):\n pass\n time.sleep(5)\n raise DeviceError(self, 'Booting process timed out')\n\n def is_boot_completed(self):\n \"\"\"Checks if device boot is completed by verifying system property.\"\"\"\n completed = self.adb.getprop('sys.boot_completed')\n if completed == '1':\n self.log.debug('Device boot completed.')\n return True\n return False\n\n def is_adb_detectable(self):\n \"\"\"Checks if USB is on and device is ready by verifying adb devices.\"\"\"\n serials = list_adb_devices()\n if self.serial in serials:\n self.log.debug('Is now adb detectable.')\n return True\n return False\n\n def reboot(self):\n \"\"\"Reboots the device.\n\n Generally one should use this method to reboot the device instead of\n directly calling `adb.reboot`, because this method gracefully handles\n the teardown and restoration of running services.\n\n This method is blocking and only returns when the reboot has completed\n and the services are restored.\n\n Raises:\n Error: Waiting for completion timed out.\n \"\"\"\n if self.is_bootloader:\n self.fastboot.reboot()\n return\n with self.handle_reboot():\n self.adb.reboot()\n\n def __getattr__(self, name):\n \"\"\"Tries to return a snippet client registered with `name`.\n\n This is for backward compatibility of directly accessing snippet clients.\n \"\"\"\n client = self.services.snippets.get_snippet_client(name)\n if client:\n return client\n return self.__getattribute__(name)", "docstring": "Class representing an Android device.\n\nEach object of this class represents one Android device in Mobly. This class\nprovides various ways, like adb, fastboot, and Mobly snippets, to control\nan Android device, whether it's a real device or an emulator instance.\n\nYou can also register your own services to the device's service manager.\nSee the docs of `service_manager` and `base_service` for details.\n\nAttributes:\n serial: A string that's the serial number of the Android device.\n log_path: A string that is the path where all logs collected on this\n Android device should be stored.\n log: A logger adapted from root logger with an added prefix specific\n to an AndroidDevice instance. The default prefix is\n [AndroidDevice|]. 
Use self.debug_tag = 'tag' to use a\n different tag in the prefix.\n adb_logcat_file_path: A string that's the full path to the adb logcat\n file collected, if any.\n adb: An AdbProxy object used for interacting with the device via adb.\n fastboot: A FastbootProxy object used for interacting with the device\n via fastboot.\n services: ServiceManager, the manager of long-running services on the\n device."} +{"repo": "keras", "function": "def hard_shrink(x, threshold=0.5):\n if any_symbolic_tensors((x,)):\n return HardShrink(threshold).symbolic_call(x)\n return backend.nn.hard_shrink(x, threshold)", "docstring": "Hard Shrink activation function.\n\nThe Hard Shrink function is a thresholding operation defined as:\n\n`f(x) = x` if `|x| > threshold`,\n`f(x) = 0` otherwise.\n\nArgs:\n x: Input tensor.\n threshold: Threshold value. Defaults to 0.5.\n\nReturns:\n A tensor with the same shape as `x`.\n\nExample:\n\n>>> x = np.array([-0.5, 0., 1.])\n>>> x_hard_shrink = keras.ops.hard_shrink(x)\n>>> print(x_hard_shrink)\narray([0. 0. 1.], shape=(3,), dtype=float64)"} +{"repo": "tensorflow", "function": "def build_graph(device, input_shape, perm, datatype, num_iters):\n with ops.device('/%s:0' % device):\n total_size = np.prod(input_shape)\n inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)\n t = constant_op.constant(inp, shape=input_shape)\n outputs = []\n transpose_op = array_ops.transpose(t, perm)\n outputs.append(transpose_op)\n for _ in range(1, num_iters):\n with ops.control_dependencies([transpose_op]):\n transpose_op = array_ops.transpose(t, perm)\n outputs.append(transpose_op)\n return control_flow_ops.group(*outputs)", "docstring": "Builds a graph containing a sequence of transpose operations.\n\nArgs:\n device: String, the device to run on.\n input_shape: Shape of the input tensor.\n perm: A list of ints with the same length as the input tensor's dimension.\n datatype: numpy data type of the input tensor.\n num_iters: number of iterations to run transpose.\n\nReturns:\n An array of tensors to run()"} +{"repo": "transformers", "function": "def get_special_dtypes_update(self, model, torch_dtype: 'torch.dtype') -> Dict[str, 'torch.dtype']:\n return {name: torch_dtype for name, _ in model.named_parameters() if any((m in name for m in self.modules_to_not_convert))}", "docstring": "Returns dtypes for modules that are not quantized - used for the computation of the device_map in case\none passes a str as a device_map. 
The method will use the `modules_to_not_convert` that is modified\nin `_process_model_before_weight_loading`.\n\nArgs:\n model (`~transformers.PreTrainedModel`):\n The model to quantize\n torch_dtype (`torch.dtype`):\n The dtype passed in `from_pretrained` method."} +{"repo": "beam", "function": "def modify_job_state(self, job_id, new_state):\n if new_state == 'JOB_STATE_DONE':\n new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_DONE\n elif new_state == 'JOB_STATE_CANCELLED':\n new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_CANCELLED\n elif new_state == 'JOB_STATE_DRAINING':\n new_state = dataflow.Job.RequestedStateValueValuesEnum.JOB_STATE_DRAINING\n else:\n return False\n request = dataflow.DataflowProjectsLocationsJobsUpdateRequest()\n request.jobId = job_id\n request.projectId = self.google_cloud_options.project\n request.location = self.google_cloud_options.region\n request.job = dataflow.Job(requestedState=new_state)\n self._client.projects_locations_jobs.Update(request)\n return True", "docstring": "Modify the run state of the job.\n\nArgs:\n job_id: The id of the job.\n new_state: A string representing the new desired state. It could be set to\n either 'JOB_STATE_DONE', 'JOB_STATE_CANCELLED' or 'JOB_STATE_DRAINING'.\n\nReturns:\n True if the job was modified successfully."} +{"repo": "transformers", "function": "class TFSeq2SeqQuestionAnsweringModelOutput(ModelOutput):\n loss: tf.Tensor | None = None\n start_logits: Optional[tf.Tensor] = None\n end_logits: Optional[tf.Tensor] = None\n past_key_values: List[tf.Tensor] | None = None\n decoder_hidden_states: Tuple[tf.Tensor] | None = None\n decoder_attentions: Tuple[tf.Tensor] | None = None\n encoder_last_hidden_state: tf.Tensor | None = None\n encoder_hidden_states: Tuple[tf.Tensor] | None = None\n encoder_attentions: Tuple[tf.Tensor] | None = None", "docstring": "Base class for outputs of sequence-to-sequence question answering models.\n\nArgs:\n loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Span-start scores (before SoftMax).\n end_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Span-end scores (before SoftMax).\n past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,\n sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be\n used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.\n decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to 
compute the weighted average in the\n self-attention heads.\n encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.\n encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads."} +{"repo": "tensorflow", "function": "def _convert_tflite_enum_type_to_tf_type(tflite_enum_type):\n tf_type = _MAP_TFLITE_ENUM_TO_TF_TYPES.get(tflite_enum_type)\n if tf_type is None:\n raise ValueError('Unsupported enum {}. The valid map of enum to tf types is : {}'.format(tflite_enum_type, _MAP_TFLITE_ENUM_TO_TF_TYPES))\n return tf_type", "docstring": "Converts tflite enum type (eg: 0) to tf type (eg: tf.float32).\n\nArgs:\n tflite_enum_type: tflite enum type (eg: 0, that corresponds to float32)\n\nRaises:\n ValueError: If an invalid tflite enum type is provided.\n\nReturns:\n tf type (eg: tf.float32)"} +{"repo": "mobly", "function": "def instrument(self, package, options=None, runner=None, handler=None) -> bytes:\n if runner is None:\n runner = DEFAULT_INSTRUMENTATION_RUNNER\n if options is None:\n options = {}\n options_list = []\n for option_key, option_value in options.items():\n options_list.append('-e %s %s' % (option_key, option_value))\n options_string = ' '.join(options_list)\n instrumentation_command = 'am instrument -r -w %s %s/%s' % (options_string, package, runner)\n logging.info('AndroidDevice|%s: Executing adb shell %s', self.serial, instrumentation_command)\n if handler is None:\n return self._exec_adb_cmd('shell', instrumentation_command, shell=False, timeout=None, stderr=None)\n else:\n return self._execute_adb_and_process_stdout('shell', instrumentation_command, shell=False, handler=handler)", "docstring": "Runs an instrumentation command on the device.\n\nThis is a convenience wrapper to avoid parameter formatting.\n\nExample:\n\n.. 
code-block:: python\n\n device.instrument(\n 'com.my.package.test',\n options = {\n 'class': 'com.my.package.test.TestSuite',\n },\n )\n\nArgs:\n package: string, the package of the instrumentation tests.\n options: dict, the instrumentation options including the test\n class.\n runner: string, the test runner name, which defaults to\n DEFAULT_INSTRUMENTATION_RUNNER.\n handler: optional func, when specified the function is used to parse\n the instrumentation stdout line by line as the output is\n generated; otherwise, the stdout is simply returned once the\n instrumentation is finished.\n\nReturns:\n The stdout of the instrumentation command, or the stderr if the handler\n is set."} +{"repo": "transformers", "function": "def __init__(self, config: Mask2FormerConfig, weight_dict: Dict[str, float]):\n super().__init__()\n requires_backends(self, ['scipy'])\n self.num_labels = config.num_labels\n self.weight_dict = weight_dict\n self.eos_coef = config.no_object_weight\n empty_weight = torch.ones(self.num_labels + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer('empty_weight', empty_weight)\n self.num_points = config.train_num_points\n self.oversample_ratio = config.oversample_ratio\n self.importance_sample_ratio = config.importance_sample_ratio\n self.matcher = Mask2FormerHungarianMatcher(cost_class=1.0, cost_dice=config.dice_weight, cost_mask=config.mask_weight, num_points=self.num_points)", "docstring": "The Mask2Former Loss. The loss is computed very similarly to DETR. The process happens in two steps: 1) we\ncompute the Hungarian assignment between ground truth masks and the outputs of the model; 2) we supervise each\nmatched ground-truth / prediction pair (supervising both class and mask)\n\nArgs:\n config (`Mask2FormerConfig`):\n The configuration for the Mask2Former model, also containing loss calculation specific parameters.\n weight_dict (`Dict[str, float]`):\n A dictionary of weights to be applied to the different losses."} +{"repo": "tensorflow", "function": "def send_graph_tracebacks(destinations, run_key, origin_stack, graph, send_source=True):\n _send_call_tracebacks(destinations, origin_stack, is_eager_execution=False, call_key=run_key, graph=graph, send_source=send_source)", "docstring": "Send the tracebacks of a graph execution call to debug server(s).\n\nArgs:\n destinations: gRPC destination addresses, a `str` or a `list` of `str`s,\n e.g., \"localhost:4242\". 
If a `list`, gRPC requests containing the same\n `CallTraceback` proto payload will be sent to all the destinations.\n run_key: A string describing the feeds, fetches (and targets) names of the\n `tf.Session.run` call.\n origin_stack: The traceback of the `tf.Session.run()` invocation.\n graph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`),\n which contains op tracebacks.\n send_source: Whether the source files involved in the op tracebacks but\n outside the TensorFlow library are to be sent."} +{"repo": "beam", "function": "def convert_collections_to_typing(typ):\n if hasattr(typ, '__iter__'):\n if hasattr(typ, '__next__'):\n typ = typing.Iterator[typ.__args__]\n elif hasattr(typ, 'send') and hasattr(typ, 'throw'):\n typ = typing.Generator[typ.__args__]\n elif _match_is_exactly_iterable(typ):\n typ = typing.Iterable[typ.__args__]\n return typ", "docstring": "Converts a given collections.abc type to a typing object.\n\nArgs:\n typ: an object inheriting from a collections.abc object\n\nReturns:\n type: The corresponding typing object."} +{"repo": "tensorflow", "function": "def multiply(inputs, **kwargs):\n return Multiply(**kwargs)(inputs)", "docstring": "Functional interface to the `Multiply` layer.\n\nExample:\n\n>>> x1 = np.arange(3.0)\n>>> x2 = np.arange(3.0)\n>>> tf.keras.layers.multiply([x1, x2])\n<tf.Tensor: shape=(3,), dtype=float64, numpy=array([0., 1., 4.])>\n\nUsage in a functional model:\n\n>>> input1 = tf.keras.layers.Input(shape=(16,))\n>>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)\n>>> input2 = tf.keras.layers.Input(shape=(32,))\n>>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)\n>>> out = tf.keras.layers.multiply([x1,x2]) #shape=(None, 8)\n>>> out = tf.keras.layers.Dense(4)(out)\n>>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\nArgs:\n inputs: A list of input tensors (at least 2).\n **kwargs: Standard layer keyword arguments.\n\nReturns:\n A tensor, the element-wise product of the inputs."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[KwargsForCausalLM]) -> CausalLMOutputWithPast:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs)\n hidden_states = outputs.last_hidden_state\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n loss = None\n if labels is not None:\n loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)\n return CausalLMOutputWithPast(loss=loss, logits=logits, 
past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, DiffLlamaForCausalLM\n\n>>> model = DiffLlamaForCausalLM.from_pretrained(\"google/diffllama-7b\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/diffllama-7b\")\n\n>>> prompt = \"What is your favorite condiment?\"\n>>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n>>> # Generate\n>>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n\"What is your favorite condiment?\"\n```"} +{"repo": "tensorflow", "function": "def _to_numpy_type(dtype):\n if isinstance(dtype, dtypes.DType):\n return dtype.as_numpy_dtype\n return np.dtype(dtype)", "docstring": "Converts a native Python or TF DType to a numpy type.\n\nArgs:\n dtype: Could be a Python type, a numpy type or a TF DType.\n\nReturns:\n A NumPy `dtype`."} +{"repo": "tensorflow", "function": "def _getGraphOpTypes(self, graphdef, output_nodes):\n name_to_input_name, name_to_node, _ = _extract_graph_summary(graphdef)\n used_node_names = _bfs_for_reachable_nodes(output_nodes, name_to_input_name)\n return set([name_to_node[node_name].op for node_name in used_node_names])", "docstring": "Returns used op types in `graphdef` reachable from `output_nodes`.\n\nThis is used to check that after the stub transformation the expected\nnodes are there.\n\nNOTE: this is not an exact test that the graph is the correct output, but\n it balances compact expressibility of the test with sanity checking.\n\nArgs:\n graphdef: TensorFlow proto graphdef.\n output_nodes: A list of output node names that we need to reach.\n\nReturns:\n A set of node types reachable from `output_nodes`."} +{"repo": "tensorflow", "function": "class NoisyLinearCosineDecay(LearningRateSchedule):\n\n def __init__(self, initial_learning_rate, decay_steps, initial_variance=1.0, variance_decay=0.55, num_periods=0.5, alpha=0.0, beta=0.001, name=None):\n \"\"\"Applies noisy linear cosine decay to the learning rate.\n\n Args:\n initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python\n number. The initial learning rate.\n decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.\n Number of steps to decay over.\n initial_variance: initial variance for the noise. See computation above.\n variance_decay: decay for the noise's variance. See computation above.\n num_periods: Number of periods in the cosine part of the decay.\n See computation above.\n alpha: See computation above.\n beta: See computation above.\n name: String. Optional name of the operation. 
Defaults to\n 'NoisyLinearCosineDecay'.\n \"\"\"\n super(NoisyLinearCosineDecay, self).__init__()\n self.initial_learning_rate = initial_learning_rate\n self.decay_steps = decay_steps\n self.initial_variance = initial_variance\n self.variance_decay = variance_decay\n self.num_periods = num_periods\n self.alpha = alpha\n self.beta = beta\n self.name = name\n\n def __call__(self, step):\n with ops.name_scope_v2(self.name or 'NoisyLinearCosineDecay') as name:\n initial_learning_rate = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.initial_learning_rate, name='initial_learning_rate')\n dtype = initial_learning_rate.dtype\n decay_steps = math_ops.cast(self.decay_steps, dtype)\n initial_variance = math_ops.cast(self.initial_variance, dtype)\n variance_decay = math_ops.cast(self.variance_decay, dtype)\n num_periods = math_ops.cast(self.num_periods, dtype)\n alpha = math_ops.cast(self.alpha, dtype)\n beta = math_ops.cast(self.beta, dtype)\n global_step_recomp = math_ops.cast(step, dtype)\n global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)\n linear_decayed = (decay_steps - global_step_recomp) / decay_steps\n variance = initial_variance / math_ops.pow(1.0 + global_step_recomp, variance_decay)\n std = math_ops.sqrt(variance)\n noisy_linear_decayed = linear_decayed + random_ops.random_normal(linear_decayed.shape, stddev=std)\n completed_fraction = global_step_recomp / decay_steps\n fraction = 2.0 * num_periods * completed_fraction\n cosine_decayed = 0.5 * (1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))\n noisy_linear_cosine_decayed = (alpha + noisy_linear_decayed) * cosine_decayed + beta\n return math_ops.multiply(initial_learning_rate, noisy_linear_cosine_decayed, name=name)\n\n def get_config(self):\n return {'initial_learning_rate': self.initial_learning_rate, 'decay_steps': self.decay_steps, 'initial_variance': self.initial_variance, 'variance_decay': self.variance_decay, 'num_periods': self.num_periods, 'alpha': self.alpha, 'beta': self.beta, 'name': self.name}", "docstring": "A LearningRateSchedule that uses a noisy linear cosine decay schedule.\n\nSee [Bello et al., ICML2017] Neural Optimizer Search with RL.\nhttps://arxiv.org/abs/1709.07417\n\nFor the idea of warm starts here controlled by `num_periods`,\nsee [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent\nwith Warm Restarts. https://arxiv.org/abs/1608.03983\n\nNote that linear cosine decay is more aggressive than cosine decay and\nlarger initial learning rates can typically be used.\n\nWhen training a model, it is often recommended to lower the learning rate as\nthe training progresses. This schedule applies a noisy linear cosine decay\nfunction to an optimizer step, given a provided initial learning rate.\nIt requires a `step` value to compute the decayed learning rate. You can\njust pass a TensorFlow variable that you increment at each training step.\n\nThe schedule is a 1-arg callable that produces a decayed learning\nrate when passed the current optimizer step. 
This can be useful for changing\nthe learning rate value across different invocations of optimizer functions.\nIt is computed as:\n\n```python\ndef decayed_learning_rate(step):\n step = min(step, decay_steps)\n linear_decay = (decay_steps - step) / decay_steps\n cosine_decay = 0.5 * (\n 1 + cos(pi * 2 * num_periods * step / decay_steps))\n decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta\n return initial_learning_rate * decayed\n```\nwhere eps_t is 0-centered Gaussian noise with variance\ninitial_variance / (1 + global_step) ** variance_decay\n\nExample usage:\n```python\ndecay_steps = 1000\nlr_decayed_fn = (\n tf.keras.experimental.NoisyLinearCosineDecay(\n initial_learning_rate, decay_steps))\n```\n\nYou can pass this schedule directly into a `tf.keras.optimizers.Optimizer`\nas the learning rate. The learning rate schedule is also serializable and\ndeserializable using `tf.keras.optimizers.schedules.serialize` and\n`tf.keras.optimizers.schedules.deserialize`.\n\nReturns:\n A 1-arg callable learning rate schedule that takes the current optimizer\n step and outputs the decayed learning rate, a scalar `Tensor` of the same\n type as `initial_learning_rate`."} +{"repo": "weather-tools", "function": "def get_firestore_config(self) -> t.Dict:\n parsed = urlparse(self.location)\n query_params = {}\n if parsed.query:\n query_params = dict(parse_qsl(parsed.query, strict_parsing=True))\n return {'collection': parsed.netloc, **query_params}", "docstring": "Parse firestore Location format: 'fs://<collection>?projectId=<project_id>'\nUsers must specify a 'projectId' query parameter in the firestore location. If this argument\nisn't passed in, users must set the `GOOGLE_CLOUD_PROJECT` environment variable.\nUsers may specify options to `firebase_admin.initialize_app()` via query arguments in the URL.\nFor more information about what options are available, consult this documentation:\nhttps://firebase.google.com/docs/reference/admin/python/firebase_admin#initialize_app\n Note: each query key-value pair may only appear once. If there are duplicates, the last pair\n will be used.\nOptionally, users may configure these options via the `FIREBASE_CONFIG` environment variable,\nwhich is typically a path/to/a/file.json.\nExamples:\n >>> location = Location(\"fs://my-collection?projectId=my-project-id&storageBucket=foo\")\n >>> FirestoreManifest(location).get_firestore_config()\n {'collection': 'my-collection', 'projectId': 'my-project-id', 'storageBucket': 'foo'}\nRaises:\n ValueError: If query parameters are malformed.\n AssertionError: If the 'projectId' query parameter is not set."} +{"repo": "tensorflow", "function": "class ActivityRegularization(Layer):\n\n def __init__(self, l1=0.0, l2=0.0, **kwargs):\n super(ActivityRegularization, self).__init__(activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs)\n self.supports_masking = True\n self.l1 = l1\n self.l2 = l2\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {'l1': self.l1, 'l2': self.l2}\n base_config = super(ActivityRegularization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "docstring": "Layer that applies an update to the cost function based on input activity.\n\nArgs:\n l1: L1 regularization factor (positive float).\n l2: L2 regularization factor (positive float).\n\nInput shape:\n Arbitrary. 
Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\nOutput shape:\n Same shape as input."} +{"repo": "tensorflow", "function": "def aggregate_and_return_name_for_input(self, out_graphdef):\n del out_graphdef\n raise RuntimeError('Unimplemented abstract method.')", "docstring": "This adds the node(s) to out_graphdef and returns the input node name.\n\nArgs:\n out_graphdef: A graphdef that is ready to have this input added.\n\nReturns:\n The output that the stub should use as an input for this operand.\n\nRaises:\n RuntimeError: if the method is not implemented."} +{"repo": "transformers", "function": "def decode(self, token_ids, skip_special_tokens: bool=False, clean_up_tokenization_spaces: Optional[bool]=None, output_offsets: bool=False, time_precision: float=0.02, decode_with_timestamps: bool=False, normalize: bool=False, basic_normalize: bool=False, remove_diacritics: bool=False, **kwargs) -> str:\n filtered_ids = self._preprocess_token_ids(token_ids, skip_special_tokens=skip_special_tokens)\n text = super().decode(filtered_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, normalize=normalize, basic_normalize=basic_normalize, remove_diacritics=remove_diacritics, **kwargs)\n if decode_with_timestamps:\n text = self._decode_with_timestamps(filtered_ids, time_precision=time_precision, skip_special_tokens=skip_special_tokens)\n else:\n text = self._filter_timestamp_ids(text)\n if output_offsets:\n offsets = self._compute_offsets(token_ids, time_precision=time_precision)\n return {'text': text, 'offsets': offsets}\n return text", "docstring": "Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special\ntokens and clean up tokenization spaces.\n\nSimilar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.\n\nArgs:\n token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\n List of tokenized input ids. Can be obtained using the `__call__` method.\n skip_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to remove special tokens in the decoding. Will remove the previous tokens (pre-prompt)\n if present.\n clean_up_tokenization_spaces (`bool`, *optional*):\n Whether or not to clean up the tokenization spaces. If `None`, will default to\n `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).\n output_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output the offsets of the tokens. This should only be set if the model predicted\n timestamps. If there are previous tokens (pre-prompt) to decode, they will only appear in the decoded\n text if they contain timestamp tokens.\n time_precision (`float`, *optional*, defaults to 0.02):\n The time ratio to convert from token to time.\n decode_with_timestamps (`bool`, *optional*, defaults to `False`):\n Whether or not to decode with timestamps included in the raw text.\n normalize (`bool`, *optional*, defaults to `False`):\n Whether or not to apply the English text normalizer to the decoded text. Only applicable when the\n target text is in English. Otherwise, the basic text normalizer should be applied.\n basic_normalize (`bool`, *optional*, defaults to `False`):\n Whether or not to apply the Basic text normalizer to the decoded text. 
Applicable to multilingual\n target text.\n remove_diacritics (`bool`, *optional*, defaults to `False`):\n Whether or not to remove diacritics when applying the Basic text normalizer. Removing diacritics may\n destroy information in the decoded text, hence it should be used with caution.\n kwargs (additional keyword arguments, *optional*):\n Will be passed to the underlying model specific decode method.\nReturns:\n `str`: The decoded sentence."} +{"repo": "transformers", "function": "def __call__(self, image: Union['Image.Image', str, List[Dict[str, Any]]], question: Optional[str]=None, word_boxes: Optional[Tuple[str, List[float]]]=None, **kwargs: Any) -> Union[Dict[str, Any], List[Dict[str, Any]]]:\n if isinstance(question, str):\n inputs = {'question': question, 'image': image}\n if word_boxes is not None:\n inputs['word_boxes'] = word_boxes\n else:\n inputs = image\n return super().__call__(inputs, **kwargs)", "docstring": "Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an\noptional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not\nprovided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for\nLayoutLM-like models which require them as input. For Donut, no OCR is run.\n\nYou can invoke the pipeline several ways:\n\n- `pipeline(image=image, question=question)`\n- `pipeline(image=image, question=question, word_boxes=word_boxes)`\n- `pipeline([{\"image\": image, \"question\": question}])`\n- `pipeline([{\"image\": image, \"question\": question, \"word_boxes\": word_boxes}])`\n\nArgs:\n image (`str` or `PIL.Image`):\n The pipeline handles three types of images:\n\n - A string containing a http link pointing to an image\n - A string containing a local path to an image\n - An image loaded in PIL directly\n\n The pipeline accepts either a single image or a batch of images. If given a single image, it can be\n broadcasted to multiple questions.\n question (`str`):\n A question to ask of the document.\n word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*):\n A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the\n pipeline will use these words and boxes instead of running OCR on the image to derive them for models\n that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the\n pipeline without having to re-run it each time.\n top_k (`int`, *optional*, defaults to 1):\n The number of answers to return (will be chosen by order of likelihood). Note that we return less than\n top_k answers if there are not enough options available within the context.\n doc_stride (`int`, *optional*, defaults to 128):\n If the words in the document are too long to fit with the question for the model, it will be split in\n several chunks with some overlap. This argument controls the size of that overlap.\n max_answer_len (`int`, *optional*, defaults to 15):\n The maximum length of predicted answers (e.g., only answers with a shorter length are considered).\n max_seq_len (`int`, *optional*, defaults to 384):\n The maximum length of the total sentence (context + question) in tokens of each chunk passed to the\n model. The context will be split in several chunks (using `doc_stride` as overlap) if needed.\n max_question_len (`int`, *optional*, defaults to 64):\n The maximum length of the question after tokenization. 
It will be truncated if needed.\n handle_impossible_answer (`bool`, *optional*, defaults to `False`):\n Whether or not we accept impossible as an answer.\n lang (`str`, *optional*):\n Language to use while running OCR. Defaults to english.\n tesseract_config (`str`, *optional*):\n Additional flags to pass to tesseract while running OCR.\n timeout (`float`, *optional*, defaults to None):\n The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and\n the call may block forever.\n\nReturn:\n A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys:\n\n - **score** (`float`) -- The probability associated to the answer.\n - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided\n `word_boxes`).\n - **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided\n `word_boxes`).\n - **answer** (`str`) -- The answer to the question.\n - **words** (`list[int]`) -- The index of each word/box pair that is in the answer"} +{"repo": "tensorflow", "function": "def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops, gradient_uid='__unsupported__'):\n if len(grad_ys) != len(ys):\n raise ValueError(f'Length mismatch. Passed {len(grad_ys)} grad_ys for {len(ys)} ys')\n grad_ys = indexed_slices.convert_n_to_tensor_or_indexed_slices(grad_ys, name='grad_y')\n new_grad_ys = []\n for i, (y, grad_y) in enumerate(zip(ys, grad_ys)):\n with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):\n if grad_y is None:\n if y.dtype.is_complex:\n raise TypeError(f'Gradients of complex tensors ({y}) must set grad_ys (y.dtype = {dtypes.as_dtype(y.dtype).name})')\n new_grad_ys.append(array_ops.ones(array_ops.shape(y), dtype=y.dtype, name='grad_ys_%d' % i))\n continue\n if y.dtype.is_floating or y.dtype.is_integer:\n if not grad_y.dtype.is_floating and (not grad_y.dtype.is_integer):\n raise TypeError(f'Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated for real or integer-valued tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be real or integer')\n elif y.dtype.is_complex:\n if not grad_y.dtype.is_complex:\n raise TypeError(f'Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated for complex-valued tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be real')\n elif y.dtype == dtypes.variant:\n if grad_y.dtype != dtypes.variant:\n raise TypeError(f'Gradient type {dtypes.as_dtype(grad_y.dtype).name} generated for variant tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be variant')\n elif y.dtype == dtypes.resource:\n if grad_y.dtype == dtypes.resource:\n raise TypeError(f'Input gradient {grad_y} for resource tensor {y} should not be a resource')\n else:\n raise TypeError(f'Tensor {y} with type {dtypes.as_dtype(y.dtype).name} must be numeric to obtain a default gradient')\n if isinstance(grad_y, indexed_slices.IndexedSlices):\n new_grad_ys.append(indexed_slices.IndexedSlices(indices=array_ops.identity(grad_y.indices, name='grad_ys_%d_indices' % i) if isinstance(grad_y.indices, tensor_lib.Tensor) else grad_y.indices, values=array_ops.identity(grad_y.values, name='grad_ys_%d_values' % i) if isinstance(grad_y.values, tensor_lib.Tensor) else grad_y.values, dense_shape=array_ops.identity(grad_y.dense_shape, name='grad_ys_%d_shape' % i) if isinstance(grad_y.dense_shape, tensor_lib.Tensor) else grad_y.dense_shape))\n else:\n new_grad_ys.append(array_ops.identity(grad_y, name='grad_ys_%d' % i))\n return new_grad_ys", 
"docstring": "Fill in default values for grad_ys.\n\nArgs:\n grad_ys: List of gradients, can contain None.\n ys: List of tensors.\n colocate_gradients_with_ops: If True, try colocating gradients with\n the corresponding op.\n gradient_uid: A unique identifier within the graph indicating\n which invocation of gradients is being executed. Used to cluster\n ops for compilation.\n\nReturns:\n A list of gradients to use, without None.\n\nRaises:\n ValueError: If sizes of gradients and inputs don't match\n TypeError: If type of any gradient is not valid for its input."} +{"repo": "starthinker", "function": "def recipe_policebot(config, recipe_name):\n drive(config, {'auth': 'user', 'hour': [], 'copy': {'source': 'https://docs.google.com/spreadsheets/d/1dkESiK2s8YvdC03F3t4Jk_wvxJ0NMNk8CTGxO0HQk6I', 'destination': recipe_name}})", "docstring": "A tool that helps enforce CM object name conventions by checking names against a\nset of client-defined patterns, and emailing violations to appropriate\nagency teams on a daily basis.\n\nArgs:\n recipe_name (string) - Name of document to deploy to."} +{"repo": "tensorflow", "function": "def WriteDebuggedGraph(self, debugged_graph):\n debug_event = debug_event_pb2.DebugEvent(debugged_graph=debugged_graph)\n self._EnsureTimestampAdded(debug_event)\n _pywrap_debug_events_writer.WriteDebuggedGraph(self._dump_root, debug_event)", "docstring": "Write a DebuggedGraph proto with the writer.\n\nArgs:\n debugged_graph: A DebuggedGraph proto, describing the details of a\n TensorFlow Graph that has completed its construction."} +{"repo": "tensorflow", "function": "def row_partitions(self):\n if self.rank < 2:\n return ()\n return self._ragged_shape._as_row_partitions()", "docstring": "A tuple of `RowPartition`s defining the shape of this `StructuredTensor`.\n\nWhen `self.rank <= 1`, this tuple will be empty.\n\nWhen `self.rank > 1`, these `RowPartitions` define the shape of the\n`StructuredTensor` by describing how a flat (1D) list of structures can be\nrepeatedly partitioned to form a higher-dimensional object. In particular,\nthe flat list is first partitioned into sublists using `row_partitions[-1]`,\nand then those sublists are further partitioned using `row_partitions[-2]`,\netc. 
The following examples show the row partitions used to describe\nseveral different `StructuredTensor`, each of which contains 8 copies of\nthe same structure (`x`):\n\n>>> x = {'a': 1, 'b': ['foo', 'bar', 'baz']} # shape = [] (scalar)\n\n>>> s1 = [[x, x, x, x], [x, x, x, x]] # shape = [2, 4]\n>>> tf.experimental.StructuredTensor.from_pyval(s1).row_partitions\n(tf.RowPartition(row_splits=[0 4 8]),)\n\n>>> s2 = [[x, x], [x, x], [x, x], [x, x]] # shape = [4, 2]\n>>> tf.experimental.StructuredTensor.from_pyval(s2).row_partitions\n(tf.RowPartition(row_splits=[0 2 4 6 8]),)\n\n>>> s3 = [[x, x, x], [], [x, x, x, x], [x]] # shape = [2, None]\n>>> tf.experimental.StructuredTensor.from_pyval(s3).row_partitions\n(tf.RowPartition(row_splits=[0 3 3 7 8]),)\n\n>>> s4 = [[[x, x], [x, x]], [[x, x], [x, x]]] # shape = [2, 2, 2]\n>>> tf.experimental.StructuredTensor.from_pyval(s4).row_partitions\n(tf.RowPartition(row_splits=[0 2 4]),\n tf.RowPartition(row_splits=[0 2 4 6 8]))\n\n\n>>> s5 = [[[x, x], [x]], [[x, x]], [[x, x], [x]]] # shape = [3, None, None]\n>>> tf.experimental.StructuredTensor.from_pyval(s5).row_partitions\n(tf.RowPartition(row_splits=[0 2 3 5]),\n tf.RowPartition(row_splits=[0 2 3 5 7 8]))\n\nNote that shapes for nested fields (such as `x['b']` in the above example)\nare not considered part of the shape of a `StructuredTensor`, and are not\nincluded in `row_partitions`.\n\nIf this `StructuredTensor` has a ragged shape (i.e., if any of the\n`row_partitions` is not uniform in size), then all fields will be encoded\nas either `RaggedTensor`s or `StructuredTensor`s with these `RowPartition`s\nused to define their outermost `self.rank` dimensions.\n\nReturns:\n A `tuple` of `RowPartition` objects with length `self.rank - 1`\n (or `0` if `self.rank < 2`)"} +{"repo": "mobly", "function": "def _get_user_command_string(self):\n sdk_version = int(self._device.build_info['build_version_sdk'])\n if sdk_version < 24:\n return ''\n return f'--user {self.user_id}'", "docstring": "Gets the appropriate command argument for specifying device user ID.\n\nBy default, this client operates within the current user. We\ndon't add the `--user {ID}` argument when Android's SDK is below 24,\nwhere multi-user support is not well implemented.\n\nReturns:\n A string of the command argument section to be formatted into\n adb commands."} +{"repo": "tensorflow", "function": "def should_stop(self):\n return self._stop_event.is_set()", "docstring": "Check if stop was requested.\n\nReturns:\n True if a stop was requested."} +{"repo": "tensorflow", "function": "def get_input_type_from_signature(op_signature):\n start = op_signature.find(':')\n end = op_signature.find('::OUTPUT')\n inputs = op_signature[start + 1:end]\n lst = inputs.split('::')\n out_str = ''\n for i in range(len(lst)):\n if i % 2 == 0:\n out_str += 'shape:'\n else:\n out_str += 'type:'\n out_str += lst[i]\n out_str += ','\n return out_str[:-1]", "docstring": "Parses op_signature and returns a string denoting the input tensor type.\n\nArgs:\n op_signature: a string specifying the signature of a particular operator.\n The signature of an operator contains the input tensor's shape and type,\n output tensor's shape and type, operator's name and its version. 
It has\n the following schema:\n INPUT:input_1_shape::input_1_type::input_2_shape::input_2_type::..\n ::OUTPUT:output_1_shape::output_1_type::output_2_shape::output_2_type::\n ..::NAME:operator_name ::VERSION:operator_version\n An example of an operator signature is:\n INPUT:[1,73,73,160]::float::[64,1,1,160]::float::[64]::float::\n OUTPUT:[1,73,73,64]::float::NAME:Conv::VERSION:1\n\nReturns:\n A string denoting the input tensors' type. In the form of shape/type\n separated\n by comma. For example:\n shape:[1,73,73,160],type:float,shape:[64,1,1,160],type:float,shape:[64],\n type:float"} +{"repo": "tensorflow", "function": "def __init__(self, name, freevars, extra_locals):\n self._name = name\n self._freevars = freevars\n self._extra_locals = extra_locals\n self._unbound_factory = None\n self.module = None\n self.source_map = None", "docstring": "Creates a new factory for a Python function.\n\nArgs:\n name: The function name.\n freevars: The list of non-global free variables for the function.\n extra_locals: Dict[Text, Any], names and values for custom variables that\n are accessible to the generated code as local variables."} +{"repo": "transformers", "function": "def forward(self, image_embeddings: torch.Tensor, image_positional_embeddings: torch.Tensor, sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, multimask_output: bool, output_attentions: Optional[bool]=None, attention_similarity: Optional[torch.Tensor]=None, target_embedding: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n batch_size, num_channels, height, width = image_embeddings.shape\n point_batch_size = sparse_prompt_embeddings.shape[1]\n output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\n output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1)\n if sparse_prompt_embeddings.sum().item() != 0:\n tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2)\n else:\n tokens = output_tokens\n point_embeddings = tokens.to(self.iou_token.weight.dtype)\n image_embeddings = image_embeddings + dense_prompt_embeddings\n image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0)\n image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0)\n point_embedding, image_embeddings, attentions = self.transformer(point_embeddings=point_embeddings, image_embeddings=image_embeddings, image_positional_embeddings=image_positional_embeddings, attention_similarity=attention_similarity, target_embedding=target_embedding, output_attentions=output_attentions)\n iou_token_out = point_embedding[:, :, 0, :]\n mask_tokens_out = point_embedding[:, :, 1:1 + self.num_mask_tokens, :]\n image_embeddings = image_embeddings.transpose(2, 3).reshape(batch_size * point_batch_size, num_channels, height, width)\n upscaled_embedding = self.upscale_conv1(image_embeddings)\n upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))\n upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding))\n hyper_in_list = []\n for i in range(self.num_mask_tokens):\n current_mlp = self.output_hypernetworks_mlps[i]\n hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])]\n hyper_in = torch.stack(hyper_in_list, dim=2)\n _, num_channels, height, width = upscaled_embedding.shape\n upscaled_embedding = upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width)\n masks = (hyper_in @ upscaled_embedding).reshape(batch_size, point_batch_size, -1, height, width)\n 
iou_pred = self.iou_prediction_head(iou_token_out)\n if multimask_output:\n mask_slice = slice(1, None)\n else:\n mask_slice = slice(0, 1)\n masks = masks[:, :, mask_slice, :, :]\n iou_pred = iou_pred[:, :, mask_slice]\n outputs = (masks, iou_pred)\n if output_attentions:\n outputs = outputs + (attentions,)\n else:\n outputs = outputs + (None,)\n return outputs", "docstring": "Predict masks given image and prompt embeddings.\n\nArgs:\n image_embeddings (`torch.Tensor`):\n the embeddings from the image encoder\n image_positional_embedding (`torch.Tensor`):\n positional encoding with the shape of image_embeddings\n sparse_prompt_embeddings (`torch.Tensor`):\n The embeddings of the points and boxes\n dense_prompt_embeddings (`torch.Tensor`):\n the embeddings of the mask inputs\n multimask_output (bool):\n Whether to return multiple masks or a single mask.\n output_attentions (bool, *optional*):\n Whether or not to return the attentions tensors of all attention layers."} +{"repo": "tf-quant-finance", "function": "def _sobol_generating_matrices(dim: types.IntTensor, log_num_results: types.IntTensor, num_digits: types.IntTensor, dtype=None) -> types.IntTensor:\n global _INITIAL_DIRECTION_NUMBERS\n global _PRIMITIVE_POLYNOMIAL_COEFFICIENTS\n dtype = dtype or tf.int32\n zero = tf.constant(0, dtype=dtype)\n indices = tf.cast(tf.range(0, log_num_results), dtype)\n dimensions = tf.range(0, dim)\n directions = tf.convert_to_tensor(_INITIAL_DIRECTION_NUMBERS, dtype=dtype, name='direction_numbers')\n padding = log_num_results - utils.get_shape(directions)[0]\n padding = tf.math.maximum(zero, padding)\n directions = tf.pad(directions, [[zero, padding], [zero, zero]])\n directions = directions[:log_num_results]\n directions = tf.gather(directions, dimensions, axis=1)\n directions = tf.cast(tf.transpose(directions), dtype)\n polynomial = tf.convert_to_tensor(_PRIMITIVE_POLYNOMIAL_COEFFICIENTS, dtype=dtype, name='polynomial_coefficients')\n polynomial = tf.cast(tf.gather(polynomial, tf.expand_dims(dimensions, axis=1)), dtype)\n degree = tf.cast(tf.math.floor(utils.log2(tf.cast(polynomial, dtype=tf.float32))), dtype=dtype)\n initial_matrices = tf.bitwise.left_shift(directions, tf.cast(tf.expand_dims(num_digits - 1 - indices, axis=0), dtype))\n\n def loop_predicate_fn(matrix_values, column):\n del matrix_values\n return column < log_num_results - 1\n\n def loop_body_fn(matrices, column):\n column_values = tf.gather(matrices, [column], axis=1)\n should_be_updated = tf.logical_and(tf.less_equal(tf.math.maximum(degree, column + 1), indices), tf.less_equal(indices, column + degree))\n updated_matrices = tf.bitwise.bitwise_xor(tf.where(tf.equal(indices, column + degree), tf.bitwise.right_shift(column_values, degree), matrices), utils.filter_tensor(column_values, polynomial, column + degree - indices))\n returned_matrices = tf.where(should_be_updated, updated_matrices, matrices)\n return (returned_matrices, column + 1)\n matrices, _ = tf.while_loop(loop_predicate_fn, loop_body_fn, loop_vars=(initial_matrices, tf.constant(0, dtype)), maximum_iterations=tf.cast(log_num_results, tf.int32) - 1)\n return matrices", "docstring": "Returns all Sobol generating matrices.\n\nArgs:\n dim: Positive scalar `Tensor` with rank 0 representing the event size of\n points which can be sampled from the resulting generating matrix.\n log_num_results: Positive scalar `Tensor` with rank 0 representing the\n base-2 logarithm of the maximum number of points which can be sampled from\n the resulting generating matrix.\n num_digits: Positive 
scalar `Tensor` with rank 0 representing the base-2\n precision of points which can be sampled from the resulting generating\n matrix.\n dtype: Optional `dtype`. The `dtype` of the output `Tensor` (either a signed\n or unsigned integer `dtype`).\n Default value: `None` which maps to `int32`.\n\nReturns:\n A `Tensor` with shape `(dim, ceil(log2(num_results)))`."} +{"repo": "transformers", "function": "def split_to_tiles(image: np.ndarray, num_tiles_height: int, num_tiles_width: int) -> np.ndarray:\n num_channels, height, width = image.shape\n tile_height = height // num_tiles_height\n tile_width = width // num_tiles_width\n image = image.reshape(num_channels, num_tiles_height, tile_height, num_tiles_width, tile_width)\n image = image.transpose(1, 3, 0, 2, 4)\n image = image.reshape(num_tiles_width * num_tiles_height, num_channels, tile_height, tile_width)\n return np.ascontiguousarray(image)", "docstring": "Split an image into a specified number of tiles along its width and height dimensions.\n\nArgs:\n image (`np.ndarray`):\n Input image with shape (num_channels, height, width).\n num_tiles_height (`int`):\n Number of tiles to split the image into along its height.\n num_tiles_width (`int`):\n Number of tiles to split the image into along its width.\n\nReturns:\n `np.ndarray`:\n Array of image tiles with shape (num_tiles_width * num_tiles_height, num_channels, tile_height, tile_width)."} +{"repo": "transformers", "function": "def _compute_posterior(self, likelihoods_watermarked: torch.Tensor, likelihoods_unwatermarked: torch.Tensor, mask: torch.Tensor, prior: float) -> torch.Tensor:\n mask = torch.unsqueeze(mask, dim=-1)\n prior = torch.clamp(prior, min=1e-05, max=1 - 1e-05)\n log_likelihoods_watermarked = torch.log(torch.clamp(likelihoods_watermarked, min=1e-30, max=float('inf')))\n log_likelihoods_unwatermarked = torch.log(torch.clamp(likelihoods_unwatermarked, min=1e-30, max=float('inf')))\n log_odds = log_likelihoods_watermarked - log_likelihoods_unwatermarked\n relative_surprisal_likelihood = torch.einsum('i...->i', log_odds * mask)\n relative_surprisal_prior = torch.log(prior) - torch.log(1 - prior)\n relative_surprisal = relative_surprisal_prior + relative_surprisal_likelihood\n return torch.sigmoid(relative_surprisal)", "docstring": "Compute posterior P(w|g) given likelihoods, mask and prior.\n\nArgs:\n likelihoods_watermarked (`torch.Tensor` of shape `(batch, length, depth)`):\n Likelihoods P(g_values|watermarked) of g-values under watermarked model.\n likelihoods_unwatermarked (`torch.Tensor` of shape `(batch, length, depth)`):\n Likelihoods P(g_values|unwatermarked) of g-values under unwatermarked model.\n mask (`torch.Tensor` of shape `(batch, length)`):\n A binary array indicating which g-values should be used. g-values with mask value 0 are discarded.\n prior (`float`):\n the prior probability P(w) that the text is watermarked.\n\nReturns:\n Posterior probability P(watermarked|g_values), shape [batch]."} +{"repo": "tensorflow", "function": "def bessel_i0(x, name=None):\n with ops.name_scope(name, 'bessel_i0', [x]):\n return gen_special_math_ops.bessel_i0(x)", "docstring": "Computes the Bessel i0 function of `x` element-wise.\n\nModified Bessel function of order 0.\n\nIt is preferable to use the numerically stabler function `i0e(x)` instead.\n\n>>> tf.math.special.bessel_i0([-1., -0.5, 0.5, 1.]).numpy()\narray([1.26606588, 1.06348337, 1.06348337, 1.26606588], dtype=float32)\n\nArgs:\n x: A `Tensor` or `SparseTensor`. 
Must be one of the following types: `half`,\n `float32`, `float64`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.i0\n@end_compatibility"} +{"repo": "transformers", "function": "def forward(self, input_ids: torch.LongTensor=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, pixel_values: Optional[torch.Tensor]=None, pixel_values_videos: Optional[torch.FloatTensor]=None, image_grid_thw: Optional[torch.LongTensor]=None, video_grid_thw: Optional[torch.LongTensor]=None, rope_deltas: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, second_per_grid_ts: Optional[torch.Tensor]=None, **kwargs: Unpack[KwargsForCausalLM]) -> Union[Tuple, Qwen2_5_VLCausalLMOutputWithPast]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, pixel_values_videos=pixel_values_videos, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, second_per_grid_ts=second_per_grid_ts, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs)\n hidden_states = outputs[0]\n logits = self.lm_head(hidden_states)\n loss = None\n if labels is not None:\n loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size)\n return Qwen2_5_VLCausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, rope_deltas=outputs.rope_deltas)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\npixel_values_videos (`torch.FloatTensor` of shape `(seq_length, num_channels * temporal_size * image_size * image_size)):\n The tensors corresponding to the input videos. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`Qwen2VLImageProcessor.__call__`] for details. 
[`Qwen2_5_VLProcessor`] uses\n [`Qwen2VLImageProcessor`] for processing videos.\nimage_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):\n The temporal, height and width of feature shape of each image in LLM.\nvideo_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):\n The temporal, height and width of feature shape of each video in LLM.\nrope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):\n The rope index difference between sequence length and multimodal rope.\nsecond_per_grid_ts (`torch.Tensor` of shape `(num_videos)`, *optional*):\n The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.\n\nExample:\n\n```python\n>>> from PIL import Image\n>>> import requests\n>>> from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration\n\n>>> model = Qwen2_5_VLForConditionalGeneration.from_pretrained(\"Qwen/Qwen2.5-VL-7B-Instruct\")\n>>> processor = AutoProcessor.from_pretrained(\"Qwen/Qwen2.5-VL-7B-Instruct\")\n\n>>> messages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\"},\n {\"type\": \"text\", \"text\": \"What is shown in this image?\"},\n ],\n },\n]\n>>> url = \"https://www.ilankelman.org/stopsigns/australia.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n>>> inputs = processor(text=[text], images=[image])\n\n>>> # Generate\n>>> generate_ids = model.generate(**inputs, max_length=30)\n>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n\"The image shows a street scene with a red stop sign in the foreground. In the background, there is a large red gate with Chinese characters ...\"\n```"} +{"repo": "beam", "function": "def run(argv=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n known_args, pipeline_args = parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n pipeline = test_pipeline\n if not test_pipeline:\n pipeline = beam.Pipeline(options=pipeline_options)\n model_handler = HuggingFacePipelineModelHandler(task=PipelineTask.QuestionAnswering, model=known_args.model_name, load_model_args={'framework': 'pt', 'revision': known_args.revision})\n if not known_args.input:\n text = pipeline | 'CreateSentences' >> beam.Create(['What does Apache Beam do?;Apache Beam enables batch and streaming data processing.', 'What is the capital of France?;The capital of France is Paris .', 'Where was beam summit?;Apache Beam Summit 2023 was in NYC.'])\n else:\n text = pipeline | 'ReadSentences' >> beam.io.ReadFromText(known_args.input)\n processed_text = text | 'PreProcess' >> beam.ParDo(preprocess) | 'SquadExample' >> beam.ParDo(create_squad_example)\n output = processed_text | 'RunInference' >> RunInference(KeyedModelHandler(model_handler)) | 'ProcessOutput' >> beam.ParDo(PostProcessor())\n _ = output | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n result = pipeline.run()\n result.wait_until_finish()\n return result", "docstring": "Args:\n argv: Command line arguments defined for this example.\n save_main_session: Used for internal testing.\n test_pipeline: Used for internal testing."} +{"repo": "transformers", "function": "class DacDecoderOutput(ModelOutput):\n audio_values: 
Optional[torch.FloatTensor] = None", "docstring": "Args:\n audio_values (`torch.FloatTensor` of shape `(batch_size, input_length)`, *optional*):\n Decoded audio values, obtained using the decoder part of Dac."} +{"repo": "fhir-py", "function": "def _find_resource_in_bundle(uri: str, bundle_json: Dict[str, Any]) -> Optional[Dict[str, Any]]:\n for entry in bundle_json.get('entry', ()):\n resource = entry.get('resource', {})\n if resource.get('url') == uri:\n return resource\n elif resource.get('resourceType') == 'Bundle':\n bundled_resource = _find_resource_in_bundle(uri, resource)\n if bundled_resource is not None:\n return bundled_resource\n return None", "docstring": "Finds the JSON object for the resource with `uri` inside `bundle_json`.\n\nArgs:\n uri: The resource URI to search for.\n bundle_json: Parsed JSON object for the bundle to search\n\nReturns:\n Parsed JSON object for the resource or `None` if it can not be found."} +{"repo": "tensorflow", "function": "def inferred_steps(self):\n return self._inferred_steps", "docstring": "The inferred steps per epoch of the created `Dataset`.\n\nThis will be `None` in the case where:\n\n(1) A `Dataset` of unknown cardinality was passed to the `DataHandler`, and\n(2) `steps_per_epoch` was not provided, and\n(3) The first epoch of iteration has not yet completed.\n\nReturns:\n The inferred steps per epoch of the created `Dataset`."} +{"repo": "tensorflow", "function": "def assert_no_entries_with_modulus_zero(x, message=None, name='assert_no_entries_with_modulus_zero'):\n with ops.name_scope(name, values=[x]):\n x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name='x')\n dtype = x.dtype.base_dtype\n should_be_nonzero = math_ops.abs(x)\n zero = tensor_conversion.convert_to_tensor_v2_with_dispatch(0, dtype=dtype.real_dtype)\n return check_ops.assert_less(zero, should_be_nonzero, message=message)", "docstring": "Returns `Op` that asserts Tensor `x` has no entries with modulus zero.\n\nArgs:\n x: Numeric `Tensor`, real, integer, or complex.\n message: A string message to prepend to failure message.\n name: A name to give this `Op`.\n\nReturns:\n An `Op` that asserts `x` has no entries with modulus zero."} +{"repo": "tensorflow", "function": "def reduce(self, reduce_op, value, axis):\n return super(CentralStorageStrategy, self).reduce(reduce_op, value, axis)", "docstring": "Reduce `value` across replicas.\n\nGiven a per-replica value returned by `run`, say a\nper-example loss, the batch will be divided across all the replicas. This\nfunction allows you to aggregate across replicas and optionally also across\nbatch elements. For example, if you have a global batch size of 8 and 2\nreplicas, values for examples `[0, 1, 2, 3]` will be on replica 0 and\n`[4, 5, 6, 7]` will be on replica 1. By default, `reduce` will just\naggregate across replicas, returning `[0+4, 1+5, 2+6, 3+7]`. This is useful\nwhen each replica is computing a scalar or some other value that doesn't\nhave a \"batch\" dimension (like a gradient). More often you will want to\naggregate across the global batch, which you can get by specifying the batch\ndimension as the `axis`, typically `axis=0`. In this case it would return a\nscalar `0+1+2+3+4+5+6+7`.\n\nIf there is a last partial batch, you will need to specify an axis so\nthat the resulting shape is consistent across replicas. So if the last\nbatch has size 6 and it is divided into [0, 1, 2, 3] and [4, 5], you\nwould get a shape mismatch unless you specify `axis=0`. 
If you specify\n`tf.distribute.ReduceOp.MEAN`, using `axis=0` will use the correct\ndenominator of 6. Contrast this with computing `reduce_mean` to get a\nscalar value on each replica and this function to average those means,\nwhich will weigh some values `1/8` and others `1/4`.\n\nFor Example:\n```\nstrategy = tf.distribute.experimental.CentralStorageStrategy(\n compute_devices=['CPU:0', 'GPU:0'], parameter_device='CPU:0')\nds = tf.data.Dataset.range(10)\n# Distribute that dataset\ndist_dataset = strategy.experimental_distribute_dataset(ds)\n\nwith strategy.scope():\n @tf.function\n def train_step(val):\n # pass through\n return val\n\n # Iterate over the distributed dataset\n for x in dist_dataset:\n result = strategy.run(train_step, args=(x,))\n\nresult = strategy.reduce(tf.distribute.ReduceOp.SUM, result,\n axis=None).numpy()\n# result: array([ 4, 6, 8, 10])\n\nresult = strategy.reduce(tf.distribute.ReduceOp.SUM, result, axis=0).numpy()\n# result: 28\n```\n\nArgs:\n reduce_op: A `tf.distribute.ReduceOp` value specifying how values should\n be combined.\n value: A \"per replica\" value, e.g. returned by `run` to\n be combined into a single tensor.\n axis: Specifies the dimension to reduce along within each\n replica's tensor. Should typically be set to the batch dimension, or\n `None` to only reduce across replicas (e.g. if the tensor has no batch\n dimension).\n\nReturns:\n A `Tensor`."} +{"repo": "beam", "function": "def __init__(self, topic, with_attributes=False, id_label=None, timestamp_attribute=None, expansion_service=None):\n self.params = WriteToPubsubSchema(topic=topic, id_label=id_label, timestamp_attribute=timestamp_attribute)\n self.expansion_service = expansion_service\n self.with_attributes = with_attributes", "docstring": "Initializes ``WriteToPubSub``.\n\nArgs:\n topic: Cloud Pub/Sub topic in the form \"/topics/<project>/<topic>\".\n with_attributes:\n True - input elements will be\n :class:`~apache_beam.io.gcp.pubsub.PubsubMessage` objects.\n False - input elements will be of type ``bytes`` (message\n data only).\n id_label: If set, will set an attribute for each Cloud Pub/Sub message\n with the given name and a unique value. 
This attribute can then be used\n in a ReadFromPubSub PTransform to deduplicate messages.\n timestamp_attribute: If set, will set an attribute for each Cloud Pub/Sub\n message with the given name and the message's publish time as the value."} +{"repo": "pytype", "function": "def _get_python_exe_version(python_exe: list[str]):\n try:\n python_exe_version = subprocess.check_output(python_exe + ['-V'], stderr=subprocess.STDOUT).decode()\n except (subprocess.CalledProcessError, FileNotFoundError):\n return None\n return _parse_exe_version_string(python_exe_version)", "docstring": "Determine the major and minor version of given Python executable.\n\nArguments:\n python_exe: absolute path to the Python executable\n\nReturns:\n Version as (major, minor) tuple, or None if it could not be determined."} +{"repo": "transformers", "function": "def preprocess(self, images, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: Optional[PILImageResampling]=None, do_pad: Optional[bool]=None, padding_value: Optional[float]=None, padding_mode: Optional[str]=None, do_normalize: Optional[bool]=None, image_mean: Optional[float]=None, image_std: Optional[float]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, patch_size: Optional[Dict[str, int]]=None, data_format: Optional[Union[str, ChannelDimension]]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None, return_tensors: Optional[TensorType]=None):\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n resample = resample if resample is not None else self.resample\n do_pad = do_pad if do_pad is not None else self.do_pad\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n padding_value = padding_value if padding_value is not None else self.padding_value\n padding_mode = padding_mode if padding_mode is not None else self.padding_mode\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n patch_size = patch_size if patch_size is not None else self.patch_size\n if isinstance(images, list) and any((isinstance(elem, list) and len(elem) >= 2 for elem in images)):\n raise ValueError('Multiple images for a single sample are not yet supported.')\n batch_images = make_list_of_list_of_images(images)\n validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_pad=do_pad, size_divisibility=size, do_resize=do_resize, size=size, resample=resample)\n batch_images = [[to_numpy_array(image) for image in images] for images in batch_images]\n if do_rescale and is_scaled_image(batch_images[0][0]):\n logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(batch_images[0][0])\n original_image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]\n size = get_size_dict(size)\n if do_resize:\n batch_images = [[self.resize(image, size=size, input_data_format=input_data_format) for image in images] for images in batch_images]\n image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]\n image_unpadded_heights = [[image_size[0]] for image_size in image_sizes]\n image_unpadded_widths = [[image_size[1]] for image_size in image_sizes]\n image_scale_factors = [[resized_size[0] / original_size[0]] for original_size, resized_size in zip(original_image_sizes, image_sizes)]\n if do_pad:\n batch_images = [[self.pad_image(image, size=size, mode=padding_mode, constant_values=padding_value, input_data_format=input_data_format) for image in images] for images in batch_images]\n if do_rescale:\n batch_images = [[self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images] for images in batch_images]\n if do_normalize:\n batch_images = [[self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images] for images in batch_images]\n if data_format is not None:\n batch_images = [[to_channel_dimension_format(image, data_format, input_data_format) for image in images] for images in batch_images]\n data = {'images': batch_images, 'image_unpadded_heights': image_unpadded_heights, 'image_unpadded_widths': image_unpadded_widths, 'image_scale_factors': image_scale_factors}\n return FuyuBatchFeature(data=data, tensor_type=return_tensors)", "docstring": "Utility function to preprocess the images and extract necessary information about original formats.\n\nArgs:\n images (`ImageInput`):\n Images to preprocess. Expects a single image, a list or images or a list of lists of images. Pixel\n values range from 0 to 255, or between 0 and 1 if `do_rescale` is `False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image to `size`.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `self.resample`):\n `PILImageResampling` filter to use when resizing the image e.g. 
`PILImageResampling.BILINEAR`.\n do_pad (`bool`, *optional*, defaults to `self.do_pad`):\n Whether to pad the image to `size`.\n padding_value (`float`, *optional*, defaults to `self.padding_value`):\n The value to pad the image with.\n padding_mode (`str`, *optional*, defaults to `self.padding_mode`):\n The padding mode to use when padding the image.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float`, *optional*, defaults to `self.image_mean`):\n The mean to use when normalizing the image.\n image_std (`float`, *optional*, defaults to `self.image_std`):\n The standard deviation to use when normalizing the image.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n The factor to use when rescaling the image.\n patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the patches.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format of the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format."} +{"repo": "transformers", "function": "def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.return_dict\n if attention_mask is None:\n attention_mask = jnp.ones_like(input_ids)\n if position_ids is None:\n batch_size, sequence_length = input_ids.shape\n position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))\n rngs = {}\n if dropout_rng is not None:\n rngs['dropout'] = dropout_rng\n\n def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):\n encode_module = module._get_encoder_module()\n return encode_module(input_ids, attention_mask, position_ids, **kwargs)\n return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)", "docstring": "Returns:\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration\n\n>>> model = FlaxMBartForConditionalGeneration.from_pretrained(\"facebook/mbart-large-cc25\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/mbart-large-cc25\")\n\n>>> text = \"My friends are cool but they eat too many carbs.\"\n>>> inputs = tokenizer(text, max_length=1024, return_tensors=\"jax\")\n>>> encoder_outputs = model.encode(**inputs)\n```"} +{"repo": "tensorflow", "function": "def __init__(self, sess_creator):\n self._sess_creator = sess_creator\n _WrappedSession.__init__(self, self._create_session())", "docstring": "Create a new `_RecoverableSession`.\n\nThe value returned by calling `sess_creator.create_session()` will be the\nsession wrapped by this recoverable session.\n\nArgs:\n sess_creator: A 'SessionCreator' to be wrapped by recoverable."} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> torch.Tensor:\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None\n hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = 
residual + hidden_states\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights,)\n if use_cache:\n outputs += (present_key_value,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(attention_heads,)`.\n past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail."} +{"repo": "tensorflow", "function": "def __init__(self, session_root, watch_fn=None, thread_name_filter=None):\n self._session_root = session_root\n self._watch_fn = watch_fn\n self._thread_name_filter = thread_name_filter\n self._session_wrapper = None", "docstring": "Create a local debugger command-line interface (CLI) hook.\n\nArgs:\n session_root: See doc of\n `dumping_wrapper.DumpingDebugWrapperSession.__init__`.\n watch_fn: See doc of\n `dumping_wrapper.DumpingDebugWrapperSession.__init__`.\n thread_name_filter: Regular-expression white list for threads on which the\n wrapper session will be active. 
See doc of `BaseDebugWrapperSession` for\n more details."} +{"repo": "fhir-py", "function": "def _create_scalar_select(lhs_result: _sql_data_types.StandardSqlExpression, rhs_result: _sql_data_types.StandardSqlExpression, scalar_check_op: str, sql_data_type: _sql_data_types.StandardSqlDataType, sql_alias: str):\n return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(f'({lhs_result.as_operand()} {scalar_check_op} {rhs_result.as_operand()})', _sql_data_type=sql_data_type, _sql_alias=sql_alias), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Construct a Spark SQL select statement for scalar values.\n\nArgs:\n lhs_result: The result of the left-hand side expression.\n rhs_result: The result of the right-hand side expression.\n scalar_check_op: The scalar operation to be applied ('=' or '!=').\n sql_data_type: The SQL data type for the result.\n sql_alias: The SQL alias for the result.\n\nReturns:\n A compiled Spark SQL select statement."} +{"repo": "transformers", "function": "def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n dtype = q.dtype\n q = q.float()\n k = k.float()\n cos = cos.unsqueeze(unsqueeze_dim)\n sin = sin.unsqueeze(unsqueeze_dim)\n q_embed = q * cos + rotate_half(q) * sin\n k_embed = k * cos + rotate_half(k) * sin\n return (q_embed.to(dtype=dtype), k_embed.to(dtype=dtype))", "docstring": "Applies Rotary Position Embedding to the query and key tensors.\n\nArgs:\n q (`torch.Tensor`): The query tensor.\n k (`torch.Tensor`): The key tensor.\n cos (`torch.Tensor`): The cosine part of the rotary embedding.\n sin (`torch.Tensor`): The sine part of the rotary embedding.\n position_ids (`torch.Tensor`, *optional*):\n Deprecated and unused.\n unsqueeze_dim (`int`, *optional*, defaults to 1):\n The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. 
Similarly, if q and k have\n the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\nReturns:\n `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding."} +{"repo": "tensorflow", "function": "def getargspec(obj):\n if isinstance(obj, functools.partial):\n return _get_argspec_for_partial(obj)\n decorators, target = tf_decorator.unwrap(obj)\n spec = next((d.decorator_argspec for d in decorators if d.decorator_argspec is not None), None)\n if spec:\n return spec\n try:\n return _getargspec(target)\n except TypeError:\n pass\n if isinstance(target, type):\n try:\n return _getargspec(target.__init__)\n except TypeError:\n pass\n try:\n return _getargspec(target.__new__)\n except TypeError:\n pass\n return _getargspec(type(target).__call__)", "docstring": "TFDecorator-aware replacement for `inspect.getargspec`.\n\nNote: `getfullargspec` is recommended as the python 2/3 compatible\nreplacement for this function.\n\nArgs:\n obj: A function, partial function, or callable object, possibly decorated.\n\nReturns:\n The `ArgSpec` that describes the signature of the outermost decorator that\n changes the callable's signature, or the `ArgSpec` that describes\n the object if not decorated.\n\nRaises:\n ValueError: When callable's signature can not be expressed with\n ArgSpec.\n TypeError: For objects of unsupported types."} +{"repo": "keras", "function": "def max_epochs():\n return _MAX_EPOCHS", "docstring": "Get the maximum number of epochs for any call to fit.\n\nRetrieves the limit on the number of epochs set by\n`keras.config.set_max_epochs` or the `KERAS_MAX_EPOCHS` environment\nvariable.\n\nReturns:\n The integer limit on the number of epochs or `None`, if no limit has\n been set."} +{"repo": "transformers", "function": "def _resize_output_size_rescale_to_max_len(height: int, width: int, min_len: Optional[int]=1, max_len: Optional[int]=None) -> Tuple[int, int]:\n max_len = max(height, width) if max_len is None else max_len\n aspect_ratio = width / height\n if width >= height:\n width = max_len\n height = int(width / aspect_ratio)\n if height % 2 != 0:\n height += 1\n elif height > width:\n height = max_len\n width = int(height * aspect_ratio)\n if width % 2 != 0:\n width += 1\n height = max(height, min_len)\n width = max(width, min_len)\n return (height, width)", "docstring": "Get the output size of the image after resizing given the minimum and maximum allowed lengths.\nArgs:\n height (`int`):\n Height of the input image.\n width (`int`):\n Width of the input image.\n min_len (`int`, *optional*, defaults to 1):\n Minimum size of the output image.\n max_len (`int`, *optional*, defaults to the maximum size of the image):\n Maximum size of the output image.\nReturns:\n The output size of the image after resizing."} +{"repo": "fhir-py", "function": "def __init__(self, value_set_codes_table: Optional[bigquery.TableReference]=None, value_set_codes_definitions: Optional[fhir_package.FhirPackageManager]=None) -> None:\n self._value_set_codes_table = value_set_codes_table\n self._value_set_codes_definitions = value_set_codes_definitions\n self._use_resource_alias = None", "docstring": "Creates a BigQuerySqlInterpreter.\n\nArgs:\n value_set_codes_table: The name of the database table containing value set\n code definitions. Used when building SQL for memberOf expressions. If\n given, value set definitions needed for memberOf expressions will be\n retrieved from this table if they can not be found in\n `value_set_codes_definitions`. 
If neither this nor\n `value_set_codes_definitions` is given, no memberOf SQL will be\n generated.\n value_set_codes_definitions: A package manager containing value set\n definitions which can be used to build SQL for memberOf expressions.\n These value set definitions can be consulted in favor of using an\n external `value_set_codes_table`. If neither this nor\n `value_set_codes_definitions` is given, no memberOf SQL will be\n generated."} +{"repo": "transformers", "function": "def _get_token_budget(self, question_tokens, max_length=None):\n return (max_length if max_length is not None else self.model_max_length) - self._question_encoding_cost(question_tokens)", "docstring": "Computes the number of tokens left for the table after tokenizing a question, taking into account the max\nsequence length of the model.\n\nArgs:\n question_tokens (`List[String]`):\n List of question tokens.\n\nReturns:\n `int`: the number of tokens left for the table, given the model max length."} +{"repo": "tensorflow", "function": "def __init__(self, boundaries, values, name=None):\n super(PiecewiseConstantDecay, self).__init__()\n if len(boundaries) != len(values) - 1:\n raise ValueError('The length of boundaries should be 1 less than the length of values')\n self.boundaries = boundaries\n self.values = values\n self.name = name", "docstring": "Piecewise constant from boundaries and interval values.\n\nArgs:\n boundaries: A list of `Tensor`s or `int`s or `float`s with strictly\n increasing entries, and with all elements having the same type as the\n optimizer step.\n values: A list of `Tensor`s or `float`s or `int`s that specifies the\n values for the intervals defined by `boundaries`. It should have one\n more element than `boundaries`, and all elements should have the same\n type.\n name: A string. Optional name of the operation. Defaults to\n 'PiecewiseConstant'.\n\nRaises:\n ValueError: if the number of elements in the lists do not match."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return self.prefix_tokens + token_ids_0 + self.suffix_tokens\n return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens", "docstring": "Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and\nadding special tokens. An NLLB sequence has the following format, where `X` represents the sequence:\n\n- `input_ids` (for encoder) `X [eos, src_lang_code]`\n- `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`\n\nBOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a\nseparator.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "tensorflow", "function": "def _maybe_add_call_warning(self, node, full_name, name):\n warned = False\n if isinstance(node.func, ast.Attribute):\n warned = self._maybe_add_warning(node, '*.' 
+ name)\n arg_warnings = self._get_applicable_dict('function_arg_warnings', full_name, name)\n variadic_args = uses_star_args_or_kwargs_in_call(node)\n for (kwarg, arg), (level, warning) in sorted(arg_warnings.items()):\n present, _ = get_arg_value(node, kwarg, arg) or variadic_args\n if present:\n warned = True\n warning_message = warning.replace('<function name>', full_name or name)\n template = '%s called with %s argument, requires manual check: %s'\n if variadic_args:\n template = '%s called with *args or **kwargs that may include %s, requires manual check: %s'\n self.add_log(level, node.lineno, node.col_offset, template % (full_name or name, kwarg, warning_message))\n return warned", "docstring": "Print a warning when specific functions are called with selected args.\n\nThe function _print_warning_for_function matches the full name of the called\nfunction, e.g., tf.foo.bar(). This function matches the function name that\nis called, as long as the function is an attribute. For example,\n`tf.foo.bar()` and `foo.bar()` are matched, but not `bar()`.\n\nArgs:\n node: ast.Call object\n full_name: The precomputed full name of the callable, if one exists, None\n otherwise.\n name: The precomputed name of the callable, if one exists, None otherwise.\n\nReturns:\n Whether an error was recorded."} +{"repo": "transformers", "function": "class TFSeq2SeqSequenceClassifierOutput(ModelOutput):\n loss: tf.Tensor | None = None\n logits: Optional[tf.Tensor] = None\n past_key_values: List[tf.Tensor] | None = None\n decoder_hidden_states: Tuple[tf.Tensor] | None = None\n decoder_attentions: Tuple[tf.Tensor] | None = None\n cross_attentions: Tuple[tf.Tensor] | None = None\n encoder_last_hidden_state: tf.Tensor | None = None\n encoder_hidden_states: Tuple[tf.Tensor] | None = None\n encoder_attentions: Tuple[tf.Tensor] | None = None", "docstring": "Base class for outputs of sequence-to-sequence sentence classification models.\n\nArgs:\n loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `label` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`tf.Tensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,\n sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be\n used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.\n decoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attention weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(tf.Tensor)`, *optional*, returned when 
`output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`\n encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.\n encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attention weights of the encoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads."} +{"repo": "nsscache", "function": "def ParseNSSwitchConf(nsswitch_filename):\n with open(nsswitch_filename, 'r') as nsswitch_file:\n nsswitch = {}\n map_re = re.compile('^([a-z]+): *(.*)$')\n for line in nsswitch_file:\n match = map_re.match(line)\n if match:\n sources = match.group(2).split()\n nsswitch[match.group(1)] = sources\n return nsswitch", "docstring": "Parse /etc/nsswitch.conf and return the sources for each map.\n\nArgs:\n nsswitch_filename: Full path to an nsswitch.conf to parse. See manpage\n nsswitch.conf(5) for full details on the format expected.\n\nReturns:\n a dictionary keyed by map names and containing a list of sources\n for each map."} +{"repo": "tf-quant-finance", "function": "def equity_leg_cashflows(forward_prices, spots, notional, dividends=None, dtype=None, name=None):\n name = name or 'equity_leg_cashflows'\n with tf.name_scope(name):\n forward_prices = tf.convert_to_tensor(forward_prices, dtype=dtype, name='forward_prices')\n dtype = dtype or forward_prices.dtype\n spots = tf.convert_to_tensor(spots, dtype=dtype, name='spots')\n notional = tf.convert_to_tensor(notional, dtype=dtype, name='notional')\n dividends = 0 if dividends is None else dividends\n dividends = tf.convert_to_tensor(dividends, dtype=dtype, name='dividends')\n spots_expand = tf.expand_dims(spots, axis=-1)\n forward_prices = tf.concat([spots_expand, forward_prices], axis=-1)\n return tf.math.divide_no_nan(notional * (forward_prices[..., 1:] - forward_prices[..., :-1]) + dividends, forward_prices[..., :-1])", "docstring": "Computes cashflows for a batch of equity legs.\n\nEquity cashflows are defined as a total equity return between pay dates, say,\n`T_1, ..., T_n`. Let `S_i` represent the value of the equity at time `T_i` and\n`d_i` be a discrete dividend paid at this time. Then the payment at time\n`T_i` is defined as `(S_i - S_{i - 1}) / S_{i-1} + d_i`. The value of\nthe cashflow is then the discounted sum of the payments. 
See, e.g., [1] for the\nreference.\n\n#### Example\n```python\nnotional = 10000\nforward_prices = [[110, 120, 140], [210, 220, 240]]\nspots = [100, 200]\ndividends = [[1, 1, 1], [2, 2, 2]]\nequity_leg_cashflows(forward_prices, spots, notional, dividends,\n dtype=tf.float64)\n# Expected:\n# [[1000.01, 909.1, 1666.675],\n# [ 500.01, 476.2, 909.1]]\n```\n\nArgs:\n forward_prices: A real `Tensor` of shape `batch_shape + [num_cashflows]`,\n where `num_cashflows` is the number of cashflows for each batch element.\n Equity forward prices at leg reset times.\n spots: A `Tensor` of the same `dtype` as `forward_prices` and of\n shape compatible with `batch_shape`. Spot prices for each batch element\n notional: A `Tensor` of the same `dtype` as `forward_prices` and of\n compatible shape. Notional amount for each cashflow.\n dividends: A `Tensor` of the same `dtype` as `forward_prices` and of\n compatible shape. Discrete dividends paid at the leg reset times.\n Default value: None which maps to zero dividend.\n dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.\n Default value: None which maps to the default dtype inferred from\n `forward_prices`.\n name: Python str. The name to give to the ops created by this function.\n Default value: None which maps to 'equity_leg_cashflows'.\n\nReturns:\n A `Tensor` of the same `dtype` as `forward_prices` and of shape\n `batch_shape + [num_cashflows]`.\n\n#### References\n[1] Don M. Chance and Don R Rich,\n The Pricing of Equity Swaps and Swaptions, 1998\n https://jod.pm-research.com/content/5/4/19"} +{"repo": "tensorflow", "function": "def _dequeue_return_value(self, tensors):\n if self._names:\n return {n: tensors[i] for i, n in enumerate(self._names)}\n elif len(tensors) == 1:\n return tensors[0]\n else:\n return tensors", "docstring": "Return the value to return from a dequeue op.\n\nIf the queue has names, return a dictionary with the\nnames as keys. Otherwise return either a single tensor\nor a list of tensors depending on the length of `tensors`.\n\nArgs:\n tensors: List of tensors from the dequeue op.\n\nReturns:\n A single tensor, a list of tensors, or a dictionary\n of tensors."} +{"repo": "tensorflow", "function": "def __getitem__(self, index):\n rank = self.rank\n if isinstance(index, slice):\n if index.step is not None and index.step != 1:\n raise IndexError('Cannot stride through a shape')\n start = index.start\n stop = index.stop\n if start is None:\n start = 0\n start = _fix_start_index(start, rank, self.num_row_partitions)\n stop = _fix_stop_index(stop, rank)\n return self._slice_shape(start, stop)\n elif isinstance(index, int):\n if index < 0:\n if rank is None:\n raise ValueError('Rank must be known to use __getitem__ with a negative index.')\n return self._dimension(rank + index)\n return self._dimension(index)\n else:\n raise TypeError('Argument is not an int or a slice')", "docstring": "Returns a dimension or a slice of the shape.\n\nRagged shapes can have ragged dimensions that depend upon other dimensions.\nTherefore, if you ask for a dimension that is ragged, this function raises\na ValueError. 
For similar reasons, if a slice is selected that includes\na ragged dimension without including the zero dimension, then this fails.\n\nAny slice that does not start at zero will return a shape\nwith num_row_partitions == 0.\n\nArgs:\n index: the index: can be an int or a slice.\n\nRaises:\n IndexError: if the index is not in range.\n ValueError: if the rank is unknown, or a ragged rank is requested\n incorrectly."} +{"repo": "tensorflow", "function": "def save(self, output_saved_model_dir, save_gpu_specific_engines=True, options=None):\n assert self._converted\n if trt_utils.is_experimental_feature_activated('remove_native_segments'):\n logging.info(\"'remove_native_segments' experimental feature is enabled during saving of converted SavedModel.\")\n self._converted_func = _remove_native_segments(self._converted_func)\n self._converted_graph_def = self._converted_func.graph.as_graph_def()\n if self._need_calibration and (not self._calibrated):\n raise RuntimeError('A model that requires INT8 calibration has to be built before saving it. Call build() to build and calibrate the TensorRT engines.')\n engine_asset_dir = tempfile.mkdtemp()\n resource_map = {}\n\n def _serialize_and_track_engine(node):\n \"\"\"Serialize TRT engines in the cache and track them.\"\"\"\n canonical_engine_name = _get_canonical_engine_name(node.name)\n if canonical_engine_name in resource_map:\n return\n filename = os.path.join(engine_asset_dir, 'trt-serialized-engine.' + canonical_engine_name)\n try:\n gen_trt_ops.serialize_trt_resource(resource_name=canonical_engine_name, filename=filename, delete_resource=True, save_gpu_specific_engines=save_gpu_specific_engines)\n except errors.NotFoundError:\n logging.info('Could not find %s in TF-TRT cache. This can happen if build() is not called, which means TensorRT engines will be built and cached at runtime.', canonical_engine_name)\n return\n resource_map[canonical_engine_name] = _TRTEngineResource(canonical_engine_name, filename, self._conversion_params.maximum_cached_engines)\n self._for_each_trt_node(self._converted_graph_def, _serialize_and_track_engine)\n trackable = autotrackable.AutoTrackable() if self.freeze else self._saved_model\n trackable.trt_engine_resources = resource_map\n if not self._conversion_params.allow_build_at_runtime:\n\n def _reset_allow_build_at_runtime(node):\n node.attr['_allow_build_at_runtime'].b = False\n self._for_each_trt_node(self._converted_graph_def, _reset_allow_build_at_runtime)\n reset_converted_func = wrap_function.function_from_graph_def(self._converted_graph_def, [tensor.name for tensor in self._converted_func.inputs], [tensor.name for tensor in self._converted_func.outputs])\n reset_converted_func.graph.structured_outputs = nest.pack_sequence_as(self._converted_func.graph.structured_outputs, reset_converted_func.graph.structured_outputs)\n reset_converted_func.graph.structured_input_signature = self._converted_func.structured_input_signature\n self._converted_func = reset_converted_func\n signatures = {self._input_saved_model_signature_key: self._converted_func}\n save.save(trackable, output_saved_model_dir, signatures, options=options)", "docstring": "Save the converted SavedModel.\n\nArgs:\n output_saved_model_dir: directory to save the converted SavedModel.\n save_gpu_specific_engines: whether to save TRT engines that have been\n built. When True, all engines are saved and when False, the engines\n are not saved and will be rebuilt at inference time. 
By using\n save_gpu_specific_engines=False after doing INT8 calibration, inference\n can be done on different GPUs than the GPU that the model was calibrated\n and saved on.\n options: `tf.saved_model.SaveOptions` object for configuring save options.\nRaises:\n RuntimeError: if the needed calibration hasn't been done."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[Tuple, BaseModelOutput]]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.Tensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if encoder_outputs is None:\n encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):\n encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)\n decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)\n if not return_dict:\n return decoder_outputs + encoder_outputs\n return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)", "docstring": "decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If\n `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\ndecoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\ncross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, BlenderbotSmallModel\n\n>>> model = BlenderbotSmallModel.from_pretrained(\"facebook/blenderbot_small-90M\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/blenderbot_small-90M\")\n\n>>> inputs = tokenizer(\"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\")\n>>> decoder_inputs = tokenizer(\"Studies show that\", return_tensors=\"pt\") # Batch size 1\n>>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)\n\n>>> last_hidden_states = outputs.last_hidden_state\n>>> list(last_hidden_states.shape)\n[1, 3, 512]\n```"} +{"repo": "transformers", "function": "def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor]=None):\n pixel_values = pixel_values.type(self.visual.dtype)\n image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)\n return image_embeds", "docstring": "Encodes images into continuous embeddings that can be forwarded to the language model.\n\nArgs:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\n The tensors corresponding to the input images.\n image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):\n The temporal, height and width of feature shape of each image in LLM."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, pixel_attention_mask: Optional[torch.BoolTensor]=None, image_hidden_states: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[KwargsForCausalLM]) -> Union[Tuple, Idefics3CausalLMOutputWithPast]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, 
past_key_values=past_key_values, inputs_embeds=inputs_embeds, pixel_values=pixel_values, pixel_attention_mask=pixel_attention_mask, image_hidden_states=image_hidden_states, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, return_dict=True, **kwargs)\n hidden_states = outputs[0]\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n loss = None\n if labels is not None:\n loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs)\n return Idefics3CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states)", "docstring": "pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):\n Mask to avoid performing attention on padding pixel indices.\nimage_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):\n The hidden states of the image encoder after modality projection.\nlabels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or `model.image_token_id` (where `model` is your instance of `Idefics3ForConditionalGeneration`).\n Tokens with indices set to `model.image_token_id` are ignored (masked), the loss is only\n computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n\n```python\n>>> import requests\n>>> import torch\n>>> from PIL import Image\n>>> from io import BytesIO\n\n>>> from transformers import AutoProcessor, AutoModelForVision2Seq\n>>> from transformers.image_utils import load_image\n\n>>> # Note that passing the image urls (instead of the actual pil images) to the processor is also possible\n>>> image1 = load_image(\"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg\")\n>>> image2 = load_image(\"https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg\")\n>>> image3 = load_image(\"https://cdn.britannica.com/68/170868-050-8DDE8263/Golden-Gate-Bridge-San-Francisco.jpg\")\n\n>>> processor = AutoProcessor.from_pretrained(\"HuggingFaceM4/Idefics3-8B-Llama3\")\n>>> model = AutoModelForVision2Seq.from_pretrained(\"HuggingFaceM4/Idefics3-8B-Llama3\", torch_dtype=torch.bfloat16, device_map=\"auto\")\n\n>>> # Create inputs\n>>> messages = [\n... {\n... \"role\": \"user\",\n... \"content\": [\n... {\"type\": \"image\"},\n... {\"type\": \"text\", \"text\": \"In this image, we can see the city of New York, and more specifically the Statue of Liberty.\"},\n... {\"type\": \"image\"},\n... {\"type\": \"text\", \"text\": \"What can we see in this image?\"},\n... ]\n... },\n... {\n... \"role\": \"user\",\n... \"content\": [\n... {\"type\": \"image\"},\n... {\"type\": \"text\", \"text\": \"In which city is that bridge located?\"},\n... ]\n... }\n... 
]\n\n>>> prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]\n>>> images = [[image1, image2], [image3]]\n>>> inputs = processor(text=prompts, images=images, padding=True, return_tensors=\"pt\").to(model.device)\n\n>>> # Generate\n>>> generated_ids = model.generate(**inputs, max_new_tokens=256)\n>>> generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)\n\n>>> print(generated_texts[0])\nAssistant: There are buildings, trees, lights, and water visible in this image.\n\n>>> print(generated_texts[1])\nAssistant: The bridge is in San Francisco.\n```"} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n    sep = [self.sep_token_id]\n    cls = [self.cls_token_id]\n    if token_ids_1 is None:\n        return cls + token_ids_0 + sep\n    return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and\nadding special tokens. An FNet sequence has the following format:\n\n- single sequence: `[CLS] X [SEP]`\n- pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\n    token_ids_0 (`List[int]`):\n        List of IDs to which the special tokens will be added.\n    token_ids_1 (`List[int]`, *optional*):\n        Optional second list of IDs for sequence pairs.\n\nReturns:\n    `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "tensorflow", "function": "def stateless_random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed):\n    if min_jpeg_quality < 0 or max_jpeg_quality < 0 or min_jpeg_quality > 100 or (max_jpeg_quality > 100):\n        raise ValueError('jpeg encoding range must be between 0 and 100.')\n    if min_jpeg_quality >= max_jpeg_quality:\n        raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.')\n    jpeg_quality = stateless_random_ops.stateless_random_uniform(shape=[], minval=min_jpeg_quality, maxval=max_jpeg_quality, seed=seed, dtype=dtypes.int32)\n    return adjust_jpeg_quality(image, jpeg_quality)", "docstring": "Deterministically randomize jpeg encoding quality for inducing jpeg noise.\n\nGuarantees the same results given the same `seed` independent of how many\ntimes the function is called, and independent of global seed settings (e.g.\n`tf.random.set_seed`).\n\n`min_jpeg_quality` must be in the interval `[0, 100]` and less than\n`max_jpeg_quality`.\n`max_jpeg_quality` must be in the interval `[0, 100]`.\n\nUsage Example:\n\n>>> x = tf.constant([[[1, 2, 3],\n...                   [4, 5, 6]],\n...                  [[7, 8, 9],\n...                   [10, 11, 12]]], dtype=tf.uint8)\n>>> seed = (1, 2)\n>>> tf.image.stateless_random_jpeg_quality(x, 75, 95, seed)\n\n\nArgs:\n  image: 3D image. Size of the last dimension must be 1 or 3.\n  min_jpeg_quality: Minimum jpeg encoding quality to use.\n  max_jpeg_quality: Maximum jpeg encoding quality to use.\n  seed: A shape [2] Tensor, the seed to the random number generator. Must have\n    dtype `int32` or `int64`.
(When using XLA, only `int32` is allowed.)\n\nReturns:\n  Adjusted image(s), same shape and DType as `image`.\n\nRaises:\n  ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid."} +{"repo": "tensorflow", "function": "def _VerifyExplicitPaddings(self, tensor_in_sizes, filter_in_sizes, strides, padding, data_format, dtype, use_gpu, op_name, dilations=(1, 1), test_grappler_layout_optimizer=False, tol=1e-05):\n    input_tensor = self._CreateNumpyTensor(tensor_in_sizes)\n    filter_tensor = self._CreateNumpyTensor(filter_in_sizes)\n    input_tensor = array_ops.pad(input_tensor, [(0, 0)] + padding + [(0, 0)])\n    dilations = list(dilations)\n    conv2d_result = nn_ops.conv2d(input_tensor, filter_tensor, [1] + list(strides) + [1], 'VALID', dilations=[1] + dilations + [1])\n    expected = list(self.evaluate(array_ops.reshape(conv2d_result, [-1])))\n    self._VerifyValuesParameters(tensor_in_sizes, filter_in_sizes, strides, padding, expected, data_format, dtype, use_gpu, op_name, dilations, test_grappler_layout_optimizer=test_grappler_layout_optimizer, tol=tol)", "docstring": "Verifies Conv2D with explicit padding generates correct values.\n\nIt does this by comparing with Conv2D without explicit padding. This\nfunction assumes Conv2D without explicit padding works correctly.\n\nArgs:\n  tensor_in_sizes: Input tensor dimensions in [batch, input_rows,\n    input_cols, input_depth].\n  filter_in_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols,\n    input_depth, output_depth].\n  strides: [row_stride, col_stride] for the convolution.\n  padding: Explicit padding amounts.\n  data_format: \"NCHW\" or \"NHWC\"\n  dtype: data type to perform test\n  use_gpu: True if testing on the GPU\n  op_name: \"Conv\" or \"Conv2D\"\n  dilations: Dilation values\n  test_grappler_layout_optimizer: If True, allow the Grappler layout\n    optimizer to run, which turns NHWC Conv2Ds on the GPU to NCHW Conv2Ds.\n  tol: The absolute and relative tolerance."} +{"repo": "transformers", "function": "def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor):\n    batch_size, mask_seq_len = hidden_states.shape[:2]\n    indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1)\n    bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len)\n    mask = hidden_states.new_ones((batch_size, mask_seq_len))\n    mask = mask.masked_fill(bool_mask, 0)\n    return mask", "docstring": "Computes an attention mask of the form `(batch, seq_len)` with an attention mask for each element in the batch that\nstops at the corresponding element in `seq_lens`.\nArgs:\n    hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`):\n        The sequences to mask, where `*` is any number of sequence-specific dimensions including none.\n    seq_lens (`torch.Tensor` of shape `(batch)`):\n        Each element represents the length of the sequence at the same index in `hidden_states`.\nReturns:\n    `torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)`"} +{"repo": "pytype", "function": "def get_module_file(self, namespace, module, version):\n    module_parts = module.split('.')\n    module_path = path_utils.join(*module_parts)\n    paths = []\n    if namespace == 'stdlib':\n        path = path_utils.join(namespace, module_path)\n        if self._is_module_in_typeshed(module_parts, version) or path in self.missing:\n            paths.append(path)\n    elif namespace == 'third_party':\n        for package in sorted(self._third_party_packages[module_parts[0]]):\n            paths.append(path_utils.join('stubs', package, module_path))\n    for path_rel in paths:\n        if path_rel in
self.missing:\n            relpath = path_utils.join('nonexistent', path_rel + '.pyi')\n            return (relpath, builtin_stubs.DEFAULT_SRC)\n        for path in [path_utils.join(path_rel, '__init__.pyi'), path_rel + '.pyi']:\n            try:\n                name, src = self._store.load_file(path)\n                return (name, src)\n            except OSError:\n                pass\n    raise OSError(f\"Couldn't find {module}\")", "docstring": "Get the contents of a typeshed .pyi file.\n\nArguments:\n  namespace: selects a top-level directory within typeshed/. Allowed values\n    are \"stdlib\" and \"third_party\". \"third_party\" corresponds to the\n    typeshed/stubs/ directory.\n  module: module name (e.g., \"sys\" or \"__builtins__\"). Can contain dots, if\n    it's a submodule. Package names should omit the \"__init__\" suffix (e.g.,\n    pass in \"os\", not \"os.__init__\").\n  version: The Python version. (major, minor)\n\nReturns:\n  A tuple with the filename and contents of the file.\nRaises:\n  IOError: if file not found"} +{"repo": "transformers", "function": "def from_dict(cls, feature_extractor_dict: dict[str, Any], **kwargs) -> PreTrainedFeatureExtractor:\n    return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)\n    to_remove = []\n    for key, value in kwargs.items():\n        if key in feature_extractor_dict:\n            feature_extractor_dict[key] = value\n            to_remove.append(key)\n    for key in to_remove:\n        kwargs.pop(key, None)\n    feature_extractor = cls(**feature_extractor_dict)\n    logger.info(f'Feature extractor {feature_extractor}')\n    if return_unused_kwargs:\n        return (feature_extractor, kwargs)\n    else:\n        return feature_extractor", "docstring": "Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of\nparameters.\n\nArgs:\n    feature_extractor_dict (`Dict[str, Any]`):\n        Dictionary that will be used to instantiate the feature extractor object.
Such a dictionary can be\n retrieved from a pretrained checkpoint by leveraging the\n [`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method.\n kwargs (`Dict[str, Any]`):\n Additional parameters from which to initialize the feature extractor object.\n\nReturns:\n [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those\n parameters."} +{"repo": "tensorflow", "function": "def __init__(self, functions, inference_args, input_tangents, tape_watching):\n self._functions = functions\n self._inference_args = inference_args\n self._input_tangents = input_tangents\n self._tape_watching = tape_watching", "docstring": "Collects information about the function call.\n\nArgs:\n functions: An object which produces forward and backward functions, either\n a _DelayedRewriteGradientFunctions or a _TapeGradientFunctions object.\n inference_args: A flat list of Tensors, arguments to the inference\n function.\n input_tangents: A flat list of Tensors, jvps associated with\n `inference_args`.\n tape_watching: Boolean, with True indicating that recording is necessary."} +{"repo": "transformers", "function": "def forward(self, input, padding_mask: Optional[torch.Tensor]=None, causal_mask: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[torch.Tensor]]=None, output_attentions=False, use_cache=False):\n seq_len, bsz, embed_dim = input.size()\n if embed_dim != self.config.hidden_size:\n raise ValueError(f'Input embedding dimension should be {self.config.hidden_size}; received {embed_dim}')\n residual = input\n if self.config.normalize_before_mega:\n input = self.norm(input)\n value = self.activation(self.v_proj(input))\n if self.config.is_decoder and past_key_values is not None:\n if seq_len > 1:\n raise ValueError(f'Incremental decoding only supports self sequence length of 1; received {seq_len}')\n prev_self_key, prev_self_value, prev_ema_state = past_key_values[0:3]\n else:\n prev_self_key = prev_self_value = prev_ema_state = None\n ema_out, updated_ema_state = self.ema_gate(input, attention_mask=padding_mask, prev_state=prev_ema_state, use_cache=use_cache)\n ema_out = self.dropout(ema_out)\n base = self.mx_proj(ema_out)\n residual_weight, query_key_gates, intermediate_state = torch.split(base, [self.config.hidden_size, self.config.shared_representation_size + self.config.intermediate_size, self.config.hidden_size], dim=-1)\n residual_weight = torch.sigmoid(residual_weight)\n query_key_gates = F.silu(query_key_gates)\n query_key, attention_gate = torch.split(query_key_gates, [self.config.shared_representation_size, self.config.intermediate_size], dim=-1)\n query_key = query_key.unsqueeze(2) * self.qk_weight + self.qk_bias\n query, key = torch.unbind(query_key, dim=2)\n query = query.transpose(0, 1)\n key = key.transpose(0, 1)\n value = value.transpose(0, 1)\n if self.config.is_decoder:\n if prev_self_key is not None:\n key = torch.cat([prev_self_key, key], dim=1)\n if prev_self_value is not None:\n value = torch.cat([prev_self_value, value], dim=1)\n if not self.config.use_chunking:\n updated_self_key = key\n updated_self_value = value\n else:\n curr_len = key.size(1) % self.config.chunk_size\n if curr_len == 0:\n updated_self_key = None\n updated_self_value = None\n else:\n updated_self_key = key\n updated_self_value = value\n ctx_len = key.size(1)\n if not self.config.use_chunking:\n query = query.unsqueeze(1)\n key = key.unsqueeze(1)\n value = value.unsqueeze(1)\n if padding_mask is not None:\n padding_mask = padding_mask.unsqueeze(1)\n 
else:\n if seq_len < self.config.chunk_size:\n query = query.unsqueeze(1)\n else:\n n_chunks = seq_len // self.config.chunk_size\n query = query.reshape(bsz, n_chunks, self.config.chunk_size, self.config.shared_representation_size)\n if ctx_len < self.config.chunk_size:\n key = key.unsqueeze(1)\n value = value.unsqueeze(1)\n if padding_mask is not None:\n padding_mask = padding_mask.unsqueeze(1)\n else:\n n_chunks = ctx_len // self.config.chunk_size\n key = key.reshape(bsz, n_chunks, self.config.chunk_size, self.config.shared_representation_size)\n value = value.reshape(bsz, n_chunks, self.config.chunk_size, self.config.intermediate_size)\n if padding_mask is not None:\n padding_mask = padding_mask.view(bsz, n_chunks, self.config.chunk_size)\n if padding_mask is not None and padding_mask.dim() == 0:\n padding_mask = None\n attn_weights = self.attention_function(query, key, padding_mask=padding_mask, causal_mask=causal_mask)\n value = self.hidden_dropout(value, batch_first=True)\n kernel = self.attention_dropout(attn_weights)\n weighted_self_output = torch.matmul(kernel, value).view(bsz, seq_len, self.config.intermediate_size).transpose(0, 1)\n weighted_self_output = self.activation(intermediate_state + self.h_proj(weighted_self_output * attention_gate))\n weighted_self_output = self.dropout(weighted_self_output)\n out = torch.addcmul(residual, residual_weight, weighted_self_output - residual)\n if not self.config.normalize_before_mega:\n out = self.norm(out)\n return_values = (out, attn_weights) if output_attentions else (out,)\n if self.config.is_decoder:\n return_values = return_values + (updated_self_key, updated_self_value, updated_ema_state)\n return return_values", "docstring": "Mega's self-attention block, which combines multi-headed EMA with traditional self-attention\n\nArgs:\n input (`torch.Tensor` of shape `(sequence_length, batch_size, hidden_size)`):\n Hidden states to be updated by Mega's self-attention\n padding_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*\n or 0 for *masked*\n causal_mask (`torch.LongTensor` of shape `(sequence_length, sequence_length)`, *optional*):\n Indicates which inputs are to be ignored due to causal attention, where elements are either 1 for *not\n masked* or 0 for *masked*\n past_key_values (`tuple(torch.Tensor)`, *optional*):\n The hidden states returned from the previous timestep during incremental decoding; expects that\n self-attention key, value, and EMA states are the first 3 entries in the tuple\n output_attentions (`bool`, default `False`):\n Whether to return self-attention weights\n use_cache (`bool`, default `False`):\n Whether to perform incremental decoding; uses `past_key_values` as prior state, and returns the updated\n states for use in the next step\n\nReturns:\n `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and\n inputs:\n - **hidden_states** (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`) -- Hidden\n states from target sequence updated by Mega's self-attention\n - **attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape\n `(batch_size, 1, sequence_length, sequence_length)` -- The self-attention weights corresponding to how\n each token in the input sequence attends to every other token\n - **self_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of 
shape `(batch_size,\n      sequence_length, config.shared_representation_size)` -- The self-attention key state for use in the next\n      step of incremental decoding\n    - **self_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,\n      sequence_length, config.hidden_size)` -- The self-attention value state for use in the next step of\n      incremental decoding\n    - **self_ema_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape\n      `(batch_size, config.ndim)` -- The incremental EMA state for use in the next step of incremental decoding."} +{"repo": "tensorflow", "function": "class Softmax(Layer):\n\n    def __init__(self, axis=-1, **kwargs):\n        super(Softmax, self).__init__(**kwargs)\n        self.supports_masking = True\n        self.axis = axis\n\n    def call(self, inputs, mask=None):\n        if mask is not None:\n            adder = (1.0 - math_ops.cast(mask, inputs.dtype)) * _large_compatible_negative(inputs.dtype)\n            inputs += adder\n        if isinstance(self.axis, (tuple, list)):\n            if len(self.axis) > 1:\n                return math_ops.exp(inputs - math_ops.reduce_logsumexp(inputs, axis=self.axis, keepdims=True))\n            else:\n                return backend.softmax(inputs, axis=self.axis[0])\n        return backend.softmax(inputs, axis=self.axis)\n\n    def get_config(self):\n        config = {'axis': self.axis}\n        base_config = super(Softmax, self).get_config()\n        return dict(list(base_config.items()) + list(config.items()))\n\n    @tf_utils.shape_type_conversion\n    def compute_output_shape(self, input_shape):\n        return input_shape", "docstring": "Softmax activation function.\n\nExample without mask:\n\n>>> inp = np.asarray([1., 2., 1.])\n>>> layer = tf.keras.layers.Softmax()\n>>> layer(inp).numpy()\narray([0.21194157, 0.5761169 , 0.21194157], dtype=float32)\n>>> mask = np.asarray([True, False, True], dtype=bool)\n>>> layer(inp, mask).numpy()\narray([0.5, 0. , 0.5], dtype=float32)\n\nInput shape:\n  Arbitrary. Use the keyword argument `input_shape`\n  (tuple of integers, does not include the samples axis)\n  when using this layer as the first layer in a model.\n\nOutput shape:\n  Same shape as the input.\n\nArgs:\n  axis: Integer, or list of Integers, axis along which the softmax\n    normalization is applied.\nCall arguments:\n  inputs: The inputs, or logits to the softmax layer.\n  mask: A boolean mask of the same shape as `inputs`. Defaults to `None`.
The\n    mask specifies 1 to keep and 0 to mask.\n\nReturns:\n  softmaxed output with the same shape as `inputs`."} +{"repo": "tensorflow", "function": "def batch_reduce_and_verify(self, inputs, expect, options):\n\n    def replica_fn():\n        CollectiveReplicaLauncher._prefer_unique_instance_key = options.prefer_unique_instance_key\n        collective, devices, pid = self.make_collective(options.num_processes, options.gpus_per_process)\n\n        def batch_reduce_fn():\n            batch_size = len(inputs[0])\n            value_dst_pairs = []\n            for i in range(batch_size):\n\n                def value_fn(device_idx, idx=i):\n                    return inputs[pid * len(devices) + device_idx][idx]\n                per_replica_value = make_per_replica_value(value_fn, devices)\n                value_dst_pairs.append((per_replica_value, per_replica_value))\n            reduced_values = collective.batch_reduce(options.reduce_op, value_dst_pairs, options.communication_options)\n            if options.gpus_per_process > 1:\n                for v in reduced_values:\n                    self.assertIsInstance(v, value_lib.Mirrored)\n            reduced_values = [self.as_list(v) for v in reduced_values]\n            for v in reduced_values:\n                self.assertAllEqual(devices, [t.device for t in v])\n            return nest.map_structure(ops.convert_to_tensor, reduced_values)\n        per_replica_expect = nest.map_structure(lambda x: [ops.convert_to_tensor(x)] * len(devices), expect)\n        if 'eager' in options.mode:\n            got = batch_reduce_fn()\n            self.assertAllClose(got, per_replica_expect)\n        if 'func_graph' in options.mode:\n            got = def_function.function(batch_reduce_fn)()\n            self.assertAllClose(got, per_replica_expect)\n    get_global_mpr(options.num_processes).run(replica_fn)", "docstring": "Batch reduce the given `inputs` and verify the output matches `expect`.\n\nArgs:\n  inputs: a 2-level nested list of `Tensor` or `IndexedSlices`, where i-th\n    value will be fed to i-th replica.\n  expect: a list of `Tensor` or `IndexedSlices`. This should be the expected\n    value for one replica.\n  options: a `RunOptions` instance."} +{"repo": "transformers", "function": "class DetrModelOutput(Seq2SeqModelOutput):\n    intermediate_hidden_states: Optional[torch.FloatTensor] = None", "docstring": "Base class for outputs of the DETR encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput,\nnamely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them\ngone through a layernorm. This is useful when training the model with auxiliary decoding losses.\n\nArgs:\n    last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n        Sequence of hidden-states at the output of the last layer of the decoder of the model.\n    decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n        shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each\n        layer plus the initial embedding outputs.\n    decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n        sequence_length)`.
Attention weights of the decoder, after the attention softmax, used to compute the\n        weighted average in the self-attention heads.\n    cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n        sequence_length)`. Attention weights of the decoder's cross-attention layer, after the attention softmax,\n        used to compute the weighted average in the cross-attention heads.\n    encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n        Sequence of hidden-states at the output of the last layer of the encoder of the model.\n    encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n        Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n        shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each\n        layer plus the initial embedding outputs.\n    encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n        Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n        sequence_length)`. Attention weights of the encoder, after the attention softmax, used to compute the\n        weighted average in the self-attention heads.\n    intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):\n        Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a\n        layernorm."} +{"repo": "tensorflow", "function": "def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map):\n    for tag in tags:\n        meta_graph_def.meta_info_def.tags.append(tag)\n    if signature_def_map is not None:\n        for key in signature_def_map:\n            meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key])\n    proto_meta_graph_def = self._saved_model.meta_graphs.add()\n    proto_meta_graph_def.CopyFrom(meta_graph_def)", "docstring": "Tags the meta graph def and adds it to the SavedModel.\n\nTags the meta graph def with the supplied tags, adds signature defs to it if\nprovided and appends the meta graph def to the SavedModel proto.\n\nArgs:\n  meta_graph_def: The meta graph def to add to the SavedModel.\n  tags: The set of tags to annotate the meta graph def with.\n  signature_def_map: The map of signature defs to be added to the meta graph\n    def."} +{"repo": "tensorflow", "function": "def end(self, session):\n    pass", "docstring": "Called at the end of the session.\n\nThe `session` argument can be used in case the hook wants to run final ops,\nsuch as saving a last checkpoint.\n\nIf `session.run()` raises an exception other than OutOfRangeError or\nStopIteration then `end()` is not called.\nNote the difference between `end()` and `after_run()` behavior when\n`session.run()` raises OutOfRangeError or StopIteration.
In that case\n`end()` is called but `after_run()` is not called.\n\nArgs:\n  session: A TensorFlow Session that will soon be closed."} +{"repo": "tensorflow", "function": "def input(self):\n    if not self._inbound_nodes:\n        raise AttributeError('Layer ' + self.name + ' is not connected, no input to return.')\n    return self._get_node_attribute_at_index(0, 'input_tensors', 'input')", "docstring": "Retrieves the input tensor(s) of a layer.\n\nOnly applicable if the layer has exactly one input,\ni.e. if it is connected to one incoming layer.\n\nReturns:\n    Input tensor or list of input tensors.\n\nRaises:\n  RuntimeError: If called in Eager mode.\n  AttributeError: If no inbound nodes are found."} +{"repo": "mobly", "function": "def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):\n    self._counter = self._id_counter()\n    try:\n        self._conn = socket.create_connection(('localhost', self.host_port), _SOCKET_CONNECTION_TIMEOUT)\n    except ConnectionRefusedError as err:\n        self.log.debug('Failed to connect to localhost, trying 127.0.0.1: {}'.format(str(err)))\n        self._conn = socket.create_connection(('127.0.0.1', self.host_port), _SOCKET_CONNECTION_TIMEOUT)\n    self._conn.settimeout(_SOCKET_READ_TIMEOUT)\n    self._client = self._conn.makefile(mode='brw')\n    resp = self._cmd(cmd, uid)\n    if not resp:\n        raise ProtocolError(self._ad, ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)\n    result = json.loads(str(resp, encoding='utf8'))\n    if result['status']:\n        self.uid = result['uid']\n    else:\n        self.uid = UNKNOWN_UID", "docstring": "Opens a connection to a JSON RPC server.\n\nOpens a connection to a remote client. The connection attempt will time\nout if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. Each\nsubsequent operation over this socket will time out after\n_SOCKET_READ_TIMEOUT seconds as well.\n\nArgs:\n  uid: int, The uid of the session to join, or UNKNOWN_UID to start a\n    new session.\n  cmd: JsonRpcCommand, The command to use for creating the connection.\n\nRaises:\n  IOError: Raised when the socket times out from an io error\n  socket.timeout: Raised when the socket waits too long for the connection.\n  ProtocolError: Raised when there is an error in the protocol."} +{"repo": "transformers", "function": "def call(self, context: tf.Tensor, latents: tf.Tensor) -> tf.Tensor:\n    context = self.context_layer_norm(context)\n    latents = self.latents_layer_norm(latents)\n    batch_size, seq_length, embed_dim = shape_list(context)\n    q = self.q_proj(latents)\n    k = self.k_proj(tf.concat([context, latents], axis=-2))\n    v = self.v_proj(tf.concat([context, latents], axis=-2))\n    q, k, v = [tf.transpose(tf.reshape(x, (batch_size, x.shape[1], self.n_heads, self.head_dim)), perm=[0, 2, 1, 3]) for x in (q, k, v)]\n    if self.qk_layer_norms:\n        q = self.q_layer_norm(q)\n        k = self.k_layer_norm(k)\n    scores = tf.einsum('... i d, ... j d -> ... i j', q * self.qk_scale, k)\n    stabilized_scores = scores - tf.reduce_max(scores, axis=-1, keepdims=True)\n    attn = tf.nn.softmax(stabilized_scores, axis=-1)\n    resampled = tf.einsum('... i j, ... j d -> ...
i d', attn, v)\n    return self.output_proj(tf.reshape(tf.transpose(resampled, perm=[0, 2, 1, 3]), (batch_size, -1, self.n_heads * self.head_dim)))", "docstring": "Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension!\n\nArgs:\n    context (`tf.Tensor`):\n        Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample.\n    latents (`tf.Tensor`):\n        Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to.\n\nReturns:\n    `tf.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross\n    from context."} +{"repo": "transformers", "function": "def set_record_attn(self, record_attn):\n\n    def _should_record_attn(layer_idx):\n        if isinstance(record_attn, bool):\n            return record_attn\n        return layer_idx in record_attn\n    for i, layer in enumerate(self._attn_mods):\n        layer.attn.record_attn = _should_record_attn(i)\n    if not record_attn:\n        self.saved_attn_weights = []", "docstring": "Makes forward prop dump self-attention softmaxes to self.saved_attn_weights.\n\nArgs:\n    record_attn (`Union[bool,set]`):\n        Either a set of layer indices indicating which layers to store, or a boolean value indicating whether\n        to dump all."} +{"repo": "tensorflow", "function": "def set(self, value):\n    pywrap_tfe.TFE_MonitoringIntGaugeCellSet(self._cell, value)", "docstring": "Atomically set the value.\n\nArgs:\n  value: integer value."} +{"repo": "yapf", "function": "def _PushParameterListState(self, newline):\n    current = self.next_token\n    previous = current.previous_token\n    if _IsFunctionDefinition(previous):\n        first_param_column = previous.total_length + self.stack[-2].indent\n        self.param_list_stack.append(object_state.ParameterListState(previous, newline, first_param_column))", "docstring": "Push a new parameter list state for a function definition.\n\nArgs:\n  newline: Whether the current token is to be added on a newline."} +{"repo": "beam", "function": "def checksum(self, path):\n    raise NotImplementedError", "docstring": "Fetch checksum metadata of a file on the\n:class:`~apache_beam.io.filesystem.FileSystem`.\n\nThis operation returns checksum metadata as stored in the underlying\nFileSystem. It should not need to read file data to obtain this value.\nChecksum type and format are FileSystem dependent and are not compatible\nbetween FileSystems.\nFileSystem implementations may return file size if a checksum isn't\navailable.\n\nArgs:\n  path: string path of a file.\n\nReturns: string containing checksum\n\nRaises:\n  ``BeamIOError``: if path isn't a file or doesn't exist."} +{"repo": "transformers", "function": "def register_for_auto_class(cls, auto_class='AutoConfig'):\n    if not isinstance(auto_class, str):\n        auto_class = auto_class.__name__\n    import transformers.models.auto as auto_module\n    if not hasattr(auto_module, auto_class):\n        raise ValueError(f'{auto_class} is not a valid auto class.')\n    cls._auto_class = auto_class", "docstring": "Register this class with a given auto class.
This should only be used for custom configurations, as the ones in\nthe library are already mapped with `AutoConfig`.\n\nArgs:\n    auto_class (`str` or `type`, *optional*, defaults to `\"AutoConfig\"`):\n        The auto class to register this new configuration with."} +{"repo": "keras", "function": "def convert_format(boxes, source, target, height=None, width=None, dtype='float32'):\n    box_utils = BoundingBox()\n    if backend_utils.in_tf_graph():\n        box_utils.backend.set_backend('tensorflow')\n    boxes = box_utils.convert_format(boxes=boxes, source=source, target=target, height=height, width=width, dtype=dtype)\n    box_utils.backend.reset()\n    return boxes", "docstring": "Converts bounding boxes between formats.\n\nSupported formats (case-insensitive):\n`\"xyxy\"`: [left, top, right, bottom]\n`\"yxyx\"`: [top, left, bottom, right]\n`\"xywh\"`: [left, top, width, height]\n`\"center_xywh\"`: [center_x, center_y, width, height]\n`\"center_yxhw\"`: [center_y, center_x, height, width]\n`\"rel_xyxy\"`, `\"rel_yxyx\"`, `\"rel_xywh\"`, `\"rel_center_xywh\"`: Relative\n    versions of the above formats, where coordinates are normalized\n    to the range [0, 1] based on the image `height` and `width`.\n\nArgs:\n    boxes: Bounding boxes tensor/array or dictionary of `boxes` and\n        `labels`.\n    source: Source format string.\n    target: Target format string.\n    height: Image height (required for relative target format).\n    width: Image width (required for relative target format).\n    dtype: Data type for conversion (optional).\n\nReturns:\n    Converted boxes.\n\nRaises:\n    ValueError: For invalid formats, shapes, or missing dimensions.\n\nExample:\n```python\nboxes = np.array([[10, 20, 30, 40], [50, 60, 70, 80]])\n# Convert from 'xyxy' to 'xywh' format\nboxes_xywh = keras.utils.bounding_boxes.convert_format(\n    boxes, source='xyxy', target='xywh'\n)  # Output: [[10. 20. 20. 20.], [50. 60. 20. 20.]]\n\n# Convert to relative 'rel_xyxy' format\nboxes_rel_xyxy = keras.utils.bounding_boxes.convert_format(\n    boxes, source='xyxy', target='rel_xyxy', height=200, width=300\n)  # Output: [[0.03333334 0.1 0.1 0.2 ],\n    #[0.16666667 0.3 0.23333333 0.4 ]]\n```"} +{"repo": "tensorflow", "function": "class ModelCheckpoint(Callback):\n\n    def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', save_freq='epoch', options=None, **kwargs):\n        super(ModelCheckpoint, self).__init__()\n        self._supports_tf_logs = True\n        self.monitor = monitor\n        self.verbose = verbose\n        self.filepath = path_to_string(filepath)\n        self.save_best_only = save_best_only\n        self.save_weights_only = save_weights_only\n        self.save_freq = save_freq\n        self.epochs_since_last_save = 0\n        self._batches_seen_since_last_saving = 0\n        self._last_batch_seen = 0\n        if save_weights_only:\n            if options is None or isinstance(options, checkpoint_options_lib.CheckpointOptions):\n                self._options = options or checkpoint_options_lib.CheckpointOptions()\n            else:\n                raise TypeError('If save_weights_only is True, then `options` must be either None or a tf.train.CheckpointOptions')\n        elif options is None or isinstance(options, save_options_lib.SaveOptions):\n            self._options = options or save_options_lib.SaveOptions()\n        else:\n            raise TypeError('If save_weights_only is False, then `options` must be either None or a tf.saved_model.SaveOptions')\n        if 'load_weights_on_restart' in kwargs:\n            self.load_weights_on_restart = kwargs['load_weights_on_restart']\n            logging.warning('`load_weights_on_restart` argument is deprecated.
Please use `model.load_weights()` for loading weights before the start of `model.fit()`.')\n else:\n self.load_weights_on_restart = False\n if 'period' in kwargs:\n self.period = kwargs['period']\n logging.warning('`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.')\n else:\n self.period = 1\n if mode not in ['auto', 'min', 'max']:\n logging.warning('ModelCheckpoint mode %s is unknown, fallback to auto mode.', mode)\n mode = 'auto'\n if mode == 'min':\n self.monitor_op = np.less\n self.best = np.inf\n elif mode == 'max':\n self.monitor_op = np.greater\n self.best = -np.inf\n elif 'acc' in self.monitor or self.monitor.startswith('fmeasure'):\n self.monitor_op = np.greater\n self.best = -np.inf\n else:\n self.monitor_op = np.less\n self.best = np.inf\n if self.save_freq != 'epoch' and (not isinstance(self.save_freq, int)):\n raise ValueError('Unrecognized save_freq: {}'.format(self.save_freq))\n self._chief_worker_only = False\n\n def on_train_begin(self, logs=None):\n if self.load_weights_on_restart:\n filepath_to_load = self._get_most_recently_modified_file_matching_pattern(self.filepath)\n if filepath_to_load is not None and self._checkpoint_exists(filepath_to_load):\n try:\n self.model.load_weights(filepath_to_load)\n except (IOError, ValueError) as e:\n raise ValueError('Error loading file from {}. Reason: {}'.format(filepath_to_load, e))\n\n def _implements_train_batch_hooks(self):\n return self.save_freq != 'epoch'\n\n def on_train_batch_end(self, batch, logs=None):\n if self._should_save_on_batch(batch):\n self._save_model(epoch=self._current_epoch, logs=logs)\n\n def on_epoch_begin(self, epoch, logs=None):\n self._current_epoch = epoch\n\n def on_epoch_end(self, epoch, logs=None):\n self.epochs_since_last_save += 1\n if self.save_freq == 'epoch':\n self._save_model(epoch=epoch, logs=logs)\n\n def _should_save_on_batch(self, batch):\n \"\"\"Handles batch-level saving logic, supports steps_per_execution.\"\"\"\n if self.save_freq == 'epoch':\n return False\n if batch <= self._last_batch_seen:\n add_batches = batch + 1\n else:\n add_batches = batch - self._last_batch_seen\n self._batches_seen_since_last_saving += add_batches\n self._last_batch_seen = batch\n if self._batches_seen_since_last_saving >= self.save_freq:\n self._batches_seen_since_last_saving = 0\n return True\n return False\n\n def _save_model(self, epoch, logs):\n \"\"\"Saves the model.\n\n Args:\n epoch: the epoch this iteration is in.\n logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.\n \"\"\"\n logs = logs or {}\n if isinstance(self.save_freq, int) or self.epochs_since_last_save >= self.period:\n logs = tf_utils.sync_to_numpy_or_python_type(logs)\n self.epochs_since_last_save = 0\n filepath = self._get_file_path(epoch, logs)\n try:\n if self.save_best_only:\n current = logs.get(self.monitor)\n if current is None:\n logging.warning('Can save best model only with %s available, skipping.', self.monitor)\n elif self.monitor_op(current, self.best):\n if self.verbose > 0:\n print('\\nEpoch %05d: %s improved from %0.5f to %0.5f, saving model to %s' % (epoch + 1, self.monitor, self.best, current, filepath))\n self.best = current\n if self.save_weights_only:\n self.model.save_weights(filepath, overwrite=True, options=self._options)\n else:\n self.model.save(filepath, overwrite=True, options=self._options)\n elif self.verbose > 0:\n print('\\nEpoch %05d: %s did not improve from %0.5f' % (epoch + 1, self.monitor, self.best))\n else:\n if self.verbose > 
0:\n                        print('\\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))\n                    if self.save_weights_only:\n                        self.model.save_weights(filepath, overwrite=True, options=self._options)\n                    else:\n                        self.model.save(filepath, overwrite=True, options=self._options)\n                self._maybe_remove_file()\n            except IsADirectoryError as e:\n                raise IOError('Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {}'.format(filepath))\n            except IOError as e:\n                if 'is a directory' in str(e.args[0]).lower():\n                    raise IOError('Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {}'.format(filepath))\n                raise e\n\n    def _get_file_path(self, epoch, logs):\n        \"\"\"Returns the file path for checkpoint.\"\"\"\n        try:\n            file_path = self.filepath.format(epoch=epoch + 1, **logs)\n        except KeyError as e:\n            raise KeyError('Failed to format this callback filepath: \"{}\". Reason: {}'.format(self.filepath, e))\n        self._write_filepath = distributed_file_utils.write_filepath(file_path, self.model.distribute_strategy)\n        return self._write_filepath\n\n    def _maybe_remove_file(self):\n        distributed_file_utils.remove_temp_dir_with_filepath(self._write_filepath, self.model.distribute_strategy)\n\n    def _checkpoint_exists(self, filepath):\n        \"\"\"Returns whether the checkpoint that `filepath` refers to exists.\"\"\"\n        if filepath.endswith('.h5'):\n            return file_io.file_exists_v2(filepath)\n        tf_saved_model_exists = file_io.file_exists_v2(filepath)\n        tf_weights_only_checkpoint_exists = file_io.file_exists_v2(filepath + '.index')\n        return tf_saved_model_exists or tf_weights_only_checkpoint_exists\n\n    def _get_most_recently_modified_file_matching_pattern(self, pattern):\n        \"\"\"Returns the most recently modified filepath matching pattern.\n\n        Pattern may contain a python formatting placeholder. If\n        `tf.train.latest_checkpoint()` does not return None, use that; otherwise,\n        check for the most recently modified one that matches the pattern.\n\n        In the rare case where there is more than one pattern-matching file having\n        the same modified time that is most recent among all, return the filepath\n        that is largest (by `>` operator, lexicographically using the numeric\n        equivalents). This provides a tie-breaker when multiple files are most\n        recent. Note that a larger `filepath` can sometimes indicate a later time of\n        modification (for instance, when epoch/batch is used as formatting option),\n        but not necessarily (when accuracy or loss is used). The tie-breaker is\n        put in the logic as best effort to return the most recent, and to avoid\n        nondeterministic results.\n\n        Modified time of a file is obtained with `os.path.getmtime()`.\n\n        This utility function is best demonstrated via an example:\n\n        ```python\n        file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'\n        test_dir = self.get_temp_dir()\n        path_pattern = os.path.join(test_dir, file_pattern)\n        file_paths = [\n            os.path.join(test_dir, file_name) for file_name in\n            ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']\n        ]\n        for file_path in file_paths:\n            # Write something to each of the files\n        self.assertEqual(\n            _get_most_recently_modified_file_matching_pattern(path_pattern),\n            file_paths[-1])\n        ```\n\n        Args:\n            pattern: The file pattern that may optionally contain a python placeholder\n                such as `{epoch:02d}`.\n\n        Returns:\n            The most recently modified file's full filepath matching `pattern`. If\n            `pattern` does not contain any placeholder, this returns the filepath that\n            exactly matches `pattern`.
Returns `None` if no match is found.\n        \"\"\"\n        dir_name = os.path.dirname(pattern)\n        base_name = os.path.basename(pattern)\n        base_name_regex = '^' + re.sub('{.*}', '.*', base_name) + '$'\n        latest_tf_checkpoint = checkpoint_management.latest_checkpoint(dir_name)\n        if latest_tf_checkpoint is not None and re.match(base_name_regex, os.path.basename(latest_tf_checkpoint)):\n            return latest_tf_checkpoint\n        latest_mod_time = 0\n        file_path_with_latest_mod_time = None\n        n_file_with_latest_mod_time = 0\n        file_path_with_largest_file_name = None\n        if file_io.file_exists_v2(dir_name):\n            for file_name in os.listdir(dir_name):\n                if re.match(base_name_regex, file_name):\n                    file_path = os.path.join(dir_name, file_name)\n                    mod_time = os.path.getmtime(file_path)\n                    if file_path_with_largest_file_name is None or file_path > file_path_with_largest_file_name:\n                        file_path_with_largest_file_name = file_path\n                    if mod_time > latest_mod_time:\n                        latest_mod_time = mod_time\n                        file_path_with_latest_mod_time = file_path\n                        n_file_with_latest_mod_time = 1\n                    elif mod_time == latest_mod_time:\n                        n_file_with_latest_mod_time += 1\n        if n_file_with_latest_mod_time == 1:\n            return file_path_with_latest_mod_time\n        else:\n            return file_path_with_largest_file_name", "docstring": "Callback to save the Keras model or model weights at some frequency.\n\n`ModelCheckpoint` callback is used in conjunction with training using\n`model.fit()` to save a model or weights (in a checkpoint file) at some\ninterval, so the model or weights can be loaded later to continue the training\nfrom the state saved.\n\nA few options this callback provides include:\n\n- Whether to only keep the model that has achieved the \"best performance\" so\n  far, or whether to save the model at the end of every epoch regardless of\n  performance.\n- Definition of 'best'; which quantity to monitor and whether it should be\n  maximized or minimized.\n- The frequency it should save at. Currently, the callback supports saving at\n  the end of every epoch, or after a fixed number of training batches.\n- Whether only weights are saved, or the whole model is saved.\n\nNote: If you get `WARNING:tensorflow:Can save best model only with <name>\navailable, skipping` see the description of the `monitor` argument for\ndetails on how to get this right.\n\nExample:\n\n```python\nmodel.compile(loss=..., optimizer=...,\n              metrics=['accuracy'])\n\nEPOCHS = 10\ncheckpoint_filepath = '/tmp/checkpoint'\nmodel_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n    filepath=checkpoint_filepath,\n    save_weights_only=True,\n    monitor='val_accuracy',\n    mode='max',\n    save_best_only=True)\n\n# Model weights are saved at the end of every epoch, if it's the best seen\n# so far.\nmodel.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])\n\n# The model weights (that are considered the best) are loaded into the model.\nmodel.load_weights(checkpoint_filepath)\n```\n\nArgs:\n    filepath: string or `PathLike`, path to save the model file. e.g.\n      filepath = os.path.join(working_dir, 'ckpt', file_name). `filepath`\n      can contain named formatting options, which will be filled with the value of\n      `epoch` and keys in `logs` (passed in `on_epoch_end`). For example: if\n      `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the model\n      checkpoints will be saved with the epoch number and the validation loss\n      in the filename. The directory of the filepath should not be reused by\n      any other callbacks to avoid conflicts.\n    monitor: The metric name to monitor.
Typically the metrics are set by the\n      `Model.compile` method. Note:\n\n      * Prefix the name with `\"val_\"` to monitor validation metrics.\n      * Use `\"loss\"` or `\"val_loss\"` to monitor the model's total loss.\n      * If you specify metrics as strings, like `\"accuracy\"`, pass the same\n        string (with or without the `\"val_\"` prefix).\n      * If you pass `metrics.Metric` objects, `monitor` should be set to\n        `metric.name`\n      * If you're not sure about the metric names you can check the contents\n        of the `history.history` dictionary returned by\n        `history = model.fit()`\n      * Multi-output models set additional prefixes on the metric names.\n\n    verbose: verbosity mode, 0 or 1.\n    save_best_only: if `save_best_only=True`, it only saves when the model\n      is considered the \"best\" and the latest best model according to the\n      quantity monitored will not be overwritten. If `filepath` doesn't\n      contain formatting options like `{epoch}` then `filepath` will be\n      overwritten by each new better model.\n    mode: one of {'auto', 'min', 'max'}. If `save_best_only=True`, the\n      decision to overwrite the current save file is made based on either\n      the maximization or the minimization of the monitored quantity.\n      For `val_acc`, this should be `max`, for `val_loss` this should be\n      `min`, etc. In `auto` mode, the mode is set to `max` if the quantities\n      monitored are 'acc' or start with 'fmeasure' and are set to `min` for\n      the rest of the quantities.\n    save_weights_only: if True, then only the model's weights will be saved\n      (`model.save_weights(filepath)`), else the full model is saved\n      (`model.save(filepath)`).\n    save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves\n      the model after each epoch. When using an integer, the callback saves the\n      model at the end of this many batches. If the `Model` is compiled with\n      `steps_per_execution=N`, then the saving criteria will be\n      checked every Nth batch. Note that if the saving isn't aligned to\n      epochs, the monitored metric may potentially be less reliable (it\n      could reflect as little as 1 batch, since the metrics get reset every\n      epoch). Defaults to `'epoch'`.\n    options: Optional `tf.train.CheckpointOptions` object if\n      `save_weights_only` is true or optional `tf.saved_model.SaveOptions`\n      object if `save_weights_only` is false.\n    **kwargs: Additional arguments for backwards compatibility.
Possible key\n is `period`."} +{"repo": "tensorflow", "function": "def _run_simple_loop_test(self, mode, inp, body, out):\n self._maybe_skip(mode)\n with ops.device(_get_device(mode)):\n random_seed.set_random_seed(0)\n expected_types = []\n for section in [inp, body, out]:\n section_expected_types = []\n for color in section:\n if color.isupper():\n expected_type = self._lower_precision_dtype(mode).as_datatype_enum\n else:\n expected_type = types_pb2.DT_FLOAT\n section_expected_types.append(expected_type)\n expected_types.append(section_expected_types)\n a = _build_simple_loop_graph(inp, body, out)\n output_val_ref, output_val, cost_graph = self._run(mode, a)\n node_map = _build_node_map(cost_graph.node)\n section_names = ['input', 'while/body', 'output']\n all_types_correct = True\n for section_name, expected_types in zip(section_names, expected_types):\n for i, expected_type in enumerate(expected_types):\n node_name = section_name + '_%i' % i\n output_port = 0\n optimized_type = node_map[node_name].output_info[output_port].dtype\n if optimized_type != expected_type:\n print('Expected node %s to have type %s but got type %s' % (node_name, expected_type, optimized_type))\n all_types_correct = False\n self.assertTrue(all_types_correct)\n if mode == 'mkl':\n self.assertAllClose(output_val_ref, output_val, atol=0.02, rtol=0.02)\n else:\n self.assertAllClose(output_val_ref, output_val, atol=0.002, rtol=0.001)", "docstring": "Runs a test of a simple loop.\n\nThe loop has different node colors in different sections of the graph. The\narguments must be strings where each character represents the color of a\nnode in that section of the graph: w = allow, g = infer, c = clear,\nb = deny. CAPITALIZED characters indicate that the node is expected to be\nchanged to DT_HALF during graph optimization.\n\ninp -> loop [ body ] -> out.\n\nArgs:\n mode: Either 'cuda' or 'mkl'.\n inp: A string of letters indicating the colors and expected dtypes of the\n input nodes.\n body: A string of letters indicating the colors and expected dtypes of the\n body nodes.\n out: A string of letters indicating the colors and expected dtypes of the\n output nodes."} +{"repo": "beam", "function": "def upload_part(self, request):\n try:\n boto_response = self.client.upload_part(Body=request.bytes, Bucket=request.bucket, Key=request.object, PartNumber=request.part_number, UploadId=request.upload_id)\n response = messages.UploadPartResponse(boto_response['ETag'], request.part_number)\n return response\n except Exception as e:\n raise messages.S3ClientError(str(e), get_http_error_code(e))", "docstring": "Uploads part of a file to S3 during a multipart upload\n\nArgs:\n request: (UploadPartRequest) input message\nReturns:\n (UploadPartResponse) The response message."} +{"repo": "transformers", "function": "def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):\n cos = cos[position_ids].unsqueeze(unsqueeze_dim)\n sin = sin[position_ids].unsqueeze(unsqueeze_dim)\n q_embed = q * cos + rotate_half(q) * sin\n k_embed = k * cos + rotate_half(k) * sin\n return (q_embed, k_embed)", "docstring": "Applies Rotary Position Embedding to the query and key tensors.\n\nArgs:\n q (`torch.Tensor`): The query tensor.\n k (`torch.Tensor`): The key tensor.\n cos (`torch.Tensor`): The cosine part of the rotary embedding.\n sin (`torch.Tensor`): The sine part of the rotary embedding.\n position_ids (`torch.Tensor`):\n The position indices of the tokens corresponding to the query and key tensors. 
For example, this can be\n        used to pass offset position ids when working with a KV-cache.\n    unsqueeze_dim (`int`, *optional*, defaults to 1):\n        The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n        sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n        that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n        k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n        cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have\n        the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\nReturns:\n    `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding."} +{"repo": "tensorflow", "function": "def _trace_variant_creation(self):\n    variant = self._variant_tensor\n    if not isinstance(variant, ops.EagerTensor):\n        raise NotImplementedError('Constructing a tf.function that reproduces a given dataset is only supported for datasets created eagerly. Please file a feature request if this is important to you.')\n    with context.eager_mode(), ops.device('CPU'):\n        graph_def = graph_pb2.GraphDef().FromString(self._as_serialized_graph(external_state_policy=options_lib.ExternalStatePolicy.FAIL).numpy())\n    output_node_names = []\n    for node in graph_def.node:\n        if node.op == '_Retval':\n            output_node_names = node.input\n    if len(output_node_names) != 1:\n        raise AssertionError(f'Dataset graph is expected to only have one return value but found {len(output_node_names)} return values: {output_node_names}.')\n    output_node_name = output_node_names[0]\n    file_path_nodes = {}\n    if ops.get_default_graph().building_function:\n        asset_tracker = self._maybe_track_assets(graph_def)\n        for key in asset_tracker:\n            assets_list = [array_ops.expand_dims(asset.asset_path, axis=0) for asset in asset_tracker[key]]\n            file_path_nodes[key] = array_ops.concat(assets_list, axis=0)\n    variant_function = wrap_function.function_from_graph_def(graph_def, inputs=[], outputs=output_node_name + ':0', captures=file_path_nodes)\n    for used_function in self._functions():\n        used_function.function.add_to_graph(variant_function.graph)\n    return variant_function", "docstring": "Traces a function which outputs a variant `tf.Tensor` for this dataset.\n\nNote that creating this function involves evaluating an op, and is currently\nonly supported when executing eagerly.\n\nReturns:\n  A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`."} +{"repo": "tensorflow", "function": "def _export_to_saved_model_graph(self, object_map, tensor_map, options, **kwargs):\n    _, _, _ = (object_map, tensor_map, options)\n    del kwargs\n    return []", "docstring": "Creates a copy of this object's tensors onto the SavedModel graph.\n\nNeeds to be overridden if the class contains tensors that must be saved\ninto the graph. This method should update the `object_map` and `tensor_map`\ndictionaries.\n\nThis method is called on all nodes in the Trackable Graph (generated by\n`_trackable_children`). The nodes are traversed in the order defined by\n`_deserialization_dependencies`.\n\nAll usages of _map_resources should be migrated to this method.\n\nArgs:\n  object_map: A dictionary that maps original Trackables to the copied\n    Trackables.
This only needs to be updated if the object is a\n tf.function, or if the copied tensors are necessary for checkpointing\n this object.\n tensor_map: Dictionary mapping original tensors to copied tensors.\n options: A `tf.saved_model.SaveOptions` object.\n **kwargs: Additional kwargs that may be added at a later time.\n\nReturns:\n Flat list of original tensors that have been copied."} +{"repo": "transformers", "function": "class BasicTokenizer:\n\n def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None):\n if never_split is None:\n never_split = []\n self.do_lower_case = do_lower_case\n self.never_split = set(never_split)\n self.tokenize_chinese_chars = tokenize_chinese_chars\n self.strip_accents = strip_accents\n\n def tokenize(self, text, never_split=None):\n \"\"\"\n Basic Tokenization of a piece of text. Split on \"white spaces\" only, for sub-word tokenization, see\n WordPieceTokenizer.\n\n Args:\n **never_split**: (*optional*) list of str\n Kept for backward compatibility purposes. Now implemented directly at the base class level (see\n [`PreTrainedTokenizer.tokenize`]) List of token not to split.\n \"\"\"\n never_split = self.never_split.union(set(never_split)) if never_split else self.never_split\n text = self._clean_text(text)\n if self.tokenize_chinese_chars:\n text = self._tokenize_chinese_chars(text)\n orig_tokens = whitespace_tokenize(text)\n split_tokens = []\n for token in orig_tokens:\n if token not in never_split:\n if self.do_lower_case:\n token = token.lower()\n if self.strip_accents is not False:\n token = self._run_strip_accents(token)\n elif self.strip_accents:\n token = self._run_strip_accents(token)\n split_tokens.extend(self._run_split_on_punc(token, never_split))\n output_tokens = whitespace_tokenize(' '.join(split_tokens))\n return output_tokens\n\n def _run_strip_accents(self, text):\n \"\"\"Strips accents from a piece of text.\"\"\"\n text = unicodedata.normalize('NFD', text)\n output = []\n for char in text:\n cat = unicodedata.category(char)\n if cat == 'Mn':\n continue\n output.append(char)\n return ''.join(output)\n\n def _run_split_on_punc(self, text, never_split=None):\n \"\"\"Splits punctuation on a piece of text.\"\"\"\n if never_split is not None and text in never_split:\n return [text]\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if _is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n return [''.join(x) for x in output]\n\n def _tokenize_chinese_chars(self, text):\n \"\"\"Adds whitespace around any CJK character.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if self._is_chinese_char(cp):\n output.append(' ')\n output.append(char)\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)\n\n def _is_chinese_char(self, cp):\n \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n if cp >= 19968 and cp <= 40959 or (cp >= 13312 and cp <= 19903) or (cp >= 131072 and cp <= 173791) or (cp >= 173824 and cp <= 177983) or (cp >= 177984 and cp <= 178207) or (cp >= 178208 and cp <= 183983) or (cp >= 63744 and cp <= 64255) or (cp >= 194560 and cp <= 195103):\n return True\n return False\n\n def _clean_text(self, text):\n \"\"\"Performs invalid character removal and whitespace cleanup on text.\"\"\"\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or 
_is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)", "docstring": "Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).\n\nArgs:\n do_lower_case (`bool`, *optional*, defaults to `True`):\n Whether or not to lowercase the input when tokenizing.\n never_split (`Iterable`, *optional*):\n Collection of tokens which will never be split during tokenization. Only has an effect when\n `do_basic_tokenize=True`\n tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):\n Whether or not to tokenize Chinese characters.\n\n This should likely be deactivated for Japanese (see this\n [issue](https://github.com/huggingface/transformers/issues/328)).\n strip_accents (`bool`, *optional*):\n Whether or not to strip all accents. If this option is not specified, then it will be determined by the\n value for `lowercase` (as in the original BERT)."} +{"repo": "transformers", "function": "class FlaxBaseModelOutputWithPastAndCrossAttentions(ModelOutput):\n last_hidden_state: Optional[jnp.ndarray] = None\n past_key_values: Optional[Tuple[Tuple[jnp.ndarray]]] = None\n hidden_states: Optional[Tuple[jnp.ndarray]] = None\n attentions: Optional[Tuple[jnp.ndarray]] = None\n cross_attentions: Optional[Tuple[jnp.ndarray]] = None", "docstring": "Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).\n\nArgs:\n last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n\n If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n hidden_size)` is output.\n past_key_values (`tuple(tuple(jnp.ndarray))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(jnp.ndarray)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if\n `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,\n encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if\n `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`\n input) to speed up sequential decoding.\n hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n cross_attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):\n Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, 
sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads."} +{"repo": "transformers", "function": "class UdopProcessor(ProcessorMixin):\n attributes = ['image_processor', 'tokenizer']\n image_processor_class = 'LayoutLMv3ImageProcessor'\n tokenizer_class = ('UdopTokenizer', 'UdopTokenizerFast')\n optional_call_args = ['text_pair']\n\n def __init__(self, image_processor, tokenizer):\n super().__init__(image_processor, tokenizer)\n\n def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, *args, audio=None, videos=None, **kwargs: Unpack[UdopProcessorKwargs]) -> BatchFeature:\n \"\"\"\n This method first forwards the `images` argument to [`~UdopImageProcessor.__call__`]. In case\n [`UdopImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and\n bounding boxes along with the additional arguments to [`~UdopTokenizer.__call__`] and returns the output,\n together with the prepared `pixel_values`. In case [`UdopImageProcessor`] was initialized with `apply_ocr` set\n to `False`, it passes the words (`text`/``text_pair`) and `boxes` specified by the user along with the\n additional arguments to [`~UdopTokenizer.__call__`] and returns the output, together with the prepared\n `pixel_values`.\n\n Alternatively, one can pass `text_target` and `text_pair_target` to prepare the targets of UDOP.\n\n Please refer to the docstring of the above two methods for more information.\n \"\"\"\n output_kwargs = self._merge_kwargs(UdopProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, **self.prepare_and_validate_optional_call_args(*args))\n boxes = output_kwargs['text_kwargs'].pop('boxes', None)\n word_labels = output_kwargs['text_kwargs'].pop('word_labels', None)\n text_pair = output_kwargs['text_kwargs'].pop('text_pair', None)\n return_overflowing_tokens = output_kwargs['text_kwargs'].get('return_overflowing_tokens', False)\n return_offsets_mapping = output_kwargs['text_kwargs'].get('return_offsets_mapping', False)\n text_target = output_kwargs['text_kwargs'].get('text_target', None)\n if self.image_processor.apply_ocr and boxes is not None:\n raise ValueError('You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.')\n if self.image_processor.apply_ocr and word_labels is not None:\n raise ValueError('You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')\n if return_overflowing_tokens and (not return_offsets_mapping):\n raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.')\n if text_target is not None:\n return self.tokenizer(**output_kwargs['text_kwargs'])\n else:\n features = self.image_processor(images=images, **output_kwargs['images_kwargs'])\n features_words = features.pop('words', None)\n features_boxes = features.pop('boxes', None)\n output_kwargs['text_kwargs'].pop('text_target', None)\n output_kwargs['text_kwargs'].pop('text_pair_target', None)\n output_kwargs['text_kwargs']['text_pair'] = text_pair\n output_kwargs['text_kwargs']['boxes'] = boxes if boxes is not None else features_boxes\n output_kwargs['text_kwargs']['word_labels'] = word_labels\n if text is not None and self.image_processor.apply_ocr and (text_pair is None):\n if isinstance(text, str):\n text = [text]\n 
output_kwargs['text_kwargs']['text_pair'] = features_words\n encoded_inputs = self.tokenizer(text=text if text is not None else features_words, **output_kwargs['text_kwargs'])\n if return_overflowing_tokens is True:\n features['pixel_values'] = self.get_overflowing_images(features['pixel_values'], encoded_inputs['overflow_to_sample_mapping'])\n features.update(encoded_inputs)\n return features\n\n def get_overflowing_images(self, images, overflow_to_sample_mapping):\n images_with_overflow = []\n for sample_idx in overflow_to_sample_mapping:\n images_with_overflow.append(images[sample_idx])\n if len(images_with_overflow) != len(overflow_to_sample_mapping):\n raise ValueError(f'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')\n return images_with_overflow\n\n def batch_decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, **kwargs)\n\n def decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer\n to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, **kwargs)\n\n @property\n def model_input_names(self):\n return ['pixel_values', 'input_ids', 'bbox', 'attention_mask']", "docstring": "Constructs a UDOP processor which combines a LayoutLMv3 image processor and a UDOP tokenizer into a single processor.\n\n[`UdopProcessor`] offers all the functionalities you need to prepare data for the model.\n\nIt first uses [`LayoutLMv3ImageProcessor`] to resize, rescale and normalize document images, and optionally applies OCR\nto get words and normalized bounding boxes. These are then provided to [`UdopTokenizer`] or [`UdopTokenizerFast`],\nwhich turns the words and bounding boxes into token-level `input_ids`, `attention_mask`, `token_type_ids`, `bbox`.\nOptionally, one can provide integer `word_labels`, which are turned into token-level `labels` for token\nclassification tasks (such as FUNSD, CORD).\n\nAdditionally, it also supports passing `text_target` and `text_pair_target` to the tokenizer, which can be used to\nprepare labels for language modeling tasks.\n\nArgs:\n image_processor (`LayoutLMv3ImageProcessor`):\n An instance of [`LayoutLMv3ImageProcessor`]. The image processor is a required input.\n tokenizer (`UdopTokenizer` or `UdopTokenizerFast`):\n An instance of [`UdopTokenizer`] or [`UdopTokenizerFast`]. 
The tokenizer is a required input."} +{"repo": "transformers", "function": "def torch_distributed_zero_first(local_rank: int):\n if local_rank not in [-1, 0]:\n dist.barrier()\n yield\n if local_rank == 0:\n dist.barrier()", "docstring": "Decorator to make all processes in distributed training wait for each local_master to do something.\n\nArgs:\n local_rank (`int`): The rank of the local process."} +{"repo": "beam", "function": "def get_container_image_from_options(pipeline_options):\n worker_options = pipeline_options.view_as(WorkerOptions)\n if worker_options.sdk_container_image:\n return worker_options.sdk_container_image\n container_repo = names.DATAFLOW_CONTAINER_IMAGE_REPOSITORY\n image_name = '{repository}/beam_python{major}.{minor}_sdk'.format(repository=container_repo, major=sys.version_info[0], minor=sys.version_info[1])\n image_tag = _get_required_container_version()\n return image_name + ':' + image_tag", "docstring": "For internal use only; no backwards-compatibility guarantees.\n\nArgs:\n pipeline_options (PipelineOptions): A container for pipeline options.\n\nReturns:\n str: Container image for remote execution."} +{"repo": "transformers", "function": "def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask):\n labels_per_column, _ = reduce_sum(tf.cast(labels, tf.float32), col_index)\n column_label = tf.argmax(labels_per_column, axis=-1, output_type=tf.int32)\n no_cell_selected = tf.equal(tf.reduce_max(labels_per_column, axis=-1), 0)\n column_label = tf.where(no_cell_selected, tf.zeros_like(column_label), column_label)\n column_dist = tfp.distributions.Categorical(logits=column_logits)\n column_loss_per_example = -column_dist.log_prob(column_label)\n logits_per_cell, _ = reduce_mean(token_logits, cell_index)\n labels_per_cell, labels_index = reduce_max(tf.cast(labels, tf.int32), cell_index)\n column_id_for_cells = cell_index.project_inner(labels_index).indices\n column_mask = tf.cast(tf.equal(column_id_for_cells, tf.expand_dims(column_label, axis=1)), tf.float32)\n cell_dist = tfp.distributions.Bernoulli(logits=logits_per_cell)\n cell_log_prob = cell_dist.log_prob(labels_per_cell)\n cell_loss = -tf.reduce_sum(cell_log_prob * column_mask * cell_mask, axis=1)\n cell_loss /= tf.reduce_sum(column_mask * cell_mask, axis=1) + EPSILON_ZERO_DIVISION\n selection_loss_per_example = column_loss_per_example\n selection_loss_per_example += tf.where(no_cell_selected, tf.zeros_like(selection_loss_per_example), cell_loss)\n selected_column_id = tf.argmax(column_logits, axis=-1, output_type=tf.int32)\n selected_column_mask = tf.cast(tf.equal(column_id_for_cells, tf.expand_dims(selected_column_id, axis=-1)), tf.float32)\n selected_column_mask = tf.where(tf.equal(column_id_for_cells, 0), tf.zeros_like(selected_column_mask), selected_column_mask)\n logits_per_cell += CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask)\n logits = gather(logits_per_cell, cell_index)\n return (selection_loss_per_example, logits)", "docstring": "Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The\nmodel first predicts a column and then selects cells within that column (conditioned on the column). 
Cells outside\nthe selected column are never selected.\n\nArgs:\n token_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Tensor containing the logits per token.\n column_logits (`tf.Tensor` of shape `(batch_size, max_num_cols)`):\n Tensor containing the logits per column.\n labels (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Labels per token.\n cell_index (`ProductIndexMap`):\n Index that groups tokens into cells.\n col_index (`IndexMap`):\n Index that groups tokens into columns.\n cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`):\n Mask for cells that exist in the table (i.e. that are not padding).\n\nReturns:\n selection_loss_per_example (`tf.Tensor` of shape `(batch_size,)`): Loss for each example. logits (`tf.Tensor`\n of shape `(batch_size, sequence_length)`): New logits which are only allowed to select cells in a single\n column. Logits outside of the most likely column according to *column_logits* will be set to a very low value\n (such that the probabilities are 0)."} +{"repo": "tensorflow", "function": "def write(self, file_prefix: str) -> str:", "docstring": "Serializes proto to disk.\n\nArgs:\n file_prefix: string prefix of the filepath.\n\nReturns:\n The actual path the proto is written to."} +{"repo": "beam", "function": "def enrichment_transform(pcoll, enrichment_handler: str, handler_config: dict[str, Any], timeout: Optional[float]=30):\n options.YamlOptions.check_enabled(pcoll.pipeline, 'Enrichment')\n if not Enrichment:\n raise ValueError(f\"gcp dependencies not installed. Cannot use {enrichment_handler} handler. Please install using 'pip install apache-beam[gcp]'.\")\n if enrichment_handler == 'FeastFeatureStore' and (not FeastFeatureStoreEnrichmentHandler):\n raise ValueError(\"FeastFeatureStore handler requires 'feast' package to be installed. \" + \"Please install using 'pip install feast[gcp]' and try again.\")\n handler_map = {'BigQuery': BigQueryEnrichmentHandler, 'BigTable': BigTableEnrichmentHandler, 'FeastFeatureStore': FeastFeatureStoreEnrichmentHandler, 'VertexAIFeatureStore': VertexAIFeatureStoreEnrichmentHandler}\n if enrichment_handler not in handler_map:\n raise ValueError(f'Unknown enrichment source: {enrichment_handler}')\n handler = handler_map[enrichment_handler](**handler_config)\n return pcoll | Enrichment(source_handler=handler, timeout=timeout)", "docstring": "The Enrichment transform allows one to dynamically enhance elements in a\npipeline by performing key-value lookups against external services like\nAPIs or databases.\n\nExample using BigTable: ::\n\n - type: Enrichment\n config:\n enrichment_handler: 'BigTable'\n handler_config:\n project_id: 'apache-beam-testing'\n instance_id: 'beam-test'\n table_id: 'bigtable-enrichment-test'\n row_key: 'product_id'\n timeout: 30\n\nFor more information on Enrichment, see the [Beam docs](\nhttps://beam.apache.org/documentation/transforms/python/elementwise/enrichment/).\n\nArgs:\n enrichment_handler (str): Specifies the source from where data needs\n to be extracted into the pipeline for enriching data. One of\n \"BigQuery\", \"BigTable\", \"FeastFeatureStore\" or \"VertexAIFeatureStore\".\n handler_config (str): Specifies the parameters for the respective\n enrichment_handler in a YAML/JSON format. 
To see the full set of\n handler_config parameters, see their corresponding doc pages:\n\n - [BigQueryEnrichmentHandler](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.enrichment_handlers.bigquery.html#apache_beam.transforms.enrichment_handlers.bigquery.BigQueryEnrichmentHandler)\n - [BigTableEnrichmentHandler](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.enrichment_handlers.bigtable.html#apache_beam.transforms.enrichment_handlers.bigtable.BigTableEnrichmentHandler)\n - [FeastFeatureStoreEnrichmentHandler](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.enrichment_handlers.feast_feature_store.html#apache_beam.transforms.enrichment_handlers.feast_feature_store.FeastFeatureStoreEnrichmentHandler)\n - [VertexAIFeatureStoreEnrichmentHandler](https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.enrichment_handlers.vertex_ai_feature_store.html#apache_beam.transforms.enrichment_handlers.vertex_ai_feature_store.VertexAIFeatureStoreEnrichmentHandler)\n timeout (float): Timeout for source requests in seconds. Defaults to 30\n seconds."} +{"repo": "tensorflow", "function": "def compute_gradients(self, *args, **kwargs):\n return self._opt.compute_gradients(*args, **kwargs)", "docstring": "Compute gradients of \"loss\" for the variables in \"var_list\".\n\nThis simply wraps the compute_gradients() from the real optimizer. The\ngradients will be aggregated in the apply_gradients() so that user can\nmodify the gradients like clipping with per replica global norm if needed.\nThe global norm with aggregated gradients can be bad as one replica's huge\ngradients can hurt the gradients from other replicas.\n\nArgs:\n *args: Arguments for compute_gradients().\n **kwargs: Keyword arguments for compute_gradients().\n\nReturns:\n A list of (gradient, variable) pairs."} +{"repo": "transformers", "function": "def get_added_vocab(self) -> dict[str, int]:\n return self._added_tokens_encoder", "docstring": "Returns the added tokens in the vocabulary as a dictionary of token to index. Results might be different from\nthe fast call because for now we always add the tokens even if they are already in the vocabulary. This is\nsomething we should change.\n\nReturns:\n `Dict[str, int]`: The added tokens."} +{"repo": "pytype", "function": "def get_instance_type_parameter(self, name: str, node: 'cfg.CFGNode | None'=None):\n del name\n if node is None:\n node = self.ctx.root_node\n return self.ctx.new_unsolvable(node)", "docstring": "Get a cfg.Variable of the instance's values for the type parameter.\n\nTreating self as an abstract.Instance, gets the variable of its values for\nthe given type parameter. 
For the real implementation, see\nSimpleValue.get_instance_type_parameter.\n\nArgs:\n name: The name of the type parameter.\n node: Optionally, the current CFG node.\n\nReturns:\n A Variable which may be empty."} +{"repo": "transformers", "function": "def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_center_crop: Optional[bool]=None, crop_size: Optional[Dict[str, int]]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, return_tensors: Optional[Union[TensorType, str]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n crop_size = crop_size if crop_size is not None else self.crop_size\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n images = make_list_of_images(images)\n if not valid_images(images):\n raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)\n images = [to_numpy_array(image) for image in images]\n if do_rescale and is_scaled_image(images[0]):\n logger.warning_once('It looks like you are trying to rescale already rescaled images. If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(images[0])\n if do_resize:\n images = [self.resize(image, size=size, resample=resample, input_data_format=input_data_format) for image in images]\n if do_center_crop:\n images = [self.center_crop(image, crop_size=crop_size, input_data_format=input_data_format) for image in images]\n if do_rescale:\n images = [self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format) for image in images]\n if do_normalize:\n images = [self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format) for image in images]\n images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images]\n encoded_inputs = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)\n return encoded_inputs", "docstring": "Prepares an image or batch of images for the model.\n\nArgs:\n images (`ImageInput`):\n The image or batch of images to be prepared. Expects a single or batch of images with pixel values\n ranging from 0 to 255. 
If passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether or not to resize the input. If `True`, will resize the input to the size specified by `size`.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n The size to resize the input to. Only has an effect if `do_resize` is set to `True`.\n resample (`PILImageResampling`, *optional*, defaults to `self.resample`):\n The resampling filter to use when resizing the input. Only has an effect if `do_resize` is set to\n `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether or not to center crop the input. If `True`, will center crop the input to the size specified by\n `crop_size`.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n The size to center crop the input to. Only has an effect if `do_center_crop` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether or not to rescale the input. If `True`, will rescale the input by dividing it by\n `rescale_factor`.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n The factor to rescale the input by. Only has an effect if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether or not to normalize the input. If `True`, will normalize the input by subtracting `image_mean`\n and dividing by `image_std`.\n image_mean (`Union[float, List[float]]`, *optional*, defaults to `self.image_mean`):\n The mean to subtract from the input when normalizing. Only has an effect if `do_normalize` is set to\n `True`.\n image_std (`Union[float, List[float]]`, *optional*, defaults to `self.image_std`):\n The standard deviation to divide the input by when normalizing. Only has an effect if `do_normalize` is\n set to `True`.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: defaults to the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format."} +{"repo": "tf-quant-finance", "function": "def swap_curve_fit(float_leg_start_times: List[types.RealTensor], float_leg_end_times: List[types.RealTensor], float_leg_daycount_fractions: List[types.RealTensor], fixed_leg_start_times: List[types.RealTensor], fixed_leg_end_times: List[types.RealTensor], fixed_leg_daycount_fractions: List[types.RealTensor], fixed_leg_cashflows: List[types.RealTensor], present_values: List[types.RealTensor], initial_curve_rates: types.RealTensor, present_values_settlement_times: List[types.RealTensor]=None, float_leg_discount_rates: List[types.RealTensor]=None, float_leg_discount_times: List[types.RealTensor]=None, fixed_leg_discount_rates: List[types.RealTensor]=None, fixed_leg_discount_times: List[types.RealTensor]=None, optimizer: Callable[..., Any]=None, curve_interpolator: Callable[..., types.RealTensor]=None, instrument_weights: types.RealTensor=None, curve_tolerance: types.RealTensor=1e-08, maximum_iterations: types.IntTensor=50, dtype: tf.DType=None, name: str=None) -> scc.SwapCurveBuilderResult:\n with tf.name_scope(name or 'swap_curve'):\n if optimizer is None:\n optimizer = optimizers.conjugate_gradient_minimize\n present_values = _convert_to_tensors(dtype, present_values, 'present_values')\n dtype = present_values[0].dtype\n if present_values_settlement_times is None:\n pv_settlement_times = [tf.zeros([], dtype=dtype) for pv in present_values]\n else:\n pv_settlement_times = _convert_to_tensors(dtype, present_values_settlement_times, 'pv_settlement_times')\n float_leg_start_times = _convert_to_tensors(dtype, float_leg_start_times, 'float_leg_start_times')\n float_leg_end_times = _convert_to_tensors(dtype, float_leg_end_times, 'float_leg_end_times')\n float_leg_daycount_fractions = _convert_to_tensors(dtype, float_leg_daycount_fractions, 'float_leg_daycount_fractions')\n fixed_leg_start_times = _convert_to_tensors(dtype, fixed_leg_start_times, 'fixed_leg_start_times')\n fixed_leg_end_times = _convert_to_tensors(dtype, fixed_leg_end_times, 'fixed_leg_end_times')\n fixed_leg_daycount_fractions = _convert_to_tensors(dtype, fixed_leg_daycount_fractions, 'fixed_leg_daycount_fractions')\n fixed_leg_cashflows = _convert_to_tensors(dtype, fixed_leg_cashflows, 'fixed_leg_cashflows')\n present_values = tf.stack(present_values, axis=-1)\n if instrument_weights is None:\n instrument_weights = _initialize_instrument_weights(float_leg_end_times, fixed_leg_end_times, dtype=dtype)\n else:\n instrument_weights = _convert_to_tensors(dtype, instrument_weights, 'instrument_weights')\n if curve_interpolator is None:\n\n def default_interpolator(xi, x, y):\n return linear.interpolate(xi, x, y, dtype=dtype)\n curve_interpolator = default_interpolator\n self_discounting_float_leg = False\n self_discounting_fixed_leg = False\n if float_leg_discount_rates is None and fixed_leg_discount_rates is None:\n self_discounting_float_leg = True\n self_discounting_fixed_leg = True\n float_leg_discount_rates = [0.0]\n float_leg_discount_times = [0.0]\n fixed_leg_discount_rates = [0.0]\n fixed_leg_discount_times = [0.0]\n elif fixed_leg_discount_rates is None:\n fixed_leg_discount_rates = float_leg_discount_rates\n fixed_leg_discount_times = float_leg_discount_times\n elif float_leg_discount_rates is 
None:\n self_discounting_float_leg = True\n float_leg_discount_rates = [0.0]\n float_leg_discount_times = [0.0]\n float_leg_discount_rates = _convert_to_tensors(dtype, float_leg_discount_rates, 'float_disc_rates')\n float_leg_discount_rates = tf.stack(float_leg_discount_rates, axis=-1)\n float_leg_discount_times = _convert_to_tensors(dtype, float_leg_discount_times, 'float_disc_times')\n float_leg_discount_times = tf.stack(float_leg_discount_times, axis=-1)\n fixed_leg_discount_rates = _convert_to_tensors(dtype, fixed_leg_discount_rates, 'fixed_disc_rates')\n fixed_leg_discount_rates = tf.stack(fixed_leg_discount_rates, axis=-1)\n fixed_leg_discount_times = _convert_to_tensors(dtype, fixed_leg_discount_times, 'fixed_disc_times')\n fixed_leg_discount_times = tf.stack(fixed_leg_discount_times, axis=-1)\n if initial_curve_rates is not None:\n initial_rates = tf.convert_to_tensor(initial_curve_rates, dtype=dtype, name='initial_rates')\n else:\n raise ValueError('Initial state of the curve is not specified.')\n return _build_swap_curve(float_leg_start_times, float_leg_end_times, float_leg_daycount_fractions, fixed_leg_start_times, fixed_leg_end_times, fixed_leg_cashflows, fixed_leg_daycount_fractions, float_leg_discount_rates, float_leg_discount_times, fixed_leg_discount_rates, fixed_leg_discount_times, self_discounting_float_leg, self_discounting_fixed_leg, present_values, pv_settlement_times, optimizer, curve_interpolator, initial_rates, instrument_weights, curve_tolerance, maximum_iterations)", "docstring": "Constructs the zero swap curve using optimization.\n\nA zero swap curve is a function of time which gives the interest rate that\ncan be used to project forward rates at arbitrary `t` for the valuation of\ninterest rate securities.\n\nSuppose we have a set of `N` Interest Rate Swaps (IRS) `S_i` with increasing\nexpiries whose market prices are known.\nSuppose also that the `i`th IRS issues cashflows at times `T_{ij}` where\n`1 <= j <= n_i` and `n_i` is the number of cashflows (including expiry)\nfor the `i`th swap.\nDenote by `T_i` the time of final payment for the `i`th swap\n(hence `T_i = T_{i,n_i}`). This function estimates a set of rates `r(T_i)`\nsuch that when these rates are interpolated to all other cashflow times,\nthe computed value of the swaps matches the market value of the swaps\n(within some tolerance). Rates at intermediate times are interpolated using\nthe user specified interpolation method (the default interpolation method\nis linear interpolation on rates).\n\n#### Example:\n\nThe following example illustrates the usage by building an implied swap curve\nfrom four vanilla (fixed to float) LIBOR swaps.\n\n```python\n\ndtype = np.float64\n\n# Next we will set up LIBOR reset and payment times for four spot starting\n# swaps with maturities 1Y, 2Y, 3Y, 4Y. 
The LIBOR rate spans 6M.\n\nfloat_leg_start_times = [\n np.array([0., 0.5], dtype=dtype),\n np.array([0., 0.5, 1., 1.5], dtype=dtype),\n np.array([0., 0.5, 1.0, 1.5, 2.0, 2.5], dtype=dtype),\n np.array([0., 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=dtype)\n ]\n\nfloat_leg_end_times = [\n np.array([0.5, 1.0], dtype=dtype),\n np.array([0.5, 1., 1.5, 2.0], dtype=dtype),\n np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0], dtype=dtype),\n np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0], dtype=dtype)\n ]\n\n# Next we will set up start and end times for semi-annual fixed coupons.\n\nfixed_leg_start_times = [\n np.array([0., 0.5], dtype=dtype),\n np.array([0., 0.5, 1., 1.5], dtype=dtype),\n np.array([0., 0.5, 1.0, 1.5, 2.0, 2.5], dtype=dtype),\n np.array([0., 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=dtype)\n ]\n\nfixed_leg_end_times = [\n np.array([0.5, 1.0], dtype=dtype),\n np.array([0.5, 1., 1.5, 2.0], dtype=dtype),\n np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0], dtype=dtype),\n np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0], dtype=dtype)\n ]\n\n# Next setup a trivial daycount for floating and fixed legs.\n\nfloat_leg_daycount = [\n np.array([0.5, 0.5], dtype=dtype),\n np.array([0.5, 0.5, 0.5, 0.5], dtype=dtype),\n np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5], dtype=dtype),\n np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], dtype=dtype)\n ]\n\nfixed_leg_daycount = [\n np.array([0.5, 0.5], dtype=dtype),\n np.array([0.5, 0.5, 0.5, 0.5], dtype=dtype),\n np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5], dtype=dtype),\n np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5], dtype=dtype)\n ]\n\nfixed_leg_cashflows = [\n # 1 year swap with 2.855% semi-annual fixed payments.\n np.array([-0.02855, -0.02855], dtype=dtype),\n # 2 year swap with 3.097% semi-annual fixed payments.\n np.array([-0.03097, -0.03097, -0.03097, -0.03097], dtype=dtype),\n # 3 year swap with 3.1% semi-annual fixed payments.\n np.array([-0.031, -0.031, -0.031, -0.031, -0.031, -0.031], dtype=dtype),\n # 4 year swap with 3.2% semi-annual fixed payments.\n np.array([-0.032, -0.032, -0.032, -0.032, -0.032, -0.032, -0.032,\n -0.032], dtype=dtype)\n ]\n\n# The present values of the above IRS.\npvs = np.array([0., 0., 0., 0.], dtype=dtype)\n\n# Initial state of the curve.\ninitial_curve_rates = np.array([0.01, 0.01, 0.01, 0.01], dtype=dtype)\n\nresults = swap_curve_fit(float_leg_start_times, float_leg_end_times,\n float_leg_daycount, fixed_leg_start_times,\n fixed_leg_end_times, fixed_leg_cashflows,\n fixed_leg_daycount, pvs, dtype=dtype,\n initial_curve_rates=initial_curve_rates)\n\n#### References:\n[1]: Leif B.G. Andersen and Vladimir V. Piterbarg. Interest Rate Modeling,\n Volume I: Foundations and Vanilla Models. Chapter 6. 2010.\n\nArgs:\n float_leg_start_times: List of `Tensor`s. Each `Tensor` must be either of\n shape `batch_shape + [k_i]` or `[k_i]` and of the same real dtype. `k_i`\n may be of different sizes. Each `Tensor` represents the beginning of the\n accrual period for the forward rate which determines the floating payment.\n Each element in the list belong to a unique swap to be used to build the\n curve.\n float_leg_end_times: List of `Tensor`s of shapes and `dtype` compatible with\n `float_leg_start_times`. Each `Tensor` represents the end of the\n accrual period for the forward rate which determines the floating payment.\n float_leg_daycount_fractions: List of `Tensor`s of shapes and `dtype`\n compatible with `float_leg_start_times`. 
Each `Tensor` represents the\n daycount fraction of the forward rate which determines the floating\n payment.\n fixed_leg_start_times: List of `Tensor`s. Each `Tensor` must be either of\n shape `batch_shape + [n_i]` or `[n_i]` and of the same real dtype.\n `n_i` may be of different sizes. All elements must have the same `dtype`\n as `float_leg_start_times`. Each `Tensor` represents the beginning of the\n accrual period for the fixed coupon.\n fixed_leg_end_times: List of `Tensor`s of shapes and `dtype` compatible with\n `fixed_leg_start_times`. All elements must have the same `dtype` as\n `fixed_leg_start_times`. Each `Tensor` represents the\n end of the accrual period for the fixed coupon.\n fixed_leg_daycount_fractions: List of `Tensor`s of shapes and `dtype`\n compatible with\n `fixed_leg_start_times`. Each `Tensor` represents the daycount fraction\n applicable for the fixed payment.\n fixed_leg_cashflows: List of `Tensor`s of shapes and `dtype` compatible with\n `fixed_leg_start_times`. The input contains fixed cashflows at each\n coupon payment time including notional (if any). The sign should be\n negative (positive) to indicate net outgoing (incoming) cashflow.\n present_values: List containing `Tensor`s of the same dtype as\n elements of `fixed_leg_cashflows` and of shapes compatible with\n `batch_shape`. The length of the list must be the same as the length of\n `fixed_leg_cashflows`. The input contains the market price of the\n underlying instruments.\n initial_curve_rates: A `Tensor` of the same `dtype` as `present_values` and of\n shape `[batch_shape, num_instruments]` where `num_instruments` is the\n length of `float_leg_start_times`. The starting guess for the discount\n rates used to initialize the iterative procedure.\n present_values_settlement_times: Optional list of `Tensor`s with the shapes\n and `dtype` compatible with `present_values`. The settlement times for the\n present values is the time from now when the instrument is traded to the\n time that the purchase price is actually delivered. If not supplied, then\n it is assumed that the settlement times are zero for every bond.\n Default value: `None` which is equivalent to zero settlement times.\n float_leg_discount_rates: Optional list of `Tensor`s with the shapes\n and `dtype` compatible with `present_values`. This input contains the\n continuously compounded discount rates that will be used\n to discount the floating cashflows. This allows the swap curve to be\n constructed using an independent discount curve (e.g. OIS curve). By\n default the cashflows are discounted using the curve that is being\n constructed.\n float_leg_discount_times: Optional list of `Tensor`s with the shapes\n and `dtype` compatible with `present_values`. This input contains the\n times corresponding to the rates specified via\n the `float_leg_discount_rates`.\n fixed_leg_discount_rates: Optional list of `Tensor`s with the shapes\n and `dtype` compatible with `present_values`. This input contains the\n continuously compounded discount rates that will be used to discount the\n fixed cashflows. This allows the swap curve to be constructed using an\n independent discount curve (e.g. OIS curve). By default the cashflows are\n discounted using the curve that is being constructed.\n fixed_leg_discount_times: Optional list of `Tensor`s with the shapes\n and `dtype` compatible with `present_values`.
This input contains the\n times corresponding to the rates specified via the\n `fixed_leg_discount_rates`.\n optimizer: Optional Python callable which implements the algorithm used\n to minimize the objective function during calibration. It should have\n the following interface: result =\n optimizer(value_and_gradients_function, initial_position, tolerance,\n max_iterations). `value_and_gradients_function` is a Python callable that\n accepts a point as a real `Tensor` and returns a tuple of `Tensor`s of\n real dtype containing the value of the function and its gradient at that\n point. `initial_position` is a real `Tensor` containing the starting\n point of the optimization, `tolerance` is a real scalar `Tensor` for\n stopping tolerance for the procedure and `max_iterations` specifies the\n maximum number of iterations.\n `optimizer` should return a namedtuple containing the items: `position`\n (a tensor containing the optimal value), `converged` (a boolean\n indicating whether the optimizer converged according to the specified\n criteria), `failed` (a boolean indicating if the optimization resulted\n in a failure), `num_iterations` (the number of iterations used), and\n `objective_value` (the value of the objective function at the optimal\n value). The default value for `optimizer` is None and the conjugate\n gradient algorithm is used.\n Default value: `None` - indicating conjugate gradient minimizer.\n curve_interpolator: Optional Python callable used to interpolate the zero\n swap rates at cashflow times. It should have the following interface:\n yi = curve_interpolator(xi, x, y)\n `x`, `y`, `xi`, `yi` are all `Tensors` of real dtype. `x` and `y` are the\n sample points and values (respectively) of the function to be\n interpolated. `xi` are the points at which the interpolation is\n desired and `yi` are the corresponding interpolated values returned by the\n function. The default value for `curve_interpolator` is None in which\n case linear interpolation is used.\n Default value: `None`. If not supplied, the yields to maturity for the\n bonds are used as the initial value.\n instrument_weights: Optional `Tensor` of the same dtype and shape as\n `initial_curve_rates`. This input contains the weight of each instrument\n in computing the objective function for the conjugate gradient\n optimization. By default the weights are set to be the inverse of\n maturities.\n curve_tolerance: Optional positive scalar `Tensor` of same dtype as\n elements of `bond_cashflows`. The absolute tolerance for terminating the\n iterations used to fit the rate curve. The iterations are stopped when the\n estimated discounts at the expiry times of the bond_cashflows change by an\n amount smaller than `discount_tolerance` in an iteration.\n Default value: 1e-8.\n maximum_iterations: Optional positive integer `Tensor`. The maximum number\n of iterations permitted when fitting the curve.\n Default value: 50.\n dtype: `tf.Dtype`. If supplied the dtype for the (elements of)\n `float_leg_start_times`, and `fixed_leg_start_times`.\n Default value: None which maps to the default dtype inferred by\n TensorFlow.\n name: Python str. The name to give to the ops created by this function.\n Default value: `None` which maps to 'swap_curve'.\n\nReturns:\n curve_builder_result: An instance of `SwapCurveBuilderResult` containing the\n following attributes.\n times: Rank 1 real `Tensor`. Times for the computed discount rates.
These\n are chosen to be the expiry times of the supplied cashflows.\n discount_rates: Rank 1 `Tensor` of the same dtype as `times`.\n The inferred discount rates.\n discount_factor: Rank 1 `Tensor` of the same dtype as `times`.\n The inferred discount factors.\n initial_discount_rates: Rank 1 `Tensor` of the same dtype as `times`. The\n initial guess for the discount rates.\n converged: Scalar boolean `Tensor`. Whether the procedure converged.\n The procedure is said to have converged when the maximum absolute\n difference in the discount factors from one iteration to the next falls\n below the `discount_tolerance`.\n failed: Scalar boolean `Tensor`. Whether the procedure failed. Procedure\n may fail either because a NaN value was encountered for the discount\n rates or the discount factors.\n iterations: Scalar int32 `Tensor`. Number of iterations performed.\n objective_value: Scalar real `Tensor`. The value of the objective function\n evaluated using the fitted swap curve.\n\nRaises:\n ValueError: If the initial state of the curve is not\n supplied to the function."} +{"repo": "mobly", "function": "def list_fastboot_devices():\n out = fastboot.FastbootProxy().devices()\n return parse_device_list(out)", "docstring": "List all android devices connected to the computer that are in\nfastboot mode. These are detected by fastboot.\n\nThis function doesn't raise any error if `fastboot` binary doesn't exist,\nbecause `FastbootProxy` itself doesn't raise any error.\n\nReturns:\n A list of android device serials. Empty if there's none."} +{"repo": "tensorflow", "function": "def _copy_trackable_to_cpu(self, object_map):\n del object_map\n raise NotImplementedError('Need to implement _copy_trackable_to_cpu() if the Trackable requires AsyncCheckpoint support.')", "docstring": "Creates a copy of this object onto CPU, also copies values over.\n\nNeeds to be overridden if the `Trackable` requires AsyncCheckpoint support.\nThe method first checks whether a copy of `self` is already created in\n`object_map`, and creates one if not already created.
Then the method copies\nthe **values** of itself over to its copy mapped by `object_map`.\n\nArgs:\n object_map: A dictionary that maps original Trackables to the copied\n Trackables, which reside in the CPU."} +{"repo": "transformers", "function": "def dtype(self) -> torch.dtype:\n if self._rot_mats is not None:\n return self._rot_mats.dtype\n elif self._quats is not None:\n return self._quats.dtype\n else:\n raise ValueError('Both rotations are None')", "docstring": "Returns the dtype of the underlying rotation.\n\nReturns:\n The dtype of the underlying rotation"} +{"repo": "transformers", "function": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[Tuple, TFBaseModelOutput]:\n encoder_outputs = self.encoder(input_ids, attention_mask=attention_mask, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=inputs_embeds, head_mask=head_mask, past_key_values=None, use_cache=False, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n if not return_dict:\n return encoder_outputs\n return TFBaseModelOutput(last_hidden_state=encoder_outputs.last_hidden_state, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)", "docstring": "Returns:\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, TFT5EncoderModel\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google-t5/t5-small\")\n>>> model = TFT5EncoderModel.from_pretrained(\"google-t5/t5-small\")\n\n>>> input_ids = tokenizer(\n... \"Studies have been shown that owning a dog is good for you\", return_tensors=\"tf\"\n... 
).input_ids # Batch size 1\n>>> outputs = model(input_ids)\n```"} +{"repo": "pytype", "function": "def generate_matches(patterns, nodes):\n if not patterns:\n yield (0, {})\n else:\n p, rest = (patterns[0], patterns[1:])\n for c0, r0 in p.generate_matches(nodes):\n if not rest:\n yield (c0, r0)\n else:\n for c1, r1 in generate_matches(rest, nodes[c0:]):\n r = {}\n r.update(r0)\n r.update(r1)\n yield (c0 + c1, r)", "docstring": "Generator yielding matches for a sequence of patterns and nodes.\n\nArgs:\n patterns: a sequence of patterns\n nodes: a sequence of nodes\n\nYields:\n (count, results) tuples where:\n count: the entire sequence of patterns matches nodes[:count];\n results: dict containing named submatches."} +{"repo": "pyglove", "function": "def run(code: str, *, global_vars: Optional[Dict[str, Any]]=None, permission: Optional[permissions.CodePermission]=None, returns_stdout: bool=False, outputs_intermediate: bool=False, sandbox: Optional[bool]=None, timeout: Optional[float]=None) -> Union[Any, Dict[str, Any]]:\n return maybe_sandbox_call(evaluate, code=code, global_vars=global_vars, permission=permission, returns_stdout=returns_stdout, outputs_intermediate=outputs_intermediate, sandbox=sandbox, timeout=timeout)", "docstring": "Executes Python code.\n\nFeatures:\n * Fine-grained execution policy for limiting what APIs could be executed.\n This eliminates the need for sandboxing.\n * It exposes both the final results and intermediate results (variables).\n\nArgs:\n code: Python code to run.\n global_vars: An optional dict of global variables that the code can access.\n permission: Permission for the Python code to run.\n returns_stdout: If True, the stdout (a str) will be returned.\n outputs_intermediate: Applicable when returns_stdout is False. If True,\n intermediate output will be outputted as a dict, with the last line's\n value accessible by key '__result__' and the std output accessible by\n key '__stdout__'. Otherwise the value of the last line will be returned.\n sandbox: If True, run code in sandbox; If False, run code in current\n process. If None, run in sandbox first; if the output could not be\n serialized and passed to the current process, run the code again in the current\n process.\n timeout: Execution timeout in seconds. If None, wait for the code to complete.\n\nReturns:\n The value of the last line of the code block. Or a dict of variable\n names of all locals to their evaluated values as the output of the code to\n run. The value for the last line can be accessed by key '__result__'. Or the\n stdout as a str.\n\nRaises:\n TimeoutError: If the execution time exceeds the timeout.\n Exception: Exceptions that are raised from the code."} +{"repo": "beam", "function": "def __init__(self, columns: list[str], bucket_boundaries: Iterable[Union[int, float]], name: Optional[str]=None):\n super().__init__(columns)\n self.bucket_boundaries = [bucket_boundaries]\n self.name = name", "docstring": "Interpolates values within the provided buckets and then normalizes to\n[0, 1].\n\nInput values are bucketized based on the provided boundaries such that the\ninput is mapped to a positive index i for which `bucket_boundaries[i-1] <=\nelement < bucket_boundaries[i]`, if it exists.
The values are then\nnormalized to the range [0,1] within the bucket, with NaN values being\nmapped to 0.5.\n\nFor more information, see:\nhttps://www.tensorflow.org/tfx/transform/api_docs/python/tft/apply_buckets_with_interpolation\n\nArgs:\n columns: A list of column names to apply the transformation on.\n bucket_boundaries: An iterable of ints or floats representing the bucket\n boundaries sorted in ascending order.\n name: (Optional) A string that specifies the name of the operation."} +{"repo": "tf-quant-finance", "function": "def sample_paths(self, times, num_samples=1, initial_state=None, random_type=None, seed=None, **kwargs):\n pass", "docstring": "Returns a sample of paths from the process.\n\nArgs:\n times: A `Tensor` of positive real values of a shape `[T, k]`, where\n `T` is either empty or a shape which is broadcastable to `batch_shape`\n (as defined when this ItoProcess was initialised) and `k` is the\n number of time points. The definition of `batch_shape` and how it is set\n is dictated by the individual child classes. Typically `batch_shape`\n is set based on the shapes of the input parameters that define the\n ItoProcess, see `GeometricBrownianMotion` for an example. The times at\n which the path points are to be evaluated.\n num_samples: Positive scalar `int`. The number of paths to draw.\n initial_state: `Tensor` of shape `[dim]`. The initial state of the\n process.\n Default value: None which maps to a zero initial state.\n random_type: Enum value of `RandomType`. The type of (quasi)-random number\n generator to use to generate the paths.\n Default value: None which maps to the standard pseudo-random numbers.\n seed: Seed for the random number generator. The seed is\n only relevant if `random_type` is one of\n `[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,\n STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and\n `HALTON_RANDOMIZED` the seed should be a Python integer.
For\n `STATELESS` and `STATELESS_ANTITHETIC `must be supplied as an integer\n `Tensor` of shape `[2]`.\n Default value: `None` which means no seed is set.\n **kwargs: Any other keyword args needed by an implementation.\n\nReturns:\n A real `Tensor` of shape [`batch_shape`, num_samples, k, n] where `k` is\n the size of `times`, `n` is the dimension of the process and `batch_shape`\n is the batch dimensions of the ItoProcess."} +{"repo": "starthinker", "function": "def _post_process(self, feed_item, item):\n if item['assetIdentifier']['name']:\n feed_item[FieldMap.CREATIVE_ASSET_NAME] = item['assetIdentifier']['name']", "docstring": "Maps ids and names of related entities so they can be updated in the Bulkdozer feed.\n\nWhen Bulkdozer is done processing an item, it writes back the updated names\nand ids of related objects, this method makes sure those are updated in the\ncreative asset feed.\n\nArgs:\n feed_item: Feed item representing the creative asset from the Bulkdozer\n feed.\n item: The DCM creative asset being updated or created."} +{"repo": "transformers", "function": "class Phi4MultimodalAudioConfig(PretrainedConfig):\n model_type = 'phi4_multimodal_audio'\n\n def __init__(self, hidden_size: int=1024, intermediate_size: int=1536, num_blocks: int=24, num_attention_heads: int=16, activation: str='swish', chunk_size: int=-1, left_chunk: int=18, dropout_rate: float=0.0, ext_pw_out_channel: int=1024, depthwise_seperable_out_channel: int=1024, depthwise_multiplier: int=1, kernel_size: int=3, conv_activation: str='swish', input_size: int=80, conv_glu_type: str='swish', time_reduction: int=8, bias_max_distance: int=1000, bias_symmetric: bool=False, nemo_activation: str='relu', nemo_conv_channels: int=1024, downsample_rate: int=1, initializer_range: float=0.02, audio_token_id: int=200011, feature_layer: int=-2, **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.activation = activation\n self.chunk_size = chunk_size\n self.left_chunk = left_chunk\n self.num_blocks = num_blocks\n self.dropout_rate = dropout_rate\n self.ext_pw_out_channel = ext_pw_out_channel\n self.depthwise_seperable_out_channel = depthwise_seperable_out_channel\n self.depthwise_multiplier = depthwise_multiplier\n self.kernel_size = kernel_size\n self.conv_activation = conv_activation\n self.input_size = input_size\n self.conv_glu_type = conv_glu_type\n self.time_reduction = time_reduction\n self.bias_max_distance = bias_max_distance\n self.bias_symmetric = bias_symmetric\n self.nemo_activation = nemo_activation\n self.nemo_conv_channels = nemo_conv_channels\n self.downsample_rate = downsample_rate\n self.audio_token_id = audio_token_id\n self.initializer_range = initializer_range\n self.feature_layer = feature_layer\n if time_reduction % 2 != 0:\n raise ValueError('`time_reduction` should be a multiple of 2!')\n length = input_size\n for _ in range(int(math.log(time_reduction, 2))):\n length = math.floor((length - 1) / 2 + 1)\n self.nemo_final_size = length", "docstring": "This is the configuration class to store the configuration of a [`Phi4MultimodalAudioModel`]. It is used to instantiate a\nPhi4Multimodal audio encoder according to the specified arguments, defining the model architecture. 
Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the audio encoder of\n[microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 1024):\n Dimensionality of the encoder layers.\n intermediate_size (`int`, *optional*, defaults to 1536):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_blocks (`int`, *optional*, defaults to 24):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the Transformer encoder.\n activation (`str`, *optional*, defaults to `\"swish\"`):\n The non-linear activation function in the MLPs.\n chunk_size (`int`, *optional*, defaults to -1):\n The chunk size to create the masks.\n left_chunk (`int`, *optional*, defaults to 18):\n The left chunk to create the masks.\n dropout_rate (`float`, *optional*, defaults to 0.0):\n The dropout ratio.\n ext_pw_out_channel (`int`, *optional*, defaults to 1024):\n Number of out channels in the point-wise conv modules.\n depthwise_seperable_out_channel (`int`, *optional*, defaults to 1024):\n Number of out channels in the depth-wise separable conv modules.\n depthwise_multiplier (`int`, *optional*, defaults to 1):\n Input size multiplier for the depth-wise separable conv modules.\n kernel_size (`int`, *optional*, defaults to 3):\n Kernel size for the depth-wise separable conv modules.\n conv_activation (`str`, *optional*, defaults to `\"swish\"`):\n The non-linear activation function in the conv modules.\n input_size (`int`, *optional*, defaults to 80):\n Input size for the audio model.\n conv_glu_type (`str`, *optional*, defaults to `\"swish\"`):\n The non-linear activation function in the point-wise conv modules.\n time_reduction (`int`, *optional*, defaults to 8):\n Time reduction (subsampling factor).\n bias_max_distance (`int`, *optional*, defaults to 1000):\n Max distance for the relative attention bias module.\n bias_symmetric (`bool`, *optional*, defaults to `False`):\n Whether the relative attention bias should be symmetric or not.\n nemo_activation (`str`, *optional*, defaults to `\"relu\"`):\n The non-linear activation function in the nemo conv modules.\n nemo_conv_channels (`int`, *optional*, defaults to 1024):\n Number of channels in the nemo conv modules.\n downsample_rate (`int`, *optional*, defaults to 1):\n Downsample rate for the audio feature extractor.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n audio_token_id (`int`, *optional*, defaults to 200011):\n The audio token id.\n feature_layer (`int`, *optional*, defaults to -2):\n The index of the layer of the encoder from which to extract audio features.\n\nExample:\n\n```python\n>>> from transformers import Phi4MultimodalAudioConfig\n\n>>> # Initializing a Phi4MultimodalAudioConfig with microsoft/Phi-4-multimodal-instruct style configuration\n>>> configuration = Phi4MultimodalAudioConfig()\n```"} +{"repo": "transformers", "function": "def get_image_features(self, pixel_values, params: Optional[dict]=None, dropout_rng: jax.random.PRNGKey=None, train=False):\n 
pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))\n    rngs = {}\n    if dropout_rng is not None:\n        rngs['dropout'] = dropout_rng\n\n    def _get_features(module, pixel_values, deterministic):\n        vision_outputs = module.vision_model(pixel_values=pixel_values, deterministic=deterministic)\n        pooled_output = vision_outputs[1]\n        image_features = module.visual_projection(pooled_output)\n        return image_features\n    return self.module.apply({'params': params or self.params}, jnp.array(pixel_values, dtype=jnp.float32), not train, method=_get_features, rngs=rngs)", "docstring": "Args:\n    pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):\n        Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained\n        using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.\n\nReturns:\n    image_features (`jnp.ndarray` of shape `(batch_size, output_dim)`): The image embeddings obtained by\n    applying the projection layer to the pooled output of [`FlaxCLIPVisionModel`]\n\nExamples:\n\n```python\n>>> from PIL import Image\n>>> import requests\n>>> from transformers import AutoProcessor, FlaxCLIPModel\n\n>>> model = FlaxCLIPModel.from_pretrained(\"openai/clip-vit-base-patch32\")\n>>> processor = AutoProcessor.from_pretrained(\"openai/clip-vit-base-patch32\")\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> inputs = processor(images=image, return_tensors=\"np\")\n\n>>> image_features = model.get_image_features(**inputs)\n```"} +{"repo": "tensorflow", "function": "def approx_min_k(operand, k, reduction_dimension=-1, recall_target=0.95, reduction_input_size_override=-1, aggregate_to_topk=True, name=None):\n    return gen_nn_ops.approx_top_k(operand, k=k, reduction_dimension=reduction_dimension, recall_target=recall_target, is_max_k=False, reduction_input_size_override=reduction_input_size_override, aggregate_to_topk=aggregate_to_topk, name=name)", "docstring": "Returns min `k` values and their indices of the input `operand` in an approximate manner.\n\nSee https://arxiv.org/abs/2206.14286 for the algorithm details. This op is\nonly optimized on TPU currently.\n\nArgs:\n  operand : Array to search for min-k. Must be a floating number type.\n  k : Specifies the number of min-k.\n  reduction_dimension: Integer dimension along which to search. Default: -1.\n  recall_target: Recall target for the approximation.\n  reduction_input_size_override : When set to a positive value, it overrides\n    the size determined by `operand[reduction_dim]` for evaluating the recall.\n    This option is useful when the given `operand` is only a subset of the\n    overall computation in SPMD or distributed pipelines, where the true input\n    size cannot be inferred from the `operand` shape.\n  aggregate_to_topk: When true, aggregates approximate results to top-k. When\n    false, returns the approximate results. The number of the approximate\n    results is implementation defined and is greater than or equal to the specified\n    `k`.\n  name: Optional name for the operation.\n\nReturns:\n  Tuple of two arrays. The arrays are the least `k` values and the\n  corresponding indices along the `reduction_dimension` of the input\n  `operand`. 
The arrays' dimensions are the same as the input `operand`\n  except for the `reduction_dimension`: when `aggregate_to_topk` is true,\n  the reduction dimension is `k`; otherwise, it is greater than or equal to `k`\n  where the size is implementation-defined.\n\nWe encourage users to wrap `approx_min_k` with jit. See the following example\nfor nearest neighbor search over the squared l2 distance:\n\n>>> import tensorflow as tf\n>>> @tf.function(jit_compile=True)\n... def l2_ann(qy, db, half_db_norms, k=10, recall_target=0.95):\n...   dists = half_db_norms - tf.einsum('ik,jk->ij', qy, db)\n...   return tf.nn.approx_min_k(dists, k=k, recall_target=recall_target)\n>>>\n>>> qy = tf.random.uniform((256,128))\n>>> db = tf.random.uniform((2048,128))\n>>> half_db_norms = tf.norm(db, axis=1) / 2\n>>> dists, neighbors = l2_ann(qy, db, half_db_norms)\n\nIn the example above, we compute `db_norms/2 - dot(qy, db^T)` instead of\n`qy^2 - 2 dot(qy, db^T) + db^2` for performance reasons. The former uses less\narithmetic and produces the same set of neighbors."} +{"repo": "tensorflow", "function": "def _assert_float_dtype(dtype):\n    dtype = dtypes.as_dtype(dtype)\n    if not dtype.is_floating:\n        raise ValueError(f'Argument `dtype` is expected to be floating point. Received: {dtype}.')\n    return dtype", "docstring": "Validate and return floating point type based on `dtype`.\n\n`dtype` must be a floating point type.\n\nArgs:\n  dtype: The data type to validate.\n\nReturns:\n  Validated type.\n\nRaises:\n  ValueError: if `dtype` is not a floating point type."} +{"repo": "tensorflow", "function": "def initialize_system_for_tpu_embedding(embedding_config: embedding_pb2.TPUEmbeddingConfiguration, job: Optional[Text]=None) -> ops.Operation:\n    config_string = embedding_config.SerializeToString()\n    with ops.device(_tpu_system_device_name(job)):\n        return tpu_ops.configure_tpu_embedding(config=config_string)", "docstring": "Initializes a distributed TPU Embedding system for use with TensorFlow.\n\nThe following two are equivalent:\n1. initialize_system() with embedding_config.\n2. initialize_system() without embedding_config, then\n   initialize_system_for_tpu_embedding().\ninitialize_system() should not be called with embedding_config if\ninitialize_system_for_tpu_embedding() is meant to be called later.\n\nArgs:\n  embedding_config: a `TPUEmbeddingConfiguration` proto describing the desired\n    configuration of the hardware embedding lookup tables.\n  job: The job (the XXX in TensorFlow device specification /job:XXX) that\n    contains the TPU devices that will be initialized. 
If job=None it is\n    assumed there is only one job in the TensorFlow flock, and an error will\n    be returned if this assumption does not hold.\n\nReturns:\n  A no-op."} +{"repo": "tf-quant-finance", "function": "def _get_quadratic_coeffs(normalized_prices, normalized_forwards, log_normalized_forwards, option_signs, polya_factor):\n    q1 = normalized_forwards - 1\n    q2 = normalized_forwards + 1\n    r = 2 * normalized_prices - option_signs * q1\n    f1 = tf.math.pow(normalized_forwards, -polya_factor)\n    f2 = 1 / f1\n    g1 = f1 * normalized_forwards\n    g2 = 1 / g1\n    a = tf.math.square(g1 - g2)\n    h = tf.math.square(normalized_forwards)\n    r2 = tf.math.square(r)\n    b = 4 * (f1 + f2) - 2 * (g1 + g2) * (1 + h - r2) / normalized_forwards\n    lnc1 = tf.math.log(4.0 * normalized_prices) + tf.math.log(normalized_prices - option_signs * q1)\n    lnc2 = tf.math.log(q2 - r) + tf.math.log(q2 + r)\n    lnc = lnc1 + lnc2 - 2.0 * log_normalized_forwards\n    return (a, b, lnc)", "docstring": "Computes the coefficients of the quadratic in the Stefanica-Radoicic method.\n\nComputes the coefficients described in Table 3 in Ref [1].\n\nArgs:\n  normalized_prices: `Tensor` of real dtype and any shape. The prices of the\n    options to be inverted. Normalization means that the raw price is divided\n    by the strike discounted to the present.\n  normalized_forwards: `Tensor` of same dtype and shape as `normalized_prices`.\n    The forwards divided by the strike of the options.\n  log_normalized_forwards: `Tensor` of same dtype and shape as\n    `normalized_prices`. Log of the normalized forwards.\n  option_signs: Real `Tensor` of same shape and dtype as `normalized_prices`.\n    Should be +1 for a call option and -1 for a put option.\n  polya_factor: Scalar `Tensor` of same dtype as `normalized_prices`. This is\n    the factor to use for approximating the normal CDF in a Polya-like\n    expression. Polya approximation is (here `k` is the `polya_factor`) N(x) ~\n    0.5 (1 + sign(x) sqrt(1-e^(-k x^2))) with k = 2 / pi. However, it has been\n    found that other values for `k` may be more accurate. The value that\n    minimizes the absolute error over the range [-10, 10] is 0.62305051\n    (approximately 5/8).\n\nReturns:\n  (A, B, ln(C)): A 3-tuple of coefficients in terms of which the approximate\n  implied vol is calculated."} +{"repo": "pytype", "function": "def _get_python_exes(python_version) -> Iterable[list[str]]:\n    if python_version in _CUSTOM_PYTHON_EXES:\n        yield [_path_to_custom_exe(_CUSTOM_PYTHON_EXES[python_version])]\n        return\n    for version in (utils.format_version(python_version), '3'):\n        if sys.platform == 'win32':\n            python_exe = ['py', f'-{version}']\n        else:\n            python_exe = [f'python{version}']\n        yield python_exe", "docstring": "Find possible python executables to use.\n\nArguments:\n  python_version: the version tuple (e.g. 
(3, 7))\n\nYields:\n The path to the executable"} +{"repo": "transformers", "function": "def resize(self, image: np.ndarray, size: Dict[str, int], anti_aliasing: bool=True, anti_aliasing_sigma=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n requires_backends(self, 'scipy')\n output_shape = (size['height'], size['width'])\n image = to_channel_dimension_format(image, ChannelDimension.LAST)\n image, output_shape = _preprocess_resize_output_shape(image, output_shape)\n input_shape = image.shape\n factors = np.divide(input_shape, output_shape)\n ndi_mode = 'mirror'\n cval = 0\n order = 1\n if anti_aliasing:\n if anti_aliasing_sigma is None:\n anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2)\n else:\n anti_aliasing_sigma = np.atleast_1d(anti_aliasing_sigma) * np.ones_like(factors)\n if np.any(anti_aliasing_sigma < 0):\n raise ValueError('Anti-aliasing standard deviation must be greater than or equal to zero')\n elif np.any((anti_aliasing_sigma > 0) & (factors <= 1)):\n warnings.warn('Anti-aliasing standard deviation greater than zero but not down-sampling along all axes')\n filtered = ndi.gaussian_filter(image, anti_aliasing_sigma, cval=cval, mode=ndi_mode)\n else:\n filtered = image\n zoom_factors = [1 / f for f in factors]\n out = ndi.zoom(filtered, zoom_factors, order=order, mode=ndi_mode, cval=cval, grid_mode=True)\n image = _clip_warp_output(image, out)\n image = to_channel_dimension_format(image, input_data_format, ChannelDimension.LAST)\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image", "docstring": "Resize an image as per the original implementation.\n\nArgs:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dictionary containing the height and width to resize the image to.\n anti_aliasing (`bool`, *optional*, defaults to `True`):\n Whether to apply anti-aliasing when downsampling the image.\n anti_aliasing_sigma (`float`, *optional*, defaults to `None`):\n Standard deviation for Gaussian kernel when downsampling the image. If `None`, it will be calculated\n automatically.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred from the input\n image."} +{"repo": "keras", "function": "def _get_init_rng(self):\n return self.seed_generator.next()", "docstring": "Returns a JAX `PRNGKey` or structure of `PRNGKey`s to pass to `init_fn`.\n\nBy default, this returns a single `PRNGKey` retrieved by calling\n`self.seed_generator.next()`. Override this to return a different\nstructure.\n\nReturns:\n a JAX `PRNGKey` or structure of `PRNGKey`s that will be passed as\n the `rng` argument of `init_fn`."} +{"repo": "tensorflow", "function": "def sort(values, axis=-1, direction='ASCENDING', name=None):\n with framework_ops.name_scope(name, 'sort'):\n return _sort_or_argsort(values, axis, direction, return_argsort=False)", "docstring": "Sorts a tensor.\n\nUsage:\n\n>>> a = [1, 10, 26.9, 2.8, 166.32, 62.3]\n>>> tf.sort(a).numpy()\narray([ 1. , 2.8 , 10. , 26.9 , 62.3 , 166.32], dtype=float32)\n\n>>> tf.sort(a, direction='DESCENDING').numpy()\narray([166.32, 62.3 , 26.9 , 10. , 2.8 , 1. 
], dtype=float32)\n\nFor multidimensional inputs you can control which axis the sort is applied\nalong. The default `axis=-1` sorts the innermost axis.\n\n>>> mat = [[3,2,1],\n... [2,1,3],\n... [1,3,2]]\n>>> tf.sort(mat, axis=-1).numpy()\narray([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]], dtype=int32)\n>>> tf.sort(mat, axis=0).numpy()\narray([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]], dtype=int32)\n\nSee also:\n\n * `tf.argsort`: Like sort, but it returns the sort indices.\n * `tf.math.top_k`: A partial sort that returns a fixed number of top values\n and corresponding indices.\n\n\nArgs:\n values: 1-D or higher **numeric** `Tensor`.\n axis: The axis along which to sort. The default is -1, which sorts the last\n axis.\n direction: The direction in which to sort the values (`'ASCENDING'` or\n `'DESCENDING'`).\n name: Optional name for the operation.\n\nReturns:\n A `Tensor` with the same dtype and shape as `values`, with the elements\n sorted along the given `axis`.\n\nRaises:\n tf.errors.InvalidArgumentError: If the `values.dtype` is not a `float` or\n `int` type.\n ValueError: If axis is not a constant scalar, or the direction is invalid."} +{"repo": "starthinker", "function": "def to_type(self, entry: dict) -> str:\n t = entry.get('type')\n f = entry.get('format')\n if t == 'any':\n return 'STRING'\n elif t == 'array':\n return 'REPEATED'\n elif t == 'boolean':\n return 'BOOLEAN'\n elif t == 'integer':\n return 'INT64'\n elif t == 'number':\n if f == 'double':\n return 'FLOAT64'\n else:\n return 'FLOAT'\n elif t == 'object':\n return 'STRUCT'\n elif t == 'string':\n if f == 'byte':\n return 'BYTES'\n elif f == 'date':\n return 'DATE'\n elif f == 'date-time':\n return 'TIMESTAMP'\n elif f == 'int64':\n return 'INT64'\n elif f == 'uint64':\n return 'INT64'\n else:\n return 'STRING'\n else:\n return 'STRING'", "docstring": "Convert a Discovery API Document type to a BigQuery type.\n\nCalled internally but exposed for convenience.\n\nArgs:\n entry: discovery type format: https://developers.google.com/discovery/v1/type-format\n\nReturns:\n Bigquery type: https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types"} +{"repo": "fhir-py", "function": "def visit_membership(self, relation: _evaluation.MembershipRelationNode) -> _sql_data_types.Select:\n lhs_result = self.visit(relation.left)\n rhs_result = self.visit(relation.right)\n in_lhs = lhs_result if isinstance(relation, _evaluation.InNode) else rhs_result\n in_rhs = rhs_result if isinstance(relation, _evaluation.InNode) else lhs_result\n sql_expr = f'({in_lhs.as_operand()}) IN ({in_rhs.as_operand()})'\n return _sql_data_types.Select(select_part=_sql_data_types.RawExpression(sql_expr, _sql_data_type=_sql_data_types.Boolean, _sql_alias='mem_'), from_part=None, sql_dialect=_sql_data_types.SqlDialect.SPARK)", "docstring": "Translates a FHIRPath membership relation to Spark SQL.\n\nFor the `IN` relation, the LHS operand is assumed to be a collection of a\nsingle value. For 'CONTAINS', the RHS operand is assumed to be a collection\nof a single value. 
Equality is handled in the visit_equality function.\n\nArgs:\n  relation: The FHIRPath AST `MembershipRelation` node.\n\nReturns:\n  A compiled Spark SQL expression."} +{"repo": "starthinker", "function": "def recipe_google_api_to_bigquery(config, auth_read, api, version, function, kwargs, kwargs_remote, api_key, developer_token, login_customer_id, dataset, table):\n    google_api(config, {'auth': auth_read, 'api': api, 'version': version, 'function': function, 'kwargs': kwargs, 'kwargs_remote': kwargs_remote, 'key': api_key, 'headers': {'developer-token': developer_token, 'login-customer-id': login_customer_id}, 'results': {'bigquery': {'dataset': dataset, 'table': table}}})", "docstring": "Execute any Google API function and store results to BigQuery.\n\nArgs:\n  auth_read (authentication) - Credentials used for reading data.\n  api (string) - See developer guide.\n  version (string) - Must be supported version.\n  function (string) - Full function dot notation path.\n  kwargs (json) - Dictionary object of name value pairs.\n  kwargs_remote (json) - Fetch arguments from remote source.\n  api_key (string) - Associated with a Google Cloud Project.\n  developer_token (string) - Associated with your organization.\n  login_customer_id (string) - Associated with your Adwords account.\n  dataset (string) - Existing dataset in BigQuery.\n  table (string) - Table to write API call results to."} +{"repo": "transformers", "function": "def sample_frames(self, video: 'torch.Tensor', metadata: Union[VideoMetadata, dict], num_frames: Optional[int]=None, fps: Optional[int]=None, skip_secs: Optional[int]=1):\n    num_frames = num_frames if num_frames is not None else self.num_frames\n    fps = fps if fps is not None else self.fps\n    total_num_frames = video.shape[0]\n    estimated_frames = int(round(fps * metadata['duration']))\n    desired_frames = min(estimated_frames, num_frames)\n    if desired_frames < 1:\n        desired_frames = 1\n    start_idx = 0\n    end_idx = total_num_frames - 1\n    if skip_secs > 0 and metadata['duration'] - 2 * skip_secs > num_frames * fps:\n        start_idx = int(skip_secs * metadata['fps'])\n        end_idx = int(total_num_frames - skip_secs * metadata['fps'])\n    start_idx = max(0, start_idx)\n    end_idx = min(end_idx, total_num_frames - 1)\n    if start_idx >= end_idx:\n        start_idx, end_idx = (0, total_num_frames - 1)\n    indices = np.linspace(start_idx, end_idx, desired_frames, dtype=int)\n    indices = np.unique(indices)\n    video = video[indices].contiguous()\n    timestamps = []\n    for idx in indices:\n        sec = idx / metadata['fps']\n        mm = int(sec // 60)\n        ss = int(sec % 60)\n        timestamps.append([mm, ss])\n    return (video, timestamps, int(metadata['duration']))", "docstring": "Video sampling function which:\n    - Uses `num_frames` (if provided) or calculates it from `fps` and metadata.\n    - Applies a basic center-skip if fewer frames than available, otherwise\n      optionally skips `skip_secs` from both the start and end.\n    - Uniformly samples the desired number of frames between the start and end indices.\n\nArgs:\n    video (`torch.Tensor`):\n        Video that needs to be sampled.\n    metadata (`VideoMetadata`):\n        Metadata of the video containing information about total duration, fps and total number of frames.\n    num_frames (`int`, *optional*):\n        Maximum number of frames to sample. Defaults to `self.num_frames`.\n    fps (`int`, *optional*):\n        Target frames to sample per second. 
Defaults to `self.fps`.\n skip_secs (`float`, *optional*, defaults to `1`):\n Number of seconds to skip from the start and end if the video is long enough.\n\nReturns:\n torch.Tensor:\n Sampled video frames."} +{"repo": "transformers", "function": "class Multimodal2VisionEncoder(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.layers = nn.ModuleList([Multimodal2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n @can_return_tuple\n def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:\n \"\"\"\n Args:\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Causal mask for the text model. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n hidden_states = inputs_embeds\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)\n else:\n layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. 
Each layer is a\n[`Multimodal2VisionEncoderLayer`].\n\nArgs:\n config: Multimodal2VisionConfig"} +{"repo": "tf-quant-finance", "function": "def subtract_period_and_roll(self, date_tensor, period_tensor, roll_convention=constants.BusinessDayConvention.NONE):\n minus_period_tensor = periods.PeriodTensor(-period_tensor.quantity(), period_tensor.period_type())\n return self.add_period_and_roll(date_tensor, minus_period_tensor, roll_convention)", "docstring": "Subtracts given periods from given dates and rolls to business days.\n\nThe original dates are not rolled prior to subtraction.\n\nArgs:\n date_tensor: `DateTensor` of dates to subtract from.\n period_tensor: PeriodTensor broadcastable to `date_tensor`.\n roll_convention: BusinessDayConvention. Determines how to roll a date that\n falls on a holiday.\n\nReturns:\n The resulting `DateTensor`."} +{"repo": "pyglove", "function": "def to_json(value: Any, **kwargs) -> Any:\n if isinstance(value, Symbolic):\n return value.sym_jsonify(**kwargs)\n return utils.to_json(value, **kwargs)", "docstring": "Serializes a (maybe) symbolic value into a plain Python object.\n\nExample::\n\n @pg.members([\n ('x', pg.typing.Any())\n ])\n class A(pg.Object):\n pass\n\n a1 = A(1)\n json = a1.to_json()\n a2 = pg.from_json(json)\n assert pg.eq(a1, a2)\n\nArgs:\n value: value to serialize. Applicable value types are:\n\n * Builtin python types: None, bool, int, float, string;\n * JSONConvertible types;\n * List types;\n * Tuple types;\n * Dict types.\n\n **kwargs: Keyword arguments to pass to value.to_json if value is\n JSONConvertible.\n\nReturns:\n JSON value."} +{"repo": "tensorflow", "function": "class Orthogonal(Initializer):\n\n def __init__(self, gain=1.0, seed=None):\n self.gain = gain\n self.seed = seed\n self._random_generator = _RandomGenerator(seed)\n\n def __call__(self, shape, dtype=None, **kwargs):\n \"\"\"Returns a tensor object initialized to an orthogonal matrix.\n\n Args:\n shape: Shape of the tensor.\n dtype: Optional dtype of the tensor. Only floating point types are\n supported. If not specified, `tf.keras.backend.floatx()` is used,\n which default to `float32` unless you configured it otherwise\n (via `tf.keras.backend.set_floatx(float_dtype)`)\n **kwargs: Additional keyword arguments.\n \"\"\"\n _validate_kwargs(self.__class__.__name__, kwargs, support_partition=False)\n dtype = _assert_float_dtype(_get_dtype(dtype))\n if len(shape) < 2:\n raise ValueError('The tensor to initialize must be at least two-dimensional')\n num_rows = 1\n for dim in shape[:-1]:\n num_rows *= dim\n num_cols = shape[-1]\n flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))\n a = self._random_generator.random_normal(flat_shape, dtype=dtype)\n q, r = gen_linalg_ops.qr(a, full_matrices=False)\n d = array_ops.tensor_diag_part(r)\n q *= math_ops.sign(d)\n if num_rows < num_cols:\n q = array_ops.matrix_transpose(q)\n return self.gain * array_ops.reshape(q, shape)\n\n def get_config(self):\n return {'gain': self.gain, 'seed': self.seed}", "docstring": "Initializer that generates an orthogonal matrix.\n\nAlso available via the shortcut function `tf.keras.initializers.orthogonal`.\n\nIf the shape of the tensor to initialize is two-dimensional, it is initialized\nwith an orthogonal matrix obtained from the QR decomposition of a matrix of\nrandom numbers drawn from a normal distribution.\nIf the matrix has fewer rows than columns then the output will have orthogonal\nrows. 
Otherwise, the output will have orthogonal columns.\n\nIf the shape of the tensor to initialize is more than two-dimensional,\na matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`\nis initialized, where `n` is the length of the shape vector.\nThe matrix is subsequently reshaped to give a tensor of the desired shape.\n\nExamples:\n\n>>> # Standalone usage:\n>>> initializer = tf.keras.initializers.Orthogonal()\n>>> values = initializer(shape=(2, 2))\n\n>>> # Usage in a Keras layer:\n>>> initializer = tf.keras.initializers.Orthogonal()\n>>> layer = tf.keras.layers.Dense(3, kernel_initializer=initializer)\n\nArgs:\n gain: multiplicative factor to apply to the orthogonal matrix\n seed: A Python integer. An initializer created with a given seed will\n always produce the same random tensor for a given shape and dtype.\n\nReferences:\n [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)\n ([pdf](https://arxiv.org/pdf/1312.6120.pdf))"} +{"repo": "weather-tools", "function": "class PartitionConfig:\n config: Config\n store: Store\n manifest: Manifest\n\n def _create_partition_config(self, option: t.Tuple) -> Config:\n \"\"\"Create a config for a single partition option.\n\n Output a config dictionary, overriding the range of values for\n each key with the partition instance in 'selection'.\n Continuing the example from prepare_partitions, the selection section\n would be:\n { 'foo': ..., 'year': ['2020'], 'month': ['01'], ... }\n { 'foo': ..., 'year': ['2020'], 'month': ['02'], ... }\n { 'foo': ..., 'year': ['2020'], 'month': ['03'], ... }\n\n Args:\n option: A single item in the range of partition_keys.\n config: The download config, including the parameters and selection sections.\n\n Returns:\n A configuration with that selects a single download partition.\n \"\"\"\n copy = cp.deepcopy(self.config.selection)\n out = cp.deepcopy(self.config)\n for idx, key in enumerate(self.config.partition_keys):\n copy[key] = [option[idx]]\n if 'hdate' in copy:\n copy['hdate'] = [generate_hdate(copy['date'][0], v) for v in copy['hdate']]\n out.selection = copy\n return out\n\n def skip_partition(self, config: Config) -> bool:\n \"\"\"Return true if partition should be skipped.\"\"\"\n if config.force_download:\n return False\n target = prepare_target_name(config)\n if self.store.exists(target):\n logger.info(f'file {target} found, skipping.')\n self.manifest.skip(config.config_name, config.dataset, config.selection, target, config.user_id)\n return True\n return False\n\n def prepare_partitions(self) -> t.Iterator[Config]:\n \"\"\"Iterate over client parameters, partitioning over `partition_keys`.\n\n This produces a Cartesian-Cross over the range of keys.\n\n For example, if the keys were 'year' and 'month', it would produce\n an iterable like:\n ( ('2020', '01'), ('2020', '02'), ('2020', '03'), ...)\n\n Returns:\n An iterator of `Config`s.\n \"\"\"\n for option in itertools.product(*[self.config.selection[key] for key in self.config.partition_keys]):\n yield self._create_partition_config(option)\n\n def new_downloads_only(self, candidate: Config) -> bool:\n \"\"\"Predicate function to skip already downloaded partitions.\"\"\"\n if self.store is None:\n self.store = FSStore()\n should_skip = self.skip_partition(candidate)\n return not should_skip\n\n def update_manifest_collection(self, partition: Config) -> Config:\n \"\"\"Updates the DB.\"\"\"\n location = prepare_target_name(partition)\n self.manifest.schedule(partition.config_name, partition.dataset, partition.selection, 
location, partition.user_id)\n        logger.info(f'Created partition {location!r}.')", "docstring": "Partition a config into multiple data requests.\n\nPartitioning involves three main operations: First, we fan-out shards based on\npartition keys (a cross product of the values). Second, we filter out existing\ndownloads (unless we want to force downloads). Last, we assemble each partition\ninto a single Config.\n\nAttributes:\n  store: A cloud storage system, used for checking the existence of downloads.\n  manifest: A download manifest to register preparation state."} +{"repo": "pyglove", "function": "def element(cls, tag: str, inner_html: Optional[utils.Nestable[WritableTypes]]=None, *, options: Union[str, Iterable[str], None]=None, css_classes: NestableStr=None, styles: Union[str, Dict[str, Any], None]=None, **properties) -> 'Html':\n    s = cls()\n    css_classes = cls.concate(css_classes)\n    options = cls.concate(options)\n    styles = cls.style_str(styles)\n    s.write(f'<{tag}', f' {options}' if options else None, f' class=\"{css_classes}\"' if css_classes else None, f' style=\"{styles}\"' if styles else None)\n    for k, v in properties.items():\n        if v is not None:\n            s.write(f' {k.replace('_', '-')}=\"{v}\"')\n    s.write('>')\n    if inner_html:\n        if isinstance(inner_html, list):\n            for child in utils.flatten(inner_html).values():\n                s.write(child)\n        else:\n            s.write(inner_html)\n    s.write(f'</{tag}>')\n    return s", "docstring": "Creates an HTML element.\n\nArgs:\n  tag: The HTML tag name.\n  inner_html: The inner HTML of the element.\n  options: Positional options that will be added to the element. E.g. 'open'\n    for `<details open>
`.\n  css_classes: The CSS class name or a list of CSS class names.\n  styles: A single CSS style string or a dictionary of CSS properties.\n  **properties: Keyword arguments for HTML properties. For properties with\n    underscore in the name, the underscore will be replaced by dash in the\n    generated HTML. E.g. `background_color` will be converted to\n    `background-color`.\n\nReturns:\n  The opening tag of an HTML element."} +{"repo": "transformers", "function": "class PerceiverProjectionDecoder(PerceiverAbstractDecoder):\n\n    def __init__(self, config):\n        super().__init__()\n        self.classifier = nn.Linear(config.d_latents, config.num_labels)\n\n    def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):\n        return None\n\n    def forward(self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor]=None) -> torch.FloatTensor:\n        z = torch.mean(z, dim=1)\n        logits = self.classifier(z)\n        return logits", "docstring": "Baseline projection decoder (no cross-attention).\n\nArgs:\n    config ([`PerceiverConfig`]):\n        Model configuration."} +{"repo": "transformers", "function": "class BitImageProcessor(BaseImageProcessor):\n    model_input_names = ['pixel_values']\n\n    def __init__(self, do_resize: bool=True, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BICUBIC, do_center_crop: bool=True, crop_size: Optional[Dict[str, int]]=None, do_rescale: bool=True, rescale_factor: Union[int, float]=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: bool=True, **kwargs) -> None:\n        super().__init__(**kwargs)\n        size = size if size is not None else {'shortest_edge': 224}\n        size = get_size_dict(size, default_to_square=False)\n        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}\n        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')\n        self.do_resize = do_resize\n        self.size = size\n        self.resample = resample\n        self.do_center_crop = do_center_crop\n        self.crop_size = crop_size\n        self.do_rescale = do_rescale\n        self.rescale_factor = rescale_factor\n        self.do_normalize = do_normalize\n        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN\n        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD\n        self.do_convert_rgb = do_convert_rgb\n\n    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n        \"\"\"\n        Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n        resized to keep the input aspect ratio.\n\n        Args:\n            image (`np.ndarray`):\n                Image to resize.\n            size (`Dict[str, int]`):\n                Size of the output image.\n            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n                Resampling filter to use when resizing the image.\n            data_format (`str` or `ChannelDimension`, *optional*):\n                The channel dimension format of the image. If not provided, it will be the same as the input image.\n            input_data_format (`ChannelDimension` or `str`, *optional*):\n                The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n default_to_square = True\n if 'shortest_edge' in size:\n size = size['shortest_edge']\n default_to_square = False\n elif 'height' in size and 'width' in size:\n size = (size['height'], size['width'])\n else:\n raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n output_size = get_resize_output_image_size(image, size=size, default_to_square=default_to_square, input_data_format=input_data_format)\n return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)\n\n @filter_out_non_signature_kwargs()\n def preprocess(self, images: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_center_crop: Optional[bool]=None, crop_size: Optional[int]=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, do_convert_rgb: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing. Shortest edge of the image is resized to size[\"shortest_edge\"], with\n the longest edge resized to keep the input aspect ratio.\n resample (`int`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only\n has an effect if `do_resize` is set to `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to\n `True`.\n do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):\n Whether to convert the image to RGB.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. 
Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size, param_name='size', default_to_square=False)\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb\n images = make_list_of_images(images)\n if not valid_images(images):\n raise ValueError('Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_center_crop=do_center_crop, crop_size=crop_size, do_resize=do_resize, size=size, resample=resample)\n if do_convert_rgb:\n images = [convert_to_rgb(image) for image in images]\n images = [to_numpy_array(image) for image in images]\n if do_rescale and is_scaled_image(images[0]):\n logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n        if input_data_format is None:\n            input_data_format = infer_channel_dimension_format(images[0])\n        all_images = []\n        for image in images:\n            if do_resize:\n                image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n            if do_center_crop:\n                image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)\n            if do_rescale:\n                image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n            if do_normalize:\n                image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n            all_images.append(image)\n        images = [to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in all_images]\n        data = {'pixel_values': images}\n        return BatchFeature(data=data, tensor_type=return_tensors)", "docstring": "Constructs a BiT image processor.\n\nArgs:\n    do_resize (`bool`, *optional*, defaults to `True`):\n        Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by\n        `do_resize` in the `preprocess` method.\n    size (`Dict[str, int]`, *optional*, defaults to `{\"shortest_edge\": 224}`):\n        Size of the image after resizing. The shortest edge of the image is resized to size[\"shortest_edge\"], with\n        the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`\n        method.\n    resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n        Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.\n    do_center_crop (`bool`, *optional*, defaults to `True`):\n        Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the\n        `preprocess` method.\n    crop_size (`Dict[str, int]`, *optional*, defaults to 224):\n        Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`\n        method.\n    do_rescale (`bool`, *optional*, defaults to `True`):\n        Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in\n        the `preprocess` method.\n    rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):\n        Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`\n        method.\n    do_normalize (`bool`, *optional*, defaults to `True`):\n        Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.\n    image_mean (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):\n        Mean to use if normalizing the image. This is a float or list of floats the length of the number of\n        channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.\n    image_std (`float` or `List[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):\n        Standard deviation to use if normalizing the image. This is a float or list of floats the length of the\n        number of channels in the image. 
Can be overridden by the `image_std` parameter in the `preprocess` method.\n    do_convert_rgb (`bool`, *optional*, defaults to `True`):\n        Whether to convert the image to RGB."} +{"repo": "transformers", "function": "class ChineseCLIPProcessor(ProcessorMixin):\n    attributes = ['image_processor', 'tokenizer']\n    image_processor_class = ('ChineseCLIPImageProcessor', 'ChineseCLIPImageProcessorFast')\n    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')\n\n    def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n        feature_extractor = None\n        if 'feature_extractor' in kwargs:\n            warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)\n            feature_extractor = kwargs.pop('feature_extractor')\n        image_processor = image_processor if image_processor is not None else feature_extractor\n        if image_processor is None:\n            raise ValueError('You need to specify an `image_processor`.')\n        if tokenizer is None:\n            raise ValueError('You need to specify a `tokenizer`.')\n        super().__init__(image_processor, tokenizer)\n        self.current_processor = self.image_processor\n\n    def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, images: ImageInput=None, audio=None, videos=None, **kwargs: Unpack[ChineseClipProcessorKwargs]) -> BatchEncoding:\n        \"\"\"\n        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`\n        and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode\n        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to\n        CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring\n        of the above two methods for more information.\n\n        Args:\n            text (`str`, `List[str]`, `List[List[str]]`):\n                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set\n                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):\n                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n                tensor. Both channels-first and channels-last formats are supported.\n\n            return_tensors (`str` or [`~utils.TensorType`], *optional*):\n                If set, will return tensors of a particular framework. Acceptable values are:\n                - `'tf'`: Return TensorFlow `tf.constant` objects.\n                - `'pt'`: Return PyTorch `torch.Tensor` objects.\n                - `'np'`: Return NumPy `np.ndarray` objects.\n                - `'jax'`: Return JAX `jnp.ndarray` objects.\n        Returns:\n            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:\n\n            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.\n            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when\n              `return_attention_mask=True` or if *\"attention_mask\"* is in `self.model_input_names` and if `text` is not\n              `None`).\n            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.\n        \"\"\"\n        if text is None and images is None:\n            raise ValueError('You have to specify either text or images. 
Both cannot be none.')\n output_kwargs = self._merge_kwargs(ChineseClipProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n if text is not None:\n encoding = self.tokenizer(text, **output_kwargs['text_kwargs'])\n if images is not None:\n image_features = self.image_processor(images, **output_kwargs['images_kwargs'])\n if 'return_tensors' in output_kwargs['common_kwargs']:\n return_tensors = output_kwargs['common_kwargs'].pop('return_tensors', None)\n if text is not None and images is not None:\n encoding['pixel_values'] = image_features.pixel_values\n return encoding\n elif text is not None:\n return encoding\n else:\n return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)\n\n def batch_decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, **kwargs)\n\n def decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to\n the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, **kwargs)\n\n @property\n def model_input_names(self):\n tokenizer_input_names = self.tokenizer.model_input_names\n image_processor_input_names = self.image_processor.model_input_names\n return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))\n\n @property\n def feature_extractor_class(self):\n warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)\n return self.image_processor_class", "docstring": "Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a\nsingle processor.\n\n[`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`].\nSee the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information.\n\nArgs:\n image_processor ([`ChineseCLIPImageProcessor`], *optional*):\n The image processor is a required input.\n tokenizer ([`BertTokenizerFast`], *optional*):\n The tokenizer is a required input."} +{"repo": "transformers", "function": "def forward(self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor]=None, mask_time_indices: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n extract_features = self.feature_extractor(input_values)\n extract_features = extract_features.transpose(1, 2)\n if attention_mask is not None:\n attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)\n hidden_states = self.feature_projection(extract_features)\n hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)\n encoder_outputs = self.encoder(hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, 
output_hidden_states=output_hidden_states, return_dict=return_dict)\n hidden_states = encoder_outputs[0]\n if not return_dict:\n return (hidden_states,) + encoder_outputs[1:]\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions)", "docstring": "mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict\n masked extracted features in *config.proj_codevector_dim* space.\n\nExample:\n\n```python\n>>> from transformers import AutoProcessor, HubertModel\n>>> from datasets import load_dataset\n>>> import soundfile as sf\n\n>>> processor = AutoProcessor.from_pretrained(\"facebook/hubert-large-ls960-ft\")\n>>> model = HubertModel.from_pretrained(\"facebook/hubert-large-ls960-ft\")\n\n\n>>> def map_to_array(batch):\n... speech, _ = sf.read(batch[\"file\"])\n... batch[\"speech\"] = speech\n... return batch\n\n\n>>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n>>> ds = ds.map(map_to_array)\n\n>>> input_values = processor(ds[\"speech\"][0], return_tensors=\"pt\").input_values # Batch size 1\n>>> hidden_states = model(input_values).last_hidden_state\n```"} +{"repo": "mobly", "function": "def _print_test_names(test_class):\n cls = test_class(config_parser.TestRunConfig())\n test_names = []\n try:\n cls._pre_run()\n if cls.tests:\n test_names = list(cls.tests)\n else:\n test_names = cls.get_existing_test_names()\n except Exception:\n logging.exception('Failed to retrieve generated tests.')\n finally:\n cls._clean_up()\n print('==========> %s <==========' % cls.TAG)\n for name in test_names:\n print(name)", "docstring": "Prints the names of all the tests in a test module.\n\nIf the module has generated tests defined based on controller info, this\nmay not be able to print the generated tests.\n\nArgs:\n test_class: module, the test module to print names from."} +{"repo": "keras", "function": "def compute_loss(self, x=None, y=None, y_pred=None, sample_weight=None, training=True):\n del x\n del training\n losses = []\n if self._compile_loss is not None:\n loss = self._compile_loss(y, y_pred, sample_weight)\n if loss is not None:\n losses.append(loss)\n for loss in self.losses:\n losses.append(self._aggregate_additional_loss(loss))\n if backend.backend() != 'jax' and len(losses) == 0:\n raise ValueError('No loss to compute. 
Provide a `loss` argument in `compile()`.')\n if len(losses) == 1:\n total_loss = losses[0]\n elif len(losses) == 0:\n total_loss = ops.zeros(())\n else:\n total_loss = ops.sum(losses)\n return total_loss", "docstring": "Compute the total loss, validate it, and return it.\n\nSubclasses can optionally override this method to provide custom loss\ncomputation logic.\n\nExample:\n\n```python\nclass MyModel(Model):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.loss_tracker = metrics.Mean(name='loss')\n\n def compute_loss(self, x, y, y_pred, sample_weight, training=True):\n loss = ops.mean((y_pred - y) ** 2)\n loss += ops.sum(self.losses)\n self.loss_tracker.update_state(loss)\n return loss\n\n def reset_metrics(self):\n self.loss_tracker.reset_state()\n\n @property\n def metrics(self):\n return [self.loss_tracker]\n\ninputs = layers.Input(shape=(10,), name='my_input')\noutputs = layers.Dense(10)(inputs)\nmodel = MyModel(inputs, outputs)\nmodel.add_loss(ops.sum(outputs))\n\noptimizer = SGD()\nmodel.compile(optimizer, loss='mse', steps_per_execution=10)\ndataset = ...\nmodel.fit(dataset, epochs=2, steps_per_epoch=10)\nprint(f\"Custom loss: {model.loss_tracker.result()}\")\n```\n\nArgs:\n x: Input data.\n y: Target data.\n y_pred: Predictions returned by the model (output of `model(x)`)\n sample_weight: Sample weights for weighting the loss function.\n training: Whether we are training or evaluating the model.\n\nReturns:\n The total loss as a scalar tensor, or `None` if no loss results\n (which is the case when called by `Model.test_step`)."} +{"repo": "pyglove", "function": "def set_dna(self, dna: geno.DNA) -> None:\n self._dna = dna\n self._decoded_value = None", "docstring": "Use this DNA to generate value.\n\nNOTE(daiyip): self._dna is only used in __call__.\nThus 'set_dna' can be called multiple times to generate different values.\n\nArgs:\n dna: DNA to use to decode the value."} +{"repo": "transformers", "function": "def encode_plus_boxes(self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, is_split_into_words: bool=False, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:\n padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(padding=padding, truncation=truncation, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, verbose=verbose, **kwargs)\n return self._encode_plus_boxes(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, is_split_into_words=is_split_into_words, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, 
return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)", "docstring": "Tokenize and prepare for the model a sequence or a pair of sequences.\n\n\n\nThis method is deprecated, `__call__` should be used instead.\n\n\n\nArgs:\n text (`str`, `List[str]` or (for non-fast tokenizers) `List[int]`):\n The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the\n `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`\n method).\n text_pair (`str`, `List[str]` or `List[int]`, *optional*):\n Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using\n the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`\n method)."} +{"repo": "tensorflow", "function": "def element_spec(self):\n raise NotImplementedError('Optional.element_spec')", "docstring": "The type specification of an element of this optional.\n\n>>> optional = tf.experimental.Optional.from_value(42)\n>>> print(optional.element_spec)\ntf.TensorSpec(shape=(), dtype=tf.int32, name=None)\n\nReturns:\n A (nested) structure of `tf.TypeSpec` objects matching the structure of an\n element of this optional, specifying the type of individual components."} +{"repo": "keras", "function": "class SpatialDropout1D(BaseSpatialDropout):\n\n def __init__(self, rate, seed=None, name=None, dtype=None):\n super().__init__(rate, seed=seed, name=name, dtype=dtype)\n self.input_spec = InputSpec(ndim=3)\n\n def _get_noise_shape(self, inputs):\n input_shape = ops.shape(inputs)\n return (input_shape[0], 1, input_shape[2])", "docstring": "Spatial 1D version of Dropout.\n\nThis layer performs the same function as Dropout, however, it drops\nentire 1D feature maps instead of individual elements. If adjacent frames\nwithin feature maps are strongly correlated (as is normally the case in\nearly convolution layers) then regular dropout will not regularize the\nactivations and will otherwise just result in an effective learning rate\ndecrease. In this case, `SpatialDropout1D` will help promote independence\nbetween feature maps and should be used instead.\n\nArgs:\n rate: Float between 0 and 1. 
Fraction of the input units to drop.\n\nCall arguments:\n inputs: A 3D tensor.\n training: Python boolean indicating whether the layer\n should behave in training mode (applying dropout)\n or in inference mode (pass-through).\n\nInput shape:\n 3D tensor with shape: `(samples, timesteps, channels)`\n\nOutput shape: Same as input.\n\nReference:\n\n- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)"} +{"repo": "tensorflow", "function": "def _build_all_reduce_ring(core_locations: List[_CoreLocation], rotate: bool=False) -> List[int]:\n permutation = list(range(len(core_locations)))\n if not permutation:\n return permutation\n logging.vlog(2, 'Core locations in: %s', core_locations)\n first_column = min([l.x for l in core_locations])\n first_row = min([l.y for l in core_locations])\n same_z = len(set([l.z for l in core_locations])) == 1\n logging.vlog(2, 'first_column: %d', first_column)\n logging.vlog(2, 'first_row: %d', first_row)\n logging.vlog(2, 'same_z: %s', same_z)\n\n def _cmp_2d(ia: int, ib: int) -> int:\n if not rotate:\n a = core_locations[ia]\n b = core_locations[ib]\n a_first = a.x == first_column and a.y != first_row\n b_first = b.x == first_column and b.y != first_row\n if a_first != b_first:\n return -1 if b_first else 1\n if a.y != b.y:\n return b.y - a.y if a_first else a.y - b.y\n if a.x != b.x:\n return a.x - b.x if a.y % 2 == 0 else b.x - a.x\n return a.core - b.core\n else:\n a = core_locations[ia]\n b = core_locations[ib]\n a_first = a.y == first_row and a.x != first_column\n b_first = b.y == first_row and b.x != first_column\n if a_first != b_first:\n return -1 if b_first else 1\n if a.x != b.x:\n return b.x - a.x if a_first else a.x - b.x\n if a.y != b.y:\n return a.y - b.y if a.x % 2 == 0 else b.y - a.y\n return a.core - b.core\n\n def _cmp_3d(ia: int, ib: int) -> int:\n a = core_locations[ia]\n b = core_locations[ib]\n a_corner = a.x == first_column and a.y == first_row\n b_corner = b.x == first_column and b.y == first_row\n if a_corner and b_corner:\n return b.z - a.z if a.z != b.z else a.core - b.core\n if a_corner != b_corner:\n return -1 if b_corner else 1\n if a.z == b.z:\n return _cmp_2d(ia, ib) if a.z % 2 == 0 else -_cmp_2d(ia, ib)\n return a.z - b.z\n if same_z:\n permutation.sort(key=functools.cmp_to_key(_cmp_2d))\n else:\n permutation.sort(key=functools.cmp_to_key(_cmp_3d))\n logging.vlog(2, 'Permutation out: %s', permutation)\n return permutation", "docstring": "Reorders a list of TPU cores to optimize for AllReduce performance.\n\nThis is ported from the C++ tensorflow::BuildAllReduceRing function,\nmixed with some logic from TF TPU's device_assignment._ring_3d.\n\nArgs:\n core_locations: A list of core locations expressed as [x, y, z, core].\n rotate: If true, scan the cores in a column-major order. 
False by default.\n\nReturns:\n A permutation of the input list such that neighbors in the sequence are\n nearby in the TPU topology."} +{"repo": "tensorflow", "function": "def session(self, graph=None, config=None, target=None):\n config = self._create_config(config)\n if target is None:\n target = self._default_target\n with session.Session(graph=graph, config=config, target=target) as sess:\n yield sess", "docstring": "Create a test session with master target set to the testing cluster.\n\nCreates a test session that connects to the local testing cluster.\n\nArgs:\n graph: Optional graph to use during the returned session.\n config: An optional config_pb2.ConfigProto to use to configure the\n session.\n target: the target of session to connect to.\n\nYields:\n A Session object that should be used as a context manager to surround\n the graph building and execution code in a test case."} +{"repo": "tensorflow", "function": "def scatter_add(self, sparse_delta, use_locking=False, name=None):\n if not isinstance(sparse_delta, indexed_slices.IndexedSlices):\n raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta)\n return gen_state_ops.scatter_add(self._variable, sparse_delta.indices, sparse_delta.values, use_locking=use_locking, name=name)", "docstring": "Adds `tf.IndexedSlices` to this variable.\n\nArgs:\n sparse_delta: `tf.IndexedSlices` to be added to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\nReturns:\n A `Tensor` that will hold the new value of this variable after\n the scattered addition has completed.\n\nRaises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`."} +{"repo": "tensorflow", "function": "def load_file_system_library(library_filename):\n py_tf.TF_LoadLibrary(library_filename)", "docstring": "Loads a TensorFlow plugin, containing file system implementation.\n\nPass `library_filename` to a platform-specific mechanism for dynamically\nloading a library. 
The rules for determining the exact location of the\nlibrary are platform-specific and are not documented here.\n\nArgs:\n library_filename: Path to the plugin.\n Relative or absolute filesystem path to a dynamic library file.\n\nReturns:\n None.\n\nRaises:\n RuntimeError: when unable to load the library."} +{"repo": "starthinker", "function": "def sheets_tab_range(sheet_tab, sheet_range):\n if sheet_range:\n return '%s!%s' % (sheet_tab, sheet_range)\n else:\n return sheet_tab", "docstring": "Helper for creating range format.\n\nArgs:\n sheet_tab - name of tab in sheet\n sheet_range - A1 notation\n\nReturns:\n String containing full sheet range specification."} +{"repo": "transformers", "function": "class BlenderbotDecoder(BlenderbotPreTrainedModel):\n\n def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding]=None):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n self.padding_idx = config.pad_token_id\n self.max_target_positions = config.max_position_embeddings\n embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = BlenderbotScaledWordEmbedding(config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale)\n self.embed_positions = BlenderbotLearnedPositionalEmbedding(config.max_position_embeddings, config.d_model)\n self.layers = nn.ModuleList([BlenderbotDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])\n self.layer_norm = nn.LayerNorm(config.d_model)\n self.gradient_checkpointing = False\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, value):\n self.embed_tokens = value\n\n def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position: Optional[torch.Tensor]=None):\n \"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. 
Mask values\n selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing\n cross-attention on hidden heads. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of\n shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the\n cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those\n that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of\n all `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. 
It is used to update the\n cache in the correct position and to infer the complete sequence length.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if (input_ids is None) ^ (inputs_embeds is not None):\n raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')\n elif input_ids is not None:\n input = input_ids\n input_shape = input.shape\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n input = inputs_embeds[:, :, -1]\n else:\n raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input)\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`...')\n use_cache = False\n return_legacy_cache = False\n if use_cache and (not isinstance(past_key_values, Cache)):\n return_legacy_cache = True\n logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')\n past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)\n batch_size, seq_length = inputs_embeds.size()[:-1]\n past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0\n if cache_position is None:\n cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device)\n if attention_mask is None and (not is_torchdynamo_compiling()):\n mask_seq_length = past_key_values_length + seq_length\n attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)\n self_attn_cache = past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, self_attn_cache)\n encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds)\n position_ids = self.embed_positions((batch_size, seq_length), past_key_values_length, position_ids=cache_position)\n hidden_states = inputs_embeds + position_ids\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None\n next_decoder_cache = None\n for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ['head_mask', 'cross_attn_head_mask']):\n if attn_mask is not None:\n if attn_mask.size()[0] != len(self.layers):\n raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')\n for idx, decoder_layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if 
self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n continue\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, causal_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, output_attentions, use_cache, cache_position)\n else:\n layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache = layer_outputs[3 if output_attentions else 1]\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_outputs[2],)\n hidden_states = self.layer_norm(hidden_states)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n next_cache = next_decoder_cache if use_cache else None\n if return_legacy_cache:\n next_cache = past_key_values.to_legacy_cache()\n if not return_dict:\n return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))\n return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions)", "docstring": "Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotDecoderLayer`]\n\nArgs:\n config: BlenderbotConfig\n embed_tokens (nn.Embedding): output embedding"} +{"repo": "etils", "function": "def batch_dot(x0: FloatArray['... n'], x1: FloatArray['... n'], *, keepdims: bool=False, xnp: numpy_utils.NpModule=...) -> FloatArray['... 1?']:\n y = xnp.einsum('...m,...m->...', x0, x1)\n return y[..., None] if keepdims else y", "docstring": "Dot product on the last dimension, with broadcasting support.\n\nContrary to `np.dot`, the behavior is consistent for 1-dim vs n-dim (while\ndot act as matmul).\nFirst dimensions are always broadcasted.\n\nArgs:\n x0: Vector array\n x1: Vector array\n keepdims: If True, returns `FloatArray['... 
1']`\n xnp: Numpy module to use\n\nReturns:\n The dot product along the last axis."} +{"repo": "beam", "function": "async def get_log(self, pipeline_uuid: str, example_filepath: str) -> str:\n self._verify_pipeline_uuid(pipeline_uuid)\n request = api_pb2.GetLogsRequest(pipeline_uuid=pipeline_uuid)\n response = await self._stub.GetLogs(request, **self._kwargs)\n if response.output == '':\n logging.info('Log for %s is empty', example_filepath)\n return response.output", "docstring": "Get the result of pipeline execution.\n\nArgs:\n pipeline_uuid: uuid of the pipeline\n example_filepath: path to the file of the example\n\nReturns:\n output: contains the result of pipeline execution"} +{"repo": "tensorflow", "function": "def __bool__(self):\n self._disallow_bool_casting()", "docstring": "Dummy method to prevent a tensor from being used as a Python `bool`.\n\nThis overload raises a `TypeError` when the user inadvertently\ntreats a `Tensor` as a boolean (most commonly in an `if` or `while`\nstatement), in code that was not converted by AutoGraph. For example:\n\n```python\nif tf.constant(True): # Will raise.\n # ...\n\nif tf.constant(5) < tf.constant(7): # Will raise.\n # ...\n```\n\nRaises:\n `TypeError`."} +{"repo": "beam", "function": "def __init__(self, max_batch_size: int=5000, project: str=None, retry: Retry=None, timeout: float=120, metadata: Sequence[Tuple[str, str]]=(), catalog_name: str='default_catalog', event_store: str='default_event_store'):\n self.max_batch_size = max_batch_size\n self.project = project\n self.retry = retry\n self.timeout = timeout\n self.metadata = metadata\n self.catalog_name = catalog_name\n self.event_store = event_store", "docstring": "Initializes a :class:`WriteUserEvent` transform.\n\nArgs:\n max_batch_size (int): Required. Maximum number of catalog items\n per request.\n project (str): Optional. GCP project name in which the catalog\n data will be imported.\n retry: Optional. Designation of what\n errors, if any, should be retried.\n timeout (float): Optional. The amount of time, in seconds, to wait\n for the request to complete.\n metadata: Optional. Strings which\n should be sent along with the request as metadata.\n catalog_name (str): Optional. Name of the catalog.\n Default: 'default_catalog'\n event_store (str): Optional. Name of the event store.\n Default: 'default_event_store'"} +{"repo": "tensorflow", "function": "def diff(self, obj):\n overlapping_nodes = self.match(obj)\n only_in_checkpoint_view = []\n only_in_trackable = []\n for node_id in self.descendants():\n if node_id not in overlapping_nodes.keys():\n only_in_checkpoint_view.append(node_id)\n for trackable in trackable_view.TrackableView(obj).descendants():\n if trackable not in object_identity.ObjectIdentitySet(overlapping_nodes.values()):\n only_in_trackable.append(trackable)\n return (overlapping_nodes, only_in_checkpoint_view, only_in_trackable)", "docstring": "Returns diff between CheckpointView and Trackable.\n\nThis method is intended to be used to compare the object stored in a\ncheckpoint vs a live model in Python. For example, if checkpoint\nrestoration fails the `assert_consumed()` or\n`assert_existing_objects_matched()` checks, you can use this to list out\nthe objects/checkpoint nodes which were not restored.\n\nExample Usage:\n\n>>> class SimpleModule(tf.Module):\n... def __init__(self, name=None):\n... super().__init__(name=name)\n... self.a_var = tf.Variable(5.0)\n... self.b_var = tf.Variable(4.0)\n... 
self.vars = [tf.Variable(1.0), tf.Variable(2.0)]\n\n>>> root = SimpleModule(name=\"root\")\n>>> leaf = root.leaf = SimpleModule(name=\"leaf\")\n>>> leaf.leaf3 = tf.Variable(6.0, name=\"leaf3\")\n>>> leaf.leaf4 = tf.Variable(7.0, name=\"leaf4\")\n>>> ckpt = tf.train.Checkpoint(root)\n>>> save_path = ckpt.save('/tmp/tf_ckpts')\n>>> checkpoint_view = tf.train.CheckpointView(save_path)\n\n>>> root2 = SimpleModule(name=\"root\")\n>>> leaf2 = root2.leaf2 = SimpleModule(name=\"leaf2\")\n>>> leaf2.leaf3 = tf.Variable(6.0)\n>>> leaf2.leaf4 = tf.Variable(7.0)\n\nPass `node_id=0` to `tf.train.CheckpointView.children()` to get the\ndictionary of all children directly linked to the checkpoint root.\n\n>>> checkpoint_view_diff = checkpoint_view.diff(root2)\n>>> checkpoint_view_match = checkpoint_view_diff[0].items()\n>>> for item in checkpoint_view_match:\n... print(item)\n(0, ...)\n(1, )\n(2, )\n(3, ListWrapper([, ]))\n(6, )\n(7, )\n\n>>> only_in_checkpoint_view = checkpoint_view_diff[1]\n>>> print(only_in_checkpoint_view)\n[4, 5, 8, 9, 10, 11, 12, 13, 14]\n\n>>> only_in_trackable = checkpoint_view_diff[2]\n>>> print(only_in_trackable)\n[..., ,\n,\nListWrapper([,\n]),\n,\n,\n,\n]\n\nArgs:\n obj: `Trackable` root.\n\nReturns:\n Tuple of (\n - Overlaps: Dictionary containing all overlapping trackables that maps\n `node_id` to `Trackable`, same as CheckpointView.match().\n - Only in CheckpointView: List of `node_id` that only exist in\n CheckpointView.\n - Only in Trackable: List of `Trackable` that only exist in Trackable.\n )"} +{"repo": "transformers", "function": "class ClapAudioConfig(PretrainedConfig):\n model_type = 'clap_audio_model'\n base_config_key = 'audio_config'\n\n def __init__(self, window_size=8, num_mel_bins=64, spec_size=256, hidden_act='gelu', patch_size=4, patch_stride=[4, 4], num_classes=527, hidden_size=768, projection_dim=512, depths=[2, 2, 6, 2], num_attention_heads=[4, 8, 16, 32], enable_fusion=False, hidden_dropout_prob=0.1, fusion_type=None, patch_embed_input_channels=1, flatten_patch_embeds=True, patch_embeds_hidden_size=96, enable_patch_layer_norm=True, drop_path_rate=0.0, attention_probs_dropout_prob=0.0, qkv_bias=True, mlp_ratio=4.0, aff_block_r=4, num_hidden_layers=4, projection_hidden_act='relu', layer_norm_eps=1e-05, initializer_factor=1.0, **kwargs):\n super().__init__(**kwargs)\n self.window_size = window_size\n self.num_mel_bins = num_mel_bins\n self.spec_size = spec_size\n self.patch_size = patch_size\n self.patch_stride = patch_stride\n self.num_classes = num_classes\n self.hidden_size = hidden_size\n self.depths = depths\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.window_size = window_size\n self.enable_fusion = enable_fusion\n self.fusion_type = fusion_type\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.projection_dim = projection_dim\n self.flatten_patch_embeds = flatten_patch_embeds\n self.patch_embeds_hidden_size = patch_embeds_hidden_size\n self.enable_patch_layer_norm = enable_patch_layer_norm\n self.drop_path_rate = drop_path_rate\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.qkv_bias = qkv_bias\n self.mlp_ratio = mlp_ratio\n self.patch_embed_input_channels = patch_embed_input_channels\n self.aff_block_r = aff_block_r\n self.layer_norm_eps = layer_norm_eps\n self.initializer_factor = initializer_factor\n self.projection_hidden_act = projection_hidden_act", "docstring": "This is the configuration class to store the configuration of a 
[`ClapAudioModel`]. It is used to instantiate a\nCLAP audio encoder according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the audio encoder of the CLAP\n[laion/clap-htsat-fused](https://huggingface.co/laion/clap-htsat-fused) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n window_size (`int`, *optional*, defaults to 8):\n Image size of the spectrogram\n num_mel_bins (`int`, *optional*, defaults to 64):\n Number of mel features used per frame. Should correspond to the value used in the `ClapProcessor` class.\n spec_size (`int`, *optional*, defaults to 256):\n Desired input size of the spectrogram that the model supports. It can be different from the output of the\n `ClapFeatureExtractor`, in which case the input features will be resized. Corresponds to the `image_size`\n of the audio models.\n hidden_act (`str`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n patch_size (`int`, *optional*, defaults to 4):\n Patch size for the audio spectrogram\n patch_stride (`list`, *optional*, defaults to `[4, 4]`):\n Patch stride for the audio spectrogram\n num_classes (`int`, *optional*, defaults to 527):\n Number of classes used for the head training\n hidden_size (`int`, *optional*, defaults to 768):\n Hidden size of the output of the audio encoder. Corresponds to the dimension of the penultimate layer's\n output, which is sent to the projection MLP layer.\n projection_dim (`int`, *optional*, defaults to 512):\n Hidden size of the projection layer.\n depths (`list`, *optional*, defaults to `[2, 2, 6, 2]`):\n Depths used for the Swin Layers of the audio model\n num_attention_heads (`list`, *optional*, defaults to `[4, 8, 16, 32]`):\n Number of attention heads used for the Swin Layers of the audio model\n enable_fusion (`bool`, *optional*, defaults to `False`):\n Whether or not to enable patch fusion. This is the main contribution of the authors, and should give the\n best results.\n hidden_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the encoder.\n fusion_type (`[type]`, *optional*):\n Fusion type used for the patch fusion.\n patch_embed_input_channels (`int`, *optional*, defaults to 1):\n Number of channels used for the input spectrogram\n flatten_patch_embeds (`bool`, *optional*, defaults to `True`):\n Whether or not to flatten the patch embeddings\n patch_embeds_hidden_size (`int`, *optional*, defaults to 96):\n Hidden size of the patch embeddings. 
It is used as the number of output channels.\n enable_patch_layer_norm (`bool`, *optional*, defaults to `True`):\n Whether or not to enable layer normalization for the patch embeddings\n drop_path_rate (`float`, *optional*, defaults to 0.0):\n Drop path rate for the patch fusion\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n qkv_bias (`bool`, *optional*, defaults to `True`):\n Whether or not to add a bias to the query, key, value projections.\n mlp_ratio (`float`, *optional*, defaults to 4.0):\n Ratio of the mlp hidden dim to embedding dim.\n aff_block_r (`int`, *optional*, defaults to 4):\n downsize_ratio used in the AudioFF block\n num_hidden_layers (`int`, *optional*, defaults to 4):\n Number of hidden layers in the Transformer encoder.\n projection_hidden_act (`str`, *optional*, defaults to `\"relu\"`):\n The non-linear activation function (function or string) in the projection layer. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n layer_norm_eps (`[type]`, *optional*, defaults to 1e-05):\n The epsilon used by the layer normalization layers.\n initializer_factor (`float`, *optional*, defaults to 1.0):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\nExample:\n\n```python\n>>> from transformers import ClapAudioConfig, ClapAudioModel\n\n>>> # Initializing a ClapAudioConfig with laion/clap-htsat-fused style configuration\n>>> configuration = ClapAudioConfig()\n\n>>> # Initializing a ClapAudioModel (with random weights) from the laion/clap-htsat-fused style configuration\n>>> model = ClapAudioModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def dot(inputs, axes, normalize=False, **kwargs):\n return Dot(axes=axes, normalize=normalize, **kwargs)(inputs)", "docstring": "Functional interface to the `Dot` layer.\n\nArgs:\n inputs: A list of input tensors (at least 2).\n axes: Integer or tuple of integers,\n axis or axes along which to take the dot product.\n normalize: Whether to L2-normalize samples along the\n dot product axis before taking the dot product.\n If set to True, then the output of the dot product\n is the cosine proximity between the two samples.\n **kwargs: Standard layer keyword arguments.\n\nReturns:\n A tensor, the dot product of the samples from the inputs."} +{"repo": "keras", "function": "def gelu(x, approximate=True):\n if any_symbolic_tensors((x,)):\n return Gelu(approximate).symbolic_call(x)\n return backend.nn.gelu(x, approximate)", "docstring": "Gaussian Error Linear Unit (GELU) activation function.\n\nIf `approximate` is `True`, it is defined as:\n`f(x) = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))`\n\nOr if `approximate` is `False`, it is defined as:\n`f(x) = x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,\nwhere `P(X) ~ N(0, 1)`.\n\nArgs:\n x: Input tensor.\n approximate: Approximate version of GELU activation. 
Defaults to `True`.\n\nReturns:\n A tensor with the same shape as `x`.\n\nExample:\n\n>>> x = np.array([-1., 0., 1.])\n>>> x_gelu = keras.ops.gelu(x)\n>>> print(x_gelu)\narray([-0.15865525, 0., 0.84134475], shape=(3,), dtype=float64)"} +{"repo": "tensorflow", "function": "def reduce_std(input_tensor, axis=None, keepdims=False, name=None):\n name = name if name else 'reduce_std'\n with ops.name_scope(name):\n input_tensor = ops.convert_to_tensor(input_tensor)\n variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)\n return gen_math_ops.sqrt(variance)", "docstring": "Computes the standard deviation of elements across dimensions of a tensor.\n\nReduces `input_tensor` along the dimensions given in `axis`.\nUnless `keepdims` is true, the rank of the tensor is reduced by 1 for each\nof the entries in `axis`, which must be unique. If `keepdims` is true, the\nreduced dimensions are retained with length 1.\n\nIf `axis` is None, all dimensions are reduced, and a\ntensor with a single element is returned.\n\nFor example:\n\n>>> x = tf.constant([[1., 2.], [3., 4.]])\n>>> tf.math.reduce_std(x)\n<tf.Tensor: shape=(), dtype=float32, numpy=1.118034>\n>>> tf.math.reduce_std(x, 0)\n<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>\n>>> tf.math.reduce_std(x, 1)\n<tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.5], dtype=float32)>\n\nArgs:\n input_tensor: The tensor to reduce. Should have real or complex type.\n axis: The dimensions to reduce. If `None` (the default), reduces all\n dimensions. Must be in the range `[-rank(input_tensor),\n rank(input_tensor))`.\n keepdims: If true, retains reduced dimensions with length 1.\n name: A name scope for the associated operations (optional).\n\nReturns:\n The reduced tensor, of the same dtype as the input_tensor. Note, for\n `complex64` or `complex128` input, the returned `Tensor` will be of type\n `float32` or `float64`, respectively.\n\n@compatibility(numpy)\nEquivalent to np.std\n\nPlease note `np.std` has a `dtype` parameter that could be used to specify the\noutput type. By default this is `dtype=float64`. On the other hand,\n`tf.math.reduce_std` has aggressive type inference from `input_tensor`.\n@end_compatibility"} +{"repo": "tensorflow", "function": "def __init__(self, sess, bad_init_action=None, bad_run_start_action=None, bad_debug_urls=None):\n self._bad_init_action = bad_init_action\n self._bad_run_start_action = bad_run_start_action\n self._bad_debug_urls = bad_debug_urls\n framework.BaseDebugWrapperSession.__init__(self, sess)", "docstring": "Constructor.\n\nArgs:\n sess: The TensorFlow Session object to be wrapped.\n bad_init_action: (str) bad action value to be returned during the\n on-session-init callback.\n bad_run_start_action: (str) bad action value to be returned during the\n on-run-start callback.\n bad_debug_urls: Bad URL values to be returned during the on-run-start\n callback."} +{"repo": "tensorflow", "function": "class Graph(collections.namedtuple('Graph', ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])):\n\n def __repr__(self):\n return self.as_dot()\n\n def as_dot(self):\n \"\"\"Print CFG in DOT format.\"\"\"\n result = 'digraph CFG {\\n'\n for node in self.index.values():\n result += ' %s [label=\"%s\"];\\n' % (id(node), node)\n for node in self.index.values():\n for next_ in node.next:\n result += ' %s -> %s;\\n' % (id(node), id(next_))\n result += '}'\n return result", "docstring": "A Control Flow Graph.\n\nThe CFG maintains an index to allow looking up a CFG node by the AST node to\nwhich it is associated. 
The index can also be enumerated in top-down, depth\nfirst order.\n\nWalking the graph in forward or reverse order is supported by double\nparent-child links.\n\nNote: the error nodes are not wired to their corresponding finally guards,\nbecause these are shared, and wiring them would create a reverse path from\nnormal control flow into the error nodes, which we want to avoid.\n\nThe graph also maintains edges corresponding to higher level statements\nlike for-else loops. A node is considered successor of a statement if there\nis an edge from a node that is lexically a child of that statement to a node\nthat is not. Statement predecessors are analogously defined.\n\nAttributes:\n entry: Node, the entry node\n exit: FrozenSet[Node, ...], the exit nodes\n error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised\n error (errors propagated from function calls are not accounted)\n index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG node\n stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes\n to their predecessor CFG nodes\n stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes\n to their successor CFG nodes"} +{"repo": "beam", "function": "def throttle_request(self, now):\n throttling_probability = self._throttling_probability(now)\n self._all_requests.add(now, 1)\n return self._random.uniform(0, 1) < throttling_probability", "docstring": "Determines whether one RPC attempt should be throttled.\n\nThis should be called once each time the caller intends to send an RPC; if\nit returns true, drop or delay that request (calling this function again\nafter the delay).\n\nArgs:\n now: int, time in ms since the epoch\nReturns:\n bool, True if the caller should throttle or delay the request."} +{"repo": "tensorflow", "function": "def reduce(self, initial_state, reduce_func, name=None):\n with ops.name_scope('initial_state'):\n initial_state = structure.normalize_element(initial_state)\n state_structure = structure.type_spec_from_value(initial_state)\n need_to_rerun = True\n while need_to_rerun:\n wrapped_func = structured_function.StructuredFunctionWrapper(reduce_func, 'reduce()', input_structure=(state_structure, self.element_spec), add_to_graph=False)\n output_classes = wrapped_func.output_classes\n state_classes = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), state_structure)\n for new_state_class, state_class in zip(nest.flatten(output_classes), nest.flatten(state_classes)):\n if not issubclass(new_state_class, state_class):\n raise TypeError(f'The element classes for the new state must match the initial state. Expected {state_classes} but got {wrapped_func.output_classes}.')\n output_types = wrapped_func.output_types\n state_types = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), state_structure)\n for new_state_type, state_type in zip(nest.flatten(output_types), nest.flatten(state_types)):\n if new_state_type != state_type:\n raise TypeError(f'The element types for the new state must match the initial state. 
Expected {state_types} but got {wrapped_func.output_types}.')\n output_shapes = wrapped_func.output_shapes\n state_shapes = nest.map_structure(lambda component_spec: component_spec._to_legacy_output_shapes(), state_structure)\n flat_state_shapes = nest.flatten(state_shapes)\n flat_new_state_shapes = nest.flatten(output_shapes)\n weakened_state_shapes = [original.most_specific_compatible_shape(new) for original, new in zip(flat_state_shapes, flat_new_state_shapes)]\n need_to_rerun = False\n for original_shape, weakened_shape in zip(flat_state_shapes, weakened_state_shapes):\n if original_shape.ndims is not None and (weakened_shape.ndims is None or original_shape.as_list() != weakened_shape.as_list()):\n need_to_rerun = True\n break\n if need_to_rerun:\n state_structure = structure.convert_legacy_structure(state_types, nest.pack_sequence_as(state_shapes, weakened_state_shapes), state_classes)\n reduce_func = wrapped_func.function\n reduce_func.add_to_graph(ops.get_default_graph())\n dataset = self._apply_debug_options()\n metadata = dataset_metadata_pb2.Metadata()\n if name:\n metadata.name = _validate_and_encode(name)\n return structure.from_compatible_tensor_list(state_structure, gen_dataset_ops.reduce_dataset(dataset._variant_tensor, structure.to_tensor_list(state_structure, initial_state), reduce_func.captured_inputs, f=reduce_func, output_shapes=structure.get_flat_tensor_shapes(state_structure), output_types=structure.get_flat_tensor_types(state_structure), metadata=metadata.SerializeToString()))", "docstring": "Reduces the input dataset to a single element.\n\nThe transformation calls `reduce_func` successively on every element of\nthe input dataset until the dataset is exhausted, aggregating information in\nits internal state. The `initial_state` argument is used for the initial\nstate and the final state is returned as the result.\n\n>>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x +\n... 1).numpy().item()\n5\n>>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x +\n... y).numpy().item()\n10\n\nArgs:\n initial_state: An element representing the initial state of the\n transformation.\n reduce_func: A function that maps `(old_state, input_element)` to\n `new_state`. It must take two arguments and return a new element. The\n structure of `new_state` must match the structure of `initial_state`.\n name: (Optional.) 
A name for the tf.data operation.\n\nReturns:\n A dataset element corresponding to the final state of the transformation."} +{"repo": "beam", "function": "def get_record_schema_from_dict_table_schema(schema_name: str, table_schema: Dict[str, Any], namespace: str='apache_beam.io.gcp.bigquery') -> Dict[str, Any]:\n avro_fields = [table_field_to_avro_field(field, '.'.join((namespace, schema_name))) for field in table_schema['fields']]\n return {'type': 'record', 'name': schema_name, 'fields': avro_fields, 'doc': 'Translated Avro Schema for {}'.format(schema_name), 'namespace': namespace}", "docstring": "Convert a table schema into an Avro schema.\n\nArgs:\n schema_name (str): The name of the record.\n table_schema (Dict[str, Any]): A BigQuery table schema in dict form.\n namespace (str): The namespace of the Avro schema.\n\nReturns:\n Dict[str, Any]: The schema as an Avro RecordSchema."} +{"repo": "tensorflow", "function": "def sampled_softmax_loss_v2(weights, biases, labels, inputs, num_sampled, num_classes, num_true=1, sampled_values=None, remove_accidental_hits=True, seed=None, name='sampled_softmax_loss'):\n return sampled_softmax_loss(weights, biases, labels, inputs, num_sampled, num_classes, num_true=num_true, sampled_values=sampled_values, remove_accidental_hits=remove_accidental_hits, partition_strategy='div', name=name, seed=seed)", "docstring": "Computes and returns the sampled softmax training loss.\n\nThis is a faster way to train a softmax classifier over a huge number of\nclasses.\n\nThis operation is for training only. It is generally an underestimate of\nthe full softmax loss.\n\nA common use case is to use this method for training, and calculate the full\nsoftmax loss for evaluation or inference as in the following example:\n\n```python\nif mode == \"train\":\n loss = tf.nn.sampled_softmax_loss(\n weights=weights,\n biases=biases,\n labels=labels,\n inputs=inputs,\n ...)\nelif mode == \"eval\":\n logits = tf.matmul(inputs, tf.transpose(weights))\n logits = tf.nn.bias_add(logits, biases)\n labels_one_hot = tf.one_hot(labels, n_classes)\n loss = tf.nn.softmax_cross_entropy_with_logits(\n labels=labels_one_hot,\n logits=logits)\n```\n\nSee our [Candidate Sampling Algorithms Reference]\n(https://www.tensorflow.org/extras/candidate_sampling.pdf)\n\nAlso see Section 3 of [Jean et al., 2014](http://arxiv.org/abs/1412.2007)\n([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.\n\nNote: when doing embedding lookup on `weights` and `bias`, \"div\" partition\nstrategy will be used. Support for other partition strategy will be added\nlater.\n\nArgs:\n weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`\n objects whose concatenation along dimension 0 has shape [num_classes,\n dim]. The (possibly-sharded) class embeddings.\n biases: A `Tensor` of shape `[num_classes]`. The class biases.\n labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The\n target classes. Note that this format differs from the `labels` argument\n of `nn.softmax_cross_entropy_with_logits`.\n inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of\n the input network.\n num_sampled: An `int`. The number of classes to randomly sample per batch.\n num_classes: An `int`. The number of possible classes.\n num_true: An `int`. 
The number of target classes per training example.\n sampled_values: a tuple of (`sampled_candidates`, `true_expected_count`,\n `sampled_expected_count`) returned by a `*_candidate_sampler` function.\n (if None, we default to `log_uniform_candidate_sampler`)\n remove_accidental_hits: A `bool`. Whether to remove \"accidental hits\"\n where a sampled class equals one of the target classes. Default is True.\n seed: random seed for candidate sampling. Defaults to None, which doesn't set\n the op-level random seed for candidate sampling.\n name: A name for the operation (optional).\n\nReturns:\n A `batch_size` 1-D tensor of per-example sampled softmax losses."} +{"repo": "tensorflow", "function": "def __init__(self, trainable=None, caching_device=None, name=None, shape=None, dtype=None, constraint=None, synchronization=None, aggregation=None, extra_handle_data=None, distribute_strategy=None, **unused_kwargs):\n with ops.init_scope():\n self._in_graph_mode = not context.executing_eagerly()\n with ops.name_scope(name, 'Variable', skip_on_eager=False) as name:\n handle_name = ops.name_from_scope_name(name)\n if self._in_graph_mode:\n shared_name = handle_name\n unique_id = shared_name\n else:\n unique_id = '%s_%d' % (handle_name, ops.uid())\n shared_name = None\n handle = _variable_handle_from_shape_and_dtype(shape=shape, dtype=dtype, shared_name=shared_name, name=name, graph_mode=self._in_graph_mode, initial_value=extra_handle_data)\n handle._parent_trackable = weakref.ref(self)\n handle._name = handle_name + ':0'\n handle._unique_id = unique_id\n if self._in_graph_mode:\n with ops.name_scope('Read'):\n with ops.device(handle.device):\n value = gen_resource_variable_ops.read_variable_op(handle, dtype)\n _maybe_set_handle_data(dtype, handle, value)\n graph_element = value\n ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, self)\n else:\n graph_element = None\n super(UninitializedVariable, self).__init__(distribute_strategy=distribute_strategy, shape=shape, dtype=dtype, unique_id=unique_id, handle_name=handle_name, constraint=constraint, handle=handle, graph_element=graph_element, trainable=trainable, synchronization=synchronization, aggregation=aggregation, in_graph_mode=self._in_graph_mode, **unused_kwargs)", "docstring": "Creates the variable handle.\n\nArgs:\n trainable: If `True`, GradientTapes automatically watch uses of this\n Variable.\n caching_device: Optional device string or function describing where the\n Variable should be cached for reading. Defaults to the Variable's\n device. If not `None`, caches on another device. Typical use is to\n cache on the device where the Ops using the Variable reside, to\n deduplicate copying through `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n shape: The variable's shape.\n dtype: The variable's dtype.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value (which must have\n the same shape). Constraints are not safe to use when doing asynchronous\n distributed training.\n synchronization: Indicates when a distributed variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. 
By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses when to\n synchronize.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n extra_handle_data: Optional, another resource handle or Tensor with handle\n data to merge with `shape` and `dtype`.\n distribute_strategy: The tf.distribute.Strategy this variable is being\n created inside of."} +{"repo": "tensorflow", "function": "def gen_outputs(self, ds_fn, break_points, num_outputs, ckpt_saved=False, sparse_tensors=False, verify_exhausted=True, save_checkpoint_at_end=True):\n outputs = []\n if context.executing_eagerly():\n for i in range(len(break_points) + 1):\n iterator = iter(ds_fn())\n ckpt = tracking_util.Checkpoint(iterator=iterator)\n if ckpt_saved:\n ckpt_path = self._latest_ckpt()\n ckpt.restore(ckpt_path)\n start = break_points[i - 1] if i > 0 else 0\n end = break_points[i] if i < len(break_points) else num_outputs\n num_iters = end - start\n for _ in range(num_iters):\n outputs.append(self.evaluate(next(iterator)))\n if i < len(break_points) and end == num_outputs and verify_exhausted:\n with self.assertRaises(StopIteration):\n next(iterator)\n if i == len(break_points) and verify_exhausted:\n with self.assertRaises(StopIteration):\n next(iterator)\n if save_checkpoint_at_end or i < len(break_points):\n ckpt_options = checkpoint_options.CheckpointOptions()\n ckpt_options.experimental_enable_async_checkpoint = False\n ckpt_options.enable_async = False\n ckpt_path = ckpt.save(self._ckpt_path(), options=ckpt_options)\n ckpt_saved = True\n else:\n\n def get_ops():\n if ckpt_saved:\n saver = self._import_meta_graph()\n init_op, get_next_op = self._get_iterator_ops_from_collection(ds_fn, sparse_tensors=sparse_tensors)\n else:\n init_op, get_next_op, saver = self._build_graph(ds_fn, sparse_tensors=sparse_tensors)\n return (init_op, get_next_op, saver)\n for i in range(len(break_points) + 1):\n with ops.Graph().as_default() as g:\n init_op, get_next_op, saver = get_ops()\n get_next_op = remove_variants(get_next_op)\n with self.session(graph=g) as sess:\n if ckpt_saved:\n self._initialize(init_op, sess)\n self._restore(saver, sess)\n else:\n self._initialize(init_op, sess)\n start = break_points[i - 1] if i > 0 else 0\n end = break_points[i] if i < len(break_points) else num_outputs\n num_iters = end - start\n for _ in range(num_iters):\n outputs.append(sess.run(get_next_op))\n if i < len(break_points) and end == num_outputs and verify_exhausted:\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next_op)\n if i == len(break_points) and verify_exhausted:\n with self.assertRaises(errors.OutOfRangeError):\n sess.run(get_next_op)\n if save_checkpoint_at_end or i < len(break_points):\n self._save(sess, saver)\n ckpt_saved = True\n return outputs", "docstring": "Generates elements from input dataset while stopping at break points.\n\nProduces `num_outputs` outputs and saves the state of the iterator in the\nSaver checkpoint.\n\nArgs:\n ds_fn: 0-argument function that returns the dataset.\n break_points: A list of integers. For each `break_point` in\n `break_points`, we produce outputs till `break_point` number of items\n have been produced and then checkpoint the state. The current graph and\n session are destroyed and a new graph and session are used to produce\n outputs till next checkpoint or till `num_outputs` elements have been\n produced. 
`break_point` must be <= `num_outputs`.\n num_outputs: The total number of outputs to produce from the iterator.\n ckpt_saved: Whether a checkpoint already exists.\n sparse_tensors: Whether dataset is built from SparseTensor(s).\n verify_exhausted: Whether to verify that the iterator has been exhausted\n after producing `num_outputs` elements.\n save_checkpoint_at_end: Whether to save a checkpoint after producing all\n outputs. If False, checkpoints are saved each break point but not at the\n end. Note that checkpoints overwrite each other so there is always only\n a single checkpoint available. Defaults to True.\n\nReturns:\n A list of `num_outputs` items."} +{"repo": "tensorflow", "function": "def _calculate_scores(self, query, key):\n return NotImplementedError", "docstring": "Calculates attention scores.\n\nArgs:\n query: Query tensor of shape `[batch_size, Tq, dim]`.\n key: Key tensor of shape `[batch_size, Tv, dim]`.\n\nReturns:\n Tensor of shape `[batch_size, Tq, Tv]`."} +{"repo": "tensorflow", "function": "def _broadcast_dynamic_shape_next_layer_both_uniform(ac_0: _LayerBroadcaster, bc_0: _LayerBroadcaster, a_1: RowPartition, b_1: RowPartition) -> Tuple[RowPartition, _LayerBroadcaster, _LayerBroadcaster]:\n if not isinstance(ac_0, _LayerBroadcaster):\n raise TypeError('ac_0 should be a _LayerBroadcaster')\n if not isinstance(bc_0, _LayerBroadcaster):\n raise TypeError('bc_0 should be a _LayerBroadcaster')\n if not isinstance(a_1, RowPartition):\n raise TypeError('a_1 should be a RowPartition')\n if not isinstance(b_1, RowPartition):\n raise TypeError('b_1 should be a RowPartition')\n assert a_1.is_uniform()\n assert b_1.is_uniform()\n static_a_1 = tensor_util.constant_value(a_1.uniform_row_length())\n static_b_1 = tensor_util.constant_value(b_1.uniform_row_length())\n if static_a_1 is not None:\n if static_a_1 == static_b_1:\n [ac_1, _] = _broadcast_half(ac_0, a_1)\n [bc_1, _] = _broadcast_half(bc_0, b_1)\n c_1 = RowPartition.from_uniform_row_length(static_a_1, nrows=ac_0.dest_nrows())\n return [c_1, ac_1, bc_1]\n elif static_a_1 == 1:\n [bc_1, c_1b] = _broadcast_half(bc_0, b_1)\n ac_1 = _LayerBroadcaster.from_gather_index(array_ops.gather(ac_0.gather_index, c_1b.value_rowids()))\n c_1 = RowPartition.from_uniform_row_length(b_1.uniform_row_length(), nrows=bc_0.dest_nrows())\n return [c_1, ac_1, bc_1]\n if static_b_1 == 1:\n [ac_1, c_1a] = _broadcast_half(ac_0, a_1)\n bc_1 = _LayerBroadcaster.from_gather_index(array_ops.gather(bc_0.gather_index, c_1a.value_rowids()))\n c_1 = RowPartition.from_uniform_row_length(a_1.uniform_row_length(), nrows=ac_0.dest_nrows())\n return [c_1, ac_1, bc_1]\n\n def broadcast_noop():\n [ac_1, _] = _broadcast_half(ac_0, a_1)\n [bc_1, _] = _broadcast_half(bc_0, b_1)\n return [a_1.uniform_row_length(), ac_1.gather_index, bc_1.gather_index]\n\n def broadcast_a():\n [bc_1, c_1b] = _broadcast_half(bc_0, b_1)\n ac_1_gather_index = array_ops.gather(ac_0.gather_index, c_1b.value_rowids())\n return [b_1.uniform_row_length(), ac_1_gather_index, bc_1.gather_index]\n\n def broadcast_b():\n [ac_1, c_1a] = _broadcast_half(ac_0, a_1)\n bc_1_gather_index = array_ops.gather(bc_0.gather_index, c_1a.value_rowids())\n return [a_1.uniform_row_length(), ac_1.gather_index, bc_1_gather_index]\n can_broadcast_b = math_ops.equal(b_1.uniform_row_length(), 1)\n\n def no_broadcast_a():\n return cond.cond(can_broadcast_b, true_fn=broadcast_b, false_fn=broadcast_noop)\n can_broadcast_a = math_ops.equal(a_1.uniform_row_length(), 1)\n broadcast_asserts = 
[check_ops.assert_equal(math_ops.logical_or(math_ops.logical_or(can_broadcast_a, can_broadcast_b), math_ops.equal(a_1.uniform_row_length(), b_1.uniform_row_length())), True)]\n result = cond.cond(can_broadcast_a, true_fn=broadcast_a, false_fn=no_broadcast_a)\n [c_1_uniform_row_length, ac_1_gather_index, bc_1_gather_index] = [control_flow_ops.with_dependencies(broadcast_asserts, x) for x in result]\n c_1 = RowPartition.from_uniform_row_length(c_1_uniform_row_length, nvals=c_1_uniform_row_length * ac_0.dest_nrows(), nrows=ac_0.dest_nrows())\n ac_1 = _LayerBroadcaster.from_gather_index(ac_1_gather_index)\n bc_1 = _LayerBroadcaster.from_gather_index(bc_1_gather_index)\n return [c_1, ac_1, bc_1]", "docstring": "Broadcast target and next layer broadcaster of two uniform dynamic shapes.\n\n *--ac_0-->*<--bc_0--*\n | | |\n a_1 c_1 b_1\n | | |\n V V V\n *--ac_1-->*<--bc_1--*\n\nArgs:\n ac_0: _LayerBroadcaster from a to c in the previous layer.\n bc_0: _LayerBroadcaster from b to c in the previous layer.\n a_1: a RowPartition for the next layer of a.\n b_1: a RowPartition for the next layer of b.\n\nReturns:\n (c_1, ac_1, bc_1)\n c_1: a RowPartition for the next layer of the dynamic shape.\n ac_1: _LayerBroadcaster from a to c in the next layer.\n bc_1: _LayerBroadcaster from b to c in the next layer."} +{"repo": "transformers", "function": "class Emu3Config(PretrainedConfig):\n model_type = 'emu3'\n keys_to_ignore_at_inference = ['past_key_values']\n sub_configs = {'text_config': Emu3TextConfig, 'vq_config': Emu3VQVAEConfig}\n\n def __init__(self, vq_config: Union[Dict, Emu3VQVAEConfig]=None, text_config: Union[Dict, Emu3TextConfig]=None, vocabulary_map: Optional[Dict[int, int]]=None, **kwargs):\n if vq_config is None:\n vq_config = Emu3VQVAEConfig()\n elif isinstance(vq_config, dict):\n vq_config = Emu3VQVAEConfig(**vq_config)\n if text_config is None:\n text_config = Emu3TextConfig()\n elif isinstance(text_config, dict):\n text_config = Emu3TextConfig(**text_config)\n self.vq_config = vq_config\n self.text_config = text_config\n self.vocabulary_map = vocabulary_map\n self.image_token_id = vocabulary_map.get('') if vocabulary_map is not None else None\n super().__init__(**kwargs)", "docstring": "This is the configuration class to store the configuration of a [`Emu3Model`]. It is used to instantiate a\nemu3 model according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the\n[Emu3-community/Emu3-Chat-hf](https://huggingface.co/Emu3-community/Emu3-Chat-hf).\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n vq_config (`Union[Dict, Emu3VQVAEConfig]`, *optional*):\n Emu3VQVAEConfig instance containing the configuration for the VQ-VAE model.\n text_config (`Union[Dict, Emu3TextConfig]``, *optional*):\n Emu3TextConfig instance containing the configuration for the language model.\n vocabulary_map (`dict`, *optional*):\n A dictionary containing the vocabulary map from the tokenizer. 
Used to obtain tokens from the image inputs."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[List[torch.FloatTensor], Cache]]=None, token_type_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.LongTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, logits_to_keep: Union[int, torch.Tensor]=0, **lm_kwargs) -> ShieldGemma2ImageClassifierOutputWithNoAttention:\n outputs = self.model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, token_type_ids=token_type_ids, cache_position=cache_position, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, logits_to_keep=logits_to_keep, **lm_kwargs)\n logits = outputs.logits\n selected_logits = logits[:, -1, [self.yes_token_index, self.no_token_index]]\n probabilities = torch.softmax(selected_logits, dim=-1)\n return ShieldGemma2ImageClassifierOutputWithNoAttention(logits=selected_logits, probabilities=probabilities)", "docstring": "Returns:\n A `ShieldGemma2ImageClassifierOutputWithNoAttention` instance containing the logits and probabilities\n associated with the model predicting the `Yes` or `No` token as the response to that prompt, captured in the\n following properties.\n\n * `logits` (`torch.Tensor` of shape `(batch_size, 2)`):\n The first position along dim=1 is the logits for the `Yes` token and the second position along dim=1 is\n the logits for the `No` token.\n * `probabilities` (`torch.Tensor` of shape `(batch_size, 2)`):\n The first position along dim=1 is the probability of predicting the `Yes` token and the second position\n along dim=1 is the probability of predicting the `No` token.\n\n ShieldGemma prompts are constructed such that predicting the `Yes` token means the content *does violate* the\n policy as described. If you are only interested in the violative condition, use\n `violated = outputs.probabilities[:, 1]` to extract that slice from the output tensors.\n\n When used with the `ShieldGemma2Processor`, the `batch_size` will be equal to `len(images) * len(policies)`,\n and the order within the batch will be img1_policy1, ... img1_policyN, ... 
imgM_policyN."} +{"repo": "transformers", "function": "class LEDSeq2SeqSequenceClassifierOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[List[torch.FloatTensor]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n encoder_global_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None", "docstring": "Base class for outputs of sequence-to-sequence sentence classification models.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,\n num_heads, sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be\n used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.\n decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.\n encoder_attentions 
(`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n encoder_global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,\n where `x` is the number of tokens with global attention mask.\n\n Global attentions weights after the attention softmax, used to compute the weighted average in the\n self-attention heads. Those are the attention weights from every token with global attention to every token\n in the sequence."} +{"repo": "tensorflow", "function": "def __init__(self, session, logdir, max_queue=10, flush_secs=120, filename_suffix=''):\n self._session = session\n self._logdir = logdir\n self._closed = False\n gfile.MakeDirs(self._logdir)\n with self._session.graph.as_default():\n with ops.name_scope('filewriter'):\n file_writer = summary_ops_v2.create_file_writer(logdir=self._logdir, max_queue=max_queue, flush_millis=flush_secs * 1000, filename_suffix=filename_suffix)\n with summary_ops_v2.always_record_summaries(), file_writer.as_default():\n self._event_placeholder = array_ops.placeholder_with_default(constant_op.constant('unused', dtypes.string), shape=[])\n self._add_event_op = summary_ops_v2.import_event(self._event_placeholder)\n self._init_op = file_writer.init()\n self._flush_op = file_writer.flush()\n self._close_op = file_writer.close()\n self._session.run(self._init_op)", "docstring": "Creates an `EventFileWriterV2` and an event file to write to.\n\nOn construction, this calls `tf.contrib.summary.create_file_writer` within\nthe graph from `session.graph` to look up a shared summary writer resource\nfor `logdir` if one exists, and create one if not. Creating the summary\nwriter resource in turn creates a new event file in `logdir` to be filled\nwith `Event` protocol buffers passed to `add_event`. Graph ops to control\nthis writer resource are added to `session.graph` during this init call;\nstateful methods on this class will call `session.run()` on these ops.\n\nNote that because the underlying resource is shared, it is possible that\nother parts of the code using the same session may interact independently\nwith the resource, e.g. by flushing or even closing it. It is the caller's\nresponsibility to avoid any undesirable sharing in this regard.\n\nThe remaining arguments to the constructor (`flush_secs`, `max_queue`, and\n`filename_suffix`) control the construction of the shared writer resource\nif one is created. If an existing resource is reused, these arguments have\nno effect. See `tf.contrib.summary.create_file_writer` for details.\n\nArgs:\n session: A `tf.compat.v1.Session`. Session that will hold shared writer\n resource. The writer ops will be added to session.graph during this\n init call.\n logdir: A string. Directory where event file will be written.\n max_queue: Integer. Size of the queue for pending events and summaries.\n flush_secs: Number. How often, in seconds, to flush the\n pending events and summaries to disk.\n filename_suffix: A string. 
Every event file's name is suffixed with\n `filename_suffix`."} +{"repo": "tensorflow", "function": "def make_rhs(self, operator, adjoint, with_batch=True):\n raise NotImplementedError('make_rhs is not defined.')", "docstring": "Make a rhs appropriate for calling operator.solve(rhs).\n\nArgs:\n operator: A `LinearOperator`\n adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the\n adjoint operator.\n with_batch: Python `bool`. If `True`, create `rhs` with the same batch\n shape as operator, and otherwise create a matrix without any batch\n shape.\n\nReturns:\n A `Tensor`"} +{"repo": "tensorflow", "function": "def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n raise NotImplementedError", "docstring": "Adds a value to this variable.\n\n This is essentially a shortcut for `assign_add(self, delta)`.\n\nArgs:\n delta: A `Tensor`. The value to add to this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the new\n value of the variable; if False will return the assign op.\n\nReturns:\n The updated variable. If `read_value` is false, instead returns None in\n Eager mode and the assign op in graph mode."} +{"repo": "tensorflow", "function": "def fresnel_sin(x, name=None):\n with ops.name_scope(name, 'fresnel_sin', [x]):\n return gen_special_math_ops.fresnel_sin(x)", "docstring": "Computes Fresnel's sine integral of `x` element-wise.\n\nThe Fresnel sine integral is defined as the integral of `sin(t^2)` from\n`0` to `x`, with the domain of definition all real numbers.\n\n>>> tf.math.special.fresnel_sin([-1., -0.1, 0.1, 1.]).numpy()\narray([-0.43825912, -0.00052359, 0.00052359, 0.43825912], dtype=float32)\n\nThis implementation is based off of the Cephes math library.\n\nArgs:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types:\n `float32`, `float64`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` or `SparseTensor`, respectively. 
Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.fresnel first output.\n@end_compatibility"} +{"repo": "python-fire", "function": "def GetTermSize():\n xy = None\n for get_terminal_size in (_GetTermSizePosix, _GetTermSizeWindows, _GetTermSizeEnvironment, _GetTermSizeTput):\n try:\n xy = get_terminal_size()\n if xy:\n break\n except:\n pass\n return xy or (80, 24)", "docstring": "Gets the terminal x and y dimensions in characters.\n\n_GetTermSize*() helper functions taken from:\n http://stackoverflow.com/questions/263890/\n\nReturns:\n (columns, lines): A tuple containing the terminal x and y dimensions."} +{"repo": "beam", "function": "def run(argv=None, model_class=None, model_params=None, save_main_session=True, test_pipeline=None) -> PipelineResult:\n known_args, pipeline_args = parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n if not model_class:\n model_class = maskrcnn_resnet50_fpn\n model_params = {'num_classes': 91}\n model_handler = PytorchModelHandlerTensor(state_dict_path=known_args.model_state_dict_path, model_class=model_class, model_params=model_params)\n pipeline = test_pipeline\n if not test_pipeline:\n pipeline = beam.Pipeline(options=pipeline_options)\n filename_value_pair = pipeline | 'ReadImageNames' >> beam.io.ReadFromText(known_args.input) | 'FilterEmptyLines' >> beam.ParDo(filter_empty_lines) | 'ReadImageData' >> beam.Map(lambda image_name: read_image(image_file_name=image_name, path_to_dir=known_args.images_dir)) | 'PreprocessImages' >> beam.MapTuple(lambda file_name, data: (file_name, preprocess_image(data)))\n predictions = filename_value_pair | 'PyTorchRunInference' >> RunInference(KeyedModelHandler(model_handler)) | 'ProcessOutput' >> beam.ParDo(PostProcessor())\n _ = predictions | 'WriteOutput' >> beam.io.WriteToText(known_args.output, shard_name_template='', append_trailing_newlines=True)\n result = pipeline.run()\n result.wait_until_finish()\n return result", "docstring": "Args:\n argv: Command line arguments defined for this example.\n model_class: Reference to the class definition of the model.\n If None, maskrcnn_resnet50_fpn will be used as default .\n model_params: Parameters passed to the constructor of the model_class.\n These will be used to instantiate the model object in the\n RunInference API.\n save_main_session: Used for internal testing.\n test_pipeline: Used for internal testing."} +{"repo": "beam", "function": "def __init__(self, num_workers, *unused_args, **unused_kwargs):\n super().__init__(*unused_args, **unused_kwargs)\n self._num_workers = num_workers\n self._successful_ops = util.MovingSum(window_ms=1000, bucket_ms=1000)\n self._first_instant = datetime.datetime.now()\n self._throttled_secs = Metrics.counter(RampupThrottlingFn, 'cumulativeThrottlingSeconds')", "docstring": "Initializes a ramp-up throttler transform.\n\nArgs:\n num_workers: A hint for the expected number of workers, used to derive\n the local rate limit."} +{"repo": "transformers", "function": "class Owlv2Processor(ProcessorMixin):\n attributes = ['image_processor', 'tokenizer']\n image_processor_class = 'Owlv2ImageProcessor'\n tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')\n optional_call_args = ['query_images']\n\n def __init__(self, image_processor, tokenizer, **kwargs):\n super().__init__(image_processor, tokenizer)\n\n def __call__(self, images: Optional[ImageInput]=None, text: Union[TextInput, PreTokenizedInput, 
List[TextInput], List[PreTokenizedInput]]=None, *args, audio=None, videos=None, **kwargs: Unpack[Owlv2ProcessorKwargs]) -> BatchFeature:\n \"\"\"\n Main method to prepare one or several text(s) and image(s) for the model. This method forwards the `text` and\n `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode\n the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to\n CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring\n of the above two methods for more information.\n\n Args:\n images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,\n `List[torch.Tensor]`):\n The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n tensor. Both channels-first and channels-last formats are supported.\n text (`str`, `List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n (pretokenized string). If the sequences are provided as a list of strings (pretokenized), you must set\n `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n query_images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):\n The query image to be prepared, one query image is expected per target image to be queried. Each image\n can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image\n should be of shape (C, H, W), where C is the number of channels, H and W are image height and width.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors of a particular framework. Acceptable values are:\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return NumPy `np.ndarray` objects.\n - `'jax'`: Return JAX `jnp.ndarray` objects.\n\n Returns:\n [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.\n - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when\n `return_attention_mask=True` or if *\"attention_mask\"* is in `self.model_input_names` and if `text` is not\n `None`).\n - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.\n - **query_pixel_values** -- Pixel values of the query images to be fed to a model. Returned when `query_images` is not `None`.\n \"\"\"\n output_kwargs = self._merge_kwargs(Owlv2ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, **self.prepare_and_validate_optional_call_args(*args))\n query_images = output_kwargs['images_kwargs'].pop('query_images', None)\n return_tensors = output_kwargs['common_kwargs']['return_tensors']\n if text is None and query_images is None and (images is None):\n raise ValueError('You have to specify at least one text or query image or image. 
All three cannot be none.')\n images, text = _validate_images_text_input_order(images, text)\n data = {}\n if text is not None:\n if isinstance(text, str) or (isinstance(text, List) and (not isinstance(text[0], List))):\n encodings = [self.tokenizer(text, **output_kwargs['text_kwargs'])]\n elif isinstance(text, List) and isinstance(text[0], List):\n encodings = []\n max_num_queries = max([len(text_single) for text_single in text])\n for text_single in text:\n if len(text_single) != max_num_queries:\n text_single = text_single + [' '] * (max_num_queries - len(text_single))\n encoding = self.tokenizer(text_single, **output_kwargs['text_kwargs'])\n encodings.append(encoding)\n else:\n raise TypeError('Input text should be a string, a list of strings or a nested list of strings')\n if return_tensors == 'np':\n input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)\n attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)\n elif return_tensors == 'jax' and is_flax_available():\n import jax.numpy as jnp\n input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)\n attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)\n elif return_tensors == 'pt' and is_torch_available():\n import torch\n input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)\n attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)\n elif return_tensors == 'tf' and is_tf_available():\n import tensorflow as tf\n input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)\n attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)\n else:\n raise ValueError('Target return tensor type could not be returned')\n data['input_ids'] = input_ids\n data['attention_mask'] = attention_mask\n if query_images is not None:\n query_pixel_values = self.image_processor(query_images, **output_kwargs['images_kwargs']).pixel_values\n data = {'query_pixel_values': query_pixel_values}\n if images is not None:\n image_features = self.image_processor(images, **output_kwargs['images_kwargs'])\n data['pixel_values'] = image_features.pixel_values\n return BatchFeature(data=data, tensor_type=return_tensors)\n\n def post_process_object_detection(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to [`Owlv2ImageProcessor.post_process_object_detection`]. Please refer\n to the docstring of this method for more information.\n \"\"\"\n warnings.warn('`post_process_object_detection` method is deprecated for OwlVitProcessor and will be removed in v5. 
Use `post_process_grounded_object_detection` instead.', FutureWarning)\n return self.image_processor.post_process_object_detection(*args, **kwargs)\n\n def post_process_grounded_object_detection(self, outputs: 'Owlv2ObjectDetectionOutput', threshold: float=0.1, target_sizes: Optional[Union[TensorType, List[Tuple]]]=None, text_labels: Optional[List[List[str]]]=None):\n \"\"\"\n Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,\n bottom_right_x, bottom_right_y) format.\n\n Args:\n outputs ([`Owlv2ObjectDetectionOutput`]):\n Raw outputs of the model.\n threshold (`float`, *optional*, defaults to 0.1):\n Score threshold to keep object detection predictions.\n target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*):\n Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size\n `(height, width)` of each image in the batch. If unset, predictions will not be resized.\n text_labels (`List[List[str]]`, *optional*):\n List of lists of text labels for each image in the batch. If unset, \"text_labels\" in output will be\n set to `None`.\n\n Returns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the following keys:\n - \"scores\": The confidence scores for each predicted box on the image.\n - \"labels\": Indexes of the classes predicted by the model on the image.\n - \"boxes\": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.\n - \"text_labels\": The text labels for each predicted bounding box on the image.\n \"\"\"\n output = self.image_processor.post_process_object_detection(outputs=outputs, threshold=threshold, target_sizes=target_sizes)\n if text_labels is not None and len(text_labels) != len(output):\n raise ValueError('Make sure that you pass in as many lists of text labels as images')\n if text_labels is not None:\n for image_output, image_text_labels in zip(output, text_labels):\n object_text_labels = [image_text_labels[i] for i in image_output['labels']]\n image_output['text_labels'] = object_text_labels\n else:\n for image_output in output:\n image_output['text_labels'] = None\n return output\n\n def post_process_image_guided_detection(self, outputs: 'Owlv2ImageGuidedObjectDetectionOutput', threshold: float=0.0, nms_threshold: float=0.3, target_sizes: Optional[Union[TensorType, List[Tuple]]]=None):\n \"\"\"\n Converts the output of [`Owlv2ForObjectDetection.image_guided_detection`] into the format expected by the COCO\n api.\n\n Args:\n outputs ([`Owlv2ImageGuidedObjectDetectionOutput`]):\n Raw outputs of the model.\n threshold (`float`, *optional*, defaults to 0.0):\n Minimum confidence threshold to use to filter out predicted boxes.\n nms_threshold (`float`, *optional*, defaults to 0.3):\n IoU threshold for non-maximum suppression of overlapping boxes.\n target_sizes (`torch.Tensor`, *optional*):\n Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in\n the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. 
If left to\n None, predictions will not be unnormalized.\n\n Returns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the following keys:\n - \"scores\": The confidence scores for each predicted box on the image.\n - \"boxes\": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.\n - \"labels\": Set to `None`.\n \"\"\"\n return self.image_processor.post_process_image_guided_detection(outputs=outputs, threshold=threshold, nms_threshold=nms_threshold, target_sizes=target_sizes)\n\n def batch_decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, **kwargs)\n\n def decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to\n the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, **kwargs)", "docstring": "Constructs an Owlv2 processor which wraps [`Owlv2ImageProcessor`] and [`CLIPTokenizer`]/[`CLIPTokenizerFast`] into\na single processor that inherits both the image processor and tokenizer functionalities. See the\n[`~OwlViTProcessor.__call__`] and [`~OwlViTProcessor.decode`] for more information.\n\nArgs:\n image_processor ([`Owlv2ImageProcessor`]):\n The image processor is a required input.\n tokenizer ([`CLIPTokenizer`, `CLIPTokenizerFast`]):\n The tokenizer is a required input."} +{"repo": "transformers", "function": "class ColQwen2ForRetrievalOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n embeddings: Optional[torch.Tensor] = None\n past_key_values: Optional[Union[List[torch.FloatTensor], Cache]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Base class for ColQwen2 embeddings output.\n\nArgs:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Language modeling loss (for next-token prediction).\n embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n The embeddings of the model.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`)\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention 
softmax, used to compute the weighted average in the self-attention\n heads."} +{"repo": "tensorflow", "function": "def initialize_tpu_system_impl(cluster_resolver, tpu_cluster_resolver_cls):\n if tpu_cluster_resolver_cls is None or not issubclass(tpu_cluster_resolver_cls, cluster_resolver_lib.ClusterResolver) or (not hasattr(tpu_cluster_resolver_cls, 'tpu_hardware_feature')):\n raise TypeError('tpu_cluster_resolver_cls is not tf.distribute.cluster_resolver.TPUClusterResolver.')\n logging.info('Deallocate tpu buffers before initializing tpu system.')\n context.context()._clear_caches()\n context.context().clear_kernel_cache()\n gc.collect()\n job = None\n if cluster_resolver is None:\n if context.executing_eagerly():\n curr_device = device.DeviceSpec.from_string(context.context().device_name)\n if curr_device.job is not None:\n job = '{}/replica:0/task:0'.format(curr_device.job)\n cluster_resolver = tpu_cluster_resolver_cls('')\n assert isinstance(cluster_resolver, tpu_cluster_resolver_cls)\n tpu_name = compat.as_text(cluster_resolver._tpu)\n if tpu_name in _INITIALIZED_TPU_SYSTEMS:\n logging.warning('TPU system %s has already been initialized. Reinitializing the TPU can cause previously created variables on TPU to be lost.', tpu_name)\n logging.info('Initializing the TPU system: %s', tpu_name)\n if tpu_name not in _LOCAL_MASTERS:\n job = '{}/replica:0/task:0'.format(cluster_resolver.get_job_name())\n if context.executing_eagerly():\n\n @def_function.function(autograph=False)\n def _tpu_init_fn():\n return tpu.initialize_system(job=job, compilation_failure_closes_chips=False, tpu_cancellation_closes_chips=False)\n run_eagerly = def_function.functions_run_eagerly()\n if run_eagerly:\n logging.warning('It looks like tf.function behavior was disabled, perhaps using tf.config.run_functions_eagerly. tf.tpu.experimental.initialize_tpu_system requires tf.function to work. This primitive will override the disable.')\n def_function.run_functions_eagerly(False)\n try:\n with ops.device(tpu._tpu_system_device_name(job)):\n output = _tpu_init_fn()\n context.async_wait()\n except errors.InvalidArgumentError as e:\n raise errors.NotFoundError(None, None, 'TPUs not found in the cluster. 
Failed in initialization: ' + str(e))\n finally:\n if run_eagerly is not None:\n def_function.run_functions_eagerly(run_eagerly)\n context.context()._initialize_logical_devices()\n serialized_topology = output.numpy()\n elif not ops.executing_eagerly_outside_functions():\n master = cluster_resolver.master()\n cluster_spec = cluster_resolver.cluster_spec()\n session_config = config_pb2.ConfigProto(allow_soft_placement=True)\n if cluster_spec:\n session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())\n with ops.Graph().as_default():\n with session_lib.Session(config=session_config, target=master) as sess:\n serialized_topology = sess.run(tpu.initialize_system())\n else:\n with ops.device(tpu._tpu_system_device_name(job)):\n serialized_topology = tpu.initialize_system(job=job, compilation_failure_closes_chips=False)\n return serialized_topology\n logging.info('Finished initializing TPU system.')\n tpu_topology = topology.Topology(serialized=serialized_topology)\n cluster_resolver.set_tpu_topology(serialized_topology)\n _INITIALIZED_TPU_SYSTEMS[tpu_name] = tpu_topology\n _tpu_worker_address.get_cell('address').set(cluster_resolver.get_master())\n return tpu_topology", "docstring": "Implementation for tpu.experimental.initialize_tpu_system.\n\nKept separate to avoid tpu_oss code duplication.\n\nInitialize the TPU devices.\n\nArgs:\n cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,\n which provides information about the TPU cluster.\n tpu_cluster_resolver_cls: a reference to\n tf.distribute.cluster_resolver.TPUClusterResolver so that an instance\n of it can be initialized if cluster_resolver is None.\nReturns:\n The tf.tpu.Topology object for the topology of the TPU cluster. If called\n inside tf.function, it returns the serialized topology object instead.\n\nRaises:\n RuntimeError: If running inside a tf.function.\n NotFoundError: If no TPU devices found in eager mode.\n TypeError: If tpu_cluster_resolver_cls is\n not tf.distribute.cluster_resolver.TPUClusterResolver."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Union[Tuple[torch.Tensor], BaseModelOutput]]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, cache_position: Optional[torch.Tensor]=None) -> Seq2SeqModelOutput:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if encoder_outputs is None:\n encoder_outputs = self.encoder(input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n 
elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):\n encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)\n decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position)\n if not return_dict:\n return decoder_outputs + encoder_outputs\n return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)", "docstring": "decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n Marian uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If\n `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\ndecoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\ncross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,\n 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, MarianModel\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"Helsinki-NLP/opus-mt-en-de\")\n>>> model = MarianModel.from_pretrained(\"Helsinki-NLP/opus-mt-en-de\")\n\n>>> inputs = tokenizer(\"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\")\n>>> decoder_inputs = tokenizer(\n... \" Studien haben gezeigt dass es hilfreich ist einen Hund zu besitzen\",\n... return_tensors=\"pt\",\n... add_special_tokens=False,\n... 
)\n>>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids)\n\n>>> last_hidden_states = outputs.last_hidden_state\n>>> list(last_hidden_states.shape)\n[1, 26, 512]\n```"} +{"repo": "tensorflow", "function": "def as_str_any(value, encoding='utf-8'):\n if isinstance(value, bytes):\n return as_str(value, encoding=encoding)\n else:\n return str(value)", "docstring": "Converts input to `str` type.\n\n Uses `str(value)`, except for `bytes` typed inputs, which are converted\n using `as_str`.\n\nArgs:\n value: A object that can be converted to `str`.\n encoding: Encoding for `bytes` typed inputs.\n\nReturns:\n A `str` object."} +{"repo": "tensorflow", "function": "def _useRPCConfig(self):\n return config_pb2.ConfigProto(rpc_options=rpc_options_pb2.RPCOptions(use_rpc_for_inprocess_master=True))", "docstring": "Return a `tf.compat.v1.ConfigProto` that ensures we use the RPC stack for tests.\n\nThis configuration ensures that we continue to exercise the gRPC\nstack when testing, rather than using the in-process optimization,\nwhich avoids using gRPC as the transport between a client and\nmaster in the same process.\n\nReturns:\n A `tf.compat.v1.ConfigProto`."} +{"repo": "transformers", "function": "def generalized_box_iou(boxes1, boxes2):\n if not (boxes1[:, 2:] >= boxes1[:, :2]).all():\n raise ValueError(f'boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}')\n if not (boxes2[:, 2:] >= boxes2[:, :2]).all():\n raise ValueError(f'boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}')\n iou, union = box_iou(boxes1, boxes2)\n top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2])\n bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])\n width_height = (bottom_right - top_left).clamp(min=0)\n area = width_height[:, :, 0] * width_height[:, :, 1]\n return iou - (area - union) / area", "docstring": "Generalized IoU from https://giou.stanford.edu/. 
The boxes should be in [x0, y0, x1, y1] (corner) format.\n\nReturns:\n `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2)"} +{"repo": "tensorflow", "function": "class Subtract(_Merge):\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n super(Subtract, self).build(input_shape)\n if len(input_shape) != 2:\n raise ValueError('A `Subtract` layer should be called on exactly 2 inputs')\n\n def _merge_function(self, inputs):\n if len(inputs) != 2:\n raise ValueError('A `Subtract` layer should be called on exactly 2 inputs')\n return inputs[0] - inputs[1]", "docstring": "Layer that subtracts two inputs.\n\nIt takes as input a list of tensors of size 2,\nboth of the same shape, and returns a single tensor, (inputs[0] - inputs[1]),\nalso of the same shape.\n\nExamples:\n\n```python\n import keras\n\n input1 = keras.layers.Input(shape=(16,))\n x1 = keras.layers.Dense(8, activation='relu')(input1)\n input2 = keras.layers.Input(shape=(32,))\n x2 = keras.layers.Dense(8, activation='relu')(input2)\n # Equivalent to subtracted = keras.layers.subtract([x1, x2])\n subtracted = keras.layers.Subtract()([x1, x2])\n\n out = keras.layers.Dense(4)(subtracted)\n model = keras.models.Model(inputs=[input1, input2], outputs=out)\n```"} +{"repo": "keras", "function": "class RandomElasticTransform(BaseImagePreprocessingLayer):\n _USE_BASE_FACTOR = False\n _FACTOR_BOUNDS = (0, 1)\n _SUPPORTED_INTERPOLATION = ('nearest', 'bilinear')\n _SUPPORTED_FILL_MODES = {'constant', 'nearest', 'wrap', 'mirror', 'reflect'}\n\n def __init__(self, factor=1.0, scale=1.0, interpolation='bilinear', fill_mode='reflect', fill_value=0.0, value_range=(0, 255), seed=None, data_format=None, **kwargs):\n super().__init__(data_format=data_format, **kwargs)\n self._set_factor(factor)\n self.scale = self._set_factor_by_name(scale, 'scale')\n self.interpolation = interpolation\n self.fill_mode = fill_mode\n self.fill_value = fill_value\n self.value_range = value_range\n self.seed = seed\n self.generator = SeedGenerator(seed)\n if interpolation not in self._SUPPORTED_INTERPOLATION:\n raise NotImplementedError(f'Unknown `interpolation` {interpolation}. Expected one of {self._SUPPORTED_INTERPOLATION}.')\n if fill_mode not in self._SUPPORTED_FILL_MODES:\n raise NotImplementedError(f'Unknown `fill_mode` {fill_mode}. Expected one of {self._SUPPORTED_FILL_MODES}.')\n if self.data_format == 'channels_first':\n self.height_axis = -2\n self.width_axis = -1\n self.channel_axis = -3\n else:\n self.height_axis = -3\n self.width_axis = -2\n self.channel_axis = -1\n\n def _set_factor_by_name(self, factor, name):\n error_msg = f'The `{name}` argument should be a number (or a list of two numbers) in the range [{self._FACTOR_BOUNDS[0]}, {self._FACTOR_BOUNDS[1]}]. 
Received: factor={factor}'\n if isinstance(factor, (tuple, list)):\n if len(factor) != 2:\n raise ValueError(error_msg)\n if factor[0] > self._FACTOR_BOUNDS[1] or factor[1] < self._FACTOR_BOUNDS[0]:\n raise ValueError(error_msg)\n lower, upper = sorted(factor)\n elif isinstance(factor, (int, float)):\n if factor < self._FACTOR_BOUNDS[0] or factor > self._FACTOR_BOUNDS[1]:\n raise ValueError(error_msg)\n factor = abs(factor)\n lower, upper = [max(-factor, self._FACTOR_BOUNDS[0]), factor]\n else:\n raise ValueError(error_msg)\n return (lower, upper)\n\n def get_random_transformation(self, data, training=True, seed=None):\n if not training:\n return None\n if self.scale[1] == 0 or self.factor[1] == 0:\n return None\n if isinstance(data, dict):\n images = data['images']\n else:\n images = data\n images_shape = self.backend.shape(images)\n unbatched = len(images_shape) == 3\n if unbatched:\n batch_size = 1\n else:\n batch_size = images_shape[0]\n seed = seed or self._get_seed_generator(self.backend._backend)\n transformation_probability = self.backend.random.uniform(shape=(batch_size,), minval=self.factor[0], maxval=self.factor[1], seed=seed)\n random_threshold = self.backend.random.uniform(shape=(batch_size,), minval=0.0, maxval=1.0, seed=seed)\n apply_transform = random_threshold < transformation_probability\n distortion_factor = self.backend.random.uniform(shape=(), minval=self.scale[0], maxval=self.scale[1], seed=seed, dtype=self.compute_dtype)\n return {'apply_transform': apply_transform, 'distortion_factor': distortion_factor, 'seed': seed}\n\n def get_elastic_transform_params(self, height, width, factor):\n alpha_scale = 0.1 * factor\n sigma_scale = 0.05 * factor\n alpha = max(height, width) * alpha_scale\n sigma = min(height, width) * sigma_scale\n return (alpha, sigma)\n\n def transform_images(self, images, transformation, training=True):\n images = self.backend.cast(images, self.compute_dtype)\n if training and transformation is not None:\n apply_transform = transformation['apply_transform']\n distortion_factor = transformation['distortion_factor']\n seed = transformation['seed']\n height, width = (images.shape[self.height_axis], images.shape[self.width_axis])\n alpha, sigma = self.get_elastic_transform_params(height, width, distortion_factor)\n transformed_images = self.backend.image.elastic_transform(images, alpha=alpha, sigma=sigma, interpolation=self.interpolation, fill_mode=self.fill_mode, fill_value=self.fill_value, seed=seed, data_format=self.data_format)\n apply_transform = apply_transform[:, None, None] if len(images.shape) == 3 else apply_transform[:, None, None, None]\n images = self.backend.numpy.where(apply_transform, transformed_images, images)\n images = self.backend.numpy.clip(images, self.value_range[0], self.value_range[1])\n images = self.backend.cast(images, self.compute_dtype)\n return images\n\n def transform_labels(self, labels, transformation, training=True):\n return labels\n\n def transform_segmentation_masks(self, segmentation_masks, transformation, training=True):\n return self.transform_images(segmentation_masks, transformation, training=training)\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n base_config = super().get_config()\n config = {'factor': self.factor, 'scale': self.scale, 'interpolation': self.interpolation, 'fill_mode': self.fill_mode, 'fill_value': self.fill_value, 'value_range': self.value_range, 'seed': self.seed}\n return {**base_config, **config}", "docstring": "A preprocessing layer that 
applies random elastic transformations.\n\nThis layer distorts input images by applying elastic deformations,\nsimulating a physically realistic transformation. The magnitude of the\ndistortion is controlled by the `scale` parameter, while the `factor`\ndetermines the probability of applying the transformation.\n\nArgs:\n factor: A single float or a tuple of two floats.\n `factor` controls the probability of applying the transformation.\n - `factor=0.0` ensures the transformation is never applied.\n - `factor=1.0` means the transformation is always applied.\n - If a tuple `(min, max)` is provided, a probability value\n is sampled between `min` and `max` for each image.\n - If a single float is provided, a probability is sampled\n between `0.0` and the given float.\n Default is 1.0.\n scale: A float or a tuple of two floats defining the magnitude of\n the distortion applied.\n - If a tuple `(min, max)` is provided, a random scale value is\n sampled within this range.\n - If a single float is provided, a random scale value is sampled\n between `0.0` and the given float.\n Default is 1.0.\n interpolation: Interpolation mode. Supported values: `\"nearest\"`,\n `\"bilinear\"`.\n fill_mode: Points outside the boundaries of the input are filled\n according to the given mode. Available methods are `\"constant\"`,\n `\"nearest\"`, `\"wrap\"` and `\"reflect\"`. Defaults to `\"reflect\"`.\n - `\"reflect\"`: `(d c b a | a b c d | d c b a)`\n The input is extended by reflecting about the edge of the last\n pixel.\n - `\"constant\"`: `(k k k k | a b c d | k k k k)`\n The input is extended by filling all values beyond\n the edge with the same constant value k specified by\n `fill_value`.\n - `\"wrap\"`: `(a b c d | a b c d | a b c d)`\n The input is extended by wrapping around to the opposite edge.\n - `\"nearest\"`: `(a a a a | a b c d | d d d d)`\n The input is extended by the nearest pixel.\n Note that when using torch backend, `\"reflect\"` is redirected to\n `\"mirror\"` `(c d c b | a b c d | c b a b)` because torch does not\n support `\"reflect\"`.\n Note that torch backend does not support `\"wrap\"`.\n fill_value: a float representing the value to be filled outside the\n boundaries when `fill_mode=\"constant\"`.\n value_range: the range of values the incoming images will have.\n Represented as a two-number tuple written `[low, high]`. This is\n typically either `[0, 1]` or `[0, 255]` depending on how your\n preprocessing pipeline is set up.\n seed: Integer. Used to create a random seed."} +{"repo": "mobly", "function": "def _parse_cli_args(argv):\n parser = argparse.ArgumentParser(description='Mobly Suite Executable.')\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-c', '--config', type=str, metavar='', help='Path to the test configuration file.')\n group.add_argument('-l', '--list_tests', action='store_true', help='Print the names of the tests defined in a script without executing them.')\n parser.add_argument('--tests', '--test_case', nargs='+', type=str, metavar='[ClassA[_test_suffix][.test_a] ClassB[_test_suffix][.test_b] ...]', help='A list of test classes and optional tests to execute. 
Note: test_suffix based names are only supported when running by suite class')\n parser.add_argument('-tb', '--test_bed', nargs='+', type=str, metavar='[ ...]', help='Specify which test beds to run tests on.')\n parser.add_argument('-v', '--verbose', action='store_true', help='Set console logger level to DEBUG')\n if not argv:\n argv = sys.argv[1:]\n return parser.parse_known_args(argv)[0]", "docstring": "Parses cli args that are consumed by Mobly.\n\nArgs:\n argv: A list that is then parsed as cli args. If None, defaults to cli\n input.\n\nReturns:\n Namespace containing the parsed args."} +{"repo": "keras", "function": "def ctc(y_true, y_pred):\n if len(ops.shape(y_true)) != 2:\n raise ValueError(f'Targets `y_true` are expected to be a tensor of shape `(batch_size, max_length)` in integer format. Received: y_true.shape={ops.shape(y_true)}')\n if len(ops.shape(y_pred)) != 3:\n raise ValueError(f'Logits `y_pred` are expected to be a tensor of shape `(batch_size, max_length, num_classes)`. Received: y_pred.shape={ops.shape(y_pred)}')\n mask_index = 0\n batch_length = ops.shape(y_pred)[0]\n input_length = ops.shape(y_pred)[1]\n input_length = input_length * ops.ones((batch_length,), dtype='int32')\n label_length = ops.cast(ops.sum(y_true != mask_index, axis=-1), dtype='int32')\n return ops.ctc_loss(y_true, y_pred, label_length, input_length, mask_index=mask_index)", "docstring": "CTC (Connectionist Temporal Classification) loss.\n\nArgs:\n y_true: A tensor of shape `(batch_size, max_length)` containing\n the true labels in integer format. `0` always represents\n the blank/mask index and should not be used for classes.\n y_pred: A tensor of shape `(batch_size, max_length, num_classes)`\n containing logits (the output of your model).\n They should *not* be normalized via softmax."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')\n elif input_ids is not None:\n input = input_ids\n input_ids = input_ids.view(-1, input_ids.shape[-1])\n elif inputs_embeds is not None:\n input = inputs_embeds[:, :, -1]\n else:\n raise ValueError('You have to specify either input_ids or inputs_embeds')\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids)\n embed_pos = self.embed_positions(input)\n embed_pos = embed_pos.to(inputs_embeds.device)\n hidden_states = inputs_embeds + embed_pos\n hidden_states = self.layernorm_embedding(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n attention_mask = self._update_full_mask(attention_mask, inputs_embeds)\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n if head_mask is not None:\n if head_mask.size()[0] != len(self.layers):\n raise 
ValueError(f'The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n to_drop = False\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n to_drop = True\n if to_drop:\n layer_outputs = (None, None)\n else:\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, output_attentions)\n else:\n layer_outputs = encoder_layer(hidden_states, attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple."} +{"repo": "transformers", "function": "def get_head_mask(self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool=False) -> Tensor:\n if head_mask is not None:\n head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)\n if is_attention_chunked is True:\n head_mask = head_mask.unsqueeze(-1)\n else:\n head_mask = [None] * num_hidden_layers\n return head_mask", "docstring": "Prepare the head mask if needed.\n\nArgs:\n head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):\n The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).\n num_hidden_layers (`int`):\n The number of hidden layers in the model.\n is_attention_chunked (`bool`, *optional*, defaults to `False`):\n Whether or not the attentions scores are computed by chunks or not.\n\nReturns:\n `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with\n `[None]` for each layer."} +{"repo": "transformers", "function": "class DetrDecoderOutput(BaseModelOutputWithCrossAttentions):\n intermediate_hidden_states: Optional[torch.FloatTensor] = None", "docstring": "Base class for outputs of the DETR decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions,\nnamely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them\ngone through a layernorm. This is useful when training the model with auxiliary decoding losses.\n\nArgs:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer\n plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in\n the self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,\n used to compute the weighted average in the cross-attention heads.\n intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):\n Intermediate decoder activations, i.e. 
the output of each decoder layer, each of them gone through a\n layernorm."} +{"repo": "transformers", "function": "class Llama4VisionConfig(PretrainedConfig):\n base_model_tp_plan = {'model.layers.*.self_attn.q_proj': 'colwise', 'model.layers.*.self_attn.k_proj': 'colwise', 'model.layers.*.self_attn.v_proj': 'colwise', 'model.layers.*.self_attn.o_proj': 'rowwise', 'vision_adapter.mlp.fc1': 'colwise', 'vision_adapter.mlp.fc2': 'rowwise', 'patch_embedding.linear': 'colwise_rep'}\n model_type = 'llama4_vision_model'\n base_config_key = 'vision_config'\n\n def __init__(self, hidden_size: int=768, hidden_act: str='gelu', num_hidden_layers: int=34, num_attention_heads: int=16, num_channels: int=3, intermediate_size: int=5632, vision_output_dim: int=7680, image_size: int=448, patch_size: int=14, norm_eps: float=1e-05, vision_feature_layer=-1, vision_feature_select_strategy='default', initializer_range: float=0.02, pixel_shuffle_ratio=0.5, projector_input_dim=4096, projector_output_dim=4096, multi_modal_projector_bias=False, projector_dropout=0.0, attention_dropout=0.0, rope_theta=10000, **kwargs):\n self.hidden_size = hidden_size\n self.hidden_act = hidden_act\n self.num_hidden_layers = num_hidden_layers\n self.num_channels = num_channels\n self.intermediate_size = intermediate_size\n self.image_size = image_size\n self.vision_output_dim = vision_output_dim\n self.patch_size = patch_size\n self.norm_eps = norm_eps\n self.num_attention_heads = num_attention_heads\n self.initializer_range = initializer_range\n self.pixel_shuffle_ratio = pixel_shuffle_ratio\n self.projector_input_dim = projector_input_dim\n self.projector_output_dim = projector_output_dim\n self.multi_modal_projector_bias = multi_modal_projector_bias\n self.projector_dropout = projector_dropout\n self.attention_dropout = attention_dropout\n self.vision_feature_layer = vision_feature_layer\n self.vision_feature_select_strategy = vision_feature_select_strategy\n self.rope_theta = rope_theta\n super().__init__(**kwargs)", "docstring": "This is the configuration class to store the configuration of a [`Llama4VisionModel`]. It is used to instantiate a\nLlama4 vision model according to the specified arguments, defining the model architecture. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the Llama4 109B.\n\ne.g. [meta-llama/Llama-4-Scout-17B-16E](https://huggingface.co/meta-llama/Llama-4-Scout-17B-16E)\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
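A small usage sketch for the `get_head_mask` record above, assuming current `transformers` behavior (the method is available on pretrained models); the BERT config here is only an illustrative host model:

```python
import torch
from transformers import BertConfig, BertModel

model = BertModel(BertConfig(num_hidden_layers=4, num_attention_heads=4))

# No mask: one None per layer, so every layer skips head masking.
print(model.get_head_mask(None, num_hidden_layers=4))
# -> [None, None, None, None]

# A 1-D [num_heads] mask is broadcast to 5-D and shared across layers.
mask = torch.tensor([0.0, 1.0, 1.0, 1.0])  # zero out head 0 everywhere
print(model.get_head_mask(mask, num_hidden_layers=4).shape)
# -> torch.Size([4, 1, 4, 1, 1])
```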
If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` `\"quick_gelu\"` are supported.\n num_hidden_layers (`int`, *optional*, defaults to 34):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_channels (`int`, *optional*, defaults to 3):\n Number of channels in the input image.\n intermediate_size (`int`, *optional*, defaults to 5632):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in the Transformer encoder.\n vision_output_dim (`int`, *optional*, defaults to 7680):\n Dimensionality of the vision model output. Includes output of transformer\n encoder with intermediate layers and global transformer encoder.\n image_size (`int`, *optional*, defaults to 448):\n The size (resolution) of each image *tile*.\n patch_size (`int`, *optional*, defaults to 14):\n The size (resolution) of each patch.\n norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the layer normalization layers.\n vision_feature_layer (``, *optional*, defaults to -1): TODO\n vision_feature_select_strategy (`int`, *optional*, defaults to `\"default\"`): TODO\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n pixel_shuffle_ratio (`int`, *optional*, defaults to 0.5): TODO\n projector_input_dim (`int`, *optional*, defaults to 4096): TODO\n projector_output_dim (`int`, *optional*, defaults to 4096): TODO\n multi_modal_projector_bias (`int`, *optional*, defaults to `False`): TODO\n projector_dropout (`int`, *optional*, defaults to 0.0): TODO\n attention_dropout (`int`, *optional*, defaults to 0.0): TODO\n rope_theta (`int`, *optional*, defaults to 10000): TODO"} +{"repo": "tensorflow", "function": "def _get_gcc_major_version(path_to_gcc: str) -> int:\n logging.info('Running echo __GNUC__ | %s -E -P -', path_to_gcc)\n gcc_version_proc = subprocess.run([path_to_gcc, '-E', '-P', '-'], input='__GNUC__', check=True, capture_output=True, text=True)\n major_version = int(gcc_version_proc.stdout)\n logging.info('%s reports major version %s.', path_to_gcc, major_version)\n return major_version", "docstring": "Gets the major version of the gcc at `path_to_gcc`.\n\nArgs:\n path_to_gcc: Path to a gcc executable\n\nReturns:\n The major version."} +{"repo": "tensorflow", "function": "def __init__(self, x, name):\n self.x = x\n self.name = name", "docstring": "Construct DivideDelegateWithName.\n\nArgs:\n x: Tensor to use as left operand in operator overloads\n name: The name that is preferred for the op created."} +{"repo": "beam", "function": "def InsertAll(self, request, global_params=None):\n config = self.GetMethodConfig('InsertAll')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Streams data into BigQuery one record at a time without needing to run a load job. 
Requires the WRITER dataset role.\n\nArgs:\n request: (BigqueryTabledataInsertAllRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (TableDataInsertAllResponse) The response message."} +{"repo": "transformers", "function": "class DFineConfig(PretrainedConfig):\n model_type = 'd_fine'\n layer_types = ['basic', 'bottleneck']\n attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads'}\n\n def __init__(self, initializer_range=0.01, initializer_bias_prior_prob=None, layer_norm_eps=1e-05, batch_norm_eps=1e-05, backbone_config=None, backbone=None, use_pretrained_backbone=False, use_timm_backbone=False, freeze_backbone_batch_norms=True, backbone_kwargs=None, encoder_hidden_dim=256, encoder_in_channels=[512, 1024, 2048], feat_strides=[8, 16, 32], encoder_layers=1, encoder_ffn_dim=1024, encoder_attention_heads=8, dropout=0.0, activation_dropout=0.0, encode_proj_layers=[2], positional_encoding_temperature=10000, encoder_activation_function='gelu', activation_function='silu', eval_size=None, normalize_before=False, hidden_expansion=1.0, d_model=256, num_queries=300, decoder_in_channels=[256, 256, 256], decoder_ffn_dim=1024, num_feature_levels=3, decoder_n_points=4, decoder_layers=6, decoder_attention_heads=8, decoder_activation_function='relu', attention_dropout=0.0, num_denoising=100, label_noise_ratio=0.5, box_noise_scale=1.0, learn_initial_query=False, anchor_image_size=None, with_box_refine=True, is_encoder_decoder=True, matcher_alpha=0.25, matcher_gamma=2.0, matcher_class_cost=2.0, matcher_bbox_cost=5.0, matcher_giou_cost=2.0, use_focal_loss=True, auxiliary_loss=True, focal_loss_alpha=0.75, focal_loss_gamma=2.0, weight_loss_vfl=1.0, weight_loss_bbox=5.0, weight_loss_giou=2.0, weight_loss_fgl=0.15, weight_loss_ddf=1.5, eos_coefficient=0.0001, eval_idx=-1, layer_scale=1, max_num_bins=32, reg_scale=4.0, depth_mult=1.0, top_prob_values=4, lqe_hidden_dim=64, lqe_layers=2, decoder_offset_scale=0.5, decoder_method='default', up=0.5, **kwargs):\n self.initializer_range = initializer_range\n self.initializer_bias_prior_prob = initializer_bias_prior_prob\n self.layer_norm_eps = layer_norm_eps\n self.batch_norm_eps = batch_norm_eps\n if backbone_config is None and backbone is None:\n logger.info('`backbone_config` and `backbone` are `None`. 
Initializing the config with the default `HGNet-V2` backbone.')\n backbone_model_type = 'hgnet_v2'\n config_class = CONFIG_MAPPING[backbone_model_type]\n backbone_config = config_class(num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type='bottleneck', hidden_act='relu', downsample_in_first_stage=False, downsample_in_bottleneck=False, out_features=None, out_indices=[2, 3, 4])\n elif isinstance(backbone_config, dict):\n backbone_model_type = backbone_config.pop('model_type')\n config_class = CONFIG_MAPPING[backbone_model_type]\n backbone_config = config_class.from_dict(backbone_config)\n verify_backbone_config_arguments(use_timm_backbone=use_timm_backbone, use_pretrained_backbone=use_pretrained_backbone, backbone=backbone, backbone_config=backbone_config, backbone_kwargs=backbone_kwargs)\n self.backbone_config = backbone_config\n self.backbone = backbone\n self.use_pretrained_backbone = use_pretrained_backbone\n self.use_timm_backbone = use_timm_backbone\n self.freeze_backbone_batch_norms = freeze_backbone_batch_norms\n self.backbone_kwargs = backbone_kwargs\n self.encoder_hidden_dim = encoder_hidden_dim\n self.encoder_in_channels = encoder_in_channels\n self.feat_strides = feat_strides\n self.encoder_attention_heads = encoder_attention_heads\n self.encoder_ffn_dim = encoder_ffn_dim\n self.dropout = dropout\n self.activation_dropout = activation_dropout\n self.encode_proj_layers = encode_proj_layers\n self.encoder_layers = encoder_layers\n self.positional_encoding_temperature = positional_encoding_temperature\n self.eval_size = eval_size\n self.normalize_before = normalize_before\n self.encoder_activation_function = encoder_activation_function\n self.activation_function = activation_function\n self.hidden_expansion = hidden_expansion\n self.d_model = d_model\n self.num_queries = num_queries\n self.decoder_ffn_dim = decoder_ffn_dim\n self.decoder_in_channels = decoder_in_channels\n self.num_feature_levels = num_feature_levels\n self.decoder_n_points = decoder_n_points\n self.decoder_layers = decoder_layers\n self.decoder_attention_heads = decoder_attention_heads\n self.decoder_activation_function = decoder_activation_function\n self.attention_dropout = attention_dropout\n self.num_denoising = num_denoising\n self.label_noise_ratio = label_noise_ratio\n self.box_noise_scale = box_noise_scale\n self.learn_initial_query = learn_initial_query\n self.anchor_image_size = anchor_image_size\n self.auxiliary_loss = auxiliary_loss\n self.with_box_refine = with_box_refine\n self.matcher_alpha = matcher_alpha\n self.matcher_gamma = matcher_gamma\n self.matcher_class_cost = matcher_class_cost\n self.matcher_bbox_cost = matcher_bbox_cost\n self.matcher_giou_cost = matcher_giou_cost\n self.use_focal_loss = use_focal_loss\n self.focal_loss_alpha = focal_loss_alpha\n self.focal_loss_gamma = focal_loss_gamma\n self.weight_loss_vfl = weight_loss_vfl\n self.weight_loss_bbox = weight_loss_bbox\n self.weight_loss_giou = weight_loss_giou\n self.weight_loss_fgl = weight_loss_fgl\n self.weight_loss_ddf = weight_loss_ddf\n self.eos_coefficient = eos_coefficient\n self.eval_idx = eval_idx\n self.layer_scale = layer_scale\n self.max_num_bins = max_num_bins\n self.reg_scale = reg_scale\n self.depth_mult = depth_mult\n self.decoder_offset_scale = decoder_offset_scale\n self.decoder_method = decoder_method\n self.top_prob_values = top_prob_values\n self.lqe_hidden_dim = lqe_hidden_dim\n self.lqe_layers = lqe_layers\n self.up = up\n if isinstance(self.decoder_n_points, 
list):\n if len(self.decoder_n_points) != self.num_feature_levels:\n raise ValueError(f'Length of decoder_n_points list ({len(self.decoder_n_points)}) must match num_feature_levels ({self.num_feature_levels}).')\n head_dim = self.d_model // self.decoder_attention_heads\n if head_dim * self.decoder_attention_heads != self.d_model:\n raise ValueError(f'Embedded dimension {self.d_model} must be divisible by decoder_attention_heads {self.decoder_attention_heads}')\n super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)\n\n @property\n def num_attention_heads(self) -> int:\n return self.encoder_attention_heads\n\n @property\n def hidden_size(self) -> int:\n return self.d_model\n\n @classmethod\n def from_backbone_configs(cls, backbone_config: PretrainedConfig, **kwargs):\n \"\"\"Instantiate a [`DFineConfig`] (or a derived class) from a pre-trained backbone model configuration and DETR model\n configuration.\n\n Args:\n backbone_config ([`PretrainedConfig`]):\n The backbone configuration.\n\n Returns:\n [`DFineConfig`]: An instance of a configuration object\n \"\"\"\n return cls(backbone_config=backbone_config, **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`DFineModel`]. It is used to instantiate a D-FINE\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of D-FINE-X-COCO \"[ustc-community/dfine-xlarge-coco\"](https://huggingface.co/ustc-community/dfine-xlarge-coco\").\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n initializer_range (`float`, *optional*, defaults to 0.01):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_bias_prior_prob (`float`, *optional*):\n The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`.\n If `None`, `prior_prob` computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights.\n layer_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the layer normalization layers.\n batch_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the batch normalization layers.\n backbone_config (`Dict`, *optional*, defaults to `RTDetrResNetConfig()`):\n The configuration of the backbone model.\n backbone (`str`, *optional*):\n Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this\n will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`\n is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.\n use_pretrained_backbone (`bool`, *optional*, defaults to `False`):\n Whether to use pretrained weights for the backbone.\n use_timm_backbone (`bool`, *optional*, defaults to `False`):\n Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers\n library.\n freeze_backbone_batch_norms (`bool`, *optional*, defaults to `True`):\n Whether to freeze the batch normalization layers in the backbone.\n backbone_kwargs (`dict`, *optional*):\n Keyword arguments to be passed to AutoBackbone when loading from a checkpoint\n e.g. `{'out_indices': (0, 1, 2, 3)}`. 
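The `from_backbone_configs` classmethod above wraps an existing backbone configuration in a full detector config. A minimal sketch, assuming `DFineConfig` and `HGNetV2Config` (the default backbone named in the record) are both importable from `transformers`:

```python
from transformers import DFineConfig, HGNetV2Config

# Build a D-FINE detector config around an explicit backbone config.
backbone_config = HGNetV2Config()
config = DFineConfig.from_backbone_configs(backbone_config=backbone_config)

print(config.model_type)                  # 'd_fine'
print(config.backbone_config.model_type)  # 'hgnet_v2'
```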
Cannot be specified if `backbone_config` is set.\n encoder_hidden_dim (`int`, *optional*, defaults to 256):\n Dimension of the layers in hybrid encoder.\n encoder_in_channels (`list`, *optional*, defaults to `[512, 1024, 2048]`):\n Multi level features input for encoder.\n feat_strides (`List[int]`, *optional*, defaults to `[8, 16, 32]`):\n Strides used in each feature map.\n encoder_layers (`int`, *optional*, defaults to 1):\n Total of layers to be used by the encoder.\n encoder_ffn_dim (`int`, *optional*, defaults to 1024):\n Dimension of the \"intermediate\" (often named feed-forward) layer in decoder.\n encoder_attention_heads (`int`, *optional*, defaults to 8):\n Number of attention heads for each attention layer in the Transformer encoder.\n dropout (`float`, *optional*, defaults to 0.0):\n The ratio for all dropout layers.\n activation_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for activations inside the fully connected layer.\n encode_proj_layers (`List[int]`, *optional*, defaults to `[2]`):\n Indexes of the projected layers to be used in the encoder.\n positional_encoding_temperature (`int`, *optional*, defaults to 10000):\n The temperature parameter used to create the positional encodings.\n encoder_activation_function (`str`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n activation_function (`str`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the general layer. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n eval_size (`Tuple[int, int]`, *optional*):\n Height and width used to computes the effective height and width of the position embeddings after taking\n into account the stride.\n normalize_before (`bool`, *optional*, defaults to `False`):\n Determine whether to apply layer normalization in the transformer encoder layer before self-attention and\n feed-forward modules.\n hidden_expansion (`float`, *optional*, defaults to 1.0):\n Expansion ratio to enlarge the dimension size of RepVGGBlock and CSPRepLayer.\n d_model (`int`, *optional*, defaults to 256):\n Dimension of the layers exclude hybrid encoder.\n num_queries (`int`, *optional*, defaults to 300):\n Number of object queries.\n decoder_in_channels (`list`, *optional*, defaults to `[256, 256, 256]`):\n Multi level features dimension for decoder\n decoder_ffn_dim (`int`, *optional*, defaults to 1024):\n Dimension of the \"intermediate\" (often named feed-forward) layer in decoder.\n num_feature_levels (`int`, *optional*, defaults to 3):\n The number of input feature levels.\n decoder_n_points (`int`, *optional*, defaults to 4):\n The number of sampled keys in each feature level for each attention head in the decoder.\n decoder_layers (`int`, *optional*, defaults to 6):\n Number of decoder layers.\n decoder_attention_heads (`int`, *optional*, defaults to 8):\n Number of attention heads for each attention layer in the Transformer decoder.\n decoder_activation_function (`str`, *optional*, defaults to `\"relu\"`):\n The non-linear activation function (function or string) in the decoder. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n num_denoising (`int`, *optional*, defaults to 100):\n The total number of denoising tasks or queries to be used for contrastive denoising.\n label_noise_ratio (`float`, *optional*, defaults to 0.5):\n The fraction of denoising labels to which random noise should be added.\n box_noise_scale (`float`, *optional*, defaults to 1.0):\n Scale or magnitude of noise to be added to the bounding boxes.\n learn_initial_query (`bool`, *optional*, defaults to `False`):\n Indicates whether the initial query embeddings for the decoder should be learned during training\n anchor_image_size (`Tuple[int, int]`, *optional*):\n Height and width of the input image used during evaluation to generate the bounding box anchors. If None, automatic generate anchor is applied.\n with_box_refine (`bool`, *optional*, defaults to `True`):\n Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes\n based on the predictions from the previous layer.\n is_encoder_decoder (`bool`, *optional*, defaults to `True`):\n Whether the architecture has an encoder decoder structure.\n matcher_alpha (`float`, *optional*, defaults to 0.25):\n Parameter alpha used by the Hungarian Matcher.\n matcher_gamma (`float`, *optional*, defaults to 2.0):\n Parameter gamma used by the Hungarian Matcher.\n matcher_class_cost (`float`, *optional*, defaults to 2.0):\n The relative weight of the class loss used by the Hungarian Matcher.\n matcher_bbox_cost (`float`, *optional*, defaults to 5.0):\n The relative weight of the bounding box loss used by the Hungarian Matcher.\n matcher_giou_cost (`float`, *optional*, defaults to 2.0):\n The relative weight of the giou loss of used by the Hungarian Matcher.\n use_focal_loss (`bool`, *optional*, defaults to `True`):\n Parameter informing if focal focal should be used.\n auxiliary_loss (`bool`, *optional*, defaults to `True`):\n Whether auxiliary decoding losses (loss at each decoder layer) are to be used.\n focal_loss_alpha (`float`, *optional*, defaults to 0.75):\n Parameter alpha used to compute the focal loss.\n focal_loss_gamma (`float`, *optional*, defaults to 2.0):\n Parameter gamma used to compute the focal loss.\n weight_loss_vfl (`float`, *optional*, defaults to 1.0):\n Relative weight of the varifocal loss in the object detection loss.\n weight_loss_bbox (`float`, *optional*, defaults to 5.0):\n Relative weight of the L1 bounding box loss in the object detection loss.\n weight_loss_giou (`float`, *optional*, defaults to 2.0):\n Relative weight of the generalized IoU loss in the object detection loss.\n weight_loss_fgl (`float`, *optional*, defaults to 0.15):\n Relative weight of the fine-grained localization loss in the object detection loss.\n weight_loss_ddf (`float`, *optional*, defaults to 1.5):\n Relative weight of the decoupled distillation focal loss in the object detection loss.\n eos_coefficient (`float`, *optional*, defaults to 0.0001):\n Relative classification weight of the 'no-object' class in the object detection loss.\n eval_idx (`int`, *optional*, defaults to -1):\n Index of the decoder layer to use for evaluation. If negative, counts from the end\n (e.g., -1 means use the last layer). 
This allows for early prediction in the decoder\n stack while still training later layers.\n layer_scale (`float`, *optional*, defaults to `1.0`):\n Scaling factor for the hidden dimension in later decoder layers. Used to adjust the\n model capacity after the evaluation layer.\n max_num_bins (`int`, *optional*, defaults to 32):\n Maximum number of bins for the distribution-guided bounding box refinement.\n Higher values allow for more fine-grained localization but increase computation.\n reg_scale (`float`, *optional*, defaults to 4.0):\n Scale factor for the regression distribution. Controls the range and granularity\n of the bounding box refinement process.\n depth_mult (`float`, *optional*, defaults to 1.0):\n Multiplier for the number of blocks in RepNCSPELAN4 layers. Used to scale the model's\n depth while maintaining its architecture.\n top_prob_values (`int`, *optional*, defaults to 4):\n Number of top probability values to consider from each corner's distribution.\n lqe_hidden_dim (`int`, *optional*, defaults to 64):\n Hidden dimension size for the Location Quality Estimator (LQE) network.\n lqe_layers (`int`, *optional*, defaults to 2):\n Number of layers in the Location Quality Estimator MLP.\n decoder_offset_scale (`float`, *optional*, defaults to 0.5):\n Offset scale used in deformable attention.\n decoder_method (`str`, *optional*, defaults to `\"default\"`):\n The method to use for the decoder: `\"default\"` or `\"discrete\"`.\n up (`float`, *optional*, defaults to 0.5):\n Controls the upper bounds of the Weighting Function."} +{"repo": "tensorflow", "function": "def _VerifyGeneratedGradients(grads, op: ops.Operation):\n if op.type == 'While' or op.type == 'StatelessWhile':\n return\n if len(grads) != len(op.inputs):\n raise ValueError(f'Num gradients {len(grads)} generated for op {op.node_def} do not match num inputs {len(op.inputs)}')", "docstring": "Verify that gradients are valid in number and type.\n\nArgs:\n grads: List of generated gradients.\n op: Operation for which the gradients where generated.\n\nRaises:\n ValueError: if sizes of gradients and inputs don't match.\n TypeError: if type of any gradient is not valid for its input."} +{"repo": "temporian", "function": "def calendar_month(self: EventSetOrNode, tz: Union[str, float, int]=0) -> EventSetOrNode:\n from temporian.core.operators.calendar.month import calendar_month\n return calendar_month(self, tz)", "docstring": "Obtains the month the timestamps in an\n[`EventSet`][temporian.EventSet]'s sampling are in.\n\nFeatures in the input are ignored, only the timestamps are used and\nthey must be unix timestamps (`is_unix_timestamp=True`).\n\nOutput feature contains numbers between 1 and 12.\n\nBy default, the timezone is UTC unless the `tz` argument is specified,\nas an offset in hours or a timezone name. See\n[`EventSet.calendar_hour()`][temporian.EventSet.calendar_hour] for an\nexample using timezones.\n\nUsage example:\n ```python\n >>> a = tp.event_set(\n ... timestamps=[\"2023-02-04\", \"2023-02-20\", \"2023-03-01\", \"2023-05-07\"],\n ... name='special_events'\n ... 
)\n >>> b = a.calendar_month()\n >>> b\n indexes: ...\n features: [('calendar_month', int32)]\n events:\n (4 events):\n timestamps: [...]\n 'calendar_month': [2 2 3 5]\n ...\n\n ```\n\nArgs:\n tz: timezone name (see `pytz.all_timezones`) or UTC offset in hours.\n\nReturns:\n EventSet with a single feature with the month each timestamp in\n `sampling` belongs to."} +{"repo": "transformers", "function": "class DeiTConfig(PretrainedConfig):\n model_type = 'deit'\n\n def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, pooler_output_size=None, pooler_act='tanh', **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.image_size = image_size\n self.patch_size = patch_size\n self.num_channels = num_channels\n self.qkv_bias = qkv_bias\n self.encoder_stride = encoder_stride\n self.pooler_output_size = pooler_output_size if pooler_output_size else hidden_size\n self.pooler_act = pooler_act", "docstring": "This is the configuration class to store the configuration of a [`DeiTModel`]. It is used to instantiate an DeiT\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the DeiT\n[facebook/deit-base-distilled-patch16-224](https://huggingface.co/facebook/deit-base-distilled-patch16-224)\narchitecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n hidden_dropout_prob (`float`, *optional*, defaults to 0.0):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 16):\n The size (resolution) of each patch.\n num_channels (`int`, *optional*, defaults to 3):\n The number of input channels.\n qkv_bias (`bool`, *optional*, defaults to `True`):\n Whether to add a bias to the queries, keys and values.\n encoder_stride (`int`, *optional*, defaults to 16):\n Factor to increase the spatial resolution by in the decoder head for masked image modeling.\n pooler_output_size (`int`, *optional*):\n Dimensionality of the pooler layer. If None, defaults to `hidden_size`.\n pooler_act (`str`, *optional*, defaults to `\"tanh\"`):\n The activation function to be used by the pooler. Keys of ACT2FN are supported for Flax and\n Pytorch, and elements of https://www.tensorflow.org/api_docs/python/tf/keras/activations are\n supported for Tensorflow.\n\nExample:\n\n```python\n>>> from transformers import DeiTConfig, DeiTModel\n\n>>> # Initializing a DeiT deit-base-distilled-patch16-224 style configuration\n>>> configuration = DeiTConfig()\n\n>>> # Initializing a model (with random weights) from the deit-base-distilled-patch16-224 style configuration\n>>> model = DeiTModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "keras", "function": "class KerasFileEditor:\n\n def __init__(self, filepath):\n self.filepath = filepath\n self.metadata = None\n self.config = None\n self.model = None\n self.console = rich.console.Console(highlight=False)\n if filepath.endswith('.keras'):\n zf = zipfile.ZipFile(filepath, 'r')\n weights_store = H5IOStore(saving_lib._VARS_FNAME + '.h5', archive=zf, mode='r')\n with zf.open(saving_lib._CONFIG_FILENAME, 'r') as f:\n config_json = f.read()\n with zf.open(saving_lib._METADATA_FILENAME, 'r') as f:\n metadata_json = f.read()\n self.config = json.loads(config_json)\n self.metadata = json.loads(metadata_json)\n elif filepath.endswith('.weights.h5'):\n weights_store = H5IOStore(filepath, mode='r')\n else:\n raise ValueError(f'Invalid filename: expected a `.keras` `.weights.h5` extension. 
Received: filepath={filepath}')\n weights_dict, object_metadata = self._extract_weights_from_store(weights_store.h5_file)\n weights_store.close()\n self.weights_dict = weights_dict\n self.object_metadata = object_metadata\n self.console.print(self._generate_filepath_info(rich_style=True))\n if self.metadata is not None:\n self.console.print(self._generate_metadata_info(rich_style=True))\n\n def summary(self):\n \"\"\"Prints the weight structure of the opened file.\"\"\"\n self._weights_summary_cli()\n\n def compare(self, reference_model):\n \"\"\"Compares the opened file to a reference model.\n\n This method will list all mismatches between the\n currently opened file and the provided reference model.\n\n Args:\n reference_model: Model instance to compare to.\n\n Returns:\n Dict with the following keys:\n `'status'`, `'error_count'`, `'match_count'`.\n Status can be `'success'` or `'error'`.\n `'error_count'` is the number of mismatches found.\n `'match_count'` is the number of matching weights found.\n \"\"\"\n self.console.print('Running comparison')\n ref_spec = {}\n get_weight_spec_of_saveable(reference_model, ref_spec)\n\n def _compare(target, ref_spec, inner_path, target_name, ref_name, error_count, match_count, checked_paths):\n base_inner_path = inner_path\n for ref_key, ref_val in ref_spec.items():\n inner_path = base_inner_path + '/' + ref_key\n if inner_path in checked_paths:\n continue\n if ref_key not in target:\n error_count += 1\n checked_paths.add(inner_path)\n if isinstance(ref_val, dict):\n self.console.print(f'[color(160)]...Object [bold]{inner_path}[/] present in {ref_name}, missing from {target_name}[/]')\n self.console.print(f' In {ref_name}, {inner_path} contains the following keys: {list(ref_val.keys())}')\n else:\n self.console.print(f'[color(160)]...Weight [bold]{inner_path}[/] present in {ref_name}, missing from {target_name}[/]')\n elif isinstance(ref_val, dict):\n _error_count, _match_count = _compare(target[ref_key], ref_spec[ref_key], inner_path, target_name, ref_name, error_count=error_count, match_count=match_count, checked_paths=checked_paths)\n error_count += _error_count\n match_count += _match_count\n elif target[ref_key].shape != ref_val.shape:\n error_count += 1\n checked_paths.add(inner_path)\n self.console.print(f'[color(160)]...Weight shape mismatch for [bold]{inner_path}[/][/]\\n In {ref_name}: shape={ref_val.shape}\\n In {target_name}: shape={target[ref_key].shape}')\n else:\n match_count += 1\n return (error_count, match_count)\n checked_paths = set()\n error_count, match_count = _compare(self.weights_dict, ref_spec, inner_path='', target_name='saved file', ref_name='reference model', error_count=0, match_count=0, checked_paths=checked_paths)\n _error_count, _ = _compare(ref_spec, self.weights_dict, inner_path='', target_name='reference model', ref_name='saved file', error_count=0, match_count=0, checked_paths=checked_paths)\n error_count += _error_count\n self.console.print('\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500')\n if error_count == 0:\n status = 'success'\n self.console.print('[color(28)][bold]Comparison successful:[/] saved file is compatible with the reference model[/]')\n if match_count == 1:\n plural = ''\n else:\n plural = 's'\n self.console.print(f' Found {match_count} matching weight{plural}')\n else:\n status = 'error'\n if error_count == 1:\n plural = ''\n else:\n plural = 's'\n self.console.print(f'[color(160)][bold]Found {error_count} 
error{plural}:[/] saved file is not compatible with the reference model[/]')\n return {'status': status, 'error_count': error_count, 'match_count': match_count}\n\n def _edit_object(self, edit_fn, source_name, target_name=None):\n if target_name is not None and '/' in target_name:\n raise ValueError(f\"Argument `target_name` should be a leaf name, not a full path name. Received: target_name='{target_name}'\")\n if '/' in source_name:\n elements = source_name.split('/')\n weights_dict = self.weights_dict\n for e in elements[:-1]:\n if e not in weights_dict:\n raise ValueError(f\"Path '{source_name}' not found in model.\")\n weights_dict = weights_dict[e]\n if elements[-1] not in weights_dict:\n raise ValueError(f\"Path '{source_name}' not found in model.\")\n edit_fn(weights_dict, source_name=elements[-1], target_name=target_name)\n else:\n\n def count_occurences(d, name, count=0):\n for k in d:\n if isinstance(d[k], dict):\n count += count_occurences(d[k], name, count)\n if name in d:\n count += 1\n return count\n occurrences = count_occurences(self.weights_dict, source_name)\n if occurrences > 1:\n raise ValueError(f\"Name '{source_name}' occurs more than once in the model; try passing a complete path\")\n if occurrences == 0:\n raise ValueError(f\"Source name '{source_name}' does not appear in the model. Use `editor.weights_summary()` to list all objects.\")\n\n def _edit(d):\n for k in d:\n if isinstance(d[k], dict):\n _edit(d[k])\n if source_name in d:\n edit_fn(d, source_name=source_name, target_name=target_name)\n _edit(self.weights_dict)\n\n def rename_object(self, object_name, new_name):\n \"\"\"Rename an object in the file (e.g. a layer).\n\n Args:\n object_name: String, name or path of the\n object to rename (e.g. `\"dense_2\"` or\n `\"layers/dense_2\"`).\n new_name: String, new name of the object.\n \"\"\"\n\n def rename_fn(weights_dict, source_name, target_name):\n weights_dict[target_name] = weights_dict[source_name]\n weights_dict.pop(source_name)\n self._edit_object(rename_fn, object_name, new_name)\n\n def delete_object(self, object_name):\n \"\"\"Removes an object from the file (e.g. a layer).\n\n Args:\n object_name: String, name or path of the\n object to delete (e.g. `\"dense_2\"` or\n `\"layers/dense_2\"`).\n \"\"\"\n\n def delete_fn(weights_dict, source_name, target_name=None):\n weights_dict.pop(source_name)\n self._edit_object(delete_fn, object_name)\n\n def add_object(self, object_path, weights):\n \"\"\"Add a new object to the file (e.g. a layer).\n\n Args:\n object_path: String, full path of the\n object to add (e.g. `\"layers/dense_2\"`).\n weights: Dict mapping weight names to weight\n values (arrays),\n e.g. `{\"0\": kernel_value, \"1\": bias_value}`.\n \"\"\"\n if not isinstance(weights, dict):\n raise ValueError(f\"Argument `weights` should be a dict where keys are weight names (usually '0', '1', etc.) and values are NumPy arrays. Received: type(weights)={type(weights)}\")\n if '/' in object_path:\n elements = object_path.split('/')\n partial_path = '/'.join(elements[:-1])\n weights_dict = self.weights_dict\n for e in elements[:-1]:\n if e not in weights_dict:\n raise ValueError(f\"Path '{partial_path}' not found in model.\")\n weights_dict = weights_dict[e]\n weights_dict[elements[-1]] = weights\n else:\n self.weights_dict[object_path] = weights\n\n def delete_weight(self, object_name, weight_name):\n \"\"\"Removes a weight from an existing object.\n\n Args:\n object_name: String, name or path of the\n object from which to remove the weight\n (e.g. 
`\"dense_2\"` or `\"layers/dense_2\"`).\n weight_name: String, name of the weight to\n delete (e.g. `\"0\"`).\n \"\"\"\n\n def delete_weight_fn(weights_dict, source_name, target_name=None):\n if weight_name not in weights_dict[source_name]:\n raise ValueError(f'Weight {weight_name} not found in object {object_name}. Weights found: {list(weights_dict[source_name].keys())}')\n weights_dict[source_name].pop(weight_name)\n self._edit_object(delete_weight_fn, object_name)\n\n def add_weights(self, object_name, weights):\n \"\"\"Add one or more new weights to an existing object.\n\n Args:\n object_name: String, name or path of the\n object to add the weights to\n (e.g. `\"dense_2\"` or `\"layers/dense_2\"`).\n weights: Dict mapping weight names to weight\n values (arrays),\n e.g. `{\"0\": kernel_value, \"1\": bias_value}`.\n \"\"\"\n if not isinstance(weights, dict):\n raise ValueError(f\"Argument `weights` should be a dict where keys are weight names (usually '0', '1', etc.) and values are NumPy arrays. Received: type(weights)={type(weights)}\")\n\n def add_weight_fn(weights_dict, source_name, target_name=None):\n weights_dict[source_name].update(weights)\n self._edit_object(add_weight_fn, object_name)\n\n def save(self, filepath):\n \"\"\"Save the edited weights file.\n\n Args:\n filepath: Path to save the file to.\n Must be a `.weights.h5` file.\n \"\"\"\n filepath = str(filepath)\n if not filepath.endswith('.weights.h5'):\n raise ValueError(f'Invalid `filepath` argument: expected a `.weights.h5` extension. Received: filepath={filepath}')\n weights_store = H5IOStore(filepath, mode='w')\n\n def _save(weights_dict, weights_store, inner_path):\n vars_to_create = {}\n for name, value in weights_dict.items():\n if isinstance(value, dict):\n if value:\n _save(weights_dict[name], weights_store, inner_path=inner_path + '/' + name)\n else:\n vars_to_create[name] = value\n if vars_to_create:\n var_store = weights_store.make(inner_path)\n for name, value in vars_to_create.items():\n var_store[name] = value\n _save(self.weights_dict, weights_store, inner_path='')\n weights_store.close()\n\n def resave_weights(self, filepath):\n self.save(filepath)\n\n def _extract_weights_from_store(self, data, metadata=None, inner_path=''):\n metadata = metadata or {}\n object_metadata = {}\n for k, v in data.attrs.items():\n object_metadata[k] = v\n if object_metadata:\n metadata[inner_path] = object_metadata\n result = collections.OrderedDict()\n for key in data.keys():\n inner_path = inner_path + '/' + key\n value = data[key]\n if isinstance(value, h5py.Group):\n if len(value) == 0:\n continue\n if 'vars' in value.keys() and len(value['vars']) == 0:\n continue\n if hasattr(value, 'keys'):\n if 'vars' in value.keys():\n result[key], metadata = self._extract_weights_from_store(value['vars'], metadata=metadata, inner_path=inner_path)\n else:\n result[key], metadata = self._extract_weights_from_store(value, metadata=metadata, inner_path=inner_path)\n else:\n result[key] = value[()]\n return (result, metadata)\n\n def _generate_filepath_info(self, rich_style=False):\n if rich_style:\n filepath = f\"'{self.filepath}'\"\n filepath = f'{summary_utils.highlight_symbol(filepath)}'\n else:\n filepath = f\"'{self.filepath}'\"\n return f'Keras model file {filepath}'\n\n def _generate_config_info(self, rich_style=False):\n return pprint.pformat(self.config)\n\n def _generate_metadata_info(self, rich_style=False):\n version = self.metadata['keras_version']\n date = self.metadata['date_saved']\n if rich_style:\n version = 
f'{summary_utils.highlight_symbol(version)}'\n date = f'{summary_utils.highlight_symbol(date)}'\n return f'Saved with Keras {version} - date: {date}'\n\n def _print_weights_structure(self, weights_dict, indent=0, is_first=True, prefix='', inner_path=''):\n for idx, (key, value) in enumerate(weights_dict.items()):\n inner_path = inner_path + '/' + key\n is_last = idx == len(weights_dict) - 1\n if is_first:\n is_first = False\n connector = '> '\n elif is_last:\n connector = '\u2514\u2500 '\n else:\n connector = '\u251c\u2500 '\n if isinstance(value, dict):\n bold_key = summary_utils.bold_text(key)\n object_label = f'{prefix}{connector}{bold_key}'\n if inner_path in self.object_metadata:\n metadata = self.object_metadata[inner_path]\n if 'name' in metadata:\n name = metadata['name']\n object_label += f\" ('{name}')\"\n self.console.print(object_label)\n if is_last:\n appended = ' '\n else:\n appended = '\u2502 '\n new_prefix = prefix + appended\n self._print_weights_structure(value, indent + 1, is_first=is_first, prefix=new_prefix, inner_path=inner_path)\n elif hasattr(value, 'shape'):\n bold_key = summary_utils.bold_text(key)\n self.console.print(f'{prefix}{connector}{bold_key}:' + f' shape={value.shape}, dtype={value.dtype}')\n else:\n self.console.print(f'{prefix}{connector}{key}: {value}')\n\n def _weights_summary_cli(self):\n self.console.print('Weights structure')\n self._print_weights_structure(self.weights_dict, prefix=' ' * 2)\n\n def _weights_summary_interactive(self):\n\n def _generate_html_weights(dictionary, margin_left=0, font_size=1):\n html = ''\n for key, value in dictionary.items():\n if isinstance(value, dict) and value:\n html += f'
<details style=\"margin-left: {margin_left}px;\">' + f'<summary style=\"font-size: {font_size}em;\">{key}</summary>' + _generate_html_weights(value, margin_left + 20, font_size - 1) + '</details>'\n                else:\n                    html += f'<div style=\"margin-left: {margin_left}px;\">' + f'<span style=\"font-size: {font_size}em;\">' + f'{key} : shape={value.shape}' + f', dtype={value.dtype}' + f'</span><div>' + f'{display_weight(value)}' + '</div>' + '</div>
'\n return html\n output = 'Weights structure'\n initialize_id_counter()\n output += _generate_html_weights(self.weights_dict)\n ipython.display.display(ipython.display.HTML(output))", "docstring": "Utility to inspect, edit, and resave Keras weights files.\n\nYou will find this class useful when adapting\nan old saved weights file after having made\narchitecture changes to a model.\n\nArgs:\n filepath: The path to a local file to inspect and edit.\n\nExamples:\n\n```python\neditor = KerasFileEditor(\"my_model.weights.h5\")\n\n# Displays current contents\neditor.summary()\n\n# Remove the weights of an existing layer\neditor.delete_object(\"layers/dense_2\")\n\n# Add the weights of a new layer\neditor.add_object(\"layers/einsum_dense\", weights={\"0\": ..., \"1\": ...})\n\n# Save the weights of the edited model\neditor.resave_weights(\"edited_model.weights.h5\")\n```"} +{"repo": "tensorflow", "function": "def cardinality(dataset):\n return gen_dataset_ops.dataset_cardinality(dataset._variant_tensor)", "docstring": "Returns the cardinality of `dataset`, if known.\n\nThe operation returns the cardinality of `dataset`. The operation may return\n`tf.data.experimental.INFINITE_CARDINALITY` if `dataset` contains an infinite\nnumber of elements or `tf.data.experimental.UNKNOWN_CARDINALITY` if the\nanalysis fails to determine the number of elements in `dataset` (e.g. when the\ndataset source is a file).\n\n>>> dataset = tf.data.Dataset.range(42)\n>>> print(tf.data.experimental.cardinality(dataset).numpy())\n42\n>>> dataset = dataset.repeat()\n>>> cardinality = tf.data.experimental.cardinality(dataset)\n>>> print((cardinality == tf.data.experimental.INFINITE_CARDINALITY).numpy())\nTrue\n>>> dataset = dataset.filter(lambda x: True)\n>>> cardinality = tf.data.experimental.cardinality(dataset)\n>>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy())\nTrue\n\nArgs:\n dataset: A `tf.data.Dataset` for which to determine cardinality.\n\nReturns:\n A scalar `tf.int64` `Tensor` representing the cardinality of `dataset`. 
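Because `cardinality` can return the sentinel constants instead of a real count, callers usually branch before converting to `int`; a small defensive-usage sketch of the API documented above:

```python
import tensorflow as tf

def describe_cardinality(dataset):
    # cardinality() yields a scalar int64 tensor; compare it against the
    # named sentinels before treating it as an element count.
    c = tf.data.experimental.cardinality(dataset)
    if c == tf.data.experimental.INFINITE_CARDINALITY:
        return "infinite"
    if c == tf.data.experimental.UNKNOWN_CARDINALITY:
        return "unknown"
    return f"{int(c)} elements"

print(describe_cardinality(tf.data.Dataset.range(42)))           # 42 elements
print(describe_cardinality(tf.data.Dataset.range(42).repeat()))  # infinite
print(describe_cardinality(
    tf.data.Dataset.range(42).filter(lambda x: True)))           # unknown
```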
If\n the cardinality is infinite or unknown, the operation returns the named\n constant `INFINITE_CARDINALITY` and `UNKNOWN_CARDINALITY` respectively."} +{"repo": "tensorflow", "function": "def _ready(op: ops.Operation, sess: session.Session, msg) -> Tuple[bool, Optional[str]]:\n if op is None:\n return (True, None)\n else:\n try:\n ready_value = sess.run(op)\n if ready_value is None or ready_value.dtype == np.int32 or ready_value.size == 0:\n return (True, None)\n else:\n non_initialized_varnames = ', '.join([i.decode('utf-8') for i in ready_value])\n return (False, 'Variables not initialized: ' + non_initialized_varnames)\n except errors.FailedPreconditionError as e:\n if 'uninitialized' not in str(e):\n logging.warning('%s : error [%s]', msg, str(e))\n raise e\n return (False, str(e))", "docstring": "Checks if the model is ready or not, as determined by op.\n\nArgs:\n op: An op, either _ready_op or _ready_for_local_init_op, which defines the\n readiness of the model.\n sess: A `Session`.\n msg: A message to log to warning if not ready\n\nReturns:\n A tuple (is_ready, msg), where is_ready is True if ready and False\n otherwise, and msg is `None` if the model is ready, a `String` with the\n reason why it is not ready otherwise."} +{"repo": "tensorflow", "function": "def enter(self, layer, inputs, build_graph, training, saving=None):\n state = {'layer': layer, 'inputs': inputs, 'build_graph': build_graph, 'training': training, 'saving': saving}\n return CallContextManager(self, state)", "docstring": "Push a Layer and its inputs and state onto the current call context.\n\nArgs:\n layer: The `Layer` whose `call` is currently active.\n inputs: The inputs to the currently active `Layer`.\n build_graph: Whether currently inside a Graph or FuncGraph.\n training: Whether currently executing in training or inference mode.\n saving: Whether currently saving to SavedModel.\n\nReturns:\n Context manager."} +{"repo": "tensorflow", "function": "def _get_pmap_impl(f, devices, has_tpu):\n if has_tpu:\n output_is_list = [False]\n\n def recorder(args, kwargs, res):\n del args, kwargs\n output_is_list[0] = isinstance(res, list)\n return res\n f = _record_result_type(recorder, f)\n\n def tf_f(*tf_args):\n \"\"\"A wrapper for `f` that takes/returns tensors.\"\"\"\n np_args = _tf_to_np(tf_args)\n np_out = f(*np_args)\n return np_out\n if has_tpu:\n\n @polymorphic_function.function(autograph=False)\n def fn(inputs):\n res = tpu.replicate(tf_f, inputs)\n if res and isinstance(res[0], list) and (len(res[0]) == 1) and (not output_is_list[0]):\n res = [x[0] for x in res]\n return res\n return fn\n else:\n jit_tf_f = polymorphic_function.function(tf_f, autograph=False)\n\n @polymorphic_function.function(autograph=False)\n def fn(all_per_device_args):\n \"\"\"Multi-device function with calls placed on the correct device.\"\"\"\n results = []\n for per_device_args, device in zip(all_per_device_args, devices):\n with ops.device(device):\n results.append(jit_tf_f(*per_device_args))\n return results\n return fn", "docstring": "This is a helper function to return the pmap impl.\n\nArgs:\n f: a function that takes ndarrays and returns ndarrays.\n devices: a list of strings; the device list.\n has_tpu: boolean; whether `devices` contains TPU devices.\n\nReturns:\n A function that takes tensors and returns tensors."} +{"repo": "tensorflow", "function": "def standardize_sample_or_class_weights(x_weight, output_names, weight_type):\n if x_weight is None or (isinstance(x_weight, (list, tuple)) and len(x_weight) == 0):\n return 
[None for _ in output_names]\n if len(output_names) == 1:\n if isinstance(x_weight, (list, tuple)) and len(x_weight) == 1:\n return x_weight\n if isinstance(x_weight, dict) and output_names[0] in x_weight:\n return [x_weight[output_names[0]]]\n else:\n return [x_weight]\n if isinstance(x_weight, (list, tuple)):\n if len(x_weight) != len(output_names):\n raise ValueError('Provided `' + weight_type + '` was a list of ' + str(len(x_weight)) + ' elements, but the model has ' + str(len(output_names)) + ' outputs. You should provide one `' + weight_type + '`array per model output.')\n return x_weight\n if isinstance(x_weight, collections.abc.Mapping):\n generic_utils.check_for_unexpected_keys(weight_type, x_weight, output_names)\n x_weights = []\n for name in output_names:\n x_weights.append(x_weight.get(name))\n return x_weights\n else:\n raise TypeError('The model has multiple outputs, so `' + weight_type + '` should be either a list or a dict. Provided `' + weight_type + '` type not understood: ' + str(x_weight))", "docstring": "Maps `sample_weight` or `class_weight` to model outputs.\n\nArgs:\n x_weight: User-provided `sample_weight` or `class_weight` argument.\n output_names: List of output names (strings) in the model.\n weight_type: A string used purely for exception printing.\n\nReturns:\n A list of `sample_weight` or `class_weight` where there are exactly\n one element per model output.\n\nRaises:\n ValueError: In case of invalid user-provided argument."} +{"repo": "mobly", "function": "def get_settable_properties(cls):\n results = []\n for attr, value in vars(cls).items():\n if isinstance(value, property) and value.fset is not None:\n results.append(attr)\n return results", "docstring": "Gets the settable properties of a class.\n\nOnly returns the explicitly defined properties with setters.\n\nArgs:\n cls: A class in Python."} +{"repo": "tensorflow", "function": "def transform_function(self, fn, user_context):\n cache_subkey = self.get_caching_key(user_context)\n if self._cache.has(fn, cache_subkey):\n factory = self._cached_factory(fn, cache_subkey)\n else:\n with self._cache_lock:\n if self._cache.has(fn, cache_subkey):\n factory = self._cached_factory(fn, cache_subkey)\n else:\n logging.log(1, '%s is not cached for subkey %s', fn, cache_subkey)\n nodes, ctx = super(PyToPy, self).transform_function(fn, user_context)\n if isinstance(nodes, gast.Lambda):\n nodes = gast.Assign(targets=[gast.Name(ctx.info.name, ctx=gast.Store(), annotation=None, type_comment=None)], value=nodes)\n else:\n nodes.name = ctx.info.name\n if logging.has_verbosity(2):\n logging.log(2, 'Transformed %s:\\n\\n%s\\n', fn, parser.unparse(nodes))\n factory = _PythonFnFactory(ctx.info.name, fn.__code__.co_freevars, self.get_extra_locals())\n factory.create(nodes, ctx.namer, future_features=ctx.info.future_features)\n self._cache[fn][cache_subkey] = factory\n transformed_fn = factory.instantiate(globals_=fn.__globals__, closure=fn.__closure__ or (), defaults=fn.__defaults__, kwdefaults=getattr(fn, '__kwdefaults__', None))\n return (transformed_fn, factory.module, factory.source_map)", "docstring": "Transforms a function. See GenericTranspiler.transform_function.\n\nThis overload wraps the parent's `transform_function`, adding caching and\nfacilities to instantiate the output as a Python object. 
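A self-contained check of the mobly `get_settable_properties` helper above: only properties that define a setter are reported. The helper is inlined verbatim so the sketch runs without mobly installed; `Device` is a hypothetical class:

```python
def get_settable_properties(cls):
    # Inlined from the record above: keep attrs that are properties
    # with an explicit setter (fset is not None).
    results = []
    for attr, value in vars(cls).items():
        if isinstance(value, property) and value.fset is not None:
            results.append(attr)
    return results

class Device:
    """Hypothetical class: one settable property, one read-only one."""

    def __init__(self):
        self._debug_tag = None

    @property
    def debug_tag(self):
        return self._debug_tag

    @debug_tag.setter
    def debug_tag(self, tag):
        self._debug_tag = tag

    @property
    def serial(self):  # read-only: no setter, so not reported
        return "12345"

print(get_settable_properties(Device))  # ['debug_tag']
```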
It also\nadds facilities to make new symbols available to the generated Python code,\nvisible as local variables - see `get_extra_locals`.\n\nArgs:\n fn: A function or lambda.\n user_context: An opaque object (may be None) that is forwarded to\n transform_ast, through the ctx.user attribute.\n\nReturns:\n A tuple:\n * A function or lambda with the same signature and closure as `fn`\n * The temporary module into which the transformed function was loaded\n * The source map as a\n Dict[origin_info.LineLocation, origin_info.OriginInfo]"} +{"repo": "transformers", "function": "class Adafactor(Optimizer):\n\n def __init__(self, params, lr=None, eps=(1e-30, 0.001), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False):\n if lr is not None and relative_step:\n raise ValueError('Cannot combine manual `lr` and `relative_step=True` options')\n if warmup_init and (not relative_step):\n raise ValueError('`warmup_init=True` requires `relative_step=True`')\n defaults = {'lr': lr, 'eps': eps, 'clip_threshold': clip_threshold, 'decay_rate': decay_rate, 'beta1': beta1, 'weight_decay': weight_decay, 'scale_parameter': scale_parameter, 'relative_step': relative_step, 'warmup_init': warmup_init}\n super().__init__(params, defaults)\n\n @staticmethod\n def _get_lr(param_group, param_state):\n rel_step_sz = param_group['lr']\n if param_group['relative_step']:\n min_step = 1e-06 * param_state['step'] if param_group['warmup_init'] else 0.01\n rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state['step']))\n param_scale = 1.0\n if param_group['scale_parameter']:\n param_scale = max(param_group['eps'][1], param_state['RMS'])\n return param_scale * rel_step_sz\n\n @staticmethod\n def _get_options(param_group, param_shape):\n factored = len(param_shape) >= 2\n use_first_moment = param_group['beta1'] is not None\n return (factored, use_first_moment)\n\n @staticmethod\n def _rms(tensor):\n return tensor.norm(2) / tensor.numel() ** 0.5\n\n @staticmethod\n def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):\n r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)\n c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()\n return torch.mul(r_factor, c_factor)\n\n @torch.no_grad()\n def step(self, closure=None):\n \"\"\"\n Performs a single optimization step\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n grad = p.grad\n if grad.dtype in {torch.float16, torch.bfloat16}:\n grad = grad.float()\n if grad.is_sparse:\n raise RuntimeError('Adafactor does not support sparse gradients.')\n state = self.state[p]\n grad_shape = grad.shape\n factored, use_first_moment = self._get_options(group, grad_shape)\n if len(state) == 0:\n state['step'] = 0\n if use_first_moment:\n state['exp_avg'] = torch.zeros_like(grad)\n if factored:\n state['exp_avg_sq_row'] = torch.zeros(grad_shape[:-1]).to(grad)\n state['exp_avg_sq_col'] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)\n else:\n state['exp_avg_sq'] = torch.zeros_like(grad)\n state['RMS'] = 0\n else:\n if use_first_moment:\n state['exp_avg'] = state['exp_avg'].to(grad)\n if factored:\n state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)\n state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)\n else:\n state['exp_avg_sq'] = 
state['exp_avg_sq'].to(grad)\n p_data_fp32 = p\n if p.dtype in {torch.float16, torch.bfloat16}:\n p_data_fp32 = p_data_fp32.float()\n state['step'] += 1\n state['RMS'] = self._rms(p_data_fp32)\n lr = self._get_lr(group, state)\n beta2t = 1.0 - math.pow(state['step'], group['decay_rate'])\n update = grad ** 2 + group['eps'][0]\n if factored:\n exp_avg_sq_row = state['exp_avg_sq_row']\n exp_avg_sq_col = state['exp_avg_sq_col']\n exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t)\n exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t)\n update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)\n update.mul_(grad)\n else:\n exp_avg_sq = state['exp_avg_sq']\n exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)\n update = exp_avg_sq.rsqrt().mul_(grad)\n update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))\n update.mul_(lr)\n if use_first_moment:\n exp_avg = state['exp_avg']\n exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1'])\n update = exp_avg\n if group['weight_decay'] != 0:\n p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * lr)\n p_data_fp32.add_(-update)\n if p.dtype in {torch.float16, torch.bfloat16}:\n p.copy_(p_data_fp32)\n return loss", "docstring": "AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code:\nhttps://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py\n\nPaper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://huggingface.co/papers/1804.04235 Note that\nthis optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and\n`warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and\n`relative_step=False`.\n\nArguments:\n params (`Iterable[nn.parameter.Parameter]`):\n Iterable of parameters to optimize or dictionaries defining parameter groups.\n lr (`float`, *optional*):\n The external learning rate.\n eps (`Tuple[float, float]`, *optional*, defaults to `(1e-30, 0.001)`):\n Regularization constants for square gradient and parameter scale respectively\n clip_threshold (`float`, *optional*, defaults to 1.0):\n Threshold of root mean square of final gradient update\n decay_rate (`float`, *optional*, defaults to -0.8):\n Coefficient used to compute running averages of square\n beta1 (`float`, *optional*):\n Coefficient used for computing running averages of gradient\n weight_decay (`float`, *optional*, defaults to 0.0):\n Weight decay (L2 penalty)\n scale_parameter (`bool`, *optional*, defaults to `True`):\n If True, learning rate is scaled by root mean square\n relative_step (`bool`, *optional*, defaults to `True`):\n If True, time-dependent learning rate is computed instead of external learning rate\n warmup_init (`bool`, *optional*, defaults to `False`):\n Time-dependent learning rate computation depends on whether warm-up initialization is being used\n\nThis implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested.\n\nRecommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3):\n\n - Training without LR warmup or clip_threshold is not recommended.\n\n - use scheduled LR warm-up to fixed LR\n - use clip_threshold=1.0 (https://huggingface.co/papers/1804.04235)\n - Disable relative updates\n - Use scale_parameter=False\n - Additional optimizer operations like gradient clipping should not be used alongside 
Adafactor\n\nExample:\n\n```python\nAdafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)\n```\n\nOthers reported the following combination to work well:\n\n```python\nAdafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)\n```\n\nWhen using `lr=None` with [`Trainer`] you will most likely need to use the [`~optimization.AdafactorSchedule`]\nscheduler, as follows:\n\n```python\nfrom transformers.optimization import Adafactor, AdafactorSchedule\n\noptimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)\nlr_scheduler = AdafactorSchedule(optimizer)\ntrainer = Trainer(..., optimizers=(optimizer, lr_scheduler))\n```\n\nUsage:\n\n```python\n# replace AdamW with Adafactor\noptimizer = Adafactor(\n    model.parameters(),\n    lr=1e-3,\n    eps=(1e-30, 1e-3),\n    clip_threshold=1.0,\n    decay_rate=-0.8,\n    beta1=None,\n    weight_decay=0.0,\n    relative_step=False,\n    scale_parameter=False,\n    warmup_init=False,\n)\n```"} +{"repo": "transformers", "function": "def pad(self, images: 'torch.Tensor', size: int) -> 'torch.Tensor':\n    height, width = get_image_size(images, ChannelDimension.FIRST)\n    pad_height = (height // size + 1) * size - height\n    pad_width = (width // size + 1) * size - width\n    return F.pad(images, (0, 0, pad_width, pad_height), padding_mode='symmetric')", "docstring": "Pad an image to make the height and width divisible by `size`.\n\nArgs:\n    images (`torch.Tensor`):\n        Images to pad.\n    size (`int`):\n        The size to make the height and width divisible by.\n\nReturns:\n    `torch.Tensor`: The padded images."} +{"repo": "pyglove", "function": "def register_converter(src_type: Union[Type[Any], Tuple[Type[Any], ...]], dest_type: Union[Type[Any], Tuple[Type[Any], ...]], convert_fn: Callable[[Any], Any]) -> None:\n    _TYPE_CONVERTER_REGISTRY.register(src_type, dest_type, convert_fn)", "docstring": "Register converter from source type to destination type.\n\nExamples::\n\n  # Add converter from int to float.\n  pg.typing.register_converter(int, float, float)\n\n  assert pg.typing.Float().apply(1) == 1.0\n\n  # Add converter from a dict to class A.\n  def from_dict(d):\n    return A(**d)\n\n  pg.typing.register_converter(dict, A, from_dict)\n\n  assert isinstance(pg.typing.Object(A).apply({'x': 1, 'y': 2}), A)\n\nArgs:\n  src_type: Source value type.\n  dest_type: Target value type.\n  convert_fn: Function that performs the conversion, in signature\n    (src_type) -> dest_type."} +{"repo": "tensorflow", "function": "def _infer_num_gpus_per_worker(devices):\n    if _is_device_list_single_worker(devices):\n        return sum((1 for d in devices if _is_gpu_device(d)))\n    else:\n        device_dict = _group_device_list(devices)\n        num_gpus = None\n        for _, devices_in_task in device_dict.items():\n            for device_in_task in devices_in_task:\n                if num_gpus is None:\n                    num_gpus = sum((1 for d in device_in_task if _is_gpu_device(d)))\n                elif num_gpus != sum((1 for d in device_in_task if _is_gpu_device(d))):\n                    raise ValueError('All workers should have the same number of GPUs.')\n                for d in device_in_task:\n                    d_spec = tf_device.DeviceSpec.from_string(d)\n                    if d_spec.device_type == 'GPU' and d_spec.device_index >= num_gpus:\n                        raise ValueError('GPU `device_index` on a worker should be consecutive and start from 0.')\n        return num_gpus", "docstring": "Infers the number of GPUs on each worker.\n\nCurrently, to make multi-worker cross-device ops work, we need all workers to\nhave the same number of GPUs.\n\nArgs:\n  devices: a list of device strings, can be either local devices or remote 
devices.\n\nReturns:\n number of GPUs per worker.\n\nRaises:\n ValueError if workers have different number of GPUs or GPU indices are not\n consecutive and starting from 0."} +{"repo": "tensorflow", "function": "def _delegate_property(keras_tensor_cls, property_name):\n property_access = property(lambda self: InstanceProperty(property_name)(self))\n setattr(keras_tensor_cls, property_name, property_access)", "docstring": "Register property on a KerasTensor class.\n\nCalling this multiple times with the same arguments should be a no-op.\n\nThis method exposes a property on the KerasTensor class that will use an\n`InstanceProperty` layer to access the property on the represented\nintermediate values in the model.\n\nArgs:\n keras_tensor_cls: The KerasTensor subclass that should expose the property.\n property_name: The name of the property to expose and delegate to the\n represented (Composite)Tensor."} +{"repo": "transformers", "function": "def call(self, input_ids: TFModelInputType | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, encoder_hidden_states: np.ndarray | tf.Tensor | None=None, encoder_attention_mask: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, cross_attn_head_mask: np.ndarray | tf.Tensor | None=None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')\n elif input_ids is not None:\n input_shape = shape_list(input_ids)\n elif inputs_embeds is not None:\n input_shape = shape_list(inputs_embeds)[:-1]\n else:\n raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')\n past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0\n if position_ids is None:\n positions = self.embed_positions(input_shape, past_key_values_length)\n else:\n positions = self.embed_positions(input_shape, position_ids=position_ids)\n if inputs_embeds is None:\n check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n hidden_states = inputs_embeds\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)\n else:\n combined_attention_mask = _expand_mask(tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1])\n if attention_mask is not None:\n combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])\n if encoder_hidden_states is not None and encoder_attention_mask is not None:\n encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])\n hidden_states = self.layernorm_embedding(hidden_states + positions)\n hidden_states = self.dropout(hidden_states, training=training)\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attns = () if output_attentions and encoder_hidden_states is not None else None\n 
present_key_values = () if use_cache else None\n for attn_mask_name, attn_mask in [('head_mask', head_mask), ('cross_attn_head_mask', cross_attn_head_mask)]:\n if attn_mask is not None:\n tf.debugging.assert_equal(shape_list(attn_mask)[0], len(self.layers), message=f'The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for {shape_list(attn_mask)[0]}.')\n for idx, decoder_layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n dropout_probability = random.uniform(0, 1)\n if training and dropout_probability < self.layerdrop:\n continue\n past_key_value = past_key_values[idx] if past_key_values is not None else None\n hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value)\n if use_cache:\n present_key_values += (present_key_value,)\n if output_attentions:\n all_self_attns += (layer_self_attn,)\n if encoder_hidden_states is not None:\n all_cross_attns += (layer_cross_attn,)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if not return_dict:\n return (hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns)\n else:\n return TFBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attns)", "docstring": "Args:\n input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values\n selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. 
Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):\n            Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up\n            decoding.\n\n            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those\n            that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of\n            all `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n        inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n            This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n            than the model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n            returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n            for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple."} +{"repo": "beam", "function": "def _normalize_var_keyword_hint(hint, arg_name):\n    if not hint or type(hint) != dict:\n        raise TypeCheckError('Unexpected VAR_KEYWORD value: %s' % hint)\n    keys = list(hint.keys())\n    values = list(hint.values())\n    if len(values) == 1 and keys[0] == arg_name and isinstance(values[0], typehints.DictConstraint):\n        return values[0]\n    else:\n        return typehints.Dict[str, typehints.Union[values]]", "docstring": "Converts a var_keyword hint into Dict[str, V] form.\n\nArgs:\n  hint: (dict) Should either contain a pair (arg_name,\n    Dict[str, V]), or one or more possible types for the\n    value.\n  arg_name: (str) The keyword receiving this hint.\n\nRaises:\n  TypeCheckError if hint does not have the right form."} +{"repo": "transformers", "function": "def resize(self, image: torch.Tensor, size: SizeDict, interpolation: 'F.InterpolationMode'=None, **kwargs) -> torch.Tensor:\n    interpolation = interpolation if interpolation is not None else F.InterpolationMode.BICUBIC\n    if size.shortest_edge:\n        shortest_edge = int(256 / 224 * size['shortest_edge'])\n        new_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False, input_data_format=ChannelDimension.FIRST)\n    elif size.height and size.width:\n        new_size = (size.height, size.width)\n    else:\n        raise ValueError(f\"Size dict must have keys 'height' and 'width' or 'shortest_edge'. 
Got {size.keys()}.\")\n    return F.resize(image, size=new_size, interpolation=interpolation, **kwargs)", "docstring": "Resize an image.\n\nIf size is a dict with keys \"width\" and \"height\", the image will be resized to `(size[\"height\"],\nsize[\"width\"])`.\n\nIf size is a dict with key \"shortest_edge\", the shortest edge value `c` is rescaled to `int(c * (256/224))`.\nThe smaller edge of the image will be matched to this value, i.e., if height > width, then the image will be rescaled\nto `(size[\"shortest_edge\"] * height / width, size[\"shortest_edge\"])`.\n\nArgs:\n    image (`torch.Tensor`):\n        Image to resize.\n    size (`SizeDict`):\n        Size of the output image after resizing. If size is a dict with keys \"width\" and \"height\", the image\n        will be resized to (height, width). If size is a dict with key \"shortest_edge\", the shortest edge value\n        `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value,\n        i.e., if height > width, then the image will be rescaled to (size * height / width, size).\n    interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BICUBIC`):\n        Resampling filter to use when resizing the image."} +{"repo": "tf-quant-finance", "function": "def actual_360(*, start_date, end_date, schedule_info=None, dtype=None, name=None):\n    del schedule_info\n    with tf.name_scope(name or 'actual_360'):\n        end_date = dt.convert_to_date_tensor(end_date)\n        start_date = dt.convert_to_date_tensor(start_date)\n        dtype = dtype or tf.constant(0.0).dtype\n        actual_days = tf.cast(start_date.days_until(end_date), dtype=dtype)\n        return actual_days / 360", "docstring": "Computes the year fraction between the specified dates.\n\nThe actual/360 convention specifies the year fraction between the start and\nend date as the actual number of days between the two dates divided by 360.\n\nNote that the schedule info is not needed for this convention and is ignored\nif supplied.\n\nFor more details see:\nhttps://en.wikipedia.org/wiki/Day_count_convention#Actual/360\n\nArgs:\n  start_date: A `DateTensor` object of any shape.\n  end_date: A `DateTensor` object of compatible shape with `start_date`.\n  schedule_info: The schedule info. Ignored for this convention.\n  dtype: The dtype of the result. Either `tf.float32` or `tf.float64`. If not\n    supplied, `tf.float32` is returned.\n  name: Python `str` name prefixed to ops created by this function. If not\n    supplied, `actual_360` is used.\n\nReturns:\n  A real `Tensor` of supplied `dtype` and shape of `start_date`. The year\n  fraction between the start and end date as computed by Actual/360\n  convention."} +{"repo": "beam", "function": "def __init__(self, project_name=None, bq_table=None, bq_dataset=None, publish_to_bq=False, influxdb_options: Optional['InfluxDBMetricsPublisherOptions']=None, namespace=None, filters=None):\n    self._namespace = namespace\n    self.publishers: List[MetricsPublisher] = []\n    self.publishers.append(ConsoleMetricsPublisher())\n    bq_check = project_name and bq_table and bq_dataset and publish_to_bq\n    if bq_check:\n        bq_publisher = BigQueryMetricsPublisher(project_name, bq_table, bq_dataset)\n        self.publishers.append(bq_publisher)\n    if influxdb_options and influxdb_options.validate():\n        self.publishers.append(InfluxDBMetricsPublisher(influxdb_options))\n    else:\n        _LOGGER.info('Missing InfluxDB options. 
Metrics will not be published to InfluxDB')\n self.filters = filters", "docstring": "Initializes :class:`MetricsReader` .\n\nArgs:\n project_name (str): project with BigQuery where metrics will be saved\n bq_table (str): BigQuery table where metrics will be saved\n bq_dataset (str): BigQuery dataset where metrics will be saved\n namespace (str): Namespace of the metrics\n filters: MetricFilter to query only filtered metrics"} +{"repo": "transformers", "function": "def get_parameters(model: nn.Module) -> Iterable[torch.Tensor]:\n for name, module in model._modules.items():\n for attr_name, attr in module.__dict__.items():\n if isinstance(attr, torch.Tensor) and attr.requires_grad:\n yield attr\n for param in get_parameters(module):\n yield param", "docstring": "Get all parameters from a model by iterating over its modules.\nThis is an alternative to model.parameters() that works with DTensor models.\n\nArgs:\n model (nn.Module): The model to get parameters from\n\nReturns:\n Iterable[torch.Tensor]: An iterator over all parameters in the model"} +{"repo": "tensorflow", "function": "def add(self, arg, tag=None, name=None, aggregate=None, index_override=None):\n if tag is None:\n if aggregate is not None:\n raise ValueError('You must specify `tag` if using aggregate.')\n global_index = self._get_new_global_index(index_override)\n sort_index = None\n else:\n if aggregate is None:\n raise ValueError('You must specify `aggregate` if using tag.')\n if tag not in self._tag_to_global_index:\n self._tag_to_global_index[tag] = self._get_new_global_index(index_override)\n self._tag_to_next_sort_index[tag] = 0\n elif index_override and index_override != self._tag_to_global_index[tag]:\n raise ValueError('Tag %r was called with two indices %r and %r' % (tag, index_override, self._tag_to_global_index[tag]))\n global_index = self._tag_to_global_index[tag]\n sort_index = self._tag_to_next_sort_index[tag]\n self._tag_to_next_sort_index[tag] += 1\n uuid = self._unique_function_id\n name = '%s-%s-%s-%r-%r-%s' % (self._node_name_prefix, self._function_name, uuid, global_index, sort_index, name)\n identity_op = _array_ops.identity(arg, name=name)\n identity_op.op._set_attr(OpHint.FUNCTION_NAME_ATTR, _attr_value_pb2.AttrValue(s=_compat.as_bytes(self._function_name)))\n identity_op.op._set_attr(OpHint.FUNCTION_UUID_ATTR, _attr_value_pb2.AttrValue(s=_compat.as_bytes(self._unique_function_id)))\n identity_op.op._set_attr(self._attr_name, _attr_value_pb2.AttrValue(i=global_index))\n identity_op.op._set_attr(OpHint.FUNCTION_LEVEL_ATTR, _attr_value_pb2.AttrValue(i=self._level))\n if self._children_inputs_mappings:\n identity_op.op._set_attr(OpHint.CHILDREN_INPUTS_MAPPINGS, _attr_value_pb2.AttrValue(s=_compat.as_bytes(_json.dumps(self._children_inputs_mappings))))\n if sort_index is not None:\n identity_op.op._set_attr(OpHint.FUNCTION_SORT_INDEX_ATTR, _attr_value_pb2.AttrValue(i=sort_index))\n if aggregate is not None:\n identity_op.op._set_attr(OpHint.FUNCTION_AGGREGATE_ATTR, _attr_value_pb2.AttrValue(s=_compat.as_bytes(aggregate)))\n return identity_op", "docstring": "Return a wrapped tensor of an input tensor as an argument.\n\nArgs:\n arg: A TensorFlow tensor that should be considered an argument.\n tag: String tag to identify arguments that should be packed.\n name: Name of argument. 
This is included in the Identity hint op names.\n  aggregate: Strategy to aggregate.\n    Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,\n    and OpHint.AGGREGATE_STACK.\n    Note that aggregate is only valid if tag is specified.\n  index_override: Specify what input/output index should this be in the\n    final stub. i.e. add(arg0, index=1); add(arg1, index=0) will make the\n    final stub be stub_func(inputs=[arg1, arg0], outputs=[]) rather than\n    the default call order based ordering.\n\nReturns:\n  A tensor representing the wrapped argument.\n\nRaises:\n  ValueError: When indices are not consistent."} +{"repo": "tensorflow", "function": "def from_concrete_functions(cls, funcs, trackable_obj=None):\n    TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.TF_CONCRETE_FUNCTIONS)\n    if trackable_obj is None:\n        logging.warning('Please consider providing the trackable_obj argument in the from_concrete_functions. Providing without the trackable_obj argument is deprecated and it will use the deprecated conversion path.')\n    for func in funcs:\n        if not isinstance(func, _function.ConcreteFunction):\n            message = 'This function takes in a list of ConcreteFunction.'\n            if isinstance(func, _def_function.Function):\n                message += ' To get the ConcreteFunction from a Function, call get_concrete_function.'\n            raise ValueError(message)\n    return cls(funcs, trackable_obj)", "docstring": "Creates a TFLiteConverter object from ConcreteFunctions.\n\nArgs:\n  funcs: List of TensorFlow ConcreteFunctions. The list should not contain\n    duplicate elements. Currently the converter can only convert a single\n    ConcreteFunction. Converting multiple functions is under development.\n  trackable_obj: An `AutoTrackable` object (typically `tf.module`)\n    associated with `funcs`. A reference to this object needs to be\n    maintained so that Variables do not get garbage collected since\n    functions have a weak reference to Variables.\n\nReturns:\n  TFLiteConverter object.\n\nRaises:\n  ValueError: Invalid input type."} +{"repo": "transformers", "function": "def create_model_card(self, output_dir, model_name: str, language: Optional[str]=None, license: Optional[str]=None, tags: Optional[str]=None, finetuned_from: Optional[str]=None, tasks: Optional[str]=None, dataset_tags: Optional[Union[str, List[str]]]=None, dataset: Optional[Union[str, List[str]]]=None, dataset_args: Optional[Union[str, List[str]]]=None):\n    from .modelcard import TrainingSummary\n    training_summary = TrainingSummary.from_keras(self, keras_history=self.history, language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args)\n    model_card = training_summary.to_model_card()\n    with open(os.path.join(output_dir, 'README.md'), 'w') as f:\n        f.write(model_card)", "docstring": "Creates a draft of a model card using the information available to the `Trainer`.\n\nArgs:\n    output_dir (`str` or `os.PathLike`):\n        The folder in which to create the model card.\n    model_name (`str`, *optional*):\n        The name of the model.\n    language (`str`, *optional*):\n        The language of the model (if applicable).\n    license (`str`, *optional*):\n        The license of the model. 
Will default to the license of the pretrained model used, if the original\n        model given to the `Trainer` comes from a repo on the Hub.\n    tags (`str` or `List[str]`, *optional*):\n        Some tags to be included in the metadata of the model card.\n    finetuned_from (`str`, *optional*):\n        The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo\n        of the original model given to the `Trainer` (if it comes from the Hub).\n    tasks (`str` or `List[str]`, *optional*):\n        One or several task identifiers, to be included in the metadata of the model card.\n    dataset_tags (`str` or `List[str]`, *optional*):\n        One or several dataset tags, to be included in the metadata of the model card.\n    dataset (`str` or `List[str]`, *optional*):\n        One or several dataset identifiers, to be included in the metadata of the model card.\n    dataset_args (`str` or `List[str]`, *optional*):\n        One or several dataset arguments, to be included in the metadata of the model card."} +{"repo": "tensorflow", "function": "def get_tensor_sharding(tensor):\n    if isinstance(tensor, resource_variable_ops.BaseResourceVariable) and context.xla_sharding_for_resource_variables_enabled():\n        sharding = tensor._get_xla_sharding()\n        if sharding is None:\n            return None\n        else:\n            return sharding.SerializeToString()\n    try:\n        return get_op_sharding(tensor.op)\n    except AttributeError:\n        return None", "docstring": "Returns sharding attribute of a Tensor.\n\nArgs:\n  tensor: a Tensor.\n\nReturns:\n  The attribute representing XLA sharding on tensor's op."} +{"repo": "keras", "function": "def log10(x):\n    if any_symbolic_tensors((x,)):\n        return Log10().symbolic_call(x)\n    return backend.numpy.log10(x)", "docstring": "Return the base 10 logarithm of the input tensor, element-wise.\n\nArgs:\n    x: Input tensor.\n\nReturns:\n    Output tensor, element-wise base 10 logarithm of `x`."} +{"repo": "transformers", "function": "def find_executable_batch_size(function: Optional[callable]=None, starting_batch_size: int=128, auto_find_batch_size: bool=False):\n    if function is None:\n        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size, auto_find_batch_size=auto_find_batch_size)\n    if auto_find_batch_size:\n        requires_backends(find_executable_batch_size, 'accelerate')\n        from accelerate.utils import find_executable_batch_size as accelerate_find_executable_batch_size\n        return accelerate_find_executable_batch_size(function=function, starting_batch_size=starting_batch_size)\n    return functools.partial(function, batch_size=starting_batch_size)", "docstring": "A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or\nCUDNN, the batch size is cut in half and passed to `function`. `function` must take in a `batch_size` parameter as\nits first argument.\n\nArgs:\n    function (`callable`, *optional*):\n        A function to wrap\n    starting_batch_size (`int`, *optional*, defaults to 128):\n        The batch size to try and fit into memory\n    auto_find_batch_size (`bool`, *optional*, defaults to `False`):\n        If `False`, will just execute `function`"} +{"repo": "tensorflow", "function": "def trace(service_addr, logdir, duration_ms, worker_list='', num_tracing_attempts=3, options=None):\n    if duration_ms <= 0:\n        raise errors.InvalidArgumentError(None, None, 'duration_ms must be greater than zero.')\n    opts = dict(options._asdict()) if options is not None else {}\n    _pywrap_profiler_plugin.trace(_strip_addresses(service_addr, _GRPC_PREFIX), logdir, worker_list, True, duration_ms, num_tracing_attempts, opts)", "docstring": "Sends gRPC requests to one or more profiler servers to perform on-demand profiling.\n\nThis method will block the calling thread until it receives responses from all\nservers or until deadline expiration. Both single host and multiple host\nprofiling are supported on CPU, GPU, and TPU.\nThe profiled results will be saved by each server to the specified TensorBoard\nlog directory (i.e. the directory you save your model checkpoints). Use the\nTensorBoard profile plugin to view the visualization and analysis results.\n\nArgs:\n  service_addr: A comma-delimited string of gRPC addresses of the workers to\n    profile.\n    e.g. service_addr='grpc://localhost:6009'\n    service_addr='grpc://10.0.0.2:8466,grpc://10.0.0.3:8466'\n    service_addr='grpc://localhost:12345,grpc://localhost:23456'\n  logdir: Path to save profile data to, typically a TensorBoard log directory.\n    This path must be accessible to both the client and server.\n    e.g. logdir='gs://your_tb_dir'\n  duration_ms: Duration of tracing or monitoring in milliseconds. Must be\n    greater than zero.\n  worker_list: An optional TPU-only configuration. The list of workers to\n    profile in the current session.\n  num_tracing_attempts: Optional. Automatically retry N times when no trace\n    event is collected (default 3).\n  options: profiler.experimental.ProfilerOptions namedtuple for miscellaneous\n    profiler options.\n\nRaises:\n  InvalidArgumentError: For when arguments fail validation checks.\n  UnavailableError: If no trace event was collected.\n\nExample usage (CPU/GPU):\n\n```python\n  # Start a profiler server before your model runs.\n  tf.profiler.experimental.server.start(6009)\n  # (Model code goes here).\n  # Send gRPC request to the profiler server to collect a trace of your model.\n  tf.profiler.experimental.client.trace('grpc://localhost:6009',\n                                        '/nfs/tb_log', 2000)\n```\n\nExample usage (Multiple GPUs):\n\n```python\n  # E.g. your worker IP addresses are 10.0.0.2, 10.0.0.3, 10.0.0.4, and you\n  # would like to schedule start of profiling 1 second from now, for a\n  # duration of 2 seconds.\n  options['delay_ms'] = 1000\n  tf.profiler.experimental.client.trace(\n      'grpc://10.0.0.2:8466,grpc://10.0.0.3:8466,grpc://10.0.0.4:8466',\n      'gs://your_tb_dir',\n      2000,\n      options=options)\n```\n\nExample usage (TPU):\n\n```python\n  # Send gRPC request to a TPU worker to collect a trace of your model. A\n  # profiler service has been started in the TPU worker at port 8466.\n  # E.g. your TPU IP address is 10.0.0.2 and you want to profile\n  # for 2 seconds.\n  tf.profiler.experimental.client.trace('grpc://10.0.0.2:8466',\n                                        'gs://your_tb_dir', 2000)\n```\n\nExample usage (Multiple TPUs):\n\n```python\n  # Send gRPC request to a TPU pod to collect a trace of your model on\n  # multiple TPUs. 
A profiler service has been started in all the TPU workers\n  # at port 8466.\n  # E.g. your TPU IP addresses are 10.0.0.2, 10.0.0.3, 10.0.0.4, and you want\n  # to profile for 2 seconds.\n  tf.profiler.experimental.client.trace(\n      'grpc://10.0.0.2:8466',\n      'gs://your_tb_dir',\n      2000,\n      '10.0.0.2:8466,10.0.0.3:8466,10.0.0.4:8466')\n```\n\nLaunch TensorBoard and point it to the same logdir you provided to this API.\n\n```shell\n  # logdir can be gs://your_tb_dir as in the above examples.\n  $ tensorboard --logdir=/tmp/tb_log\n```\n\nOpen your browser and go to localhost:6006/#profile to view profiling results."} +{"repo": "tensorflow", "function": "def initialize_multi_client_cluster(job_name: str, dtensor_jobs: List[str], client_id: int, collective_leader: str, port: Optional[int]=None, gpu_use_nccl_communication: bool=False, enable_coordination_service: bool=True):\n    assert context.executing_eagerly()\n    if not collective_leader.startswith('/job:'):\n        collective_leader = '/job:' + collective_leader\n    context.context().configure_collective_ops(use_nccl_communication=gpu_use_nccl_communication, collective_leader=collective_leader)\n    if enable_coordination_service:\n        context.context().configure_coordination_service(service_type='standalone', service_leader=collective_leader)\n    config_proto = context.get_config()\n    cluster_def = cluster_pb2.ClusterDef()\n    cluster_def.job.add(name=job_name, tasks=dict(enumerate(dtensor_jobs)))\n    server_def = tensorflow_server_pb2.ServerDef(cluster=cluster_def, default_session_config=config_proto, job_name=job_name, task_index=client_id, protocol=remote_utils.get_default_communication_protocol(), port=port)\n    server_def.default_session_config.rpc_options.num_channels_per_target = 4\n    server_def.default_session_config.experimental.recv_buf_max_chunk = -1\n    logging.info('Enabling collectives with server_def: %s', server_def)\n    context.context().enable_collective_ops(server_def)\n    context.ensure_initialized()", "docstring": "Initialize GRPC servers and collectives for multi-client DTensor setup.\n\nThis function can be used to initialize a multi-client cluster and enable\ncollective ops. GRPC servers are necessary in the multi-client mode, even\nwhen the number of clients is 1.\n\nNOTE: this function must be called in an eager context.\n\nArgs:\n  job_name: The job name used by all clients in the DTensor cluster.\n  dtensor_jobs: A list of the DTensor client jobs participating in the\n    cluster. Must be strings of the form \"hostname:port\".\n  client_id: The ID of the DTensor client this function is being called in.\n  collective_leader: The job/task that will be used to run collectives.\n  port: The port this client's GRPC server will run on. 
If omitted, use the\n port from dtensor_jobs for this client.\n gpu_use_nccl_communication: if True, configure TensorFlow to use NCCL by\n default.\n enable_coordination_service: If true, enable distributed coordination\n service to make sure that workers know the devices on each other, a\n prerequisite for data transfer through cross-worker rendezvous.\n\nRaises:\n RuntimeError: If running inside a tf.function."} +{"repo": "transformers", "function": "def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)\n num_observed = observed_indicator.sum(self.dim, keepdim=True)\n scale = ts_sum / torch.clamp(num_observed, min=1)\n if self.default_scale is None:\n batch_sum = ts_sum.sum(dim=0)\n batch_observations = torch.clamp(num_observed.sum(0), min=1)\n default_scale = torch.squeeze(batch_sum / batch_observations)\n else:\n default_scale = self.default_scale * torch.ones_like(scale)\n scale = torch.where(num_observed > 0, scale, default_scale)\n scale = torch.clamp(scale, min=self.minimum_scale)\n scaled_data = data / scale\n if not self.keepdim:\n scale = scale.squeeze(dim=self.dim)\n return (scaled_data, torch.zeros_like(scale), scale)", "docstring": "Parameters:\n data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):\n input for Batch norm calculation\n observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):\n Calculating the scale on the observed indicator.\nReturns:\n tuple of `torch.Tensor` of shapes\n (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,\n `(batch_size, 1, num_input_channels)`)"} +{"repo": "beam", "function": "def get_metrics_namespace(self) -> str:\n return 'BeamML_Sklearn'", "docstring": "Returns:\n A namespace for metrics collected by the RunInference transform."} +{"repo": "tensorflow", "function": "def __init__(self, experimental_io_device=None, experimental_enable_async_checkpoint=False, experimental_write_callbacks=None, enable_async=False, experimental_skip_slot_variables=False, experimental_sharding_callback=None):\n self.experimental_io_device = experimental_io_device\n self.enable_async = experimental_enable_async_checkpoint or enable_async\n self.experimental_enable_async_checkpoint = self.enable_async\n if experimental_write_callbacks is not None:\n for callback in experimental_write_callbacks:\n assert len(inspect.signature(callback).parameters) <= 1\n self.experimental_write_callbacks = experimental_write_callbacks\n if experimental_sharding_callback is not None:\n if not isinstance(experimental_sharding_callback, sharding_util.ShardingCallback):\n raise ValueError(f'The experimental_sharding_callback checkpoint optionmust be of type ShardingCallback. The option providedwas of type {type(experimental_sharding_callback)}.')\n self.experimental_sharding_callback = experimental_sharding_callback\n self.experimental_skip_slot_variables = experimental_skip_slot_variables", "docstring": "Creates an object that stores options for a Checkpoint.\n\nArgs:\n experimental_io_device: string. Applies in a distributed setting.\n Tensorflow device to use to access the filesystem. If `None` (default)\n then for each variable the filesystem is accessed from the CPU:0 device\n of the host where that variable is assigned. If specified, the\n filesystem is instead accessed from that device for all variables. 
This\n    is for example useful if you want to save to a local directory, such as\n    \"/tmp\" when running in a distributed setting. In that case, pass a device\n    for the host where the \"/tmp\" directory is accessible.\n  experimental_enable_async_checkpoint: bool Type. Deprecated, please use\n    the enable_async option.\n  experimental_write_callbacks: List[Callable]. A list of callback functions\n    that will be executed after each saving event finishes (i.e. after\n    `save()` or `write()`). For async checkpoint, the callbacks will be\n    executed only after the async thread finishes saving. The return values\n    of the callback(s) will be ignored. The callback(s) can optionally take\n    the `save_path` (the result of `save()` or `write()`) as an argument.\n    The callbacks will be executed in the same order as this list after the\n    checkpoint has been written.\n  enable_async: bool Type. Indicates whether async checkpointing is enabled.\n    Default is False, i.e., no async checkpoint. Async checkpoint moves the\n    checkpoint file writing off the main thread, so that the model can\n    continue to train while the checkpoint file writing runs in the\n    background. Async checkpoint reduces TPU device idle cycles and speeds\n    up the model training process, while memory consumption may increase.\n  experimental_skip_slot_variables: bool Type. If true, ignores slot\n    variables during restore. Context: TPU Embedding layers for Serving do\n    not properly restore slot variables. This option is a way to omit\n    restoring slot variables which are not required for the Serving use case\n    anyway (b/315912101).\n  experimental_sharding_callback: `tf.train.experimental.ShardingCallback`.\n    A pre-made or custom callback that determines how checkpoints are\n    sharded on disk. Pre-made callback options are\n    `tf.train.experimental.ShardByDevicePolicy` and\n    `tf.train.experimental.MaxShardSizePolicy`. You may also write a custom\n    callback, see `tf.train.experimental.ShardingCallback`."} +{"repo": "transformers", "function": "def amplitude_to_db(spectrogram: np.ndarray, reference: float=1.0, min_value: float=1e-05, db_range: Optional[float]=None) -> np.ndarray:\n    if reference <= 0.0:\n        raise ValueError('reference must be greater than zero')\n    if min_value <= 0.0:\n        raise ValueError('min_value must be greater than zero')\n    reference = max(min_value, reference)\n    spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None)\n    spectrogram = 20.0 * (np.log10(spectrogram) - np.log10(reference))\n    if db_range is not None:\n        if db_range <= 0.0:\n            raise ValueError('db_range must be greater than zero')\n        spectrogram = np.clip(spectrogram, a_min=spectrogram.max() - db_range, a_max=None)\n    return spectrogram", "docstring": "Converts an amplitude spectrogram to the decibel scale. This computes `20 * log10(spectrogram / reference)`, using\nbasic logarithm properties for numerical stability.\n\nThe motivation behind applying the log function on the (mel) spectrogram is that humans do not hear loudness on a\nlinear scale. Generally to double the perceived volume of a sound we need to put 8 times as much energy into it.\nThis means that large variations in energy may not sound all that different if the sound is loud to begin with.\nThis compression operation makes the (mel) spectrogram features match more closely what humans actually hear.\n\nArgs:\n    spectrogram (`np.ndarray`):\n        The input amplitude (mel) spectrogram.\n    reference (`float`, *optional*, defaults to 1.0):\n        Sets the input spectrogram value that corresponds to 0 dB. 
For example, use `np.max(spectrogram)` to set\n the loudest part to 0 dB. Must be greater than zero.\n min_value (`float`, *optional*, defaults to `1e-5`):\n The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking\n `log(0)`. The default of `1e-5` corresponds to a minimum of -100 dB. Must be greater than zero.\n db_range (`float`, *optional*):\n Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the\n peak value and the smallest value will never be more than 80 dB. Must be greater than zero.\n\nReturns:\n `np.ndarray`: the spectrogram in decibels"} +{"repo": "transformers", "function": "def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:\n out = F.dropout(x, p=prob, training=training)\n out = residual + out\n return out", "docstring": "Dropout add function\n\nArgs:\n x (`torch.tensor`):\n input tensor\n residual (`torch.tensor`):\n residual tensor\n prob (`float`):\n dropout probability\n training (`bool`):\n training mode"} +{"repo": "genai-processors", "function": "def __init__(self, live_api_processor: live_model.LiveProcessor, chattiness: float=1.0, unsafe_string_list: list[str] | None=None):\n self._processor = live_api_processor\n self._chattiness = chattiness\n self._commentator = CommentatorStateMachine()\n self.ttfts = collections.deque(maxlen=50)\n self._unsafe_string_list = unsafe_string_list\n if unsafe_string_list is not None:\n pattern = '|'.join((re.escape(s) for s in unsafe_string_list))\n self._processor += text.MatchProcessor(pattern=pattern, substream_input='output_transcription', substream_output='unsafe_regex', remove_from_input_stream=False, flush_fn=lambda x: x.get_metadata('generation_complete') or x.get_metadata('interrupted') or x.get_metadata('interrupt_request') or x.get_metadata('turn_complete') or x.get_metadata('go_away'))", "docstring": "Initializes the processor.\n\nArgs:\n live_api_processor: The live API processor to use.\n chattiness: Probability of triggering a comment when the model has\n finished talking or every 3 seconds. Set to 0 to disable commenting.\n unsafe_string_list: The strings to use for unsafe content. If None, the\n processor will not block unsafe content. 
If set, the processor will\n interrupt itself when it sees the string in the output."} +{"repo": "transformers", "function": "class PegasusXDecoder(PegasusXPreTrainedModel):\n\n def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding]=None):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n self.max_target_positions = config.max_position_embeddings\n embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0\n padding_idx = config.pad_token_id\n if embed_tokens is not None:\n self.embed_tokens = embed_tokens\n else:\n self.embed_tokens = PegasusXScaledWordEmbedding(config.vocab_size, config.d_model, padding_idx=padding_idx, embed_scale=embed_scale)\n self.embed_positions = PegasusXSinusoidalPositionalEmbedding(config.d_model)\n self.layers = nn.ModuleList([PegasusXDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)])\n self.layer_norm = nn.LayerNorm(config.d_model)\n self.gradient_checkpointing = False\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, value):\n self.embed_tokens = value\n\n def forward(self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None):\n \"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):\n Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. 
Mask values\n selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of\n shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the\n cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those\n that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of\n all `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n inputs_embeds (`torch.FloatTensor` of\n shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing\n `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more\n control over how to convert `input_ids` indices into associated vectors than the model's internal\n embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. It is used to update the\n cache in the correct position and to infer the complete sequence length.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if (input_ids is None) ^ (inputs_embeds is not None):\n raise ValueError('You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time')\n elif input_ids is not None:\n input = input_ids\n input_shape = input.shape\n input_ids = input_ids.view(-1, input_shape[-1])\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n input = inputs_embeds[:, :, -1]\n else:\n raise ValueError('You have to specify either decoder_input_ids or decoder_inputs_embeds')\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input)\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing`. 
Setting `use_cache=False`...')\n use_cache = False\n return_legacy_cache = False\n if use_cache and (not isinstance(past_key_values, Cache)):\n return_legacy_cache = True\n logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.')\n past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)\n batch_size, seq_length = inputs_embeds.size()[:-1]\n past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0\n if cache_position is None:\n cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device)\n if attention_mask is None and (not is_torchdynamo_compiling()):\n mask_seq_length = past_key_values_length + seq_length\n attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)\n self_attn_cache = past_key_values.self_attention_cache if isinstance(past_key_values, EncoderDecoderCache) else past_key_values\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, self_attn_cache)\n encoder_attention_mask = self._update_cross_attn_mask(encoder_hidden_states, encoder_attention_mask, input_shape, inputs_embeds)\n position_ids = cache_position.unsqueeze(1)\n position_ids = self.embed_positions(inputs_embeds, past_key_values_length, position_ids)\n position_ids = position_ids.to(inputs_embeds.device)\n hidden_states = inputs_embeds + position_ids\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None\n next_decoder_cache = None\n for idx, decoder_layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n continue\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, causal_mask, encoder_hidden_states, encoder_attention_mask, None, output_attentions, use_cache, cache_position)\n else:\n layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position)\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache = layer_outputs[3 if output_attentions else 1]\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_outputs[2],)\n hidden_states = self.layer_norm(hidden_states)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n next_cache = next_decoder_cache if use_cache else None\n if return_legacy_cache:\n next_cache = past_key_values.to_legacy_cache()\n if not return_dict:\n return tuple((v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None))\n return BaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, 
attentions=all_self_attns, cross_attentions=all_cross_attentions)", "docstring": "Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`]\n\nArgs:\n config: PegasusXConfig\n embed_tokens (nn.Embedding): output embedding"} +{"repo": "tensorflow", "function": "def gather_nd(params: ragged_tensor.RaggedOrDense, indices: ragged_tensor.RaggedOrDense, batch_dims=0, name=None, bad_indices_policy=''):\n if not isinstance(batch_dims, int) or batch_dims != 0:\n raise ValueError('batch_dims != 0 is not supported for ragged gather yet.')\n if not (ragged_tensor.is_ragged(params) or ragged_tensor.is_ragged(indices)):\n return array_ops.gather_nd(params, indices, name=name, bad_indices_policy=bad_indices_policy)\n if bad_indices_policy not in ('', 'DEFAULT'):\n raise ValueError('non-default bad_indices_policy not supported for ragged gather')\n with ops.name_scope(name, 'RaggedGatherNd', [params, indices]):\n params = ragged_tensor.convert_to_tensor_or_ragged_tensor(params, name='params')\n indices = ragged_tensor.convert_to_tensor_or_ragged_tensor(indices, name='indices')\n params, indices = ragged_tensor.match_row_splits_dtypes(params, indices)\n indices_shape = indices.shape\n indices_ndims = indices_shape.ndims\n if indices_ndims is None:\n raise ValueError('indices.rank be statically known.')\n if indices_ndims == 0:\n raise ValueError('indices.rank must be at least 1.')\n if ragged_tensor.is_ragged(indices) and indices_ndims == indices.ragged_rank + 1:\n raise ValueError('The innermost dimension of indices may not be ragged')\n index_size = tensor_shape.dimension_value(indices_shape[-1])\n if index_size is None:\n raise ValueError('indices.shape[-1] must be statically known.')\n if indices_ndims > 2:\n indices_is_dense = not ragged_tensor.is_ragged(indices)\n if indices_is_dense:\n indices = ragged_tensor.RaggedTensor.from_tensor(indices, ragged_rank=indices_ndims - 2, row_splits_dtype=params.row_splits.dtype)\n result = indices.with_flat_values(gather_nd(params, indices.flat_values))\n if indices_is_dense and ragged_tensor.is_ragged(result) and (result.ragged_rank == indices_ndims - 2):\n result = ragged_tensor.RaggedTensor.to_tensor(result)\n return result\n assert not ragged_tensor.is_ragged(indices)\n assert ragged_tensor.is_ragged(params)\n if index_size == 0:\n params_ndims = params.ragged_rank + array_ops.rank(params.flat_values)\n for dim in range(indices_ndims - 1):\n params = ragged_array_ops.expand_dims(params, axis=0)\n multiples = array_ops.concat([array_ops.shape(indices)[:-1], array_ops.ones([params_ndims], dtypes.int32)], axis=0)\n return ragged_array_ops.tile(params, multiples)\n elif index_size == 1:\n flattened_index_tuples = array_ops.reshape(indices, [-1])\n return gather(params, flattened_index_tuples)\n else:\n indices = math_ops.cast(indices, params.row_splits.dtype)\n flattened_index_tuples = array_ops.gather(params.row_splits, indices[..., 0])\n flattened_index_tuples += indices[..., 1]\n flattened_params = params.values\n for dim in range(2, index_size):\n if not ragged_tensor.is_ragged(flattened_params):\n flattened_index_tuples = array_ops.expand_dims(flattened_index_tuples, axis=1)\n flattened_index_tuples = array_ops.concat([flattened_index_tuples, indices[..., dim:]], axis=1)\n return array_ops.gather_nd(flattened_params, flattened_index_tuples)\n flattened_index_tuples = array_ops.gather(flattened_params.row_starts(), flattened_index_tuples)\n flattened_index_tuples += indices[..., dim]\n flattened_params = 
flattened_params.values\n        return gather(flattened_params, flattened_index_tuples)", "docstring": "Gather slices from `params` using `n`-dimensional indices.\n\nThis operation is similar to `gather`, but it uses the innermost dimension\nof `indices` to define a slice into `params`. In particular, if:\n\n* `indices` has shape `[A1...AN, I]`\n* `params` has shape `[B1...BM]`\n\nThen:\n\n* `result` has shape `[A1...AN, B_{I+1}...BM]`.\n* `result[a1...aN] = params[indices[a1...aN, :]]`\n\nArgs:\n  params: A potentially ragged tensor with shape `[B1...BM]`.\n  indices: A potentially ragged tensor with shape `[A1...AN, I]`.\n  batch_dims: Must be zero.\n  name: A name for the operation (optional).\n  bad_indices_policy: A string. If `\"\"` or `\"DEFAULT\"`, the default behavior\n    is used (error on CPU and ignore on GPU). If `\"IGNORE\"`, the bad indices\n    are ignored and 0 is stored in the corresponding output value.\n\nReturns:\n  A potentially ragged tensor with shape `[A1...AN, B_{I+1}...BM]`.\n\n#### Examples:\n\n>>> params = tf.ragged.constant(\n...     [ [ ['000', '001'], ['010'              ]          ],\n...       [ ['100'       ], ['110', '111', '112'], ['120'] ],\n...       [ [            ], ['210'              ]          ] ])\n\n>>> # Gather 2D slices from a 3D tensor\n>>> tf.gather_nd(params, [[2], [0]])\n<tf.RaggedTensor [[[], [b'210']], [[b'000', b'001'], [b'010']]]>\n\n>>> # Gather 1D slices from a 3D tensor\n>>> tf.gather_nd(params, [[2, 1], [0, 0]])\n<tf.RaggedTensor [[b'210'], [b'000', b'001']]>\n\n>>> # Gather scalars from a 3D tensor\n>>> tf.gather_nd(params, [[0, 0, 1], [1, 1, 2]]).numpy()\narray([b'001', b'112'], dtype=object)"} +{"repo": "transformers", "function": "def rescale(self, image: 'torch.Tensor', scale: float, **kwargs) -> 'torch.Tensor':\n    return image * scale", "docstring": "Rescale an image by a scale factor. image = image * scale.\n\nArgs:\n    image (`torch.Tensor`):\n        Image to rescale.\n    scale (`float`):\n        The scaling factor to rescale pixel values by.\n\nReturns:\n    `torch.Tensor`: The rescaled image."} +{"repo": "tensorflow", "function": "def wait_for_other_workers(self):\n    if not self._worker_barrier:\n        return\n    self._worker_barrier.wait()", "docstring": "Waits for other workers to reach the same call to this method.\n\nRaises:\n  ValueError: if `worker_barrier` is not passed to the __init__ method."} +{"repo": "tensorflow", "function": "def variance(self, name='variance'):\n    with self._name_scope(name):\n        try:\n            return self._variance()\n        except NotImplementedError as original_exception:\n            try:\n                return math_ops.square(self._stddev())\n            except NotImplementedError:\n                raise original_exception", "docstring": "Variance.\n\nVariance is defined as,\n\n```none\nVar = E[(X - E[X])**2]\n```\n\nwhere `X` is the random variable associated with this distribution, `E`\ndenotes expectation, and `Var.shape = batch_shape + event_shape`.\n\nArgs:\n  name: Python `str` prepended to names of ops created by this function.\n\nReturns:\n  variance: Floating-point `Tensor` with shape identical to\n    `batch_shape + event_shape`, i.e., the same shape as `self.mean()`."} +{"repo": "tensorflow", "function": "def bessel_y0(x, name=None):\n    with ops.name_scope(name, 'bessel_y0', [x]):\n        return gen_special_math_ops.bessel_y0(x)", "docstring": "Computes the Bessel y0 function of `x` element-wise.\n\nBessel function of the second kind of order 0.\n\n>>> tf.math.special.bessel_y0([0.5, 1., 2., 4.]).numpy()\narray([-0.44451873, 0.08825696, 0.51037567, -0.01694074], dtype=float32)\n\nArgs:\n  x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n    `float32`, `float64`.\n  name: A name for the operation (optional).\n\nReturns:\n  A `Tensor` or `SparseTensor`, respectively. 
Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.y0\n@end_compatibility"} +{"repo": "transformers", "function": "def load_graph_from_args(pipeline_name: str, framework: str, model: str, tokenizer: Optional[str]=None, **models_kwargs) -> Pipeline:\n if tokenizer is None:\n tokenizer = model\n if framework == 'pt' and (not is_torch_available()):\n raise Exception('Cannot convert because PyTorch is not installed. Please install torch first.')\n if framework == 'tf' and (not is_tf_available()):\n raise Exception('Cannot convert because TF is not installed. Please install tensorflow first.')\n print(f'Loading pipeline (model: {model}, tokenizer: {tokenizer})')\n return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs)", "docstring": "Convert the set of arguments provided through the CLI to an actual pipeline reference (tokenizer + model)\n\nArgs:\n pipeline_name: The kind of pipeline to use (ner, question-answering, etc.)\n framework: The framework to convert the pipeline from (\"pt\" or \"tf\")\n model: The model name which will be loaded by the pipeline\n tokenizer: The tokenizer name which will be loaded by the pipeline, defaults to the model's value\n\nReturns: Pipeline object"} +{"repo": "transformers", "function": "def find_custom_args_with_details(file_content: str, custom_args_var_name: str) -> Optional[str]:\n escaped_variable_name = re.escape(custom_args_var_name)\n regex_pattern = f'^\\\\s*({escaped_variable_name})\\\\s*=\\\\s*(r?\\\\\"\\\\\"\\\\\")(.*?)(\\\\\"\\\\\"\\\\\")'\n flags = re.MULTILINE | re.DOTALL\n match = re.search(regex_pattern, file_content, flags)\n if match:\n content = match.group(3).strip()\n return content\n return None", "docstring": "Find the given custom args variable in the file content and return its content, or None if it is not found.\n\nArgs:\n file_content: The string content of the Python file.\n custom_args_var_name: The name of the custom args variable."} +{"repo": "tensorflow", "function": "def restore_or_initialize(self):\n if self._latest_checkpoint is not None:\n self._checkpoint.restore(self._latest_checkpoint)\n if self._checkpoint_interval is not None:\n self._last_checkpoint_step = _evaluate(self._step_counter)\n return self._latest_checkpoint\n if self._init_fn is not None:\n self._init_fn()\n logging.info('Customized initialization is done through the passed `init_fn`.')\n return None", "docstring": "Restore items in `checkpoint` from the latest checkpoint file.\n\nThis method will first try to restore from the most recent checkpoint in\n`directory`. If no checkpoints exist in `directory`, and `init_fn` is\nspecified, this method will call `init_fn` to do customized\ninitialization. This can be used to support initialization from pretrained\nmodels.\n\nNote that unlike `tf.train.Checkpoint.restore()`, this method doesn't return\na load status object that users can run assertions on\n(e.g. assert_consumed()). Thus to run assertions, users should directly use\nthe `tf.train.Checkpoint.restore()` method.\n\nReturns:\n The restored checkpoint path if the latest checkpoint is found and\n restored. Otherwise None."} +{"repo": "keras", "function": "def deserialize(config, custom_objects=None):\n return serialization_lib.deserialize_keras_object(config, module_objects=globals(), custom_objects=custom_objects, printable_module_name='decay')", "docstring": "Instantiates a `LearningRateSchedule` object from a serialized form.\n\nArgs:\n config: The serialized form of the `LearningRateSchedule`. 
Dictionary of\n the form {'class_name': str, 'config': dict}.\n custom_objects: A dictionary mapping class names (or function names) of\n custom (non-Keras) objects to class/functions.\n\nReturns:\n A `LearningRateSchedule` object.\n\nExample:\n\n```python\n# Configuration for PolynomialDecay\nconfig = {\n 'class_name': 'PolynomialDecay',\n 'config': {'cycle': False,\n 'decay_steps': 10000,\n 'end_learning_rate': 0.01,\n 'initial_learning_rate': 0.1,\n 'name': None,\n 'power': 0.5\n }\n}\nlr_schedule = keras.optimizers.schedules.deserialize(config)\n```"} +{"repo": "tensorflow", "function": "def __init__(self, strategy):\n if not getattr(self, '_has_initialized', False):\n if not hasattr(strategy, '_is_parameter_server_strategy_v2'):\n raise ValueError('Only `tf.distribute.experimental.ParameterServerStrategy` is supported to work with `tf.distribute.experimental.coordinator.ClusterCoordinator` currently.')\n self._strategy = strategy\n self.strategy.extended._used_with_coordinator = True\n self._cluster = Cluster(strategy)\n self._has_initialized = True", "docstring": "Initialization of a `ClusterCoordinator` instance.\n\nArgs:\n strategy: a supported `tf.distribute.Strategy` object. Currently, only\n `tf.distribute.experimental.ParameterServerStrategy` is supported.\n\nRaises:\n ValueError: if the strategy being used is not supported."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[KwargsForCausalLM]) -> CausalLMOutputWithPast:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs)\n hidden_states = outputs.last_hidden_state\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n logits = logits * self.logit_scale\n loss = None\n if labels is not None:\n loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)\n return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, CohereForCausalLM\n\n>>> model = CohereForCausalLM.from_pretrained(\"CohereForAI/c4ai-command-r-v01\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"CohereForAI/c4ai-command-r-v01\")\n\n>>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n>>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n>>> # Generate\n>>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n\"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n```"} +{"repo": "tensorflow", "function": "def restore_nodes(save_path, nodes_to_restore):\n if save_path is None:\n raise ValueError('save_path cannot be empty.')\n if not isinstance(nodes_to_restore, dict):\n raise ValueError('Expecting a dictionary of node_id to Trackable for nodes_to_restore.')\n ckpt_view = checkpoint_view.CheckpointView(save_path)\n ckpt_view_descendants = ckpt_view.descendants()\n for node_id, trackable in nodes_to_restore.items():\n if node_id not in ckpt_view_descendants or ckpt_view._object_graph_proto.nodes[node_id] is None:\n raise ValueError(f'The expected node_id: {node_id} to Trackable {trackable} to restore does not exist in the checkpoint.')\n if trackable is None or not isinstance(trackable, base.Trackable):\n raise ValueError(f'Expecting a valid Trackable to node_id: {node_id} but got trackable: {trackable}.')\n serialized_tensors = object_identity.ObjectIdentityDictionary()\n for node_id, current_trackable in nodes_to_restore.items():\n ckpt_contains_serialized_tensors = ckpt_view._object_graph_proto.nodes[node_id].attributes\n node = ckpt_view._object_graph_proto.nodes[node_id]\n trackable_has_serialize_to_tensor = saveable_object_util.trackable_has_serialize_to_tensor(current_trackable)\n if not trackable_has_serialize_to_tensor:\n if not node.attributes:\n if saveable_object_util.saveable_objects_from_trackable(current_trackable):\n raise ValueError(f'Trackable {current_trackable} expects checkpointed values but checkpoint does not contain serialized tensors for node_id: {node_id}.')\n else:\n continue\n object_names = object_identity.ObjectIdentityDictionary()\n object_names[current_trackable] = trackable_utils.extract_object_name(node.attributes[0].checkpoint_key)\n checkpoint_factory_map, _ = save_util_v1.get_checkpoint_factories_and_keys(object_names, None)\n saveable_objects = save_util_v1.generate_saveable_objects(checkpoint_factory_map)[0]\n if len(node.attributes) != len(saveable_objects):\n raise ValueError(f'Size for saveable_objects for Trackable: {len(saveable_objects)} did not match the size for serialized_tensors for checkpoint: {len(node.attributes)}.')\n current_trackable = saveable_object_util.SaveableCompatibilityConverter(current_trackable, saveable_objects)\n serialized_tensors[current_trackable] = current_trackable._serialize_to_tensors()\n trackable_expects_ckpted_value = bool(serialized_tensors[current_trackable])\n if trackable_expects_ckpted_value and (not ckpt_contains_serialized_tensors):\n raise ValueError(f'Trackable {current_trackable} expects checkpointed values but checkpoint does not contain serialized tensors for node_id: {node_id}.')\n if not trackable_expects_ckpted_value and ckpt_contains_serialized_tensors:\n raise 
ValueError(f'Trackable {current_trackable} does not expect checkpointed values but checkpoint contains serialized tensors: {ckpt_contains_serialized_tensors} for node_id: {node_id}.')\n if len(node.attributes) != len(serialized_tensors[current_trackable]):\n raise ValueError(f'Size for serialized_tensors for Trackable: {len(serialized_tensors[current_trackable])} did not match size for serialized_tensors for checkpoint: {len(node.attributes)}.')\n if not trackable_has_serialize_to_tensor:\n functional_saver.MultiDeviceSaver(serialized_tensors).restore(save_path)\n else:\n serialized_tensors_renamed = object_identity.ObjectIdentityDictionary()\n serialized_tensors_renamed[current_trackable] = {}\n for attribute in node.attributes:\n name = attribute.name\n checkpoint_key = attribute.checkpoint_key\n serialized_tensors_renamed[current_trackable][checkpoint_key] = serialized_tensors[current_trackable][name]\n functional_saver.MultiDeviceSaver(serialized_tensors_renamed).restore(save_path)", "docstring": "Restores nodes from a dict.\n\nRequires that the `Trackable` Python object has been bound to an object\nID in the checkpoint.\n\nArgs:\n save_path: a string representing the path to the checkpoint.\n nodes_to_restore: a dict mapping `node_id` to the `trackable` to be restored."} +{"repo": "tensorflow", "function": "def __init__(self, learning_rate: Union[float, Callable[[], float]]=0.001, beta_1: float=0.9, beta_2: float=0.999, epsilon: float=1e-07, lazy_adam: bool=True, sum_inside_sqrt: bool=True, use_gradient_accumulation: bool=True, clip_weight_min: Optional[float]=None, clip_weight_max: Optional[float]=None, weight_decay_factor: Optional[float]=None, multiply_weight_decay_factor_by_learning_rate: Optional[bool]=None, slot_variable_creation_fn: Optional[SlotVarCreationFnType]=None, clipvalue: Optional[ClipValueType]=None, low_dimensional_packing_status: bool=False):\n super(Adam, self).__init__(learning_rate, use_gradient_accumulation, clip_weight_min, clip_weight_max, weight_decay_factor, multiply_weight_decay_factor_by_learning_rate, clipvalue, slot_variable_creation_fn, low_dimensional_packing_status)\n if beta_1 < 0.0 or beta_1 >= 1.0:\n raise ValueError(f'Argument `beta_1` must be >= 0 and < 1. Received: {beta_1}.')\n if beta_2 < 0.0 or beta_2 >= 1.0:\n raise ValueError(f'Argument `beta_2` must be >= 0 and < 1. Received: {beta_2}.')\n if epsilon <= 0.0:\n raise ValueError('epsilon must be positive; got {}.'.format(epsilon))\n if not use_gradient_accumulation and (not lazy_adam):\n raise ValueError('When disabling lazy Adam (`lazy_adam=False`), gradient accumulation must be used. Set `use_gradient_accumulation` to True.')\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.lazy_adam = lazy_adam\n self.sum_inside_sqrt = sum_inside_sqrt", "docstring": "Optimization parameters for Adam.\n\nSee 'tensorflow/core/protobuf/tpu/optimization_parameters.proto' for a\ncomplete description of these parameters and their impacts on the optimizer\nalgorithm.\n\nArgs:\n learning_rate: The learning rate. It should be a floating point value or a\n callable taking no arguments for a dynamic learning rate.\n beta_1: A float value. The exponential decay rate for the 1st moment\n estimates.\n beta_2: A float value. The exponential decay rate for the 2nd moment\n estimates.\n epsilon: A small constant for numerical stability.\n lazy_adam: Use lazy Adam instead of Adam. 
Lazy Adam trains faster.\n sum_inside_sqrt: When this is true, the Adam update formula is changed\n from `m / (sqrt(v) + epsilon)` to `m / sqrt(v + epsilon**2)`. This\n option improves the performance of TPU training and is not expected to\n harm model quality.\n use_gradient_accumulation: Setting this to `False` makes embedding\n gradients calculation less accurate but faster.\n clip_weight_min: the minimum value to clip by; None means -infinity.\n clip_weight_max: the maximum value to clip by; None means +infinity.\n weight_decay_factor: amount of weight decay to apply; None means that the\n weights are not decayed.\n multiply_weight_decay_factor_by_learning_rate: if true,\n `weight_decay_factor` is multiplied by the current learning rate.\n slot_variable_creation_fn: If you wish to directly control the creation of\n the slot variables, set this to a callable taking three parameters: a\n table variable, a list of slot names to create for it, and a list of\n initializers. This function should return a dict with the slot names as\n keys and the created variables as values with types matching the table\n variable. When set to None (the default), uses the built-in variable\n creation.\n clipvalue: Controls clipping of the gradient. Set to either a single\n positive scalar value to get clipping or a tuple of scalar values (min,\n max) to set a separate maximum or minimum. If one of the two entries is\n None, then there will be no clipping in that direction.\n low_dimensional_packing_status: Status of the low-dimensional embedding\n packing optimization controls whether to optimize the packing of\n 1-dimensional, 2-dimensional, and 4-dimensional embedding tables in\n memory."} +{"repo": "transformers", "function": "def dummy_inputs(self):\n if self.config.use_lang_emb and self.config.n_langs > 1:\n return {'input_ids': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32), 'langs': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32)}\n else:\n return {'input_ids': tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int32)}", "docstring": "Dummy inputs to build the network.\n\nReturns:\n tf.Tensor with dummy inputs"} +{"repo": "tf-quant-finance", "function": "def __sub__(self, period_tensor):\n return self + periods.PeriodTensor(-period_tensor.quantity(), period_tensor.period_type())", "docstring": "Subtracts a tensor of periods.\n\nWhen subtracting months or years, the resulting day of the month is\ndecreased to the largest valid value if necessary. E.g. 
31.03.2020 - 1 month\n= 29.02.2020, 29.02.2020 - 1 year = 28.02.2019.\n\nArgs:\n period_tensor: a PeriodTensor object broadcastable to the shape of \"self\".\n\nReturns:\n The new instance of DateTensor."} +{"repo": "transformers", "function": "def resize(self, image, size, resample=None, default_to_square=True, max_size=None):\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n self._ensure_format_supported(image)\n if not isinstance(image, PIL.Image.Image):\n image = self.to_pil_image(image)\n if isinstance(size, list):\n size = tuple(size)\n if isinstance(size, int) or len(size) == 1:\n if default_to_square:\n size = (size, size) if isinstance(size, int) else (size[0], size[0])\n else:\n width, height = image.size\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size if isinstance(size, int) else size[0]\n if short == requested_new_short:\n return image\n new_short, new_long = (requested_new_short, int(requested_new_short * long / short))\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(f'max_size = {max_size} must be strictly greater than the requested size for the smaller edge size = {size}')\n if new_long > max_size:\n new_short, new_long = (int(max_size * new_short / new_long), max_size)\n size = (new_short, new_long) if width <= height else (new_long, new_short)\n return image.resize(size, resample=resample)", "docstring": "Resizes `image`. Enforces conversion of input to PIL.Image.\n\nArgs:\n image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):\n The image to resize.\n size (`int` or `Tuple[int, int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be\n matched to this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to\n this number. i.e., if height > width, then image will be rescaled to (size * height / width, size).\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to use for resampling.\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a\n square (`size`,`size`). If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*, defaults to `None`):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is\n greater than `max_size` after being resized according to `size`, then the image is resized again so\n that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e. the smaller\n edge may be shorter than `size`. 
Only used if `default_to_square` is `False`.\n\nReturns:\n image: A resized `PIL.Image.Image`."} +{"repo": "keras", "function": "class MinMaxNorm(Constraint):\n\n def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):\n self.min_value = min_value\n self.max_value = max_value\n self.rate = rate\n self.axis = axis\n\n def __call__(self, w):\n w = backend.convert_to_tensor(w)\n norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))\n desired = self.rate * ops.clip(norms, self.min_value, self.max_value) + (1 - self.rate) * norms\n return w * (desired / (backend.epsilon() + norms))\n\n def get_config(self):\n return {'min_value': self.min_value, 'max_value': self.max_value, 'rate': self.rate, 'axis': self.axis}", "docstring": "MinMaxNorm weight constraint.\n\nConstrains the weights incident to each hidden unit\nto have the norm between a lower bound and an upper bound.\n\nArgs:\n min_value: the minimum norm for the incoming weights.\n max_value: the maximum norm for the incoming weights.\n rate: rate for enforcing the constraint: weights will be\n rescaled to yield\n `(1 - rate) * norm + rate * norm.clip(min_value, max_value)`.\n Effectively, this means that rate=1.0 stands for strict\n enforcement of the constraint, while rate<1.0 means that\n weights will be rescaled at each step to slowly move\n towards a value inside the desired interval.\n axis: integer, axis along which to calculate weight norms.\n For instance, in a `Dense` layer the weight matrix\n has shape `(input_dim, output_dim)`,\n set `axis` to `0` to constrain each weight vector\n of length `(input_dim,)`.\n In a `Conv2D` layer with `data_format=\"channels_last\"`,\n the weight tensor has shape\n `(rows, cols, input_depth, output_depth)`,\n set `axis` to `[0, 1, 2]`\n to constrain the weights of each filter tensor of size\n `(rows, cols, input_depth)`."} +{"repo": "pytype", "function": "def analyze(self) -> Sequence[_HasReturnT]:", "docstring": "Calls every signature of this function with appropriate fake arguments.\n\nReturns:\n A sequence of objects with information about the result of calling the\n function with each of its signatures, with get_return_value() methods\n that retrieve the return values."} +{"repo": "transformers", "function": "def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]]=None) -> 'torch.Tensor':\n class_queries_logits = outputs.class_queries_logits\n masks_queries_logits = outputs.masks_queries_logits\n masks_queries_logits = torch.nn.functional.interpolate(masks_queries_logits, size=(384, 384), mode='bilinear', align_corners=False)\n masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]\n masks_probs = masks_queries_logits.sigmoid()\n segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)\n batch_size = class_queries_logits.shape[0]\n if target_sizes is not None:\n if batch_size != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n semantic_segmentation = []\n for idx in range(batch_size):\n resized_logits = torch.nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n semantic_map = resized_logits[0].argmax(dim=0)\n semantic_segmentation.append(semantic_map)\n else:\n semantic_segmentation = segmentation.argmax(dim=1)\n semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n return semantic_segmentation", 
"docstring": "Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports\nPyTorch.\n\nArgs:\n outputs ([`Mask2FormerForUniversalSegmentation`]):\n Raw outputs of the model.\n target_sizes (`List[Tuple[int, int]]`, *optional*):\n List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested\n final size (height, width) of each prediction. If left to None, predictions will not be resized.\nReturns:\n `List[torch.Tensor]`:\n A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)\n corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each\n `torch.Tensor` correspond to a semantic class id."} +{"repo": "genai-processors", "function": "def get_dataclass(self, json_dataclass: type[T]) -> T:\n if not mime_types.is_dataclass(self.mimetype):\n raise ValueError('Part is not a dataclass.')\n try:\n return json_dataclass.from_json(self.text)\n except AttributeError as e:\n raise ValueError(f'{json_dataclass.__name__} is not a valid json dataclass') from e", "docstring": "Returns representation of the Part as a given dataclass.\n\nArgs:\n json_dataclass: A dataclass that can be converted to/from JSON.\n\nReturns:\n The dataclass representation of the Part."} +{"repo": "tensorflow", "function": "def map_structure(func, *structure, **kwargs):\n return nest_util.map_structure(nest_util.Modality.CORE, func, *structure, **kwargs)", "docstring": "Creates a new structure by applying `func` to each atom in `structure`.\n\nRefer to [tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest)\nfor the definition of a structure.\n\nApplies `func(x[0], x[1], ...)` where x[i] enumerates all atoms in\n`structure[i]`. All items in `structure` must have the same arity,\nand the return value will contain results with the same structure layout.\n\nExamples:\n\n* A single Python dict:\n\n>>> a = {\"hello\": 24, \"world\": 76}\n>>> tf.nest.map_structure(lambda p: p * 2, a)\n{'hello': 48, 'world': 152}\n\n* Multiple Python dictionaries:\n\n>>> d1 = {\"hello\": 24, \"world\": 76}\n>>> d2 = {\"hello\": 36, \"world\": 14}\n>>> tf.nest.map_structure(lambda p1, p2: p1 + p2, d1, d2)\n{'hello': 60, 'world': 90}\n\n* A single Python list:\n\n>>> a = [24, 76, \"ab\"]\n>>> tf.nest.map_structure(lambda p: p * 2, a)\n[48, 152, 'abab']\n\n* Scalars:\n\n>>> tf.nest.map_structure(lambda x, y: x + y, 3, 4)\n7\n\n* Empty structures:\n\n>>> tf.nest.map_structure(lambda x: x + 1, ())\n()\n\n* Check the types of iterables:\n\n>>> s1 = (((1, 2), 3), 4, (5, 6))\n>>> s1_list = [[[1, 2], 3], 4, [5, 6]]\n>>> tf.nest.map_structure(lambda x, y: None, s1, s1_list)\nTraceback (most recent call last):\n...\nTypeError: The two structures don't have the same nested structure\n\n* Type check is set to False:\n\n>>> s1 = (((1, 2), 3), 4, (5, 6))\n>>> s1_list = [[[1, 2], 3], 4, [5, 6]]\n>>> tf.nest.map_structure(lambda x, y: None, s1, s1_list, check_types=False)\n(((None, None), None), None, (None, None))\n\nArgs:\n func: A callable that accepts as many arguments as there are structures.\n *structure: atom or nested structure.\n **kwargs: Valid keyword args are:\n * `check_types`: If set to `True` (default) the types of iterables within\n the structures have to be same (e.g. `map_structure(func, [1], (1,))`\n raises a `TypeError` exception). To allow this set this argument to\n `False`. 
Note that namedtuples with identical name and fields are always\n considered to have the same shallow structure.\n * `expand_composites`: If set to `True`, then composite tensors such as\n `tf.sparse.SparseTensor` and `tf.RaggedTensor` are expanded into their\n component tensors. If `False` (the default), then composite tensors are\n not expanded.\n\nReturns:\n A new structure with the same arity as `structure[0]`, whose atoms\n correspond to `func(x[0], x[1], ...)` where `x[i]` is the atom in the\n corresponding location in `structure[i]`. If there are different structure\n types and `check_types` is `False` the structure types of the first\n structure will be used.\n\nRaises:\n TypeError: If `func` is not callable or if the structures do not match\n each other by depth tree.\n ValueError: If no structure is provided or if the structures do not match\n each other by type.\n ValueError: If wrong keyword arguments are provided."} +{"repo": "tensorflow", "function": "def build_graph(device, input_shape, output_sizes, axis):\n with ops.device('/%s:0' % device):\n inp = array_ops.zeros(input_shape)\n outputs = []\n for _ in range(100):\n outputs.extend(array_ops.split(inp, output_sizes, axis))\n return control_flow_ops.group(*outputs)", "docstring": "Build a graph containing a sequence of split operations.\n\nArgs:\n device: string, the device to run on.\n input_shape: shape of the input tensor.\n output_sizes: size of each output along axis.\n axis: axis to be split along.\n\nReturns:\n An array of tensors to run()"} +{"repo": "tensorflow", "function": "def _AssertProtoEquals(self, a, b, msg=None, relative_tolerance=None):\n if not compare.ProtoEq(a, b):\n compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg, relative_tolerance=relative_tolerance)", "docstring": "Asserts that a and b are the same proto.\n\nUses ProtoEq() first, as it returns correct results\nfor floating point attributes, and then use assertProtoEqual()\nin case of failure as it provides good error messages.\n\nArgs:\n a: a proto.\n b: another proto.\n msg: Optional message to report on failure.\n relative_tolerance: float. The allowable difference between the two values\n being compared is determined by multiplying the relative tolerance by\n the maximum of the two values. If this is not provided, then all floats\n are compared using string comparison."} +{"repo": "tensorflow", "function": "def pmap(f, axis_name=None, devices=None):\n if devices is None:\n devices = accelerators()\n if not isinstance(devices, (list, tuple)):\n raise ValueError('Must pass a list or tuple of devices')\n num_devices = len(devices)\n if not num_devices:\n raise ValueError('There must be at least 1 device')\n has_tpu = bool(tpu_devices(devices))\n pmap_fn = _get_pmap_impl(f, devices, has_tpu)\n\n def wrapper(*args):\n \"\"\"Wrapper that wraps/unwraps args, retvals, and runs the function.\"\"\"\n if _pmap_config.devices() is not None:\n raise ValueError('Found a surrounding pmap. 
Nested pmap is not supported yet.')\n flattened_input_args = nest.flatten(args)\n flattened_per_device_args = [[] for _ in devices]\n for arg in flattened_input_args:\n if isinstance(arg, tensor_lib.Tensor):\n if not arg.shape.rank or arg.shape[0] != len(devices):\n raise ValueError('Input tensors need to have a first dimension equal to the number of devices; got tensor of shape %s and %s devices' % (arg.shape, len(devices)))\n for j, device in enumerate(devices):\n updated_arg = array_ops.gather_v2(arg, j)\n if not has_tpu:\n with ops.device(device):\n updated_arg = array_ops.identity(updated_arg)\n flattened_per_device_args[j].append(updated_arg)\n elif isinstance(arg, ShardedNdArray):\n for device_args, tensor in zip(flattened_per_device_args, arg.tensors):\n device_args.append(tensor)\n else:\n for device_args in flattened_per_device_args:\n device_args.append(arg)\n all_per_device_args = [nest.pack_sequence_as(args, device_args) for device_args in flattened_per_device_args]\n with pmap_config(axis_name, devices):\n results = pmap_fn(all_per_device_args)\n flattened_results = [nest.flatten(result) for result in results]\n final_tree = []\n for i in range(len(flattened_results[0])):\n tensors = []\n for j, device in enumerate(devices):\n assert isinstance(flattened_results[j][i], tensor_lib.Tensor), 'currently only tensor return items are supported'\n tensors.append(flattened_results[j][i])\n final_tree.append(ShardedNdArray(tensors))\n return nest.pack_sequence_as(results[0], final_tree)\n return wrapper", "docstring": "Transforms a function into a multi-device function.\n\nThe semantics are similar to JAX's pmap.\n\nArgs:\n f: The function to be converted.\n axis_name: Used for nested pmap, which is not supported yet.\n devices: The devices over which the returned function will run.\n\nReturns:\n A function that runs the underlying function `f` on `devices`. Its arguments\n can be `ShardedNdArray`s, tensors or other Python objects, and its return\n values are all `ShardedNdArray`s. If an input is a tensor, the length of its\n first dimension must equal the number of devices, and the tensor will be\n split along its first dimension among the devices. If an input is an\n unknown Python object, it will be replicated among the devices."} +{"repo": "pyglove", "function": "def use_value_spec(self, value_spec: Optional[pg_typing.Dict], allow_partial: bool=False) -> 'Dict':\n if value_spec is None:\n self._value_spec = None\n self._accessor_writable = True\n return self\n if not isinstance(value_spec, pg_typing.Dict):\n raise ValueError(self._error_message(f'Value spec for dict must be a `pg.typing.Dict` object. Encountered: {value_spec!r}'))\n if self._value_spec and self._value_spec != value_spec:\n raise RuntimeError(self._error_message(f'Dict is already bound with a different value spec: {self._value_spec}. 
New value spec: {value_spec}.'))\n self._allow_partial = allow_partial\n if flags.is_type_check_enabled():\n value_spec.apply(self, allow_partial=base.accepts_partial(self), child_transform=base.symbolic_transform_fn(self._allow_partial), root_path=self.sym_path)\n else:\n self._value_spec = value_spec\n return self", "docstring": "Applies a ``pg.typing.Dict`` as the value spec for current dict.\n\nArgs:\n value_spec: A Dict ValueSpec to apply to this Dict.\n If current Dict is schema-less (whose immediate members are not\n validated against schema), and `value_spec` is not None, the value spec\n will be applied to the Dict.\n Or else if current Dict is already symbolic (whose immediate members\n are under the constraint of a Dict value spec), and `value_spec` is\n None, current Dict will become schema-less. However, the schema\n constraints for non-immediate members will remain.\n allow_partial: Whether allow partial dict based on the schema. This flag\n will override allow_partial flag in __init__ for spec-less Dict.\n\nReturns:\n Self.\n\nRaises:\n ValueError: validation failed due to value error.\n RuntimeError: Dict is already bound with another spec.\n TypeError: type errors during validation.\n KeyError: key errors during validation."} +{"repo": "tensorflow", "function": "class InputLayer(base_layer.Layer):\n\n def __init__(self, input_shape=None, batch_size=None, dtype=None, input_tensor=None, sparse=None, name=None, ragged=None, type_spec=None, **kwargs):\n self._init_input_shape = input_shape\n self._init_batch_size = batch_size\n self._init_dtype = dtype\n self._init_sparse = sparse\n self._init_ragged = ragged\n self._init_type_spec = type_spec\n strategy = distribute_lib.get_strategy()\n if strategy and batch_size is not None and distributed_training_utils.global_batch_size_supported(strategy):\n if batch_size % strategy.num_replicas_in_sync != 0:\n raise ValueError('The `batch_size` argument ({}) must be divisible by the number of replicas ({})'.format(batch_size, strategy.num_replicas_in_sync))\n batch_size = batch_size // strategy.num_replicas_in_sync\n if 'batch_input_shape' in kwargs:\n batch_input_shape = kwargs.pop('batch_input_shape')\n if input_shape and batch_input_shape:\n raise ValueError('Only provide the input_shape OR batch_input_shape argument to InputLayer, not both at the same time.')\n if batch_input_shape:\n batch_size = batch_input_shape[0]\n input_shape = batch_input_shape[1:]\n if kwargs:\n raise ValueError('Unrecognized keyword arguments:', kwargs.keys())\n if sparse and ragged:\n raise ValueError('Cannot set both sparse and ragged to True in a Keras input.')\n if not name:\n prefix = 'input'\n name = prefix + '_' + str(backend.get_uid(prefix))\n if not dtype:\n if input_tensor is None:\n dtype = backend.floatx()\n else:\n dtype = backend.dtype(input_tensor)\n elif input_tensor is not None and input_tensor.dtype != dtype:\n raise ValueError('`input_tensor.dtype` differs from `dtype`: %s vs. 
%s' % (input_tensor.dtype, dtype))\n super(InputLayer, self).__init__(dtype=dtype, name=name)\n self.built = True\n self.sparse = True if sparse else False\n self.ragged = True if ragged else False\n self.batch_size = batch_size\n self.supports_masking = True\n if isinstance(input_shape, tensor_shape.TensorShape):\n input_shape = tuple(input_shape.as_list())\n elif isinstance(input_shape, int):\n input_shape = (input_shape,)\n if type_spec is not None:\n args_that_must_be_none = [('(input_)shape', self._init_input_shape), ('batch_size', self._init_batch_size), ('dtype', self._init_dtype), ('input_tensor', input_tensor), ('sparse', self._init_sparse), ('ragged', self._init_ragged)]\n for arg_name, arg in args_that_must_be_none:\n _assert_other_arg_none(arg_name, arg)\n if not ops.executing_eagerly_outside_functions():\n raise ValueError('Creating Keras inputs from a type_spec is only supported when eager execution is enabled.')\n input_tensor = keras_tensor.keras_tensor_from_type_spec(type_spec)\n if isinstance(input_tensor, keras_tensor.SparseKerasTensor):\n self.sparse = True\n if isinstance(input_tensor, keras_tensor.RaggedKerasTensor):\n self.ragged = True\n self.is_placeholder = True\n try:\n self._batch_input_shape = tuple(input_tensor.shape.as_list())\n except ValueError:\n self._batch_input_shape = None\n elif input_tensor is None:\n if input_shape is not None:\n batch_input_shape = (batch_size,) + tuple(input_shape)\n else:\n batch_input_shape = None\n graph = backend.get_graph()\n with graph.as_default():\n input_tensor = backend.placeholder(shape=batch_input_shape, dtype=dtype, name=self.name, sparse=sparse, ragged=ragged)\n self.is_placeholder = True\n self._batch_input_shape = batch_input_shape\n else:\n if ops.executing_eagerly_outside_functions():\n if not isinstance(input_tensor, keras_tensor.KerasTensor):\n input_tensor = keras_tensor.keras_tensor_from_tensor(input_tensor)\n elif not tf_utils.is_symbolic_tensor(input_tensor):\n raise ValueError('You should not pass an EagerTensor to `Input`. 
For example, instead of creating an InputLayer, you should instantiate your model and directly call it on your input.')\n self.is_placeholder = False\n try:\n self._batch_input_shape = tuple(input_tensor.shape.as_list())\n except ValueError:\n self._batch_input_shape = None\n input_tensor._keras_mask = None\n node_module.Node(layer=self, outputs=input_tensor)\n if isinstance(input_tensor, keras_tensor.KerasTensor) or tf_utils.is_extension_type(input_tensor):\n self._type_spec = input_tensor._type_spec\n else:\n self._type_spec = tensor_spec.TensorSpec(shape=input_tensor.shape, dtype=input_tensor.dtype, name=self.name)\n\n def get_config(self):\n if self._init_type_spec is not None:\n config = {'name': self.name, 'type_spec': self._init_type_spec}\n else:\n config = {'batch_input_shape': self._batch_input_shape, 'dtype': self.dtype, 'sparse': self.sparse, 'ragged': self.ragged, 'name': self.name}\n return config\n\n @property\n def _trackable_saved_model_saver(self):\n return layer_serialization.InputLayerSavedModelSaver(self)", "docstring": "Layer to be used as an entry point into a Network (a graph of layers).\n\nIt can either wrap an existing tensor (pass an `input_tensor` argument)\nor create a placeholder tensor (pass arguments `input_shape`, and\noptionally, `dtype`).\n\nIt is generally recommended to use the functional layer API via `Input`\n(which creates an `InputLayer`) without directly using `InputLayer`.\n\nWhen using InputLayer with Keras Sequential model, it can be skipped by\nmoving the input_shape parameter to the first layer after the InputLayer.\n\nThis class can create placeholders for tf.Tensors, tf.SparseTensors, and\ntf.RaggedTensors by choosing 'sparse=True' or 'ragged=True'. Note that\n'sparse' and 'ragged' can't be configured to True at the same time.\nUsage:\n\n```python\n# With explicit InputLayer.\nmodel = tf.keras.Sequential([\n tf.keras.layers.InputLayer(input_shape=(4,)),\n tf.keras.layers.Dense(8)])\nmodel.compile(tf.optimizers.RMSprop(0.001), loss='mse')\nmodel.fit(np.zeros((10, 4)),\n np.ones((10, 8)))\n\n# Without InputLayer, letting the first layer have the input_shape.\n# Keras will add an input for the model behind the scenes.\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(8, input_shape=(4,))])\nmodel.compile(tf.optimizers.RMSprop(0.001), loss='mse')\nmodel.fit(np.zeros((10, 4)),\n np.ones((10, 8)))\n```\n\nArgs:\n input_shape: Shape tuple (not including the batch axis), or `TensorShape`\n instance (not including the batch axis).\n batch_size: Optional input batch size (integer or None).\n dtype: Optional datatype of the input. When not provided, the Keras\n default float type will be used.\n input_tensor: Optional tensor to use as layer input. If set, the layer\n will use the `tf.TypeSpec` of this tensor rather\n than creating a new placeholder tensor.\n sparse: Boolean, whether the placeholder created is meant to be sparse.\n Defaults to False.\n ragged: Boolean, whether the placeholder created is meant to be ragged.\n In this case, values of 'None' in the 'shape' argument represent\n ragged dimensions. For more information about RaggedTensors, see\n [this guide](https://www.tensorflow.org/guide/ragged_tensors).\n Defaults to False.\n type_spec: A `tf.TypeSpec` object to create Input from. This `tf.TypeSpec`\n represents the entire batch. 
When provided, all other args except\n name must be None.\n name: Optional name of the layer (string)."} +{"repo": "etils", "function": "class DType(abc.ABC):\n name: str\n array_cls_name: str\n\n @classmethod\n def from_value(cls, value: Any) -> DType:\n \"\"\"Convert the value to dtype.\"\"\"\n if value is None:\n return AnyDType()\n elif isinstance(value, DType):\n return value\n elif numpy_utils.lazy.is_dtype(value):\n return NpDType(numpy_utils.lazy.as_dtype(value))\n elif value in _STD_TYPE_TO_DTYPE:\n return _STD_TYPE_TO_DTYPE[value]\n else:\n raise TypeError(f'Unsupported dtype: {value!r}')\n\n def asarray(self, array_like, *, xnp: numpy_utils.NpModule, casting: Union[Casting, str]=Casting.ALL):\n \"\"\"Creates an `xnp.ndarray` from the `array_like`.\n\n Args:\n array_like: Any array-like\n xnp: Target numpy module\n casting: If `NONE`, prevent casting.\n\n Returns:\n array: The xnp array.\n \"\"\"\n casting = Casting(casting)\n from_dtype = numpy_utils.lazy.dtype_from_array(array_like, strict=False)\n to_dtype = self._get_target_dtype(from_dtype)\n if casting == casting.NONE:\n if to_dtype is None:\n pass\n elif from_dtype is None:\n pass\n elif from_dtype != to_dtype:\n raise ValueError(f'Cannot cast {from_dtype} to {to_dtype} (casting={casting}).')\n elif casting == casting.ALL:\n pass\n else:\n raise NotImplementedError(f'Unsupported casting {casting}')\n if to_dtype is None:\n dtype_kwargs = {}\n else:\n dtype_kwargs = {'dtype': numpy_utils.lazy.as_dtype(to_dtype, xnp=xnp)}\n if isinstance(array_like, np.ndarray) and array_like.shape == ():\n if not dtype_kwargs:\n dtype_kwargs = {'dtype': numpy_utils.lazy.as_dtype(array_like.dtype, xnp=xnp)}\n array_like = array_like.item()\n arr = xnp.asarray(array_like, **dtype_kwargs)\n return arr\n\n @abc.abstractmethod\n def _get_target_dtype(self, from_dtype: Optional[_NpDType]) -> Optional[_NpDType]:\n \"\"\"Validate and normalize the numpy dtype.\n\n Args:\n from_dtype: DType of the array to cast\n\n Returns:\n to_dtype: DType of the array after casting\n \"\"\"\n\n @abc.abstractmethod\n def __eq__(self, other) -> bool:\n raise NotImplementedError\n\n @abc.abstractmethod\n def __hash__(self) -> int:\n raise NotImplementedError\n\n def __repr__(self) -> str:\n return f'DType({self.name})'", "docstring": "DType wrapper.\n\nThis allows supporting more complex types, like dtype unions.\n\nThis is EXPERIMENTAL, so the API might change.\n\nAttributes:\n name: Representation name (e.g. np.uint8, AnyFloat...)\n array_cls_name: Name of the array class associated with the dtype (`f32`,\n `ui8`,...)."} +{"repo": "keras", "function": "def on_test_end(self, logs=None):", "docstring": "Called at the end of evaluation or validation.\n\nSubclasses should override for any actions to run.\n\nArgs:\n logs: Dict. 
Currently the output of the last call to\n `on_test_batch_end()` is passed to this argument for this method\n but that may change in the future."} +{"repo": "tensorflow", "function": "def train_step(self, data):\n data = data_adapter.expand_1d(data)\n x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)\n with backprop.GradientTape() as tape:\n y_pred = self(x, training=True)\n loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)\n self.optimizer.minimize(loss, self.trainable_variables, tape=tape)\n self.compiled_metrics.update_state(y, y_pred, sample_weight)\n return_metrics = {}\n for metric in self.metrics:\n result = metric.result()\n if isinstance(result, dict):\n return_metrics.update(result)\n else:\n return_metrics[metric.name] = result\n return return_metrics", "docstring": "The logic for one training step.\n\nThis method can be overridden to support custom training logic.\nFor concrete examples of how to override this method see\n[Customizing what happens in\nfit](https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit).\nThis method is called by `Model.make_train_function`.\n\nThis method should contain the mathematical logic for one step of training.\nThis typically includes the forward pass, loss calculation, backpropagation,\nand metric updates.\n\nConfiguration details for *how* this logic is run (e.g. `tf.function` and\n`tf.distribute.Strategy` settings), should be left to\n`Model.make_train_function`, which can also be overridden.\n\nArgs:\n data: A nested structure of `Tensor`s.\n\nReturns:\n A `dict` containing values that will be passed to\n `tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the\n values of the `Model`'s metrics are returned. Example:\n `{'loss': 0.2, 'accuracy': 0.7}`."} +{"repo": "tensorflow", "function": "def __call__(self, **kwargs):\n if len(kwargs) != len(self._inputs):\n raise ValueError('Invalid number of inputs provided for running a SignatureDef, expected %s vs provided %s' % (len(self._inputs), len(kwargs)))\n for input_name, value in kwargs.items():\n if input_name not in self._inputs:\n raise ValueError('Invalid Input name (%s) for SignatureDef' % input_name)\n self._interpreter_wrapper.ResizeInputTensor(self._inputs[input_name], np.array(value.shape, dtype=np.int32), False, self._subgraph_index)\n self._interpreter_wrapper.AllocateTensors(self._subgraph_index)\n for input_name, value in kwargs.items():\n self._interpreter_wrapper.SetTensor(self._inputs[input_name], value, self._subgraph_index)\n self._interpreter_wrapper.Invoke(self._subgraph_index)\n result = {}\n for output_name, output_index in self._outputs:\n result[output_name] = self._interpreter_wrapper.GetTensor(output_index, self._subgraph_index)\n return result", "docstring": "Runs the SignatureDef given the provided inputs in arguments.\n\nArgs:\n **kwargs: key,value for inputs to the model. Key is the SignatureDef input\n name. 
Value is numpy array with the value.\n\nReturns:\n dictionary of the results from the model invoke.\n Key in the dictionary is SignatureDef output name.\n Value is the result Tensor."} +{"repo": "transformers", "function": "def read_video_opencv(video_path: str, sample_indices_fn: Callable, **kwargs):\n requires_backends(read_video_opencv, ['cv2'])\n import cv2\n video = cv2.VideoCapture(video_path)\n total_num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n video_fps = video.get(cv2.CAP_PROP_FPS)\n duration = total_num_frames / video_fps if video_fps else 0\n metadata = VideoMetadata(total_num_frames=int(total_num_frames), fps=float(video_fps), duration=float(duration), video_backend='opencv')\n indices = sample_indices_fn(metadata=metadata, **kwargs)\n index = 0\n frames = []\n while video.isOpened():\n success, frame = video.read()\n if not success:\n break\n if index in indices:\n height, width, channel = frame.shape\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frames.append(frame[0:height, 0:width, 0:channel])\n if success:\n index += 1\n if index >= total_num_frames:\n break\n video.release()\n metadata.frames_indices = indices\n return (np.stack(frames), metadata)", "docstring": "Decode a video using the OpenCV backend.\n\nArgs:\n video_path (`str`):\n Path to the video file.\n sample_indices_fn (`Callable`):\n A callable function that will return indices at which the video should be sampled. If the video has to be loaded\n by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`.\n If not provided, simple uniform sampling with fps is performed.\n Example:\n def sample_indices_fn(metadata, **kwargs):\n return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int)\n\nReturns:\n Tuple[`np.array`, `VideoMetadata`]: A tuple containing:\n - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]).\n - `VideoMetadata` object."} +{"repo": "beam", "function": "def __init__(self, columns: list[str], min_value: float=0.0, max_value: float=1.0, name: Optional[str]=None):\n super().__init__(columns)\n self.min_value = min_value\n self.max_value = max_value\n self.name = name\n if self.max_value <= self.min_value:\n raise ValueError('max_value must be greater than min_value')", "docstring": "This function applies a scaling transformation on the given columns\nof incoming data. 
The transformation scales the input values to the\nrange [min_value, max_value].\n\nArgs:\n columns: A list of column names to apply the transformation on.\n min_value: The minimum value of the output range.\n max_value: The maximum value of the output range.\n name: A name for the operation (optional)."} +{"repo": "transformers", "function": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache,\n to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`torch.Tensor`):\n Batch size."} +{"repo": "transformers", "function": "def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy]=True, pad_to_multiple_of: Optional[int]=2, max_length: Optional[int]=None, truncation: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, sampling_rate: Optional[int]=None, return_attention_mask: Optional[bool]=None, do_normalize_per_mel_bins: Optional[bool]=True, **kwargs) -> BatchFeature:\n if sampling_rate is not None:\n if sampling_rate != self.sampling_rate:\n raise ValueError(f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}.')\n else:\n logger.warning(f'It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. 
Failing to do so can result in silent errors that might be hard to debug.')\n return_attention_mask = return_attention_mask if return_attention_mask is not None else self.return_attention_mask\n is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1\n if is_batched_numpy and len(raw_speech.shape) > 3:\n raise ValueError(f'Only mono-channel or stereo-channel audio is supported for input to {self}')\n acceptable_types = (torch.Tensor, np.ndarray, tuple, list) if is_torch_available() else (np.ndarray, tuple, list)\n is_batched = is_batched_numpy or (isinstance(raw_speech, (list, tuple)) and isinstance(raw_speech[0], acceptable_types))\n if is_batched:\n raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]\n elif not is_batched and (not isinstance(raw_speech, np.ndarray)):\n raw_speech = np.asarray(raw_speech, dtype=np.float32)\n elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):\n raw_speech = raw_speech.astype(np.float32)\n if not is_batched:\n raw_speech = [raw_speech]\n features = [self._extract_fbank_features(waveform) for waveform in raw_speech]\n if do_normalize_per_mel_bins:\n features = [(x - np.expand_dims(x.mean(0), 0)) / np.sqrt(np.expand_dims(x.var(0, ddof=1), 0) + 1e-07) for x in features]\n encoded_inputs = BatchFeature({'input_features': features})\n padded_inputs = self.pad(encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, return_tensors='np')\n input_features = padded_inputs.get('input_features')\n attention_mask = padded_inputs.pop('attention_mask')\n batch_size, num_frames, num_channels = input_features.shape\n remainder = num_frames % self.stride\n if remainder != 0:\n input_features = input_features[:, :num_frames - remainder, :]\n attention_mask = attention_mask[:, :num_frames - remainder]\n input_features = np.reshape(input_features, (batch_size, num_frames // self.stride, num_channels * self.stride))\n indices = np.arange(0, num_frames - remainder)\n attention_mask = attention_mask[:, indices % self.stride == 1]\n padded_inputs['input_features'] = input_features\n if return_attention_mask:\n padded_inputs['attention_mask'] = attention_mask\n if return_tensors is not None:\n padded_inputs = padded_inputs.convert_to_tensors(return_tensors)\n return padded_inputs", "docstring": "Main method to featurize and prepare for the model one or several sequence(s).\n\nArgs:\n raw_speech (`np.ndarray`, `torch.Tensor`, `List[float]`, `List[np.ndarray]`, `List[torch.Tensor]`,\n `List[List[float]]`, `List[List[List[float]]]`):\n The sequence or batch of sequences to be padded. Each sequence can be a numpy array,\n a torch tensor, a list of float values, a list of numpy arrays, a list of torch tensors,\n a list of list of float values or a list of a list of list of float values.\n If `raw_speech` is a one-dimensional `np.ndarray`, `torch.Tensor` or a `List[float]`, `raw_speech` is\n considered a single-channel, single-sample sound. In all other cases, the first dimension of\n `raw_speech`, whether from an `np.ndarray`, a `torch.Tensor` or a `List[...]`,\n corresponds to the number of samples in the batch, and the number of channels\n (i.e. 
mono or stereo character) is derived from the other dimensions\n (1D -> single-channel waveform batches; 2D -> stereo-channel waveform batches).\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding\n index) among:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence is provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n pad_to_multiple_of (`int`, *optional*, defaults to 2):\n If set, will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability\n `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n truncation (`bool`):\n Activates truncation to cut input sequences longer than *max_length* to *max_length*.\n return_attention_mask (`bool`, *optional*):\n Whether to return the attention mask. If left to the default, will return the attention mask according\n to the specific feature_extractor's default.\n\n [What are attention masks?](../glossary#attention-mask)\n\n \n\n For SeamlessM4T models, `attention_mask` should always be passed for batched inference, to avoid subtle\n bugs.\n\n \n\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n sampling_rate (`int`, *optional*):\n The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass\n `sampling_rate` at the forward call to prevent silent errors.\n do_normalize_per_mel_bins (`bool`, *optional*, defaults to `True`):\n Whether or not to zero-mean unit-variance normalize the input per mel-channel.\n kwargs (*optional*):\n Remaining dictionary of keyword arguments that will be passed to the tokenizer or the feature\n extractor."} +{"repo": "keras", "function": "def compute_iou(boxes1, boxes2, bounding_box_format, use_masking=False, mask_val=-1, image_shape=None):\n boxes1_rank = len(ops.shape(boxes1))\n boxes2_rank = len(ops.shape(boxes2))\n if boxes1_rank not in [2, 3]:\n raise ValueError(f'compute_iou() expects boxes1 to be batched, or to be unbatched. Received len(boxes1.shape)={boxes1_rank}, len(boxes2.shape)={boxes2_rank}. Expected either len(boxes1.shape)=2 or len(boxes1.shape)=3.')\n if boxes2_rank not in [2, 3]:\n raise ValueError(f'compute_iou() expects boxes2 to be batched, or to be unbatched. Received len(boxes1.shape)={boxes1_rank}, len(boxes2.shape)={boxes2_rank}. Expected either len(boxes2.shape)=2 or len(boxes2.shape)=3.')\n target_format = 'yxyx'\n if 'rel' in bounding_box_format and image_shape is None:\n raise ValueError(f'When using relative bounding box formats (e.g. 
`rel_yxyx`) the `image_shape` argument must be provided. Received `image_shape`: {image_shape}')\n if image_shape is None:\n height, width = (None, None)\n else:\n height, width, _ = image_shape\n boxes1 = converters.convert_format(boxes1, source=bounding_box_format, target=target_format, height=height, width=width)\n boxes2 = converters.convert_format(boxes2, source=bounding_box_format, target=target_format, height=height, width=width)\n intersect_area = _compute_intersection(boxes1, boxes2)\n boxes1_area = _compute_area(boxes1)\n boxes2_area = _compute_area(boxes2)\n boxes2_area_rank = len(boxes2_area.shape)\n boxes2_axis = 1 if boxes2_area_rank == 2 else 0\n boxes1_area = ops.expand_dims(boxes1_area, axis=-1)\n boxes2_area = ops.expand_dims(boxes2_area, axis=boxes2_axis)\n union_area = boxes1_area + boxes2_area - intersect_area\n res = ops.divide(intersect_area, union_area + backend.epsilon())\n if boxes1_rank == 2:\n perm = [1, 0]\n else:\n perm = [0, 2, 1]\n if not use_masking:\n return res\n mask_val_t = ops.cast(mask_val, res.dtype) * ops.ones_like(res)\n boxes1_mask = ops.less(ops.max(boxes1, axis=-1, keepdims=True), 0.0)\n boxes2_mask = ops.less(ops.max(boxes2, axis=-1, keepdims=True), 0.0)\n background_mask = ops.logical_or(boxes1_mask, ops.transpose(boxes2_mask, perm))\n iou_lookup_table = ops.where(background_mask, mask_val_t, res)\n return iou_lookup_table", "docstring": "Computes a lookup table vector containing the ious for a given set of boxes.\n\nThe lookup vector is to be indexed by [`boxes1_index`,`boxes2_index`] if\nboxes are unbatched and by [`batch`, `boxes1_index`,`boxes2_index`] if the\nboxes are batched.\n\nUsers can pass `boxes1` and `boxes2` of different ranks. For example:\n1) `boxes1`: [batch_size, M, 4], `boxes2`: [batch_size, N, 4] -> return\n [batch_size, M, N].\n2) `boxes1`: [batch_size, M, 4], `boxes2`: [N, 4] -> return\n [batch_size, M, N]\n3) `boxes1`: [M, 4], `boxes2`: [batch_size, N, 4] -> return\n [batch_size, M, N]\n4) `boxes1`: [M, 4], `boxes2`: [N, 4] -> return [M, N]\n\nArgs:\n boxes1: a list of bounding boxes in 'corners' format. Can be batched or\n unbatched.\n boxes2: a list of bounding boxes in 'corners' format. Can be batched or\n unbatched.\n bounding_box_format: a case-insensitive string which is one of `\"xyxy\"`,\n `\"rel_xyxy\"`, `\"xyWH\"`, `\"center_xyWH\"`, `\"yxyx\"`, `\"rel_yxyx\"`.\n For detailed information on the supported formats, see the\n KerasCV bounding box documentation.\n use_masking: whether masking will be applied. This will mask all\n `boxes1` or `boxes2` that have values less than 0 in all its 4\n dimensions. Defaults to `False`.\n mask_val: int used to mask the returned IOUs if masking is applied,\n defaults to -1.\n image_shape: `Tuple[int]`. 
The shape of the image (height, width, 3).\n When using a relative bounding box format for `bounding_box_format`, the\n `image_shape` is used for normalization.\n\nReturns:\n iou_lookup_table: a vector containing the pairwise ious of boxes1 and\n boxes2."} +{"repo": "tf-quant-finance", "function": "def equity_swap_price(rate_leg_coupon_rates, equity_leg_forward_prices, equity_leg_spots, rate_leg_notional, equity_leg_notional, rate_leg_daycount_fractions, rate_leg_discount_factors, equity_leg_discount_factors, equity_dividends=None, is_equity_receiver=None, dtype=None, name=None):\n name = name or 'equity_swap_price'\n with tf.name_scope(name):\n rate_leg_coupon_rates = tf.convert_to_tensor(rate_leg_coupon_rates, dtype=dtype, name='rate_leg_coupon_rates')\n dtype = dtype or rate_leg_coupon_rates.dtype\n equity_leg_forward_prices = tf.convert_to_tensor(equity_leg_forward_prices, dtype=dtype, name='equity_leg_forward_prices')\n equity_leg_spots = tf.convert_to_tensor(equity_leg_spots, dtype=dtype, name='equity_leg_spots')\n rate_leg_daycount_fractions = tf.convert_to_tensor(rate_leg_daycount_fractions, dtype=dtype, name='rate_leg_daycount_fractions')\n equity_dividends = equity_dividends or 0\n equity_dividends = tf.convert_to_tensor(equity_dividends, dtype=dtype, name='equity_dividends')\n rate_leg_notional = tf.convert_to_tensor(rate_leg_notional, dtype=dtype, name='rate_leg_notional')\n equity_leg_notional = tf.convert_to_tensor(equity_leg_notional, dtype=dtype, name='equity_leg_notional')\n rate_leg_discount_factors = tf.convert_to_tensor(rate_leg_discount_factors, dtype=dtype, name='rate_leg_discount_factors')\n equity_leg_discount_factors = tf.convert_to_tensor(equity_leg_discount_factors, dtype=dtype, name='equity_leg_discount_factors')\n if is_equity_receiver is None:\n is_equity_receiver = True\n is_equity_receiver = tf.convert_to_tensor(is_equity_receiver, dtype=tf.bool, name='is_equity_receiver')\n one = tf.ones([], dtype=dtype)\n equity_receiver = tf.where(is_equity_receiver, one, -one)\n equity_cashflows = equity_leg_cashflows(forward_prices=equity_leg_forward_prices, spots=equity_leg_spots, notional=equity_leg_notional, dividends=equity_dividends)\n rate_cashflows = rate_leg_cashflows(coupon_rates=rate_leg_coupon_rates, notional=rate_leg_notional, daycount_fractions=rate_leg_daycount_fractions)\n return equity_receiver * swap_price(rate_cashflows, equity_cashflows, rate_leg_discount_factors, equity_leg_discount_factors)", "docstring": "Computes prices of a batch of equity swaps.\n\nThe swap consists of an equity leg and an interest rate leg.\n\n#### Example\n```python\nrate_leg_coupon_rates = [[0.1, 0.2, 0.05], [0.1, 0.05, 0.2]]\n# Two cashflows of 4 and 3 payments, respectively\nforward_prices = [[110, 120, 140, 150], [210, 220, 240, 0]]\nspots = [100, 200]\nnotional = 1000\npay_leg_daycount_fractions = 0.5\nrate_leg_daycount_fractions = [[0.5, 0.5, 0.5], [0.4, 0.5, 0.6]]\nrate_leg_discount_factors = [[0.95, 0.9, 0.85], [0.98, 0.92, 0.88]]\nequity_leg_discount_factors = [[0.95, 0.9, 0.85, 0.8],\n [0.98, 0.92, 0.88, 0.0]]\n\nequity_swap_price(\n rate_leg_coupon_rates=rate_leg_coupon_rates,\n equity_leg_forward_prices=forward_prices,\n equity_leg_spots=spots,\n rate_leg_notional=notional,\n equity_leg_notional=notional,\n rate_leg_daycount_fractions=rate_leg_daycount_fractions,\n rate_leg_discount_factors=rate_leg_discount_factors,\n equity_leg_discount_factors=equity_leg_discount_factors,\n is_equity_receiver=[True, False],\n dtype=tf.float64)\n# Expected: [216.87770563, 
-5.00952381]\n```\n\nArgs:\n rate_leg_coupon_rates: A real `Tensor` of shape\n `batch_shape + [num_rate_cashflows]`, where `num_rate_cashflows` is the\n number of cashflows for each batch element. Coupon rates for the\n interest rate leg.\n equity_leg_forward_prices: A `Tensor` of the same `dtype` as\n `rate_leg_coupon_rates` and of shape\n `batch_shape + [num_equity_cashflows]`, where `num_equity_cashflows` is\n the number of cashflows for each batch element. Equity leg forward\n prices.\n equity_leg_spots: A `Tensor` of the same `dtype` as\n `equity_leg_forward_prices` and of shape compatible with `batch_shape`.\n Spot prices for each batch element of the equity leg.\n rate_leg_notional: A `Tensor` of the same `dtype` as `rate_leg_coupon_rates`\n and of compatible shape. Notional amount for each cashflow.\n equity_leg_notional: A `Tensor` of the same `dtype` as\n `equity_leg_forward_prices` and of compatible shape. Notional amount for\n each cashflow.\n rate_leg_daycount_fractions: A `Tensor` of the same `dtype` as\n `rate_leg_coupon_rates` and of compatible shape. Year fractions for the\n coupon accrual.\n rate_leg_discount_factors: A `Tensor` of the same `dtype` as\n `rate_leg_coupon_rates` and of compatible shape. Discount factors for each\n cashflow of the rate leg.\n equity_leg_discount_factors: A `Tensor` of the same `dtype` as\n `equity_leg_forward_prices` and of compatible shape. Discount factors for\n each cashflow of the equity leg.\n equity_dividends: A `Tensor` of the same `dtype` as\n `equity_leg_forward_prices` and of compatible shape. Dividends paid at the\n leg reset times.\n Default value: None which maps to zero dividend.\n is_equity_receiver: A boolean `Tensor` of shape compatible with\n `batch_shape`. Indicates whether the swap holder receives the equity\n leg (`True`) or pays it (`False`).\n Default value: None which means that all swaps are equity receiver swaps.\n dtype: `tf.Dtype`. If supplied, the dtype for the input and output `Tensor`s.\n Default value: None which maps to the default dtype inferred from\n `rate_leg_coupon_rates`.\n name: Python str. The name to give to the ops created by this function.\n Default value: None which maps to 'equity_swap_price'.\n\nReturns:\n A `Tensor` of the same `dtype` as `rate_leg_coupon_rates` and of shape\n `batch_shape`. 
Present values of the equity swaps."} +{"repo": "transformers", "function": "def add_content_to_file(file_name: Union[str, os.PathLike], content: str, add_after: Optional[Union[str, Pattern]]=None, add_before: Optional[Union[str, Pattern]]=None, exact_match: bool=False):\n with open(file_name, 'r', encoding='utf-8') as f:\n old_content = f.read()\n new_content = add_content_to_text(old_content, content, add_after=add_after, add_before=add_before, exact_match=exact_match)\n with open(file_name, 'w', encoding='utf-8') as f:\n f.write(new_content)", "docstring": "A utility to add some content inside a given file.\n\nArgs:\n file_name (`str` or `os.PathLike`): The name of the file in which we want to insert some content.\n content (`str`): The content to add.\n add_after (`str` or `Pattern`):\n The pattern to test on each line of the file; the new content is added after the first line matching it.\n add_before (`str` or `Pattern`):\n The pattern to test on each line of the file; the new content is added before the first line matching it.\n exact_match (`bool`, *optional*, defaults to `False`):\n A line is considered a match with `add_after` or `add_before` if it matches it exactly when `exact_match=True`,\n and otherwise if `add_after`/`add_before` is present in the line.\n\n\n\nThe arguments `add_after` and `add_before` are mutually exclusive, and exactly one needs to be provided.\n\n"} +{"repo": "tensorflow", "function": "def random_flip_up_down(image, seed=None):\n random_func = functools.partial(random_ops.random_uniform, seed=seed)\n return _random_flip(image, 0, random_func, 'random_flip_up_down')", "docstring": "Randomly flips an image vertically (upside down).\n\nWith a 1 in 2 chance, outputs the contents of `image` flipped along the first\ndimension, which is `height`. Otherwise, outputs the image as-is.\nWhen passing a batch of images, each image will be randomly flipped\nindependent of other images.\n\nExample usage:\n\n>>> image = np.array([[[1], [2]], [[3], [4]]])\n>>> tf.image.random_flip_up_down(image, 3).numpy().tolist()\n[[[3], [4]], [[1], [2]]]\n\nRandomly flip multiple images.\n\n>>> images = np.array(\n... [\n... [[[1], [2]], [[3], [4]]],\n... [[[5], [6]], [[7], [8]]]\n... ])\n>>> tf.image.random_flip_up_down(images, 4).numpy().tolist()\n[[[[3], [4]], [[1], [2]]], [[[5], [6]], [[7], [8]]]]\n\nFor producing deterministic results given a `seed` value, use\n`tf.image.stateless_random_flip_up_down`. Unlike using the `seed` param\nwith `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the\nsame results given the same seed independent of how many times the function is\ncalled, and independent of global seed settings (e.g. tf.random.set_seed).\n\nArgs:\n image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor\n of shape `[height, width, channels]`.\n seed: A Python integer. Used to create a random seed. 
See\n `tf.compat.v1.set_random_seed` for behavior.\n\nReturns:\n A tensor of the same type and shape as `image`.\nRaises:\n ValueError: if the shape of `image` is not supported."} +{"repo": "transformers", "function": "class AdamWeightDecay(Adam):\n\n def __init__(self, learning_rate: Union[float, schedules.LearningRateSchedule]=0.001, beta_1: float=0.9, beta_2: float=0.999, epsilon: float=1e-07, amsgrad: bool=False, weight_decay_rate: float=0.0, include_in_weight_decay: Optional[list[str]]=None, exclude_from_weight_decay: Optional[list[str]]=None, name: str='AdamWeightDecay', **kwargs):\n super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)\n self.weight_decay_rate = weight_decay_rate\n self._include_in_weight_decay = include_in_weight_decay\n self._exclude_from_weight_decay = exclude_from_weight_decay\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates an optimizer from its config with WarmUp custom object.\"\"\"\n custom_objects = {'WarmUp': WarmUp}\n return super().from_config(config, custom_objects=custom_objects)\n\n def _prepare_local(self, var_device, var_dtype, apply_state):\n super()._prepare_local(var_device, var_dtype, apply_state)\n apply_state[var_device, var_dtype]['weight_decay_rate'] = tf.constant(self.weight_decay_rate, name='adam_weight_decay_rate')\n\n def _decay_weights_op(self, var, learning_rate, apply_state):\n do_decay = self._do_use_weight_decay(var.name)\n if do_decay:\n return var.assign_sub(learning_rate * var * apply_state[var.device, var.dtype.base_dtype]['weight_decay_rate'], use_locking=self._use_locking)\n return tf.no_op()\n\n def apply_gradients(self, grads_and_vars, name=None, **kwargs):\n grads, tvars = list(zip(*grads_and_vars))\n return super().apply_gradients(zip(grads, tvars), name=name, **kwargs)\n\n def _get_lr(self, var_device, var_dtype, apply_state):\n \"\"\"Retrieves the learning rate with the given state.\"\"\"\n if apply_state is None:\n return (self._decayed_lr_t[var_dtype], {})\n apply_state = apply_state or {}\n coefficients = apply_state.get((var_device, var_dtype))\n if coefficients is None:\n coefficients = self._fallback_apply_state(var_device, var_dtype)\n apply_state[var_device, var_dtype] = coefficients\n return (coefficients['lr_t'], {'apply_state': apply_state})\n\n def _resource_apply_dense(self, grad, var, apply_state=None):\n lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)\n decay = self._decay_weights_op(var, lr_t, apply_state)\n with tf.control_dependencies([decay]):\n return super()._resource_apply_dense(grad, var, **kwargs)\n\n def _resource_apply_sparse(self, grad, var, indices, apply_state=None):\n lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)\n decay = self._decay_weights_op(var, lr_t, apply_state)\n with tf.control_dependencies([decay]):\n return super()._resource_apply_sparse(grad, var, indices, **kwargs)\n\n def get_config(self):\n config = super().get_config()\n config.update({'weight_decay_rate': self.weight_decay_rate})\n return config\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if self.weight_decay_rate == 0:\n return False\n if self._include_in_weight_decay:\n for r in self._include_in_weight_decay:\n if re.search(r, param_name) is not None:\n return True\n if self._exclude_from_weight_decay:\n for r in self._exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True", "docstring": "Adam enables L2 weight decay 
and clip_by_global_norm on gradients. Just adding the square of the weights to the\nloss function is *not* the correct way of using L2 regularization/weight decay with Adam, since that will interact\nwith the m and v parameters in strange ways as shown in [Decoupled Weight Decay\nRegularization](https://huggingface.co/papers/1711.05101).\n\nInstead we want to decay the weights in a manner that doesn't interact with the m/v parameters. This is equivalent\nto adding the square of the weights to the loss with plain (non-momentum) SGD.\n\nArgs:\n learning_rate (`Union[float, LearningRateSchedule]`, *optional*, defaults to 0.001):\n The learning rate to use or a schedule.\n beta_1 (`float`, *optional*, defaults to 0.9):\n The beta1 parameter in Adam, which is the exponential decay rate for the 1st momentum estimates.\n beta_2 (`float`, *optional*, defaults to 0.999):\n The beta2 parameter in Adam, which is the exponential decay rate for the 2nd momentum estimates.\n epsilon (`float`, *optional*, defaults to 1e-07):\n The epsilon parameter in Adam, which is a small constant for numerical stability.\n amsgrad (`bool`, *optional*, defaults to `False`):\n Whether to apply AMSGrad variant of this algorithm or not, see [On the Convergence of Adam and\n Beyond](https://huggingface.co/papers/1904.09237).\n weight_decay_rate (`float`, *optional*, defaults to 0.0):\n The weight decay to apply.\n include_in_weight_decay (`List[str]`, *optional*):\n List of the parameter names (or re patterns) to apply weight decay to. If none is passed, weight decay is\n applied to all parameters by default (unless they are in `exclude_from_weight_decay`).\n exclude_from_weight_decay (`List[str]`, *optional*):\n List of the parameter names (or re patterns) to exclude from applying weight decay to. If a\n `include_in_weight_decay` is passed, the names in it will supersede this list.\n name (`str`, *optional*, defaults to `\"AdamWeightDecay\"`):\n Optional name for the operations created when applying gradients.\n kwargs (`Dict[str, Any]`, *optional*):\n Keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients by\n norm; `clipvalue` is clip gradients by value, `decay` is included for backward compatibility to allow time\n inverse decay of learning rate. 
`lr` is included for backward compatibility, recommended to use\n `learning_rate` instead."} +{"repo": "transformers", "function": "def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):\n return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)", "docstring": "Instantiate a [`AltCLIPConfig`] (or a derived class) from altclip text model configuration and altclip vision\nmodel configuration.\n\nReturns:\n [`AltCLIPConfig`]: An instance of a configuration object"} +{"repo": "transformers", "function": "def group_entities(self, entities: List[dict]) -> List[dict]:\n entity_groups = []\n entity_group_disagg = []\n for entity in entities:\n if not entity_group_disagg:\n entity_group_disagg.append(entity)\n continue\n bi, tag = self.get_tag(entity['entity'])\n last_bi, last_tag = self.get_tag(entity_group_disagg[-1]['entity'])\n if tag == last_tag and bi != 'B':\n entity_group_disagg.append(entity)\n else:\n entity_groups.append(self.group_sub_entities(entity_group_disagg))\n entity_group_disagg = [entity]\n if entity_group_disagg:\n entity_groups.append(self.group_sub_entities(entity_group_disagg))\n return entity_groups", "docstring": "Find and group together the adjacent tokens with the same entity predicted.\n\nArgs:\n entities (`dict`): The entities predicted by the pipeline."} +{"repo": "pyglove", "function": "def eq(left: Any, right: Any) -> bool:\n if left is right:\n return True\n if isinstance(left, list) and isinstance(right, list) or (isinstance(left, tuple) and isinstance(right, tuple)):\n if len(left) != len(right):\n return False\n for x, y in zip(left, right):\n if ne(x, y):\n return False\n return True\n elif isinstance(left, dict):\n if not isinstance(right, dict) or len(left) != len(right) or set(left.keys()) != set(right.keys()):\n return False\n left_items = left.sym_items if isinstance(left, Symbolic) else left.items\n right_item = right.sym_getattr if isinstance(right, Symbolic) else right.__getitem__\n for k, v in left_items():\n if ne(v, right_item(k)):\n return False\n return True\n elif hasattr(left, 'sym_eq') and (not inspect.isclass(left)) and (left.sym_eq.__code__ is not Symbolic.sym_eq.__code__):\n return left.sym_eq(right)\n elif hasattr(right, 'sym_eq') and (not inspect.isclass(right)) and (right.sym_eq.__code__ is not Symbolic.sym_eq.__code__):\n return right.sym_eq(left)\n return pg_typing.callable_eq(left, right)", "docstring": "Compares if two values are equal. Use symbolic equality if possible.\n\nExample::\n\n @pg.members([\n ('x', pg.typing.Any())\n ])\n class A(pg.Object):\n def sym_eq(self, right):\n if super().sym_eq(right):\n return True\n return pg.eq(self.x, right)\n\n class B:\n pass\n\n assert pg.eq(1, 1)\n assert pg.eq(A(1), A(1))\n # This is True since A has override `sym_eq`.\n assert pg.eq(A(1), 1)\n # Objects of B are compared by references.\n assert not pg.eq(A(B()), A(B()))\n\nArgs:\n left: The left-hand value to compare.\n right: The right-hand value to compare.\n\nReturns:\n True if left and right is equal or symbolically equal. 
Otherwise False."} +{"repo": "mobly", "function": "def _set_details(self, content):\n try:\n self.details = str(content)\n except UnicodeEncodeError:\n logging.error('Unable to decode \"%s\" in Py3, encoding in utf-8.', content)\n self.details = content.encode('utf-8')", "docstring": "Sets the `details` field.\n\nArgs:\n content: the content to extract details from."} +{"repo": "tensorflow", "function": "def write_dirpath(dirpath, strategy):\n if strategy is None:\n strategy = distribute_lib.get_strategy()\n if strategy is None:\n return dirpath\n if not strategy.extended._in_multi_worker_mode():\n return dirpath\n if strategy.extended.should_checkpoint:\n return dirpath\n return _get_temp_dir(dirpath, strategy)", "docstring": "Returns the writing dir that should be used to save files in a distributed setting.\n\n`dirpath` would be created if it doesn't exist.\n\nArgs:\n dirpath: Original dirpath that would be used without distribution.\n strategy: The tf.distribute strategy object currently used.\n\nReturns:\n The writing dir path that should be used to save with distribution."} +{"repo": "tensorflow", "function": "def load_from_saved_model(saved_model_path, custom_objects=None):\n warnings.warn('`tf.keras.experimental.load_from_saved_model` is deprecated and will be removed in a future version. Please switch to `tf.keras.models.load_model`.')\n model_json_filepath = os.path.join(compat.as_bytes(saved_model_path), compat.as_bytes(constants.ASSETS_DIRECTORY), compat.as_bytes(SAVED_MODEL_FILENAME_JSON))\n with gfile.Open(model_json_filepath, 'r') as f:\n model_json = f.read()\n model = model_config.model_from_json(model_json, custom_objects=custom_objects)\n checkpoint_prefix = os.path.join(compat.as_text(saved_model_path), compat.as_text(constants.VARIABLES_DIRECTORY), compat.as_text(constants.VARIABLES_FILENAME))\n model.load_weights(checkpoint_prefix)\n return model", "docstring": "Loads a keras Model from a SavedModel created by `export_saved_model()`.\n\nThis function reinstantiates model state by:\n1) loading model topology from json (this will eventually come\n from metagraph).\n2) loading model weights from checkpoint.\n\nExample:\n\n```python\nimport tensorflow as tf\n\n# Create a tf.keras model.\nmodel = tf.keras.Sequential()\nmodel.add(tf.keras.layers.Dense(1, input_shape=[10]))\nmodel.summary()\n\n# Save the tf.keras model in the SavedModel format.\npath = '/tmp/simple_keras_model'\ntf.keras.experimental.export_saved_model(model, path)\n\n# Load the saved keras model back.\nnew_model = tf.keras.experimental.load_from_saved_model(path)\nnew_model.summary()\n```\n\nArgs:\n saved_model_path: a string specifying the path to an existing SavedModel.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n\nReturns:\n a keras.Model instance."} +{"repo": "transformers", "function": "def _inference_forward(self, z: torch.Tensor, mask: Optional[torch.Tensor]=None, inplace_chunk_size: Optional[int]=None, with_add: bool=True):\n if mask is None:\n mask = z.new_ones(z.shape[:-1])\n mask = mask.unsqueeze(-1)\n\n def compute_projection_helper(pair, mask, a=True):\n if a:\n linear_g = self.linear_a_g\n linear_p = self.linear_a_p\n else:\n linear_g = self.linear_b_g\n linear_p = self.linear_b_p\n pair = self.layer_norm_in(pair)\n p = linear_g(pair)\n p.sigmoid_()\n p *= linear_p(pair)\n p *= mask\n p = permute_final_dims(p, (2, 0, 1))\n return p\n\n def compute_projection(pair, mask, a=True, chunked=True):\n need_transpose = 
self._outgoing ^ a\n if not chunked:\n p = compute_projection_helper(pair, mask, a)\n if need_transpose:\n p = p.transpose(-1, -2)\n else:\n linear_g = self.linear_a_g if a else self.linear_b_g\n c = linear_g.bias.shape[-1]\n out_shape = pair.shape[:-3] + (c,) + pair.shape[-3:-1]\n p = pair.new_zeros(out_shape)\n for i in range(0, pair.shape[-3], inplace_chunk_size):\n pair_chunk = pair[..., i:i + inplace_chunk_size, :, :]\n pair_chunk = compute_projection_helper(pair[..., i:i + inplace_chunk_size, :, :], mask[..., i:i + inplace_chunk_size, :, :], a)\n if need_transpose:\n pair_chunk = pair_chunk.transpose(-1, -2)\n p[..., i:i + inplace_chunk_size] = pair_chunk\n else:\n p[..., i:i + inplace_chunk_size, :] = pair_chunk\n del pair_chunk\n return p\n a = compute_projection(z, mask, True, chunked=True)\n if inplace_chunk_size is not None:\n n = a.shape[-1]\n half_n = n // 2 + n % 2\n row_dim = -3\n col_dim = -2\n b_chunk_dim = row_dim if self._outgoing else col_dim\n\n def empty_slicer(t):\n return [slice(None) for _ in t.shape]\n\n def slice_tensor(t, start, end, dim):\n s = empty_slicer(t)\n s[dim] = slice(start, end)\n return t[s]\n\n def flip_z_cache_(z_cache, z):\n quadrant_3 = slice_tensor(z_cache, half_n, None, row_dim)\n z_cache = z_cache.transpose(row_dim, col_dim)\n z_cache = z_cache[..., :n // 2, :, :]\n first_half_slicer = empty_slicer(z_cache)\n first_half_slicer[col_dim] = slice(0, half_n)\n z_cache[first_half_slicer] = quadrant_3\n quadrant_4 = slice_tensor(z, half_n, None, row_dim)\n quadrant_4 = slice_tensor(quadrant_4, half_n, None, col_dim)\n quadrant_3_slicer = empty_slicer(z_cache)\n quadrant_3_slicer[col_dim] = slice(half_n, None)\n z_cache[quadrant_3_slicer] = quadrant_4\n return z_cache\n z_cache_shape = list(z.shape)\n z_cache_shape[col_dim] = half_n\n z_cache = z.new_zeros(z_cache_shape)\n z_cache_slicer = empty_slicer(z_cache)\n z_cache_slicer[col_dim] = slice(0, half_n)\n z_cache.copy_(z[z_cache_slicer])\n z_cache_rotated = False\n i_range = list(range(0, half_n, inplace_chunk_size))\n initial_offsets = [i_2 - i_1 for i_1, i_2 in zip(i_range, i_range[1:] + [half_n])]\n after_half = list(range(half_n, n, inplace_chunk_size))\n after_half_offsets = [inplace_chunk_size for _ in after_half]\n combined_range_with_offsets = zip(i_range + after_half, initial_offsets + after_half_offsets)\n for i, offset in combined_range_with_offsets:\n if not z_cache_rotated and i >= half_n:\n z_cache = flip_z_cache_(z_cache, z)\n z_cache_rotated = True\n z_chunk_b = slice_tensor(z, i, i + offset, b_chunk_dim)\n mask_chunk = slice_tensor(mask, i, i + offset, b_chunk_dim)\n z_chunk_b = z_chunk_b.clone()\n if b_chunk_dim == col_dim:\n z_chunk_b = slice_tensor(z, i, i + offset, col_dim)\n elif not z_cache_rotated:\n z_chunk_slicer = empty_slicer(z_chunk_b)\n z_chunk_slicer[col_dim] = slice(0, half_n)\n z_chunk_b[z_chunk_slicer] = slice_tensor(z_cache, i, i + offset, row_dim)\n else:\n z_cache_offset = i - half_n\n z_chunk_b = slice_tensor(z_cache, z_cache_offset, z_cache_offset + offset, row_dim)\n b_chunk = compute_projection(z_chunk_b, mask_chunk, a=False, chunked=False)\n del z_chunk_b\n x_chunk = torch.matmul(a, b_chunk)\n x_chunk = permute_final_dims(x_chunk, (1, 2, 0))\n x_chunk = self.layer_norm_out(x_chunk)\n x_chunk = self.linear_z(x_chunk)\n z_chunk_g = slice_tensor(z, i, i + offset, col_dim)\n g_chunk = self.linear_g(self.layer_norm_in(z_chunk_g))\n g_chunk.sigmoid_()\n del z_chunk_g\n x_chunk *= g_chunk\n z_slicer = empty_slicer(z)\n z_slicer[col_dim] = slice(i, i + offset)\n if 
with_add:\n z[z_slicer] += x_chunk\n else:\n z[z_slicer] = x_chunk\n else:\n b = compute_projection(z, mask, False, False)\n x = torch.matmul(a, b)\n x = self.layer_norm_out(x)\n x = self.linear_z(x)\n g = self.linear_g(z)\n g.sigmoid_()\n x *= g\n if with_add:\n z += x\n else:\n z = x\n return z", "docstring": "Args:\n z:\n A [*, N, N, C_z] pair representation\n mask:\n A [*, N, N] pair mask\n inplace_chunk_size:\n Size of chunks used in the main computation. Increase to trade memory for speed.\n with_add:\n If True, z is overwritten with (z + update). Otherwise, it is overwritten with (update).\nReturns:\n A reference to the overwritten z\n\nMore memory-efficient, inference-only version of the forward function. Uses in-place operations, fusion of the\naddition that happens after this module in the Evoformer, a smidge of recomputation, and a cache of overwritten\nvalues to lower peak memory consumption of this module from 5x the size of the input tensor z to 2.5x its size.\nUseful for inference on extremely long sequences.\n\nIt works as follows. We will make reference to variables used in the default forward implementation below.\nNaively, triangle multiplication attention requires the manifestation of 5 tensors the size of z: 1) z, the\n\"square\" input tensor, 2) a, the first projection of z, 3) b, the second projection of z, 4) g, a z-sized mask,\nand 5) a z-sized tensor for intermediate computations. For large N, this is prohibitively expensive; for\nN=4000, for example, z is more than 8GB alone. To avoid this problem, we compute b, g, and all intermediate\ntensors in small chunks, noting that the chunks required to compute a chunk of the output depend only on the\ntensor a and corresponding vertical and horizontal chunks of z. This suggests an algorithm that loops over\npairs of chunks of z: hereafter \"columns\" and \"rows\" of z, even though each \"column\" and \"row\" in fact contains\ninplace_chunk_size contiguous true columns and rows of z. Writing output chunks to a new tensor would bring\ntotal memory consumption down to 3x the size of z. However, more memory can be saved by writing output chunks\ndirectly to z in-place. WLOG, we choose to write output chunks vertically, overwriting the ith \"column\" of z at\nthe end of the ith iteration of the main loop. Despite this overwriting, the ith column is always one column\nahead of previously overwritten columns and can be recovered directly from z. After the first iteration,\nhowever, the ith row of z is always at least partially overwritten. For this reason, we introduce the z-cache,\na tensor one-half the size of z. The z-cache initially contains the left half (2nd and 3rd quadrants) of z. For\n0 < i < N/2, the missing left part of the ith row of z is recovered from this cache at the beginning of the ith\niteration. Once i exceeds n/2, the cache is \"reoriented\" to encompass the 3rd and 4th quadrants of z instead.\nThough the 3rd quadrant of the original z is entirely overwritten at this point, it can be recovered from the\nz-cache itself. Thereafter, the ith row of z can be recovered in its entirety from the reoriented z-cache.\nAfter the final iteration, z has been completely overwritten and contains the triangular multiplicative update.\nIf with_add is True, it instead contains the sum of z and the triangular multiplicative update. 
In either case,\npeak memory consumption is just 2.5x the size of z, disregarding memory used for chunks and other small\nvariables."} +{"repo": "keras", "function": "def get_tf_dataset(self):\n raise NotImplementedError", "docstring": "Get a `tf.data.Dataset` instance for the DataAdapter.\n\nNote that the dataset returned does not repeat across epochs, so the caller\nmight need to create a new iterator for the same dataset at the beginning\nof each epoch. This behavior might change in the future.\n\nReturns:\n A `tf.data.Dataset`. Caller might use the dataset in different\n context, e.g. iter(dataset) in eager to get the value directly, or\n in graph mode, provide the iterator tensor to Keras model function."} +{"repo": "keras", "function": "class Node:\n\n def __init__(self, operation, call_args=None, call_kwargs=None, outputs=None):\n self.operation = operation\n self.arguments = SymbolicArguments(*call_args, **call_kwargs)\n self.outputs = [] if outputs is None else tree.flatten(outputs)\n for x in self.outputs:\n if not isinstance(x, KerasTensor):\n raise ValueError(f'All operation outputs must be tensors. Operation {operation} returned a non-tensor. Non-tensor received: {x}')\n zero_history = any((not x.record_history for x in self.arguments.keras_tensors))\n if not zero_history:\n for tensor in self.arguments.keras_tensors:\n if not hasattr(tensor, '_keras_history'):\n tensor._keras_history = KerasHistory(operation=None, node_index=0, tensor_index=0)\n self.operation._inbound_nodes.append(self)\n for kt in self.arguments.keras_tensors:\n inbound_op = kt._keras_history.operation\n if inbound_op is not None:\n inbound_op._outbound_nodes.append(self)\n if not zero_history:\n node_index = len(self.operation._inbound_nodes) - 1\n for i, tensor in enumerate(self.outputs):\n tensor._keras_history = KerasHistory(operation=operation, node_index=node_index, tensor_index=i)\n self.is_input = not self.arguments.keras_tensors\n\n def __repr__(self):\n return f'<Node operation={self.operation}, id={id(self)}>'\n\n @property\n def input_tensors(self):\n return self.arguments.keras_tensors\n\n @property\n def output_tensors(self):\n return self.outputs\n\n @property\n def parent_nodes(self):\n \"\"\"The parent `Node`s.\n\n Returns:\n all the `Node`s whose output this node immediately depends on.\n \"\"\"\n node_deps = []\n for kt in self.arguments.keras_tensors:\n op = kt._keras_history.operation\n node_index = kt._keras_history.node_index\n if op is not None:\n node_deps.append(op._inbound_nodes[node_index])\n return node_deps", "docstring": "A `Node` describes an operation `__call__()` event.\n\nA Keras Function is a DAG with `Node` instances as nodes, and\n`KerasTensor` instances as edges. Nodes aren't `Operation` instances,\nbecause a single operation could be called multiple times, which would\nresult in graph cycles.\n\nA `__call__()` event involves input tensors (and other input arguments),\nthe operation that was called, and the resulting output tensors.\nA `Node` will include all this information.\n\nSince a single `Operation` could be called multiple times,\nthe `Node` instances are stored on operations as a list.\nEach time an operation is called, a node is added to `op._inbound_nodes`.\nEach time the output of an operation is used by another operation,\na node is added to `op._outbound_nodes`.\n\nEvery `KerasTensor` instance has a `KerasHistory` object attached,\nwhich tracks the `Node` that records the `__call__()` event that created\nthe tensor. 
By recursively walking through `Node` instances\nvia the `KerasHistory` metadata of `KerasTensor` instances, one can\nretrieve the entire DAG of a Keras Function.\n\nArgs:\n operation: The Operation that was called in the `op.__call__()`\n event that this node represents.\n call_args: The positional arguments the operation was called with.\n call_kwargs: The keyword arguments the operation was called with.\n outputs: The output tensors of the `op.__call__()` call."} +{"repo": "transformers", "function": "class OneFormerImageProcessor(BaseImageProcessor):\n model_input_names = ['pixel_values', 'pixel_mask', 'task_inputs']\n\n @deprecate_kwarg('reduce_labels', new_name='do_reduce_labels', version='4.44.0')\n @deprecate_kwarg('max_size', version='4.27.0', warn_if_greater_or_equal_version=True)\n @filter_out_non_signature_kwargs(extra=['max_size', 'metadata', *INIT_SERVICE_KWARGS])\n def __init__(self, do_resize: bool=True, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=PILImageResampling.BILINEAR, do_rescale: bool=True, rescale_factor: float=1 / 255, do_normalize: bool=True, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, repo_path: Optional[str]='shi-labs/oneformer_demo', class_info_file: Optional[str]=None, num_text: Optional[int]=None, num_labels: Optional[int]=None, **kwargs):\n super().__init__(**kwargs)\n self._max_size = kwargs.pop('max_size', 1333)\n size = size if size is not None else {'shortest_edge': 800, 'longest_edge': self._max_size}\n size = get_size_dict(size, max_size=self._max_size, default_to_square=False)\n if class_info_file is None:\n raise ValueError('You must provide a `class_info_file`')\n self.do_resize = do_resize\n self.size = size\n self.resample = resample\n self.do_rescale = do_rescale\n self.rescale_factor = rescale_factor\n self.do_normalize = do_normalize\n self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN\n self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD\n self.ignore_index = ignore_index\n self.do_reduce_labels = do_reduce_labels\n self.class_info_file = class_info_file\n self.repo_path = repo_path\n self.metadata = prepare_metadata(load_metadata(repo_path, class_info_file))\n self.num_text = num_text\n self.num_labels = num_labels\n\n @classmethod\n def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs):\n \"\"\"\n Overrides the `from_dict` method from the base class to preserve support for the deprecated `reduce_labels` argument in old configs\n \"\"\"\n image_processor_dict = image_processor_dict.copy()\n if 'reduce_labels' in image_processor_dict:\n image_processor_dict['do_reduce_labels'] = image_processor_dict.pop('reduce_labels')\n return super().from_dict(image_processor_dict, **kwargs)\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes this instance to a Python dictionary. 
This method calls the superclass method and then removes the\n `_max_size` attribute from the dictionary.\n \"\"\"\n image_processor_dict = super().to_dict()\n image_processor_dict.pop('_max_size', None)\n return image_processor_dict\n\n @deprecate_kwarg('max_size', version='4.27.0', warn_if_greater_or_equal_version=True)\n @filter_out_non_signature_kwargs(extra=['max_size'])\n def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling=PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:\n \"\"\"\n Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an\n int, smaller edge of the image will be matched to this number.\n \"\"\"\n max_size = kwargs.pop('max_size', None)\n size = get_size_dict(size, max_size=max_size, default_to_square=False)\n if 'shortest_edge' in size and 'longest_edge' in size:\n size, max_size = (size['shortest_edge'], size['longest_edge'])\n elif 'height' in size and 'width' in size:\n size = (size['height'], size['width'])\n max_size = None\n else:\n raise ValueError(f\"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.\")\n size = get_oneformer_resize_output_image_size(image=image, size=size, max_size=max_size, default_to_square=False, input_data_format=input_data_format)\n image = resize(image, size=size, resample=resample, data_format=data_format, input_data_format=input_data_format)\n return image\n\n def rescale(self, image: np.ndarray, rescale_factor: float, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n \"\"\"\n Rescale the image by the given factor. image = image * rescale_factor.\n\n Args:\n image (`np.ndarray`):\n Image to rescale.\n rescale_factor (`float`):\n The value to use for rescaling.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. If unset, is inferred from the input image. 
Can be\n one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n \"\"\"\n return rescale(image, rescale_factor, data_format=data_format, input_data_format=input_data_format)\n\n def convert_segmentation_map_to_binary_masks(self, segmentation_map: 'np.ndarray', instance_id_to_semantic_id: Optional[Dict[int, int]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False):\n do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels\n ignore_index = ignore_index if ignore_index is not None else self.ignore_index\n return convert_segmentation_map_to_binary_masks(segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels)\n\n def __call__(self, images, task_inputs=None, segmentation_maps=None, **kwargs) -> BatchFeature:\n return self.preprocess(images, task_inputs=task_inputs, segmentation_maps=segmentation_maps, **kwargs)\n\n def _preprocess(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n if do_resize:\n image = self.resize(image, size=size, resample=resample, input_data_format=input_data_format)\n if do_rescale:\n image = self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format)\n if do_normalize:\n image = self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n return image\n\n def _preprocess_image(self, image: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, data_format: Optional[Union[str, ChannelDimension]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n \"\"\"Preprocesses a single image.\"\"\"\n image = to_numpy_array(image)\n if do_rescale and is_scaled_image(image):\n logger.warning_once('It looks like you are trying to rescale already rescaled images. 
If the input images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.')\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n image = self._preprocess(image=image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, input_data_format=input_data_format)\n if data_format is not None:\n image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n return image\n\n def _preprocess_mask(self, segmentation_map: ImageInput, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n \"\"\"Preprocesses a single mask.\"\"\"\n segmentation_map = to_numpy_array(segmentation_map)\n if segmentation_map.ndim == 2:\n added_channel_dim = True\n segmentation_map = segmentation_map[None, ...]\n input_data_format = ChannelDimension.FIRST\n else:\n added_channel_dim = False\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)\n segmentation_map = self._preprocess(image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, do_rescale=False, do_normalize=False, input_data_format=input_data_format)\n if added_channel_dim:\n segmentation_map = segmentation_map.squeeze(0)\n return segmentation_map\n\n @filter_out_non_signature_kwargs()\n def preprocess(self, images: ImageInput, task_inputs: Optional[List[str]]=None, segmentation_maps: Optional[ImageInput]=None, instance_id_to_semantic_id: Optional[Dict[int, int]]=None, do_resize: Optional[bool]=None, size: Optional[Dict[str, int]]=None, resample: PILImageResampling=None, do_rescale: Optional[bool]=None, rescale_factor: Optional[float]=None, do_normalize: Optional[bool]=None, image_mean: Optional[Union[float, List[float]]]=None, image_std: Optional[Union[float, List[float]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: Optional[bool]=None, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Union[str, ChannelDimension]=ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:\n if task_inputs is None:\n task_inputs = ['panoptic']\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False, max_size=self._max_size)\n resample = resample if resample is not None else self.resample\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n ignore_index = ignore_index if ignore_index is not None else self.ignore_index\n do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels\n if not valid_images(images):\n raise ValueError('Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n validate_preprocess_arguments(do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, do_resize=do_resize, size=size, resample=resample)\n if segmentation_maps is not None and (not valid_images(segmentation_maps)):\n raise ValueError('Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray.')\n images = make_list_of_images(images)\n if segmentation_maps is not None:\n segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)\n if segmentation_maps is not None and len(images) != len(segmentation_maps):\n raise ValueError('Images and segmentation maps must have the same length.')\n images = [self._preprocess_image(image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, input_data_format=input_data_format) for image in images]\n if segmentation_maps is not None:\n segmentation_maps = [self._preprocess_mask(segmentation_map, do_resize, size, input_data_format=input_data_format) for segmentation_map in segmentation_maps]\n encoded_inputs = self.encode_inputs(images, task_inputs, segmentation_maps, instance_id_to_semantic_id, ignore_index, do_reduce_labels, return_tensors, input_data_format=data_format)\n return encoded_inputs\n\n def _pad_image(self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]]=0, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n \"\"\"\n Pad an image with zeros to the given size.\n \"\"\"\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n output_height, output_width = output_size\n pad_bottom = output_height - input_height\n pad_right = output_width - input_width\n padding = ((0, pad_bottom), (0, pad_right))\n padded_image = pad(image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format)\n return padded_image\n\n def pad(self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]]=0, return_pixel_mask: bool=True, return_tensors: Optional[Union[str, TensorType]]=None, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> BatchFeature:\n \"\"\"\n Pads a batch of images to the bottom and right of the image with zeros to the size of the largest height and width\n in the batch and optionally returns their corresponding pixel mask.\n\n Args:\n images (`List[np.ndarray]`):\n Images to pad.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n return_pixel_mask (`bool`, *optional*, defaults to `True`):\n Whether to return a pixel mask.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. 
Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred.\n \"\"\"\n pad_size = get_max_height_width(images, input_data_format=input_data_format)\n padded_images = [self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format, input_data_format=input_data_format) for image in images]\n data = {'pixel_values': padded_images}\n if return_pixel_mask:\n masks = [make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format) for image in images]\n data['pixel_mask'] = masks\n return BatchFeature(data=data, tensor_type=return_tensors)\n\n def get_semantic_annotations(self, label, num_class_obj):\n annotation_classes = label['classes']\n annotation_masks = label['masks']\n texts = ['a semantic photo'] * self.num_text\n classes = []\n masks = []\n for idx in range(len(annotation_classes)):\n class_id = annotation_classes[idx]\n mask = annotation_masks[idx]\n if not np.all(mask is False):\n if class_id not in classes:\n cls_name = self.metadata[str(class_id)]\n classes.append(class_id)\n masks.append(mask)\n num_class_obj[cls_name] += 1\n else:\n idx = classes.index(class_id)\n masks[idx] += mask\n masks[idx] = np.clip(masks[idx], 0, 1)\n num = 0\n for i, cls_name in enumerate(self.metadata['class_names']):\n if num_class_obj[cls_name] > 0:\n for _ in range(num_class_obj[cls_name]):\n if num >= len(texts):\n break\n texts[num] = f'a photo with a {cls_name}'\n num += 1\n classes = np.array(classes)\n masks = np.array(masks)\n return (classes, masks, texts)\n\n def get_instance_annotations(self, label, num_class_obj):\n annotation_classes = label['classes']\n annotation_masks = label['masks']\n texts = ['an instance photo'] * self.num_text\n classes = []\n masks = []\n for idx in range(len(annotation_classes)):\n class_id = annotation_classes[idx]\n mask = annotation_masks[idx]\n if class_id in self.metadata['thing_ids']:\n if not np.all(mask is False):\n cls_name = self.metadata[str(class_id)]\n classes.append(class_id)\n masks.append(mask)\n num_class_obj[cls_name] += 1\n num = 0\n for i, cls_name in enumerate(self.metadata['class_names']):\n if num_class_obj[cls_name] > 0:\n for _ in range(num_class_obj[cls_name]):\n if num >= len(texts):\n break\n texts[num] = f'a photo with a {cls_name}'\n num += 1\n classes = np.array(classes)\n masks = np.array(masks)\n return (classes, masks, texts)\n\n def get_panoptic_annotations(self, label, num_class_obj):\n annotation_classes = label['classes']\n annotation_masks = label['masks']\n texts = ['an panoptic photo'] * self.num_text\n classes = []\n masks = []\n for idx in range(len(annotation_classes)):\n class_id = annotation_classes[idx]\n mask = annotation_masks[idx].data\n if not np.all(mask is False):\n cls_name = self.metadata[str(class_id)]\n classes.append(class_id)\n masks.append(mask)\n num_class_obj[cls_name] += 1\n num = 0\n for i, cls_name in enumerate(self.metadata['class_names']):\n if 
num_class_obj[cls_name] > 0:\n for _ in range(num_class_obj[cls_name]):\n if num >= len(texts):\n break\n texts[num] = f'a photo with a {cls_name}'\n num += 1\n classes = np.array(classes)\n masks = np.array(masks)\n return (classes, masks, texts)\n\n def encode_inputs(self, pixel_values_list: List[ImageInput], task_inputs: List[str], segmentation_maps: ImageInput=None, instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]]=None, ignore_index: Optional[int]=None, do_reduce_labels: bool=False, return_tensors: Optional[Union[str, TensorType]]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None):\n \"\"\"\n Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.\n\n OneFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps\n will be converted to lists of binary masks and their respective labels. Let's see an example, assuming\n `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =\n [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for\n each mask.\n\n Args:\n pixel_values_list (`List[ImageInput]`):\n List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,\n width)`.\n\n task_inputs (`List[str]`):\n List of task values.\n\n segmentation_maps (`ImageInput`, *optional*):\n The corresponding semantic segmentation maps with the pixel-wise annotations.\n\n (`bool`, *optional*, defaults to `True`):\n Whether or not to pad images up to the largest image in a batch and create a pixel mask.\n\n If left to the default, will return a pixel mask that is:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n\n instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*):\n A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an\n instance segmentation map where each pixel represents an instance id. Can be provided as a single\n dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map\n instance ids in each image separately.\n\n return_tensors (`str` or [`~file_utils.TensorType`], *optional*):\n If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`\n objects.\n\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred from the input\n image.\n\n Returns:\n [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n\n - **pixel_values** -- Pixel values to be fed to a model.\n - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in\n `self.model_input_names`).\n - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model\n (when `annotations` are provided).\n - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when\n `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of\n `mask_labels[i][j]` is `class_labels[i][j]`.\n - **text_inputs** -- Optional list of text string entries to be fed to a model (when `annotations` are\n provided). 
They identify the binary masks present in the image.\n \"\"\"\n ignore_index = self.ignore_index if ignore_index is None else ignore_index\n do_reduce_labels = self.do_reduce_labels if do_reduce_labels is None else do_reduce_labels\n pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list]\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(pixel_values_list[0])\n pad_size = get_max_height_width(pixel_values_list, input_data_format=input_data_format)\n encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors, input_data_format=input_data_format)\n annotations = None\n if segmentation_maps is not None:\n segmentation_maps = map(np.array, segmentation_maps)\n annotations = []\n for idx, segmentation_map in enumerate(segmentation_maps):\n if isinstance(instance_id_to_semantic_id, list):\n instance_id = instance_id_to_semantic_id[idx]\n else:\n instance_id = instance_id_to_semantic_id\n masks, classes = self.convert_segmentation_map_to_binary_masks(segmentation_map, instance_id, ignore_index=ignore_index, do_reduce_labels=do_reduce_labels)\n annotations.append({'masks': masks, 'classes': classes})\n if annotations is not None:\n mask_labels = []\n class_labels = []\n text_inputs = []\n num_class_obj = {}\n for cls_name in self.metadata['class_names']:\n num_class_obj[cls_name] = 0\n for i, label in enumerate(annotations):\n task = task_inputs[i]\n if task == 'semantic':\n classes, masks, texts = self.get_semantic_annotations(label, num_class_obj)\n elif task == 'instance':\n classes, masks, texts = self.get_instance_annotations(label, num_class_obj)\n elif task == 'panoptic':\n classes, masks, texts = self.get_panoptic_annotations(label, num_class_obj)\n else:\n raise ValueError(f'{task} was not expected, expected `semantic`, `instance` or `panoptic`')\n masks = [mask[None, ...] for mask in masks]\n masks = [self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks]\n masks = np.concatenate(masks, axis=0)\n mask_labels.append(torch.from_numpy(masks))\n class_labels.append(torch.from_numpy(classes).long())\n text_inputs.append(texts)\n encoded_inputs['mask_labels'] = mask_labels\n encoded_inputs['class_labels'] = class_labels\n encoded_inputs['text_inputs'] = text_inputs\n encoded_inputs['task_inputs'] = [f'the task is {task_input}' for task_input in task_inputs]\n return encoded_inputs\n\n def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]]=None) -> 'torch.Tensor':\n \"\"\"\n Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports\n PyTorch.\n\n Args:\n outputs ([`MaskFormerForInstanceSegmentation`]):\n Raw outputs of the model.\n target_sizes (`List[Tuple[int, int]]`, *optional*):\n List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested\n final size (height, width) of each prediction. If left to None, predictions will not be resized.\n Returns:\n `List[torch.Tensor]`:\n A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)\n corresponding to the target_sizes entry (if `target_sizes` is specified). 
Each entry of each\n `torch.Tensor` corresponds to a semantic class id.\n \"\"\"\n class_queries_logits = outputs.class_queries_logits\n masks_queries_logits = outputs.masks_queries_logits\n masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]\n masks_probs = masks_queries_logits.sigmoid()\n segmentation = torch.einsum('bqc, bqhw -> bchw', masks_classes, masks_probs)\n batch_size = class_queries_logits.shape[0]\n if target_sizes is not None:\n if batch_size != len(target_sizes):\n raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')\n semantic_segmentation = []\n for idx in range(batch_size):\n resized_logits = torch.nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)\n semantic_map = resized_logits[0].argmax(dim=0)\n semantic_segmentation.append(semantic_map)\n else:\n semantic_segmentation = segmentation.argmax(dim=1)\n semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]\n return semantic_segmentation\n\n def post_process_instance_segmentation(self, outputs, task_type: str='instance', is_demo: bool=True, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, target_sizes: Optional[List[Tuple[int, int]]]=None, return_coco_annotation: Optional[bool]=False):\n \"\"\"\n Converts the output of [`OneFormerForUniversalSegmentation`] into image instance segmentation\n predictions. Only supports PyTorch.\n\n Args:\n outputs ([`OneFormerForUniversalSegmentationOutput`]):\n The outputs from [`OneFormerForUniversalSegmentation`].\n task_type (`str`, *optional*, defaults to \"instance\"):\n The post processing depends on the task token input. If the `task_type` is \"panoptic\", we need to\n ignore the stuff predictions.\n is_demo (`bool`, *optional*, defaults to `True`):\n Whether the model is in demo mode. If `True`, use `threshold` to predict final masks.\n threshold (`float`, *optional*, defaults to 0.5):\n The probability score threshold to keep predicted instance masks.\n mask_threshold (`float`, *optional*, defaults to 0.5):\n Threshold to use when turning the predicted masks into binary values.\n overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):\n The overlap mask area threshold to merge or discard small disconnected parts within each binary\n instance mask.\n target_sizes (`List[Tuple]`, *optional*):\n List of length (batch_size), where each list item (`Tuple[int, int]`) corresponds to the requested\n final size (height, width) of each prediction in batch. If left to None, predictions will not be\n resized.\n return_coco_annotation (`bool`, *optional*, defaults to `False`):\n Whether to return predictions in COCO format.\n\n Returns:\n `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:\n - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set\n to `None` if no mask is found above `threshold`. 
If `target_sizes` is specified, segmentation is resized\n to the corresponding `target_sizes` entry.\n - **segments_info** -- A dictionary that contains additional information on each segment.\n - **id** -- an integer representing the `segment_id`.\n - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.\n - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.\n Multiple instances of the same class / label were fused and assigned a single `segment_id`.\n - **score** -- Prediction score of segment with `segment_id`.\n \"\"\"\n class_queries_logits = outputs.class_queries_logits\n masks_queries_logits = outputs.masks_queries_logits\n device = masks_queries_logits.device\n batch_size = class_queries_logits.shape[0]\n num_queries = class_queries_logits.shape[1]\n num_classes = class_queries_logits.shape[-1] - 1\n results: List[Dict[str, torch.Tensor]] = []\n for i in range(batch_size):\n scores = torch.nn.functional.softmax(class_queries_logits[i], dim=-1)[:, :-1]\n labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)\n scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)\n labels_per_image = labels[topk_indices]\n topk_indices = torch.div(topk_indices, num_classes, rounding_mode='floor')\n mask_pred = masks_queries_logits[i][topk_indices]\n if is_demo:\n keep = scores_per_image > threshold\n scores_per_image = scores_per_image[keep]\n labels_per_image = labels_per_image[keep]\n mask_pred = mask_pred[keep]\n if task_type == 'panoptic':\n keep = torch.zeros_like(scores_per_image).bool()\n for j, lab in enumerate(labels_per_image):\n keep[j] = lab in self.metadata['thing_ids']\n scores_per_image = scores_per_image[keep]\n labels_per_image = labels_per_image[keep]\n mask_pred = mask_pred[keep]\n if mask_pred.shape[0] <= 0:\n height, width = target_sizes[i] if target_sizes is not None else mask_pred.shape[1:]\n segmentation = torch.zeros((height, width)) - 1\n results.append({'segmentation': segmentation, 'segments_info': []})\n continue\n if 'ade20k' in self.class_info_file and (not is_demo) and ('instance' in task_type):\n for j in range(labels_per_image.shape[0]):\n labels_per_image[j] = self.metadata['thing_ids'].index(labels_per_image[j].item())\n target_size = target_sizes[i] if target_sizes is not None else None\n segmentation, segments = compute_segments(mask_pred, scores_per_image, labels_per_image, mask_threshold, overlap_mask_area_threshold, set(), target_size)\n if return_coco_annotation:\n segmentation = convert_segmentation_to_rle(segmentation)\n results.append({'segmentation': segmentation, 'segments_info': segments})\n return results\n\n def post_process_panoptic_segmentation(self, outputs, threshold: float=0.5, mask_threshold: float=0.5, overlap_mask_area_threshold: float=0.8, label_ids_to_fuse: Optional[Set[int]]=None, target_sizes: Optional[List[Tuple[int, int]]]=None) -> List[Dict]:\n \"\"\"\n Converts the output of [`OneFormerForUniversalSegmentation`] into image panoptic segmentation\n predictions. 
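Before the panoptic variant continues below, the top-k selection in `post_process_instance_segmentation` above deserves a note: the per-query class scores are flattened before ranking, so a single flat index encodes both the query and the class. A minimal, self-contained sketch of that decoding (toy shapes, not real model outputs):

```python
import torch

num_queries, num_classes = 4, 3
scores = torch.rand(num_queries, num_classes).softmax(dim=-1)

# Labels repeat [0..C-1] for every query, mirroring the flattened scores.
labels = torch.arange(num_classes).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)

scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False)
labels_per_image = labels[topk_indices]  # class id == flat index % num_classes
query_indices = torch.div(topk_indices, num_classes, rounding_mode='floor')  # query == flat index // num_classes

# Every kept score maps back to exactly one (query, class) pair.
for s, q, c in zip(scores_per_image, query_indices, labels_per_image):
    assert torch.isclose(scores[q, c], s)
```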
Only supports PyTorch.\n\n Args:\n outputs ([`OneFormerForUniversalSegmentationOutput`]):\n The outputs from [`OneFormerForUniversalSegmentation`].\n threshold (`float`, *optional*, defaults to 0.5):\n The probability score threshold to keep predicted instance masks.\n mask_threshold (`float`, *optional*, defaults to 0.5):\n Threshold to use when turning the predicted masks into binary values.\n overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):\n The overlap mask area threshold to merge or discard small disconnected parts within each binary\n instance mask.\n label_ids_to_fuse (`Set[int]`, *optional*):\n The labels in this set will have all their instances be fused together. For instance we could say\n there can only be one sky in an image, but several persons, so the label ID for sky would be in that\n set, but not the one for person.\n target_sizes (`List[Tuple]`, *optional*):\n List of length (batch_size), where each list item (`Tuple[int, int]`) corresponds to the requested\n final size (height, width) of each prediction in batch. If left to None, predictions will not be\n resized.\n\n Returns:\n `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:\n - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set\n to `None` if no mask is found above `threshold`. If `target_sizes` is specified, segmentation is resized\n to the corresponding `target_sizes` entry.\n - **segments_info** -- A dictionary that contains additional information on each segment.\n - **id** -- an integer representing the `segment_id`.\n - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.\n - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.\n Multiple instances of the same class / label were fused and assigned a single `segment_id`.\n - **score** -- Prediction score of segment with `segment_id`.\n \"\"\"\n if label_ids_to_fuse is None:\n logger.warning('`label_ids_to_fuse` unset. No instance will be fused.')\n label_ids_to_fuse = set()\n class_queries_logits = outputs.class_queries_logits\n masks_queries_logits = outputs.masks_queries_logits\n batch_size = class_queries_logits.shape[0]\n num_labels = class_queries_logits.shape[-1] - 1\n mask_probs = masks_queries_logits.sigmoid()\n pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)\n results: List[Dict[str, TensorType]] = []\n for i in range(batch_size):\n mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels)\n if mask_probs_item.shape[0] <= 0:\n height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]\n segmentation = torch.zeros((height, width)) - 1\n results.append({'segmentation': segmentation, 'segments_info': []})\n continue\n target_size = target_sizes[i] if target_sizes is not None else None\n segmentation, segments = compute_segments(mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size)\n results.append({'segmentation': segmentation, 'segments_info': segments})\n return results", "docstring": "Constructs a OneFormer image processor. 
The image processor can be used to prepare image(s), task input(s) and\noptional text inputs and targets for the model.\n\nThis image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should\nrefer to this superclass for more information regarding those methods.\n\nArgs:\n do_resize (`bool`, *optional*, defaults to `True`):\n Whether to resize the input to a certain `size`.\n size (`int`, *optional*, defaults to 800):\n Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a\n sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of\n the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size *\n height / width, size)`.\n resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):\n An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,\n `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,\n `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set\n to `True`.\n do_rescale (`bool`, *optional*, defaults to `True`):\n Whether to rescale the input to a certain `scale`.\n rescale_factor (`float`, *optional*, defaults to `1/ 255`):\n Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `True`):\n Whether or not to normalize the input with mean and standard deviation.\n image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`):\n The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.\n image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`):\n The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the\n ImageNet std.\n ignore_index (`int`, *optional*):\n Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels\n denoted with 0 (background) will be replaced with `ignore_index`.\n do_reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0\n is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k).\n The background label will be replaced by `ignore_index`.\n repo_path (`str`, *optional*, defaults to `\"shi-labs/oneformer_demo\"`):\n Path to hub repo or local directory containing the JSON file with class information for the dataset.\n If unset, will look for `class_info_file` in the current working directory.\n class_info_file (`str`, *optional*):\n JSON file containing class information for the dataset. 
See `shi-labs/oneformer_demo/cityscapes_panoptic.json` for an example.\n num_text (`int`, *optional*):\n Number of text entries in the text input list.\n num_labels (`int`, *optional*):\n The number of labels in the segmentation map."} +{"repo": "transformers", "function": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache,\n to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`torch.Tensor`):\n Batch size."} +{"repo": "keras", "function": "def EfficientNetV2(width_coefficient, depth_coefficient, default_size, dropout_rate=0.2, drop_connect_rate=0.2, depth_divisor=8, min_depth=8, bn_momentum=0.9, activation='swish', blocks_args='default', name='efficientnetv2', include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='softmax', include_preprocessing=True, weights_name=None):\n if blocks_args == 'default':\n blocks_args = DEFAULT_BLOCKS_ARGS[name]\n if not (weights in {'imagenet', None} or file_utils.exists(weights)):\n raise ValueError(f'The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.Received: weights={weights}')\n if weights == 'imagenet' and include_top and (classes != 1000):\n raise ValueError('If using `weights=\"imagenet\"` with `include_top` as true, `classes` should be 1000')\n input_shape = imagenet_utils.obtain_input_shape(input_shape, default_size=default_size, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights)\n if input_tensor is None:\n img_input = layers.Input(shape=input_shape)\n elif not 
backend.is_keras_tensor(input_tensor):\n img_input = layers.Input(tensor=input_tensor, shape=input_shape)\n else:\n img_input = input_tensor\n bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n x = img_input\n if include_preprocessing:\n num_channels = input_shape[bn_axis - 1]\n if name.split('-')[-1].startswith('b') and num_channels == 3:\n x = layers.Rescaling(scale=1.0 / 255)(x)\n if backend.image_data_format() == 'channels_first':\n mean = [[[[0.485]], [[0.456]], [[0.406]]]]\n variance = [[[[0.229 ** 2]], [[0.224 ** 2]], [[0.225 ** 2]]]]\n else:\n mean = [0.485, 0.456, 0.406]\n variance = [0.229 ** 2, 0.224 ** 2, 0.225 ** 2]\n x = layers.Normalization(mean=mean, variance=variance, axis=bn_axis)(x)\n else:\n x = layers.Rescaling(scale=1.0 / 128.0, offset=-1)(x)\n stem_filters = round_filters(filters=blocks_args[0]['input_filters'], width_coefficient=width_coefficient, min_depth=min_depth, depth_divisor=depth_divisor)\n x = layers.Conv2D(filters=stem_filters, kernel_size=3, strides=2, kernel_initializer=CONV_KERNEL_INITIALIZER, padding='same', use_bias=False, name='stem_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, momentum=bn_momentum, name='stem_bn')(x)\n x = layers.Activation(activation, name='stem_activation')(x)\n blocks_args = copy.deepcopy(blocks_args)\n b = 0\n blocks = float(sum((args['num_repeat'] for args in blocks_args)))\n for i, args in enumerate(blocks_args):\n assert args['num_repeat'] > 0\n args['input_filters'] = round_filters(filters=args['input_filters'], width_coefficient=width_coefficient, min_depth=min_depth, depth_divisor=depth_divisor)\n args['output_filters'] = round_filters(filters=args['output_filters'], width_coefficient=width_coefficient, min_depth=min_depth, depth_divisor=depth_divisor)\n block = {0: MBConvBlock, 1: FusedMBConvBlock}[args.pop('conv_type')]\n repeats = round_repeats(repeats=args.pop('num_repeat'), depth_coefficient=depth_coefficient)\n for j in range(repeats):\n if j > 0:\n args['strides'] = 1\n args['input_filters'] = args['output_filters']\n x = block(activation=activation, bn_momentum=bn_momentum, survival_probability=drop_connect_rate * b / blocks, name=f'block{i + 1}{chr(j + 97)}_', **args)(x)\n b += 1\n top_filters = round_filters(filters=1280, width_coefficient=width_coefficient, min_depth=min_depth, depth_divisor=depth_divisor)\n x = layers.Conv2D(filters=top_filters, kernel_size=1, strides=1, kernel_initializer=CONV_KERNEL_INITIALIZER, padding='same', data_format=backend.image_data_format(), use_bias=False, name='top_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, momentum=bn_momentum, name='top_bn')(x)\n x = layers.Activation(activation=activation, name='top_activation')(x)\n if include_top:\n x = layers.GlobalAveragePooling2D(name='avg_pool')(x)\n if dropout_rate > 0:\n x = layers.Dropout(dropout_rate, name='top_dropout')(x)\n imagenet_utils.validate_activation(classifier_activation, weights)\n x = layers.Dense(classes, activation=classifier_activation, kernel_initializer=DENSE_KERNEL_INITIALIZER, bias_initializer=initializers.Constant(0.0), name='predictions')(x)\n elif pooling == 'avg':\n x = layers.GlobalAveragePooling2D(name='avg_pool')(x)\n elif pooling == 'max':\n x = layers.GlobalMaxPooling2D(name='max_pool')(x)\n if input_tensor is not None:\n inputs = operation_utils.get_source_inputs(input_tensor)\n else:\n inputs = img_input\n model = Functional(inputs, x, name=name)\n if weights == 'imagenet':\n if include_top:\n file_suffix = '.h5'\n file_hash = WEIGHTS_HASHES[weights_name][0]\n 
else:\n file_suffix = '_notop.h5'\n file_hash = WEIGHTS_HASHES[weights_name][1]\n file_name = name + file_suffix\n weights_path = file_utils.get_file(file_name, BASE_WEIGHTS_PATH + file_name, cache_subdir='models', file_hash=file_hash)\n model.load_weights(weights_path)\n elif weights is not None:\n model.load_weights(weights)\n return model", "docstring": "Instantiates the EfficientNetV2 architecture using given scaling\ncoefficients.\n\nArgs:\n width_coefficient: float, scaling coefficient for network width.\n depth_coefficient: float, scaling coefficient for network depth.\n default_size: integer, default input image size.\n dropout_rate: float, dropout rate before final classifier layer.\n drop_connect_rate: float, dropout rate at skip connections.\n depth_divisor: integer, a unit of network width.\n min_depth: integer, minimum number of filters.\n bn_momentum: float. Momentum parameter for Batch Normalization layers.\n activation: activation function.\n blocks_args: list of dicts, parameters to construct block modules.\n name: string, model name.\n include_top: whether to include the fully-connected layer at the top of\n the network.\n weights: one of `None` (random initialization), `\"imagenet\"`\n (pre-training on ImageNet),\n or the path to the weights file to be loaded.\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) or\n numpy array to use as image input for the model.\n input_shape: optional shape tuple, only to be specified if `include_top`\n is `False`. It should have exactly 3 input channels.\n pooling: optional pooling mode for feature extraction when `include_top`\n is `False`.\n - `None` means that the output of the model will be the\n 4D tensor output of the last convolutional layer.\n - `\"avg\"` means that global average pooling will be applied to\n the output of the last convolutional layer,\n and thus the output of the model will be a 2D tensor.\n - `\"max\"` means that global max pooling will be applied.\n classes: optional number of classes to classify images into,\n only to be specified if `include_top` is `True`, and if no `weights`\n argument is specified.\n classifier_activation: A string or callable. The activation function to\n use on the \"top\" layer. Ignored unless `include_top=True`. 
Set\n `classifier_activation=None` to return the logits of the \"top\"\n layer.\n include_preprocessing: Boolean, whether to include the preprocessing\n layer (`Rescaling`) at the bottom of the network.\n Defaults to `True`.\n\nReturns:\n A model instance."} +{"repo": "transformers", "function": "def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:\n try:\n from pycocotools import mask as coco_mask\n except ImportError:\n raise ImportError('Pycocotools is not installed in your environment.')\n masks = []\n for polygons in segmentations:\n rles = coco_mask.frPyObjects(polygons, height, width)\n mask = coco_mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = np.asarray(mask, dtype=np.uint8)\n mask = np.any(mask, axis=2)\n masks.append(mask)\n if masks:\n masks = np.stack(masks, axis=0)\n else:\n masks = np.zeros((0, height, width), dtype=np.uint8)\n return masks", "docstring": "Convert a COCO polygon annotation to a mask.\n\nArgs:\n segmentations (`List[List[float]]`):\n List of polygons, each polygon represented by a list of x-y coordinates.\n height (`int`):\n Height of the mask.\n width (`int`):\n Width of the mask."} +{"repo": "tensorflow", "function": "def pack_eager_tensors(tensors, ctx=None) -> EagerTensor:\n if not isinstance(tensors, list):\n raise TypeError(f'tensors must be a list, but got a {type(tensors)}')\n if not tensors:\n raise ValueError('Cannot pack an empty list of tensors.')\n dtype = tensors[0].dtype\n shape = tensors[0].shape\n handle_data = tensors[0]._handle_data\n is_resource = dtype == dtypes.resource\n for i in range(len(tensors)):\n t = tensors[i]\n if not isinstance(t, EagerTensor):\n raise TypeError(f'All tensors being packed must be EagerTensor. Found an item of type {type(t)}.')\n if t.dtype != dtype:\n raise ValueError(f'All tensors being packed should have the same dtype {dtype}, but the {i}-th tensor is of dtype {t.dtype}')\n if t.shape != shape:\n raise ValueError(f'All tensors being packed should have the same shape {shape}, but the {i}-th tensor is of shape {t.shape}')\n if is_resource and t._handle_data != handle_data:\n raise ValueError(f'All tensors being packed should have the same handle data {handle_data}, but the {i}-th tensor is of handle data {t._handle_data}')\n if ctx is None:\n ctx = context.context()\n packed_tensor = ctx.pack_eager_tensors(tensors)\n if handle_data is not None:\n packed_tensor._handle_data = handle_data\n\n def grad_fun(_):\n raise ValueError('Computing gradients through pack_eager_tensors is not supported.')\n record.record_operation('pack_eager_tensors', [packed_tensor], tensors, grad_fun)\n return packed_tensor", "docstring": "Pack multiple `EagerTensor`s of the same dtype and shape.\n\nArgs:\n tensors: a list of EagerTensors to pack.\n ctx: context.context().\n\nReturns:\n A packed EagerTensor."} +{"repo": "keras", "function": "class BinaryFocalCrossentropy(LossFunctionWrapper):\n\n def __init__(self, apply_class_balancing=False, alpha=0.25, gamma=2.0, from_logits=False, label_smoothing=0.0, axis=-1, reduction='sum_over_batch_size', name='binary_focal_crossentropy', dtype=None):\n super().__init__(binary_focal_crossentropy, name=name, reduction=reduction, dtype=dtype, apply_class_balancing=apply_class_balancing, alpha=alpha, gamma=gamma, from_logits=from_logits, label_smoothing=label_smoothing, axis=axis)\n self.from_logits = from_logits\n self.label_smoothing = label_smoothing\n self.axis = axis\n self.apply_class_balancing = apply_class_balancing\n self.alpha 
= alpha\n self.gamma = gamma\n\n def get_config(self):\n config = Loss.get_config(self)\n config.update({'from_logits': self.from_logits, 'label_smoothing': self.label_smoothing, 'axis': self.axis, 'apply_class_balancing': self.apply_class_balancing, 'alpha': self.alpha, 'gamma': self.gamma})\n return config", "docstring": "Computes focal cross-entropy loss between true labels and predictions.\n\nBinary cross-entropy loss is often used for binary (0 or 1) classification\ntasks. The loss function requires the following inputs:\n\n- `y_true` (true label): This is either 0 or 1.\n- `y_pred` (predicted value): This is the model's prediction, i.e, a single\n floating-point value which either represents a\n [logit](https://en.wikipedia.org/wiki/Logit), (i.e, value in [-inf, inf]\n when `from_logits=True`) or a probability (i.e, value in `[0., 1.]` when\n `from_logits=False`).\n\nAccording to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it\nhelps to apply a \"focal factor\" to down-weight easy examples and focus more\non hard examples. By default, the focal tensor is computed as follows:\n\n`focal_factor = (1 - output) ** gamma` for class 1\n`focal_factor = output ** gamma` for class 0\nwhere `gamma` is a focusing parameter. When `gamma=0`, this function is\nequivalent to the binary crossentropy loss.\n\nArgs:\n apply_class_balancing: A bool, whether to apply weight balancing on the\n binary classes 0 and 1.\n alpha: A weight balancing factor for class 1, default is `0.25` as\n mentioned in reference [Lin et al., 2018](\n https://arxiv.org/pdf/1708.02002.pdf). The weight for class 0 is\n `1.0 - alpha`.\n gamma: A focusing parameter used to compute the focal factor, default is\n `2.0` as mentioned in the reference\n [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf).\n from_logits: Whether to interpret `y_pred` as a tensor of\n [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we\n assume that `y_pred` are probabilities (i.e., values in `[0, 1]`).\n label_smoothing: Float in `[0, 1]`. When `0`, no smoothing occurs.\n When > `0`, we compute the loss between the predicted labels\n and a smoothed version of the true labels, where the smoothing\n squeezes the labels towards `0.5`.\n Larger values of `label_smoothing` correspond to heavier smoothing.\n axis: The axis along which to compute crossentropy (the features axis).\n Defaults to `-1`.\n reduction: Type of reduction to apply to the loss. In almost all cases\n this should be `\"sum_over_batch_size\"`. Supported options are\n `\"sum\"`, `\"sum_over_batch_size\"`, `\"mean\"`,\n `\"mean_with_sample_weight\"` or `None`. `\"sum\"` sums the loss,\n `\"sum_over_batch_size\"` and `\"mean\"` sum the loss and divide by the\n sample size, and `\"mean_with_sample_weight\"` sums the loss and\n divides by the sum of the sample weights. `\"none\"` and `None`\n perform no aggregation. Defaults to `\"sum_over_batch_size\"`.\n name: Optional name for the loss instance.\n dtype: The dtype of the loss's computations. Defaults to `None`, which\n means using `keras.backend.floatx()`. `keras.backend.floatx()` is a\n `\"float32\"` unless set to different value\n (via `keras.backend.set_floatx()`). 
If a `keras.DTypePolicy` is\n provided, then the `compute_dtype` will be utilized.\n\nExamples:\n\nWith the `compile()` API:\n\n```python\nmodel.compile(\n loss=keras.losses.BinaryFocalCrossentropy(\n gamma=2.0, from_logits=True),\n ...\n)\n```\n\nAs a standalone function:\n\n>>> # Example 1: (batch_size = 1, number of samples = 4)\n>>> y_true = np.array([0, 1, 0, 0])\n>>> y_pred = np.array([-18.6, 0.51, 2.94, -12.8])\n>>> loss = keras.losses.BinaryFocalCrossentropy(\n... gamma=2, from_logits=True)\n>>> loss(y_true, y_pred)\n0.691\n\n>>> # Apply class weight\n>>> loss = keras.losses.BinaryFocalCrossentropy(\n... apply_class_balancing=True, gamma=2, from_logits=True)\n>>> loss(y_true, y_pred)\n0.51\n\n>>> # Example 2: (batch_size = 2, number of samples = 4)\n>>> y_true = np.array([[0, 1], [0, 0]])\n>>> y_pred = np.array([[-18.6, 0.51], [2.94, -12.8]])\n>>> # Using default 'auto'/'sum_over_batch_size' reduction type.\n>>> loss = keras.losses.BinaryFocalCrossentropy(\n... gamma=3, from_logits=True)\n>>> loss(y_true, y_pred)\n0.647\n\n>>> # Apply class weight\n>>> loss = keras.losses.BinaryFocalCrossentropy(\n... apply_class_balancing=True, gamma=3, from_logits=True)\n>>> loss(y_true, y_pred)\n0.482\n\n>>> # Using 'sample_weight' attribute with focal effect\n>>> loss = keras.losses.BinaryFocalCrossentropy(\n... gamma=3, from_logits=True)\n>>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])\n0.133\n\n>>> # Apply class weight\n>>> loss = keras.losses.BinaryFocalCrossentropy(\n... apply_class_balancing=True, gamma=3, from_logits=True)\n>>> loss(y_true, y_pred, sample_weight=[0.8, 0.2])\n0.097\n\n>>> # Using 'sum' reduction` type.\n>>> loss = keras.losses.BinaryFocalCrossentropy(\n... gamma=4, from_logits=True,\n... reduction=\"sum\")\n>>> loss(y_true, y_pred)\n1.222\n\n>>> # Apply class weight\n>>> loss = keras.losses.BinaryFocalCrossentropy(\n... apply_class_balancing=True, gamma=4, from_logits=True,\n... reduction=\"sum\")\n>>> loss(y_true, y_pred)\n0.914\n\n>>> # Using 'none' reduction type.\n>>> loss = keras.losses.BinaryFocalCrossentropy(\n... gamma=5, from_logits=True,\n... reduction=None)\n>>> loss(y_true, y_pred)\narray([0.0017 1.1561], dtype=float32)\n\n>>> # Apply class weight\n>>> loss = keras.losses.BinaryFocalCrossentropy(\n... apply_class_balancing=True, gamma=5, from_logits=True,\n... reduction=None)\n>>> loss(y_true, y_pred)\narray([0.0004 0.8670], dtype=float32)"} +{"repo": "pyglove", "function": "def _get_relationships(self, dna: pg.DNA) -> Tuple[List[pg.DNA], List[Optional[pg.DNA]], List[Optional[int]]]:\n\n def is_mutable_node(obj):\n return self._is_mutable_node(obj)\n results = pg.query(dna, where=is_mutable_node, enter_selected=True)\n child_nodes = list(results.values())\n parent_nodes = [n.parent_dna for n in child_nodes]\n child_indexes = [n.sym_path.key if n.parent_dna else None for n in child_nodes]\n return (child_nodes, parent_nodes, child_indexes)", "docstring": "Extracts the parent-child node relationships in a DNA.\n\nNote that PyGlove represents the nodes in a DNA instance as DNA instances\nthemselves.\n\nArgs:\n dna: the DNA that will be mutated.\n\nReturns:\n A tuple of 3 lists of the same length with corresponding elements:\n -child_nodes: a list of every node in the DNA.\n -parent_nodes: a list of the parent node of the corresponding node in\n `child_nodes`.\n -child_indexes: a list of indexes. 
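The focal-factor formula documented above is easy to check numerically. A rough NumPy re-derivation (not the Keras code path; label smoothing and non-mean reductions are omitted) that reproduces Example 1's values of 0.691 and, with class balancing, 0.51:

```python
import numpy as np

def binary_focal_crossentropy(y_true, logits, gamma=2.0, alpha=0.25, apply_class_balancing=False):
    p = 1.0 / (1.0 + np.exp(-logits))  # sigmoid, since we pass logits
    bce = -(y_true * np.log(p) + (1 - y_true) * np.log(1 - p))
    # focal_factor = (1 - p) ** gamma for class 1, p ** gamma for class 0
    focal = np.where(y_true == 1, (1 - p) ** gamma, p ** gamma)
    loss = focal * bce
    if apply_class_balancing:  # alpha weights class 1, (1 - alpha) weights class 0
        loss = loss * np.where(y_true == 1, alpha, 1 - alpha)
    return loss.mean()

y_true = np.array([0.0, 1.0, 0.0, 0.0])
logits = np.array([-18.6, 0.51, 2.94, -12.8])
print(round(binary_focal_crossentropy(y_true, logits), 3))                               # 0.691
print(round(binary_focal_crossentropy(y_true, logits, apply_class_balancing=True), 3))  # 0.51
```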
For all j, child_nodes[j] is the i-th\n child of parent_nodes[j], where i = child_indexes[j].\n Note that the root is included as a \"child\" with a `None` parent."} +{"repo": "tensorflow", "function": "def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=0.001):\n if ndim(x) == 4:\n if axis == 1 or axis == -3:\n tf_data_format = 'NCHW'\n elif axis == 3 or axis == -1:\n tf_data_format = 'NHWC'\n else:\n tf_data_format = None\n if tf_data_format == 'NHWC' or (tf_data_format == 'NCHW' and _has_nchw_support()):\n if ndim(mean) > 1:\n mean = array_ops.reshape(mean, [-1])\n if ndim(var) > 1:\n var = array_ops.reshape(var, [-1])\n if beta is None:\n beta = zeros_like(mean)\n elif ndim(beta) > 1:\n beta = array_ops.reshape(beta, [-1])\n if gamma is None:\n gamma = ones_like(mean)\n elif ndim(gamma) > 1:\n gamma = array_ops.reshape(gamma, [-1])\n y, _, _ = nn.fused_batch_norm(x, gamma, beta, epsilon=epsilon, mean=mean, variance=var, data_format=tf_data_format, is_training=False)\n return y\n return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)", "docstring": "Applies batch normalization on x given mean, var, beta and gamma.\n\nI.e. returns:\n`output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`\n\nArgs:\n x: Input tensor or variable.\n mean: Mean of batch.\n var: Variance of batch.\n beta: Tensor with which to center the input.\n gamma: Tensor by which to scale the input.\n axis: Integer, the axis that should be normalized.\n (typically the features axis).\n epsilon: Fuzz factor.\n\nReturns:\n A tensor."} +{"repo": "tensorflow", "function": "def _infer_graph(self, inputs, clusters):\n assert isinstance(inputs, list)\n scores = self._distance_graph(inputs, clusters, self._distance_metric)\n output = []\n if self._distance_metric == COSINE_DISTANCE and (not self._clusters_l2_normalized()):\n with ops.colocate_with(clusters, ignore_existing=True):\n clusters = nn_impl.l2_normalize(clusters, axis=1)\n for inp, score in zip(inputs, scores):\n with ops.colocate_with(inp, ignore_existing=True):\n indices, distances = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)\n if self._distance_metric == COSINE_DISTANCE:\n distances *= 0.5\n output.append((score, array_ops.squeeze(distances, [-1]), array_ops.squeeze(indices, [-1])))\n return zip(*output)", "docstring": "Maps input to closest cluster and the score.\n\nArgs:\n inputs: list of input Tensors.\n clusters: Tensor of cluster centers.\n\nReturns:\n List of tuple, where each value in tuple corresponds to a value in inp.\n The tuple has following three elements:\n all_scores: distance of each input to each cluster center.\n score: distance of each input to closest cluster center.\n cluster_idx: index of cluster center closest to the corresponding input."} +{"repo": "transformers", "function": "class MMBTForClassification(nn.Module):\n\n def __init__(self, config, transformer, encoder):\n super().__init__()\n self.num_labels = config.num_labels\n self.mmbt = MMBTModel(config, transformer, encoder)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, input_modal, input_ids=None, modal_start_tokens=None, modal_end_tokens=None, attention_mask=None, token_type_ids=None, modal_token_type_ids=None, position_ids=None, modal_position_ids=None, head_mask=None, inputs_embeds=None, labels=None, return_dict=None):\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = 
self.mmbt(input_modal=input_modal, input_ids=input_ids, modal_start_tokens=modal_start_tokens, modal_end_tokens=modal_end_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, modal_token_type_ids=modal_token_type_ids, position_ids=position_ids, modal_position_ids=modal_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, return_dict=return_dict)\n pooled_output = outputs[1]\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n if not return_dict:\n output = (logits,) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "**labels**: (*optional*) `torch.LongTensor` of shape `(batch_size,)`:\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\nReturns: *Tuple* comprising various elements depending on the configuration (config) and inputs: **loss**:\n(*optional*, returned when `labels` is provided) `torch.FloatTensor` of shape `(1,)`: Classification (or\nregression if config.num_labels==1) loss. **logits**:\n `torch.FloatTensor` of shape `(batch_size, config.num_labels)` Classification (or regression if\n config.num_labels==1) scores (before SoftMax).\n**hidden_states**: (*optional*, returned when `output_hidden_states=True`) list of `torch.FloatTensor` (one for\nthe output of each layer + the output of the embeddings) of shape `(batch_size, sequence_length, hidden_size)`:\nHidden-states of the model at the output of each layer plus the initial embedding outputs. **attentions**:\n(*optional*, returned when `output_attentions=True`) list of `torch.FloatTensor` (one for each layer) of shape\n`(batch_size, num_heads, sequence_length, sequence_length)`: Attentions weights after the attention softmax, used\nto compute the weighted average in the self-attention heads.\n\nExamples:\n\n```python\n# For example purposes. 
Not runnable.\ntransformer = BertModel.from_pretrained(\"google-bert/bert-base-uncased\")\nencoder = ImageEncoder(args)\nmodel = MMBTForClassification(config, transformer, encoder)\noutputs = model(input_modal, input_ids, labels=labels)\nloss, logits = outputs[:2]\n```"} +{"repo": "beam", "function": "def _remove_lines(self, lines, sublist_lengths, num_to_remove):\n curr = 0\n result = []\n for offset in sublist_lengths:\n end = curr + offset\n start = min(curr + num_to_remove, end)\n result += lines[start:end]\n curr += offset\n return result", "docstring": "Utility function to remove num_to_remove lines from each sublist.\n\nArgs:\n lines: list of items.\n sublist_lengths: list of integers representing length of sublist\n corresponding to each source file.\n num_to_remove: number of lines to remove from each sublist.\nReturns:\n remaining lines."} +{"repo": "tf-quant-finance", "function": "def __init__(self, start_date, end_date, coupon_spec, first_coupon_date=None, penultimate_coupon_date=None, dtype=None, name=None):\n super(FixedCashflowStream, self).__init__()\n self._name = name or 'fixed_cashflow_stream'\n with tf.name_scope(self._name):\n self._start_date = dates.convert_to_date_tensor(start_date)\n self._end_date = dates.convert_to_date_tensor(end_date)\n self._batch_size = self._start_date.shape[0]\n self._dtype = dtype\n if first_coupon_date is None:\n self._first_coupon_date = None\n else:\n self._first_coupon_date = dates.convert_to_date_tensor(first_coupon_date)\n if penultimate_coupon_date is None:\n self._penultimate_coupon_date = None\n else:\n self._penultimate_coupon_date = dates.convert_to_date_tensor(penultimate_coupon_date)\n self._setup(coupon_spec)", "docstring": "Initialize a batch of fixed cashflow streams.\n\nArgs:\n start_date: A rank 1 `DateTensor` specifying the starting dates of the\n accrual of the first coupon of the cashflow stream. The shape of the\n input corresponds to the number of streams being created.\n end_date: A rank 1 `DateTensor` specifying the end dates for accrual of\n the last coupon in each cashflow stream. The shape of the input should\n be the same as that of `start_date`.\n coupon_spec: A scalar or a list of `FixedCouponSpecs` specifying the\n details of the coupon payment for the cashflow stream. If specified as\n a list then the length of the list should be the same as the number of\n streams being created and each coupon within the list must have the\n same daycount_convention and businessday_rule. If specified as\n a scalar, then the elements of the namedtuple must be of the same shape\n as (or compatible with) the shape of `start_date`.\n first_coupon_date: An optional rank 1 `DateTensor` specifying the payment\n dates of the first coupon of the cashflow stream. Use this input for\n cashflows with irregular first coupon.\n Default value: None which implies regular first coupon.\n penultimate_coupon_date: An optional rank 1 `DateTensor` specifying the\n payment date of the penultimate (next to last) coupon of the cashflow\n stream. Use this input for cashflows with irregular last coupon.\n Default value: None which implies regular last coupon.\n dtype: `tf.Dtype`. If supplied the dtype for the real variables or ops\n either supplied to the FixedCashflowStream object or created by the\n object.\n Default value: None which maps to the default dtype inferred by\n TensorFlow.\n name: Python str. 
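The Beam `_remove_lines` utility above is easiest to follow with concrete inputs. A self-contained rerun of its logic (copied here as a free function; the data is made up):

```python
def remove_lines(lines, sublist_lengths, num_to_remove):
    curr, result = 0, []
    for offset in sublist_lengths:
        end = curr + offset
        start = min(curr + num_to_remove, end)  # clamp so we never cross into the next sublist
        result += lines[start:end]
        curr += offset
    return result

# Three source files contributed 3, 2 and 1 lines; drop the first line of each.
lines = ['a1', 'a2', 'a3', 'b1', 'b2', 'c1']
print(remove_lines(lines, [3, 2, 1], 1))  # ['a2', 'a3', 'b2']
```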
The name to give to the ops created by this class.\n Default value: `None` which maps to 'fixed_cashflow_stream'."} +{"repo": "mobly", "function": "def list_adb_devices_by_usb_id():\n out = adb.AdbProxy().devices(['-l'])\n clean_lines = str(out, 'utf-8').strip().split('\\n')\n results = []\n for line in clean_lines:\n tokens = line.strip().split()\n if len(tokens) > 2 and tokens[1] == 'device':\n results.append(tokens[2])\n return results", "docstring": "List the usb id of all android devices connected to the computer that\nare detected by adb.\n\nReturns:\n A list of strings that are android device usb ids. Empty if there's\n none."} +{"repo": "tensorflow", "function": "def pad(tensor, paddings, mode='CONSTANT', name=None, constant_values=0):\n mode = mode.upper()\n if mode == 'CONSTANT':\n if not tensor_util.is_tf_type(constant_values) and np.ndim(constant_values) == 0 and (constant_values == np.zeros_like(constant_values)):\n result = gen_array_ops.pad(tensor, paddings, name=name)\n else:\n result = gen_array_ops.pad_v2(tensor, paddings, constant_values, name=name)\n elif mode == 'REFLECT':\n result = gen_array_ops.mirror_pad(tensor, paddings, mode='REFLECT', name=name)\n elif mode == 'SYMMETRIC':\n result = gen_array_ops.mirror_pad(tensor, paddings, mode='SYMMETRIC', name=name)\n else:\n raise ValueError(f'Value of argument `mode` expected to be one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\". Received `mode` = {mode}')\n if not context.executing_eagerly():\n paddings_constant = _get_paddings_constant(paddings)\n input_shape = tensor_shape.TensorShape(tensor.shape) if isinstance(tensor, tensor_lib.Tensor) else result.op.inputs[0].shape\n if input_shape.ndims is not None and (not result.shape.is_fully_defined()) and (paddings_constant is not None):\n new_shape = []\n for padding, dim in zip(paddings_constant, input_shape.as_list()):\n if padding is None or dim is None or any((x is None for x in padding)):\n new_shape.append(None)\n else:\n new_shape.append(sum(padding) + dim)\n result.set_shape(new_shape)\n return result", "docstring": "Pads a tensor.\n\nThis operation pads a `tensor` according to the `paddings` you specify.\n`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how\nmany values to add before the contents of `tensor` in that dimension, and\n`paddings[D, 1]` indicates how many values to add after the contents of\n`tensor` in that dimension. If `mode` is \"REFLECT\" then both `paddings[D, 0]`\nand `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. 
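Returning to the mobly `list_adb_devices_by_usb_id` helper above: it keys off the token layout of `adb devices -l`. A sketch of the same parsing against a hypothetical dump (the device line is invented; real output varies by device):

```python
# Hypothetical `adb devices -l` output; the header and the offline entry are filtered out.
sample = (
    'List of devices attached\n'
    '0123456789ABCDEF       device usb:1-4 product:bullhead model:Nexus_5X device:bullhead\n'
    'emulator-5554          offline\n'
)

results = []
for line in sample.strip().split('\n'):
    tokens = line.strip().split()
    # tokens[1] == 'device' keeps only online devices; tokens[2] is then the usb id.
    if len(tokens) > 2 and tokens[1] == 'device':
        results.append(tokens[2])
print(results)  # ['usb:1-4']
```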
If\n`mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\nno greater than `tensor.dim_size(D)`.\n\nThe padded size of each dimension D of the output is:\n\n`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\nFor example:\n\n```python\nt = tf.constant([[1, 2, 3], [4, 5, 6]])\npaddings = tf.constant([[1, 1,], [2, 2]])\n# 'constant_values' is 0.\n# rank of 't' is 2.\ntf.pad(t, paddings, \"CONSTANT\") # [[0, 0, 0, 0, 0, 0, 0],\n # [0, 0, 1, 2, 3, 0, 0],\n # [0, 0, 4, 5, 6, 0, 0],\n # [0, 0, 0, 0, 0, 0, 0]]\n\ntf.pad(t, paddings, \"REFLECT\") # [[6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1],\n # [6, 5, 4, 5, 6, 5, 4],\n # [3, 2, 1, 2, 3, 2, 1]]\n\ntf.pad(t, paddings, \"SYMMETRIC\") # [[2, 1, 1, 2, 3, 3, 2],\n # [2, 1, 1, 2, 3, 3, 2],\n # [5, 4, 4, 5, 6, 6, 5],\n # [5, 4, 4, 5, 6, 6, 5]]\n```\n\nArgs:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\" (case-insensitive)\n name: A name for the operation (optional).\n constant_values: In \"CONSTANT\" mode, the scalar pad value to use. Must be\n same type as `tensor`.\n\nReturns:\n A `Tensor`. Has the same type as `tensor`.\n\nRaises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\"."} +{"repo": "transformers", "function": "def build_string_from_input(prompt, bos_token, image_seq_len, image_token, num_images):\n return f'{image_token * image_seq_len * num_images}{bos_token}{prompt}\\n'", "docstring": "Builds a string from the input prompt and image tokens.\nFor example, for the call:\nbuild_string_from_input(\n prompt=\"Prefix str\"\n bos_token=\"\",\n image_seq_len=3,\n image_token=\"\",\n)\nThe output will be:\n\"Initial str\"\nArgs:\n prompt (`List[Union[str, ImageInput]]`): The input prompt.\n bos_token (`str`): The beginning of sentence token.\n image_seq_len (`int`): The length of the image sequence.\n image_token (`str`): The image token.\n num_images (`int`): Number of images in the prompt."} +{"repo": "temporian", "function": "def __init__(self, features: List[np.ndarray], timestamps: np.ndarray, schema: Optional[Schema]=None) -> None:\n self.features = features\n self.timestamps = timestamps\n if schema is not None:\n self.check_schema(schema)", "docstring": "Initializes the IndexData object by checking and setting the features\nand timestamps.\n\nRaises:\n ValueError: If features are not one-dimensional arrays.\n ValueError: If the number of elements in features and timestamps\n do not match."} +{"repo": "tensorflow", "function": "def __init__(self, cluster_resolver=None, communication_options=None):\n if communication_options is None:\n communication_options = collective_util.Options()\n super(CollectiveAllReduceStrategy, self).__init__(CollectiveAllReduceExtended(self, cluster_resolver=cluster_resolver, communication_options=communication_options))\n distribute_lib.distribution_strategy_gauge.get_cell('V2').set('MultiWorkerMirroredStrategy')\n distribute_lib.distribution_strategy_replica_gauge.get_cell('num_workers').set(self.extended._num_workers)\n distribute_lib.distribution_strategy_replica_gauge.get_cell('num_replicas_per_worker').set(self.extended._num_devices_per_worker)", "docstring": "Creates the strategy.\n\nArgs:\n cluster_resolver: optional\n `tf.distribute.cluster_resolver.ClusterResolver`. If `None`,\n `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.\n communication_options: optional\n `tf.distribute.experimental.CommunicationOptions`. 
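The example in the `build_string_from_input` docstring above appears garbled (the token arguments are empty strings and the shown output does not match the prompt), but the one-line implementation makes the real format easy to verify. A runnable check with hypothetical tokens:

```python
def build_string_from_input(prompt, bos_token, image_seq_len, image_token, num_images):
    return f'{image_token * image_seq_len * num_images}{bos_token}{prompt}\n'

out = build_string_from_input(
    prompt='Prefix str', bos_token='<s>', image_seq_len=3,
    image_token='<image>', num_images=2)
print(repr(out))  # '<image><image><image><image><image><image><s>Prefix str\n'
```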
This configures the\n default options for cross device communications. It can be overridden by\n options provided to the communication APIs like\n `tf.distribute.ReplicaContext.all_reduce`. See\n `tf.distribute.experimental.CommunicationOptions` for details."} +{"repo": "transformers", "function": "def add_2d_positional_embeddings(self, grid, interpolate_pos_encoding: bool=False):\n batch_size, height, width, hidden_dim = grid.shape\n row_height = min(self.max_grid_row_position_embeddings, height)\n row_position_ids = torch.arange(row_height, dtype=torch.long, device=grid.device)\n row_position_embeddings = self.row_position_embeddings(row_position_ids)\n row_shape = (1,) * (len(grid.shape) - 3) + (row_height, 1, hidden_dim)\n row_position_embeddings = row_position_embeddings.view(*row_shape)\n row_width = min(self.max_grid_col_position_embeddings, width)\n col_position_ids = torch.arange(row_width, dtype=torch.long, device=grid.device)\n col_position_embeddings = self.col_position_embeddings(col_position_ids)\n col_shape = (batch_size, 1, row_width, hidden_dim)\n col_position_embeddings = col_position_embeddings.view(*col_shape)\n positional_embeddings = row_position_embeddings + col_position_embeddings\n if interpolate_pos_encoding and (height > self.max_grid_row_position_embeddings or width > self.max_grid_col_position_embeddings):\n grid = grid + self.interpolate_pos_encoding(positional_embeddings, height, width)\n else:\n grid = grid + positional_embeddings\n return grid", "docstring": "Args:\n grid: (batch_size, height, width, hidden_dim)\n interpolate_pos_encoding: (`bool`, *optional*, defaults to `False`):\n Whether to interpolate the pre-trained position encodings.\nReturns:\n grid + col_position_embeddings.view(*col_shape): (batch_size, *, height, width, hidden_dim)"} +{"repo": "tensorflow", "function": "def choose_from_datasets(datasets, choice_dataset, stop_on_empty_dataset=True) -> 'DatasetV2':\n from tensorflow.python.data.ops import choose_from_datasets_op\n return choose_from_datasets_op._choose_from_datasets(datasets, choice_dataset, stop_on_empty_dataset)", "docstring": "Creates a dataset that deterministically chooses elements from `datasets`.\n\nFor example, given the following datasets:\n\n```python\ndatasets = [tf.data.Dataset.from_tensors(\"foo\").repeat(),\n tf.data.Dataset.from_tensors(\"bar\").repeat(),\n tf.data.Dataset.from_tensors(\"baz\").repeat()]\n\n# Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`.\nchoice_dataset = tf.data.Dataset.range(3).repeat(3)\n\nresult = tf.data.Dataset.choose_from_datasets(datasets, choice_dataset)\n```\n\nThe elements of `result` will be:\n\n```\n\"foo\", \"bar\", \"baz\", \"foo\", \"bar\", \"baz\", \"foo\", \"bar\", \"baz\"\n```\n\nArgs:\n datasets: A non-empty list of `tf.data.Dataset` objects with compatible\n structure.\n choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between\n `0` and `len(datasets) - 1`.\n stop_on_empty_dataset: If `True`, selection stops if it encounters an\n empty dataset. If `False`, it skips empty datasets. It is recommended to\n set it to `True`. Otherwise, the selected elements start off as the user\n intends, but may change as input datasets become empty. 
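Looking back at `add_2d_positional_embeddings` above: the row embeddings are reshaped to `(1, height, 1, hidden_dim)` and the column embeddings to `(batch_size, 1, width, hidden_dim)`, so their sum broadcasts to one embedding per grid cell. A toy torch sketch (arbitrary sizes) of that broadcast:

```python
import torch

batch_size, height, width, hidden_dim = 2, 4, 5, 8
row_emb = torch.randn(height, hidden_dim).view(1, height, 1, hidden_dim)
col_emb = torch.randn(width, hidden_dim).view(1, 1, width, hidden_dim).expand(batch_size, 1, width, hidden_dim)

positional = row_emb + col_emb  # broadcasts to (batch_size, height, width, hidden_dim)
grid = torch.randn(batch_size, height, width, hidden_dim) + positional
assert grid.shape == (batch_size, height, width, hidden_dim)
# Cell (i, j) received row_emb[i] + col_emb[j]:
assert torch.allclose(positional[0, 1, 2], row_emb[0, 1, 0] + col_emb[0, 0, 2])
```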
This can be\n difficult to detect since the dataset starts off looking correct.\n Defaults to `True`.\n\nReturns:\n A new `Dataset` with the transformation applied as described above.\n\nRaises:\n TypeError: If `datasets` or `choice_dataset` has the wrong type.\n ValueError: If `datasets` is empty."} +{"repo": "beam", "function": "def invoke_process_element(self, sdf_invoker, output_processor, element, restriction, watermark_estimator_state, *args, **kwargs):\n assert isinstance(sdf_invoker, DoFnInvoker)\n\n class CheckpointState(object):\n\n def __init__(self):\n self.checkpointed = None\n self.residual_restriction = None\n checkpoint_state = CheckpointState()\n\n def initiate_checkpoint():\n with self._checkpoint_lock:\n if checkpoint_state.checkpointed:\n return\n checkpoint_state.checkpointed = object()\n split = sdf_invoker.try_split(0)\n if split:\n _, checkpoint_state.residual_restriction = split\n else:\n checkpoint_state.checkpointed = None\n output_processor.reset()\n Timer(self._max_duration, initiate_checkpoint).start()\n sdf_invoker.invoke_process(element, additional_args=args, restriction=restriction, watermark_estimator_state=watermark_estimator_state)\n assert output_processor.output_iter is not None\n output_count = 0\n process_continuation = None\n for output in output_processor.output_iter:\n assert not process_continuation\n if isinstance(output, ProcessContinuation):\n initiate_checkpoint()\n process_continuation = output\n continue\n yield output\n output_count += 1\n if self._max_num_outputs and output_count >= self._max_num_outputs:\n initiate_checkpoint()\n result = SDFProcessElementInvoker.Result(residual_restriction=checkpoint_state.residual_restriction) if checkpoint_state.residual_restriction else SDFProcessElementInvoker.Result()\n yield result", "docstring": "Invokes `process()` method of a Splittable `DoFn` for a given element.\n\nArgs:\n sdf_invoker: a `DoFnInvoker` for the Splittable `DoFn`.\n element: the element to process\nReturns:\n a `SDFProcessElementInvoker.Result` object."} +{"repo": "transformers", "function": "class TvltConfig(PretrainedConfig):\n model_type = 'tvlt'\n\n def __init__(self, image_size=224, spectrogram_length=2048, frequency_length=128, image_patch_size=[16, 16], audio_patch_size=[16, 16], num_image_channels=3, num_audio_channels=1, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, use_mean_pooling=False, decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8, decoder_intermediate_size=2048, pixel_mask_ratio=0.75, audio_mask_ratio=0.15, audio_mask_type='frame-level', task_matching=True, task_mae=True, loss_type='classification', **kwargs):\n super().__init__(**kwargs)\n if audio_mask_type not in ('frame-level', 'patch_level'):\n raise ValueError(f\"audio_mask_type must be one of two acceptable strategies - {{'frame_level', 'patch-level') got {audio_mask_type}\")\n self.image_size = image_size\n self.spectrogram_length = spectrogram_length\n self.frequency_length = frequency_length\n self.image_patch_size = image_patch_size\n self.audio_patch_size = audio_patch_size\n self.num_image_channels = num_image_channels\n self.num_audio_channels = num_audio_channels\n self.num_frames = num_frames\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n 
self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.qkv_bias = qkv_bias\n self.use_mean_pooling = use_mean_pooling\n self.decoder_num_attention_heads = decoder_num_attention_heads\n self.decoder_hidden_size = decoder_hidden_size\n self.decoder_num_hidden_layers = decoder_num_hidden_layers\n self.decoder_intermediate_size = decoder_intermediate_size\n self.pixel_mask_ratio = pixel_mask_ratio\n self.audio_mask_ratio = audio_mask_ratio\n self.audio_mask_type = audio_mask_type\n self.task_matching = task_matching\n self.task_mae = task_mae\n self.loss_type = loss_type", "docstring": "This is the configuration class to store the configuration of a [`TvltModel`]. It is used to instantiate a TVLT\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the TVLT\n[ZinengTang/tvlt-base](https://huggingface.co/ZinengTang/tvlt-base) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n spectrogram_length (`int`, *optional*, defaults to 2048):\n The time length of each audio spectrogram.\n frequency_length (`int`, *optional*, defaults to 128):\n The frequency length of audio spectrogram.\n image_patch_size (`List[int]`, *optional*, defaults to `[16, 16]`):\n The size (resolution) of each image patch.\n audio_patch_size (`List[int]`, *optional*, defaults to `[16, 16]`):\n The size (resolution) of each audio patch.\n num_image_channels (`int`, *optional*, defaults to 3):\n The number of input image channels.\n num_audio_channels (`int`, *optional*, defaults to 1):\n The number of input audio channels.\n num_frames (`int`, *optional*, defaults to 8):\n The maximum number of frames for an input video.\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n hidden_dropout_prob (`float`, *optional*, defaults to 0.0):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the layer normalization layers.\n qkv_bias (`bool`, *optional*, defaults to `True`):\n Whether to add a bias to the queries, keys and values.\n use_mean_pooling (`bool`, *optional*, defaults to `False`):\n Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token.\n decoder_num_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the decoder.\n decoder_hidden_size (`int`, *optional*, defaults to 512):\n Dimensionality of the decoder.\n decoder_num_hidden_layers (`int`, *optional*, defaults to 8):\n Number of hidden layers in the decoder.\n decoder_intermediate_size (`int`, *optional*, defaults to 2048):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the decoder.\n pixel_mask_ratio (`float`, *optional*, defaults to 0.75):\n Image patch masking ratio.\n audio_mask_ratio (`float`, *optional*, defaults to 0.15):\n Audio patch masking ratio.\n audio_mask_type (`str`, *optional*, defaults to `\"frame-level\"`):\n Audio patch masking type, choose between \"frame-level\" and \"patch-level\".\n task_matching (`bool`, *optional*, defaults to `True`):\n Whether to use vision audio matching task in pretraining.\n task_mae (`bool`, *optional*, defaults to `True`):\n Whether to use the masked auto-encoder (MAE) in pretraining.\n loss_type (`str`, *optional*, defaults to `\"classification\"`):\n Loss types including regression and classification.\n\nExample:\n\n```python\n>>> from transformers import TvltConfig, TvltModel\n\n>>> # Initializing a TVLT ZinengTang/tvlt-base style configuration\n>>> configuration = TvltConfig()\n\n>>> # Initializing a model (with random weights) from the ZinengTang/tvlt-base style configuration\n>>> model = TvltModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "pyglove", "function": "def try_listify_dict_with_int_keys(src: Dict[Any, Any], convert_when_sparse: bool=False) -> Tuple[Union[List[Any], Dict[Any, Any]], bool]:\n if not src:\n return (src, False)\n min_key = None\n max_key = None\n for key in src.keys():\n if not isinstance(key, int):\n return (src, False)\n if min_key is None or min_key > key:\n min_key = key\n if max_key is None or max_key < key:\n max_key = key\n if convert_when_sparse or (min_key == 0 and max_key == len(src) - 1):\n return ([src[key] for key in sorted(src.keys())], True)\n return (src, False)", "docstring": "Try to convert a dictionary with consecutive integer keys to a list.\n\nArgs:\n src: A dict whose keys may be int type and their range form a perfect\n range(0, N) list unless convert_when_sparse is set to True.\n convert_when_sparse: When src is an int-key dict, force convert\n it to a list ordered by key, even if it's sparse.\n\nReturns:\n converted list or src unchanged."} +{"repo": "transformers", "function": "def forward(self, input_ids: 
Optional[torch.LongTensor]=None, bbox: Optional[torch.Tensor]=None, attention_mask: Optional[torch.FloatTensor]=None, token_type_ids: Optional[torch.LongTensor]=None, position_ids: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n outputs = self.lilt(input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n loss = None\n if labels is not None:\n labels = labels.to(logits.device)\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = 'regression'\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = 'single_label_classification'\n else:\n self.config.problem_type = 'multi_label_classification'\n if self.config.problem_type == 'regression':\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == 'single_label_classification':\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == 'multi_label_classification':\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return (loss,) + output if loss is not None else output\n return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):\n Bounding boxes of each input sequence tokens. Selected in the range `[0,\n config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)\n format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,\n y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.\nlabels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification\n>>> from datasets import load_dataset\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"SCUT-DLVCLab/lilt-roberta-en-base\")\n>>> model = AutoModelForSequenceClassification.from_pretrained(\"SCUT-DLVCLab/lilt-roberta-en-base\")\n\n>>> dataset = load_dataset(\"nielsr/funsd-layoutlmv3\", split=\"train\", trust_remote_code=True)\n>>> example = dataset[0]\n>>> words = example[\"tokens\"]\n>>> boxes = example[\"bboxes\"]\n\n>>> encoding = tokenizer(words, boxes=boxes, return_tensors=\"pt\")\n\n>>> outputs = model(**encoding)\n>>> predicted_class_idx = outputs.logits.argmax(-1).item()\n>>> predicted_class = model.config.id2label[predicted_class_idx]\n```"} +{"repo": "beam", "function": "def extract_distribution(monitoring_info_proto):\n if not is_distribution(monitoring_info_proto):\n raise ValueError('Unsupported type %s' % monitoring_info_proto.type)\n return _decode_distribution(coders.VarIntCoder(), monitoring_info_proto.payload)", "docstring": "Returns a tuple of (count, sum, min, max).\n\nArgs:\n proto: The monitoring info for the distribution."} +{"repo": "tensorflow", "function": "def _distance_graph(cls, inputs, clusters, distance_metric):\n assert isinstance(inputs, list)\n if distance_metric == SQUARED_EUCLIDEAN_DISTANCE:\n return cls._compute_euclidean_distance(inputs, clusters)\n elif distance_metric == COSINE_DISTANCE:\n return cls._compute_cosine_distance(inputs, clusters, inputs_normalized=True)\n else:\n assert False, str(distance_metric)", "docstring": "Computes distance between each input and each cluster center.\n\nArgs:\n inputs: list of input Tensors.\n clusters: cluster Tensor.\n distance_metric: distance metric used for clustering\n\nReturns:\n list of Tensors, where each element corresponds to each element in inputs.\n The value is the distance of each row to all the cluster centers.\n Currently only Euclidean distance and cosine distance are supported."} +{"repo": "tensorflow", "function": "def in_place_subclassed_model_state_restoration(model):\n assert not model._is_graph_network\n if hasattr(model, '_original_attributes_cache') and model._original_attributes_cache is not None:\n setattr_tracking = model._setattr_tracking\n model._setattr_tracking = False\n model._self_tracked_trackables = []\n for name, value in model._original_attributes_cache.items():\n setattr(model, name, value)\n if isinstance(value, Layer):\n model._self_tracked_trackables.append(value)\n model._original_attributes_cache = None\n model._setattr_tracking = setattr_tracking\n else:\n _reset_build_compile_trackers(model)", "docstring": "Restores the original state of a model after it was \"reset\".\n\nThis undoes this action of `_in_place_subclassed_model_reset`, which is called\nin `clone_and_build_model` if `in_place_reset` is set to True.\n\nArgs:\n model: Instance of a Keras model created via subclassing, on which\n `_in_place_subclassed_model_reset` was previously called."} +{"repo": "tensorflow", "function": "def count_up_to(self, limit):\n return gen_state_ops.resource_count_up_to(self.handle, limit=limit, T=self.dtype)", "docstring": "Increments this variable until it reaches `limit`.\n\nWhen that Op is run it tries to increment the variable by `1`. 
If\nincrementing the variable would bring it above `limit` then the Op raises\nthe exception `OutOfRangeError`.\n\nIf no error is raised, the Op outputs the value of the variable before\nthe increment.\n\nThis is essentially a shortcut for `count_up_to(self, limit)`.\n\nArgs:\n limit: value at which incrementing the variable raises an error.\n\nReturns:\n A `Tensor` that will hold the variable value before the increment. If no\n other Op modifies this variable, the values produced will all be\n distinct."} +{"repo": "transformers", "function": "class ImagesKwargs(TypedDict, total=False):\n do_resize: Optional[bool]\n size: Optional[dict[str, int]]\n size_divisor: Optional[int]\n crop_size: Optional[Dict[str, int]]\n resample: Optional[Union['PILImageResampling', int]]\n do_rescale: Optional[bool]\n rescale_factor: Optional[float]\n do_normalize: Optional[bool]\n image_mean: Optional[Union[float, list[float]]]\n image_std: Optional[Union[float, list[float]]]\n do_pad: Optional[bool]\n pad_size: Optional[dict[str, int]]\n do_center_crop: Optional[bool]\n data_format: Optional[ChannelDimension]\n input_data_format: Optional[Union[str, ChannelDimension]]\n device: Optional[str]", "docstring": "Keyword arguments for image processing. For extended documentation, check the appropriate ImageProcessor\nclass methods and docstrings.\n\nAttributes:\n do_resize (`bool`, *optional*):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*):\n Resize the shorter side of the input to `size[\"shortest_edge\"]`.\n size_divisor (`int`, *optional*):\n The size by which to make sure both the height and width can be divided.\n crop_size (`Dict[str, int]`, *optional*):\n Desired output size when applying center-cropping.\n resample (`PILImageResampling`, *optional*):\n Resampling filter to use if resizing the image.\n do_rescale (`bool`, *optional*):\n Whether to rescale the image by the specified scale `rescale_factor`.\n rescale_factor (`int` or `float`, *optional*):\n Scale factor to use if rescaling the image.\n do_normalize (`bool`, *optional*):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*):\n Mean to use if normalizing the image.\n image_std (`float` or `List[float]`, *optional*):\n Standard deviation to use if normalizing the image.\n do_pad (`bool`, *optional*):\n Whether to pad the image to the `(max_height, max_width)` of the images in the batch.\n pad_size (`Dict[str, int]`, *optional*):\n The size `{\"height\": int, \"width\": int}` to pad the images to.\n do_center_crop (`bool`, *optional*):\n Whether to center crop the image.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the output image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image.\n device (`str`, *optional*):\n The device to use for processing (e.g. 
\"cpu\", \"cuda\"), only relevant for fast image processing."} +{"repo": "tensorflow", "function": "def _create_and_save_tf1_conv_model(self, saved_model_path: str, signature_key: str, tags: Collection[str], input_key: str, output_key: str, *, input_shape: Sequence[int]=(1, 3, 4, 3), filter_shape: Sequence[int]=(2, 3, 3, 2), use_variable: bool=False) -> core.Tensor:\n with ops.Graph().as_default(), session.Session() as sess:\n in_placeholder, output_tensor = self._create_simple_tf1_conv_model(input_shape=input_shape, filter_shape=filter_shape, use_variable_for_filter=use_variable)\n if use_variable:\n sess.run(variables.global_variables_initializer())\n self._save_tf1_model(sess, saved_model_path, signature_key, tags, inputs={input_key: in_placeholder}, outputs={output_key: output_tensor})\n return in_placeholder", "docstring": "Creates and saves a simple convolution model.\n\nThis is intended to be used for TF1 (graph mode) tests.\n\nArgs:\n saved_model_path: Directory to save the model.\n signature_key: The key to the SignatureDef that inputs & outputs\n correspond to.\n tags: Set of tags associated with the model.\n input_key: The key to the input tensor.\n output_key: The key to the output tensor.\n input_shape: Shape of the input tensor.\n filter_shape: Shape of the filter.\n use_variable: Setting this to `True` makes the filter for the conv\n operation a `tf.Variable`.\n\nReturns:\n in_placeholder: The placeholder tensor used as an input to the model."} +{"repo": "transformers", "function": "def forward(self, input_points: Optional[Tuple[torch.Tensor, torch.Tensor]], input_labels: Optional[torch.Tensor], input_boxes: Optional[torch.Tensor], input_masks: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n sparse_embeddings = None\n batch_size = 1\n target_device = self.shared_embedding.positional_embedding.device\n if input_points is not None:\n batch_size, point_batch_size = input_points.shape[:2]\n if input_labels is None:\n raise ValueError('If points are provided, labels must also be provided.')\n point_embeddings = self._embed_points(input_points, input_labels, pad=input_boxes is None)\n sparse_embeddings = point_embeddings\n if input_boxes is not None:\n batch_size = input_boxes.shape[0]\n box_embeddings = self._embed_boxes(input_boxes)\n if sparse_embeddings is None:\n sparse_embeddings = box_embeddings\n else:\n sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=2)\n if input_masks is not None:\n dense_embeddings = self.mask_embed(input_masks)\n else:\n dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(batch_size, -1, self.image_embedding_size[0], self.image_embedding_size[1])\n if sparse_embeddings is None:\n sparse_embeddings = torch.zeros((batch_size, 1, 1, self.hidden_size), device=target_device)\n return (sparse_embeddings, dense_embeddings)", "docstring": "Embeds different types of prompts, returning both sparse and dense embeddings.\n\nArgs:\n points (`torch.Tensor`, *optional*):\n point coordinates and labels to embed.\n boxes (`torch.Tensor`, *optional*):\n boxes to embed\n masks (`torch.Tensor`, *optional*):\n masks to embed"} +{"repo": "beam", "function": "def get_output_batch_type(self, input_element_type) -> typing.Optional[typing.Union[TypeConstraint, type]]:\n output_batch_type = None\n if self._process_defined and self._process_yields_batches:\n output_batch_type = self._get_element_type_from_return_annotation(self.process, input_element_type)\n if self._process_batch_defined and (not 
self._process_batch_yields_elements):\n process_batch_type = self._get_element_type_from_return_annotation(self.process_batch, self._get_input_batch_type_normalized(input_element_type))\n if output_batch_type is not None and (not process_batch_type == output_batch_type):\n raise TypeError(f'DoFn {self!r} yields batches from both process and process_batch, but they produce different types:\\n process: {output_batch_type}\\n process_batch: {process_batch_type!r}')\n output_batch_type = process_batch_type\n return output_batch_type", "docstring": "Determine the batch type produced by this DoFn's ``process_batch``\nimplementation and/or its ``process`` implementation with\n``@yields_batch``.\n\nThe default implementation of this method observes the return type\nannotations on ``process_batch`` and/or ``process``. A Batched DoFn may\noverride this method if a dynamic approach is required.\n\nArgs:\n input_element_type: The **element type** of the input PCollection this\n DoFn is being applied to.\n\nReturns:\n ``None`` if this DoFn will never yield batches, else a Beam typehint or\n a native Python typehint."} +{"repo": "tensorflow", "function": "def identity(x, name=None):\n return array_ops.identity(x, name=name)", "docstring": "Returns a tensor with the same content as the input tensor.\n\nArgs:\n x: The input tensor.\n name: String, name for the variable to create.\n\nReturns:\n A tensor of the same shape, type and content."} +{"repo": "budoux", "function": "def parse(self, sentence: str) -> typing.List[str]:\n if sentence == '':\n return []\n chunks = [sentence[0]]\n base_score = -sum((sum(g.values()) for g in self.model.values())) * 0.5\n for i in range(1, len(sentence)):\n score = base_score\n if i > 2:\n score += self.model.get('UW1', {}).get(sentence[i - 3], 0)\n if i > 1:\n score += self.model.get('UW2', {}).get(sentence[i - 2], 0)\n score += self.model.get('UW3', {}).get(sentence[i - 1], 0)\n score += self.model.get('UW4', {}).get(sentence[i], 0)\n if i + 1 < len(sentence):\n score += self.model.get('UW5', {}).get(sentence[i + 1], 0)\n if i + 2 < len(sentence):\n score += self.model.get('UW6', {}).get(sentence[i + 2], 0)\n if i > 1:\n score += self.model.get('BW1', {}).get(sentence[i - 2:i], 0)\n score += self.model.get('BW2', {}).get(sentence[i - 1:i + 1], 0)\n if i + 1 < len(sentence):\n score += self.model.get('BW3', {}).get(sentence[i:i + 2], 0)\n if i > 2:\n score += self.model.get('TW1', {}).get(sentence[i - 3:i], 0)\n if i > 1:\n score += self.model.get('TW2', {}).get(sentence[i - 2:i + 1], 0)\n if i + 1 < len(sentence):\n score += self.model.get('TW3', {}).get(sentence[i - 1:i + 2], 0)\n if i + 2 < len(sentence):\n score += self.model.get('TW4', {}).get(sentence[i:i + 3], 0)\n if score > 0:\n chunks.append(sentence[i])\n else:\n chunks[-1] += sentence[i]\n return chunks", "docstring": "Parses the input sentence and returns a list of semantic chunks.\n\nArgs:\n sentence (str): An input sentence.\n\nReturns:\n A list of semantic chunks (List[str])."} +{"repo": "transformers", "function": "def get_numpy_to_framework_fn(arr) -> Callable:\n if isinstance(arr, np.ndarray):\n return np.array\n if is_tf_available() and is_tf_tensor(arr):\n import tensorflow as tf\n return tf.convert_to_tensor\n if is_torch_available() and is_torch_tensor(arr):\n import torch\n return torch.tensor\n if is_flax_available() and is_jax_tensor(arr):\n import jax.numpy as jnp\n return jnp.array\n raise ValueError(f'Cannot convert arrays of type {type(arr)}')", "docstring": "Returns a function that converts 
a numpy array to the framework of the input array.\n\nArgs:\n arr (`np.ndarray`): The array to convert."} +{"repo": "tensorflow", "function": "def get_header_from_ops_and_kernels(ops_and_kernels, include_all_ops_and_kernels):\n ops_and_kernels = sorted(ops_and_kernels)\n ops = set((op for op, _ in ops_and_kernels))\n result_list = []\n\n def append(s):\n result_list.append(s)\n _, script_name = os.path.split(sys.argv[0])\n append('// This file was autogenerated by %s' % script_name)\n append('#ifndef OPS_TO_REGISTER')\n append('#define OPS_TO_REGISTER')\n if include_all_ops_and_kernels:\n append('#define SHOULD_REGISTER_OP(op) true')\n append('#define SHOULD_REGISTER_OP_KERNEL(clz) true')\n append('#define SHOULD_REGISTER_OP_GRADIENT true')\n else:\n line = \"\\n namespace {\\n constexpr const char* skip(const char* x) {\\n return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x;\\n }\\n\\n constexpr bool isequal(const char* x, const char* y) {\\n return (*skip(x) && *skip(y))\\n ? (*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1))\\n : (!*skip(x) && !*skip(y));\\n }\\n\\n template<int N>\\n struct find_in {\\n static constexpr bool f(const char* x, const char* const y[N]) {\\n return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1);\\n }\\n };\\n\\n template<>\\n struct find_in<0> {\\n static constexpr bool f(const char* x, const char* const y[]) {\\n return false;\\n }\\n };\\n } // end namespace\\n \"\n line += 'constexpr const char* kNecessaryOpKernelClasses[] = {\\n'\n for _, kernel_class in ops_and_kernels:\n if kernel_class is None:\n continue\n line += '\"%s\",\\n' % kernel_class\n line += '};'\n append(line)\n append('#define SHOULD_REGISTER_OP_KERNEL(clz) (find_in<sizeof(kNecessaryOpKernelClasses) / sizeof(*kNecessaryOpKernelClasses)>::f(clz, kNecessaryOpKernelClasses))')\n append('')\n append('constexpr inline bool ShouldRegisterOp(const char op[]) {')\n append(' return false')\n for op in sorted(ops):\n append(' || isequal(op, \"%s\")' % op)\n append(' ;')\n append('}')\n append('#define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op)')\n append('')\n append('#define SHOULD_REGISTER_OP_GRADIENT ' + ('true' if 'SymbolicGradient' in ops else 'false'))\n append('#endif')\n return '\\n'.join(result_list)", "docstring": "Returns a header for use with tensorflow SELECTIVE_REGISTRATION.\n\nArgs:\n ops_and_kernels: a set of (op_name, kernel_class_name) pairs to include.\n include_all_ops_and_kernels: if True, ops_and_kernels is ignored and all op\n kernels are included.\n\nReturns:\n the string of the header that should be written as ops_to_register.h."} +{"repo": "tf-quant-finance", "function": "def segment_diff(x, segment_ids, order=1, exclusive=False, dtype=None, name=None):\n with tf.compat.v1.name_scope(name, default_name='segment_diff', values=[x]):\n x = tf.convert_to_tensor(x, dtype=dtype)\n raw_diffs = diff_ops.diff(x, order=order, exclusive=exclusive)\n if segment_ids is None:\n return raw_diffs\n has_segment_changed = tf.concat([[False], tf.not_equal(segment_ids[1:] - segment_ids[:-1], 0)], axis=0)\n segment_start_index = tf.cast(tf.where(has_segment_changed), dtype=tf.int32)\n segment_end_index = tf.concat([tf.reshape(segment_start_index, [-1])[1:], [tf.size(segment_ids)]], axis=0)\n segment_end_index = tf.reshape(segment_end_index, [-1, 1])\n fix_indices = segment_start_index + tf.range(order, dtype=segment_start_index.dtype)\n in_bounds = tf.where(fix_indices < segment_end_index)\n fix_indices = tf.reshape(tf.gather_nd(fix_indices, in_bounds), [-1, 1])\n needs_fix = tf.scatter_nd(fix_indices, tf.reshape(tf.ones_like(fix_indices, dtype=tf.int32), [-1]), 
shape=tf.shape(x))\n needs_fix = tf.cast(needs_fix, dtype=tf.bool)\n if not exclusive:\n return tf.where(needs_fix, x, raw_diffs)\n return tf.boolean_mask(raw_diffs, tf.logical_not(needs_fix[order:]))", "docstring": "Computes difference of successive elements in a segment.\n\nFor a complete description of segment_* ops see documentation of\n`tf.segment_max`. This op extends the `diff` functionality to segmented\ninputs.\n\nThe behaviour of this op is the same as that of the op `diff` within each\nsegment. The result is effectively a concatenation of the results of `diff`\napplied to each segment.\n\n#### Example\n\n```python\n x = tf.constant([2, 5, 1, 7, 9] + [32, 10, 12, 3] + [4, 8, 5])\n segments = tf.constant([0, 0, 0, 0, 0] + [1, 1, 1, 1] + [2, 2, 2])\n # First order diff. Expected result: [3, -4, 6, 2, -22, 2, -9, 4, -3]\n dx1 = segment_diff(\n x, segment_ids=segments, order=1, exclusive=True)\n # Non-exclusive, second order diff.\n # Expected result: [2, 5, -1, 2, 8, 32, 10, -20, -7, 4, 8, 1]\n dx2 = segment_diff(\n x, segment_ids=segments, order=2, exclusive=False)\n```\n\nArgs:\n x: A rank 1 `Tensor` of any dtype for which arithmetic operations are\n permitted.\n segment_ids: A `Tensor`. Must be one of the following types: int32, int64. A\n 1-D tensor whose size is equal to the size of `x`. Values should be sorted\n and can be repeated.\n order: Positive Python int. The order of the difference to compute. `order =\n 1` corresponds to the difference between successive elements.\n Default value: 1\n exclusive: Python bool. See description above.\n Default value: False\n dtype: Optional `tf.Dtype`. If supplied, the dtype for `x` to use when\n converting to `Tensor`.\n Default value: None which maps to the default dtype inferred by TF.\n name: Python `str` name prefixed to Ops created by this class.\n Default value: None which is mapped to the default name 'segment_diff'.\n\nReturns:\n diffs: A `Tensor` of the same dtype as `x`. Assuming that each segment is\n of length greater than or equal to order, if `exclusive` is True,\n then the size is `n-order*k` where `n` is the size of x,\n `k` is the number of different segment ids supplied if `segment_ids` is\n not None or 1 if `segment_ids` is None. 
If any of the segments is of\n length less than the order, then the size is:\n `n-sum(min(order, length(segment_j)), j)` where the sum is over segments.\n If `exclusive` is False, then the size is `n`."} +{"repo": "sprockets", "function": "class Context(object):\n\n def __init__(self):\n self.g = None\n self.left = None\n self.right = None\n self.labels = None\n self.num_matched = 0\n self.s = None\n self.t = None\n self.matches = None\n self.slack = None\n self.slackx = None\n self.prev = None\n\n def MaxBipartiteMatching(self, graph):\n \"\"\"Find a maximum matching for a bipartite graph.\n\n This is O(n^3) implementation of the Hungarian method for complete bipartite\n matching problems.\n\n Args:\n graph: A networkx graph object, assumed to be bipartite.\n Returns:\n A dictionary keyed on node names in left to node names in right.\n \"\"\"\n self.g = nx.Graph(graph)\n self.left = set((n for n, d in self.g.nodes(data=True) if not d['bipartite']))\n self.right = set(self.g) - self.left\n self.num_matched = 0\n self.s = set()\n self.t = set()\n self.matches = {}\n self.slack = {}\n self.slackx = {}\n self.prev = {}\n self.labels = {}\n for x in self.left:\n self.labels[x] = max([val['weight'] for val in self.g[x].values()])\n for y in self.right:\n self.labels[y] = 0\n while self.num_matched != len(self.left):\n self._Augment()\n ret = {}\n for k in self.left:\n ret[k] = self.matches[k]\n return ret\n\n def _Augment(self):\n \"\"\"Find an augmenting path starting from an unmatched node in |left|.\n\n Start with a root node in |left| and attempt to find an augmenting path\n starting from |root|. In order for a path to be augmenting, each edge in the\n path must have: weight(x, y) == labels[x] + labels[y]. The set of all edges\n which have this property is known as the \"equality subgraph\" for the current\n vertex labeling.\n\n In addition, an augmenting path must start with an unmatched edge and end\n with an unmatched edge; augmenting the path flips the matched-ness of each\n edge, so that the total number of matched edges increases by 1.\n\n If an augmenting path does not exist, we update the labels of all nodes in\n S and T to force new edges into the equality subgraph. 
Eventually, an\n augmenting path will be generated this way.\n \"\"\"\n self.s = set()\n self.t = set()\n self.prev = {}\n queue = collections.deque()\n root = list(self.left - set(self.matches.keys()))[0]\n queue.append(root)\n self.s.add(root)\n for y in self.right:\n self.slack[y] = self._CalcSlack(root, y)\n self.slackx[y] = root\n while True:\n path_exists, x, y = self._FindAugmentingPath(queue)\n if path_exists:\n break\n self._UpdateLabels()\n queue.clear()\n path_exists, x, y = self._FindAugmentingEdge(queue)\n if path_exists:\n break\n self._InvertPath(x, y)\n\n def _FindAugmentingPath(self, queue):\n \"\"\"Find an augmenting path for the current labeling.\n\n Perform a BFS to find an augmenting path for the current labeling.\n\n Args:\n queue: Queue for performing BFS traversal.\n Returns:\n found: True if path was found.\n x: Left vertex of final path edge.\n y: Right vertex of final path edge.\n \"\"\"\n while queue:\n x = queue.popleft()\n for y in self.right - self.t:\n if not self._InEqualitySubgraph(x, y):\n continue\n if y not in self.matches:\n return (True, x, y)\n self.t.add(y)\n queue.append(self.matches[y])\n self._AddToTree(self.matches[y], x)\n return (False, None, None)\n\n def _FindAugmentingEdge(self, queue):\n \"\"\"Find a final edge for an augmenting path after updating labels.\n\n At least one new edge should have been added to the equality subgraph, so\n we check if any new edges will create an augmenting path.\n\n Args:\n queue: Queue for performing BFS traversal.\n Returns:\n found: True if path was found.\n x: Left vertex of final path edge.\n y: Right vertex of final path edge.\n \"\"\"\n for y in (v for v in self.right - self.t if self.slack[v] == 0):\n if y not in self.matches:\n return (True, self.slackx[y], y)\n self.t.add(y)\n if self.matches[y] not in self.s:\n queue.append(self.matches[y])\n self._AddToTree(self.matches[y], self.slackx[y])\n return (False, None, None)\n\n def _InvertPath(self, x, y):\n \"\"\"Invert the augmenting path whose final edge is (x, y).\"\"\"\n self.num_matched += 1\n while True:\n if x in self.matches:\n ty = self.matches[x]\n self.matches[y] = x\n self.matches[x] = y\n if x not in self.prev:\n break\n y = ty\n x = self.prev[x]\n\n def _UpdateLabels(self):\n \"\"\"Update labels to expand the equality subgraph.\n\n We will find the smallest slack value of a vertex in Right - T. The labels\n for vertices in S will decrease by slack, while the vertices in T increase\n by slack. 
This guarantees at least one vertex in Right will have a slack\n value of 0, thereby adding it to the equality subgraph.\n \"\"\"\n delta = float('inf')\n for y in self.right - self.t:\n delta = min(delta, self.slack[y])\n for x in self.s:\n self.labels[x] -= delta\n for y in self.t:\n self.labels[y] += delta\n for y in self.right - self.t:\n self.slack[y] -= delta\n\n def _AddToTree(self, x, prevx):\n \"\"\"Adds |x| to the current augmenting tree.\n\n x is a node which has already been matched to a node y in Right (which is\n itself connected to prevx via a non-matching edge in the equality subgraph).\n We indicate prevx comes before x in the tree so we can trace the path later.\n\n Args:\n x: Node which has already been matched to a node y in right\n prevx: Previous node in Left along the path.\n \"\"\"\n self.s.add(x)\n self.prev[x] = prevx\n for y in self.right:\n slack = self._CalcSlack(x, y)\n if slack < self.slack[y]:\n self.slack[y] = slack\n self.slackx[y] = x\n\n def _InEqualitySubgraph(self, x, y):\n \"\"\"Return True if x and y are in the equality subgraph.\"\"\"\n return self.g[x][y]['weight'] == self.labels[x] + self.labels[y]\n\n def _CalcSlack(self, x, y):\n \"\"\"Calculate the slack for an edge (x, y).\"\"\"\n return self.labels[x] + self.labels[y] - self.g[x][y]['weight']", "docstring": "Context object for running the Hungarian algorithm.\n\nContains algorithm variables.\n\nAttributes:\n g: The complete bipartite graph we will find a maximum matching for.\n left: The set of left nodes.\n right: The set of right nodes.\n labels: Dictionary of vertex names to numeric algorithm label.\n num_matched: Current number of matched edges\n s: Set of left nodes in the augmenting tree\n t: Set of right nodes in the augmenting tree\n matches: Dictionary where matches[n] = m (and matches[m] = n) if n and m are\n currently matched.\n slack: Dictionary keyed on y, where slack[y] = min(for x in S: labels[x] +\n labels[y] - weight(x, y))\n slackx: Dictionary keyed on y, where slackx[y] is the node in right which\n gives y its current slack value.\n prev: Dictionary keyed on a node x in S, where prev[x] is the previous node\n in S along x's path in the augmenting tree."} +{"repo": "beam", "function": "def Lease(self, request, global_params=None):\n config = self.GetMethodConfig('Lease')\n return self._RunMethod(config, request, global_params=global_params)", "docstring": "Leases a dataflow WorkItem to run.\n\nArgs:\n request: (DataflowProjectsLocationsJobsWorkItemsLeaseRequest) input message\n global_params: (StandardQueryParameters, default: None) global arguments\nReturns:\n (LeaseWorkItemResponse) The response message."} +{"repo": "transformers", "function": "class ViTMSNConfig(PretrainedConfig):\n model_type = 'vit_msn'\n\n def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.image_size = image_size\n 
self.patch_size = patch_size\n self.num_channels = num_channels\n self.qkv_bias = qkv_bias", "docstring": "This is the configuration class to store the configuration of a [`ViTMSNModel`]. It is used to instantiate a ViT\nMSN model according to the specified arguments, defining the model architecture. Instantiating a configuration with\nthe defaults will yield a similar configuration to that of the ViT\n[facebook/vit_msn_base](https://huggingface.co/facebook/vit_msn_base) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\n\nArgs:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n hidden_dropout_prob (`float`, *optional*, defaults to 0.0):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the layer normalization layers.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 16):\n The size (resolution) of each patch.\n num_channels (`int`, *optional*, defaults to 3):\n The number of input channels.\n qkv_bias (`bool`, *optional*, defaults to `True`):\n Whether to add a bias to the queries, keys and values.\n\nExample:\n\n```python\n>>> from transformers import ViTMSNModel, ViTMSNConfig\n\n>>> # Initializing a ViT MSN vit-msn-base style configuration\n>>> configuration = ViTMSNConfig()\n\n>>> # Initializing a model from the vit-msn-base style configuration\n>>> model = ViTMSNModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "tensorflow", "function": "def _mark_func_graph_as_unsaveable(graph, learning_phase):\n if graph.building_function and is_placeholder(learning_phase):\n graph.mark_as_unsaveable('The keras learning phase placeholder was used inside a function. Exporting placeholders is not supported when saving out a SavedModel. Please call `tf.keras.backend.set_learning_phase(0)` in the function to set the learning phase to a constant value.')", "docstring": "Mark func graph as unsaveable due to use of symbolic keras learning phase.\n\nFunctions that capture the symbolic learning phase cannot be exported to\nSavedModel. 
Mark the funcgraph as unsaveable, so that an error will be raised\nif it is exported.\n\nArgs:\n graph: Graph or FuncGraph object.\n learning_phase: Learning phase placeholder or int defined in the graph."} +{"repo": "transformers", "function": "def extract_entities_with_patch_indices(text):\n pattern = '(?:(<phrase>([^<]+)</phrase>))?<object>((?:<patch_index_\\\\d+><patch_index_\\\\d+></delimiter_of_multi_objects/>)*<patch_index_\\\\d+><patch_index_\\\\d+>)</object>'\n matches = re.finditer(pattern, text)\n entities_with_patch_indices = []\n for match in matches:\n span = match.span(2)\n phrase_tag, phrase, match_content = match.groups()\n if not phrase_tag:\n phrase = None\n span = (match.span(0)[0], match.span(0)[0])\n patch_index_pairs = match_content.split('</delimiter_of_multi_objects/>')\n entity_bboxes = []\n for pair in patch_index_pairs:\n x = re.search('<patch_index_(\\\\d+)>', pair)\n y = re.search('<patch_index_(\\\\d+)>', pair[1:])\n if x and y:\n if phrase:\n entity_bboxes.append((int(x.group(1)), int(y.group(1))))\n else:\n entity_bboxes.append((int(x.group(1)), int(y.group(1))))\n if phrase:\n entities_with_patch_indices.append((phrase, span, entity_bboxes))\n else:\n for bbox in entity_bboxes:\n entity = f'<object><patch_index_{bbox[0]:04d}><patch_index_{bbox[1]:04d}></object>'\n entities_with_patch_indices.append((entity, span, [bbox]))\n return entities_with_patch_indices", "docstring": "Extract entities contained in `text`. The bounding boxes are given in the form of patch indices.\n\nThis function is only intended to be used within `clean_text_and_extract_entities_with_bboxes` where further\nprocessing happens, including converting to normalized coordinates and whitespace character cleaning up.\n\nExamples:\n\n```python\n>>> text = \"<grounding> An image of<phrase> a snowman</phrase><object><patch_index_0044><patch_index_0863></object> warming himself by<phrase> a fire</phrase><object><patch_index_0005><patch_index_0911></object>.\"\n>>> entities = extract_entities_with_patch_indices(text)\n>>> entities\n[(' a snowman', (31, 41), [(44, 863)]), (' a fire', (130, 137), [(5, 911)])]\n```"} +{"repo": "tensorflow", "function": "def __init__(self, inputs, num_clusters, initial_clusters, distance_metric, random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length, cluster_centers, cluster_centers_updated, cluster_centers_initialized):\n self._inputs = inputs\n self._num_clusters = num_clusters\n self._initial_clusters = initial_clusters\n self._distance_metric = distance_metric\n self._seed = random_seed\n self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries\n self._kmc2_chain_length = kmc2_chain_length\n self._cluster_centers = cluster_centers\n self._cluster_centers_updated = cluster_centers_updated\n self._cluster_centers_initialized = cluster_centers_initialized\n self._num_selected = array_ops.shape(self._cluster_centers)[0]\n self._num_remaining = self._num_clusters - self._num_selected\n self._num_data = math_ops.add_n([array_ops.shape(i)[0] for i in self._inputs])", "docstring": "Creates an op factory.\n\nArgs:\n inputs: See KMeans constructor.\n num_clusters: An integer Tensor providing the number of clusters.\n initial_clusters: See KMeans constructor.\n distance_metric: See KMeans constructor.\n random_seed: See KMeans constructor.\n kmeans_plus_plus_num_retries: See KMeans constructor.\n kmc2_chain_length: See KMeans constructor.\n cluster_centers: The TF variable holding the initial centers. It may\n already contain some centers when the op is executed.\n cluster_centers_updated: A second TF variable to hold a copy of the\n initial centers, used for full-batch mode. 
In mini-batch mode,\n cluster_centers_updated is the same variable as cluster_centers.\n cluster_centers_initialized: A boolean TF variable that will be set to\n true when all the initial centers have been chosen."} +{"repo": "tensorflow", "function": "def build(self, names_to_saveables, reshape=False, sharded=False, max_to_keep=5, keep_checkpoint_every_n_hours=10000.0, name=None, restore_sequentially=False, filename='model'):\n return self._build_internal(names_to_saveables=names_to_saveables, reshape=reshape, sharded=sharded, max_to_keep=max_to_keep, keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours, name=name, restore_sequentially=restore_sequentially, filename=filename)", "docstring": "Builds save/restore graph nodes or runs save/restore in eager mode.\n\nArgs:\n names_to_saveables: A dictionary mapping name to a Variable or\n SaveableObject. Each name will be associated with the corresponding\n variable in the checkpoint.\n reshape: If True, allow restoring parameters from a checkpoint that where\n the parameters have a different shape. This is only needed when you try\n to restore from a Dist-Belief checkpoint, and only some times.\n sharded: If True, shard the checkpoints, one per device that has Variable\n nodes.\n max_to_keep: Maximum number of checkpoints to keep. As new checkpoints\n are created, old ones are deleted. If None or 0, no checkpoints are\n deleted from the filesystem but only the last one is kept in the\n `checkpoint` file. Presently the number is only roughly enforced. For\n example in case of restarts more than max_to_keep checkpoints may be\n kept.\n keep_checkpoint_every_n_hours: How often checkpoints should be kept.\n Defaults to 10,000 hours.\n name: String. Optional name to use as a prefix when adding operations.\n restore_sequentially: A Bool, which if true, causes restore of different\n variables to happen sequentially within each device.\n filename: If known at graph construction time, filename used for variable\n loading/saving. If None, then the default name \"model\" will be used.\n\nReturns:\n A SaverDef proto.\n\nRaises:\n TypeError: If 'names_to_saveables' is not a dictionary mapping string\n keys to variable Tensors.\n ValueError: If any of the keys or values in 'names_to_saveables' is not\n unique."} +{"repo": "tensorflow", "function": "def maybe_promote_tensors(*tensors, force_same_dtype=False):\n if ops.is_auto_dtype_conversion_enabled():\n return tensors\n if not tensors:\n return tensors\n if not ops.is_numpy_style_type_promotion():\n if not force_same_dtype:\n return tensors\n promoted_tensors = []\n promoted_tensors.append(tensors[0])\n dtype = tensors[0].dtype.base_dtype\n for tensor in tensors[1:]:\n promoted_tensors.append(ops.convert_to_tensor(tensor, dtype, name='x'))\n return promoted_tensors\n result_type = np_dtypes._result_type(*[_maybe_get_dtype(x) for x in nest.flatten(tensors)])\n\n def _promote_or_cast(x):\n if isinstance(x, tensor_lib.Tensor):\n x = gen_math_ops.cast(x, result_type)\n else:\n x = ops.convert_to_tensor(x, result_type)\n return x\n return [_promote_or_cast(x) for x in tensors]", "docstring": "Promotes tensors if numpy style promotion is enabled.\n\nThis function promotes `tensors` according to numpy promotion rules\nif numpy style promotion is enabled. Otherwise, if\n`force_same_dtype` is `True`, it force-casts `tensors[1:]` to\n`tensor[0]`'s dtype. 
Note that this force-cast can be problematic.\nFor example, when some `tensors[1:]` elements can be silently\ndowncasted.\n\nArgs:\n *tensors: the list of tensors to promote.\n force_same_dtype: bool (optional, default to `False`). When numpy\n style promotion is disabled and `force_same_dtype` is `True`,\n this function will force-casts `tensors[1:]` to `tensor[0]`'s\n dtype (which could be problematic).\n\nReturns:\n The promoted list of tensors."} +{"repo": "transformers", "function": "class _LazyLoadAllMappings(OrderedDict[str, str]):\n\n def __init__(self, mapping):\n self._mapping = mapping\n self._initialized = False\n self._data = {}\n\n def _initialize(self):\n if self._initialized:\n return\n for model_type, map_name in self._mapping.items():\n module_name = model_type_to_module_name(model_type)\n module = importlib.import_module(f'.{module_name}', 'transformers.models')\n mapping = getattr(module, map_name)\n self._data.update(mapping)\n self._initialized = True\n\n def __getitem__(self, key):\n self._initialize()\n return self._data[key]\n\n def keys(self) -> KeysView[str]:\n self._initialize()\n return self._data.keys()\n\n def values(self) -> ValuesView[str]:\n self._initialize()\n return self._data.values()\n\n def items(self) -> KeysView[str]:\n self._initialize()\n return self._data.keys()\n\n def __iter__(self) -> Iterator[str]:\n self._initialize()\n return iter(self._data)\n\n def __contains__(self, item: object) -> bool:\n self._initialize()\n return item in self._data", "docstring": "A mapping that will load all pairs of key values at the first access (either by indexing, requestions keys, values,\netc.)\n\nArgs:\n mapping: The mapping to load."} +{"repo": "tensorflow", "function": "def show_path(from_op, tensors, sources):\n if isinstance(from_op, tensor_lib.Tensor):\n from_op = from_op.op\n if not isinstance(tensors, list):\n tensors = [tensors]\n final_ops = [_as_operation(tensor) for tensor in tensors]\n visited_ops = set((x.op for x in sources))\n ops_to_visit = list(final_ops)\n some_op_output = {}\n while ops_to_visit:\n op = ops_to_visit.pop()\n if op in visited_ops:\n continue\n visited_ops.add(op)\n if op == from_op:\n path_op = op\n path = [path_op]\n while path_op not in final_ops:\n path_op = some_op_output[path_op]\n path.append(path_op)\n return ' <- '.join(('%s (%s)' % (x.name, x.type) for x in reversed(path)))\n else:\n for inp in graph_inputs(op):\n if inp not in visited_ops and inp not in sources:\n some_op_output[inp] = op\n ops_to_visit.append(inp)\n return '??'", "docstring": "Find one path from `from_op` to any of `tensors`, ignoring `sources`.\n\nArgs:\n from_op: A `tf.Operation`.\n tensors: A `tf.Operation`, a `tf.Tensor`, or a list thereof.\n sources: A list of `tf.Tensor`.\n\nReturns:\n A python string containing the path, or \"??\" if none is found."} +{"repo": "transformers", "function": "def create_module_to_test_map(reverse_map: Optional[Dict[str, List[str]]]=None, filter_models: bool=False) -> Dict[str, List[str]]:\n if reverse_map is None:\n reverse_map = create_reverse_dependency_map()\n\n def is_test(fname):\n if fname.startswith('tests'):\n return True\n if fname.startswith('examples') and fname.split(os.path.sep)[-1].startswith('test'):\n return True\n return False\n test_map = {module: [f for f in deps if is_test(f)] for module, deps in reverse_map.items()}\n if not filter_models:\n return test_map\n num_model_tests = len(list(PATH_TO_TESTS.glob('models/*')))\n\n def has_many_models(tests):\n model_tests = {Path(t).parts[2] for t 
in tests if t.startswith('tests/models/')}\n return len(model_tests) > num_model_tests // 2\n\n def filter_tests(tests, module=''):\n filtered_tests = []\n for t in tests:\n if not t.startswith('tests/models/') or Path(t).parts[2] in IMPORTANT_MODELS or '/'.join(Path(t).parts[1:3]) in module:\n filtered_tests += [t]\n return filtered_tests\n return {module: filter_tests(tests, module=module) if has_many_models(tests) else tests for module, tests in test_map.items()}", "docstring": "Extract the tests from the reverse_dependency_map and potentially filters the model tests.\n\nArgs:\n reverse_map (`Dict[str, List[str]]`, *optional*):\n The reverse dependency map as created by `create_reverse_dependency_map`. Will default to the result of\n that function if not provided.\n filter_models (`bool`, *optional*, defaults to `False`):\n Whether or not to filter model tests to only include core models if a file impacts a lot of models.\n\nReturns:\n `Dict[str, List[str]]`: A dictionary that maps each file to the tests to execute if that file was modified."} +{"repo": "tensorflow", "function": "def with_dependencies(dependencies, output_tensor, name=None):\n if context.executing_eagerly():\n return output_tensor\n with ops.name_scope(name, 'control_dependency', list(dependencies) + [output_tensor]) as name:\n with ops.colocate_with(output_tensor):\n with ops.control_dependencies(dependencies):\n output_tensor = ops.convert_to_tensor_or_composite(output_tensor)\n if isinstance(output_tensor, indexed_slices.IndexedSlices):\n return indexed_slices.IndexedSlices(_Identity(output_tensor.values, name=name), output_tensor.indices, output_tensor.dense_shape)\n else:\n return _Identity(output_tensor, name=name)", "docstring": "Produces the content of `output_tensor` only after `dependencies`.\n\nIn some cases, a user may want the output of an operation to be\nconsumed externally only after some other dependencies have run\nfirst. This function ensures returns `output_tensor`, but only after all\noperations in `dependencies` have run. Note that this means that there is\nno guarantee that `output_tensor` will be evaluated after any `dependencies`\nhave run.\n\nSee also `tf.tuple` and `tf.group`.\n\nArgs:\n dependencies: Iterable of operations to run before this op finishes.\n output_tensor: A `Tensor` or `IndexedSlices` that will be returned.\n name: (Optional) A name for this operation.\n\nReturns:\n Same as `output_tensor`.\n\nRaises:\n TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`."} +{"repo": "mobly", "function": "def set_error_message(self, error_message):\n self._empty = False\n self.error_message = error_message", "docstring": "Sets an error message on an instrumentation block.\n\nThis method is used exclusively to indicate that a test method failed\nto complete, which is usually cause by a crash of some sort such that\nthe test method is marked as error instead of ignored.\n\nArgs:\n error_message: string, an error message to be added to the\n TestResultRecord to explain that something wrong happened."} +{"repo": "tensorflow", "function": "def add_meta_graph_and_variables(self, sess, tags, signature_def_map=None, assets_list=None, clear_devices=False, init_op=None, train_op=None, strip_default_attrs=False, saver=None):\n if self._has_saved_variables:\n raise AssertionError('Graph state including variables and assets has already been saved. 
Please invoke `add_meta_graph()` instead.')\n signature_def_map = signature_def_map or {}\n self._validate_signature_def_map(signature_def_map)\n _add_op_to_signature_def_map(signature_def_map, init_op, constants.INIT_OP_SIGNATURE_KEY)\n _add_op_to_signature_def_map(signature_def_map, train_op, constants.TRAIN_OP_SIGNATURE_KEY)\n path_helpers.get_or_create_variables_dir(self._export_dir)\n variables_path = path_helpers.get_variables_path(self._export_dir)\n saver = self._maybe_create_saver(saver)\n saver.save(sess, variables_path, write_meta_graph=False, write_state=False)\n meta_graph_def = saver.export_meta_graph(clear_devices=clear_devices, strip_default_attrs=strip_default_attrs)\n self._save_and_write_assets(meta_graph_def, assets_list)\n self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map)\n self._has_saved_variables = True", "docstring": "Adds the current meta graph to the SavedModel and saves variables.\n\nCreates a Saver to save the variables from the provided session. Exports the\ncorresponding meta graph def. This function assumes that the variables to be\nsaved have been initialized. For a given `SavedModelBuilder`, this API must\nbe called exactly once and for the first meta graph to save. For subsequent\nmeta graph defs to be added, the `add_meta_graph()` API must be used.\n\nArgs:\n sess: The TensorFlow session from which to save the meta graph and\n variables.\n tags: The set of tags with which to save the meta graph.\n signature_def_map: The map of signature def map to add to the meta graph\n def.\n assets_list: Assets to be saved with SavedModel.\n clear_devices: Set to true if the device info on the default graph should\n be cleared.\n init_op: Op or group of ops to execute when the graph is loaded. Note\n that when the init_op is specified it is run after the restore op at\n load-time.\n train_op: Op or group of ops that trains the model when run. This will\n not be run automatically when the graph is loaded, instead saved in\n a SignatureDef accessible through the exported MetaGraph.\n strip_default_attrs: Boolean. If `True`, default-valued attributes will be\n removed from the NodeDefs. For a detailed guide, see\n [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).\n saver: An instance of tf.compat.v1.train.Saver that will be used to export the\n metagraph and save variables. 
If None, a sharded Saver that restores\n all variables will be used."} +{"repo": "tensorflow", "function": "def get_tensors(graph):\n if not isinstance(graph, ops.Graph):\n raise TypeError('Expected a graph, got: {}'.format(type(graph)))\n ts = []\n for op in graph.get_operations():\n ts += op.outputs\n return ts", "docstring": "get all the tensors which are input or output of an op in the graph.\n\nArgs:\n graph: a `tf.Graph`.\nReturns:\n A list of `tf.Tensor`.\nRaises:\n TypeError: if graph is not a `tf.Graph`."} +{"repo": "transformers", "function": "def __call__(self, images: Union[ImageInput, List[ImageInput], List[List[ImageInput]]]=None, text: Union[TextInput, 'PreTokenizedInput', List[TextInput], List['PreTokenizedInput']]=None, audio=None, videos=None, image_seq_len: Optional[int]=None, **kwargs: Unpack[Idefics3ProcessorKwargs]) -> BatchEncoding:\n if text is None and images is None:\n raise ValueError('You must provide either `text` or `images`.')\n output_kwargs = self._merge_kwargs(Idefics3ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len\n return_mm_token_type_ids = output_kwargs['text_kwargs'].pop('return_mm_token_type_ids', False)\n return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)\n n_images_in_text = []\n n_images_in_images = []\n inputs = {}\n if text is not None:\n if isinstance(text, str):\n text = [text]\n elif not isinstance(text, list) and (not isinstance(text[0], str)):\n raise ValueError('Invalid input text. Please provide a string, or a list of strings')\n n_images_in_text = [sample.count(self.image_token) for sample in text]\n if images is not None:\n if is_image_or_image_url(images):\n images = [[images]]\n elif isinstance(images, (list, tuple)) and is_image_or_image_url(images[0]):\n if text is not None:\n if sum(n_images_in_text) != len(images):\n raise ValueError(f'The total number of {self.image_token} tokens in the prompts should be the same as the number of images passed. Found {sum(n_images_in_text)} {self.image_token} tokens and {len(images)} images.')\n cumsum_images_in_text = [0] + list(accumulate(n_images_in_text))\n images = [images[cumsum_images_in_text[i]:cumsum_images_in_text[i + 1]] for i in range(len(n_images_in_text))]\n else:\n images = [images]\n elif not isinstance(images, (list, tuple)) and (not isinstance(images[0], (list, tuple))) and (not is_image_or_image_url(images[0][0])):\n raise ValueError('Invalid input images. 
Please provide a single image or a list of images or a list of list of images.')\n n_images_in_images = [len(sample) for sample in images]\n images = [[load_image(im) if is_url(im) else im for im in sample] for sample in images]\n image_inputs = self.image_processor(images, **output_kwargs['images_kwargs'])\n inputs.update(image_inputs)\n if text is not None:\n if n_images_in_images != n_images_in_text:\n raise ValueError(f'The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same.')\n image_rows = inputs.pop('rows', [[0] * len(text)])\n image_cols = inputs.pop('cols', [[0] * len(text)])\n fake_image_token = self.fake_image_token\n image_token = self.image_token\n global_img_token = self.global_image_tag\n prompt_strings = []\n batch_image_seq_lengths = []\n for sample, sample_rows, sample_cols in zip(text, image_rows, image_cols):\n image_prompt_strings = []\n image_seq_lengths = []\n for n_rows, n_cols in zip(sample_rows, sample_cols):\n image_prompt_string = get_image_prompt_string(n_rows, n_cols, image_seq_len, image_token=image_token, fake_token_around_image=fake_image_token, global_img_token=global_img_token)\n row_length = (self.image_seq_len + 2) * n_cols + 1\n image_seq_lengths.append(self.image_seq_len + 3 + row_length * n_rows)\n image_prompt_strings.append(image_prompt_string)\n batch_image_seq_lengths.append(image_seq_lengths)\n split_sample = sample.split(image_token)\n if len(split_sample) == 0:\n raise ValueError('The image token should be present in the text.')\n sample = split_sample[0]\n for i, image_prompt_string in enumerate(image_prompt_strings):\n sample += image_prompt_string + split_sample[i + 1]\n prompt_strings.append(sample)\n text_inputs = self.tokenizer(prompt_strings, **output_kwargs['text_kwargs'])\n self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=['image'])\n inputs.update(text_inputs)\n elif text is not None:\n if any(n_images_in_text):\n raise ValueError(f'Found {sum(n_images_in_text)} {self.image_token} tokens in the text but no images were passed.')\n text_inputs = self.tokenizer(text=text, **output_kwargs['text_kwargs'])\n inputs.update(text_inputs)\n if return_mm_token_type_ids:\n array_ids = np.array(inputs['input_ids'])\n mm_token_type_ids = np.zeros_like(array_ids)\n for i, seq_lengths in enumerate(batch_image_seq_lengths):\n image_start_positions = np.where(array_ids[i] == self.fake_image_token_id)[0]\n j = 0\n for seq_len in seq_lengths:\n if j >= len(image_start_positions):\n break\n start = image_start_positions[j]\n end = start + seq_len\n mm_token_type_ids[i, start:end] = 1\n j = np.searchsorted(image_start_positions, end)\n inputs['mm_token_type_ids'] = mm_token_type_ids.tolist()\n return BatchFeature(data=inputs, tensor_type=return_tensors)", "docstring": "Processes the input prompts and returns a BatchEncoding.\n\nExample:\n\n```python\n>>> import requests\n>>> from transformers import Idefics3Processor\n>>> from transformers.image_utils import load_image\n\n>>> processor = Idefics3Processor.from_pretrained(\"HuggingFaceM4/Idefics3-8B-Llama3\")\n>>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example\n\n>>> url1 = \"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg\"\n>>> url2 = \"https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg\"\n\n>>> image1, image2 = load_image(url1), load_image(url2)\n>>> images = [[image1], [image2]]\n\n>>> text = [\n... 
\"In this image, we see\",\n... \"bla bla bla\",\n... ]\n>>> outputs = processor(images=images, text=text, return_tensors=\"pt\", padding=True)\n>>> input_ids = outputs.input_ids\n>>> input_tokens = processor.tokenizer.batch_decode(input_ids)\n>>> print(input_tokens)\n['<|begin_of_text|>(()*169) In this image, we see', '<|reserved_special_token_0|><|reserved_special_token_0|><|reserved_special_token_0|><|begin_of_text|>bla bla bla(()*169)']\n```\n\nArgs:\n images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*):\n The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n tensor. If is of type `List[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.\n text (`Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]`, *optional*):\n The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set\n `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n Wherever an image token, `` is encountered it is expanded to\n `` + `` + `` * `image_seq_len` * `.\n image_seq_len (`int`, *optional*):\n The length of the image sequence. If not provided, the default value of self.image_seq_len is used.\n image_seq_len should be equal to int(((image_size // patch_size) ** 2) / (scale_factor**2))\n return_tensors (`Union[str, TensorType]`, *optional*):\n If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more\n information."} +{"repo": "pyglove", "function": "def __init__(self, default: typing.Optional[bool]=MISSING_VALUE, is_noneable: bool=False, frozen: bool=False):\n super().__init__(bool, default, is_noneable=is_noneable, frozen=frozen)", "docstring": "Constructor.\n\nArgs:\n default: Default value for the value spec.\n is_noneable: If True, None is acceptable.\n frozen: If True, values other than the default value is not accceptable."} +{"repo": "transformers", "function": "class SiglipEncoder(nn.Module):\n\n def __init__(self, config: SiglipConfig):\n super().__init__()\n self.config = config\n self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n @can_return_tuple\n def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> BaseModelOutput:\n \"\"\"\n Args:\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n hidden_states = inputs_embeds\n for encoder_layer in self.layers:\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a\n[`SiglipEncoderLayer`].\n\nArgs:\n config: SiglipConfig"} +{"repo": "starthinker", "function": "def recipe_sheets_clear(config, auth_read, sheets_sheet, sheets_tab, sheets_range):\n sheets(config, {'auth': auth_read, 'sheet': sheets_sheet, 'tab': sheets_tab, 'range': sheets_range, 'clear': True})", "docstring": "Clear data from a sheet.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n sheets_sheet (string) - NA\n sheets_tab (string) - NA\n sheets_range (string) - NA"} +{"repo": "keras", "function": "def clone_graph_nodes(inputs, outputs):\n nodes_to_clone = find_nodes_by_inputs_and_outputs(inputs, outputs)\n cloned_inputs = []\n cloned_outputs = []\n kt_id_mapping = {}\n op_id_mapping = {}\n for kt_input in tree.flatten(inputs):\n if is_input_keras_tensor(kt_input):\n cloned_inputs.append(kt_input)\n kt_id_mapping[id(kt_input)] = kt_input\n else:\n cloned_input = Input(batch_shape=kt_input.shape, dtype=kt_input.dtype, sparse=kt_input.sparse, name=kt_input.name + 'CLONE')\n cloned_inputs.append(cloned_input)\n kt_id_mapping[id(kt_input)] = cloned_input\n op_id_mapping[id(kt_input._keras_history[0])] = cloned_input._keras_history[0]\n cloned_inputs = tree.pack_sequence_as(inputs, cloned_inputs)\n for kt_output in tree.flatten(outputs):\n cpy = clone_single_keras_tensor(kt_output)\n cpy._keras_history = kt_output._keras_history\n cloned_outputs.append(cpy)\n kt_id_mapping[id(kt_output)] = cpy\n cloned_outputs = tree.pack_sequence_as(outputs, cloned_outputs)\n for node in nodes_to_clone:\n if id(node.operation) in op_id_mapping:\n operation = op_id_mapping[id(node.operation)]\n else:\n operation = node.operation\n output_copy = clone_keras_tensors(node.output_tensors, kt_id_mapping)\n if not isinstance(operation, InputLayer):\n call_args_copy = clone_keras_tensors(node.arguments.args, kt_id_mapping)\n call_kwargs_copy = clone_keras_tensors(node.arguments.kwargs, kt_id_mapping)\n else:\n call_args_copy = ()\n call_kwargs_copy = {}\n Node(operation, call_args=call_args_copy, call_kwargs=call_kwargs_copy, outputs=output_copy)\n return (cloned_inputs, cloned_outputs)", "docstring": "Clone the `Node` between the inputs and output 
tensors.\n\nThis function is used to create a new functional model from any intermediate\nKeras tensors. The clone of the nodes mimic the behavior of reconstructing\nthe functional graph network by re-executing all the `__call__()` methods.\nThe cloned nodes will be appended to the layers.\n\nNote that a new `keras.Input` will be created for any items in the\n`inputs`\n\nArgs:\ninputs: A nested structure of `KerasTensor` instances.\noutputs: A nested structure of `KerasTensor` instances.\n\nReturns:\n A pair of inputs and outputs, with cloned `KerasTensor` instances.\n They can be used to create a new functional model."} +{"repo": "transformers", "function": "def call(self, pixel_values: tf.Tensor, input_ids: tf.Tensor | None=None, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, labels: tf.Tensor | None=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBlipForConditionalGenerationModelOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n image_embeds = vision_outputs[0]\n outputs = self.text_decoder(input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=image_embeds, labels=labels, return_dict=False, training=training)\n if not return_dict:\n outputs = (outputs[0], outputs[1], image_embeds, vision_outputs[0]) + vision_outputs[2:]\n return tuple((output for output in outputs if output is not None))\n if labels is not None:\n loss = outputs[0]\n logits = outputs[1]\n else:\n loss = None\n logits = outputs[0]\n if loss is not None and loss.shape.rank == 0:\n loss = tf.reshape(loss, (1,))\n return TFBlipForConditionalGenerationModelOutput(loss=loss, logits=logits, image_embeds=image_embeds, last_hidden_state=vision_outputs.last_hidden_state, hidden_states=vision_outputs.hidden_states, attentions=vision_outputs.attentions)", "docstring": "Returns:\n\nExamples:\n\n```python\n>>> from PIL import Image\n>>> import requests\n>>> from transformers import AutoProcessor, TFBlipForConditionalGeneration\n\n>>> processor = AutoProcessor.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n>>> model = TFBlipForConditionalGeneration.from_pretrained(\"Salesforce/blip-image-captioning-base\")\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n>>> text = \"A picture of\"\n\n>>> inputs = processor(images=image, text=text, return_tensors=\"tf\")\n\n>>> outputs = model(**inputs)\n```"} +{"repo": "tf-quant-finance", "function": "def swap_price(pay_leg_cashflows, receive_leg_cashflows, pay_leg_discount_factors, receive_leg_discount_factors, dtype=None, name=None):\n name = name or 'swap_price'\n with tf.name_scope(name):\n pay_leg_cashflows = tf.convert_to_tensor(pay_leg_cashflows, dtype=dtype, name='pay_leg_cashflows')\n dtype = dtype or pay_leg_cashflows.dtype\n receive_leg_cashflows = tf.convert_to_tensor(receive_leg_cashflows, dtype=dtype, name='receive_leg_cashflows')\n pay_leg_discount_factors = tf.convert_to_tensor(pay_leg_discount_factors, dtype=dtype, name='pay_leg_discount_factors')\n receive_leg_discount_factors = tf.convert_to_tensor(receive_leg_discount_factors, dtype=dtype, name='receive_leg_discount_factors')\n receive_leg_pv = 
cashflows.present_value(receive_leg_cashflows, receive_leg_discount_factors)\n pay_leg_pv = cashflows.present_value(pay_leg_cashflows, pay_leg_discount_factors)\n return receive_leg_pv - pay_leg_pv", "docstring": "Computes prices of a batch of generic swaps.\n\n#### Example\n```python\npay_leg_cashflows = [[100, 100, 100], [200, 250, 300]]\nreceive_leg_cashflows = [[200, 250, 300, 300], [100, 100, 100, 100]]\npay_leg_discount_factors = [[0.95, 0.9, 0.8],\n [0.9, 0.85, 0.8]]\nreceive_leg_discount_factors = [[0.95, 0.9, 0.8, 0.75],\n [0.9, 0.85, 0.8, 0.75]]\nswap_price(pay_leg_cashflows=pay_leg_cashflows,\n receive_leg_cashflows=receive_leg_cashflows,\n pay_leg_discount_factors=pay_leg_discount_factors,\n receive_leg_discount_factors=receive_leg_discount_factors,\n dtype=tf.float64)\n# Expected: [615.0, -302.5]\n```\n\nArgs:\n pay_leg_cashflows: A real `Tensor` of shape\n `batch_shape + [num_pay_cashflows]`, where `num_pay_cashflows` is the\n number of cashflows for each batch element. Cashflows of the pay leg of\n the swaps.\n receive_leg_cashflows: A `Tensor` of the same `dtype` as `pay_leg_cashflows`\n and of shape `batch_shape + [num_receive_cashflows]` where\n `num_receive_cashflows` is the number of cashflows for each batch element.\n Cashflows of the receive leg of the swaps.\n pay_leg_discount_factors: A `Tensor` of the same `dtype` as\n `pay_leg_cashflows` and of compatible shape. Discount factors for each\n cashflow of the pay leg.\n receive_leg_discount_factors: A `Tensor` of the same `dtype` as\n `receive_leg_cashflows` and of compatible shape. Discount factors for each\n cashflow of the receive leg.\n dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.\n Default value: None which maps to the default dtype inferred from\n `pay_leg_cashflows`.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: None which maps to 'swap_price'.\n\nReturns:\n A `Tensor` of the same `dtype` as `pay_leg_cashflows` and of shape `batch_shape`.\n Present values of swaps from receiver perspective."} +{"repo": "transformers", "function": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache,\n to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`int`):\n Batch size."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.Tensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, attention_mask: Optional[torch.Tensor]=None, token_type_ids: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, inputs_embeds: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, **kwargs: Any) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:\n if 'pixel_values' in kwargs:\n warnings.warn('The `pixel_values` argument is deprecated and will be removed in v4.47, use `input_ids` instead.', FutureWarning)\n if input_ids is not None:\n raise ValueError('You cannot pass both `pixel_values` and `input_ids`. 
Please make sure to only pass `input_ids`.')\n input_ids = kwargs.pop('pixel_values')\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n transformer_outputs = self.transformer(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n hidden_states = transformer_outputs[0]\n lm_logits = self.lm_head(hidden_states)\n loss = None\n if labels is not None:\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return (loss,) + output if loss is not None else output\n return CausalLMOutputWithCrossAttentions(loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions)", "docstring": "input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else\n `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.\nlabels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):\n Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`\n are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`\n\nExamples:\n\n```python\n>>> from transformers import AutoImageProcessor, ImageGPTForCausalImageModeling\n>>> import torch\n>>> import matplotlib.pyplot as plt\n>>> import numpy as np\n\n>>> image_processor = AutoImageProcessor.from_pretrained(\"openai/imagegpt-small\")\n>>> model = ImageGPTForCausalImageModeling.from_pretrained(\"openai/imagegpt-small\")\n>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n>>> model.to(device) # doctest: +IGNORE_RESULT\n\n>>> # unconditional generation of 8 images\n>>> batch_size = 4\n>>> context = torch.full((batch_size, 1), model.config.vocab_size - 1) # initialize with SOS token\n>>> context = context.to(device)\n>>> output = model.generate(\n... input_ids=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40\n... )\n\n>>> clusters = image_processor.clusters\n>>> height = image_processor.size[\"height\"]\n>>> width = image_processor.size[\"width\"]\n\n>>> samples = output[:, 1:].detach().cpu().numpy()\n>>> samples_img = [\n... np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [height, width, 3]).astype(np.uint8) for s in samples\n... 
] # convert color cluster tokens back to pixels\n>>> f, axes = plt.subplots(1, batch_size, dpi=300)\n\n>>> for img, ax in zip(samples_img, axes): # doctest: +IGNORE_RESULT\n... ax.axis(\"off\")\n... ax.imshow(img)\n```"} +{"repo": "transformers", "function": "def call(self, pixel_values: TFModelInputType | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:\n outputs = self.groupvit(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n return outputs", "docstring": "Returns:\n\nExamples:\n\n```python\n>>> from PIL import Image\n>>> import requests\n>>> from transformers import AutoProcessor, TFGroupViTVisionModel\n\n>>> processor = AutoProcessor.from_pretrained(\"nvidia/groupvit-gcc-yfcc\")\n>>> model = TFGroupViTVisionModel.from_pretrained(\"nvidia/groupvit-gcc-yfcc\")\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> inputs = processor(images=image, return_tensors=\"tf\")\n\n>>> outputs = model(**inputs)\n>>> last_hidden_state = outputs.last_hidden_state\n>>> pooled_output = outputs.pooler_output # pooled CLS states\n```"} +{"repo": "keras", "function": "def cosine_similarity(y_true, y_pred, axis=-1):\n y_pred = ops.convert_to_tensor(y_pred)\n y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)\n y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)\n y_pred = normalize(y_pred, axis=axis)\n y_true = normalize(y_true, axis=axis)\n return ops.sum(y_true * y_pred, axis=axis)", "docstring": "Computes the cosine similarity between labels and predictions.\n\nFormula:\n\n```python\nloss = sum(l2_norm(y_true) * l2_norm(y_pred))\n```\n\nArgs:\n y_true: Tensor of true targets.\n y_pred: Tensor of predicted targets.\n axis: Axis along which to determine similarity. Defaults to `-1`.\n\nReturns:\n Cosine similarity tensor.\n\nExample:\n\n>>> y_true = [[0., 1.], [1., 1.], [1., 1.]]\n>>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]\n>>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1)\n[0., 0.99999994, -0.99999994]"} +{"repo": "beam", "function": "def try_split(self, position):\n raise NotImplementedError", "docstring": "Atomically splits the current range.\n\nDetermines a position to split the current range, split_position, based on\nthe given position. In most cases split_position and position will be the\nsame.\n\nSplits the current range '[self.start_position, self.stop_position)'\ninto a \"primary\" part '[self.start_position, split_position)' and a\n\"residual\" part '[split_position, self.stop_position)', assuming the\ncurrent last-consumed position is within\n'[self.start_position, split_position)' (i.e., split_position has not been\nconsumed yet).\n\nIf successful, updates the current range to be the primary and returns a\ntuple (split_position, split_fraction). split_fraction should be the\nfraction of size of range '[self.start_position, split_position)' compared\nto the original (before split) range\n'[self.start_position, self.stop_position)'.\n\nIf the split_position has already been consumed, returns ``None``.\n\n** Thread safety **\n\nMethods of the class ``RangeTracker`` including this method may get invoked\nby different threads, hence must be made thread-safe, e.g. 
by using a single\nlock object.\n\nArgs:\n position: suggested position where the current range should try to\n be split at.\nReturns:\n a tuple containing the split position and split fraction if split is\n successful. Returns ``None`` otherwise."} +{"repo": "tensorflow", "function": "def _is_png(contents, name=None):\n with ops.name_scope(name, 'is_png'):\n substr = string_ops.substr(contents, 0, 3)\n return math_ops.equal(substr, b'\\x89PN', name=name)", "docstring": "Convenience function to check if the 'contents' encodes a PNG image.\n\nArgs:\n contents: 0-D `string`. The encoded image bytes.\n name: A name for the operation (optional)\n\nReturns:\n A scalar boolean tensor indicating if 'contents' may be a PNG image.\n is_png is susceptible to false positives."} +{"repo": "transformers", "function": "def upsample_like(pixel_values: Tensor, like: Tensor, mode: str='bilinear') -> Tensor:\n _, _, height, width = like.shape\n upsampled = nn.functional.interpolate(pixel_values, size=(height, width), mode=mode, align_corners=False)\n return upsampled", "docstring": "A utility function that upsamples `pixel_values` to match the dimension of `like`.\n\nArgs:\n pixel_values (`torch.Tensor`):\n The tensor we wish to upsample.\n like (`torch.Tensor`):\n The tensor we wish to use as size target.\n mode (str, *optional*, defaults to `\"bilinear\"`):\n The interpolation mode.\n\nReturns:\n `torch.Tensor`: The upsampled tensor"} +{"repo": "tensorflow", "function": "def to_proto(self, export_scope=None):\n if export_scope is None or self._variable.name.startswith(export_scope):\n var_def = variable_pb2.VariableDef()\n var_def.variable_name = ops.strip_name_scope(self._variable.name, export_scope)\n if self._initial_value is not None:\n var_def.initial_value_name = ops.strip_name_scope(self._initial_value.name, export_scope)\n var_def.trainable = self.trainable\n var_def.synchronization = self.synchronization.value\n var_def.aggregation = self.aggregation.value\n var_def.initializer_name = ops.strip_name_scope(self.initializer.name, export_scope)\n var_def.snapshot_name = ops.strip_name_scope(self._snapshot.name, export_scope)\n if self._save_slice_info:\n var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(export_scope=export_scope))\n return var_def\n else:\n return None", "docstring": "Converts a `Variable` to a `VariableDef` protocol buffer.\n\nArgs:\n export_scope: Optional `string`. Name scope to remove.\n\nReturns:\n A `VariableDef` protocol buffer, or `None` if the `Variable` is not\n in the specified name scope."} +{"repo": "transformers", "function": "def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is not None:\n return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1) + [1]\n return [1] + [0] * len(token_ids_0) + [1]", "docstring": "Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\nspecial tokens using the tokenizer `prepare_for_model` method.\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\nReturns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token."} +{"repo": "data-quality-monitor", "function": "def get_readrows_iterator(bq_read_client: BigQueryReadClient, table_metadata: TableMetadata, columns: Iterable[str] | None=None, data_format: DataFormat=DataFormat.AVRO) -> Iterable[Mapping]:\n requested_session = ReadSession(table=table_metadata.table_path, data_format=data_format.value, read_options={'selected_fields': columns})\n session = bq_read_client.create_read_session(parent=f'projects/{table_metadata.project_id}', read_session=requested_session, max_stream_count=1)\n stream_name = session.streams[0].name\n reader = bq_read_client.read_rows(stream_name)\n rows = reader.rows(session)\n return cast(Iterable[Mapping], rows)", "docstring": "Get an Iterator of row Mappings with the requested columns of the table,\nusing an authenticated BigQuery Storage API client.\n\nNote: Does NOT support nested columns.\n\nArgs:\n * bq_read_client: BigQuery Storage API Read client\n * table_metadata: TableMetadata object\n * columns (optional): List of columns to select\n * data_format: Format to fetch data in, one of:\n * DataFormat.AVRO\n * DataFormat.ARROW\n\nDefaults:\n * columns: None, i.e. select all columns\n * data_format: AVRO, since it auto-parses to Dict\n\nReturns:\n * Iterator of row Mappings"} +{"repo": "tensorflow", "function": "def run_functions_eagerly(run_eagerly):\n global RUN_FUNCTIONS_EAGERLY\n RUN_FUNCTIONS_EAGERLY = bool(run_eagerly)", "docstring": "Enables / disables eager execution of `tf.function`s.\n\nCalling `tf.config.run_functions_eagerly(True)` will make all\ninvocations of `tf.function` run eagerly instead of running as a traced graph\nfunction. This can be useful for debugging. As the code now runs line-by-line,\nyou can add arbitrary `print` messages or pdb breakpoints to monitor the\ninputs/outputs of each Tensorflow operation. However, you should avoid using\nthis for actual production because it significantly slows down execution.\n\n>>> def my_func(a):\n... print(f'a: {a}')\n... return a + a\n>>> a_fn = tf.function(my_func)\n\n>>> # A side effect the first time the function is traced\n>>> # In tracing time, `a` is printed with shape and dtype only\n>>> a_fn(tf.constant(1))\na: Tensor(\"a:0\", shape=(), dtype=int32)\n<tf.Tensor: shape=(), dtype=int32, numpy=2>\n\n>>> # `print` is a python side effect, it won't execute as the traced function\n>>> # is called\n>>> a_fn(tf.constant(2))\n<tf.Tensor: shape=(), dtype=int32, numpy=4>\n\n>>> # Now, switch to eager running\n>>> tf.config.run_functions_eagerly(True)\n>>> # The code now runs eagerly and the actual value of `a` is printed\n>>> a_fn(tf.constant(2))\na: 2\n<tf.Tensor: shape=(), dtype=int32, numpy=4>\n\n>>> # Turn this back off\n>>> tf.config.run_functions_eagerly(False)\n\nNote: This flag has no effect on functions passed into tf.data transformations\nas arguments. tf.data functions are never executed eagerly and are always\nexecuted as a compiled Tensorflow Graph.\n\nArgs:\n run_eagerly: Boolean. 
Whether to run functions eagerly."} +{"repo": "transformers", "function": "class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):\n vocab_files_names = VOCAB_FILES_NAMES\n slow_tokenizer_class = BlenderbotSmallTokenizer\n\n def __init__(self, vocab_file=None, merges_file=None, unk_token='<|endoftext|>', bos_token='<|endoftext|>', eos_token='<|endoftext|>', add_prefix_space=False, trim_offsets=True, **kwargs):\n super().__init__(ByteLevelBPETokenizer(vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)\n self.add_prefix_space = add_prefix_space\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]\n if token_ids_1 is None:\n return output\n return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]\n\n def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n \"\"\"\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall\n does not make use of token type ids, therefore a list of zeros is returned.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of zeros.\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]", "docstring": "Construct a \"fast\" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library).\n\nArgs:\n vocab_file (`str`):\n Path to the vocabulary file."} +{"repo": "transformers", "function": "class TFTopKLogitsWarper(TFLogitsWarper):\n\n def __init__(self, top_k: int, filter_value: float=-float('Inf'), min_tokens_to_keep: int=1):\n if not isinstance(top_k, int) or top_k <= 0:\n raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}')\n self.top_k = max(top_k, min_tokens_to_keep)\n self.filter_value = filter_value\n\n def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:\n top_k = min(self.top_k, scores.shape[-1])\n indices_to_remove = scores < tf.math.top_k(scores, k=top_k)[0][..., -1:]\n next_scores = tf.where(indices_to_remove, self.filter_value, scores)\n return next_scores", "docstring": "[`TFLogitsWarper`] that performs top-k, i.e. 
restricting to the k highest probability elements.\n\nArgs:\n top_k (`int`):\n The number of highest probability vocabulary tokens to keep for top-k-filtering.\n filter_value (`float`, *optional*, defaults to -inf):\n All filtered values will be set to this float value.\n min_tokens_to_keep (`int`, *optional*, defaults to 1):\n Minimum number of tokens that cannot be filtered."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, pixel_values: Optional[torch.FloatTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, return_loss: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False, return_dict: Optional[bool]=None) -> Union[Tuple, XCLIPOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n batch_size, num_frames, num_channels, height, width = pixel_values.shape\n pixel_values = pixel_values.reshape(-1, num_channels, height, width)\n vision_outputs = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict)\n video_embeds = vision_outputs[1]\n video_embeds = self.visual_projection(video_embeds)\n cls_features = video_embeds.view(batch_size, num_frames, -1)\n mit_outputs = self.mit(cls_features, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n video_embeds = mit_outputs[1]\n img_features = vision_outputs[0][:, 1:, :]\n img_features = self.prompts_visual_layernorm(img_features)\n img_features = img_features @ self.prompts_visual_projection\n img_features = img_features.view(batch_size, num_frames, -1, video_embeds.shape[-1])\n img_features = img_features.mean(dim=1, keepdim=False)\n text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n text_embeds = text_outputs[1]\n text_embeds = self.text_projection(text_embeds)\n text_embeds = text_embeds.unsqueeze(0).expand(batch_size, -1, -1)\n text_embeds = text_embeds + self.prompts_generator(text_embeds, img_features)\n video_embeds = video_embeds / video_embeds.norm(p=2, dim=-1, keepdim=True)\n text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)\n logit_scale = self.logit_scale.exp()\n logits_per_video = torch.einsum('bd,bkd->bk', video_embeds, logit_scale * text_embeds)\n logits_per_text = logits_per_video.T\n loss = None\n if return_loss:\n loss = x_clip_loss(logits_per_text)\n if not return_dict:\n output = (logits_per_video, logits_per_text, text_embeds, video_embeds, text_outputs, vision_outputs)\n return (loss,) + output if loss is not None else output\n return XCLIPOutput(loss=loss, logits_per_video=logits_per_video, logits_per_text=logits_per_text, text_embeds=text_embeds, video_embeds=video_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, mit_output=mit_outputs)", "docstring": "return_loss (`bool`, *optional*):\n Whether or not to return the contrastive 
loss.\n\nExamples:\n\n```python\n>>> import av\n>>> import torch\n>>> import numpy as np\n\n>>> from transformers import AutoProcessor, AutoModel\n>>> from huggingface_hub import hf_hub_download\n\n>>> np.random.seed(0)\n\n\n>>> def read_video_pyav(container, indices):\n... '''\n... Decode the video with PyAV decoder.\n... Args:\n... container (`av.container.input.InputContainer`): PyAV container.\n... indices (`List[int]`): List of frame indices to decode.\n... Returns:\n... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).\n... '''\n... frames = []\n... container.seek(0)\n... start_index = indices[0]\n... end_index = indices[-1]\n... for i, frame in enumerate(container.decode(video=0)):\n... if i > end_index:\n... break\n... if i >= start_index and i in indices:\n... frames.append(frame)\n... return np.stack([x.to_ndarray(format=\"rgb24\") for x in frames])\n\n\n>>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):\n... '''\n... Sample a given number of frame indices from the video.\n... Args:\n... clip_len (`int`): Total number of frames to sample.\n... frame_sample_rate (`int`): Sample every n-th frame.\n... seg_len (`int`): Maximum allowed index of sample's last frame.\n... Returns:\n... indices (`List[int]`): List of sampled frame indices\n... '''\n... converted_len = int(clip_len * frame_sample_rate)\n... end_idx = np.random.randint(converted_len, seg_len)\n... start_idx = end_idx - converted_len\n... indices = np.linspace(start_idx, end_idx, num=clip_len)\n... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)\n... return indices\n\n\n>>> # video clip consists of 300 frames (10 seconds at 30 FPS)\n>>> file_path = hf_hub_download(\n... repo_id=\"nielsr/video-demo\", filename=\"eating_spaghetti.mp4\", repo_type=\"dataset\"\n... )\n>>> container = av.open(file_path)\n\n>>> # sample 8 frames\n>>> indices = sample_frame_indices(clip_len=8, frame_sample_rate=1, seg_len=container.streams.video[0].frames)\n>>> video = read_video_pyav(container, indices)\n\n>>> processor = AutoProcessor.from_pretrained(\"microsoft/xclip-base-patch32\")\n>>> model = AutoModel.from_pretrained(\"microsoft/xclip-base-patch32\")\n\n>>> inputs = processor(\n... text=[\"playing sports\", \"eating spaghetti\", \"go shopping\"],\n... videos=list(video),\n... return_tensors=\"pt\",\n... padding=True,\n... )\n\n>>> # forward pass\n>>> with torch.no_grad():\n... 
outputs = model(**inputs)\n\n>>> logits_per_video = outputs.logits_per_video # this is the video-text similarity score\n>>> probs = logits_per_video.softmax(dim=1) # we can take the softmax to get the label probabilities\n>>> print(probs)\ntensor([[1.9496e-04, 9.9960e-01, 2.0825e-04]])\n```"} +{"repo": "tf-quant-finance", "function": "def minimize(value_and_gradients_function: Callable[[types.RealTensor], Tuple[types.RealTensor, types.RealTensor]], initial_position: types.RealTensor, tolerance: types.RealTensor=1e-08, x_tolerance: types.RealTensor=0, f_relative_tolerance: types.RealTensor=0, max_iterations: types.IntTensor=50, parallel_iterations: types.IntTensor=1, stopping_condition: Callable[[types.BoolTensor, types.BoolTensor], types.BoolTensor]=None, params: ConjugateGradientParams=None, name: str=None) -> OptimizerResult:\n with tf.compat.v1.name_scope(name, 'minimize', [initial_position, tolerance]):\n if params is None:\n params = ConjugateGradientParams()\n initial_position = tf.convert_to_tensor(value=initial_position, name='initial_position')\n dtype = initial_position.dtype\n tolerance = tf.convert_to_tensor(value=tolerance, dtype=dtype, name='grad_tolerance')\n f_relative_tolerance = tf.convert_to_tensor(value=f_relative_tolerance, dtype=dtype, name='f_relative_tolerance')\n x_tolerance = tf.convert_to_tensor(value=x_tolerance, dtype=dtype, name='x_tolerance')\n max_iterations = tf.convert_to_tensor(value=max_iterations, name='max_iterations')\n stopping_condition = stopping_condition or converged_all\n delta = tf.convert_to_tensor(params.sufficient_decrease_param, dtype=dtype, name='delta')\n sigma = tf.convert_to_tensor(params.curvature_param, dtype=dtype, name='sigma')\n eps = tf.convert_to_tensor(params.threshold_use_approximate_wolfe_condition, dtype=dtype, name='sigma')\n eta = tf.convert_to_tensor(params.direction_update_param, dtype=dtype, name='eta')\n psi_1 = tf.convert_to_tensor(params.initial_guess_small_factor, dtype=dtype, name='psi_1')\n psi_2 = tf.convert_to_tensor(params.initial_guess_step_multiplier, dtype=dtype, name='psi_2')\n f0, df0 = value_and_gradients_function(initial_position)\n converged = _norm(df0) < tolerance\n initial_state = _OptimizerState(converged=converged, failed=tf.zeros_like(converged), num_iterations=tf.convert_to_tensor(value=0), num_objective_evaluations=tf.convert_to_tensor(value=1), position=initial_position, objective_value=f0, objective_gradient=df0, direction=-df0, prev_step=tf.ones_like(f0))\n\n def _cond(state):\n \"\"\"Continue if iterations remain and stopping condition is not met.\"\"\"\n return (state.num_iterations < max_iterations) & tf.logical_not(stopping_condition(state.converged, state.failed))\n\n def _body(state):\n \"\"\"Main optimization loop.\"\"\"\n x_k = state.position\n d_k = state.direction\n f_k = state.objective_value\n g_k = state.objective_gradient\n a_km1 = state.prev_step\n\n def ls_func(alpha):\n pt = x_k + tf.expand_dims(alpha, axis=-1) * d_k\n objective_value, gradient = value_and_gradients_function(pt)\n return ValueAndGradient(x=alpha, f=objective_value, df=_dot(gradient, d_k), full_gradient=gradient)\n phi_0 = f_k\n dphi_0 = _dot(g_k, d_k)\n ls_val_0 = ValueAndGradient(x=tf.zeros_like(phi_0), f=phi_0, df=dphi_0, full_gradient=g_k)\n step_guess_result = _init_step(ls_val_0, a_km1, ls_func, psi_1, psi_2, params.quad_step)\n init_step = step_guess_result.step\n c = init_step.x\n phi_lim = phi_0 + eps * tf.abs(phi_0)\n phi_c = init_step.f\n dphi_c = init_step.df\n suff_decrease_1 = delta * dphi_0 >= 
tf.math.divide_no_nan(phi_c - phi_0, c)\n curvature = dphi_c >= sigma * dphi_0\n wolfe1 = suff_decrease_1 & curvature\n suff_decrease_2 = (2 * delta - 1) * dphi_0 >= dphi_c\n curvature = dphi_c >= sigma * dphi_0\n wolfe2 = suff_decrease_2 & curvature & (phi_c <= phi_lim)\n wolfe = wolfe1 | wolfe2\n skip_line_search = step_guess_result.may_terminate & wolfe | state.failed | state.converged\n ls_result = linesearch.hager_zhang(ls_func, value_at_zero=ls_val_0, converged=skip_line_search, initial_step_size=init_step.x, value_at_initial_step=init_step, shrinkage_param=params.shrinkage_param, expansion_param=params.expansion_param, sufficient_decrease_param=delta, curvature_param=sigma, threshold_use_approximate_wolfe_condition=eps)\n a_k = tf.compat.v1.where(skip_line_search, init_step.x, ls_result.left.x)\n x_kp1 = state.position + tf.expand_dims(a_k, -1) * d_k\n f_kp1 = tf.compat.v1.where(skip_line_search, init_step.f, ls_result.left.f)\n g_kp1 = tf.compat.v1.where(skip_line_search, init_step.full_gradient, ls_result.left.full_gradient)\n y_k = g_kp1 - g_k\n d_dot_y = _dot(d_k, y_k)\n b_k = tf.math.divide_no_nan(_dot(y_k, g_kp1) - tf.math.divide_no_nan(_norm_sq(y_k) * _dot(g_kp1, d_k), d_dot_y), d_dot_y)\n eta_k = tf.math.divide_no_nan(eta * _dot(d_k, g_k), _norm_sq(d_k))\n b_k = tf.maximum(b_k, eta_k)\n d_kp1 = -g_kp1 + tf.expand_dims(b_k, -1) * d_k\n grad_converged = _norm_inf(g_kp1) <= tolerance\n x_converged = _norm_inf(x_kp1 - x_k) <= x_tolerance\n f_converged = tf.math.abs(f_kp1 - f_k) <= f_relative_tolerance * tf.math.abs(f_k)\n converged = ls_result.converged & (grad_converged | x_converged | f_converged)\n failed = ls_result.failed\n new_state = _OptimizerState(converged=converged, failed=failed, num_iterations=state.num_iterations + 1, num_objective_evaluations=state.num_objective_evaluations + step_guess_result.func_evals + ls_result.func_evals, position=tf.compat.v1.where(state.converged, x_k, x_kp1), objective_value=tf.compat.v1.where(state.converged, f_k, f_kp1), objective_gradient=tf.compat.v1.where(state.converged, g_k, g_kp1), direction=d_kp1, prev_step=a_k)\n return (new_state,)\n final_state = tf.while_loop(_cond, _body, (initial_state,), parallel_iterations=parallel_iterations)[0]\n return OptimizerResult(converged=final_state.converged, failed=final_state.failed, num_iterations=final_state.num_iterations, num_objective_evaluations=final_state.num_objective_evaluations, position=final_state.position, objective_value=final_state.objective_value, objective_gradient=final_state.objective_gradient)", "docstring": "Minimizes a differentiable function.\n\nImplementation of algorithm described in [HZ2006]. Updated formula for next\nsearch direction were taken from [HZ2013].\n\nSupports batches with 1-dimensional batch shape.\n\n#### References:\n[HZ2006] Hager, William W., and Hongchao Zhang. \"Algorithm 851: CG_DESCENT,\n a conjugate gradient method with guaranteed descent.\"\n http://users.clas.ufl.edu/hager/papers/CG/cg_compare.pdf\n[HZ2013] W. W. Hager and H. 
Zhang (2013) The limited memory conjugate gradient\n method.\n https://pdfs.semanticscholar.org/8769/69f3911777e0ff0663f21b67dff30518726b.pdf\n\n### Usage:\nThe following example demonstrates this optimizer attempting to find the\nminimum for a simple two dimensional quadratic objective function.\n\n```python\n minimum = np.array([1.0, 1.0]) # The center of the quadratic bowl.\n scales = np.array([2.0, 3.0]) # The scales along the two axes.\n\n # The objective function and the gradient.\n def quadratic(x):\n value = tf.reduce_sum(scales * (x - minimum) ** 2)\n return value, tf.gradients(value, x)[0]\n\n start = tf.constant([0.6, 0.8]) # Starting point for the search.\n optim_results = conjugate_gradient.minimize(\n quadratic, initial_position=start, tolerance=1e-8)\n\n with tf.Session() as session:\n results = session.run(optim_results)\n # Check that the search converged\n assert(results.converged)\n # Check that the argmin is close to the actual value.\n np.testing.assert_allclose(results.position, minimum)\n```\n\nArgs:\n value_and_gradients_function: A Python callable that accepts a point as a\n real `Tensor` and returns a tuple of `Tensor`s of real dtype containing\n the value of the function and its gradient at that point. The function to\n be minimized. The input should be of shape `[..., n]`, where `n` is the\n size of the domain of input points, and all others are batching\n dimensions. The first component of the return value should be a real\n `Tensor` of matching shape `[...]`. The second component (the gradient)\n should also be of shape `[..., n]` like the input value to the function.\n initial_position: Real `Tensor` of shape `[..., n]`. The starting point, or\n points when using batching dimensions, of the search procedure. At these\n points the function value and the gradient norm should be finite.\n tolerance: Scalar `Tensor` of real dtype. Specifies the gradient tolerance\n for the procedure. If the supremum norm of the gradient vector is below\n this number, the algorithm is stopped.\n x_tolerance: Scalar `Tensor` of real dtype. If the absolute change in the\n position between one iteration and the next is smaller than this number,\n the algorithm is stopped.\n f_relative_tolerance: Scalar `Tensor` of real dtype. If the relative change\n in the objective value between one iteration and the next is smaller than\n this value, the algorithm is stopped.\n max_iterations: Scalar positive int32 `Tensor`. The maximum number of\n iterations.\n parallel_iterations: Positive integer. The number of iterations allowed to\n run in parallel.\n stopping_condition: (Optional) A Python function that takes as input two\n Boolean tensors of shape `[...]`, and returns a Boolean scalar tensor. The\n input tensors are `converged` and `failed`, indicating the current status\n of each respective batch member; the return value states whether the\n algorithm should stop. The default is tfp.optimizer.converged_all which\n only stops when all batch members have either converged or failed. An\n alternative is tfp.optimizer.converged_any which stops as soon as one\n batch member has converged, or when all have failed.\n params: ConjugateGradientParams object with adjustable parameters of the\n algorithm. If not supplied, default parameters will be used.\n name: (Optional) Python str. The name prefixed to the ops created by this\n function. 
If not supplied, the default name 'minimize' is used.\n\nReturns:\n optimizer_results: A namedtuple containing the following items:\n converged: boolean tensor of shape `[...]` indicating for each batch\n member whether the minimum was found within tolerance.\n failed: boolean tensor of shape `[...]` indicating for each batch\n member whether a line search step failed to find a suitable step size\n satisfying Wolfe conditions. In the absence of any constraints on the\n number of objective evaluations permitted, this value will\n be the complement of `converged`. However, if there is\n a constraint and the search stopped due to available\n evaluations being exhausted, both `failed` and `converged`\n will be simultaneously False.\n num_objective_evaluations: The total number of objective\n evaluations performed.\n position: A tensor of shape `[..., n]` containing the last argument value\n found during the search from each starting point. If the search\n converged, then this value is the argmin of the objective function.\n objective_value: A tensor of shape `[...]` with the value of the\n objective function at the `position`. If the search converged, then\n this is the (local) minimum of the objective function.\n objective_gradient: A tensor of shape `[..., n]` containing the gradient\n of the objective function at the `position`. If the search converged\n the max-norm of this tensor should be below the tolerance."} +{"repo": "tensorflow", "function": "def __init__(self, name, pivot):\n super(XLACompileContext, self).__init__()\n self._name = name\n self._name_as_bytes = compat.as_bytes(name)\n self._unsupported_ops = []\n self._pivot = pivot", "docstring": "Builds a new XLACompileContext.\n\nArgs:\n name: a unique name for the context, used to populate the\n `_xla_compile_id` attribute.\n pivot: a pivot node. Nodes in the XLACompileContext that do not have any\n inputs will have a control dependency on the pivot node. This ensures\n that nodes are correctly included in any enclosing control flow\n contexts."} +{"repo": "tensorflow", "function": "def adjust_jpeg_quality(image, jpeg_quality, dct_method='', name=None):\n with ops.name_scope(name, 'adjust_jpeg_quality', [image]):\n image = ops.convert_to_tensor(image, name='image')\n channels = image.shape.as_list()[-1]\n orig_dtype = image.dtype\n image = convert_image_dtype(image, dtypes.uint8, saturate=True)\n if not _is_tensor(jpeg_quality):\n jpeg_quality = ops.convert_to_tensor(jpeg_quality, dtype=dtypes.int32)\n image = gen_image_ops.encode_jpeg_variable_quality(image, jpeg_quality)\n image = gen_image_ops.decode_jpeg(image, channels=channels, dct_method=dct_method)\n return convert_image_dtype(image, orig_dtype, saturate=True)", "docstring": "Adjust jpeg encoding quality of an image.\n\nThis is a convenience method that converts an image to uint8 representation,\nencodes it to jpeg with `jpeg_quality`, decodes it, and then converts back\nto the original data type.\n\n`jpeg_quality` must be in the interval `[0, 100]`.\n\nUsage Examples:\n\n>>> x = [[[0.01, 0.02, 0.03],\n... [0.04, 0.05, 0.06]],\n... [[0.07, 0.08, 0.09],\n... 
[0.10, 0.11, 0.12]]]\n>>> x_jpeg = tf.image.adjust_jpeg_quality(x, 75)\n>>> x_jpeg.numpy()\narray([[[0.00392157, 0.01960784, 0.03137255],\n [0.02745098, 0.04313726, 0.05490196]],\n [[0.05882353, 0.07450981, 0.08627451],\n [0.08235294, 0.09803922, 0.10980393]]], dtype=float32)\n\nNote that floating point values are expected to have values in the range\n[0,1) and values outside this range are clipped.\n\n>>> x = [[[1.0, 2.0, 3.0],\n... [4.0, 5.0, 6.0]],\n... [[7.0, 8.0, 9.0],\n... [10.0, 11.0, 12.0]]]\n>>> tf.image.adjust_jpeg_quality(x, 75)\n\n\nNote that `jpeg_quality` 100 is still lossy compression.\n\n>>> x = tf.constant([[[1, 2, 3],\n... [4, 5, 6]],\n... [[7, 8, 9],\n... [10, 11, 12]]], dtype=tf.uint8)\n>>> tf.image.adjust_jpeg_quality(x, 100)\n\n\nArgs:\n image: 3D image. The size of the last dimension must be None, 1 or 3.\n jpeg_quality: Python int or Tensor of type int32. jpeg encoding quality.\n dct_method: An optional string. Specifies the DCT method to use for JPEG\n decompression. Currently available options are [\"INTEGER_FAST\",\n \"INTEGER_ACCURATE\"]. Defaults to \"\" which maps to \"INTEGER_FAST\",\n sacrificing image quality for speed.\n name: A name for this operation (optional).\n\nReturns:\n Adjusted image, same shape and DType as `image`.\n\nRaises:\n InvalidArgumentError: quality must be in [0,100]\n InvalidArgumentError: image must have 1 or 3 channels"} +{"repo": "pytype", "function": "def _order_code(dis_code: pycnite.types.DisassembledCode) -> OrderedCode:\n ops = opcodes.build_opcodes(dis_code)\n add_pop_block_targets(ops)\n blocks = compute_order(ops, dis_code.python_version)\n return OrderedCode(dis_code.code, ops, blocks)", "docstring": "Split a CodeType object into ordered blocks.\n\nThis takes a CodeType object (i.e., a piece of compiled Python code) and\nsplits it into ordered basic blocks.\n\nArgs:\n dis_code: A pycnite.types.DisassembledCode object.\n\nReturns:\n An OrderedCode instance."} +{"repo": "tensorflow", "function": "def assertDeviceEqual(self, device1, device2, msg=None):\n device1 = pydev.canonical_name(device1)\n device2 = pydev.canonical_name(device2)\n self.assertEqual(device1, device2, 'Devices %s and %s are not equal. 
%s' % (device1, device2, msg))", "docstring": "Asserts that the two given devices are the same.\n\nArgs:\n device1: A string device name or TensorFlow `DeviceSpec` object.\n device2: A string device name or TensorFlow `DeviceSpec` object.\n msg: Optional message to report on failure."} +{"repo": "beam", "function": "def execute_tests(self):\n with open(self.notebook_path, 'r') as nb_f:\n nb = nbformat.read(nb_f, as_version=4)\n ExecutePreprocessor.timeout = self.timeout_secs\n ep = ExecutePreprocessor(allow_errors=True)\n exec_nb, _ = ep.preprocess(nb, {'metadata': {'path': self.dir + '/'}})\n test_count = 0\n error_count = 0\n errors = OrderedDict()\n code_cells = {}\n for cell in exec_nb['cells']:\n if cell['cell_type'] == 'code':\n code_cells[cell['execution_count']] = cell\n for cell_num in sorted(self.tests.keys()):\n if cell_num not in code_cells:\n test_count += 1\n error_count += 1\n errors[cell_num, '', ''] = 'Given cell does not exist.'\n else:\n cell = code_cells[cell_num]\n for test in self.tests[cell_num]:\n cls, setup = list(test.items())[0]\n test_count += 1\n try:\n getattr(sys.modules['testlib'], cls)(setup).check(cell)\n except Exception as e:\n error_count += 1\n errors[cell_num, cls, setup] = str(e)\n return (test_count, error_count, errors)", "docstring": "Executes notebook and compares to test spec.\n\nReturns:\n # of tests, # of errors, error_dict\n where error_dict maps (cell number, test class, expected output) to string"} +{"repo": "transformers", "function": "def compute_mup_vector(config):\n intermediate_size = config.mamba_d_ssm if config.mamba_d_ssm is not None else int(config.mamba_expand * config.hidden_size)\n groups_time_state_size = config.mamba_n_groups * config.mamba_d_state\n num_heads = config.mamba_n_heads\n zxbcdt_multipliers = config.ssm_multipliers\n vector_shape = 2 * intermediate_size + 2 * groups_time_state_size + num_heads\n mup_vector = torch.ones(1, 1, vector_shape)\n mup_vector[:, :, :intermediate_size] *= zxbcdt_multipliers[0]\n mup_vector[:, :, intermediate_size:2 * intermediate_size] *= zxbcdt_multipliers[1]\n mup_vector[:, :, 2 * intermediate_size:2 * intermediate_size + groups_time_state_size] *= zxbcdt_multipliers[2]\n mup_vector[:, :, 2 * intermediate_size + groups_time_state_size:2 * intermediate_size + 2 * groups_time_state_size] *= zxbcdt_multipliers[3]\n mup_vector[:, :, 2 * intermediate_size + 2 * groups_time_state_size:] *= zxbcdt_multipliers[4]\n return mup_vector", "docstring": "Computes the MuP vector based on model configuration.\n\nFalconH1 applies different MuP multiplier for each dimension of the hidden states.\nThe MuP vector is partitioned into chunks, and each chunk is multiplied with its\ncorresponding projected dimension.\n\nArgs:\n config: FalconH1Config object\n\nReturns:\n torch.Tensor: The computed MuP vector"} +{"repo": "transformers", "function": "def _preprocess_input(self, inputs, error_message, expected_nesting=1, dtype=None):\n if inputs is None:\n return None\n if hasattr(inputs, 'numpy'):\n inputs = inputs.numpy().tolist()\n valid = isinstance(inputs, list)\n current = inputs\n for _ in range(expected_nesting):\n if not valid or not current:\n break\n valid = valid and isinstance(current[0], list)\n current = current[0] if current else None\n if not valid:\n raise ValueError(error_message)\n return [np.array(item, dtype=dtype) for item in inputs]", "docstring": "Preprocess input by converting torch tensors to numpy arrays and validating structure.\n\nArgs:\n inputs: The input to process\n error_message: 
Error message if validation fails\n expected_nesting: Expected nesting level (1 for points/labels, 2 for boxes)\n dtype: Optional data type for numpy array conversion\n\nReturns:\n Processed input as list of numpy arrays or None"} +{"repo": "transformers", "function": "def traced(func=None, *, span_name=None, standalone=False, additional_attributes: Optional[List[Tuple[str, str, Union[Any, Callable[[Any], Any]]]]]=None):\n\n def decorator(func):\n if not _has_opentelemetry:\n return func\n import functools\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n instance = args[0] if args and (hasattr(func, '__self__') and func.__self__ is not None) else None\n is_method = instance is not None\n if is_method and hasattr(instance, 'tracer'):\n tracer = instance.tracer\n else:\n tracer = get_tracer(f'transformers.{func.__module__}.{func.__name__}')\n name = span_name or func.__name__\n span_fn = tracer.start_span if standalone else tracer.start_as_current_span\n with span_fn(name) as span:\n span.set_attribute('function.name', func.__name__)\n span.set_attribute('function.module', func.__module__)\n span.set_attribute('function.is_method', is_method)\n if args:\n for i, arg in enumerate(args):\n if isinstance(arg, (str, int, float, bool)) or arg is None:\n span.set_attribute(f'args.{i}', str(arg))\n else:\n span.set_attribute(f'args.{i}', str(type(arg)))\n if kwargs:\n for key, value in kwargs.items():\n if isinstance(value, (str, int, float, bool)) or value is None:\n span.set_attribute(f'kwargs.{key}', str(value))\n else:\n span.set_attribute(f'kwargs.{key}', str(type(value)))\n if additional_attributes and is_method:\n for attr_config in additional_attributes:\n instance_attribute_name, span_attribute_key, value_or_transform_function = attr_config\n if hasattr(instance, instance_attribute_name):\n attribute_value = getattr(instance, instance_attribute_name)\n if callable(value_or_transform_function):\n transformed_value = value_or_transform_function(attribute_value)\n else:\n transformed_value = value_or_transform_function\n span.set_attribute(span_attribute_key, transformed_value)\n try:\n result = func(*args, **kwargs)\n return result\n except Exception as e:\n span.set_status(Status(StatusCode.ERROR))\n span.record_exception(e)\n raise\n return wrapper\n if func is None:\n return decorator\n return decorator(func)", "docstring": "Decorator to trace function calls with OpenTelemetry.\n\nCan be used as @traced or @traced(span_name=\"custom_name\")\n\nArgs:\n func: The function to trace\n span_name: Optional custom name for the span (defaults to function name)\n standalone: If True, creates a parentless span\n additional_attributes: Optional list of additional attributes to set on the span.\n Each item is a tuple of (instance_attribute_name, span_attribute_key, value_or_transform_function)\n where:\n - instance_attribute_name: Name of the attribute to get from the class instance\n - span_attribute_key: Key to use when setting the attribute on the span\n - value_or_transform_function: Either a raw value to use directly, or a function to transform\n the attribute value before setting it on the span\n\nReturns:\n Decorated function with tracing"} +{"repo": "tensorflow", "function": "def get_associated_prs(api: github_api.GitHubAPI, commit_hashes: Sequence[str]) -> Generator[int, None, None]:\n regex = re.compile('PR #(\\\\d+)')\n for commit_hash in commit_hashes:\n response = api.get_commit('openxla/xla', commit_hash)\n message = response['commit']['message']\n if (maybe_match := 
regex.match(message)):\n pr_number = maybe_match.group(1)\n print(f'Found PR #{pr_number} associated with commit hash {commit_hash}')\n yield int(pr_number)\n print(f\"Didn't find any PRs associated with commit hashes: {commit_hashes}\")", "docstring": "Finds PRs associated with commits.\n\nArguments:\n api: GitHubAPI object which will be used to make requests\n commit_hashes: A sequence of SHAs which may have PRs associated with them\n\nYields:\n Associated PR numbers as integers"} +{"repo": "beam", "function": "class LanguageConnectorConfig:\n username: str\n password: str\n database_name: str\n instance_name: str\n ip_types: Optional[List[str]] = None\n enable_iam_auth: bool = False\n target_principal: Optional[str] = None\n delegates: Optional[List[str]] = None\n quota_project: Optional[str] = None\n connection_properties: Optional[Dict[str, str]] = None\n additional_properties: Optional[Dict[str, Any]] = None\n\n def _base_jdbc_properties(self) -> Dict[str, Any]:\n properties = {'cloudSqlInstance': self.instance_name}\n if self.ip_types:\n properties['ipTypes'] = ','.join(self.ip_types)\n if self.enable_iam_auth:\n properties['enableIamAuth'] = 'true'\n if self.target_principal:\n properties['cloudSqlTargetPrincipal'] = self.target_principal\n if self.delegates:\n properties['cloudSqlDelegates'] = ','.join(self.delegates)\n if self.quota_project:\n properties['cloudSqlAdminQuotaProject'] = self.quota_project\n if self.additional_properties:\n properties.update(self.additional_properties)\n return properties\n\n def _build_jdbc_url(self, socketFactory, database_type):\n url = f'jdbc:{database_type}:///{self.database_name}?'\n properties = self._base_jdbc_properties()\n properties['socketFactory'] = socketFactory\n property_string = '&'.join((f'{k}={v}' for k, v in properties.items()))\n return url + property_string\n\n def to_connection_config(self):\n return ConnectionConfig(jdbc_url=self.to_jdbc_url(), username=self.username, password=self.password, connection_properties=self.connection_properties, additional_jdbc_args=self.additional_jdbc_args())\n\n def additional_jdbc_args(self) -> Dict[str, List[Any]]:\n return {}", "docstring": "Configuration options for CloudSQL Java language connector.\n\nSet parameters to connect to a CloudSQL instance using the\nJava language connector. For details see\nhttps://github.com/GoogleCloudPlatform/cloud-sql-jdbc-socket-factory/blob/main/docs/jdbc.md\n\nAttributes:\n username: Database username.\n password: Database password. Can be empty string when using IAM.\n database_name: Name of the database to connect to.\n instance_name: Instance connection name. Format:\n '<project>:<region>:<instance>'\n ip_types: Preferred order of IP types used to connect, as a\n list of strings.\n enable_iam_auth: Whether to enable IAM authentication. Default is False.\n target_principal: Optional service account to impersonate for\n connection.\n delegates: Optional list of service accounts for delegated\n impersonation.\n admin_service_endpoint: Optional custom API service endpoint.\n quota_project: Optional project ID for quota and billing.\n connection_properties: Optional JDBC connection properties dict.\n Example: {'ssl': 'true'}\n additional_properties: Additional properties to be added to the JDBC\n url. 
Example: {'someProperty': 'true'}"} +{"repo": "transformers", "function": "def _prepare_tables(self):\n values = torch.tensor([[[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]])\n row_index = IndexMap(indices=torch.tensor([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 0, 0], [1, 1, 1], [2, 2, 2]]]), num_segments=3, batch_dims=1)\n col_index = IndexMap(indices=torch.tensor([[[0, 0, 1], [0, 0, 1], [0, 0, 1]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]]), num_segments=3, batch_dims=1)\n return (values, row_index, col_index)", "docstring": "Prepares two tables, both with three distinct rows.\nThe first table has two columns:\n1.0, 2.0 | 3.0\n2.0, 0.0 | 1.0\n1.0, 3.0 | 4.0\nThe second table has three columns:\n1.0 | 2.0 | 3.0\n2.0 | 0.0 | 1.0\n1.0 | 3.0 | 4.0\nReturns:\nSegmentedTensors with the tables."} +{"repo": "tensorflow", "function": "def get_input_details(self):\n return [self._get_tensor_details(i, subgraph_index=0) for i in self._interpreter.InputIndices()]", "docstring": "Gets model input tensor details.\n\nReturns:\n A list in which each item is a dictionary with details about\n an input tensor. Each dictionary contains the following fields\n that describe the tensor:\n\n + `name`: The tensor name.\n + `index`: The tensor index in the interpreter.\n + `shape`: The shape of the tensor.\n + `shape_signature`: Same as `shape` for models with known/fixed shapes.\n If any dimension sizes are unknown, they are indicated with `-1`.\n + `dtype`: The numpy data type (such as `np.int32` or `np.uint8`).\n + `quantization`: Deprecated, use `quantization_parameters`. This field\n only works for per-tensor quantization, whereas\n `quantization_parameters` works in all cases.\n + `quantization_parameters`: A dictionary of parameters used to quantize\n the tensor:\n ~ `scales`: List of scales (one if per-tensor quantization).\n ~ `zero_points`: List of zero_points (one if per-tensor quantization).\n ~ `quantized_dimension`: Specifies the dimension of per-axis\n quantization, in the case of multiple scales/zero_points.\n + `sparsity_parameters`: A dictionary of parameters used to encode a\n sparse tensor. This is empty if the tensor is dense."} +{"repo": "tensorflow", "function": "def set_number_of_partitions(self, number_of_partitions):\n if self._frozen:\n if self._number_of_partitions != number_of_partitions:\n raise ValueError(f\"Can't set number_of_partitions to {number_of_partitions} since it has been frozen to use {self._number_of_partitions}.\")\n else:\n self._number_of_partitions = number_of_partitions", "docstring": "Sets the number of partitions for the current policy.\n\nIf the policy has been frozen then number_of_partitions must match the\nexisting setting.\n\nArgs:\n number_of_partitions: The number of partitions to use in the policy.\n\nRaises:\n ValueError: If the policy has been frozen and number_of_partitions\n differs from the frozen value."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and\nadding special tokens. 
A BARThez sequence has the following format:\n\n- single sequence: `<s> X </s>`\n- pair of sequences: `<s> A </s></s> B </s>`\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "tensorflow", "function": "def set_visible_devices(devices, device_type=None):\n context.context().set_visible_devices(devices, device_type)", "docstring": "Set the list of visible devices.\n\nSpecifies which `PhysicalDevice` objects are visible to the runtime.\nTensorFlow will only allocate memory and place operations on visible\nphysical devices, as otherwise no `LogicalDevice` will be created on them.\nBy default all discovered devices are marked as visible.\n\nThe following example demonstrates disabling the first GPU on the machine.\n\n>>> physical_devices = tf.config.list_physical_devices('GPU')\n>>> try:\n... # Disable first GPU\n... tf.config.set_visible_devices(physical_devices[1:], 'GPU')\n... logical_devices = tf.config.list_logical_devices('GPU')\n... # Logical device was not created for first GPU\n... assert len(logical_devices) == len(physical_devices) - 1\n... except:\n... # Invalid device or cannot modify virtual devices once initialized.\n... pass\n\nArgs:\n devices: List of `PhysicalDevice`s to make visible\n device_type: (optional) Only configure devices matching this device type.\n For example \"CPU\" or \"GPU\". Other devices will be left unaltered.\n\nRaises:\n ValueError: If argument validation fails.\n RuntimeError: Runtime is already initialized."} +{"repo": "tensorflow", "function": "def mark_as_unsaveable(self, error_message):\n self._saveable = False\n if isinstance(error_message, str):\n error_message = [error_message]\n self._saving_errors.update(error_message)", "docstring": "Marks this FuncGraph as unsaveable.\n\nAny attempts to export this FuncGraph will raise an error with the specified\nmessage.\n\nArgs:\n error_message: List or string containing the error message to be raised\n when saving this FuncGraph to SavedModel."} +{"repo": "tensorflow", "function": "def __init__(self, num_rows, batch_shape=None, dtype=None, is_non_singular=True, is_self_adjoint=True, is_positive_definite=True, is_square=True, assert_proper_shapes=False, name='LinearOperatorIdentity'):\n parameters = dict(num_rows=num_rows, batch_shape=batch_shape, dtype=dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, assert_proper_shapes=assert_proper_shapes, name=name)\n dtype = dtype or dtypes.float32\n self._assert_proper_shapes = assert_proper_shapes\n with ops.name_scope(name):\n dtype = dtypes.as_dtype(dtype)\n if not is_self_adjoint:\n raise ValueError('An identity operator is always self adjoint.')\n if not is_non_singular:\n raise ValueError('An identity operator is always non-singular.')\n if not is_positive_definite:\n raise ValueError('An identity operator is always positive-definite.')\n if not is_square:\n raise ValueError('An identity operator is always square.')\n super(LinearOperatorIdentity, self).__init__(dtype=dtype, is_non_singular=is_non_singular, is_self_adjoint=is_self_adjoint, is_positive_definite=is_positive_definite, is_square=is_square, parameters=parameters, name=name)\n linear_operator_util.assert_not_ref_type(num_rows, 'num_rows')\n 
linear_operator_util.assert_not_ref_type(batch_shape, 'batch_shape')\n self._num_rows = linear_operator_util.shape_tensor(num_rows, name='num_rows')\n self._num_rows_static = tensor_util.constant_value(self._num_rows)\n self._check_num_rows_possibly_add_asserts()\n if batch_shape is None:\n self._batch_shape_arg = None\n else:\n self._batch_shape_arg = linear_operator_util.shape_tensor(batch_shape, name='batch_shape_arg')\n self._batch_shape_static = tensor_util.constant_value(self._batch_shape_arg)\n self._check_batch_shape_possibly_add_asserts()", "docstring": "Initialize a `LinearOperatorIdentity`.\n\nThe `LinearOperatorIdentity` is initialized with arguments defining `dtype`\nand shape.\n\nThis operator is able to broadcast the leading (batch) dimensions, which\nsometimes requires copying data. If `batch_shape` is `None`, the operator\ncan take arguments of any batch shape without copying. See examples.\n\nArgs:\n num_rows: Scalar non-negative integer `Tensor`. Number of rows in the\n corresponding identity matrix.\n batch_shape: Optional `1-D` integer `Tensor`. The shape of the leading\n dimensions. If `None`, this operator has no leading dimensions.\n dtype: Data type of the matrix that this operator represents.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n assert_proper_shapes: Python `bool`. If `False`, only perform static\n checks that initialization and method arguments have proper shape.\n If `True`, and static checks are inconclusive, add asserts to the graph.\n name: A name for this `LinearOperator`\n\nRaises:\n ValueError: If `num_rows` is determined statically to be non-scalar, or\n negative.\n ValueError: If `batch_shape` is determined statically to not be 1-D, or\n negative.\n ValueError: If any of the following is not `True`:\n `{is_self_adjoint, is_non_singular, is_positive_definite}`.\n TypeError: If `num_rows` or `batch_shape` is ref-type (e.g. Variable)."} +{"repo": "tensorflow", "function": "def __init__(self, name, default_name=None, values=None) -> None:\n if not (default_name is None or isinstance(default_name, str)):\n raise TypeError('`default_name` type (%s) is not a string type. You likely meant to pass this into the `values` kwarg.' 
% type(default_name))\n self._name = default_name if name is None else name\n self._default_name = default_name\n self._values = values", "docstring": "Initialize the context manager.\n\nArgs:\n name: The name argument that is passed to the op function.\n default_name: The default name to use if the `name` argument is `None`.\n values: The list of `Tensor` arguments that are passed to the op function.\n\nRaises:\n TypeError: if `default_name` is passed in but not a string."} +{"repo": "yapf", "function": "def DumpNodeToString(node):\n if isinstance(node, pytree.Leaf):\n fmt = '{name}({value}) [lineno={lineno}, column={column}, prefix={prefix}, penalty={penalty}]'\n return fmt.format(name=NodeName(node), value=_PytreeNodeRepr(node), lineno=node.lineno, column=node.column, prefix=repr(node.prefix), penalty=GetNodeAnnotation(node, Annotation.SPLIT_PENALTY, None))\n else:\n fmt = '{node} [{len} children] [child_indent=\"{indent}\"]'\n return fmt.format(node=NodeName(node), len=len(node.children), indent=GetNodeAnnotation(node, Annotation.CHILD_INDENT))", "docstring": "Dump a string representation of the given node. For debugging.\n\nArguments:\n node: the node.\n\nReturns:\n The string representation."} +{"repo": "keras", "function": "def get_test_data(train_samples, test_samples, input_shape, num_classes, random_seed=None):\n np.random.seed(random_seed)\n total_samples = train_samples + test_samples\n samples_per_class = total_samples // num_classes\n y = np.array([i for i in range(num_classes) for _ in range(samples_per_class)], dtype=np.int32)\n extra_samples = total_samples - len(y)\n y_extra = np.array([i % num_classes for i in range(extra_samples)], dtype=np.int64)\n y = np.concatenate([y, y_extra])\n templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)\n x = np.zeros((total_samples,) + input_shape, dtype=np.float32)\n for i in range(total_samples):\n x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1.0, size=input_shape)\n indices = np.arange(total_samples)\n np.random.shuffle(indices)\n x, y = (x[indices], y[indices])\n x_train, y_train, x_test, y_test = ([], [], [], [])\n for cls in range(num_classes):\n cls_indices = np.where(y == cls)[0]\n np.random.shuffle(cls_indices)\n train_count = int(train_samples / num_classes)\n x_train.extend(x[cls_indices[:train_count]])\n y_train.extend(y[cls_indices[:train_count]])\n x_test.extend(x[cls_indices[train_count:]])\n y_test.extend(y[cls_indices[train_count:]])\n x_train, y_train = (np.array(x_train), np.array(y_train))\n x_test, y_test = (np.array(x_test), np.array(y_test))\n train_indices = np.arange(len(x_train))\n test_indices = np.arange(len(x_test))\n np.random.shuffle(train_indices)\n np.random.shuffle(test_indices)\n x_train, y_train = (x_train[train_indices], y_train[train_indices])\n x_test, y_test = (x_test[test_indices], y_test[test_indices])\n return ((x_train, y_train), (x_test, y_test))", "docstring": "Generates balanced, stratified synthetic test data to train a model on.\n\nArgs:\n train_samples: Integer, how many training samples to generate.\n test_samples: Integer, how many test samples to generate.\n input_shape: Tuple of integers, shape of the inputs.\n num_classes: Integer, number of classes for the data and targets.\n random_seed: Integer, random seed used by Numpy to generate data.\n\nReturns:\n A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`."} +{"repo": "keras", "function": "def update_state(self, y_true, y_pred, sample_weight=None):\n 
metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives}, y_true, y_pred, thresholds=self.thresholds, thresholds_distributed_evenly=self._thresholds_distributed_evenly, top_k=self.top_k, class_id=self.class_id, sample_weight=sample_weight)", "docstring": "Accumulates true positive and false positive statistics.\n\nArgs:\n y_true: The ground truth values, with the same dimensions as\n `y_pred`. Will be cast to `bool`.\n y_pred: The predicted values. Each element must be in the range\n `[0, 1]`.\n sample_weight: Optional weighting of each example. Defaults to `1`.\n Can be a tensor whose rank is either 0, or the same rank as\n `y_true`, and must be broadcastable to `y_true`."} +{"repo": "transformers", "function": "def forward(self, pixel_values: Optional[torch.Tensor]=None, pixel_attention_mask: Optional[torch.Tensor]=None, spatial_shapes: Optional[torch.LongTensor]=None, labels: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None) -> ImageClassifierOutput:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs: BaseModelOutputWithPooling = self.vision_model(pixel_values, attention_mask=pixel_attention_mask, spatial_shapes=spatial_shapes, output_attentions=output_attentions, output_hidden_states=output_hidden_states)\n sequence_output = outputs.last_hidden_state\n if pixel_attention_mask is not None:\n pool_mask = pixel_attention_mask[..., None].to(sequence_output.device)\n sequence_output = torch.sum(sequence_output * pool_mask, dim=1) / torch.sum(pool_mask, dim=1)\n else:\n sequence_output = torch.mean(sequence_output, dim=1)\n logits = self.classifier(sequence_output)\n loss = None\n if labels is not None:\n labels = labels.to(logits.device)\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = 'regression'\n elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = 'single_label_classification'\n else:\n self.config.problem_type = 'multi_label_classification'\n if self.config.problem_type == 'regression':\n loss_fct = MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == 'single_label_classification':\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == 'multi_label_classification':\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n return ImageClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "pixel_attention_mask (`torch.Tensor` of shape `(batch_size, image_size, image_size)`, *optional*):\n Mask to avoid performing attention on padding pixel indices.\nspatial_shapes (`torch.LongTensor` of shape `(batch_size, 2)`):\n Tensor containing the spatial dimensions (height, width) of the input images.\nlabels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the image classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\nExamples:\n\n```python\n>>> from transformers import AutoImageProcessor, Siglip2ForImageClassification\n>>> import torch\n>>> from PIL import Image\n>>> import requests\n\n>>> torch.manual_seed(3) # doctest: +IGNORE_RESULT\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> # note: we are loading a `Siglip2Model` from the hub here,\n>>> # so the head will be randomly initialized, hence the predictions will be random if seed is not set above.\n>>> image_processor = AutoImageProcessor.from_pretrained(\"google/siglip2-base-patch16-224\")\n>>> model = Siglip2ForImageClassification.from_pretrained(\"google/siglip2-base-patch16-224\")\n\n>>> inputs = image_processor(images=image, return_tensors=\"pt\")\n>>> outputs = model(**inputs)\n>>> logits = outputs.logits\n>>> # model predicts one of the two classes\n>>> predicted_class_idx = logits.argmax(-1).item()\n>>> print(\"Predicted class:\", model.config.id2label[predicted_class_idx])\nPredicted class: LABEL_1\n```"} +{"repo": "tensorflow", "function": "def __init__(self, graph, run_metadata):\n self._graph = graph\n self._run_metadata = run_metadata\n self._string_table = StringTable()\n self._functions = Functions(self._string_table)\n self._locations = Locations(self._functions)", "docstring": "Constructor.\n\nArgs:\n graph: A `Graph` instance.\n run_metadata: A list of `RunMetadata` objects."} +{"repo": "yapf", "function": "def _IdentifyParameterLists(line):\n func_stack = []\n param_stack = []\n for tok in line.tokens:\n if subtypes.FUNC_DEF in tok.subtypes:\n assert tok.next_token.value == '('\n func_stack.append(tok.next_token)\n continue\n if func_stack and tok.value == ')':\n if tok == func_stack[-1].matching_bracket:\n func_stack.pop()\n continue\n if subtypes.PARAMETER_START in tok.subtypes:\n param_stack.append(tok)\n if param_stack and subtypes.PARAMETER_STOP in tok.subtypes:\n start = param_stack.pop()\n func_stack[-1].parameters.append(object_state.Parameter(start, tok))", "docstring": "Visit the node to create a state for parameter lists.\n\nFor instance, a parameter is considered an \"object\" with its first and last\ntoken uniquely identifying the object.\n\nArguments:\n line: (LogicalLine) A logical line."} +{"repo": "starthinker", "function": "def recipe_sheets_to_bigquery(config, auth_read, auth_write, sheets_url, sheets_tab, sheets_range, dataset, table, sheets_header):\n sheets(config, {'auth': auth_read, 'sheet': sheets_url, 'tab': sheets_tab, 'range': sheets_range, 'header': sheets_header, 'out': {'auth': auth_write, 'bigquery': {'dataset': dataset, 'table': table}}})", "docstring": "Import data from a sheet and move it to a BigQuery table.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n auth_write (authentication) - Credentials used for writing data.\n sheets_url (string) - NA\n sheets_tab (string) - NA\n sheets_range (string) - NA\n dataset (string) - NA\n table (string) - NA\n sheets_header (boolean) - NA"} +{"repo": "transformers", "function": "def to(self, *args, **kwargs) -> 'BatchFeature':\n requires_backends(self, ['torch'])\n import torch\n new_data = {}\n device = kwargs.get('device')\n if device is None and len(args) > 0:\n arg = args[0]\n if is_torch_dtype(arg):\n pass\n elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, 
int):\n device = arg\n else:\n raise ValueError(f'Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.')\n\n def _to(elem):\n if torch.is_floating_point(elem):\n return elem.to(*args, **kwargs)\n if device is not None:\n return elem.to(device=device)\n return elem\n for k, v in self.items():\n if isinstance(v, list) and isinstance(v[0], list):\n new_v = []\n for elems in v:\n new_v.append([_to(elem) for elem in elems])\n new_data[k] = new_v\n elif isinstance(v, list):\n new_data[k] = [_to(elem) for elem in v]\n else:\n new_data[k] = _to(v)\n self.data = new_data\n return self", "docstring": "Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in\ndifferent `dtypes` and sending the `BatchFeature` to a different `device`.\n\nArgs:\n args (`Tuple`):\n Will be passed to the `to(...)` function of the tensors.\n kwargs (`Dict`, *optional*):\n Will be passed to the `to(...)` function of the tensors.\n\nReturns:\n [`BatchFeature`]: The same instance after modification."} +{"repo": "transformers", "function": "def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep", "docstring": "Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and\nadding special tokens. A DeBERTa sequence has the following format:\n\n- single sequence: [CLS] X [SEP]\n- pair of sequences: [CLS] A [SEP] B [SEP]\n\nArgs:\n token_ids_0 (`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\nReturns:\n `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens."} +{"repo": "tensorflow", "function": "def smart_cond(pred, true_fn=None, false_fn=None, name=None):\n if isinstance(pred, variables.Variable):\n return cond.cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)\n return smart_module.smart_cond(pred, true_fn=true_fn, false_fn=false_fn, name=name)", "docstring": "Return either `true_fn()` if predicate `pred` is true else `false_fn()`.\n\nIf `pred` is a bool or has a constant value, we return either `true_fn()`\nor `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.\n\nArgs:\n pred: A scalar determining whether to return the result of `true_fn` or\n `false_fn`.\n true_fn: The callable to be performed if pred is true.\n false_fn: The callable to be performed if pred is false.\n name: Optional name prefix when using `tf.cond`.\n\nReturns:\n Tensors returned by the call to either `true_fn` or `false_fn`.\n\nRaises:\n TypeError: If `true_fn` or `false_fn` is not callable."} +{"repo": "yapf", "function": "def end(self):\n return (self.last.lineno, self.last.column + len(self.last.value))", "docstring": "The end of the logical line.\n\nReturns:\n A tuple of the ending line number and column."} +{"repo": "tensorflow", "function": "def _send_call_tracebacks(destinations, origin_stack, is_eager_execution=False, call_key=None, graph=None, send_source=True):\n if not isinstance(destinations, list):\n destinations = [destinations]\n destinations = [dest[len(common.GRPC_URL_PREFIX):] if dest.startswith(common.GRPC_URL_PREFIX) else dest for dest in destinations]\n call_type = debug_service_pb2.CallTraceback.EAGER_EXECUTION if 
is_eager_execution else debug_service_pb2.CallTraceback.GRAPH_EXECUTION\n graph_traceback = tfprof_logger.merge_default_with_oplog(graph, add_trainable_var=False) if graph else None\n call_traceback = debug_service_pb2.CallTraceback(call_type=call_type, call_key=call_key, graph_traceback=graph_traceback, graph_version=graph.version if graph else None)\n _format_origin_stack(origin_stack, call_traceback)\n if send_source:\n source_file_paths = set()\n source_file_paths.update(_source_file_paths_outside_tensorflow_py_library((log_entry.code_def for log_entry in call_traceback.graph_traceback.log_entries), call_traceback.graph_traceback.id_to_string))\n source_file_paths.update(_source_file_paths_outside_tensorflow_py_library([call_traceback.origin_stack], call_traceback.origin_id_to_string))\n debugged_source_files = []\n for file_path in source_file_paths:\n source_files = debug_pb2.DebuggedSourceFiles()\n _load_debugged_source_file(file_path, source_files.source_files.add())\n debugged_source_files.append(source_files)\n for destination in destinations:\n no_max_message_sizes = [('grpc.max_receive_message_length', -1), ('grpc.max_send_message_length', -1)]\n channel = grpc.insecure_channel(destination, options=no_max_message_sizes)\n stub = debug_service_pb2_grpc.EventListenerStub(channel)\n stub.SendTracebacks(call_traceback)\n if send_source:\n for source_files in debugged_source_files:\n stub.SendSourceFiles(source_files)", "docstring": "Send the tracebacks of a TensorFlow execution call.\n\nTo gRPC debug server(s). This applies to graph execution (`tf.Session.run()`)\ncalls and eager execution calls.\n\nIf `send_source`, also sends the underlying source files outside the\nTensorFlow library.\n\nArgs:\n destinations: gRPC destination addresses, a `str` or a `list` of `str`s,\n e.g., \"localhost:4242\". If a `list`, gRPC requests containing the same\n `CallTraceback` proto payload will be sent to all the destinations.\n origin_stack: The traceback stack for the origin of the execution call. For\n graph execution, this is the traceback of the `tf.Session.run()`\n invocation. For eager execution, this is the traceback of the Python\n line that executes the eager operation.\n is_eager_execution: (`bool`) whether an eager execution call (i.e., not a\n `tf.Session.run` or derived methods) is being sent.\n call_key: The key of the execution call, as a string. For graph execution,\n this is a string describing the feeds, fetches (and targets) names of the\n `tf.Session.run` call. 
For eager execution, this is ignored.\n graph: A Python `tf.Graph` object (i.e., *not* a `tf.compat.v1.GraphDef`),\n which contains op tracebacks, if applicable.\n send_source: Whether the source files involved in the op tracebacks but\n outside the TensorFlow library are to be sent."} +{"repo": "tensorflow", "function": "def list_variables(ckpt_dir_or_file):\n reader = load_checkpoint(ckpt_dir_or_file)\n variable_map = reader.get_variable_to_shape_map()\n names = sorted(variable_map.keys())\n result = []\n for name in names:\n result.append((name, variable_map[name]))\n return result", "docstring": "Lists the checkpoint keys and shapes of variables in a checkpoint.\n\nCheckpoint keys are paths in a checkpoint graph.\n\nExample usage:\n\n```python\nimport tensorflow as tf\nimport os\nckpt_directory = \"/tmp/training_checkpoints/ckpt\"\nckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)\nmanager = tf.train.CheckpointManager(ckpt, ckpt_directory, max_to_keep=3)\ntrain_and_checkpoint(model, manager)\ntf.train.list_variables(manager.latest_checkpoint)\n```\n\nArgs:\n ckpt_dir_or_file: Directory with checkpoints file or path to checkpoint.\n\nReturns:\n List of tuples `(key, shape)`."} +{"repo": "tensorflow", "function": "def regex_full_match(input, pattern, name=None):\n if isinstance(pattern, util_compat.bytes_or_text_types):\n return gen_string_ops.static_regex_full_match(input=input, pattern=pattern, name=name)\n return gen_string_ops.regex_full_match(input=input, pattern=pattern, name=name)", "docstring": "Match elements of `input` with regex `pattern`.\n\nArgs:\n input: string `Tensor`, the source strings to process.\n pattern: string or scalar string `Tensor`, regular expression to use,\n see more details at https://github.com/google/re2/wiki/Syntax\n name: Name of the op.\n\nReturns:\n bool `Tensor` of the same shape as `input` with match results."} +{"repo": "transformers", "function": "def forward(self, input_features: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, decoder_input_ids: Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.Tensor]=None, decoder_head_mask: Optional[torch.Tensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, decoder_inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if encoder_outputs is None:\n encoder_outputs = self.encoder(input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n elif return_dict and (not isinstance(encoder_outputs, BaseModelOutput)):\n encoder_outputs = BaseModelOutput(last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, 
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None)\n if attention_mask is not None:\n encoder_attention_mask = self._get_feature_vector_attention_mask(encoder_outputs[0].shape[1], attention_mask)\n else:\n encoder_attention_mask = None\n decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=encoder_attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n if not return_dict:\n return decoder_outputs + encoder_outputs\n return Seq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions)", "docstring": "input_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):\n Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained\n by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.*\n via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the\n [`AutoFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a\n tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`]\ndecoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`SpeechToTextTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n\n SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If\n `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see\n `past_key_values`).\ndecoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\n\n If you want to change padding behavior, you should read\n [`modeling_speech_to_text._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the\n paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy.\ncross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\nExample:\n\n ```python\n >>> import torch\n >>> from transformers import Speech2TextModel, AutoFeatureExtractor\n >>> from datasets import load_dataset\n\n >>> model = Speech2TextModel.from_pretrained(\"facebook/s2t-small-librispeech-asr\")\n >>> feature_extractor = AutoFeatureExtractor.from_pretrained(\"facebook/s2t-small-librispeech-asr\")\n >>> ds = load_dataset(\"hf-internal-testing/librispeech_asr_dummy\", \"clean\", split=\"validation\")\n >>> inputs = feature_extractor(\n ... ds[0][\"audio\"][\"array\"], sampling_rate=ds[0][\"audio\"][\"sampling_rate\"], return_tensors=\"pt\"\n ... )\n >>> input_features = inputs.input_features\n >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id\n >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state\n >>> list(last_hidden_state.shape)\n [1, 2, 256]\n ```"} +{"repo": "tensorflow", "function": "def _extract_type_spec_recursively(value):\n if isinstance(value, composite_tensor.CompositeTensor):\n return value._type_spec\n if isinstance(value, variables.Variable):\n return resource_variable_ops.VariableSpec(value.shape, dtype=value.dtype, trainable=value.trainable)\n if tensor_util.is_tensor(value):\n return tensor_spec.TensorSpec(value.shape, value.dtype)\n if isinstance(value, list):\n return list((_extract_type_spec_recursively(v) for v in value))\n if isinstance(value, data_structures.TrackableDataStructure):\n return _extract_type_spec_recursively(value.__wrapped__)\n if isinstance(value, tuple):\n return type(value)((_extract_type_spec_recursively(x) for x in value))\n if isinstance(value, dict):\n return type(value)(((k, _extract_type_spec_recursively(v)) for k, v in value.items()))\n return value", "docstring": "Return (collection of) `TypeSpec`(s) for `value` if it includes `Tensor`s.\n\nIf `value` is a `Tensor` or `CompositeTensor`, return its `TypeSpec`. If\n`value` is a collection containing `Tensor` values, recursively supplant them\nwith their respective `TypeSpec`s in a collection of parallel structure.\n\nIf `value` is none of the above, return it unchanged.\n\nArgs:\n value: a Python `object` to (possibly) turn into a (collection of)\n `tf.TypeSpec`(s).\n\nReturns:\n spec: the `TypeSpec` or collection of `TypeSpec`s corresponding to `value`\n or `value`, if no `Tensor`s are found."} +{"repo": "transformers", "function": "def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[GitProcessorKwargs]) -> BatchFeature:\n if text is None and images is None:\n raise ValueError('You have to specify either text or images. Both cannot be none.')\n images, text = _validate_images_text_input_order(images, text)\n output_kwargs = self._merge_kwargs(GitProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n data = {}\n if text is not None:\n text_features = self.tokenizer(text, **output_kwargs['text_kwargs'])\n data.update(text_features)\n if images is not None:\n image_features = self.image_processor(images, **output_kwargs['images_kwargs'])\n data.update(image_features)\n return BatchFeature(data=data, tensor_type=output_kwargs['common_kwargs'].get('return_tensors'))", "docstring": "Main method to prepare for the model one or several sequence(s) and image(s). 
This method forwards the `text`\nand `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode\nthe text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to\nCLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring\nof the above two methods for more information.\n\nArgs:\n images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):\n The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n tensor. Both channels-first and channels-last formats are supported.\n text (`TextInput`, `PreTokenizedInput`, `List[TextInput]`, `List[PreTokenizedInput]`, *optional*):\n The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set\n `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors of a particular framework. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return NumPy `np.ndarray` objects.\n - `'jax'`: Return JAX `jnp.ndarray` objects.\n\nReturns:\n [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n\n - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.\n - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when\n `return_attention_mask=True` or if *\"attention_mask\"* is in `self.model_input_names` and if `text` is not\n `None`).\n - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`."} +{"repo": "keras", "function": "class Bidirectional(Layer):\n\n def __init__(self, layer, merge_mode='concat', weights=None, backward_layer=None, **kwargs):\n if not isinstance(layer, Layer):\n raise ValueError(f'Please initialize `Bidirectional` layer with a `keras.layers.Layer` instance. Received: {layer}')\n if backward_layer is not None and (not isinstance(backward_layer, Layer)):\n raise ValueError(f'`backward_layer` needs to be a `keras.layers.Layer` instance. Received: {backward_layer}')\n if merge_mode not in ['sum', 'mul', 'ave', 'concat', None]:\n raise ValueError(f'Invalid merge mode. Received: {merge_mode}. 
Merge mode should be one of {{\"sum\", \"mul\", \"ave\", \"concat\", None}}')\n super().__init__(**kwargs)\n config = serialization_lib.serialize_keras_object(layer)\n config['config']['name'] = 'forward_' + utils.removeprefix(layer.name, 'forward_')\n self.forward_layer = serialization_lib.deserialize_keras_object(config)\n if backward_layer is None:\n config = serialization_lib.serialize_keras_object(layer)\n config['config']['go_backwards'] = True\n config['config']['name'] = 'backward_' + utils.removeprefix(layer.name, 'backward_')\n self.backward_layer = serialization_lib.deserialize_keras_object(config)\n else:\n self.backward_layer = backward_layer\n self._verify_layer_config()\n\n def force_zero_output_for_mask(layer):\n if getattr(layer, 'zero_output_for_mask', None) is not None:\n layer.zero_output_for_mask = layer.return_sequences\n force_zero_output_for_mask(self.forward_layer)\n force_zero_output_for_mask(self.backward_layer)\n self.merge_mode = merge_mode\n if weights:\n nw = len(weights)\n self.forward_layer.initial_weights = weights[:nw // 2]\n self.backward_layer.initial_weights = weights[nw // 2:]\n self.stateful = layer.stateful\n self.return_sequences = layer.return_sequences\n self.return_state = layer.return_state\n self.supports_masking = True\n self.input_spec = layer.input_spec\n\n def _verify_layer_config(self):\n \"\"\"Ensure the forward and backward layers have valid common property.\"\"\"\n if self.forward_layer.go_backwards == self.backward_layer.go_backwards:\n raise ValueError(f'Forward layer and backward layer should have different `go_backwards` value. Received: forward_layer.go_backwards {self.forward_layer.go_backwards}, backward_layer.go_backwards={self.backward_layer.go_backwards}')\n common_attributes = ('stateful', 'return_sequences', 'return_state')\n for a in common_attributes:\n forward_value = getattr(self.forward_layer, a)\n backward_value = getattr(self.backward_layer, a)\n if forward_value != backward_value:\n raise ValueError(f'Forward layer and backward layer are expected to have the same value for attribute \"{a}\", got \"{forward_value}\" for forward layer and \"{backward_value}\" for backward layer')\n\n def compute_output_shape(self, sequences_shape, initial_state_shape=None):\n output_shape = self.forward_layer.compute_output_shape(sequences_shape)\n if self.return_state:\n output_shape, state_shape = (output_shape[0], output_shape[1:])\n if self.merge_mode == 'concat':\n output_shape = list(output_shape)\n output_shape[-1] *= 2\n output_shape = tuple(output_shape)\n elif self.merge_mode is None:\n output_shape = [output_shape, output_shape]\n if self.return_state:\n if self.merge_mode is None:\n return tuple(output_shape) + state_shape + state_shape\n return tuple([output_shape]) + state_shape + state_shape\n return tuple(output_shape)\n\n def call(self, sequences, initial_state=None, mask=None, training=None):\n kwargs = {}\n if self.forward_layer._call_has_training_arg:\n kwargs['training'] = training\n if self.forward_layer._call_has_mask_arg:\n kwargs['mask'] = mask\n if initial_state is not None:\n forward_inputs, backward_inputs = (sequences, sequences)\n half = len(initial_state) // 2\n forward_state = initial_state[:half]\n backward_state = initial_state[half:]\n else:\n forward_inputs, backward_inputs = (sequences, sequences)\n forward_state, backward_state = (None, None)\n y = self.forward_layer(forward_inputs, initial_state=forward_state, **kwargs)\n y_rev = self.backward_layer(backward_inputs, initial_state=backward_state, 
**kwargs)\n if self.return_state:\n states = tuple(y[1:] + y_rev[1:])\n y = y[0]\n y_rev = y_rev[0]\n y = ops.cast(y, self.compute_dtype)\n y_rev = ops.cast(y_rev, self.compute_dtype)\n if self.return_sequences:\n y_rev = ops.flip(y_rev, axis=1)\n if self.merge_mode == 'concat':\n output = ops.concatenate([y, y_rev], axis=-1)\n elif self.merge_mode == 'sum':\n output = y + y_rev\n elif self.merge_mode == 'ave':\n output = (y + y_rev) / 2\n elif self.merge_mode == 'mul':\n output = y * y_rev\n elif self.merge_mode is None:\n output = (y, y_rev)\n else:\n raise ValueError(f'Unrecognized value for `merge_mode`. Received: {self.merge_mode}. Expected one of {{\"concat\", \"sum\", \"ave\", \"mul\"}}.')\n if self.return_state:\n if self.merge_mode is None:\n return output + states\n return (output,) + states\n return output\n\n def reset_states(self):\n self.reset_state()\n\n def reset_state(self):\n if not self.stateful:\n raise AttributeError('Layer must be stateful.')\n self.forward_layer.reset_state()\n self.backward_layer.reset_state()\n\n @property\n def states(self):\n if self.forward_layer.states and self.backward_layer.states:\n return tuple(self.forward_layer.states + self.backward_layer.states)\n return None\n\n def build(self, sequences_shape, initial_state_shape=None):\n if not self.forward_layer.built:\n self.forward_layer.build(sequences_shape)\n if not self.backward_layer.built:\n self.backward_layer.build(sequences_shape)\n\n def compute_mask(self, _, mask):\n if isinstance(mask, list):\n mask = mask[0]\n if self.return_sequences:\n if not self.merge_mode:\n output_mask = (mask, mask)\n else:\n output_mask = mask\n else:\n output_mask = (None, None) if not self.merge_mode else None\n if self.return_state and self.states is not None:\n state_mask = tuple(None for _ in self.states)\n if isinstance(output_mask, list):\n return output_mask + state_mask * 2\n return (output_mask,) + state_mask * 2\n return output_mask\n\n def get_config(self):\n config = {'merge_mode': self.merge_mode}\n config['layer'] = serialization_lib.serialize_keras_object(self.forward_layer)\n config['backward_layer'] = serialization_lib.serialize_keras_object(self.backward_layer)\n base_config = super().get_config()\n return {**base_config, **config}\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n config = copy.deepcopy(config)\n config['layer'] = serialization_lib.deserialize_keras_object(config['layer'], custom_objects=custom_objects)\n backward_layer_config = config.pop('backward_layer', None)\n if backward_layer_config is not None:\n backward_layer = serialization_lib.deserialize_keras_object(backward_layer_config, custom_objects=custom_objects)\n config['backward_layer'] = backward_layer\n layer = cls(**config)\n return layer", "docstring": "Bidirectional wrapper for RNNs.\n\nArgs:\n layer: `keras.layers.RNN` instance, such as\n `keras.layers.LSTM` or `keras.layers.GRU`.\n It could also be a `keras.layers.Layer` instance\n that meets the following criteria:\n 1. Be a sequence-processing layer (accepts 3D+ inputs).\n 2. Have a `go_backwards`, `return_sequences` and `return_state`\n attribute (with the same semantics as for the `RNN` class).\n 3. Have an `input_spec` attribute.\n 4. 
Implement serialization via `get_config()` and `from_config()`.\n Note that the recommended way to create new RNN layers is to write a\n custom RNN cell and use it with `keras.layers.RNN`, instead of\n subclassing `keras.layers.Layer` directly.\n When `return_sequences` is `True`, the output of the masked\n timestep will be zero regardless of the layer's original\n `zero_output_for_mask` value.\n merge_mode: Mode by which outputs of the forward and backward RNNs\n will be combined. One of `{\"sum\", \"mul\", \"concat\", \"ave\", None}`.\n If `None`, the outputs will not be combined,\n they will be returned as a list. Defaults to `\"concat\"`.\n backward_layer: Optional `keras.layers.RNN`,\n or `keras.layers.Layer` instance to be used to handle\n backwards input processing.\n If `backward_layer` is not provided, the layer instance passed\n as the `layer` argument will be used to generate the backward layer\n automatically.\n Note that the provided `backward_layer` layer should have properties\n matching those of the `layer` argument, in particular\n it should have the same values for `stateful`, `return_states`,\n `return_sequences`, etc. In addition, `backward_layer`\n and `layer` should have different `go_backwards` argument values.\n A `ValueError` will be raised if these requirements are not met.\n\nCall arguments:\n The call arguments for this layer are the same as those of the\n wrapped RNN layer. Beware that when passing the `initial_state`\n argument during the call of this layer, the first half in the\n list of elements in the `initial_state` list will be passed to\n the forward RNN call and the last half in the list of elements\n will be passed to the backward RNN call.\n\nNote: instantiating a `Bidirectional` layer from an existing RNN layer\ninstance will not reuse the weights state of the RNN layer instance -- the\n`Bidirectional` layer will have freshly initialized weights.\n\nExamples:\n\n```python\nmodel = Sequential([\n Input(shape=(5, 10)),\n Bidirectional(LSTM(10, return_sequences=True)),\n Bidirectional(LSTM(10)),\n Dense(5, activation=\"softmax\"),\n])\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n\n# With custom backward layer\nforward_layer = LSTM(10, return_sequences=True)\nbackward_layer = LSTM(10, activation='relu', return_sequences=True,\n go_backwards=True)\nmodel = Sequential([\n Input(shape=(5, 10)),\n Bidirectional(forward_layer, backward_layer=backward_layer),\n Dense(5, activation=\"softmax\"),\n])\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop')\n```"} +{"repo": "tensorflow", "function": "def depthwise_conv2d_v2(input, filter, strides, padding, data_format=None, dilations=None, name=None):\n return depthwise_conv2d(input=input, filter=filter, strides=strides, padding=padding, rate=dilations, name=name, data_format=data_format)", "docstring": "Depthwise 2-D convolution.\n\nGiven a 4D input tensor ('NHWC' or 'NCHW' data formats)\nand a filter tensor of shape\n`[filter_height, filter_width, in_channels, channel_multiplier]`\ncontaining `in_channels` convolutional filters of depth 1, `depthwise_conv2d`\napplies a different filter to each input channel (expanding from 1 channel\nto `channel_multiplier` channels for each), then concatenates the results\ntogether. 
The output has `in_channels * channel_multiplier` channels.\n\nIn detail, with the default NHWC format,\n\n output[b, i, j, k * channel_multiplier + q] =\n sum_{di, dj} filter[di, dj, k, q] *\n input[b, strides[1] * i + dilations[0] * di,\n strides[2] * j + dilations[1] * dj, k]\n\nMust have `strides[0] = strides[3] = 1`. For the most common case of the\nsame horizontal and vertical strides, `strides = [1, stride, stride, 1]`.\nIf any value in `dilations` is greater than 1, we perform atrous depthwise\nconvolution, in which case all values in the `strides` tensor must be equal\nto 1.\n\nUsage Example:\n\n>>> x = np.array([\n... [1., 2.],\n... [3., 4.],\n... [5., 6.]\n... ], dtype=np.float32).reshape((1, 3, 2, 1))\n>>> kernel = np.array([\n... [1., 2.],\n... [3., 4]\n... ], dtype=np.float32).reshape((2, 1, 1, 2))\n>>> tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],\n... padding='VALID').numpy()\n array([[[[10., 14.],\n [14., 20.]],\n [[18., 26.],\n [22., 32.]]]], dtype=float32)\n\n>>> tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1],\n... padding=[[0, 0], [1, 0], [1, 0], [0, 0]]).numpy()\n array([[[[ 0., 0.],\n [ 3., 4.],\n [ 6., 8.]],\n [[ 0., 0.],\n [10., 14.],\n [14., 20.]],\n [[ 0., 0.],\n [18., 26.],\n [22., 32.]]]], dtype=float32)\n\nArgs:\n input: 4-D with shape according to `data_format`.\n filter: 4-D with shape\n `[filter_height, filter_width, in_channels, channel_multiplier]`.\n strides: 1-D of size 4. The stride of the sliding window for each\n dimension of `input`.\n padding: Controls how to pad the image before applying the convolution. Can\n be the string `\"SAME\"` or `\"VALID\"` indicating the type of padding\n algorithm to use, or a list indicating the explicit paddings at the start\n and end of each dimension. See\n [here](https://www.tensorflow.org/api_docs/python/tf/nn#notes_on_padding_2)\n for more information. When explicit padding is used and data_format\n is `\"NHWC\"`, this should be in the form `[[0, 0], [pad_top, pad_bottom],\n [pad_left, pad_right], [0, 0]]`. When explicit padding used and\n data_format is `\"NCHW\"`, this should be in the form `[[0, 0], [0, 0],\n [pad_top, pad_bottom], [pad_left, pad_right]]`.\n data_format: The data format for input. Either \"NHWC\" (default) or \"NCHW\".\n dilations: 1-D of size 2. The dilation rate in which we sample input values\n across the `height` and `width` dimensions in atrous convolution. If it is\n greater than 1, then all values of strides must be 1.\n name: A name for this operation (optional).\n\nReturns:\n A 4-D `Tensor` with shape according to `data_format`. 
E.g., for\n \"NHWC\" format, shape is\n `[batch, out_height, out_width, in_channels * channel_multiplier].`"} +{"repo": "weather-tools", "function": "class MarsClient(Client):\n\n def retrieve(self, dataset: str, selection: t.Dict, manifest: Manifest) -> None:\n c = MARSECMWFServiceExtended('mars', key=os.environ.get('CLIENT_KEY'), url=os.environ.get('CLIENT_URL'), email=os.environ.get('CLIENT_EMAIL'), log=self.logger.debug, verbose=True)\n selection_ = optimize_selection_partition(selection)\n with StdoutLogger(self.logger, level=logging.DEBUG):\n manifest.set_stage(Stage.FETCH)\n precise_fetch_start_time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat(timespec='seconds')\n manifest.prev_stage_precise_start_time = precise_fetch_start_time\n result = c.fetch(req=selection_)\n return result\n\n @property\n def license_url(self):\n return 'https://apps.ecmwf.int/datasets/licences/general/'\n\n @classmethod\n def num_requests_per_key(cls, dataset: str) -> int:\n \"\"\"Number of requests per key (or user) for the Mars API.\n\n Mars allows 2 active requests per user and 20 queued requests per user, as of Sept 27, 2021.\n To ensure we never hit a rate limit error during download, we only make use of the active\n requests.\n See: https://confluence.ecmwf.int/display/UDOC/Total+number+of+requests+a+user+can+submit+-+Web+API+FAQ\n\n Queued requests can _only_ be canceled manually from a web dashboard. If the\n `ERROR 101 (USER_QUEUED_LIMIT_EXCEEDED)` error occurs in the Beam pipeline, then go to\n http://apps.ecmwf.int/webmars/joblist/ and cancel queued jobs.\n \"\"\"\n return 2", "docstring": "A client to access data from the Meteorological Archival and Retrieval System (MARS).\n\nSee https://www.ecmwf.int/en/forecasts/datasets for a summary of datasets available\non MARS. Most notably, MARS provides access to ECMWF's Operational Archive\nhttps://www.ecmwf.int/en/forecasts/dataset/operational-archive.\n\nThe client config must contain three parameters to authenticate access to the MARS archive:\n`api_key`, `api_url`, and `api_email`. These can also be configured by setting the\ncommensurate environment variables: `MARSAPI_KEY`, `MARSAPI_URL`, and `MARSAPI_EMAIL`.\nThese credentials can be looked up after registering for an ECMWF account\n(https://apps.ecmwf.int/registration/) and visiting: https://api.ecmwf.int/v1/key/.\n\nMARS server activity can be observed at https://apps.ecmwf.int/mars-activity/.\n\nAttributes:\n config: A config that contains pipeline parameters, such as API keys.\n level: Default log level for the client."} +{"repo": "pytype", "function": "def GetSubClasses():\n return utils.invert_dict(GetSuperClasses())", "docstring": "Get a reverse Python type hierarchy mapping.\n\nThis generates a dictionary that can be used to look up the (known)\nsubclasses of a type in the abstract base class hierarchy.\n\nReturns:\n A dictionary mapping a type, as string, to a list of direct\n subclasses (also as strings).\n E.g. 
\"Sized\" -> [\"Set\", \"Mapping\", \"MappingView\", \"Sequence\"]."} +{"repo": "tensorflow", "function": "def scatter_nd_min(self, indices, updates, name=None):\n return self._lazy_read(gen_state_ops.resource_scatter_nd_min(self.handle, indices, ops.convert_to_tensor(updates, self.dtype), name=name))", "docstring": "Updates this variable with the min of `tf.IndexedSlices` and itself.\n\n`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n`indices` must be integer tensor, containing indices into `ref`.\nIt must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\nThe innermost dimension of `indices` (with length `K`) corresponds to\nindices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\ndimension of `ref`.\n\n`updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n```\n[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n```\n\nSee `tf.scatter_nd` for more details about how to make updates to\nslices.\n\nArgs:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\nReturns:\n The updated variable."} +{"repo": "tensorflow", "function": "def compute_output_signature(self, input_signature):\n\n def check_type_return_shape(s):\n if not isinstance(s, tensor_lib.TensorSpec):\n raise TypeError('Only TensorSpec signature types are supported, but saw signature entry: {}.'.format(s))\n return s.shape\n input_shape = nest.map_structure(check_type_return_shape, input_signature)\n output_shape = self.compute_output_shape(input_shape)\n dtype = self._compute_dtype\n if dtype is None:\n input_dtypes = [s.dtype for s in nest.flatten(input_signature)]\n dtype = input_dtypes[0]\n return nest.map_structure(lambda s: tensor_lib.TensorSpec(dtype=dtype, shape=s), output_shape)", "docstring": "Compute the output tensor signature of the layer based on the inputs.\n\nUnlike a TensorShape object, a TensorSpec object contains both shape\nand dtype information for a tensor. 
This method allows layers to provide\noutput dtype information if it is different from the input dtype.\nFor any layer that doesn't implement this function,\nthe framework will fall back to use `compute_output_shape`, and will\nassume that the output dtype matches the input dtype.\n\nArgs:\n input_signature: Single TensorSpec or nested structure of TensorSpec\n objects, describing a candidate input for the layer.\n\nReturns:\n Single TensorSpec or nested structure of TensorSpec objects, describing\n how the layer would transform the provided input.\n\nRaises:\n TypeError: If input_signature contains a non-TensorSpec object."} +{"repo": "beam", "function": "class MaxScore(ScoreAggregation):\n\n def __init__(self, **kwargs):\n super().__init__(agg_func=max, **kwargs)", "docstring": "Aggregates anomaly scores by selecting the maximum score.\n\nThis `AggregationFn` selects the highest anomaly score from a collection\nof `AnomalyPrediction` objects as the aggregated score.\n\nArgs:\n **kwargs: Additional keyword arguments to pass to the base\n `ScoreAggregation` class."} +{"repo": "transformers", "function": "def sample_points_using_uncertainty(self, logits: torch.Tensor, uncertainty_function, num_points: int, oversample_ratio: int, importance_sample_ratio: float) -> torch.Tensor:\n num_boxes = logits.shape[0]\n num_points_sampled = int(num_points * oversample_ratio)\n point_coordinates = torch.rand(num_boxes, num_points_sampled, 2, device=logits.device)\n point_logits = sample_point(logits, point_coordinates, align_corners=False)\n point_uncertainties = uncertainty_function(point_logits)\n num_uncertain_points = int(importance_sample_ratio * num_points)\n num_random_points = num_points - num_uncertain_points\n idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]\n shift = num_points_sampled * torch.arange(num_boxes, dtype=torch.long, device=logits.device)\n idx += shift[:, None]\n point_coordinates = point_coordinates.view(-1, 2)[idx.view(-1), :].view(num_boxes, num_uncertain_points, 2)\n if num_random_points > 0:\n point_coordinates = torch.cat([point_coordinates, torch.rand(num_boxes, num_random_points, 2, device=logits.device)], dim=1)\n return point_coordinates", "docstring": "This function is meant for sampling points in [0, 1] * [0, 1] coordinate space based on their uncertainty. 
The\nuncertainty is calculated for each point using the passed `uncertainty_function` that takes point logit\npredictions as input.\n\nArgs:\n logits (`float`):\n Logit predictions for P points.\n uncertainty_function:\n A function that takes logit predictions for P points and returns their uncertainties.\n num_points (`int`):\n The number of points P to sample.\n oversample_ratio (`int`):\n Oversampling parameter.\n importance_sample_ratio (`float`):\n Ratio of points that are sampled via importance sampling.\n\nReturns:\n point_coordinates (`torch.Tensor`):\n Coordinates for P sampled points."} +{"repo": "transformers", "function": "class CpmAntTokenizer(PreTrainedTokenizer):\n vocab_files_names = VOCAB_FILES_NAMES\n model_input_names = ['input_ids', 'attention_mask']\n add_prefix_space = False\n\n def __init__(self, vocab_file, bod_token='<d>', eod_token='</d>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<unk>', line_token='</n>', space_token='</_>', padding_side='left', **kwargs):\n requires_backends(self, ['jieba'])\n self.bod_token = bod_token\n self.eod_token = eod_token\n self.encoder = load_vocab(vocab_file)\n self.encoder[' '] = self.encoder[space_token]\n self.encoder['\\n'] = self.encoder[line_token]\n del self.encoder[space_token]\n del self.encoder[line_token]\n self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=unk_token)\n super().__init__(bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs)\n\n @property\n def bod_token_id(self):\n return self.encoder[self.bod_token]\n\n @property\n def eod_token_id(self):\n return self.encoder[self.eod_token]\n\n @property\n def newline_id(self):\n return self.encoder['\\n']\n\n @property\n def vocab_size(self) -> int:\n return len(self.encoder)\n\n def get_vocab(self):\n return dict(self.encoder, **self.added_tokens_encoder)\n\n def _tokenize(self, text):\n \"\"\"Tokenize a string.\"\"\"\n output_tokens = []\n for x in jieba.cut(text, cut_all=False):\n output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))\n return output_tokens\n\n def _decode(self, token_ids, **kwargs):\n \"\"\"Decode ids into a string.\"\"\"\n token_ids = [i for i in token_ids if i >= 0]\n token_ids = [x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and (x != self.bos_token_id)]\n return super()._decode(token_ids, **kwargs)\n\n def check(self, token):\n return token in self.encoder\n\n def convert_tokens_to_string(self, tokens: List[str]) -> str:\n return ''.join(tokens)\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.encoder.get(token, self.encoder.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.decoder.get(index, self.unk_token)\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:\n if os.path.isdir(save_directory):\n vocab_file = os.path.join(save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])\n else:\n vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory\n index = 0\n if ' ' in self.encoder:\n self.encoder['</_>'] = 
self.encoder[' ']\n del self.encoder[' ']\n if '\\n' in self.encoder:\n self.encoder['</n>'] = self.encoder['\\n']\n del self.encoder['\\n']\n self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))\n with open(vocab_file, 'w', encoding='utf-8') as writer:\n for token, token_index in self.encoder.items():\n if index != token_index:\n logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')\n index = token_index\n writer.write(token + '\\n')\n index += 1\n return (vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:\n \"\"\"\n Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and\n adding special tokens. A CPMAnt sequence has the following format:\n\n - single sequence: `[BOS] Sequence`.\n\n Args:\n token_ids_0 (`List[int]`): The first tokenized sequence to which special tokens will be added.\n token_ids_1 (`List[int]`): The optional second tokenized sequence to which special tokens will be added.\n\n Returns:\n `List[int]`: The model input with special tokens.\n \"\"\"\n if token_ids_1 is None:\n return [self.bos_token_id] + token_ids_0\n return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1\n\n def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`): List of IDs.\n token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)\n if token_ids_1 is not None:\n return [1] + [0] * len(token_ids_0) + [1] + [0] * len(token_ids_1)\n return [1] + [0] * len(token_ids_0)", "docstring": "Construct a CPMAnt tokenizer. Based on byte-level Byte-Pair-Encoding.\n\nArgs:\n vocab_file (`str`):\n Path to the vocabulary file.\n bod_token (`str`, *optional*, defaults to `\"<d>\"`):\n The beginning of document token.\n eod_token (`str`, *optional*, defaults to `\"</d>\"`):\n The end of document token.\n bos_token (`str`, *optional*, defaults to `\"<s>\"`):\n The beginning of sequence token.\n eos_token (`str`, *optional*, defaults to `\"</s>\"`):\n The end of sequence token.\n pad_token (`str`, *optional*, defaults to `\"<pad>\"`):\n The token used for padding.\n unk_token (`str`, *optional*, defaults to `\"<unk>\"`):\n The unknown token.\n line_token (`str`, *optional*, defaults to `\"</n>\"`):\n The line token.\n space_token (`str`, *optional*, defaults to `\"</_>\"`):\n The space token."} +{"repo": "tensorflow", "function": "def build(self, per_replica_input_shapes=None, per_replica_batch_size=None):\n if self._built:\n return\n if self._using_tpu:\n if tpu_ops.is_tpu_embedding_initialized():\n raise RuntimeError('TPU is already initialized for embeddings. 
This may be caused by using multiple TPUEmbedding instances in a TPU scope which is unsupported')\n self._get_and_update_output_shapes_from_input(per_replica_input_shapes, per_replica_batch_size)\n self._config_proto = self._create_config_proto()\n logging.info('Initializing TPU Embedding engine.')\n tpu_embedding_v2_utils.log_tpu_embedding_configuration(self._config_proto)\n\n @def_function.function\n def load_config():\n tpu.initialize_system_for_tpu_embedding(self._config_proto)\n load_config()\n logging.info('Done initializing TPU Embedding engine.')\n self._variables = self._create_variables_and_slots()\n self._built = True\n self._load_variables()", "docstring": "Creates the underlying variables and initializes the TPU for embeddings.\n\nThis method creates the underlying variables (including slot variables). If\ncreated under a TPUStrategy, this will also initialize the TPU for\nembeddings.\n\nThis function will automatically get called by enqueue, which will try to\ndetermine your output shapes. If this fails, you must manually\ncall this method before you call enqueue.\n\nArgs:\n per_replica_input_shapes: A nested structure of the per replica input\n shapes that matches the structure of the feature config. The input\n shapes should be the same as the input shape of the feature (except for\n ragged tensors). Note that it is fixed and the same per replica input\n shapes must be used for both training and evaluation. If you want to\n calculate this from the global input shapes, you can use\n `num_replicas_in_sync` property of your strategy object. May be set to\n None if not created under a TPUStrategy.\n per_replica_batch_size: (Deprecated) The per replica batch size that you\n intend to use. Note that it is fixed and the same batch size must be used\n for both training and evaluation. If you want to calculate this from the\n global batch size, you can use `num_replicas_in_sync` property of your\n strategy object. May be set to None if not created under a TPUStrategy.\n\nRaises:\n ValueError: If per_replica_input_shapes is inconsistent with the output\n shapes stored in the feature config or the output shapes computed from the\n input shapes are not fully defined.\n RuntimeError: If tpu embedding is already initialized on TPU."} +{"repo": "tensorflow", "function": "def __init__(self, logdir, max_queue=10, flush_secs=120, filename_suffix=None):\n self._logdir = str(logdir)\n gfile.MakeDirs(self._logdir)\n self._max_queue = max_queue\n self._flush_secs = flush_secs\n self._flush_complete = threading.Event()\n self._flush_sentinel = object()\n self._close_sentinel = object()\n self._ev_writer = _pywrap_events_writer.EventsWriter(compat.as_bytes(os.path.join(self._logdir, 'events')))\n if filename_suffix:\n self._ev_writer.InitWithSuffix(compat.as_bytes(filename_suffix))\n self._initialize()\n self._closed = False", "docstring": "Creates a `EventFileWriter` and an event file to write to.\n\nOn construction the summary writer creates a new event file in `logdir`.\nThis event file will contain `Event` protocol buffers, which are written to\ndisk via the add_event method.\n\nThe other arguments to the constructor control the asynchronous writes to\nthe event file:\n\n* `flush_secs`: How often, in seconds, to flush the added summaries\n and events to disk.\n* `max_queue`: Maximum number of summaries or events pending to be\n written to disk before one of the 'add' calls blocks.\n\nArgs:\n logdir: A string. Directory where event file will be written.\n max_queue: Integer. 
Size of the queue for pending events and summaries.\n flush_secs: Number. How often, in seconds, to flush the\n pending events and summaries to disk.\n filename_suffix: A string. Every event file's name is suffixed with\n `filename_suffix`."} +{"repo": "transformers", "function": "def fn(x: tuple[int, ...]):\n return x", "docstring": "Test function\n\nArgs:\n x: The input\n\n\nReturns:\n The output"} +{"repo": "tensorflow", "function": "def fetches(self):\n return self._final_fetches", "docstring": "Return the unique names of tensors to fetch.\n\nReturns:\n A list of strings."} +{"repo": "transformers", "function": "def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):\n mask = input_ids.ne(padding_idx).int()\n incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask\n return incremental_indices.long() + padding_idx", "docstring": "Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols\nare ignored. This is modified from fairseq's `utils.make_positions`.\n\nArgs:\n input_ids: torch.Tensor\n\nReturns: torch.Tensor"} +{"repo": "tensorflow", "function": "def _assert_tensorlike_all_close(self, sess: session.Session, tensorlike_value_1: core.TensorLike, tensorlike_value_2: core.TensorLike) -> None:\n if isinstance(tensorlike_value_1, core.Tensor):\n tensorlike_value_1 = tensorlike_value_1.eval(session=sess)\n if isinstance(tensorlike_value_2, core.Tensor):\n tensorlike_value_2 = tensorlike_value_2.eval(session=sess)\n self.assertAllClose(tensorlike_value_1, tensorlike_value_2)", "docstring": "Asserts that two different TensorLike values are \"all close\".\n\nArgs:\n sess: Session instance used to evaluate any tf.Tensors.\n tensorlike_value_1: A TensorLike value.\n tensorlike_value_2: A TensorLike value."} +{"repo": "transformers", "function": "def get_size_with_aspect_ratio(image_size: Tuple[int, int], size: int, max_size: Optional[int]=None, mod_size: int=16) -> Tuple[int, int]:\n height, width = image_size\n raw_size = None\n if max_size is not None:\n min_original_size = float(min((height, width)))\n max_original_size = float(max((height, width)))\n if max_original_size / min_original_size * size > max_size:\n raw_size = max_size * min_original_size / max_original_size\n size = int(round(raw_size))\n if width < height:\n ow = size\n if max_size is not None and raw_size is not None:\n oh = int(raw_size * height / width)\n else:\n oh = int(size * height / width)\n elif height <= width and height == size or (width <= height and width == size):\n oh, ow = (height, width)\n else:\n oh = size\n if max_size is not None and raw_size is not None:\n ow = int(raw_size * width / height)\n else:\n ow = int(size * width / height)\n if mod_size is not None:\n ow_mod = np.mod(ow, mod_size)\n oh_mod = np.mod(oh, mod_size)\n ow = ow - ow_mod\n oh = oh - oh_mod\n return (oh, ow)", "docstring": "Computes the output image size given the input image size and the desired output size, rounding each output\ndimension down to a multiple of `mod_size`.\n\nArgs:\n image_size (`Tuple[int, int]`):\n The input image size.\n size (`int`):\n The desired output size.\n max_size (`int`, *optional*):\n The maximum allowed output size.\n mod_size (`int`, *optional*):\n The value that each output dimension is made a multiple of."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.FloatTensor]=None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]]=None, decoder_input_ids: 
Optional[torch.LongTensor]=None, decoder_attention_mask: Optional[torch.BoolTensor]=None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]]=None, context_input_ids: Optional[torch.LongTensor]=None, context_attention_mask: Optional[torch.LongTensor]=None, doc_scores: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, output_retrieved: Optional[bool]=None, do_marginalize: Optional[bool]=None, reduce_loss: Optional[bool]=None, labels: Optional[torch.LongTensor]=None, n_docs: Optional[int]=None, **kwargs) -> RetrievAugLMMarginOutput:\n n_docs = n_docs if n_docs is not None else self.config.n_docs\n do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize\n reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = labels\n use_cache = False\n outputs = self.rag(input_ids=input_ids, attention_mask=attention_mask, encoder_outputs=encoder_outputs, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, context_input_ids=context_input_ids, context_attention_mask=context_attention_mask, doc_scores=doc_scores, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_retrieved=output_retrieved, n_docs=n_docs)\n loss = None\n logits = outputs.logits\n if labels is not None:\n assert decoder_input_ids is not None\n loss = self.get_nll(outputs.logits, outputs.doc_scores, labels, reduce_loss=reduce_loss, epsilon=self.config.label_smoothing, n_docs=n_docs)\n if do_marginalize:\n logits = self.marginalize(logits, outputs.doc_scores, n_docs)\n return RetrievAugLMMarginOutput(loss=loss, logits=logits, doc_scores=outputs.doc_scores, past_key_values=outputs.past_key_values, context_input_ids=outputs.context_input_ids, context_attention_mask=outputs.context_attention_mask, retrieved_doc_embeds=outputs.retrieved_doc_embeds, retrieved_doc_ids=outputs.retrieved_doc_ids, question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state, question_enc_hidden_states=outputs.question_enc_hidden_states, question_enc_attentions=outputs.question_enc_attentions, generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state, generator_enc_hidden_states=outputs.generator_enc_hidden_states, generator_enc_attentions=outputs.generator_enc_attentions, generator_dec_hidden_states=outputs.generator_dec_hidden_states, generator_dec_attentions=outputs.generator_dec_attentions, generator_cross_attentions=outputs.generator_cross_attentions)", "docstring": "input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies\n which generator to use, it also specifies a compatible generator tokenizer. Use that tokenizer class to\n obtain the indices.\n\n [What are input IDs?](../glossary#input-ids)\nencoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*)\n Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,\n *optional*: `generator_enc_attentions`). 
`generator_enc_last_hidden_state` of shape `(batch_size, n_docs *\n sequence_length, hidden_size)` is a sequence of hidden-states at the output of the last layer of the\n generator's encoder.\n\n Used by the ([`RagModel`]) model during decoding.\ndecoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Provide for generation tasks. `None` by default, construct as per instructions for the generator model\n you're using with your RAG instance.\ndecoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default.\ncontext_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):\n Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the\n retriever. If the model was not initialized with a `retriever`, `context_input_ids` has to be provided to\n the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].\ncontext_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):\n Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the\n retriever. If the model was not initialized with a `retriever`, `context_attention_mask` has to be\n provided to the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].\ndoc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):\n Score between each retrieved document embedding (see `retrieved_doc_embeds`) and\n `question_encoder_last_hidden_state`. If the model was not initialized with a `retriever`, `doc_scores`\n has to be provided to the forward pass. `doc_scores` can be computed via\n `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.\noutput_retrieved (`bool`, *optional*):\n Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask`. See returned tensors for more detail.\ndo_marginalize (`bool`, *optional*):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\nreduce_loss (`bool`, *optional*):\n Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`\n operation.\nn_docs (`int`, *optional*):\n The number of documents to retrieve.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration\n>>> import torch\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/rag-token-nq\")\n>>> retriever = RagRetriever.from_pretrained(\n... \"facebook/rag-token-nq\", index_name=\"exact\", use_dummy_dataset=True\n... 
)\n>>> # initialize with RagRetriever to do everything in one forward call\n>>> model = RagTokenForGeneration.from_pretrained(\"facebook/rag-token-nq\", retriever=retriever)\n\n>>> inputs = tokenizer(\"How many people live in Paris?\", return_tensors=\"pt\")\n>>> targets = tokenizer(text_target=\"In Paris, there are 10 million people.\", return_tensors=\"pt\")\n>>> input_ids = inputs[\"input_ids\"]\n>>> labels = targets[\"input_ids\"]\n>>> outputs = model(input_ids=input_ids, labels=labels)\n\n>>> # or use retriever separately\n>>> model = RagTokenForGeneration.from_pretrained(\"facebook/rag-token-nq\", use_dummy_dataset=True)\n>>> # 1. Encode\n>>> question_hidden_states = model.question_encoder(input_ids)[0]\n>>> # 2. Retrieve\n>>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors=\"pt\")\n>>> doc_scores = torch.bmm(\n... question_hidden_states.unsqueeze(1), docs_dict[\"retrieved_doc_embeds\"].float().transpose(1, 2)\n... ).squeeze(1)\n>>> # 3. Forward to generator\n>>> outputs = model(\n... context_input_ids=docs_dict[\"context_input_ids\"],\n... context_attention_mask=docs_dict[\"context_attention_mask\"],\n... doc_scores=doc_scores,\n... decoder_input_ids=labels,\n... )\n\n>>> # or directly generate\n>>> generated = model.generate(\n... context_input_ids=docs_dict[\"context_input_ids\"],\n... context_attention_mask=docs_dict[\"context_attention_mask\"],\n... doc_scores=doc_scores,\n... )\n>>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)\n```"} +{"repo": "tensorflow", "function": "def write_examples(fp, examples):\n\n def write_tensor(fp, name, x):\n \"\"\"Write tensor in file format supported by TFLITE example.\"\"\"\n fp.write('name,%s\\n' % name)\n fp.write('dtype,%s\\n' % x.dtype)\n fp.write('shape,' + ','.join(map(str, x.shape)) + '\\n')\n fp.write('values,' + format_result(x) + '\\n')\n fp.write('test_cases,%d\\n' % len(examples))\n for example in examples:\n fp.write('inputs,%d\\n' % len(example['inputs']))\n for name, value in example['inputs'].items():\n if value is not None:\n write_tensor(fp, name, value)\n fp.write('outputs,%d\\n' % len(example['outputs']))\n for name, value in example['outputs'].items():\n write_tensor(fp, name, value)", "docstring": "Given a list `examples`, write a text format representation.\n\nThe file format is csv like with a simple repeated pattern. 
We would like\nto use proto here, but we can't yet due to interfacing with the Android\nteam using this format.\n\nArgs:\n fp: File-like object to write to.\n examples: Example dictionary consisting of keys \"inputs\" and \"outputs\""} +{"repo": "transformers", "function": "def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, encoder_hidden_states: Optional[torch.Tensor]=None, encoder_attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, cross_attn_layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Cache]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True, cache_position: Optional[torch.Tensor]=None) -> torch.Tensor:\n residual = hidden_states\n hidden_states = self.self_attn_layer_norm(hidden_states)\n hidden_states, self_attn_weights, past_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, cache_position=cache_position)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n cross_attn_weights = None\n if encoder_hidden_states is not None:\n residual = hidden_states\n hidden_states = self.encoder_attn_layer_norm(hidden_states)\n hidden_states, cross_attn_weights, past_key_value = self.encoder_attn(hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=past_key_value, output_attentions=output_attentions)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n residual = hidden_states\n hidden_states = self.final_layer_norm(hidden_states)\n hidden_states = self.activation_fn(self.fc1(hidden_states))\n hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)\n hidden_states = self.fc2(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = residual + hidden_states\n outputs = (hidden_states,)\n if output_attentions:\n outputs += (self_attn_weights, cross_attn_weights)\n if use_cache:\n outputs += (past_key_value,)\n return outputs", "docstring": "Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n encoder_hidden_states (`torch.FloatTensor`):\n cross attention input to the layer of shape `(batch, seq_len, embed_dim)`\n encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size\n `(encoder_attention_heads,)`.\n cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of\n size `(decoder_attention_heads,)`.\n past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail.\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. It is used to update the\n cache in the correct position and to infer the complete sequence length."} +{"repo": "tensorflow", "function": "def retrieve_from_golden():\n out_dict = dict()\n with open(CUDA_CC_GOLDEN_DIR) as g_file:\n for line in g_file:\n line_items = line.split(',')\n val_list = []\n for item in line_items[1:]:\n val_list.append(item.strip('\\n'))\n out_dict[line_items[0]] = val_list\n return out_dict", "docstring": "Retrieves a list of all CUDA compute capabilities from a golden file.\n\nThe following file is set as default:\n `./golden/compute_capability_golden.csv`\n\nReturns:\n Dictionary that lists all CUDA compute capabilities in the following\n format:\n {'<gpu_name>': ['<major>.<minor>', ...], ...}\n\n If there are multiple versions available for a given GPU, then it\n appends all supported versions in the value list (in the key-value\n pair)."} +{"repo": "tensorflow", "function": "def _get_main_op_tensor(meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY):\n collection_def = meta_graph_def_to_load.collection_def\n init_op = None\n if init_op_key in collection_def:\n init_op_list = collection_def[init_op_key].node_list.value\n if len(init_op_list) != 1:\n raise RuntimeError(f'Expected exactly one SavedModel init op. Found {len(init_op_list)}: {init_op_list}.')\n init_op = ops.get_collection(init_op_key)[0]\n return init_op", "docstring": "Gets the main op tensor, if one exists.\n\nArgs:\n meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.\n init_op_key: name of the collection to check; should be one of MAIN_OP_KEY\n or the deprecated LEGACY_INIT_OP_KEY\n\nReturns:\n The main op tensor, if it exists and `None` otherwise.\n\nRaises:\n RuntimeError: If the collection def corresponding to the main op key has\n other than exactly one tensor."} +{"repo": "tensorflow", "function": "def set_tensor_shapes(tensors, shapes):\n if shapes:\n tensor_names_to_tensor = {get_tensor_name(tensor): tensor for tensor in tensors}\n for name, shape in shapes.items():\n if name not in tensor_names_to_tensor:\n raise ValueError(\"Invalid tensor '{}' found in tensor shapes map.\".format(name))\n if shape is not None:\n tensor = tensor_names_to_tensor[name]\n try:\n tensor.set_shape(shape)\n except ValueError as error:\n message = \"The shape of tensor '{0}' cannot be changed from {1} to {2}. {3}\".format(name, tensor.shape, shape, str(error))\n raise ValueError(message)", "docstring": "Sets Tensor shape for each tensor if the shape is defined.\n\nArgs:\n tensors: TensorFlow tensor.Tensor.\n shapes: Dict of strings representing input tensor names to list of\n integers representing input shapes (e.g., {\"foo\": [1, 16, 16, 3]}).\n\nRaises:\n ValueError:\n `shapes` contains an invalid tensor.\n `shapes` contains an invalid shape for a valid tensor."} +{"repo": "keras", "function": "def add(inputs, **kwargs):\n return Add(**kwargs)(inputs)", "docstring": "Functional interface to the `keras.layers.Add` layer.\n\nArgs:\n inputs: A list of input tensors with the same shape.\n **kwargs: Standard layer keyword arguments.\n\nReturns:\n A tensor as the sum of the inputs. 
It has the same shape as the inputs.\n\nExamples:\n\n>>> input_shape = (2, 3, 4)\n>>> x1 = np.random.rand(*input_shape)\n>>> x2 = np.random.rand(*input_shape)\n>>> y = keras.layers.add([x1, x2])\n\nUsage in a Keras model:\n\n>>> input1 = keras.layers.Input(shape=(16,))\n>>> x1 = keras.layers.Dense(8, activation='relu')(input1)\n>>> input2 = keras.layers.Input(shape=(32,))\n>>> x2 = keras.layers.Dense(8, activation='relu')(input2)\n>>> added = keras.layers.add([x1, x2])\n>>> out = keras.layers.Dense(4)(added)\n>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)"} +{"repo": "tensorflow", "function": "def get_chief_queue_runner(self):\n if self._gradients_applied is False:\n raise ValueError('Should be called after apply_gradients().')\n return self._chief_queue_runner", "docstring": "Returns the QueueRunner for the chief to execute.\n\nThis includes the operations to synchronize replicas: aggregate gradients,\napply to variables, increment global step, insert tokens to token queue.\n\nNote that this can only be called after calling apply_gradients() which\nactually generates this queuerunner.\n\nReturns:\n A `QueueRunner` for chief to execute.\n\nRaises:\n ValueError: If this is called before apply_gradients()."} +{"repo": "tensorflow", "function": "def numpy_function(func=None, inp=None, Tout=None, stateful=True, name=None):\n decorator = _check_args_and_maybe_make_decorator(numpy_function, 'tf.numpy_function', func=func, inp=inp, Tout=Tout, stateful=stateful, name=name)\n if decorator is not None:\n return decorator\n return py_func_common(func, inp, Tout, stateful=stateful, name=name)", "docstring": "Wraps a python function and uses it as a TensorFlow op.\n\nGiven a python function `func` wrap this function as an operation in a\n`tf.function`. `func` must take numpy arrays as its arguments and\nreturn numpy arrays as its outputs.\n\nThere are two ways to use `tf.numpy_function`.\n\n### As a decorator\n\nWhen using `tf.numpy_function` as a decorator:\n\n* you must set `Tout`\n* you may set `name`\n* you must not set `func` or `inp`\n\n>>> @tf.numpy_function(Tout=tf.float32)\n... def my_numpy_func(x):\n... # x will be a numpy array with the contents of the input to the\n... # tf.function\n... print(f'executing eagerly, {x=}')\n... return np.sinh(x)\n\nThe function runs eagerly:\n\n>>> my_numpy_func(1.0).numpy()\nexecuting eagerly, x=1.0\n1.17520\n\nThe behavior doesn't change inside a `tf.function`:\n\n>>> @tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])\n... def tf_function(input):\n... y = tf.numpy_function(my_numpy_func, [input], tf.float32)\n... return y\n>>> tf_function(tf.constant(1.)).numpy()\nexecuting eagerly, x=array(1.)\n1.17520\n\n### Inplace\n\nThis form can be useful if you don't control the function's source,\nbut it is harder to read.\n\nHere is the same function with no decorator:\n\n>>> def my_func(x):\n... # x will be a numpy array with the contents of the input to the\n... # tf.function\n... print(f'executing eagerly, {x=}')\n... return np.sinh(x)\n\nTo run `tf.numpy_function` in-place, pass the function, its inputs, and the\noutput type in a single call to `tf.numpy_function`:\n\n>>> tf.numpy_function(my_func, [tf.constant(1.0)], tf.float32)\nexecuting eagerly, x=array(1.)\n1.17520\n\n### More info\n\nComparison to `tf.py_function`:\n`tf.py_function` and `tf.numpy_function` are very similar, except that\n`tf.numpy_function` takes numpy arrays, and not `tf.Tensor`s. 
If you want the\nfunction to contain `tf.Tensors`, and have any TensorFlow operations executed\nin the function be differentiable, please use `tf.py_function`.\n\nNote: We recommend avoiding `tf.numpy_function` outside of\nprototyping and experimentation due to the following known limitations:\n\n* Calling `tf.numpy_function` will acquire the Python Global Interpreter Lock\n (GIL) that allows only one thread to run at any point in time. This will\n preclude efficient parallelization and distribution of the execution of the\n program. Therefore, you are discouraged from using `tf.numpy_function` outside\n of prototyping and experimentation.\n\n* The body of the function (i.e. `func`) will not be serialized in a\n `tf.SavedModel`. Therefore, you should not use this function if you need to\n serialize your model and restore it in a different environment.\n\n* The operation must run in the same address space as the Python program\n that calls `tf.numpy_function()`. If you are using distributed\n TensorFlow, you must run a `tf.distribute.Server` in the same process as the\n program that calls `tf.numpy_function`, and you must pin the created\n operation to a device in that server (e.g. using `with tf.device():`).\n\n* Currently `tf.numpy_function` is not compatible with XLA. Calling\n `tf.numpy_function` inside `tf.function(jit_compile=True)` will raise an\n error.\n\n* Since the function takes numpy arrays, you cannot take gradients\n through a numpy_function. If you require something that is differentiable,\n please consider using tf.py_function.\n\nArgs:\n func: A Python function, which accepts `numpy.ndarray` objects as arguments\n and returns a list of `numpy.ndarray` objects (or a single\n `numpy.ndarray`). This function must accept as many arguments as there are\n tensors in `inp`, and these argument types will match the corresponding\n `tf.Tensor` objects in `inp`. The returned `numpy.ndarray`s must match the\n number and types defined in `Tout`. Important Note: Input and output\n `numpy.ndarray`s of `func` are not guaranteed to be copies. In some cases\n their underlying memory will be shared with the corresponding TensorFlow\n tensors. In-place modification or storing `func` input or return values in\n python datastructures without explicit (np.)copy can have\n non-deterministic consequences.\n inp: A list of `tf.Tensor` objects.\n Tout: A list or tuple of tensorflow data types or a single tensorflow data\n type if there is only one, indicating what `func` returns.\n stateful: (Boolean.) Setting this argument to False tells the runtime to\n treat the function as stateless, which enables certain optimizations. A\n function is stateless when given the same input it will return the same\n output and have no side effects; its only purpose is to have a return\n value. The behavior for a stateful function with the `stateful` argument\n False is undefined. 
In particular, caution should be taken when mutating\n the input arguments as this is a stateful operation.\n name: (Optional) A name for the operation.\n\nReturns:\n * If `func` is `None` this returns a decorator that will ensure the\n decorated function will always run with eager execution even if called\n from a `tf.function`/`tf.Graph`.\n * If used `func` is not `None` this executes `func` with eager execution\n and returns the result: A single or list of `tf.Tensor` which `func`\n computes."} +{"repo": "keras", "function": "def det(x):\n if any_symbolic_tensors((x,)):\n return Det().symbolic_call(x)\n return _det(x)", "docstring": "Computes the determinant of a square tensor.\n\nArgs:\n x: Input tensor of shape `(..., M, M)`.\n\nReturns:\n A tensor of shape `(...,)` representing the determinant of `x`."} +{"repo": "beam", "function": "def __init__(self, pipeline_options):", "docstring": "Args:\n pipeline_options: Instance of ``PipelineOptions`` or dict of options and\n values (like ``RuntimeValueProvider.runtime_options``)."} +{"repo": "transformers", "function": "def mel_to_hertz(mels: Union[float, np.ndarray], mel_scale: str='htk') -> Union[float, np.ndarray]:\n if mel_scale not in ['slaney', 'htk', 'kaldi']:\n raise ValueError('mel_scale should be one of \"htk\", \"slaney\" or \"kaldi\".')\n if mel_scale == 'htk':\n return 700.0 * (np.power(10, mels / 2595.0) - 1.0)\n elif mel_scale == 'kaldi':\n return 700.0 * (np.exp(mels / 1127.0) - 1.0)\n min_log_hertz = 1000.0\n min_log_mel = 15.0\n logstep = np.log(6.4) / 27.0\n freq = 200.0 * mels / 3.0\n if isinstance(mels, np.ndarray):\n log_region = mels >= min_log_mel\n freq[log_region] = min_log_hertz * np.exp(logstep * (mels[log_region] - min_log_mel))\n elif mels >= min_log_mel:\n freq = min_log_hertz * np.exp(logstep * (mels - min_log_mel))\n return freq", "docstring": "Convert frequency from mels to hertz.\n\nArgs:\n mels (`float` or `np.ndarray`):\n The frequency, or multiple frequencies, in mels.\n mel_scale (`str`, *optional*, `\"htk\"`):\n The mel frequency scale to use, `\"htk\"`, `\"kaldi\"` or `\"slaney\"`.\n\nReturns:\n `float` or `np.ndarray`: The frequencies in hertz."} +{"repo": "tensorflow", "function": "def _handle_per_output_metrics(self, metrics_dict, y_true, y_pred, mask, weights=None):\n metric_results = []\n for metric_name, metric_fn in metrics_dict.items():\n with backend.name_scope(metric_name):\n metric_result = training_utils_v1.call_metric_function(metric_fn, y_true, y_pred, weights=weights, mask=mask)\n metric_results.append(metric_result)\n return metric_results", "docstring": "Calls metric functions for a single output.\n\nArgs:\n metrics_dict: A dict with metric names as keys and metric fns as values.\n y_true: Target output.\n y_pred: Predicted output.\n mask: Computed mask value for the current output.\n weights: Weights to be applied on the current output.\n\nReturns:\n A list of metric result tensors."} +{"repo": "mobly", "function": "class Logcat(base_service.BaseService):\n OUTPUT_FILE_TYPE = 'logcat'\n\n def __init__(self, android_device, configs=None):\n super().__init__(android_device, configs)\n self._ad = android_device\n self._adb_logcat_process = None\n self._adb_logcat_file_obj = None\n self.adb_logcat_file_path = None\n self._config = configs if configs else Config()\n\n def _enable_logpersist(self):\n \"\"\"Attempts to enable logpersist daemon to persist logs.\"\"\"\n if not self._ad.is_rootable:\n return\n logpersist_warning = '%s encountered an error enabling persistent logs, logs may 
not get saved.'\n if not self._ad.adb.has_shell_command('logpersist.start'):\n logging.warning(logpersist_warning, self)\n return\n try:\n self._ad.adb.shell('logpersist.stop --clear')\n self._ad.adb.shell('logpersist.start')\n except adb.AdbError:\n logging.warning(logpersist_warning, self)\n\n def _is_timestamp_in_range(self, target, begin_time, end_time):\n low = mobly_logger.logline_timestamp_comparator(begin_time, target) <= 0\n high = mobly_logger.logline_timestamp_comparator(end_time, target) >= 0\n return low and high\n\n def create_output_excerpts(self, test_info):\n \"\"\"Convenient method for creating excerpts of adb logcat.\n\n This copies logcat lines from self.adb_logcat_file_path to an excerpt\n file, starting from the location where the previous excerpt ended.\n\n Call this method at the end of: `setup_class`, `teardown_test`, and\n `teardown_class`.\n\n Args:\n test_info: `self.current_test_info` in a Mobly test.\n\n Returns:\n List of strings, the absolute paths to excerpt files.\n \"\"\"\n dest_path = test_info.output_path\n utils.create_dir(dest_path)\n filename = self._ad.generate_filename(self.OUTPUT_FILE_TYPE, test_info, 'txt')\n excerpt_file_path = os.path.join(dest_path, filename)\n with open(excerpt_file_path, 'w', encoding='utf-8', errors='replace', newline='') as out:\n while self._adb_logcat_file_obj:\n line = self._adb_logcat_file_obj.readline()\n if not line:\n break\n out.write(line)\n self._ad.log.debug('logcat excerpt created at: %s', excerpt_file_path)\n return [excerpt_file_path]\n\n @property\n def is_alive(self):\n return True if self._adb_logcat_process else False\n\n def clear_adb_log(self):\n \"\"\"Clears cached adb content.\"\"\"\n try:\n self._ad.adb.logcat('-c')\n except adb.AdbError as e:\n if b'failed to clear' in e.stderr:\n self._ad.log.warning('Encountered known Android error to clear logcat.')\n else:\n raise\n\n def _assert_not_running(self):\n \"\"\"Asserts the logcat service is not running.\n\n Raises:\n Error, if the logcat service is running.\n \"\"\"\n if self.is_alive:\n raise Error(self._ad, 'Logcat thread is already running, cannot start another one.')\n\n def update_config(self, new_config):\n \"\"\"Updates the configuration for the service.\n\n The service needs to be stopped before updating, and explicitly started\n after the update.\n\n This will reset the service. 
Previous output files may be orphaned if\n output path is changed.\n\n Args:\n new_config: Config, the new config to use.\n \"\"\"\n self._assert_not_running()\n self._ad.log.info('[LogcatService] Changing config from %s to %s', self._config, new_config)\n self._config = new_config\n\n def _open_logcat_file(self):\n \"\"\"Create a file object that points to the beginning of the logcat file.\n Wait for the logcat file to be created by the subprocess if it doesn't\n exist.\n \"\"\"\n if not self._adb_logcat_file_obj:\n deadline = time.perf_counter() + CREATE_LOGCAT_FILE_TIMEOUT_SEC\n while not os.path.exists(self.adb_logcat_file_path):\n if time.perf_counter() > deadline:\n raise Error(self._ad, 'Timeout while waiting for logcat file to be created.')\n time.sleep(1)\n self._adb_logcat_file_obj = open(self.adb_logcat_file_path, 'r', encoding='utf-8', errors='replace', newline='')\n self._adb_logcat_file_obj.seek(0, os.SEEK_END)\n\n def _close_logcat_file(self):\n \"\"\"Closes and resets the logcat file object, if it exists.\"\"\"\n if self._adb_logcat_file_obj:\n self._adb_logcat_file_obj.close()\n self._adb_logcat_file_obj = None\n\n def start(self):\n \"\"\"Starts a standing adb logcat collection.\n\n The collection runs in a separate subprocess and saves logs in a file.\n \"\"\"\n if self._ad.is_bootloader:\n self._ad.log.warning('Skip starting logcat because the device is in fastboot mode.')\n return\n self._assert_not_running()\n if self._config.clear_log:\n self.clear_adb_log()\n self._start()\n self._open_logcat_file()\n\n def _start(self):\n \"\"\"The actual logic of starting logcat.\"\"\"\n self._enable_logpersist()\n if self._config.output_file_path:\n self._close_logcat_file()\n self.adb_logcat_file_path = self._config.output_file_path\n if not self.adb_logcat_file_path:\n f_name = self._ad.generate_filename(self.OUTPUT_FILE_TYPE, extension_name='txt')\n logcat_file_path = os.path.join(self._ad.log_path, f_name)\n self.adb_logcat_file_path = logcat_file_path\n utils.create_dir(os.path.dirname(self.adb_logcat_file_path))\n cmd = ' \"%s\" -s %s logcat -v threadtime -T 1 %s >> \"%s\" ' % (adb.ADB, self._ad.serial, self._config.logcat_params, self.adb_logcat_file_path)\n process = utils.start_standing_subprocess(cmd, shell=True)\n self._adb_logcat_process = process\n\n def stop(self):\n \"\"\"Stops the adb logcat service.\"\"\"\n self._close_logcat_file()\n self._stop()\n\n def _stop(self):\n \"\"\"Stops the background process for logcat.\"\"\"\n if not self._adb_logcat_process:\n return\n try:\n utils.stop_standing_subprocess(self._adb_logcat_process)\n except Exception:\n self._ad.log.exception('Failed to stop adb logcat.')\n self._adb_logcat_process = None\n\n def pause(self):\n \"\"\"Pauses logcat.\n\n Note: the service is unable to collect the logs when paused, if more\n logs are generated on the device than the device's log buffer can hold,\n some logs would be lost.\n \"\"\"\n self._stop()\n\n def resume(self):\n \"\"\"Resumes a paused logcat service.\"\"\"\n self._assert_not_running()\n self._start()", "docstring": "Android logcat service for Mobly's AndroidDevice controller.\n\nAttributes:\n adb_logcat_file_path: string, path to the file that the service writes\n adb logcat to by default."} +{"repo": "tensorflow", "function": "def _to_numpy(a):\n if isinstance(a, ops.EagerTensor):\n return a.numpy()\n if isinstance(a, tensor.Tensor):\n sess = ops.get_default_session()\n return sess.run(a)\n if isinstance(a, indexed_slices.IndexedSlicesValue):\n arr = np.zeros(a.dense_shape)\n 
assert len(a.values) == len(a.indices), 'IndexedSlicesValue has %s value slices but %s indices\\n%s' % (a.values, a.indices, a)\n for values_slice, index in zip(a.values, a.indices):\n assert 0 <= index < len(arr), 'IndexedSlicesValue has invalid index %s\\n%s' % (index, a)\n arr[index] += values_slice\n return arr\n return a", "docstring": "Converts Tensors, EagerTensors, and IndexedSlicesValue to numpy arrays.\n\nArgs:\n a: any value.\n\nReturns:\n If a is EagerTensor or Tensor, returns the evaluation of a by calling\n numpy() or run(). If a is IndexedSlicesValue, constructs the corresponding\n dense numpy array. Otherwise returns a unchanged."} +{"repo": "tensorflow", "function": "def conv1d(inputs, filters, kernel_size, strides=1, padding='valid', data_format='channels_last', dilation_rate=1, activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, reuse=None):\n warnings.warn('`tf.layers.conv1d` is deprecated and will be removed in a future version. Please Use `tf.keras.layers.Conv1D` instead.')\n layer = Conv1D(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, _reuse=reuse, _scope=name)\n return layer.apply(inputs)", "docstring": "Functional interface for 1D convolution layer (e.g. temporal convolution).\n\nThis layer creates a convolution kernel that is convolved\n(actually cross-correlated) with the layer input to produce a tensor of\noutputs. If `use_bias` is True (and a `bias_initializer` is provided),\na bias vector is created and added to the outputs. Finally, if\n`activation` is not `None`, it is applied to the outputs as well.\n\nArgs:\n inputs: Tensor input.\n filters: Integer, the dimensionality of the output space (i.e. the number\n of filters in the convolution).\n kernel_size: An integer or tuple/list of a single integer, specifying the\n length of the 1D convolution window.\n strides: An integer or tuple/list of a single integer,\n specifying the stride length of the convolution.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, length)`.\n dilation_rate: An integer or tuple/list of a single integer, specifying\n the dilation rate to use for dilated convolution.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any `strides` value != 1.\n activation: Activation function. 
Set it to None to maintain a\n linear activation.\n use_bias: Boolean, whether the layer uses a bias.\n kernel_initializer: An initializer for the convolution kernel.\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer will be used.\n kernel_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n kernel_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). Constraints are\n not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n trainable: Boolean, if `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: A string, the name of the layer.\n reuse: Boolean, whether to reuse the weights of a previous layer\n by the same name.\n\nReturns:\n Output tensor.\n\nRaises:\n ValueError: if eager execution is enabled."} +{"repo": "tensorflow", "function": "def _global_batch_size(self):\n return True", "docstring": "`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n`make_input_fn_iterator` assumes per-replica batching.\n\nReturns:\n Boolean."} +{"repo": "transformers", "function": "class MoeModelOutputWithPast(ModelOutput):\n last_hidden_state: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Cache] = None\n hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None\n attentions: Optional[Tuple[torch.FloatTensor, ...]] = None\n router_logits: Optional[Tuple[torch.FloatTensor]] = None", "docstring": "Base class for model's outputs, with potential hidden states and attentions.\n\nArgs:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n It is a [`~cache_utils.Cache`] instance. 
For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if\n `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`\n input) to speed up sequential decoding.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attention weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n router_logits (`tuple(torch.FloatTensor)`, *optional*, returned when `output_router_probs=True` and `config.add_router_probs=True` is passed or when `config.output_router_probs=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_experts)`.\n\n Raw router logits (post-softmax) that are computed by MoE routers; these terms are used to compute the auxiliary\n loss for Mixture of Experts models."} +{"repo": "transformers", "function": "def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor]=None, dim=None) -> torch.Tensor:\n if weights is not None:\n weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor))\n sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0)\n return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights\n else:\n return input_tensor.mean(dim=dim)", "docstring": "Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero,\nmeaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`.\n\nArgs:\n input_tensor (`torch.FloatTensor`):\n Input tensor, of which the average must be computed.\n weights (`torch.FloatTensor`, *optional*):\n Weights tensor, of the same shape as `input_tensor`.\n dim (`int`, *optional*):\n The dim along which to average `input_tensor`.\n\nReturns:\n `torch.FloatTensor`: The tensor with values averaged along the specified `dim`."} +{"repo": "transformers", "function": "def _compute_mask_indices(shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor]=None, min_masks: int=0) -> np.ndarray:\n batch_size, sequence_length = shape\n if mask_length < 1:\n raise ValueError('`mask_length` has to be bigger than 0.')\n if mask_length > sequence_length:\n raise ValueError(f'`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`')\n epsilon = np.random.rand(1).item()\n\n def compute_num_masked_span(input_length):\n \"\"\"Given input length, compute how many spans should be masked\"\"\"\n num_masked_span = int(mask_prob * input_length / mask_length + epsilon)\n num_masked_span = max(num_masked_span, min_masks)\n if num_masked_span * mask_length > 
sequence_length:\n num_masked_span = sequence_length // mask_length\n if input_length - (mask_length - 1) < num_masked_span:\n num_masked_span = max(input_length - (mask_length - 1), 0)\n return num_masked_span\n input_lengths = attention_mask.detach().sum(-1).tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)]\n spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)\n spec_aug_mask_idxs = []\n max_num_masked_span = compute_num_masked_span(sequence_length)\n if max_num_masked_span == 0:\n return spec_aug_mask\n for input_length in input_lengths:\n num_masked_span = compute_num_masked_span(input_length)\n spec_aug_mask_idx = np.random.choice(np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False)\n if len(spec_aug_mask_idx) == 0:\n dummy_mask_idx = sequence_length - 1\n else:\n dummy_mask_idx = spec_aug_mask_idx[0]\n spec_aug_mask_idx = np.concatenate([spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx])\n spec_aug_mask_idxs.append(spec_aug_mask_idx)\n spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)\n spec_aug_mask_idxs = np.broadcast_to(spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length))\n spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)\n offsets = np.arange(mask_length)[None, None, :]\n offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(batch_size, max_num_masked_span * mask_length)\n spec_aug_mask_idxs = spec_aug_mask_idxs + offsets\n if spec_aug_mask_idxs.max() > sequence_length - 1:\n spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1\n np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)\n return spec_aug_mask", "docstring": "Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for\nASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on\nCPU as part of the preprocessing during training.\n\nArgs:\n shape: The shape for which to compute masks. This should be a tuple of size 2 where\n the first element is the batch size and the second element is the length of the axis to span.\n mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of\n independently generated mask spans of length `mask_length` is computed by\n `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the\n actual percentage will be smaller.\n mask_length: size of the mask\n min_masks: minimum number of masked spans\n attention_mask: A (right-padded) attention mask which independently shortens the feature axis of\n each batch dimension."} +{"repo": "tensorflow", "function": "def broadcast_recv_v2(shape, dtype, group_size, group_key, instance_key, communication_hint='auto', timeout=0):\n return gen_collective_ops.collective_bcast_recv_v2(T=dtype, group_size=group_size, group_key=group_key, instance_key=instance_key, shape=shape, communication_hint=communication_hint.lower(), timeout_seconds=timeout)", "docstring": "Receives a broadcast tensor across devices.\n\nArgs:\n shape: an int tensor. Shape of the tensor to be received.\n dtype: Type of the tensor to be received.\n group_size: an int32 tensor. One plus the number of receiving tensors, i.e.\n the total number of devices participating. 
Each tensor must reside on a\n different device.\n group_key: an int32 tensor identifying the group of devices.\n instance_key: an int32 tensor identifying the participating group of Ops.\n communication_hint: preferred collective communication. The implementation\n may fall back to another mechanism. Options include `auto`, `ring`, and\n `nccl`.\n timeout: If set to a non-zero value, sets a completion timeout to detect staleness.\n If the timer goes off, a DeadlineExceededError is raised.\n The timeout value is in seconds. This feature is experimental.\n\nReturns:\n An Op implementing the broadcast receive."} +{"repo": "tensorflow", "function": "def crossed_column(keys, hash_bucket_size, hash_key=None):\n if not hash_bucket_size or hash_bucket_size < 1:\n raise ValueError('hash_bucket_size must be > 1. hash_bucket_size: {}'.format(hash_bucket_size))\n if not keys or len(keys) < 2:\n raise ValueError('keys must be a list with length > 1. Given: {}'.format(keys))\n for key in keys:\n if not isinstance(key, six.string_types) and (not isinstance(key, (CategoricalColumn, fc_old._CategoricalColumn))):\n raise ValueError('Unsupported key type. All keys must be either string, or categorical column except HashedCategoricalColumn. Given: {}'.format(key))\n if isinstance(key, (HashedCategoricalColumn, fc_old._HashedCategoricalColumn)):\n raise ValueError('categorical_column_with_hash_bucket is not supported for crossing. Hashing before crossing will increase probability of collision. Instead, use the feature name as a string. Given: {}'.format(key))\n return CrossedColumn(keys=tuple(keys), hash_bucket_size=hash_bucket_size, hash_key=hash_key)", "docstring": "Returns a column for performing crosses of categorical features.\n\nCrossed features will be hashed according to `hash_bucket_size`. 
Conceptually,\nthe transformation can be thought of as:\n Hash(cartesian product of features) % `hash_bucket_size`\n\nFor example, if the input features are:\n\n* SparseTensor referred to by the first key:\n\n ```python\n shape = [2, 2]\n {\n [0, 0]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n }\n ```\n\n* SparseTensor referred to by the second key:\n\n ```python\n shape = [2, 1]\n {\n [0, 0]: \"d\"\n [1, 0]: \"e\"\n }\n ```\n\nthen the crossed feature will look like:\n\n```python\n shape = [2, 2]\n{\n [0, 0]: Hash64(\"d\", Hash64(\"a\")) % hash_bucket_size\n [1, 0]: Hash64(\"e\", Hash64(\"b\")) % hash_bucket_size\n [1, 1]: Hash64(\"e\", Hash64(\"c\")) % hash_bucket_size\n}\n```\n\nHere is an example to create a linear model with crosses of string features:\n\n```python\nkeywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)\ncolumns = [keywords_x_doc_terms, ...]\nfeatures = tf.io.parse_example(..., features=make_parse_example_spec(columns))\nlinear_prediction = linear_model(features, columns)\n```\n\nYou could also use vocabulary lookup before crossing:\n\n```python\nkeywords = categorical_column_with_vocabulary_file(\n 'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)\nkeywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)\ncolumns = [keywords_x_doc_terms, ...]\nfeatures = tf.io.parse_example(..., features=make_parse_example_spec(columns))\nlinear_prediction = linear_model(features, columns)\n```\n\nIf an input feature is of numeric type, you can use\n`categorical_column_with_identity`, or `bucketized_column`, as in the example:\n\n```python\n# vertical_id is an integer categorical feature.\nvertical_id = categorical_column_with_identity('vertical_id', 10K)\nprice = numeric_column('price')\n# bucketized_column converts numerical feature to a categorical one.\nbucketized_price = bucketized_column(price, boundaries=[...])\nvertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)\ncolumns = [vertical_id_x_price, ...]\nfeatures = tf.io.parse_example(..., features=make_parse_example_spec(columns))\nlinear_prediction = linear_model(features, columns)\n```\n\nTo use a crossed column in a DNN model, you need to wrap it in an embedding column,\nas in this example:\n\n```python\nvertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)\nvertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)\ndense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])\n```\n\nArgs:\n keys: An iterable identifying the features to be crossed. Each element can\n be either:\n * string: Will use the corresponding feature which must be of string type.\n * `CategoricalColumn`: Will use the transformed tensor produced by this\n column. Does not support hashed categorical column.\n hash_bucket_size: An int > 1. 
The number of buckets.\n hash_key: Specify the hash_key that will be used by the `FingerprintCat64`\n function to combine the crosses' fingerprints on SparseCrossOp (optional).\n\nReturns:\n A `CrossedColumn`.\n\nRaises:\n ValueError: If `len(keys) < 2`.\n ValueError: If any of the keys is neither a string nor `CategoricalColumn`.\n ValueError: If any of the keys is `HashedCategoricalColumn`.\n ValueError: If `hash_bucket_size < 1`."} +{"repo": "pytruth", "function": "def HasCalls(self, *calls, **kwargs):\n if len(calls) == 1 and _IsIterable(calls[0]) and (not isinstance(calls[0], mock._Call)):\n calls = calls[0]\n contains_all = AssertThat(self._actual.mock_calls).ContainsAllIn(calls)\n any_order = kwargs.get('any_order')\n if any_order or any_order is None:\n return contains_all\n return contains_all.InOrder()", "docstring": "Assert that the mocked function was called with all the given calls.\n\nArgs:\n *calls: iterable of mock.call objects. Developers may also pass a single\n iterable of mock.call objects, for compatibility with mock's\n assert_has_calls() method, although this form is not preferred.\n **kwargs: optional parameters. The only recognized parameter is any_order:\n If any_order=True, the assertion succeeds if the mocked function was\n called with all the given calls, regardless of the call order.\n If any_order=False, the assertion succeeds if the mocked function was\n called with all of the given calls in the given order.\n If any_order is omitted, it behaves like any_order=True. This is the\n preferred way of calling HasCalls(). Developers who wish to\n enforce an order should call InOrder() on the returned predicate.\n If the order is unimportant, simply omit the InOrder() call.\n This is an intentional divergence from the mock library's syntax.\n\nReturns:\n If any_order is True or omitted, and the mocked function was called with all\n of the expected calls, returns an _Ordered predicate on which\n .InOrder() can be subsequently called.\n If any_order=False, invokes the InOrder() predicate and returns its value.\n\nRaises:\n TruthAssertionError: the mocked function is missing any of the expected\n calls."} +{"repo": "pytype", "function": "def constant_to_value(self, pyval, subst=None, node=None):\n node = node or self.ctx.root_node\n if pyval.__class__ is tuple:\n type_key = tuple((type(v) for v in pyval))\n else:\n type_key = type(pyval)\n key = ('constant', pyval, type_key)\n if key in self._convert_cache:\n if self._convert_cache[key] is None:\n self._convert_cache[key] = self.unsolvable\n if not self.ctx.recursion_allowed:\n name = getattr(pyval, 'name', None) or pyval.__class__.__name__\n self.ctx.errorlog.recursion_error(self.ctx.vm.frames, name)\n return self._convert_cache[key]\n else:\n self._convert_cache[key] = None\n need_node = [False]\n\n def get_node():\n need_node[0] = True\n return node\n recursive = isinstance(pyval, pytd.LateType) and pyval.recursive\n if recursive:\n context = self.ctx.allow_recursive_convert()\n else:\n context = contextlib.nullcontext()\n with context:\n try:\n value = self._constant_to_value(pyval, subst, get_node)\n except NotImplementedError:\n del self._convert_cache[key]\n raise\n if not need_node[0] or node is self.ctx.root_node:\n if recursive:\n annot = abstract.LateAnnotation(pyval.name, self.ctx.vm.frames, self.ctx)\n annot.set_type(value)\n value = annot\n self._convert_cache[key] = value\n return value", "docstring": "Like constant_to_var, but convert to an abstract.BaseValue.\n\nThis also memoizes the results. 
We don't memoize on name, as builtin types\nlike str or list might be reinitialized under different names (e.g. \"param\n1\"), but we want the canonical name and type. We *do* memoize on the type\nas well, to make sure that e.g. \"1.0\" and \"1\" get converted to different\nconstants. Memoization is an optimization, but an important one - mapping\nconstants like \"None\" to the same AbstractValue greatly simplifies the\ncfg structures we're building.\n\nArgs:\n pyval: The constant to convert.\n subst: The current type parameters.\n node: The current CFG node. (For instances)\n\nReturns:\n The converted constant. (Instance of BaseValue)"} +{"repo": "beam", "function": "def __init__(self, chunk_id_fn: Optional[ChunkIdFn]=None):\n self.assign_chunk_id_fn = functools.partial(_assign_chunk_id, chunk_id_fn) if chunk_id_fn is not None else None", "docstring": "Base class for chunking transforms in RAG pipelines.\n\nChunkingTransformProvider defines the interface for splitting documents\ninto chunks for embedding and retrieval. Implementations should define how\nto split content while preserving metadata and managing chunk IDs.\n\nThe transform flow:\n- Takes input documents with content and metadata\n- Splits content into chunks using implementation-specific logic\n- Preserves document metadata in resulting chunks\n- Optionally assigns unique IDs to chunks (configurable via chunk_id_fn)\n\nExample usage:\n >>> class MyChunker(ChunkingTransformProvider):\n ... def get_splitter_transform(self):\n ... return beam.ParDo(MySplitterDoFn())\n ... \n >>> chunker = MyChunker(chunk_id_fn=my_id_function)\n >>> \n >>> with beam.Pipeline() as p:\n ... chunks = (\n ... p \n ... | beam.Create([{'text': 'document...', 'source': 'doc.txt'}])\n ... | MLTransform(...).with_transform(chunker))\n\nArgs:\n chunk_id_fn: Optional function to generate chunk IDs. If not provided,\n random UUIDs will be used. Function should take a Chunk and return str."} +{"repo": "beam", "function": "class DefaultThrottler(PreCallThrottler):\n\n def __init__(self, window_ms: int=1, bucket_ms: int=1, overload_ratio: float=2, delay_secs: int=5):\n self.throttler = AdaptiveThrottler(window_ms=window_ms, bucket_ms=bucket_ms, overload_ratio=overload_ratio)\n self.delay_secs = delay_secs", "docstring": "Default throttler that uses\n:class:`apache_beam.io.components.adaptive_throttler.AdaptiveThrottler`\n\nArgs:\n window_ms (int): length of history to consider, in ms, to set throttling.\n bucket_ms (int): granularity of time buckets that we store data in, in ms.\n overload_ratio (float): the target ratio between requests sent and\n successful requests. This is \"K\" in the formula in\n https://landing.google.com/sre/book/chapters/handling-overload.html.\n delay_secs (int): minimum number of seconds to throttle a request."} +{"repo": "transformers", "function": "def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n with open(json_file_path, 'w', encoding='utf-8') as writer:\n writer.write(self.to_json_string())", "docstring": "Save this instance to a JSON file.\n\nArgs:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this image_processor instance's parameters will be saved."} +{"repo": "tensorflow", "function": "def get_check_numerics_error_message(slot, num_outputs, op_type, tensor, inputs, graph=None, traceback=None, stack_height_limit=30, path_length_limit=50):\n eager_vs_graph_qualifier = 'graph' if graph else 'eagerly-executing'\n message = '\\n'\n message += '\\n!!! 
Detected Infinity or NaN in output %d of %s op \"%s\" (# of outputs: %d) !!!\\n' % (slot, eager_vs_graph_qualifier, op_type, num_outputs)\n message += ' dtype: %s\\n' % tensor.dtype\n message += ' shape: %s\\n' % (tensor.shape,)\n if not graph:\n is_inf = np.isinf(tensor)\n num_neg_inf = np.sum(np.logical_and(np.less(tensor, 0.0), is_inf))\n num_pos_inf = np.sum(np.logical_and(np.greater(tensor, 0.0), is_inf))\n num_nan = np.sum(np.isnan(tensor))\n if num_neg_inf > 0:\n message += ' # of -Inf elements: %s\\n' % num_neg_inf\n if num_pos_inf > 0:\n message += ' # of +Inf elements: %s\\n' % num_pos_inf\n if num_nan:\n message += ' # of +NaN elements: %s\\n' % num_nan\n if len(inputs) > 1:\n message += '\\n Input tensors (%d):\\n' % len(inputs)\n for slot, input_tensor in enumerate(inputs):\n message += ' %d: %s\\n' % (slot, _maybe_lookup_original_input_tensor(graph, input_tensor))\n elif len(inputs) == 1:\n message += '\\n Input tensor: %s\\n' % _maybe_lookup_original_input_tensor(graph, inputs[0])\n if graph and hasattr(graph, 'name') and graph.name:\n message += ' Graph name: \"%s\"\\n' % graph.name\n if graph and traceback:\n message += '\\n Stack trace of op\\'s creation (\"->\": inferred user code):\\n'\n if stack_height_limit is not None and len(traceback) > stack_height_limit:\n num_omitted_frames = len(traceback) - stack_height_limit\n message += ' + ... (Omitted %d frames)\\n' % num_omitted_frames\n for filepath, lineno, function_name, source_line in traceback[-stack_height_limit:]:\n user_code_indicator = ' '\n if not source_utils.guess_is_tensorflow_py_library(filepath):\n user_code_indicator = ' -> '\n message += ' + %s (L%d) %s\\n' % (limit_string_length(filepath, path_length_limit), lineno, function_name)\n if source_line is not None:\n message += '%s| %s\\n' % (user_code_indicator, source_line)\n message += '\\n'\n return message", "docstring": "Create a meaningful and user-friendly error message about the offending tensor.\n\nThe error message reveals the following info about the op that outputs\nNaN/Infinity: dtype, shape (to the extent known at graph-construction time),\ninput tensors, stack trace for op creation (if in graph mode).\n\nArgs:\n slot: (int) slot index of the tensor output.\n num_outputs: (int) total number of outputs of the op.\n op_type: (str) Type of the op that generates `tensor`.\n tensor: (Tensor) the offending tensor, i.e., the tensor that contains\n Infinities or NaNs.\n inputs: (array of Tensor) inputs to the op that generates `tensor`.\n graph: (tf.Graph) the graph object that `tensor` belongs to. Available only\n under graph mode.\n traceback: (list of trace frames) the stack trace of the op's creation.\n Available only under graph mode.\n stack_height_limit: (int or None) If int, limit to the height of the stack\n trace printed in the error message. 
If None, no limit to the height.\n path_length_limit: (int or None) Length limit for file paths included in the\n formatted stack trace.\n\nReturns:\n (str) A formatted error message."} +{"repo": "transformers", "function": "def __call__(self, images: Optional[ImageInput]=None, text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]]=None, audio=None, videos=None, **kwargs: Unpack[Llama4ProcessorKwargs]) -> BatchFeature:\n if text is None:\n raise ValueError('You have to specify text.')\n output_kwargs = self._merge_kwargs(Llama4ProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n if not isinstance(text, (list, tuple)):\n text = [text]\n image_inputs = {}\n if images is not None:\n images = make_flat_list_of_images(images)\n image_inputs = self.image_processor(images=images, **output_kwargs['images_kwargs'])\n image_height, image_width = image_inputs['pixel_values'][0].shape[-2:]\n num_patches_per_chunk = int(image_height // self.patch_size * (image_width // self.patch_size) // self.downsample_ratio)\n aspect_ratios = image_inputs.pop('aspect_ratios')\n total_placeholders = sum((prompt.count(self.fake_image_token) for prompt in text))\n if total_placeholders != len(images):\n raise ValueError(f'Found {total_placeholders} placeholders across the batch, but have {len(images)} flattened images.')\n image_index = 0\n processed_text = []\n for prompt in text:\n placeholder_count = prompt.count(self.fake_image_token)\n if placeholder_count == 0:\n processed_text.append(prompt)\n continue\n prompt_splits = prompt.split(self.fake_image_token)\n new_prompt = []\n for local_image_index, split_part in enumerate(prompt_splits):\n new_prompt.append(split_part)\n if local_image_index < placeholder_count:\n tokens_for_this_image = self._prompt_split_image(aspect_ratios[image_index], num_patches_per_chunk)\n image_index += 1\n new_prompt.append(tokens_for_this_image)\n processed_text.append(''.join(new_prompt))\n if image_index != len(images):\n raise ValueError('Number of image placeholders in the prompt does not match the number of images.')\n text = processed_text\n return_tensors = output_kwargs['text_kwargs'].pop('return_tensors', None)\n text_inputs = self.tokenizer(text, **output_kwargs['text_kwargs'])\n self._check_special_mm_tokens(text, text_inputs, modalities=['image'])\n return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)", "docstring": "Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`\nand `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] to encode the text.\nTo prepare the vision inputs, this method forwards the `images` and `kwargs` arguments to\nLlama4ImageProcessor's [`~Llama4ImageProcessor.__call__`] if `images` is not `None`.\n\nArgs:\n images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):\n The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n tensor. Both channels-first and channels-last formats are supported.\n text (`str`, `List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n (pretokenized string). 
If the sequences are provided as a list of strings (pretokenized), you must set\n `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors of a particular framework. Acceptable values are:\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return NumPy `np.ndarray` objects.\n - `'jax'`: Return JAX `jnp.ndarray` objects.\n\nReturns:\n [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n\n - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.\n - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when\n `return_attention_mask=True` or if *\"attention_mask\"* is in `self.model_input_names` and if `text` is not\n `None`).\n - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`."} +{"repo": "mobly", "function": "def start_services(self, service_alises):\n for name in service_alises:\n if name not in self._service_objects:\n raise Error(self._device, 'No service is registered under the name \"%s\", cannot start.' % name)\n service = self._service_objects[name]\n if not service.is_alive:\n service.start()", "docstring": "Starts the specified services.\n\nServices will be started in the order specified by the input list.\nNo-op for services that are already running.\n\nArgs:\n service_alises: list of strings, the aliases of services to start."} +{"repo": "beam", "function": "def create(cls, num_quantiles, epsilon=None, max_num_elements=None, key=None, reverse=False, weighted=False, input_batched=False):\n max_num_elements = max_num_elements or cls._MAX_NUM_ELEMENTS\n if not epsilon:\n epsilon = min(0.01, 1.0 / num_quantiles) if weighted else 1.0 / num_quantiles\n b = 2\n while (b - 2) * (1 << b - 2) < epsilon * max_num_elements:\n b = b + 1\n b = b - 1\n k = max(2, int(math.ceil(max_num_elements / float(1 << b - 1))))\n return cls(num_quantiles=num_quantiles, buffer_size=k, num_buffers=b, key=key, reverse=reverse, weighted=weighted, input_batched=input_batched)", "docstring": "Creates an approximate quantiles combiner with the given key and desired\nnumber of quantiles.\n\nArgs:\n num_quantiles: Number of quantiles to produce. It is the size of the\n final output list, including the minimum and maximum value items.\n epsilon: (optional) The default error bound is `epsilon`, which holds as\n long as the number of elements is less than `_MAX_NUM_ELEMENTS`.\n Specifically, if one considers the input as a sorted list x_1, ...,\n x_N, then the distance between each exact quantile x_c and its\n approximation x_c' is bounded by `|c - c'| < epsilon * N`. Note that\n these errors are worst-case scenarios. In practice the accuracy tends\n to be much better.\n max_num_elements: (optional) The cost (in time and space) to compute\n quantiles to a given accuracy is a function of the total number of\n elements in the data set.\n key: (optional) Key is a mapping of elements to a comparable key, similar\n to the key argument of Python's sorting methods.\n reverse: (optional) whether to order things smallest to largest, rather\n than largest to smallest.\n weighted: (optional) if set to True, the combiner produces weighted\n quantiles. 
The input elements are then expected to be tuples of values\n with the corresponding weight.\n input_batched: (optional) if set to True, inputs are expected to be\n batches of elements."} +{"repo": "tensorflow", "function": "class ProtoAssertions(object):\n\n def assertProtoEqual(self, *args, **kwargs):\n return assertProtoEqual(self, *args, **kwargs)", "docstring": "Mix this into a googletest.TestCase class to get proto2 assertions.\n\nUsage:\n\nclass SomeTestCase(compare.ProtoAssertions, googletest.TestCase):\n ...\n def testSomething(self):\n ...\n self.assertProtoEqual(a, b)\n\nSee module-level definitions for method documentation."} +{"repo": "pytype", "function": "def _has_own(self, node, cls, method):\n assert method in ('__new__', '__init__')\n if not isinstance(cls, abstract.Class):\n return False\n self.load_lazy_attribute(method)\n obj_method = self.members[method]\n _, cls_method = self.ctx.attribute_handler.get_attribute(node, cls, method)\n return obj_method.data != cls_method.data", "docstring": "Whether a class has its own implementation of a particular method.\n\nArgs:\n node: The current node.\n cls: An abstract.Class.\n method: The method name. So that we don't have to handle the cases when\n the method doesn't exist, we only support \"__new__\" and \"__init__\".\n\nReturns:\n True if the class's definition of the method is different from the\n definition in builtins.object, False otherwise."} +{"repo": "tensorflow", "function": "def _generate_enqueue_op(self, flat_inputs: List[internal_types.NativeObject], flat_weights: List[Optional[internal_types.NativeObject]], flat_features: List[tpu_embedding_v2_utils.FeatureConfig], device_ordinal: int, mode_override: Text) -> ops.Operation:\n combiners = [table.combiner for table in self._table_config]\n indices_or_row_splits = []\n values = []\n weights = []\n int_zeros = array_ops.zeros((0,), dtype=dtypes.int32)\n float_zeros = array_ops.zeros((0,), dtype=dtypes.float32)\n for inp, weight, (path, feature) in zip(flat_inputs, flat_weights, flat_features):\n if isinstance(inp, tensor_lib.Tensor):\n self._add_data_for_tensor(inp, weight, indices_or_row_splits, values, weights, int_zeros, float_zeros, path)\n elif isinstance(inp, sparse_tensor.SparseTensor):\n self._add_data_for_sparse_tensor(inp, weight, indices_or_row_splits, values, weights, int_zeros, float_zeros, path, feature)\n elif isinstance(inp, ragged_tensor.RaggedTensor):\n self._add_data_for_ragged_tensor(inp, weight, indices_or_row_splits, values, weights, int_zeros, float_zeros, path, feature)\n else:\n raise ValueError('Input {} is of unknown type {}. 
Please only pass Tensor, SparseTensor or RaggedTensor as input to enqueue.'.format(path, type(inp)))\n return tpu_ops.enqueue_tpu_embedding_arbitrary_tensor_batch(sample_indices_or_row_splits=indices_or_row_splits, embedding_indices=values, aggregation_weights=weights, mode_override=mode_override, device_ordinal=device_ordinal, combiners=combiners)", "docstring": "Outputs the enqueue op given the inputs and weights.\n\nArgs:\n flat_inputs: A list of input tensors.\n flat_weights: A list of input weights (or None) of the same length as\n flat_inputs.\n flat_features: A list of FeatureConfigs of the same length as flat_inputs.\n device_ordinal: The device to create the enqueue op for.\n mode_override: A tensor containing the string \"train\" or \"inference\".\n\nReturns:\n The enqueue op."} +{"repo": "tensorflow", "function": "def __mod__(self, other):\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return Dimension(None)\n else:\n return Dimension(self._value % other.value)", "docstring": "Returns `self` modulo `other`.\n\nDimension modulo is computed as follows:\n\n```python\ntf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(n) ==\ntf.compat.v1.Dimension(m % n)\ntf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(None) # equiv. to\ntf.compat.v1.Dimension(None)\ntf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(n) # equiv. to\ntf.compat.v1.Dimension(None)\ntf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(None) # equiv. to\ntf.compat.v1.Dimension(None)\n```\n\nArgs:\n other: Another Dimension, or a value accepted by `as_dimension`.\n\nReturns:\n A Dimension whose value is `self` modulo `other`."} +{"repo": "keras", "function": "class custom_gradient:\n\n def __init__(self, fun):\n warnings.warn('`custom_gradient` for the numpy backend acts as a pass-through to support the forward pass. 
No gradient computation or modification takes place.')\n self.fun = fun\n\n def __call__(self, *args, **kwargs):\n outputs, _ = self.fun(*args, **kwargs)\n return outputs", "docstring": "Decorator for custom gradients.\n\nArgs:\n fun: Forward pass function."} +{"repo": "fhir-py", "function": "def coerce(lhs: FhirPathDataType, rhs: FhirPathDataType) -> FhirPathDataType:\n if not is_coercible(lhs, rhs):\n raise TypeError(f'Unsupported Standard SQL coercion between {lhs} and {rhs}.')\n if isinstance(rhs, _Any) or isinstance(lhs, _Any):\n return _Any\n if rhs in lhs.supported_coercion:\n return rhs\n else:\n return lhs", "docstring": "Performs implicit type coercion between two datatypes.\n\nSee more at: https://hl7.org/fhirpath/#conversion.\n\nArgs:\n lhs: The left operand.\n rhs: The right operand.\n\nReturns:\n The resulting coerced datatype, if successful.\n\nRaises:\n TypeError: In the event that coercion is not supported.\n ValueError: In the event that a coercion cycle is detected."} +{"repo": "transformers", "function": "class Idefics2PerceiverConfig(PretrainedConfig):\n model_type = 'idefics2_perceiver'\n\n def __init__(self, hidden_act='silu', hidden_size=4096, rms_norm_eps=1e-06, resampler_n_latents=64, resampler_depth=3, resampler_n_heads=16, resampler_head_dim=96, num_key_value_heads=4, attention_dropout=0.0, initializer_range=0.02, **kwargs):\n self.hidden_act = hidden_act\n self.hidden_size = hidden_size\n self.rms_norm_eps = rms_norm_eps\n self.resampler_n_latents = resampler_n_latents\n self.resampler_depth = resampler_depth\n self.resampler_n_heads = resampler_n_heads\n self.num_key_value_heads = num_key_value_heads\n self.resampler_head_dim = resampler_head_dim\n self.attention_dropout = attention_dropout\n self.initializer_range = initializer_range\n if self.num_key_value_heads > self.resampler_n_heads:\n raise ValueError(f'num_key_value_heads={self.num_key_value_heads} must be less than or equal to resampler_n_heads={self.resampler_n_heads}')\n super().__init__(**kwargs)", "docstring": "Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the perceiver block.\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimension of the hidden representations.\n rms_norm_eps (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the rms normalization layers.\n resampler_n_latents (`int`, *optional*, defaults to 64):\n Number of latent embeddings to resample (\"compress\") the input sequence to (usually < 128).\n resampler_depth (`int`, *optional*, defaults to 3):\n Depth of the Perceiver Resampler (Transformer w/ cross attention). 
Should be shallow (<= 3).\n resampler_n_heads (`int`, *optional*, defaults to 16):\n Number of heads in each Transformer block (for multi-headed self-attention).\n resampler_head_dim (`int`, *optional*, defaults to 96):\n Dimensionality of each head projection in the Transformer block.\n num_key_value_heads (`int`, *optional*, defaults to 4):\n Number of key-value heads in the perceiver attention block.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation for initializing all weight matrices in the model."} +{"repo": "tensorflow", "function": "def _StopOps(from_ops: list[ops.Operation], stop_gradient_ops: list[ops.Operation], pending_count, xs_set):\n stop_ops = set()\n for op in from_ops:\n is_stop_op = True\n for inp in _NonEagerInputs(op, xs_set):\n if pending_count[inp.op] > 0:\n is_stop_op = False\n break\n if is_stop_op:\n stop_ops.add(op)\n stop_ops.update((op for op in stop_gradient_ops))\n return stop_ops", "docstring": "The set of ops that terminate the gradient computation.\n\nThis computes the frontier of the forward graph *before* which backprop\nshould stop. Operations in the returned set will not be differentiated.\nThis set is defined as the subset of `from_ops` containing ops that have\nno predecessor in `from_ops`. `pending_count` is the result of\n`_PendingCount(xs, from_ops)`. An 'op' has predecessors in `from_ops`\niff pending_count[op] > 0.\n\nIn addition, none of `stop_gradient_ops` will be differentiated.\n\nArgs:\n from_ops: list of Operations.\n stop_gradient_ops: list of Operations never to backprop through.\n pending_count: mapping from operation to number of backprop inputs.\n xs_set: ObjectIdentitySet of Tensors.\n\nReturns:\n The set of operations."} +{"repo": "tensorflow", "function": "def _assert_sparse_compatible(sparse_tensors):\n checks = []\n first = sparse_tensors[0]\n for t in sparse_tensors[1:]:\n checks.append(check_ops.assert_equal(first.dense_shape, t.dense_shape, message='Mismatched shapes!'))\n checks.append(check_ops.assert_equal(first.indices, t.indices, message='Mismatched indices!'))\n return checks", "docstring": "Check that all of `sparse_tensors` have the same `indices` and `dense_shape`.\n\nArgs:\n sparse_tensors: A list of sparse tensors.\n\nReturns:\n An op to be used as a control dependency."} +{"repo": "tensorflow", "function": "def gelu(features, approximate=False, name=None):\n with ops.name_scope(name, 'Gelu', [features]):\n features = ops.convert_to_tensor(features, name='features')\n if not features.dtype.is_floating:\n raise ValueError(f'`features.dtype` must be a floating point tensor. Received: features.dtype={features.dtype}')\n if approximate:\n coeff = math_ops.cast(0.044715, features.dtype)\n return 0.5 * features * (1.0 + math_ops.tanh(0.7978845608028654 * (features + coeff * math_ops.pow(features, 3))))\n else:\n return 0.5 * features * math_ops.erfc(-features * math_ops.cast(0.7071067811865476, features.dtype))", "docstring": "Compute the Gaussian Error Linear Unit (GELU) activation function.\n\nGaussian error linear unit (GELU) computes\n`x * P(X <= x)`, where `P(X) ~ N(0, 1)`.\nThe GELU nonlinearity weights inputs by their value, rather than gates\ninputs by their sign as in ReLU.\n\nFor example:\n\n>>> x = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0], dtype=tf.float32)\n>>> y = tf.nn.gelu(x)\n>>> y.numpy()\narray([-0.00404951, -0.15865529, 0. 
, 0.8413447 , 2.9959507 ],\n dtype=float32)\n>>> y = tf.nn.gelu(x, approximate=True)\n>>> y.numpy()\narray([-0.00363752, -0.15880796, 0. , 0.841192 , 2.9963627 ],\n dtype=float32)\n\nArgs:\n features: A `float Tensor` representing preactivation values.\n approximate: An optional `bool`. Defaults to `False`. Whether to enable\n approximation.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` with the same type as `features`.\n\nRaises:\n ValueError: if `features` is not a floating point `Tensor`.\n\nReferences:\n [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)."} +{"repo": "tensorflow", "function": "def set_cpu0(device_string):\n if context.is_custom_device(device_string):\n return device_string\n parsed_device = pydev.DeviceSpec.from_string(device_string)\n parsed_device = parsed_device.replace(device_type='CPU', device_index=0)\n return parsed_device.to_string()", "docstring": "Creates a new device string based on `device_string` but using /CPU:0.\n\nIf the device is already on /CPU:0 or it is a custom device, this is a no-op.\n\nArgs:\n device_string: A device string.\n\nReturns:\n A device string."} +{"repo": "data-quality-monitor", "function": "def handle_http_error(error: HTTPException) -> ResponseReturnValue:\n code = error.code or 500\n return (DQMResponse(name=error.name, description=error.description, code=code), code)", "docstring": "DQM HTTP Error Response.\n\nArgs:\n * error: HTTP error\n\nReturns:\n * DQMResponse for the error with the relevant status code"} +{"repo": "tensorflow", "function": "def __init__(self, cell):\n self._cell = cell", "docstring": "Creates a new IntGaugeCell.\n\nArgs:\n cell: A C pointer to a TFE_MonitoringIntGaugeCell."} +{"repo": "tensorflow", "function": "def traverse(root, visit):\n _traverse_internal(root, visit, [], '')", "docstring": "Recursively enumerate all members of `root`.\n\nSimilar to the Python library function `os.path.walk`.\n\nTraverses the tree of Python objects starting with `root`, depth first.\nParent-child relationships in the tree are defined by membership in modules or\nclasses. The function `visit` is called with arguments\n`(path, parent, children)` for each module or class `parent` found in the tree\nof python objects starting with `root`. `path` is a string containing the name\nwith which `parent` is reachable from the current context. For example, if\n`root` is a local class called `X` which contains a class `Y`, `visit` will be\ncalled with `('Y', X.Y, children)`.\n\nIf `root` is not a module or class, `visit` is never called. `traverse`\nnever descends into built-in modules.\n\n`children`, a list of `(name, object)` pairs, is determined by\n`tf_inspect.getmembers`. To avoid visiting parts of the tree, `children` can\nbe modified in place, using `del` or slice assignment.\n\nCycles (determined by reference equality, `is`) stop the traversal. A stack of\nobjects is kept to find cycles. Objects forming cycles may appear in\n`children`, but `visit` will not be called with any object as `parent` which\nis already in the stack.\n\nTraversing system modules can take a long time; it is advisable to pass a\n`visit` callable which denylists such modules.\n\nArgs:\n root: A python object with which to start the traversal.\n visit: A function taking arguments `(path, parent, children)`. 
Will be\n called for each object found in the traversal."} +{"repo": "pytype", "function": "def Visit(self, visitor, *args, **kwargs):\n return _Visit(self, visitor, *args, **kwargs)", "docstring": "Visitor interface for transforming a tree of nodes to a new tree.\n\nYou can pass a visitor, and callback functions on that visitor will be\ncalled for all nodes in the tree. Note that nodes are also allowed to\nbe stored in lists and as the values of dictionaries, as long as these\nlists/dictionaries are stored in the named fields of the Node class.\nIt's possible to overload the Visit function on Nodes, to do your own\nprocessing.\n\nArguments:\n visitor: An instance of a visitor for this tree. For every node type you\n want to transform, this visitor implements a \"Visit\"\n function named after the class of the node this function should\n target. Note that this is the *actual* class of the node, so\n if you subclass a Node class, visitors for the superclasses will *not*\n be triggered anymore. Also, visitor callbacks are only triggered\n for subclasses of Node.\n *args: Passed to the visitor callback.\n **kwargs: Passed to the visitor callback.\n\nReturns:\n Transformed version of this node."} +{"repo": "transformers", "function": "def _compute_router_probabilities(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n self.input_dtype = hidden_states.dtype\n hidden_states = hidden_states.to(self.dtype)\n if self.training and self.jitter_noise > 0:\n hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)\n self._cast_classifier()\n router_logits = self.classifier(hidden_states)\n router_probabilities = nn.functional.softmax(router_logits, dim=-1, dtype=self.dtype).to(self.input_dtype)\n return (router_probabilities, router_logits)", "docstring": "Computes router probabilities from input hidden states.\n\nArgs:\n hidden_states (`torch.Tensor`):\n (batch_size, sequence_length, hidden_dim) from which router probabilities are computed.\nReturns:\n router_probabilities (`torch.Tensor`):\n Tensor of shape (batch_size, sequence_length, num_experts) corresponding to the probabilities for each\n token and expert. 
Used for routing tokens to experts.\n router_logits (`torch.Tensor`):\n Logits tensor of shape (batch_size, sequence_length, num_experts) corresponding to raw router logits.\n This is used later for computing router z-loss."} +{"repo": "tensorflow", "function": "def pack(self, tensors: Sequence[Any], layout: layout_lib.Layout) -> Any:\n if not context.executing_eagerly():\n raise RuntimeError('`pack` must be called eagerly.')\n self._register_mesh(layout.mesh)\n with ops.device(self.name):\n if all((isinstance(t, sparse_tensor.SparseTensor) for t in tensors)):\n if not all((t.shape == tensors[0].shape for t in tensors)):\n raise TypeError('All input SparseTensors to Pack must be same shape.')\n is_sparse = True\n tensors = [t.indices for t in tensors] + [t.values for t in tensors] + [ops.convert_to_tensor(t.shape, dtype=dtypes.int64) for t in tensors]\n elif any((isinstance(t, sparse_tensor.SparseTensor) for t in tensors)):\n raise TypeError('Cannot Pack SparseTensors with Tensors.')\n else:\n is_sparse = False\n try:\n return _pywrap_dtensor_device.Pack(context.context()._handle, tensors, layout.to_string(), self._device_info, is_sparse)\n except core._NotOkStatusException as e:\n raise core._status_to_exception(e) from None", "docstring": "Packs tensors into a DTensor handle on this DTensor device.\n\nPacking and unpacking are inverse operations:\n\n```\n* unpack(pack(tensors)) == tensors\n* pack(unpack(dtensor)) == dtensor\n```\n\nRefer to `dtensor.pack` for more information.\n\nArgs:\n tensors: The list of tensors to pack into a DTensor.\n layout: The layout of the DTensor to be created.\n\nReturns:\n A DTensor created from the individual component tensors.\n\nRaises:\n RuntimeError: When not called eagerly."} +{"repo": "transformers", "function": "def to_json_string(self, use_diff: bool=True, ignore_metadata: bool=False) -> str:\n if use_diff is True:\n config_dict = self.to_diff_dict()\n else:\n config_dict = self.to_dict()\n if ignore_metadata:\n for metadata_field in METADATA_FIELDS:\n config_dict.pop(metadata_field, None)\n\n def convert_keys_to_string(obj):\n if isinstance(obj, dict):\n return {str(key): convert_keys_to_string(value) for key, value in obj.items()}\n elif isinstance(obj, list):\n return [convert_keys_to_string(item) for item in obj]\n else:\n return obj\n\n def convert_dataclass_to_dict(obj):\n if isinstance(obj, dict):\n return {key: convert_dataclass_to_dict(value) for key, value in obj.items()}\n elif is_dataclass(obj):\n return obj.to_dict()\n else:\n return obj\n config_dict = convert_keys_to_string(config_dict)\n config_dict = convert_dataclass_to_dict(config_dict)\n return json.dumps(config_dict, indent=2, sort_keys=True) + '\\n'", "docstring": "Serializes this instance to a JSON string.\n\nArgs:\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `GenerationConfig()`\n is serialized to JSON string.\n ignore_metadata (`bool`, *optional*, defaults to `False`):\n Whether to ignore the metadata fields present in the instance\n\nReturns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format."} +{"repo": "nsscache", "function": "def WriteModifyTimestamp(self, timestamp):\n if timestamp is None:\n return True\n self.modify_time = None\n return self._WriteTimestamp(timestamp, self.modify_file)", "docstring": "Convenience method for writing the last modify timestamp.\n\nArgs:\n timestamp: An int with the number of seconds since 
epoch.\n If timestamp is None, performs no action.\n\nReturns:\n A boolean indicating success of the write."} +{"repo": "transformers", "function": "class SquadResult:\n\n def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):\n self.start_logits = start_logits\n self.end_logits = end_logits\n self.unique_id = unique_id\n if start_top_index:\n self.start_top_index = start_top_index\n self.end_top_index = end_top_index\n self.cls_logits = cls_logits", "docstring": "Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.\n\nArgs:\n unique_id: The unique identifier corresponding to that example.\n start_logits: The logits corresponding to the start of the answer\n end_logits: The logits corresponding to the end of the answer"} +{"repo": "keras", "function": "class GlobalAveragePooling1D(BaseGlobalPooling):\n\n def __init__(self, data_format=None, keepdims=False, **kwargs):\n super().__init__(pool_dimensions=1, data_format=data_format, keepdims=keepdims, **kwargs)\n self.supports_masking = True\n\n def call(self, inputs, mask=None):\n steps_axis = 1 if self.data_format == 'channels_last' else 2\n if mask is not None:\n mask = backend.cast(mask, inputs[0].dtype)\n mask = ops.expand_dims(mask, 2 if self.data_format == 'channels_last' else 1)\n inputs *= mask\n return ops.sum(inputs, axis=steps_axis, keepdims=self.keepdims) / ops.sum(mask, axis=steps_axis, keepdims=self.keepdims)\n else:\n return ops.mean(inputs, axis=steps_axis, keepdims=self.keepdims)\n\n def compute_mask(self, inputs, mask=None):\n return None", "docstring": "Global average pooling operation for temporal data.\n\nArgs:\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, steps, features)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, features, steps)`. It defaults to the `image_data_format`\n value found in your Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be `\"channels_last\"`.\n keepdims: A boolean, whether to keep the temporal dimension or not.\n If `keepdims` is `False` (default), the rank of the tensor is\n reduced for spatial dimensions. 
If `keepdims` is `True`, the\n temporal dimension is retained with length 1.\n The behavior is the same as for `tf.reduce_mean` or `np.mean`.\n\nCall arguments:\n inputs: A 3D tensor.\n mask: Binary tensor of shape `(batch_size, steps)` indicating whether\n a given step should be masked (excluded from the average).\n\nInput shape:\n\n- If `data_format='channels_last'`:\n 3D tensor with shape:\n `(batch_size, steps, features)`\n- If `data_format='channels_first'`:\n 3D tensor with shape:\n `(batch_size, features, steps)`\n\nOutput shape:\n\n- If `keepdims=False`:\n 2D tensor with shape `(batch_size, features)`.\n- If `keepdims=True`:\n - If `data_format=\"channels_last\"`:\n 3D tensor with shape `(batch_size, 1, features)`\n - If `data_format=\"channels_first\"`:\n 3D tensor with shape `(batch_size, features, 1)`\n\nExample:\n\n>>> x = np.random.rand(2, 3, 4)\n>>> y = keras.layers.GlobalAveragePooling1D()(x)\n>>> y.shape\n(2, 4)"} +{"repo": "tensorflow", "function": "def data_gen_sig1() -> repr_dataset.RepresentativeDataset:\n for _ in range(4):\n yield {'matmul_input': random_ops.random_uniform(shape=(1, 4))}", "docstring": "Generates tuple-style samples for signature 'sig1'.\n\nThe first element of the tuple identifies the signature key the input data\nis for.\n\nYields:\n Representative sample for 'sig1'."} +{"repo": "transformers", "function": "class OPTDecoder(OPTPreTrainedModel):\n\n def __init__(self, config: OPTConfig):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.layerdrop\n self.padding_idx = config.pad_token_id\n self.max_target_positions = config.max_position_embeddings\n self.vocab_size = config.vocab_size\n self.embed_tokens = nn.Embedding(config.vocab_size, config.word_embed_proj_dim, self.padding_idx)\n self.embed_positions = OPTLearnedPositionalEmbedding(config.max_position_embeddings, config.hidden_size)\n if config.word_embed_proj_dim != config.hidden_size:\n self.project_out = nn.Linear(config.hidden_size, config.word_embed_proj_dim, bias=False)\n else:\n self.project_out = None\n if config.word_embed_proj_dim != config.hidden_size:\n self.project_in = nn.Linear(config.word_embed_proj_dim, config.hidden_size, bias=False)\n else:\n self.project_in = None\n if config.do_layer_norm_before and (not config._remove_final_layer_norm):\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=config.layer_norm_elementwise_affine)\n else:\n self.final_layer_norm = None\n self.layers = nn.ModuleList([OPTDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n self.post_init()\n\n def get_input_embeddings(self):\n return self.embed_tokens\n\n def set_input_embeddings(self, value):\n self.embed_tokens = value\n\n def _update_causal_mask(self, attention_mask: Union[torch.Tensor, 'BlockMask'], input_tensor: torch.Tensor, cache_position: torch.Tensor, past_key_values: Cache, output_attentions: bool=False):\n if self.config._attn_implementation == 'flash_attention_2':\n if attention_mask is not None and (attention_mask == 0.0).any():\n return attention_mask\n return None\n if self.config._attn_implementation == 'flex_attention':\n if isinstance(attention_mask, torch.Tensor):\n attention_mask = make_flex_block_causal_mask(attention_mask)\n return attention_mask\n past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0\n using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False\n if 
self.config._attn_implementation == 'sdpa' and (not using_compilable_cache) and (not output_attentions):\n if AttentionMaskConverter._ignore_causal_mask_sdpa(attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens, is_training=self.training):\n return None\n dtype = input_tensor.dtype\n sequence_length = input_tensor.shape[1]\n if using_compilable_cache:\n target_length = past_key_values.get_max_cache_shape()\n else:\n target_length = attention_mask.shape[-1] if isinstance(attention_mask, torch.Tensor) else past_seen_tokens + sequence_length + 1\n causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(attention_mask, sequence_length=sequence_length, target_length=target_length, dtype=dtype, cache_position=cache_position, batch_size=input_tensor.shape[0])\n if self.config._attn_implementation == 'sdpa' and attention_mask is not None and (attention_mask.device.type in ['cuda', 'xpu', 'npu']) and (not output_attentions):\n min_dtype = torch.finfo(dtype).min\n causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)\n return causal_mask\n\n @staticmethod\n def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n \"\"\"\n Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\n Args:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache,\n to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`torch.Tensor`):\n Batch size.\n \"\"\"\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask\n\n @can_return_tuple\n def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, head_mask: Optional[torch.Tensor]=None, past_key_values: Optional[List[torch.FloatTensor]]=None, inputs_embeds: Optional[torch.FloatTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: 
Optional[bool]=None, return_dict: Optional[bool]=None, position_ids: Optional[torch.LongTensor]=None, cache_position: Optional[torch.Tensor]=None, **kwargs: Unpack[FlashAttentionKwargs]) -> Union[Tuple, BaseModelOutputWithPast]:\n \"\"\"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you\n provide it.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):\n Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of\n shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of\n shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the\n cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those\n that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of\n all `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attention tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,\n config.n_positions - 1]`. For padding use -1.\n\n [What are position IDs?](../glossary#position-ids)\n cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):\n Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,\n this tensor is not affected by padding. 
It is used to update the cache in the correct position and to infer\n the complete sequence length.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if (input_ids is None) ^ (inputs_embeds is not None):\n raise ValueError('You must specify exactly one of input_ids or inputs_embeds')\n if self.gradient_checkpointing and self.training and use_cache:\n logger.warning_once('`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.')\n use_cache = False\n if input_ids is not None:\n input_ids = input_ids.view(-1, input_ids.shape[-1])\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids)\n return_legacy_cache = False\n if use_cache and (not isinstance(past_key_values, Cache)):\n return_legacy_cache = True\n if past_key_values is not None:\n logger.warning_once('Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.53.0. You should pass an instance of `DynamicCache` instead, e.g. `past_key_values=DynamicCache.from_legacy_cache(past_key_values)`.')\n past_key_values = DynamicCache.from_legacy_cache(past_key_values)\n past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0\n if cache_position is None:\n cache_position = torch.arange(past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device)\n if attention_mask is None:\n seq_length = past_seen_tokens + inputs_embeds.shape[1]\n attention_mask = torch.ones(inputs_embeds.shape[0], seq_length, device=inputs_embeds.device)\n causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions)\n if position_ids is None:\n position_ids = torch.cumsum(attention_mask, dim=1)\n position_ids = (position_ids * attention_mask - 1).long()\n position_ids = position_ids[:, past_seen_tokens:]\n pos_embeds = self.embed_positions(attention_mask, past_seen_tokens, position_ids=position_ids)\n if self.project_in is not None:\n inputs_embeds = self.project_in(inputs_embeds)\n hidden_states = inputs_embeds + pos_embeds.to(inputs_embeds.device)\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n next_decoder_cache = None\n for attn_mask, mask_name in zip([head_mask], ['head_mask']):\n if attn_mask is not None:\n if attn_mask.size()[0] != len(self.layers):\n raise ValueError(f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.')\n for idx, decoder_layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n continue\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, causal_mask, head_mask[idx] if head_mask is not None else None, None, output_attentions, use_cache, position_ids, cache_position)\n else:\n layer_outputs = decoder_layer(hidden_states, attention_mask=causal_mask, position_ids=position_ids, layer_head_mask=head_mask[idx] if head_mask is not None else None, 
past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, **kwargs)\n hidden_states = layer_outputs[0]\n if use_cache:\n next_decoder_cache = layer_outputs[2 if output_attentions else 1]\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n if self.final_layer_norm is not None:\n hidden_states = self.final_layer_norm(hidden_states)\n if self.project_out is not None:\n hidden_states = self.project_out(hidden_states)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n next_cache = next_decoder_cache if use_cache else None\n if return_legacy_cache:\n next_cache = next_cache.to_legacy_cache()\n return BaseModelOutputWithPast(last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns)", "docstring": "Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`OPTDecoderLayer`]\n\nArgs:\n config: OPTConfig"} +{"repo": "keras", "function": "class UnitNorm(Constraint):\n\n def __init__(self, axis=0):\n self.axis = axis\n\n def __call__(self, w):\n w = backend.convert_to_tensor(w)\n return w / (backend.epsilon() + ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True)))\n\n def get_config(self):\n return {'axis': self.axis}", "docstring": "Constrains the weights incident to each hidden unit to have unit norm.\n\nArgs:\n axis: integer, axis along which to calculate weight norms.\n For instance, in a `Dense` layer the weight matrix\n has shape `(input_dim, output_dim)`,\n set `axis` to `0` to constrain each weight vector\n of length `(input_dim,)`.\n In a `Conv2D` layer with `data_format=\"channels_last\"`,\n the weight tensor has shape\n `(rows, cols, input_depth, output_depth)`,\n set `axis` to `[0, 1, 2]`\n to constrain the weights of each filter tensor of size\n `(rows, cols, input_depth)`."} +{"repo": "transformers", "function": "def get_image_features(self, pixel_values: Optional[torch.FloatTensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, interpolate_pos_encoding: bool=False) -> torch.FloatTensor:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n vision_outputs: BaseModelOutputWithPooling = self.vision_model(pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding)\n pooled_output = vision_outputs.pooler_output\n image_features = self.visual_projection(pooled_output)\n return image_features", "docstring": "Returns:\n image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by\n applying the projection layer to the pooled output of [`CLIPVisionModel`].\n\nExamples:\n\n```python\n>>> from PIL import Image\n>>> import requests\n>>> from transformers import AutoProcessor, CLIPModel\n\n>>> model = CLIPModel.from_pretrained(\"openai/clip-vit-base-patch32\")\n>>> processor = AutoProcessor.from_pretrained(\"openai/clip-vit-base-patch32\")\n\n>>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n>>> image = Image.open(requests.get(url, stream=True).raw)\n\n>>> inputs = processor(images=image, return_tensors=\"pt\")\n\n>>> image_features = model.get_image_features(**inputs)\n```"} +{"repo": "transformers", "function": "def 
get_optimal_tiled_canvas(image_height: int, image_width: int, max_image_tiles: int, tile_size: int) -> Tuple[int, int]:\n possible_tile_arrangements = get_all_supported_aspect_ratios(max_image_tiles)\n possible_canvas_sizes = np.array(possible_tile_arrangements) * tile_size\n target_heights, target_widths = np.array(possible_canvas_sizes).T\n scale_h = target_heights / image_height\n scale_w = target_widths / image_width\n scales = np.where(scale_w > scale_h, scale_h, scale_w)\n upscaling_options = scales[scales >= 1]\n if len(upscaling_options) > 0:\n selected_scale = np.min(upscaling_options)\n else:\n downscaling_options = scales[scales < 1]\n selected_scale = np.max(downscaling_options)\n chosen_canvas = possible_canvas_sizes[scales == selected_scale]\n if len(chosen_canvas) > 1:\n areas = chosen_canvas[:, 0] * chosen_canvas[:, 1]\n optimal_idx = np.argmin(areas)\n optimal_canvas = chosen_canvas[optimal_idx]\n else:\n optimal_canvas = chosen_canvas[0]\n return optimal_canvas", "docstring": "Determines the best canvas based on image and tile size and maximum number of tiles.\n\nFirst, calculates possible resolutions based on the maximum number of tiles and tile size.\nFor example for max_image_tiles=2, tile_size=100, possible tile arrangements are:\n[(1, 1), (1, 2), (2, 1)] and corresponding canvas sizes are:\n[(100, 100), (100, 200), (200, 100)]\n\nFor each possible resolution, calculates the scaling factors for\nwidth and height, and selects the smallest one, which is the limiting side.\nE.g. to match the canvas you can upscale height by 2x, and width by 1.5x,\ntherefore, the maximum upscaling you can do is min(2, 1.5) = 1.5.\n\nIf upscaling is possible (any of the scaling factors is greater than 1),\nthen picks the smallest upscaling factor > 1.\n\nIf upscaling is not possible, then picks the largest scaling factor <= 1, i.e.\nreduce downscaling as much as possible.\n\nIf there are multiple resolutions with the same max scale, we pick the one with the lowest area,\nto minimize padding. 
E.g., the same image can be upscaled to 224x224 and 224x448, but the latter\nhas more padding.\n\nExample of canvases made from tiles:\n\nTo visualize how the image can fit onto different tile grids, let's try fitting an ASCII cat into the tiles.\n\nHere's an ASCII cat image you want to fit into the tiles:\n\n /\\_/\\\n ( o.o )\n > ^ <\n\nIf `num_tiles=6`, possible tile grids would look like this:\n\n**2x3 Canvas (2 tiles wide, 3 tiles tall)**: -> total of 6 tiles\n+-------+-------+\n| /\\_/\\ | 0 | <- Cat image split across two tiles horizontally\n+-------+-------+\n| > ^ < | 0 | <- Remaining part of the cat occupies the left tile\n+-------+-------+\n|( o.o )| 0 |\n+-------+-------+\n\n**3x2 Canvas (3 tiles wide, 2 tiles tall)**: -> total of 6 tiles\n+-------+-------+-------+\n| /\\_/\\ |( o.o )| 0 | <- Cat image occupies the first two tiles, 1 tile remains empty\n+-------+-------+-------+\n| > ^ < | 0 | 0 | <- Remaining part of the cat occupies the left tile\n+-------+-------+-------+\n\n**1x6 Canvas (1 tile wide, 6 tiles tall)**: -> total of 6 tiles\n+-------+\n| /\\_/\\ | <- Top part of the cat\n+-------+\n|( o.o )| <- Middle part of the cat\n+-------+\n| > ^ < | <- Bottom part of the cat\n+-------+\n| 0 |\n+-------+\n| 0 |\n+-------+\n| 0 |\n+-------+\n\nGiven that the tiles you get depend on the chosen aspect ratio, you have to add\nembedding in the modeling code to help it know if it got a 3x2 or a 1x6 or a 2x3\naspect ratio.\n\nThe function tests these arrangements to find the smallest canvas where the image fits.\nIf multiple canvases fit, it selects the one where the dimensions are closest to the image size.\n\nIn this case the first canvas is the closest to the original image.\n\nYou then feed all of the tiles to the model:\n\n +-------+-------+-------+-------+-------+-------+\n- | /\\_/\\ |( o.o )| > ^ < | 0 | 0 | 0 | <- Last canvas\n +-------+-------+-------+-------+-------+-------+\n\n +-------+-------+-------+-------+-------+-------+\n- | /\\_/\\ | 0 |( o.o )| 0 | > ^ < | 0 | <- First canvas\n +-------+-------+-------+-------+-------+-------+\n\n +-------+-------+-------+-------+-------+-------+\n- | /\\_/\\ |( o.o )| 0 | > ^ < | 0 | 0 | <- second canvas\n +-------+-------+-------+-------+-------+-------+\n\nFor each tile, you have num_channels (usually RGB so 3), tile_width, tile_height\n\nArgs:\n image_height (`int`):\n The height of the image.\n image_width (`int`):\n The width of the image.\n max_image_tiles (`int`):\n The maximum number of tiles any image can be split into.\n tile_size (`int`):\n The tile size.\n\nReturns:\n `Tuple[int, int]`: The best canvas resolution [height, width] for the given image."} +{"repo": "transformers", "function": "def generate(self, past_values: torch.Tensor, past_observed_mask: Optional[torch.Tensor]=None) -> SamplePatchTSTOutput:\n num_parallel_samples = self.config.num_parallel_samples\n outputs = self(past_values=past_values, target_values=None, past_observed_mask=past_observed_mask, output_hidden_states=False)\n distribution = self.distribution_output.distribution(outputs.regression_outputs)\n samples = [distribution.sample() for _ in range(num_parallel_samples)]\n samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets)\n return SamplePatchTSTOutput(sequences=samples)", "docstring": "Generate sequences of sample predictions from a model with a probability distribution head.\n\nParameters:\n past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`):\n Past 
values of the time series that serve as context in order to predict the future.\n past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):\n Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected\n in `[0, 1]`:\n\n - 1 for values that are **observed**,\n - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).\n\nReturns:\n [`SamplePatchTSTOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of\n samples, num_targets)`."}
+{"repo": "keras", "function": "def _save_model(self, epoch, batch, logs):\n filepath = self._get_file_path(epoch, batch, logs)\n try:\n if self._should_save_model(epoch, batch, logs, filepath):\n dirname = os.path.dirname(filepath)\n if dirname and (not file_utils.exists(dirname)):\n file_utils.makedirs(dirname)\n if self.save_weights_only:\n self.model.save_weights(filepath, overwrite=True)\n else:\n self.model.save(filepath, overwrite=True)\n except IsADirectoryError:\n raise IOError(f'Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {filepath}')\n except IOError as e:\n if 'is a directory' in str(e.args[0]).lower():\n raise IOError(f'Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {filepath}')\n raise e", "docstring": "Saves the model.\n\nArgs:\n epoch: the epoch this iteration is in.\n batch: the batch this iteration is in. `None` if the `save_freq`\n is set to `\"epoch\"`.\n logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`."}
+{"repo": "transformers", "function": "class DetrDecoder(DetrPreTrainedModel):\n\n def __init__(self, config: DetrConfig):\n super().__init__(config)\n self.dropout = config.dropout\n self.layerdrop = config.decoder_layerdrop\n self.layers = nn.ModuleList([DetrDecoderLayer(config) for _ in range(config.decoder_layers)])\n self.layernorm = nn.LayerNorm(config.d_model)\n self.gradient_checkpointing = False\n self.post_init()\n\n def forward(self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, object_queries=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None):\n \"\"\"\n Args:\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n The query embeddings that are passed into the decoder.\n\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`:\n\n - 1 for queries that are **not masked**,\n - 0 for queries that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n of the decoder.\n encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):\n Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected\n in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. 
**masked**).\n\n object_queries (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Object queries that are added to the queries and keys in each cross-attention layer.\n query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):\n Position embeddings that are added to the values and keys in each self-attention layer.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if inputs_embeds is not None:\n hidden_states = inputs_embeds\n input_shape = inputs_embeds.size()[:-1]\n combined_attention_mask = None\n if attention_mask is not None and combined_attention_mask is not None:\n combined_attention_mask = combined_attention_mask + _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])\n if encoder_hidden_states is not None and encoder_attention_mask is not None:\n encoder_attention_mask = _prepare_4d_attention_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])\n intermediate = () if self.config.auxiliary_loss else None\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None\n for idx, decoder_layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if self.training:\n dropout_probability = torch.rand([])\n if dropout_probability < self.layerdrop:\n continue\n if self.gradient_checkpointing and self.training:\n layer_outputs = self._gradient_checkpointing_func(decoder_layer.__call__, hidden_states, combined_attention_mask, encoder_hidden_states, encoder_attention_mask, None)\n else:\n layer_outputs = decoder_layer(hidden_states, attention_mask=combined_attention_mask, object_queries=object_queries, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if self.config.auxiliary_loss:\n hidden_states = self.layernorm(hidden_states)\n intermediate += (hidden_states,)\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n if encoder_hidden_states is not None:\n all_cross_attentions += (layer_outputs[2],)\n hidden_states = self.layernorm(hidden_states)\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n if self.config.auxiliary_loss:\n intermediate = torch.stack(intermediate)\n if not return_dict:\n return tuple((v for v in [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate] if v is not None))\n return DetrDecoderOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, 
cross_attentions=all_cross_attentions, intermediate_hidden_states=intermediate)", "docstring": "Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DetrDecoderLayer`].\n\nThe decoder updates the query embeddings through multiple self-attention and cross-attention layers.\n\nSome small tweaks for DETR:\n\n- object_queries and query_position_embeddings are added to the forward pass.\n- if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers.\n\nArgs:\n config: DetrConfig"}
+{"repo": "keras", "function": "def add_weight(self, shape=None, initializer=None, dtype=None, trainable=True, autocast=True, regularizer=None, constraint=None, aggregation='none', overwrite_with_gradient=False, name=None):\n self._check_super_called()\n if shape is None:\n shape = ()\n if dtype is not None:\n dtype = backend.standardize_dtype(dtype)\n else:\n dtype = self.variable_dtype\n if initializer is None:\n if 'float' in dtype:\n initializer = 'glorot_uniform'\n else:\n initializer = 'zeros'\n initializer = initializers.get(initializer)\n with backend.name_scope(self.name, caller=self):\n variable = backend.Variable(initializer=initializer, shape=shape, dtype=dtype, trainable=trainable, autocast=autocast, aggregation=aggregation, name=name)\n variable.regularizer = regularizers.get(regularizer)\n variable.constraint = constraints.get(constraint)\n variable.overwrite_with_gradient = overwrite_with_gradient\n self._track_variable(variable)\n return variable", "docstring": "Add a weight variable to the layer.\n\nArgs:\n shape: Shape tuple for the variable. Must be fully-defined\n (no `None` entries). Defaults to `()` (scalar) if unspecified.\n initializer: Initializer object to use to populate the initial\n variable value, or string name of a built-in initializer\n (e.g. `\"random_normal\"`). If unspecified, defaults to\n `\"glorot_uniform\"` for floating-point variables and to `\"zeros\"`\n for all other types (e.g. int, bool).\n dtype: Dtype of the variable to create, e.g. `\"float32\"`. If\n unspecified, defaults to the layer's variable dtype\n (which itself defaults to `\"float32\"` if unspecified).\n trainable: Boolean, whether the variable should be trainable via\n backprop or whether its updates are managed manually. Defaults\n to `True`.\n autocast: Boolean, whether to autocast the layer's variables when\n accessing them. Defaults to `True`.\n regularizer: Regularizer object to call to apply penalty on the\n weight. These penalties are summed into the loss function\n during optimization. Defaults to `None`.\n constraint: Constraint object to call on the variable after any\n optimizer update, or string name of a built-in constraint.\n Defaults to `None`.\n aggregation: Optional string, one of `None`, `\"none\"`, `\"mean\"`,\n `\"sum\"` or `\"only_first_replica\"`. Annotates the variable with\n the type of multi-replica aggregation to be used for this\n variable when writing custom data parallel training loops.\n Defaults to `\"none\"`.\n overwrite_with_gradient: Boolean, whether to overwrite the variable\n with the computed gradient. This is useful for float8 training.\n Defaults to `False`.\n name: String name of the variable. 
Useful for debugging purposes."} +{"repo": "beam", "function": "def to_hashable_table_ref(table_ref_elem_kv: Tuple[Union[str, TableReference], V]) -> Tuple[str, V]:\n table_ref = table_ref_elem_kv[0]\n hashable_table_ref = get_hashable_destination(table_ref)\n return (hashable_table_ref, table_ref_elem_kv[1])", "docstring": "Turns the key of the input tuple to its string representation. The key\nshould be either a string or a TableReference.\n\nArgs:\n table_ref_elem_kv: A tuple of table reference and element.\n\nReturns:\n A tuple of string representation of input table and input element."} +{"repo": "starthinker", "function": "def recipe_bigquery_to_sheet(config, auth_read, sheet, tab, range, dataset, query, legacy):\n bigquery(config, {'auth': auth_read, 'from': {'auth': 'service', 'dataset': dataset, 'query': query, 'legacy': legacy}, 'to': {'sheet': sheet, 'tab': tab, 'range': range}})", "docstring": "Copy the contents of a query into a Google Sheet.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n sheet (string) - Either sheet url or sheet name.\n tab (string) - Name of the tab where to put the data.\n range (string) - Range in the sheet to place the data, leave blank for whole sheet.\n dataset (string) - Existing BigQuery dataset.\n query (text) - Query to pull data from the table.\n legacy (boolean) - Use Legacy SQL"} +{"repo": "transformers", "function": "def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n text_outputs = self.text_model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)\n text_embeds = text_outputs[1]\n text_embeds = self.text_projection(text_embeds)\n return text_embeds", "docstring": "Returns:\n text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by\n applying the projection layer to the pooled output of [`XCLIPTextModel`].\n\nExamples:\n\n```python\n>>> from transformers import AutoTokenizer, AutoModel\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"microsoft/xclip-base-patch32\")\n>>> model = AutoModel.from_pretrained(\"microsoft/xclip-base-patch32\")\n\n>>> inputs = tokenizer([\"a photo of a cat\", \"a photo of a dog\"], padding=True, return_tensors=\"pt\")\n>>> text_features = model.get_text_features(**inputs)\n```"} +{"repo": "tf-quant-finance", "function": "def douglas_adi_scheme(theta):\n if theta < 0 or theta > 1:\n raise ValueError('Theta should be in the interval [0, 1].')\n\n def _marching_scheme(value_grid, t1, t2, equation_params_fn, append_boundaries_fn, n_dims, has_default_lower_boundary, has_default_upper_boundary):\n \"\"\"Constructs the Douglas ADI time marching scheme.\"\"\"\n current_grid = value_grid\n matrix_params_t1, inhomog_terms_t1 = equation_params_fn(t1)\n matrix_params_t2, inhomog_terms_t2 = equation_params_fn(t2)\n value_grid_with_boundaries = 
append_boundaries_fn(value_grid)\n for i in range(n_dims - 1):\n for j in range(i + 1, n_dims):\n mixed_term = matrix_params_t1[i][j]\n if mixed_term is not None:\n current_grid += _apply_mixed_term_explicitly(value_grid_with_boundaries, mixed_term, t2 - t1, i, j, has_default_lower_boundary, has_default_upper_boundary, n_dims)\n explicit_contributions = []\n for i in range(n_dims):\n superdiag, diag, subdiag = (matrix_params_t1[i][i][d] for d in range(3))\n contribution = _apply_tridiag_matrix_explicitly(value_grid, superdiag, diag, subdiag, i, n_dims) * (t2 - t1)\n explicit_contributions.append(contribution)\n current_grid += contribution\n for inhomog_term in inhomog_terms_t1:\n current_grid += inhomog_term * (t2 - t1)\n if theta == 0:\n return current_grid\n for i in range(n_dims):\n inhomog_term_delta = inhomog_terms_t2[i] - inhomog_terms_t1[i]\n superdiag, diag, subdiag = (matrix_params_t2[i][i][d] for d in range(3))\n current_grid = _apply_correction(theta, current_grid, explicit_contributions[i], superdiag, diag, subdiag, inhomog_term_delta, t1, t2, i, n_dims)\n return current_grid\n return _marching_scheme", "docstring": "Applies Douglas time marching scheme (see [1] and Eq. 3.1 in [2]).\n\nTime marching schemes solve the space-discretized equation\n`du_inner/dt = A(t) u_inner(t) + A_mixed u(t) + b(t)`,\nwhere `u`, `u_inner` and `b` are vectors and `A`, `A_mixed` are matrices.\n`u_inner` is `u` with all boundaries having Robin boundary conditions\ntrimmed and `A_mixed` are contributions of mixed derivative terms.\nSee more details in multidim_parabolic_equation_stepper.py.\n\nIn Douglas scheme (as well as other ADI schemes), the matrix `A` is\nrepresented as sum `A = sum_i A_i`. `A_i` is the contribution of\nterms with partial derivatives w.r.t. dimension `i`. The shift term is split\nevenly between `A_i`. Similarly, inhomogeneous term is represented as sum\n`b = sum_i b_i`, where `b_i` comes from boundary conditions on boundary\northogonal to dimension `i`.\n\nGiven the current values vector u(t1), the step is defined as follows\n(using the notation of Eq. 3.1 in [2]):\n`Y_0 = (1 + (A(t1) + A_mixed(t1)) dt) U_{n-1} + b(t1) dt`,\n`Y_j = Y_{j-1} + theta dt (A_j(t2) Y_j - A_j(t1) U_{n-1} + b_j(t2) - b_j(t1))`\nfor each spatial dimension `j`, and\n`U_n = Y_{n_dims-1}`.\n\nHere the parameter `theta` is a non-negative number, `U_{n-1} = u(t1)`,\n`U_n = u(t2)`, and `dt = t2 - t1`.\n\nNote: Douglas scheme is only first-order accurate if mixed terms are\npresent. More advanced schemes, such as Craig-Sneyd scheme, are needed to\nachieve the second-order accuracy.\n\n#### References:\n[1] Douglas Jr., Jim (1962), \"Alternating direction methods for three space\n variables\", Numerische Mathematik, 4 (1): 41-63\n[2] Tinne Haentjens, Karek J. in't Hout. ADI finite difference schemes for\n the Heston-Hull-White PDE. https://arxiv.org/abs/1111.4087\n\nArgs:\n theta: Number between 0 and 1 (see the step definition above). `theta = 0`\n corresponds to fully-explicit scheme.\n\nReturns:\n A callable consumes the following arguments by keyword:\n 1. inner_value_grid: Grid of solution values at the current time of\n the same `dtype` as `value_grid` and shape of\n `batch_shape` + `[d_1 - 2 + n_def_i , ..., d_n -2 + n_def_i]`\n where `d_i` is the number of space discretization points along dimension\n `i` and `n_def_i` is the number of default boundaries along that\n dimension. `n_def_i` takes values 0, 1, 2 (default boundary),\n 2. t1: Time before the step.\n 3. t2: Time after the step.\n 4. 
equation_params_fn: A callable that takes a scalar `Tensor` argument\n representing time, and returns a tuple of two objects:\n * First object is a nested list `L` such that `L[i][i]` is a tuple of\n three `Tensor`s, main, upper, and lower diagonal of the tridiagonal\n matrix `A` in a direction `i`. Each element `L[i][j]` corresponds\n to the mixed terms and is either None (meaning there are no mixed\n terms present) or a tuple of `Tensor`s representing contributions of\n mixed terms in directions (i + 1, j + 1), (i + 1, j - 1),\n (i - 1, j + 1), and (i - 1, j - 1).\n * The second object is a tuple of inhomogeneous terms for each\n dimension.\n All of the `Tensor`s are of the same `dtype` as `inner_value_grid` and\n of a shape broadcastable with the shape of `inner_value_grid`.\n 5. A callable that accepts a `Tensor` of shape `inner_value_grid` and\n appends boundaries according to the boundary conditions, i.e. transforms\n `u_inner` to `u`.\n 6. n_dims: A Python integer, the spatial dimension of the PDE.\n 7. has_default_lower_boundary: A Python list of booleans of length\n `n_dims`. List indices enumerate the dimensions with `True` values\n marking default lower boundary condition along corresponding dimensions,\n and `False` values indicating Robin boundary conditions.\n 8. has_default_upper_boundary: Similar to has_default_lower_boundary, but\n for upper boundaries.\n The callable returns a `Tensor` of the same shape and `dtype` as\n `values_grid` and represents an approximate solution `u(t2)`."}
+{"repo": "weather-tools", "function": "class Regrid(ToDataSink):\n output_path: str\n regrid_kwargs: t.Dict\n force_regrid: bool = False\n to_netcdf: bool = False\n zarr_input_chunks: t.Optional[t.Dict] = None\n zarr_output_chunks: t.Optional[t.Dict] = None\n\n @classmethod\n def add_parser_arguments(cls, subparser: argparse.ArgumentParser) -> None:\n subparser.add_argument('-o', '--output_path', type=str, required=True, help='The destination path for the regridded files.')\n subparser.add_argument('-k', '--regrid_kwargs', type=json.loads, default='{\"grid\": [0.25, 0.25]}', help='Keyword-args to pass into `metview.regrid()` in the form of a JSON string. Will default to \\'{\"grid\": [0.25, 0.25]}\\'.')\n subparser.add_argument('--force_regrid', action='store_true', default=False, help='Force regrid all files even if file is present at output_path.')\n subparser.add_argument('--to_netcdf', action='store_true', default=False, help='Write output file in NetCDF via XArray. Default: off')\n subparser.add_argument('-zi', '--zarr_input_chunks', type=json.loads, default=None, help='When reading a Zarr, break up the data into chunks. Takes a JSON string.')\n subparser.add_argument('-zo', '--zarr_output_chunks', type=json.loads, default=None, help='When writing a Zarr, write the data with chunks. 
Takes a JSON string.')\n\n @classmethod\n def validate_arguments(cls, known_args: argparse.Namespace, pipeline_options: t.List[str]) -> None:\n if known_args.zarr and known_args.to_netcdf:\n raise ValueError('only Zarr-to-Zarr regridding is allowed!')\n if not known_args.zarr and (known_args.zarr_input_chunks or known_args.zarr_output_chunks):\n raise ValueError('chunks can only be set when input URI is a Zarr.')\n if known_args.zarr:\n _, out_ext = os.path.splitext(known_args.output_path)\n if out_ext not in ['', '.zarr']:\n warnings.warn('if input is a Zarr, the output_path must also be a Zarr.', RuntimeWarning)\n\n def target_from(self, uri: str) -> str:\n \"\"\"Create the target path from the input URI.\n\n In the case of Zarr, the output will be treated like a valid path.\n For NetCDF, this will change the extension to '.nc'.\n \"\"\"\n if self.zarr:\n return self.output_path\n base = os.path.basename(uri)\n in_dest = os.path.join(self.output_path, base)\n if not self.to_netcdf:\n return in_dest\n no_ext, _ = os.path.splitext(in_dest)\n return f'{no_ext}.nc'\n\n def is_grib_file_corrupt(self, local_grib: str) -> bool:\n try:\n subprocess.check_output(['grib_ls', local_grib])\n return False\n except subprocess.CalledProcessError as e:\n logger.info(f'Encountered error while reading GRIB: {e}.')\n return True\n\n def apply(self, uri: str) -> None:\n logger.info(f'Regridding from {uri!r} to {self.target_from(uri)!r}.')\n if self.dry_run:\n return\n if path_exists(self.target_from(uri), self.force_regrid):\n logger.info(f'Skipping {uri}.')\n return\n with _metview_op():\n try:\n logger.info(f'Copying grib from {uri!r} to local disk.')\n with open_local(uri) as local_grib:\n logger.info(f\"Checking for {uri}'s validity...\")\n if self.is_grib_file_corrupt(local_grib):\n logger.error(f'Corrupt GRIB file found: {uri}.')\n return\n logger.info(f'No issues found with {uri}.')\n logger.info(f'Regridding {uri!r}.')\n fs = mv.bindings.Fieldset(path=local_grib)\n fieldset = mv.regrid(data=fs, **self.regrid_kwargs)\n with tempfile.NamedTemporaryFile() as src:\n logger.info(f'Writing {self.target_from(uri)!r} to local disk.')\n if self.to_netcdf:\n fieldset.to_dataset().to_netcdf(src.name)\n else:\n mv.write(src.name, fieldset)\n src.flush()\n _clear_metview()\n logger.info(f'Uploading {self.target_from(uri)!r}.')\n copy(src.name, self.target_from(uri))\n except Exception as e:\n logger.info(f'Regrid failed for {uri!r}. Error: {str(e)}')\n\n def expand(self, paths):\n if not self.zarr:\n paths | beam.Map(self.apply)\n return\n source_ds = xr.open_zarr(self.first_uri, **self.zarr_kwargs)\n regrid_op = RegridChunk(self.regrid_kwargs, self.zarr_input_chunks)\n regridded = paths | xbeam.DatasetToChunks(source_ds, self.zarr_input_chunks) | 'RegridChunk' >> regrid_op\n tmpl = paths | beam.Create([source_ds]) | 'CalcZarrTemplate' >> beam.Map(regrid_op.template)\n to_write = regridded\n if self.zarr_output_chunks:\n to_write |= xbeam.ConsolidateChunks(self.zarr_output_chunks)\n to_write | xbeam.ChunksToZarr(self.output_path, beam.pvalue.AsSingleton(tmpl), self.zarr_output_chunks)", "docstring": "Regrid data using MetView.\n\nSee https://metview.readthedocs.io/en/latest/metview/using_metview/regrid_intro.html\nfor an in-depth intro on regridding with MetView.\n\nAttributes:\n output_path: URI for regridding target. 
Can be a glob pattern of NetCDF or Grib files; optionally,\n a Zarr corpus is also supported.\n regrid_kwargs: A dictionary of keyword-args to be passed into `mv.regrid()` (excluding the dataset).\n to_netcdf: When set, the raw output will be written as NetCDF. Cannot be used with Zarr datasets.\n zarr_input_chunks: (Optional) When regridding Zarr data, how the input dataset should be chunked upon open.\n zarr_output_chunks: (Optional, recommended) When regridding Zarr data, how the output Zarr dataset should be\n divided into chunks."}
+{"repo": "transformers", "function": "class JanusProcessor(ProcessorMixin):\n attributes = ['image_processor', 'tokenizer']\n image_processor_class = 'JanusImageProcessor'\n tokenizer_class = 'LlamaTokenizerFast'\n\n def __init__(self, image_processor, tokenizer, chat_template=None, use_default_system_prompt=False, **kwargs):\n self.num_image_tokens = 576\n self.image_token = tokenizer.image_token\n self.image_start_token = tokenizer.boi_token\n self.image_end_token = tokenizer.eoi_token\n self.use_default_system_prompt = use_default_system_prompt\n super().__init__(image_processor, tokenizer, chat_template=chat_template)\n\n def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]=None, images: ImageInput=None, videos=None, audio=None, **kwargs: Unpack[JanusProcessorKwargs]) -> BatchFeature:\n \"\"\"\n Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`\n and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode\n the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to\n JanusImageProcessor's [`~JanusImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring\n of the above two methods for more information.\n\n Args:\n text (`str`, `List[str]`, `List[List[str]]`):\n The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings\n (pretokenized string). If the sequences are provided as a list of strings (pretokenized), you must set\n `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).\n images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):\n The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch\n tensor. Both channels-first and channels-last formats are supported.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors of a particular framework. Acceptable values are:\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return NumPy `np.ndarray` objects.\n - `'jax'`: Return JAX `jnp.ndarray` objects.\n\n Returns:\n [`BatchFeature`]: A [`BatchFeature`] with the following fields:\n\n - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.\n - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when\n `return_attention_mask=True` or if *\"attention_mask\"* is in `self.model_input_names` and if `text` is not\n `None`).\n - **pixel_values** -- Pixel values to be fed to a model. 
Returned when `images` is not `None`.\n \"\"\"\n output_kwargs = self._merge_kwargs(JanusProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs)\n if text is None and images is None:\n raise ValueError('You must specify either text or images.')\n if text is not None:\n if isinstance(text, str):\n text = [text]\n elif not (isinstance(text, (list, tuple)) and all((isinstance(t, str) for t in text))):\n raise ValueError('Invalid input text. Please provide a string, or a list of strings')\n generation_mode = output_kwargs['text_kwargs'].pop('generation_mode')\n prompt_strings = []\n one_img_tokens = self.image_start_token + self.image_token * self.num_image_tokens + self.image_end_token\n for prompt in text:\n prompt = prompt.replace(self.image_token, one_img_tokens)\n if self.use_default_system_prompt and generation_mode == 'text':\n prompt = DEFAULT_SYSTEM_PROMPT + prompt\n if generation_mode == 'image':\n prompt += self.image_start_token\n prompt_strings.append(prompt)\n data = self.tokenizer(prompt_strings, **output_kwargs['text_kwargs'])\n if images is not None and generation_mode != 'image':\n data['pixel_values'] = self.image_processor(images=images, **output_kwargs['images_kwargs'])['pixel_values']\n return BatchFeature(data=data)\n\n def batch_decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, **kwargs)\n\n def decode(self, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to\n the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, **kwargs)\n\n def postprocess(self, images: ImageInput, **kwargs):\n \"\"\"\n Forwards all arguments to the image processor's `postprocess` method.\n Refer to the original method's docstring for more details.\n \"\"\"\n return self.image_processor.postprocess(images, **kwargs)\n\n @property\n def model_input_names(self):\n tokenizer_input_names = self.tokenizer.model_input_names\n image_processor_input_names = self.image_processor.model_input_names\n return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))", "docstring": "Constructs a Janus processor which wraps a Janus Image Processor and a Llama tokenizer into a single processor.\n\n[`JanusProcessor`] offers all the functionalities of [`JanusImageProcessor`] and [`LlamaTokenizerFast`]. 
See the\n[`~JanusProcessor.__call__`] and [`~JanusProcessor.decode`] for more information.\n\nArgs:\n image_processor ([`JanusImageProcessor`]):\n The image processor is a required input.\n tokenizer ([`LlamaTokenizerFast`]):\n The tokenizer is a required input.\n chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages\n in a chat into a tokenizable string.\n use_default_system_prompt (`bool`, *optional*, defaults to `False`):\n Whether to use the default system prompt for text generation."}
+{"repo": "transformers", "function": "def resize_annotation(annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float=0.5, resample: PILImageResampling=PILImageResampling.NEAREST):\n ratios = tuple((float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)))\n ratio_height, ratio_width = ratios\n new_annotation = {}\n new_annotation['size'] = target_size\n for key, value in annotation.items():\n if key == 'boxes':\n boxes = value\n scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)\n new_annotation['boxes'] = scaled_boxes\n elif key == 'area':\n area = value\n scaled_area = area * (ratio_width * ratio_height)\n new_annotation['area'] = scaled_area\n elif key == 'masks':\n masks = value[:, None]\n masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])\n masks = masks.astype(np.float32)\n masks = masks[:, 0] > threshold\n new_annotation['masks'] = masks\n elif key == 'size':\n new_annotation['size'] = target_size\n else:\n new_annotation[key] = value\n return new_annotation", "docstring": "Resizes an annotation to a target size.\n\nArgs:\n annotation (`Dict[str, Any]`):\n The annotation dictionary.\n orig_size (`Tuple[int, int]`):\n The original size of the input image.\n target_size (`Tuple[int, int]`):\n The target size of the image, as returned by the preprocessing `resize` step.\n threshold (`float`, *optional*, defaults to 0.5):\n The threshold used to binarize the segmentation masks.\n resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):\n The resampling filter to use when resizing the masks."}
+{"repo": "transformers", "function": "class MPNetConfig(PretrainedConfig):\n model_type = 'mpnet'\n\n def __init__(self, vocab_size=30527, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, relative_attention_num_buckets=32, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):\n super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.relative_attention_num_buckets = relative_attention_num_buckets", "docstring": "This is the configuration class to store the configuration of a [`MPNetModel`] or a [`TFMPNetModel`]. 
It is used to\ninstantiate a MPNet model according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the MPNet\n[microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 30527):\n Vocabulary size of the MPNet model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`MPNetModel`] or [`TFMPNetModel`].\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `Callable`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n hidden_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n max_position_embeddings (`int`, *optional*, defaults to 512):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n relative_attention_num_buckets (`int`, *optional*, defaults to 32):\n The number of buckets to use for each attention layer.\n\nExamples:\n\n```python\n>>> from transformers import MPNetModel, MPNetConfig\n\n>>> # Initializing a MPNet mpnet-base style configuration\n>>> configuration = MPNetConfig()\n\n>>> # Initializing a model from the mpnet-base style configuration\n>>> model = MPNetModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "class InstructBlipVideoEncoder(nn.Module):\n\n def __init__(self, config: InstructBlipVideoConfig):\n super().__init__()\n self.config = config\n self.layers = nn.ModuleList([InstructBlipVideoEncoderLayer(config) for _ in range(config.num_hidden_layers)])\n self.gradient_checkpointing = False\n\n def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:\n \"\"\"\n Args:\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Embedded representation of the inputs. 
Should be float, not int tokens.\n attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n encoder_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n hidden_states = inputs_embeds\n for idx, encoder_layer in enumerate(self.layers):\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions)\n hidden_states = layer_outputs[0]\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[1],)\n if output_hidden_states:\n encoder_states = encoder_states + (hidden_states,)\n if not return_dict:\n return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))\n return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)", "docstring": "Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is an\n[`InstructBlipVideoEncoderLayer`].\n\nArgs:\n config (`InstructBlipVideoConfig`):\n The corresponding vision configuration for the `InstructBlipVideoEncoder`."}
+{"repo": "tensorflow", "function": "def _get_var_info(var, prev_tensor_name=None):\n if checkpoint_utils._is_variable(var):\n current_var_name = _infer_var_name([var])\n elif isinstance(var, list) and all((checkpoint_utils._is_variable(v) for v in var)):\n current_var_name = _infer_var_name(var)\n elif isinstance(var, variables_lib.PartitionedVariable):\n current_var_name = _infer_var_name([var])\n var = var._get_variable_list()\n else:\n raise TypeError('var MUST be one of the following: a Variable, list of Variable or PartitionedVariable, but is {}'.format(type(var)))\n if not prev_tensor_name:\n prev_tensor_name = current_var_name\n return (prev_tensor_name, var)", "docstring": "Helper method for standardizing Variable and naming.\n\nArgs:\n var: Current graph's variable that needs to be warm-started (initialized).\n Can be either of the following: (i) `Variable` (ii) `ResourceVariable`\n (iii) list of `Variable`: The list must contain slices of the same larger\n variable. (iv) `PartitionedVariable`\n prev_tensor_name: Name of the tensor to look up in the provided `prev_ckpt`. 
If\n None, we lookup tensor with same name as given `var`.\n\nReturns:\n A tuple of the Tensor name and var."} +{"repo": "starthinker", "function": "def recipe_google_ads_segmentology(config, auth_read, customer_id, developer_token, login_id, auth_write, recipe_slug):\n dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug})\n bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': recipe_slug}})\n google_api(config, {'auth': auth_read, 'api': 'googleads', 'version': 'v8', 'function': 'customers.googleAds.search', 'kwargs': {'customerId': customer_id, 'body': {'query': 'SELECT\\n campaign.name,\\n ad_group.name,\\n segments.geo_target_postal_code,\\n metrics.impressions,\\n metrics.clicks,\\n metrics.conversions,\\n metrics.interactions\\n FROM user_location_view '}}, 'headers': {'developer-token': developer_token, 'login-customer-id': login_id}, 'iterate': True, 'results': {'bigquery': {'dataset': recipe_slug, 'table': 'GoogleAds_KPI', 'schema': [{'name': 'userLocationView', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'resourceName', 'type': 'STRING', 'mode': 'NULLABLE'}]}, {'name': 'segments', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'geoTargetPostalCode', 'type': 'STRING', 'mode': 'NULLABLE'}]}, {'name': 'metrics', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'interactions', 'type': 'INTEGER', 'mode': 'NULLABLE'}, {'name': 'impressions', 'type': 'INTEGER', 'mode': 'NULLABLE'}, {'name': 'conversions', 'type': 'INTEGER', 'mode': 'NULLABLE'}, {'name': 'clicks', 'type': 'INTEGER', 'mode': 'NULLABLE'}]}, {'name': 'adGroup', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'}, {'name': 'resourceName', 'type': 'STRING', 'mode': 'NULLABLE'}]}, {'name': 'campaign', 'type': 'RECORD', 'mode': 'NULLABLE', 'fields': [{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'}, {'name': 'resourceName', 'type': 'STRING', 'mode': 'NULLABLE'}]}]}}})\n bigquery(config, {'auth': auth_write, 'from': {'query': 'SELECT\\n campaign.name AS Campaign,\\n adGRoup.name AS Ad_Group,\\n segments.geoTargetPostalCode AS Postal_Code,\\n SAFE_DIVIDE(metrics.impressions, SUM(metrics.impressions) OVER()) AS Impression,\\n SAFE_DIVIDE(metrics.clicks, metrics.impressions) AS Click,\\n SAFE_DIVIDE(metrics.conversions, metrics.impressions) AS Conversion,\\n SAFE_DIVIDE(metrics.interactions, metrics.impressions) AS Interaction,\\n metrics.impressions AS Impressions FROM\\n `{dataset}.GoogleAds_KPI`; ', 'parameters': {'dataset': recipe_slug}, 'legacy': False}, 'to': {'dataset': recipe_slug, 'view': 'GoogleAds_KPI_Normalized'}})\n census(config, {'auth': auth_write, 'normalize': {'census_geography': 'zip_codes', 'census_year': '2018', 'census_span': '5yr'}, 'to': {'dataset': recipe_slug, 'type': 'view'}})\n census(config, {'auth': auth_write, 'correlate': {'join': 'Postal_Code', 'pass': ['Campaign', 'Ad_Group'], 'sum': ['Impressions'], 'correlate': ['Impression', 'Click', 'Conversion', 'Interaction'], 'dataset': recipe_slug, 'table': 'GoogleAds_KPI_Normalized', 'significance': 80}, 'to': {'dataset': recipe_slug, 'type': 'view'}})", "docstring": "GoogleAds funnel analysis using Census data.\n\nArgs:\n auth_read (authentication) - Credentials used for reading data.\n customer_id (string) - Google Ads customer.\n developer_token (string) - Google Ads developer token.\n login_id (string) - Google Ads login.\n auth_write 
(authentication) - Authorization used for writing data.\n recipe_slug (string) - Name of Google BigQuery dataset to create."} +{"repo": "tensorflow", "function": "def extract_image_patches_v2(images, sizes, strides, rates, padding, name=None):\n return gen_array_ops.extract_image_patches(images, sizes, strides, rates, padding, name)", "docstring": "Extract `patches` from `images`.\n\nThis op collects patches from the input image, as if applying a\nconvolution. All extracted patches are stacked in the depth (last) dimension\nof the output.\n\nSpecifically, the op extracts patches of shape `sizes` which are `strides`\napart in the input image. The output is subsampled using the `rates` argument,\nin the same manner as \"atrous\" or \"dilated\" convolutions.\n\nThe result is a 4D tensor which is indexed by batch, row, and column.\n`output[i, x, y]` contains a flattened patch of size `sizes[1], sizes[2]`\nwhich is taken from the input starting at\n`images[i, x*strides[1], y*strides[2]]`.\n\nEach output patch can be reshaped to `sizes[1], sizes[2], depth`, where\n`depth` is `images.shape[3]`.\n\nThe output elements are taken from the input at intervals given by the `rate`\nargument, as in dilated convolutions.\n\nThe `padding` argument has no effect on the size of each patch, it determines\nhow many patches are extracted. If `VALID`, only patches which are fully\ncontained in the input image are included. If `SAME`, all patches whose\nstarting point is inside the input are included, and areas outside the input\ndefault to zero.\n\nExample:\n\n```\n n = 10\n # images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100\n images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]]\n\n # We generate two outputs as follows:\n # 1. 3x3 patches with stride length 5\n # 2. Same as above, but the rate is increased to 2\n tf.image.extract_patches(images=images,\n sizes=[1, 3, 3, 1],\n strides=[1, 5, 5, 1],\n rates=[1, 1, 1, 1],\n padding='VALID')\n\n # Yields:\n [[[[ 1 2 3 11 12 13 21 22 23]\n [ 6 7 8 16 17 18 26 27 28]]\n [[51 52 53 61 62 63 71 72 73]\n [56 57 58 66 67 68 76 77 78]]]]\n```\n\nIf we mark the pixels in the input image which are taken for the output with\n`*`, we see the pattern:\n\n```\n * * * 4 5 * * * 9 10\n * * * 14 15 * * * 19 20\n * * * 24 25 * * * 29 30\n 31 32 33 34 35 36 37 38 39 40\n 41 42 43 44 45 46 47 48 49 50\n * * * 54 55 * * * 59 60\n * * * 64 65 * * * 69 70\n * * * 74 75 * * * 79 80\n 81 82 83 84 85 86 87 88 89 90\n 91 92 93 94 95 96 97 98 99 100\n```\n\n```\n tf.image.extract_patches(images=images,\n sizes=[1, 3, 3, 1],\n strides=[1, 5, 5, 1],\n rates=[1, 2, 2, 1],\n padding='VALID')\n\n # Yields:\n [[[[ 1 3 5 21 23 25 41 43 45]\n [ 6 8 10 26 28 30 46 48 50]]\n\n [[ 51 53 55 71 73 75 91 93 95]\n [ 56 58 60 76 78 80 96 98 100]]]]\n```\n\nWe can again draw the effect, this time using the symbols `*`, `x`, `+` and\n`o` to distinguish the patches:\n\n```\n * 2 * 4 * x 7 x 9 x\n 11 12 13 14 15 16 17 18 19 20\n * 22 * 24 * x 27 x 29 x\n 31 32 33 34 35 36 37 38 39 40\n * 42 * 44 * x 47 x 49 x\n + 52 + 54 + o 57 o 59 o\n 61 62 63 64 65 66 67 68 69 70\n + 72 + 74 + o 77 o 79 o\n 81 82 83 84 85 86 87 88 89 90\n + 92 + 94 + o 97 o 99 o\n```\n\nArgs:\n images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.\n sizes: The size of the extracted patches. Must be\n `[1, size_rows, size_cols, 1]`.\n strides: A 1-D Tensor of length 4. How far the centers of two consecutive\n patches are in the images. 
Must be: `[1, stride_rows, stride_cols, 1]`.\n rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.\n This is the input stride, specifying how far two consecutive patch samples\n are in the input. Equivalent to extracting patches with `patch_sizes_eff =\n patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling\n them spatially by a factor of `rates`. This is equivalent to `rate` in\n dilated (a.k.a. Atrous) convolutions.\n padding: The type of padding algorithm to use.\n name: A name for the operation (optional).\n\nReturns:\n A 4-D Tensor of the same type as the input."} +{"repo": "tensorflow", "function": "def _op_in_graph_mode(tensor):\n if context.executing_eagerly():\n return tensor\n return tensor.op", "docstring": "Returns the tensor's op in graph mode, or the tensor in eager mode.\n\nThis is useful because sometimes an op is needed in graph mode instead of a\ntensor. In eager mode, there are no ops.\n\nArgs:\n tensor: A tensor.\n\nReturns:\n The tensor's op in graph mode. The tensor in eager mode."} +{"repo": "transformers", "function": "def _prepare_4d_causal_attention_mask_with_cache_position(attention_mask: torch.Tensor, sequence_length: int, target_length: int, dtype: torch.dtype, cache_position: torch.Tensor, batch_size: int, **kwargs):\n if attention_mask is not None and attention_mask.dim() == 4:\n causal_mask = attention_mask\n else:\n min_dtype = torch.finfo(dtype).min\n causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device)\n if sequence_length != 1:\n causal_mask = torch.triu(causal_mask, diagonal=1)\n causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)\n causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)\n if attention_mask is not None:\n causal_mask = causal_mask.clone()\n mask_length = attention_mask.shape[-1]\n padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(causal_mask.device)\n padding_mask = padding_mask == 0\n causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(padding_mask, min_dtype)\n return causal_mask", "docstring": "Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape\n`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.\n\nArgs:\n attention_mask (`torch.Tensor`):\n A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape\n `(batch_size, 1, query_length, key_value_length)`.\n sequence_length (`int`):\n The sequence length being processed.\n target_length (`int`):\n The target length: when generating with static cache, the mask should be as long as the static cache,\n to account for the 0 padding, the part of the cache that is not filled yet.\n dtype (`torch.dtype`):\n The dtype to use for the 4D attention mask.\n cache_position (`torch.Tensor`):\n Indices depicting the position of the input sequence tokens in the sequence.\n batch_size (`int`):\n Batch size."} +{"repo": "transformers", "function": "def preprocess_with_tokenizer_info(self, image_input: 'torch.Tensor', image_present: 'torch.Tensor', image_unpadded_h: 'torch.Tensor', image_unpadded_w: 'torch.Tensor', image_placeholder_id: int, image_newline_id: int, variable_sized: bool, patch_size: Optional[Dict[str, int]]=None) -> FuyuBatchFeature:\n requires_backends(self, ['torch'])\n patch_size = patch_size if
patch_size is not None else self.patch_size\n patch_height, patch_width = (patch_size['height'], patch_size['width'])\n images: List[List[torch.Tensor]] = []\n batch_image_patches: List[List[torch.Tensor]] = []\n batch_image_input_ids: List[List[torch.Tensor]] = []\n for batch_index in range(image_input.shape[0]):\n image_input_ids = []\n image_patches = []\n for subseq_index in range(image_input.shape[1]):\n if image_present[batch_index, subseq_index]:\n image = image_input[batch_index, subseq_index]\n image_height, image_width = (image.shape[1], image.shape[2])\n if variable_sized:\n new_h = min(image_height, math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height)\n new_w = min(image_width, math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width)\n image = image[:, :new_h, :new_w]\n image_height, image_width = (new_h, new_w)\n num_patches = self.get_num_patches(image_height=image_height, image_width=image_width)\n tensor_of_image_ids = torch.full([num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device)\n patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0)\n assert num_patches == patches.shape[0]\n if variable_sized:\n tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width)\n newline_ids = torch.full([tensor_of_image_ids.shape[0], 1], image_newline_id, dtype=torch.int32, device=image_input.device)\n tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1)\n tensor_of_image_ids = tensor_of_image_ids.reshape(-1)\n images.append([image])\n image_input_ids.append(tensor_of_image_ids)\n image_patches.append(patches)\n else:\n image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device))\n batch_image_input_ids.append(image_input_ids)\n batch_image_patches.append(image_patches)\n image_patch_indices_per_batch: List[List[torch.Tensor]] = []\n image_patch_indices_per_subsequence: List[List[torch.Tensor]] = []\n for sample_image_input_ids in batch_image_input_ids:\n index_offset = 0\n per_batch_indices = []\n per_subsequence_indices = []\n for subseq_image_input_ids in sample_image_input_ids:\n patches_mask = subseq_image_input_ids == image_placeholder_id\n num_patches = torch.count_nonzero(patches_mask)\n indices = torch.arange(num_patches, dtype=torch.int64, device=subseq_image_input_ids.device).type_as(subseq_image_input_ids)\n indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1)\n indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1)\n patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0]\n indices_in_stream_per_batch[patches_inds] = indices + index_offset\n indices_in_stream_per_subsequence[patches_inds] = indices\n per_batch_indices.append(indices_in_stream_per_batch)\n per_subsequence_indices.append(indices_in_stream_per_subsequence)\n index_offset += num_patches\n image_patch_indices_per_batch.append(per_batch_indices)\n image_patch_indices_per_subsequence.append(per_subsequence_indices)\n return FuyuBatchFeature(data={'images': images, 'image_input_ids': batch_image_input_ids, 'image_patches': batch_image_patches, 'image_patch_indices_per_batch': image_patch_indices_per_batch, 'image_patch_indices_per_subsequence': image_patch_indices_per_subsequence})", "docstring": "Process images for model input. 
In particular, variable-sized images are handled here.\n\nArgs:\n image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]):\n Tensor of images padded to model input size.\n image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]):\n Tensor of 1s and 0s indicating whether an image is present.\n image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]):\n Tensor of unpadded image heights.\n image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]):\n Tensor of unpadded image widths.\n image_placeholder_id (int):\n The id of the image placeholder token. Comes from an associated tokenizer.\n image_newline_id (int):\n The id of the image newline token. Comes from an associated tokenizer.\n variable_sized (bool):\n Whether to process images as variable-sized.\n patch_size (`Dict[str, int]`, *optional*, defaults to `self.patch_size`):\n Size of the patches."} +{"repo": "beam", "function": "def copy(self, source_file_names, destination_file_names):\n raise NotImplementedError", "docstring": "Recursively copy the file tree from the source to the destination.\n\nArgs:\n source_file_names: list of source file objects that need to be copied\n destination_file_names: list of destinations of the new objects\n\nRaises:\n ``BeamIOError``: if any of the copy operations fail"} +{"repo": "tensorflow", "function": "def get(self):\n if self._obj is None:\n raise AlreadyGarbageCollectedError(self.name, self.type_name)\n yield self._obj", "docstring": "Yields the managed C-API Object, guaranteeing aliveness.\n\nThis is a context manager. Inside the context the C-API object is\nguaranteed to be alive.\n\nRaises:\n AlreadyGarbageCollectedError: if the object is already deleted."} +{"repo": "transformers", "function": "class ConfidenceCriteria(StoppingCriteria):\n\n def __init__(self, assistant_confidence_threshold):\n self.assistant_confidence_threshold = assistant_confidence_threshold\n\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:\n probs = scores[-1].softmax(-1)\n p = probs[0, input_ids[0, -1]].item()\n if p < self.assistant_confidence_threshold:\n return True\n return False", "docstring": "This class can be used to stop generation whenever the assistant model's confidence in its prediction for the current token is lower than the threshold\n `model.generation_config.assistant_confidence_threshold` even if the number of speculative tokens (defined by `num_assistant_tokens`) is not yet reached.\n\nArgs:\n assistant_confidence_threshold (`float`):\n The value of the threshold."} +{"repo": "tensorflow", "function": "def log_ndtr(x, series_order=3, name='log_ndtr'):\n if not isinstance(series_order, int):\n raise TypeError('series_order must be a Python integer.')\n if series_order < 0:\n raise ValueError('series_order must be non-negative.')\n if series_order > 30:\n raise ValueError('series_order must be <= 30.')\n with ops.name_scope(name, values=[x]):\n x = ops.convert_to_tensor(x, name='x')\n if x.dtype.as_numpy_dtype == np.float64:\n lower_segment = LOGNDTR_FLOAT64_LOWER\n upper_segment = LOGNDTR_FLOAT64_UPPER\n elif x.dtype.as_numpy_dtype == np.float32:\n lower_segment = LOGNDTR_FLOAT32_LOWER\n upper_segment = LOGNDTR_FLOAT32_UPPER\n else:\n raise TypeError('x.dtype=%s is not supported.'
% x.dtype)\n return array_ops.where_v2(math_ops.greater(x, upper_segment), -_ndtr(-x), array_ops.where_v2(math_ops.greater(x, lower_segment), math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))), _log_ndtr_lower(math_ops.minimum(x, lower_segment), series_order)))", "docstring": "Log Normal distribution function.\n\nFor details of the Normal distribution function see `ndtr`.\n\nThis function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or\nusing an asymptotic series. Specifically:\n- For `x > upper_segment`, use the approximation `-ndtr(-x)` based on\n `log(1-x) ~= -x, x << 1`.\n- For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique\n and take a log.\n- For `x <= lower_segment`, we use the series approximation of erf to compute\n the log CDF directly.\n\nThe `lower_segment` is set based on the precision of the input:\n\n```\nlower_segment = { -20, x.dtype=float64\n { -10, x.dtype=float32\nupper_segment = { 8, x.dtype=float64\n { 5, x.dtype=float32\n```\n\nWhen `x < lower_segment`, the `ndtr` asymptotic series approximation is:\n\n```\n ndtr(x) = scale * (1 + sum) + R_N\n scale = exp(-0.5 x**2) / (-x sqrt(2 pi))\n sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}\n R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})\n```\n\nwhere `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a\n[double-factorial](https://en.wikipedia.org/wiki/Double_factorial).\n\n\nArgs:\n x: `Tensor` of type `float32`, `float64`.\n series_order: Positive Python `integer`. Maximum depth to\n evaluate the asymptotic expansion. This is the `N` above.\n name: Python string. A name for the operation (default=\"log_ndtr\").\n\nReturns:\n log_ndtr: `Tensor` with `dtype=x.dtype`.\n\nRaises:\n TypeError: if `x.dtype` is not handled.\n TypeError: if `series_order` is not a Python `integer`.\n ValueError: if `series_order` is not in `[0, 30]`."} +{"repo": "tensorflow", "function": "def __init__(self, fetches, contraction_fn):\n self._unique_fetches = []\n for fetch in fetches:\n try:\n self._unique_fetches.append(ops.get_default_graph().as_graph_element(fetch, allow_tensor=True, allow_operation=True))\n except TypeError as e:\n raise TypeError(f'Argument `fetch` = {fetch} has invalid type \"{type(fetch).__name__}\" must be a string or Tensor. ({str(e)})')\n except ValueError as e:\n raise ValueError(f'Argument `fetch` = {fetch} cannot be interpreted as a Tensor. ({str(e)})')\n except KeyError as e:\n raise ValueError(f'Argument `fetch` = {fetch} cannot be interpreted as a Tensor. ({str(e)})')\n self._contraction_fn = contraction_fn", "docstring": "Creates an _ElementFetchMapper.\n\nThis is the fetch mapper used for leaves in the fetch struct. Because of\nthe expansions mechanism, a leaf can actually fetch more than one tensor.\n\nAlso note that the fetches here can be just strings (tensor or op names) or\nany other object that the graph knows how to convert to a tensor, such as a\nVariable.
So we have to run each fetch through `as_graph_element()` to get\nthe corresponding tensor or op.\n\nArgs:\n fetches: List of objects, as returned by a fetch_fn defined in\n _REGISTERED_EXPANSIONS.\n contraction_fn: Callable as returned by a fetch_fn."} +{"repo": "transformers", "function": "class PatchTSMixerLayer(nn.Module):\n\n def __init__(self, config: PatchTSMixerConfig):\n super().__init__()\n self.patch_mixer = PatchMixerBlock(config=config)\n self.feature_mixer = FeatureMixerBlock(config=config)\n self.mode = config.mode\n if config.mode == 'mix_channel':\n self.channel_feature_mixer = PatchTSMixerChannelFeatureMixerBlock(config=config)\n\n def forward(self, hidden: torch.Tensor):\n \"\"\"\n Args:\n hidden (`torch.Tensor` of shape `(batch_size, num_patches, d_model)`):\n Input tensor to the layer.\n\n Returns:\n `torch.Tensor`: Transformed tensor.\n \"\"\"\n if self.mode == 'mix_channel':\n hidden = self.channel_feature_mixer(hidden)\n hidden = self.patch_mixer(hidden)\n hidden = self.feature_mixer(hidden)\n return hidden", "docstring": "The `PatchTSMixer` layer that does all three kinds of mixing.\n\nArgs:\n config (`PatchTSMixerConfig`):\n Configuration."} +{"repo": "beam", "function": "def create_issue(title: str, description: str, labels: Optional[List[str]]=None) -> Tuple[int, str]:\n url = 'https://api.github.com/repos/{}/{}/issues'.format(_GITHUB_REPO_OWNER, _GITHUB_REPO_NAME)\n data = {'owner': _GITHUB_REPO_OWNER, 'repo': _GITHUB_REPO_NAME, 'title': title, 'body': description, 'labels': [_AWAITING_TRIAGE_LABEL, _PERF_ALERT_LABEL]}\n if labels:\n data['labels'].extend(labels)\n response = requests.post(url=url, data=json.dumps(data), headers=_HEADERS, timeout=_REQUEST_TIMEOUT_SECS).json()\n return (response['number'], response['html_url'])", "docstring": "Create an issue with a title, description, and labels.\n\nArgs:\n title: GitHub issue title.\n description: GitHub issue description.\n labels: Labels used to tag the GitHub issue.\nReturns:\n Tuple containing GitHub issue number and issue URL."} +{"repo": "beam", "function": "def delete(self, paths):\n results = self._blobstorageIO().delete_paths(paths)\n exceptions = {path: error for path, error in results.items() if error is not None}\n if exceptions:\n raise BeamIOError('Delete operation failed', exceptions)", "docstring": "Deletes files or directories at the provided paths.\nDirectories will be deleted recursively.\n\nArgs:\n paths: list of paths that give the file objects to be deleted\n\nRaises:\n ``BeamIOError``: if any of the delete operations fail"} +{"repo": "keras", "function": "def init_pool_generator(gens, random_seed=None, id_queue=None):\n global _SHARED_SEQUENCES\n _SHARED_SEQUENCES = gens\n worker_proc = multiprocessing.current_process()\n worker_proc.name = f'Keras_worker_{worker_proc.name}'\n if random_seed is not None:\n np.random.seed(random_seed + worker_proc.ident)\n if id_queue is not None:\n id_queue.put(worker_proc.ident, block=True, timeout=0.1)", "docstring": "Initializer function for pool workers.\n\nArgs:\n gens: State which should be made available to worker processes.\n random_seed: An optional value with which to seed child processes.\n id_queue: A multiprocessing Queue of worker ids.\n This is used to indicate that a worker process\n was created by Keras."} +{"repo": "transformers", "function": "def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, current_tokens: torch.LongTensor, beam_group_idx: int) -> torch.FloatTensor:\n batch_size = current_tokens.shape[0] //
self._num_beams\n group_start_idx = beam_group_idx * self._num_sub_beams\n group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams)\n group_size = group_end_idx - group_start_idx\n vocab_size = scores.shape[-1]\n if group_start_idx == 0:\n return scores\n scores_processed = scores.clone()\n for batch_idx in range(batch_size):\n previous_group_tokens = current_tokens[batch_idx * self._num_beams:batch_idx * self._num_beams + group_start_idx]\n token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device)\n scores_processed[batch_idx * group_size:(batch_idx + 1) * group_size] -= self._diversity_penalty * token_frequency\n return scores_processed", "docstring": "Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using\n beam search or log softmax for each vocabulary token when using beam search\n current_tokens (`torch.LongTensor` of shape `(batch_size)`):\n Indices of input sequence tokens in the vocabulary, corresponding to the tokens selected by the other\n beam groups in the current generation step.\n beam_group_idx (`int`):\n The index of the beam group currently being processed.\n\nReturn:\n `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`:\n The processed prediction scores."} +{"repo": "tensorflow", "function": "def recall_at_thresholds(labels, predictions, thresholds, weights=None, metrics_collections=None, updates_collections=None, name=None):\n if context.executing_eagerly():\n raise RuntimeError('tf.metrics.recall_at_thresholds is not supported when eager execution is enabled.')\n with variable_scope.variable_scope(name, 'recall_at_thresholds', (predictions, labels, weights)):\n values, update_ops = _confusion_matrix_at_thresholds(labels, predictions, thresholds, weights, includes=('tp', 'fn'))\n epsilon = 1e-07\n\n def compute_recall(tp, fn, name):\n return math_ops.divide(tp, epsilon + tp + fn, name='recall_' + name)\n\n def recall_across_replicas(_, values):\n return compute_recall(values['tp'], values['fn'], 'value')\n rec = _aggregate_across_replicas(metrics_collections, recall_across_replicas, values)\n update_op = compute_recall(update_ops['tp'], update_ops['fn'], 'update_op')\n if updates_collections:\n ops.add_to_collections(updates_collections, update_op)\n return (rec, update_op)", "docstring": "Computes various recall values for different `thresholds` on `predictions`.\n\nThe `recall_at_thresholds` function creates four local variables,\n`true_positives`, `true_negatives`, `false_positives` and `false_negatives`\nfor various values of thresholds. `recall[i]` is defined as the total weight\nof values in `predictions` above `thresholds[i]` whose corresponding entry in\n`labels` is `True`, divided by the total weight of `True` values in `labels`\n(`true_positives[i] / (true_positives[i] + false_negatives[i])`).\n\nFor estimation of the metric over a stream of data, the function creates an\n`update_op` operation that updates these variables and returns the `recall`.\n\nIf `weights` is `None`, weights default to 1. Use weights of 0 to mask values.\n\nArgs:\n labels: The ground truth values, a `Tensor` whose dimensions must match\n `predictions`. 
Will be cast to `bool`.\n predictions: A floating point `Tensor` of arbitrary shape and whose values\n are in the range `[0, 1]`.\n thresholds: A python list or tuple of float thresholds in `[0, 1]`.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n metrics_collections: An optional list of collections that `recall` should be\n added to.\n updates_collections: An optional list of collections that `update_op` should\n be added to.\n name: An optional variable_scope name.\n\nReturns:\n recall: A float `Tensor` of shape `[len(thresholds)]`.\n update_op: An operation that increments the `true_positives`,\n `true_negatives`, `false_positives` and `false_negatives` variables that\n are used in the computation of `recall`.\n\nRaises:\n ValueError: If `predictions` and `labels` have mismatched shapes, or if\n `weights` is not `None` and its shape doesn't match `predictions`, or if\n either `metrics_collections` or `updates_collections` are not a list or\n tuple.\n RuntimeError: If eager execution is enabled."} +{"repo": "tensorflow", "function": "def build_model_flags(change_concat_input_ranges=False, allow_nonexistent_arrays=False, saved_model_dir=None, saved_model_version=0, saved_model_tags=None, saved_model_exported_names=None, **_):\n model_flags = _model_flags_pb2.ModelFlags()\n model_flags.change_concat_input_ranges = change_concat_input_ranges\n model_flags.allow_nonexistent_arrays = allow_nonexistent_arrays\n if saved_model_dir:\n model_flags.saved_model_dir = saved_model_dir\n model_flags.saved_model_version = saved_model_version\n if saved_model_tags:\n model_flags.saved_model_tags.extend(saved_model_tags)\n if saved_model_exported_names:\n model_flags.saved_model_exported_names.extend(saved_model_exported_names)\n return model_flags", "docstring": "Builds the model flags object from params.\n\nArgs:\n change_concat_input_ranges: Boolean to change behavior of min/max ranges for\n inputs and outputs of the concat operator for quantized models. Changes\n the ranges of concat operator overlap when true. (default False)\n allow_nonexistent_arrays: Allow specifying array names that don't exist or\n are unused in the final graph. (default False)\n saved_model_dir: Filepath of the saved model to be converted. This value\n will be non-empty only when the saved model import path will be used.\n Otherwise, the graph def-based conversion will be processed.\n saved_model_version: SavedModel file format version of the saved model file\n to be converted. This value will be set only when the SavedModel import\n path will be used.\n saved_model_tags: Set of string saved model tags, formatted as\n comma-separated values. This value will be set only when the SavedModel\n import path will be used.\n saved_model_exported_names: Names to be exported (default: export all) when\n the saved model import path is on.
This value will be set only when the\n SavedModel import path will be used.\n\nReturns:\n model_flags: protocol buffer describing the model."} +{"repo": "tf-quant-finance", "function": "def options_price_from_samples(strikes: types.RealTensor, expiries: types.RealTensor, maturities: types.RealTensor, is_call_options: types.BoolTensor, sample_discount_curve_paths_fn: Callable[..., Tuple[types.RealTensor, types.RealTensor]], num_samples: types.IntTensor, time_step: types.RealTensor, dtype: tf.DType=None, name: str=None) -> types.RealTensor:\n name = name or 'options_price_from_samples'\n with tf.name_scope(name):\n sim_times, _ = tf.unique(tf.reshape(expiries, shape=[-1]))\n longest_expiry = tf.reduce_max(sim_times)\n sim_times, _ = tf.unique(tf.concat([sim_times, tf.range(time_step, longest_expiry, time_step)], axis=0))\n sim_times = tf.sort(sim_times, name='sort_sim_times')\n tau = maturities - expiries\n curve_times_builder, _ = tf.unique(tf.reshape(tau, shape=[-1]))\n curve_times = tf.sort(curve_times_builder, name='sort_curve_times')\n p_t_tau, r_t = sample_discount_curve_paths_fn(times=sim_times, curve_times=curve_times, num_samples=num_samples)\n dim = p_t_tau.shape[-1]\n dt_builder = tf.concat(axis=0, values=[tf.convert_to_tensor([0.0], dtype=dtype), sim_times[1:] - sim_times[:-1]])\n dt = tf.expand_dims(tf.expand_dims(dt_builder, axis=-1), axis=0)\n discount_factors_builder = tf.math.exp(-r_t * dt)\n discount_factors_builder = tf.transpose(utils.cumprod_using_matvec(tf.transpose(discount_factors_builder, [0, 2, 1])), [0, 2, 1])\n discount_factors_builder = tf.expand_dims(discount_factors_builder, axis=1)\n discount_factors_simulated = tf.repeat(discount_factors_builder, p_t_tau.shape.as_list()[1], axis=1)\n sim_time_index = tf.searchsorted(sim_times, tf.reshape(expiries, [-1]))\n curve_time_index = tf.searchsorted(curve_times, tf.reshape(tau, [-1]))\n curve_time_index, sim_time_index = tff_utils.broadcast_tensors(curve_time_index, sim_time_index)\n gather_index = _prepare_indices(tf.range(0, num_samples), curve_time_index, sim_time_index, tf.range(0, dim))\n payoff_discount_factors_builder = tf.gather_nd(discount_factors_simulated, gather_index)\n payoff_discount_factors = tf.reshape(payoff_discount_factors_builder, [num_samples] + strikes.shape + [dim])\n payoff_bond_price_builder = tf.gather_nd(p_t_tau, gather_index)\n payoff_bond_price = tf.reshape(payoff_bond_price_builder, [num_samples] + strikes.shape + [dim])\n is_call_options = tf.reshape(tf.broadcast_to(is_call_options, strikes.shape), [1] + strikes.shape + [1])\n strikes = tf.reshape(strikes, [1] + strikes.shape + [1])\n payoff = tf.where(is_call_options, tf.math.maximum(payoff_bond_price - strikes, 0.0), tf.math.maximum(strikes - payoff_bond_price, 0.0))\n option_value = tf.math.reduce_mean(payoff_discount_factors * payoff, axis=0)\n return option_value", "docstring": "Computes the zero coupon bond options price from simulated discount curves.\n\nArgs:\n strikes: A real `Tensor` of any shape and dtype. The strike price of the\n options. The shape of this input determines the number (and shape) of the\n options to be priced and the output.\n expiries: A real `Tensor` of the same dtype and compatible shape as\n `strikes`. The time to expiry of each bond option.\n maturities: A real `Tensor` of the same dtype and compatible shape as\n `strikes`. 
The time to maturity of the underlying zero coupon bonds.\n is_call_options: A boolean `Tensor` of a shape compatible with `strikes`.\n Indicates whether the option is a call (if True) or a put (if False).\n sample_discount_curve_paths_fn: Callable which takes the following args:\n\n 1) times: Rank 1 `Tensor` of positive real values, specifying the times at\n which the path points are to be evaluated.\n 2) curve_times: Rank 1 `Tensor` of positive real values, specifying the\n maturities at which the discount curve is to be computed at each\n simulation time.\n 3) num_samples: Positive scalar integer specifying the number of paths to\n draw.\n\n and returns two `Tensor`s, the first being a Rank-4 tensor of shape\n `[num_samples, m, k, dim]` containing the simulated zero coupon bond\n curves, and the second being a `Tensor` of shape `[num_samples, k, dim]`\n containing the simulated short rate paths. Here, `m` is the size of\n `curve_times`, `k` is the size of `times`, and `dim` is the dimensionality\n of the paths.\n\n num_samples: Positive scalar `int32` `Tensor`. The number of simulation\n paths during Monte-Carlo valuation.\n time_step: Scalar real `Tensor`. Maximal distance between time grid points\n in Euler scheme. Relevant when Euler scheme is used for simulation.\n dtype: The default dtype to use when converting values to `Tensor`s.\n Default value: `None` which means that default dtypes inferred by\n TensorFlow are used.\n name: Python string. The name to give to the ops created by this function.\n Default value: `None` which maps to the default name\n `options_price_from_samples`.\n\nReturns:\n A `Tensor` of real dtype and shape `strikes.shape + [dim]` containing the\n computed option prices."} +{"repo": "transformers", "function": "def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):\n cos = cos.unsqueeze(unsqueeze_dim)\n sin = sin.unsqueeze(unsqueeze_dim)\n q_embed = q * cos + rotate_half(q) * sin\n k_embed = k * cos + rotate_half(k) * sin\n return (q_embed, k_embed)", "docstring": "Applies Rotary Position Embedding to the query and key tensors.\n\nArgs:\n q (`torch.Tensor`): The query tensor.\n k (`torch.Tensor`): The key tensor.\n cos (`torch.Tensor`): The cosine part of the rotary embedding.\n sin (`torch.Tensor`): The sine part of the rotary embedding.\n position_ids (`torch.Tensor`, *optional*):\n Deprecated and unused.\n unsqueeze_dim (`int`, *optional*, defaults to 1):\n The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and\n sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note\n that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and\n k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes\n cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have\n the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.\nReturns:\n `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding."} +{"repo": "transformers", "function": "class LayoutLMv2FastImageProcessorKwargs(DefaultFastImageProcessorKwargs):\n apply_ocr: Optional[bool]\n ocr_lang: Optional[str]\n tesseract_config: Optional[str]", "docstring": "Args:\n apply_ocr (`bool`, *optional*, defaults to `True`):\n Whether to apply the Tesseract OCR engine to get words + normalized bounding boxes. 
Can be overridden by\n the `apply_ocr` parameter in the `preprocess` method.\n ocr_lang (`str`, *optional*):\n The language, specified by its ISO code, to be used by the Tesseract OCR engine. By default, English is\n used. Can be overridden by the `ocr_lang` parameter in the `preprocess` method.\n tesseract_config (`str`, *optional*):\n Any additional custom configuration flags that are forwarded to the `config` parameter when calling\n Tesseract. For example: '--psm 6'. Can be overridden by the `tesseract_config` parameter in the\n `preprocess` method."} +{"repo": "fhir-py", "function": "def count_function(function: _evaluation.CountFunction, operand_result: Optional[_sql_data_types.Select], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:\n del function, params_result\n if operand_result is None:\n raise ValueError('count() cannot be called without an operand.')\n if operand_result.from_part is None:\n return _sql_data_types.Select(select_part=_sql_data_types.CountCall((_sql_data_types.RawExpression(operand_result.sql_alias, _sql_data_type=operand_result.sql_data_type),)), from_part=str(operand_result.to_subquery()), where_part=operand_result.where_part, sql_dialect=_sql_data_types.SqlDialect.SPARK)\n else:\n return dataclasses.replace(operand_result, select_part=_sql_data_types.CountCall((operand_result.select_part,)))", "docstring": "Returns an integer representing the number of elements in a collection.\n\nBy default, `_CountFunction` will return 0.\n\nArgs:\n function: The FHIRPath AST `CountFunction` node\n operand_result: The expression which is being evaluated\n params_result: The parameters passed in to the function\n\nReturns:\n A compiled Spark SQL expression.\n\nRaises:\n ValueError: When the function is called without an operand"} +{"repo": "tensorflow", "function": "def set_help_intro(self, help_intro):\n self._command_handler_registry.set_help_intro(help_intro=help_intro)", "docstring": "Set an introductory message to the help output of the command registry.\n\nArgs:\n help_intro: (RichTextLines) Rich text lines appended to the beginning of\n the output of the command \"help\", as introductory information."} +{"repo": "keras", "function": "def set_state_tree(self, state_tree):\n for k, v in state_tree.items():\n path_value_dict = self._flatten_nested_dict(v)\n if k == 'trainable_variables':\n self._assign_variable_values(self.trainable_variables, path_value_dict)\n elif k == 'non_trainable_variables':\n self._assign_variable_values(self.non_trainable_variables, path_value_dict)\n elif k == 'optimizer_variables':\n self._assign_variable_values(self.optimizer.variables, path_value_dict)\n elif k == 'metrics_variables':\n self._assign_variable_values(self.metrics_variables, path_value_dict)\n else:\n raise ValueError(f'Unknown variable name: {k}')", "docstring": "Assigns values to variables of the model.\n\nThis method takes a dictionary of nested variable values, which\nrepresents the state tree of the model, and assigns them to the\ncorresponding variables of the model.
The dictionary keys represent the\nvariable names (e.g., `'trainable_variables'`, `'optimizer_variables'`),\nand the values are nested dictionaries containing the variable\npaths and their corresponding values.\n\nArgs:\n state_tree: A dictionary representing the state tree of the model.\n The keys are the variable names, and the values are nested\n dictionaries representing the variable paths and their values."} +{"repo": "beam", "function": "def __init__(self, counter_factory, state_sampler, declaring_step, input_index):\n super().__init__(counter_factory, state_sampler)\n self.declaring_step = declaring_step\n self.input_index = input_index\n self.update_current_step()", "docstring": "Create a side input read counter.\n\nArgs:\n counter_factory: A counters.CounterFactory to create byte counters.\n state_sampler: A statesampler.StateSampler to transition into read states.\n declaring_step: A string with the step name of the step that directly\n receives the side input initially.\n input_index: The index of the side input in the list of inputs of the\n declaring step.\n\nThe side input is uniquely identified by (declaring_step, input_index);\nwhere declaring_step is the step that receives the PCollectionView as a\nside input, and input_index is the index of the PCollectionView within\nthe list of inputs."} +{"repo": "transformers", "function": "class Qwen2AudioEncoderConfig(PretrainedConfig):\n model_type = 'qwen2_audio_encoder'\n\n def __init__(self, num_mel_bins=128, encoder_layers=32, encoder_attention_heads=20, encoder_ffn_dim=5120, encoder_layerdrop=0.0, d_model=1280, dropout=0.0, attention_dropout=0.0, activation_function='gelu', activation_dropout=0.0, scale_embedding=False, initializer_range=0.02, max_source_positions=1500, **kwargs):\n super().__init__(**kwargs)\n self.num_mel_bins = num_mel_bins\n self.d_model = d_model\n self.encoder_layers = encoder_layers\n self.encoder_attention_heads = encoder_attention_heads\n self.encoder_ffn_dim = encoder_ffn_dim\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation_function = activation_function\n self.activation_dropout = activation_dropout\n self.encoder_layerdrop = encoder_layerdrop\n self.num_hidden_layers = encoder_layers\n self.initializer_range = initializer_range\n self.scale_embedding = scale_embedding\n self.max_source_positions = max_source_positions", "docstring": "This is the configuration class to store the configuration of a [`Qwen2AudioEncoder`]. It is used to instantiate a\nQwen2-Audio audio encoder according to the specified arguments, defining the model architecture. Instantiating a\nconfiguration with the defaults will yield a similar configuration to that of the audio encoder of the Qwen2-Audio\narchitecture.\n\ne.g. [Qwen/Qwen2-Audio-7B](https://huggingface.co/Qwen/Qwen2-Audio-7B)\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n num_mel_bins (`int`, *optional*, defaults to 128):\n Number of mel features used per input features. 
Should correspond to the value used in the\n `Qwen2AudioProcessor` class.\n encoder_layers (`int`, *optional*, defaults to 32):\n Number of encoder layers.\n encoder_attention_heads (`int`, *optional*, defaults to 20):\n Number of attention heads for each attention layer in the Transformer encoder.\n encoder_ffn_dim (`int`, *optional*, defaults to 5120):\n Dimensionality of the \"intermediate\" (often named feed-forward) layer in the encoder.\n encoder_layerdrop (`float`, *optional*, defaults to 0.0):\n The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)\n for more details.\n d_model (`int`, *optional*, defaults to 1280):\n Dimensionality of the layers.\n dropout (`float`, *optional*, defaults to 0.0):\n The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n activation_function (`str`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"silu\"` and `\"gelu_new\"` are supported.\n activation_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for activations inside the fully connected layer.\n scale_embedding (`bool`, *optional*, defaults to `False`):\n Scale embeddings by dividing by sqrt(d_model).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n max_source_positions (`int`, *optional*, defaults to 1500):\n The maximum sequence length of log-mel filter-bank features that this model might ever be used with.\n\nExample:\n\n```python\n>>> from transformers import Qwen2AudioEncoderConfig, Qwen2AudioEncoder\n\n>>> # Initializing a Qwen2AudioEncoderConfig\n>>> configuration = Qwen2AudioEncoderConfig()\n\n>>> # Initializing a Qwen2AudioEncoder (with random weights)\n>>> model = Qwen2AudioEncoder(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "fhir-py", "function": "def returns_scalar(return_type: Optional[FhirPathDataType]) -> bool:\n return not return_type or return_type.cardinality == Cardinality.SCALAR", "docstring": "Indicates if the return type evaluates to a scalar.\n\nArgs:\n return_type: The data type to describe.\n\nReturns:\n True if `return_type` represents an element with cardinality less than or\n equal to one whose parents are all also scalars.\n False otherwise. For example, the path Patient.name.use does not return a\n scalar, despite 'use' being a scalar, because it is a child of the\n collection, 'name.'"} +{"repo": "tensorflow", "function": "def enable_mixed_precision_graph_rewrite_v1(opt, loss_scale='dynamic'):\n if mixed_precision_global_state.is_using_mixed_precision_policy():\n raise ValueError('The mixed precision graph rewrite cannot be enabled, because the global Keras dtype Policy has been set to a mixed precision policy. At most, one of the following can be called:\\n\\n 1. tf.keras.mixed_precision.set_global_policy() with a mixed precision policy (You called this first)\\n\\n 2. tf.train.experimental.enable_mixed_precision_graph_rewrite() (You called this second)\\nYou called both functions, which is an error, because both functions enable you to use mixed precision.
If in doubt which function to use, use the first, as it supports Eager execution and is more customizable.')\n if mixed_precision_global_state.non_mixed_precision_session_created():\n tf_logging.warn('You already have existing Sessions that do not use mixed precision. enable_mixed_precision_graph_rewrite() will not affect these Sessions.')\n opt = _wrap_optimizer(opt, loss_scale)\n config.set_optimizer_experimental_options({'auto_mixed_precision': True})\n mixed_precision_global_state.set_mixed_precision_graph_rewrite_enabled(True)\n return opt", "docstring": "Enable mixed precision via a graph rewrite.\n\nMixed precision is the use of both float32 and float16 data types when\ntraining a model to improve performance. This is achieved via a graph rewrite\noperation and a loss-scale optimizer.\n\nPerforming arithmetic operations in float16 takes advantage of specialized\nprocessing units, such as NVIDIA Tensor Cores, for much higher arithmetic\nthroughput. However, due to the smaller representable range, performing the\nentire training with float16 can result in gradient underflow, that is, small\ngradient values becoming zeroes. Instead, performing only select arithmetic\noperations in float16 results in higher throughput and decreased training\ntime when using compatible hardware accelerators while also reducing memory\nusage, typically without sacrificing model accuracy.\n\nNote: While the mixed precision rewrite changes the datatype of various\nlayers throughout the model, the same accuracy reached in float32 is\nexpected. If a `NaN` gradient occurs with dynamic loss scaling, the model\nupdate for that batch is skipped. In this case, the global step count is not\nincremented, and the `LossScaleOptimizer` attempts to decrease the loss\nscaling value to avoid `NaN` values in subsequent iterations. This approach\nhas been shown to achieve the same accuracy as float32 and, in most cases,\nbetter training throughput.\n\nExample:\n\n```python\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(64, activation='softmax'),\n])\n\nopt = tf.keras.optimizers.SGD()\nopt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt)\nmodel.compile(loss=\"mse\", optimizer=opt)\n\nx_train = np.random.random((1024, 64))\ny_train = np.random.random((1024, 64))\nmodel.fit(x_train, y_train)\n```\n\nCalling `enable_mixed_precision_graph_rewrite(opt)` enables the graph rewrite\noperation before computing gradients. The function additionally returns an\n`Optimizer` (`opt`) wrapped with a `LossScaleOptimizer`. This prevents\nunderflow in the float16 tensors during the backward pass. An optimizer of\ntype `tf.train.Optimizer` or `tf.keras.optimizers.Optimizer` must be passed\nto this function, which will then be wrapped to use loss scaling.\n\nThe graph rewrite operation changes the `dtype` of certain operations in the\ngraph from float32 to float16. There are several categories of operations\nthat are either included or excluded by this rewrite operation. The following\ncategories of Ops are defined inside corresponding functions under the class\n`AutoMixedPrecisionLists` in\n\nauto_mixed_precision_lists.h:\n\n* `ClearList`: Ops that do not have numerically significant adverse effects.\nE.g. `ArgMax` and `Floor`.\n* `AllowList`: Ops that are considered numerically safe for execution in\nfloat16, and thus are always converted. E.g. 
`Conv2D`.\n* `DenyList`: Ops that are numerically unsafe to execute in float16 and\ncan negatively affect downstream nodes. E.g. `Softmax`.\n* `GrayList`: Ops that are considered numerically safe for execution in\nfloat16 unless downstream from a DenyList Op. E.g. `Add` and `AvgPool`.\n\nWhen this function is used, gradients should only be computed and applied\nwith the returned optimizer, either by calling `opt.minimize()` or\n`opt.compute_gradients()` followed by `opt.apply_gradients()`.\nGradients should not be computed with `tf.gradients` or `tf.GradientTape`.\nThis is because the returned optimizer will apply loss scaling, and\n`tf.gradients` or `tf.GradientTape` will not. If you do directly use\n`tf.gradients` or `tf.GradientTape`, your model may not converge due to\nfloat16 underflow problems.\n\nWhen eager execution is enabled, the mixed precision graph rewrite is only\nenabled within `tf.function`s, as outside `tf.function`s, there is no graph.\n\nFor NVIDIA GPUs with Tensor cores, as a general performance guide, dimensions\n(such as batch size, input size, output size, and channel counts)\nshould be powers of two if under 256, or otherwise divisible by 8 if above\n256. For more information, check out the\n[NVIDIA Deep Learning Performance Guide](\nhttps://docs.nvidia.com/deeplearning/sdk/dl-performance-guide/index.html).\n\nCurrently, mixed precision is only enabled on NVIDIA Tensor Core GPUs with\nCompute Capability 7.0 and above (Volta, Turing, or newer architectures). The\nparts of the graph on CPUs and TPUs are untouched by the graph rewrite.\n\nRaises:\n `ValueError`, if the `tf.keras.mixed_precision` API is also used by calling\n `tf.keras.mixed_precision.set_global_policy`. Only one mixed precision\n API can be used.\n\nArgs:\n opt: An instance of a `tf.keras.optimizers.Optimizer` or a\n `tf.train.Optimizer`.\n loss_scale: Either an int/float, the string `\"dynamic\"`, or an instance of\n a `tf.mixed_precision.experimental.LossScale`. The loss scale to use. 
It\n is recommended to keep this as its default value of `\"dynamic\"`, which\n will adjust the scaling automatically to prevent `Inf` or `NaN` values.\n\nReturns:\n A version of `opt` that will use loss scaling to prevent underflow."} +{"repo": "tensorflow", "function": "def lu_reconstruct(lower_upper, perm, validate_args=False, name=None):\n with ops.name_scope(name or 'lu_reconstruct'):\n lower_upper = ops.convert_to_tensor(lower_upper, dtype_hint=dtypes.float32, name='lower_upper')\n perm = ops.convert_to_tensor(perm, dtype_hint=dtypes.int32, name='perm')\n assertions = lu_reconstruct_assertions(lower_upper, perm, validate_args)\n if assertions:\n with ops.control_dependencies(assertions):\n lower_upper = array_ops.identity(lower_upper)\n perm = array_ops.identity(perm)\n shape = array_ops.shape(lower_upper)\n lower = set_diag(band_part(lower_upper, num_lower=-1, num_upper=0), array_ops.ones(shape[:-1], dtype=lower_upper.dtype))\n upper = band_part(lower_upper, num_lower=0, num_upper=-1)\n x = math_ops.matmul(lower, upper)\n if lower_upper.shape is None or lower_upper.shape.rank is None or lower_upper.shape.rank != 2:\n batch_size = math_ops.reduce_prod(shape[:-2])\n d = shape[-1]\n x = array_ops.reshape(x, [batch_size, d, d])\n perm = array_ops.reshape(perm, [batch_size, d])\n perm = map_fn.map_fn(array_ops.invert_permutation, perm)\n batch_indices = array_ops.broadcast_to(math_ops.range(batch_size)[:, array_ops.newaxis], [batch_size, d])\n x = array_ops.gather_nd(x, array_ops_stack.stack([batch_indices, perm], axis=-1))\n x = array_ops.reshape(x, shape)\n else:\n x = array_ops.gather(x, array_ops.invert_permutation(perm))\n x.set_shape(lower_upper.shape)\n return x", "docstring": "Reconstructs one or more matrices from their LU decomposition(s).\n\nArgs:\n lower_upper: `lu` as returned by `tf.linalg.lu`, i.e., if `matmul(P,\n matmul(L, U)) = X` then `lower_upper = L + U - eye`.\n perm: `p` as returned by `tf.linalg.lu`, i.e., if `matmul(P, matmul(L, U)) =\n X` then `perm = argmax(P)`.\n validate_args: Python `bool` indicating whether arguments should be checked\n for correctness.\n Default value: `False` (i.e., don't validate arguments).\n name: Python `str` name given to ops managed by this object.\n Default value: `None` (i.e., 'lu_reconstruct').\n\nReturns:\n x: The original input to `tf.linalg.lu`, i.e., `x` as in,\n `lu_reconstruct(*tf.linalg.lu(x))`.\n\n#### Examples\n\n```python\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_probability as tfp\n\nx = [[[3., 4], [1, 2]],\n [[7., 8], [3, 4]]]\nx_reconstructed = tf.linalg.lu_reconstruct(*tf.linalg.lu(x))\ntf.assert_near(x, x_reconstructed)\n# ==> True\n```"} +{"repo": "tensorflow", "function": "def reduce_to(self, reduce_op, value, destinations, options=None):\n with monitoring.MonitoredTimer(distributed_api_time_counter.get_cell(self.__class__.__name__, 'Reduce_to_eagerly')) if not ops.inside_function() else contextlib.nullcontext():\n if options is None:\n options = collective_util.Options()\n _require_cross_replica_or_default_context_extended(self)\n assert not isinstance(destinations, (list, tuple))\n assert not isinstance(reduce_op, variable_scope.VariableAggregation)\n if isinstance(reduce_op, six.string_types):\n reduce_op = reduce_util.ReduceOp(reduce_op.upper())\n assert reduce_op == reduce_util.ReduceOp.SUM or reduce_op == reduce_util.ReduceOp.MEAN\n return self._reduce_to(reduce_op, value, destinations, options)", "docstring": "Combine (via e.g.
sum or mean) values across replicas.\n\n`reduce_to` aggregates `tf.distribute.DistributedValues` and distributed\nvariables. It supports both dense values and `tf.IndexedSlices`.\n\nThis API currently can only be called in cross-replica context. Other\nvariants to reduce values across replicas are:\n* `tf.distribute.StrategyExtended.batch_reduce_to`: the batch version of\n this API.\n* `tf.distribute.ReplicaContext.all_reduce`: the counterpart of this API\n in replica context. It supports both batched and non-batched all-reduce.\n* `tf.distribute.Strategy.reduce`: a more convenient method to reduce\n to the host in cross-replica context.\n\n`destinations` specifies where to reduce the value to, e.g. \"GPU:0\". You can\nalso pass in a `Tensor`, and the destinations will be the device of that\ntensor. For all-reduce, pass the same to `value` and `destinations`.\n\nIt can be used in `tf.distribute.ReplicaContext.merge_call` to write code\nthat works for all `tf.distribute.Strategy`.\n\n@tf.function\ndef step_fn(var):\n\n def merge_fn(strategy, value, var):\n # All-reduce the value. Note that `value` here is a\n # `tf.distribute.DistributedValues`.\n reduced = strategy.extended.reduce_to(tf.distribute.ReduceOp.SUM,\n value, destinations=var)\n strategy.extended.update(var, lambda var, value: var.assign(value),\n args=(reduced,))\n\n value = tf.identity(1.)\n tf.distribute.get_replica_context().merge_call(merge_fn,\n args=(value, var))\n\ndef run(strategy):\n with strategy.scope():\n v = tf.Variable(0.)\n strategy.run(step_fn, args=(v,))\n return v\n\nrun(tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"]))\nMirroredVariable:{\n 0: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=2.0>,\n 1: <tf.Variable 'Variable/replica_1:0' shape=() dtype=float32, numpy=2.0>\n}\nrun(tf.distribute.experimental.CentralStorageStrategy(\n compute_devices=[\"GPU:0\", \"GPU:1\"], parameter_device=\"CPU:0\"))\n<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=2.0>\nrun(tf.distribute.OneDeviceStrategy(\"GPU:0\"))\n<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=1.0>\n\nArgs:\n reduce_op: a `tf.distribute.ReduceOp` value specifying how values should\n be combined. Allows using string representation of the enum such as\n \"SUM\", \"MEAN\".\n value: a `tf.distribute.DistributedValues`, or a `tf.Tensor` like object.\n destinations: a `tf.distribute.DistributedValues`, a `tf.Variable`, a\n `tf.Tensor` alike object, or a device string. It specifies the devices\n to reduce to. To perform an all-reduce, pass the same to `value` and\n `destinations`. Note that if it's a `tf.Variable`, the value is reduced\n to the devices of that variable, and this method doesn't update the\n variable.\n options: a `tf.distribute.experimental.CommunicationOptions`. Options to\n perform collective operations. This overrides the default options if the\n `tf.distribute.Strategy` takes one in the constructor.
See\n `tf.distribute.experimental.CommunicationOptions` for details of the\n options.\n\nReturns:\n A tensor or value reduced to `destinations`."} +{"repo": "transformers", "function": "class AriaGroupedExpertsGemm(nn.Module):\n\n def __init__(self, in_features, out_features, groups):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.groups = groups\n self.weight = nn.Parameter(torch.empty(groups, in_features, out_features))\n\n def forward(self, input, tokens_per_expert):\n \"\"\"\n Perform grouped matrix multiplication.\n\n Args:\n input (`torch.Tensor`):\n Input tensor of shape (num_tokens, in_features).\n tokens_per_expert (`torch.Tensor`):\n Number of tokens assigned to each expert.\n\n Returns:\n torch.Tensor: Output tensor of shape (num_tokens, out_features).\n \"\"\"\n return sequential_experts_gemm(input, self.weight, tokens_per_expert.cpu())", "docstring": "Grouped GEMM (General Matrix Multiplication) module for efficient expert computation.\nThis module utilizes the grouped_gemm library (https://github.com/fanshiqing/grouped_gemm)\nfor optimized performance. If the grouped_gemm library is not installed, it gracefully\nfalls back to a sequential GEMM implementation, which may be slower but ensures\nfunctionality.\n\nArgs:\n in_features (`int`):\n Number of input features.\n out_features (`int`):\n Number of output features.\n groups (`int`):\n Number of expert groups."} +{"repo": "keras", "function": "def ceil(x):\n if any_symbolic_tensors((x,)):\n return Ceil().symbolic_call(x)\n return backend.numpy.ceil(x)", "docstring": "Return the ceiling of the input, element-wise.\n\nThe ceil of the scalar `x` is the smallest integer `i`, such that\n`i >= x`.\n\nArgs:\n x: Input tensor.\n\nReturns:\n The ceiling of each element in `x`, with float dtype."} +{"repo": "keras", "function": "def trunc(x):\n if any_symbolic_tensors((x,)):\n return Trunc().symbolic_call(x)\n return backend.numpy.trunc(x)", "docstring": "Return the truncated value of the input, element-wise.\n\nThe truncated value of the scalar `x` is the nearest integer `i` which is\ncloser to zero than `x` is. 
In short, the fractional part of the signed\nnumber `x` is discarded.\n\nArgs:\n x: Input tensor.\n\nReturns:\n The truncated value of each element in `x`.\n\nExample:\n>>> x = ops.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])\n>>> ops.trunc(x)\narray([-1.0, -1.0, -0.0, 0.0, 1.0, 1.0, 2.0])"} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.LongTensor]=None, decoder_input_values: Optional[torch.FloatTensor]=None, decoder_attention_mask: Optional[torch.LongTensor]=None, head_mask: Optional[torch.FloatTensor]=None, decoder_head_mask: Optional[torch.FloatTensor]=None, cross_attn_head_mask: Optional[torch.Tensor]=None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, speaker_embeddings: Optional[torch.FloatTensor]=None, labels: Optional[torch.FloatTensor]=None, stop_labels: Optional[torch.Tensor]=None) -> Union[Tuple, Seq2SeqSpectrogramOutput]:\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n if labels is not None:\n if decoder_input_values is None:\n decoder_input_values, decoder_attention_mask = shift_spectrograms_right(labels, self.config.reduction_factor, decoder_attention_mask)\n if self.config.use_guided_attention_loss:\n output_attentions = True\n outputs = self.speecht5(input_values=input_ids, attention_mask=attention_mask, decoder_input_values=decoder_input_values, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, use_cache=use_cache, speaker_embeddings=speaker_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True)\n outputs_before_postnet, outputs_after_postnet, logits = self.speech_decoder_postnet(outputs[0])\n loss = None\n if labels is not None:\n criterion = SpeechT5SpectrogramLoss(self.config)\n loss = criterion(attention_mask, outputs_before_postnet, outputs_after_postnet, logits, labels, outputs.cross_attentions)\n if not return_dict:\n output = (outputs_after_postnet,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n return Seq2SeqSpectrogramOutput(loss=loss, spectrogram=outputs_after_postnet, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions)", "docstring": "input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and\n [`~PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\ndecoder_input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`):\n Float values of input mel spectrogram.\n\n SpeechT5 uses an all-zero spectrum as the starting token for `decoder_input_values` generation. 
If\n `past_key_values` is used, optionally only the last `decoder_input_values` have to be input (see\n `past_key_values`).\ndecoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_values`. Causal mask will\n also be used by default.\n\n If you want to change padding behavior, you should read [`SpeechT5Decoder._prepare_decoder_attention_mask`]\n and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more\n information on the default strategy.\ncross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):\n Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\nspeaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):\n Tensor containing the speaker embeddings.\nlabels (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`, *optional*):\n Float values of target mel spectrogram. Timesteps set to `-100.0` are ignored (masked) for the loss\n computation. Spectrograms can be obtained using [`SpeechT5Processor`]. See [`SpeechT5Processor.__call__`]\n for details.\nstop_labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Binary tensor indicating the position of the stop token in the sequence.\n\nExample:\n\n```python\n>>> from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, set_seed\n>>> import torch\n\n>>> processor = SpeechT5Processor.from_pretrained(\"microsoft/speecht5_tts\")\n>>> model = SpeechT5ForTextToSpeech.from_pretrained(\"microsoft/speecht5_tts\")\n>>> vocoder = SpeechT5HifiGan.from_pretrained(\"microsoft/speecht5_hifigan\")\n\n>>> inputs = processor(text=\"Hello, my dog is cute\", return_tensors=\"pt\")\n>>> speaker_embeddings = torch.zeros((1, 512)) # or load xvectors from a file\n\n>>> set_seed(555) # make deterministic\n\n>>> # generate speech\n>>> speech = model.generate(inputs[\"input_ids\"], speaker_embeddings=speaker_embeddings, vocoder=vocoder)\n>>> speech.shape\ntorch.Size([15872])\n```"} +{"repo": "python-fire", "function": "def FromId(os_id, error_on_unknown=True):\n if not os_id:\n return None\n for operating_system in OperatingSystem._ALL:\n if operating_system.id == os_id:\n return operating_system\n if error_on_unknown:\n raise InvalidEnumValue(os_id, 'Operating System', [value.id for value in OperatingSystem._ALL])\n return None", "docstring": "Gets the enum corresponding to the given operating system id.\n\nArgs:\n os_id: str, The operating system id to parse\n error_on_unknown: bool, True to raise an exception if the id is unknown,\n False to just return None.\n\nRaises:\n InvalidEnumValue: If the given value cannot be parsed.\n\nReturns:\n OperatingSystemTuple, One of the OperatingSystem constants or None if the\n input is None."} +{"repo": "transformers", "function": "def apply_rotary_unpadded(qkv, cos, sin, cu_seqlens: Optional[torch.Tensor]=None, max_seqlen: Optional[int]=None):\n return ApplyRotaryEmbUnpad.apply(qkv, cos, sin, cu_seqlens, max_seqlen)", "docstring": "Arguments:\n qkv: (total_nnz, 3, nheads, headdim) - input tensor for packed QKV.\n cos, sin: (seqlen_rotary, rotary_dim / 2)\n interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead\n of 1st half 
and 2nd half (GPT-NeoX style).\n inplace: if True, apply rotary embedding in-place.\n seqlen_offsets: (batch_size,) or int. Each sequence in x is shifted by this amount.\n Most commonly used in inference when we have KV cache.\n cu_seqlens: (batch + 1,) or None\n max_seqlen: int\nReturn:\n out: (total_nnz, dim)\nrotary_dim must be <= headdim\nApply rotary embedding to the first rotary_dim of x."} +{"repo": "tensorflow", "function": "def build_graph(device, n, m, k, transpose_a, transpose_b, dtype):\n with ops.device('%s' % device):\n if not transpose_a:\n x = variable_v1.VariableV1(random_ops.random_uniform([n, m], dtype=dtype), use_resource=False)\n else:\n x = variable_v1.VariableV1(random_ops.random_uniform([m, n], dtype=dtype), use_resource=False)\n if not transpose_b:\n y = variable_v1.VariableV1(random_ops.random_uniform([m, k], dtype=dtype), use_resource=False)\n else:\n y = variable_v1.VariableV1(random_ops.random_uniform([k, m], dtype=dtype), use_resource=False)\n z = math_ops.matmul(x, y, transpose_a=transpose_a, transpose_b=transpose_b)\n return control_flow_ops.group(z)", "docstring": "Build a graph containing a sequence of matmul operations.\n\nArgs:\n device: String, the device to run on.\n n: tensor A's first dimension size.\n m: tensor A's second dimension size.\n k: tensor B's second dimension size.\n transpose_a: boolean value to show if tensor A is transposed.\n transpose_b: boolean value to show if tensor B is transposed.\n dtype: numpy data type of the input tensor.\n\nReturns:\n A matmul operation to run()"} +{"repo": "tf-quant-finance", "function": "def _expected_exercise_fn(design, calibration_indices, continuation_value, exercise_value):\n mask = exercise_value > 0\n design_t = tf.transpose(design, [0, 2, 1])\n masked = tf.where(tf.expand_dims(tf.transpose(mask), axis=-1), design_t, tf.zeros_like(design_t))\n if calibration_indices is None:\n submask = masked\n mask_cont_value = continuation_value\n else:\n submask = tf.gather(masked, calibration_indices, axis=1)\n mask_cont_value = tf.gather(continuation_value, calibration_indices)\n lhs = tf.matmul(submask, submask, transpose_a=True)\n lhs_pinv = tf.linalg.pinv(lhs)\n rhs = tf.matmul(submask, tf.expand_dims(tf.transpose(mask_cont_value), axis=-1), transpose_a=True)\n beta = tf.matmul(lhs_pinv, rhs)\n continuation = tf.matmul(design_t, beta)\n return tf.nn.relu(tf.transpose(tf.squeeze(continuation, axis=-1)))", "docstring": "Returns the expected continuation value for each path.\n\nArgs:\n design: A real `Tensor` of shape `[batch_size, basis_size, num_samples]`.\n calibration_indices: A rank 1 integer `Tensor` denoting indices of samples\n used for regression.\n continuation_value: A `Tensor` of shape `[num_samples, batch_size]` and of\n the same dtype as `design`. The optimal value of the option conditional on\n not exercising now or earlier, taking future information into account.\n exercise_value: A `Tensor` of the same shape and dtype as\n `continuation_value`. 
Value of the option if exercised immediately at\n the current time\n\nReturns:\n A `Tensor` of the same shape and dtype as `continuation_value` whose\n `(n, v)`-th entry represents the expected continuation value of sample path\n `n` under the `v`-th payoff scheme."} +{"repo": "beam", "function": "def assert_that(actual, matcher, label='assert_that', reify_windows=False, use_global_window=True):\n assert isinstance(actual, pvalue.PCollection), '%s is not a supported type for Beam assert' % type(actual)\n pipeline = actual.pipeline\n if getattr(actual.pipeline, 'result', None):\n raise RuntimeError('assert_that must be used within a beam.Pipeline context. ' + 'Prior to Beam 2.60.0, asserts outside of the context of a pipeline ' + 'were silently ignored, starting with Beam 2.60.0 this is no longer ' + 'allowed. To fix, move your assert_that call into your pipeline ' + 'context so that it is added before the pipeline is run. For more ' + 'information, see https://github.com/apache/beam/pull/30771')\n if label in pipeline.applied_labels:\n label_idx = 2\n while f'{label}_{label_idx}' in pipeline.applied_labels:\n label_idx += 1\n label = f'{label}_{label_idx}'\n if isinstance(matcher, _EqualToPerWindowMatcher):\n reify_windows = True\n use_global_window = True\n\n class ReifyTimestampWindow(DoFn):\n\n def process(self, element, timestamp=DoFn.TimestampParam, window=DoFn.WindowParam, pane_info=DoFn.PaneInfoParam):\n return [TestWindowedValue(element, timestamp, [window], pane_info)]\n\n class AddWindow(DoFn):\n\n def process(self, element, window=DoFn.WindowParam):\n yield (element, window)\n\n class AssertThat(PTransform):\n\n def expand(self, pcoll):\n if reify_windows:\n pcoll = pcoll | ParDo(ReifyTimestampWindow())\n keyed_singleton = pcoll.pipeline | Create([(None, None)])\n keyed_singleton.is_bounded = True\n if use_global_window:\n pcoll = pcoll | WindowInto(window.GlobalWindows())\n keyed_actual = pcoll | 'ToVoidKey' >> Map(lambda v: (None, v))\n keyed_actual.is_bounded = True\n plain_actual = (keyed_singleton, keyed_actual) | 'Group' >> CoGroupByKey() | 'Unkey' >> Map(lambda k_values: list(k_values[1][1]))\n if not use_global_window:\n plain_actual = plain_actual | 'AddWindow' >> ParDo(AddWindow())\n return plain_actual | 'Match' >> Map(matcher)\n\n def default_label(self):\n return label\n return actual | AssertThat()", "docstring": "A PTransform that checks a PCollection has an expected value.\n\nNote that assert_that should be used only for testing pipelines since the\ncheck relies on materializing the entire PCollection being checked.\n\nArgs:\n actual: A PCollection.\n matcher: A matcher function taking as argument the actual value of a\n materialized PCollection. The matcher validates this actual value against\n expectations and raises BeamAssertException if they are not met.\n label: Optional string label.
This is needed in case several assert_that\n transforms are introduced in the same pipeline.\n reify_windows: If True, matcher is passed a list of TestWindowedValue.\n use_global_window: If False, matcher is passed a dictionary of\n (k, v) = (window, elements in the window).\n\nReturns:\n Ignored."} +{"repo": "etils", "function": "def auto_plot_array(*, video_min_num_frames: int=15, height: None | int | tuple[int, int]=(100, 250), show_images_kwargs: Optional[dict[str, Any]]=None, show_videos_kwargs: Optional[dict[str, Any]]=None) -> None:\n ipython = IPython.get_ipython()\n if ipython is None:\n return\n array_repr_html_fn = functools.partial(array_repr_html, video_min_num_frames=video_min_num_frames, height=height, show_images_kwargs=show_images_kwargs, show_videos_kwargs=show_videos_kwargs)\n print('Display big np/tf/jax arrays as image for nicer IPython display')\n formatter = ipython.display_formatter.formatters['text/html']\n try:\n jnp = enp.lazy.jnp\n except ImportError:\n pass\n else:\n jax_array_cls = type(jnp.zeros(shape=()))\n formatter.for_type(jax_array_cls, array_repr_html_fn)\n try:\n tf = enp.lazy.tf\n except ImportError:\n pass\n else:\n formatter.for_type(tf.Tensor, array_repr_html_fn)\n try:\n torch = enp.lazy.torch\n except ImportError:\n pass\n else:\n formatter.for_type(torch.Tensor, array_repr_html_fn)\n formatter.for_type(enp.lazy.np.ndarray, array_repr_html_fn)", "docstring": "If called, 2d/3d image arrays will be plotted as images in colab/jupyter.\n\nUsage:\n\n>>> ecolab.auto_plot_array()\n>>> np.zeros((28, 28, 3)) # Displayed as image\n\nArgs:\n video_min_num_frames: Video `(num_frames, h, w, c)` with less than this\n number of frames will be displayed as individual images\n height: `(min, max)` image height in pixels. Images smaller/larger will be\n reshaped. `None` to disable.
If a single number, assume `min == max`.\n show_images_kwargs: Kwargs forwarded to `mediapy.show_images`\n show_videos_kwargs: Kwargs forwarded to `mediapy.show_videos`"} +{"repo": "tensorflow", "function": "def _gen_save_and_restore_functions(checkpoint_factory_map: object_identity.ObjectIdentityDictionary) -> object_identity.ObjectIdentityDictionary:\n saveable_fn_map = object_identity.ObjectIdentityDictionary()\n for obj, factory_data_list in checkpoint_factory_map.items():\n if resource_variable_ops.is_resource_variable(obj) or not factory_data_list:\n continue\n if factory_data_list[0].name == trackable_utils.SERIALIZE_TO_TENSORS_NAME:\n assert len(factory_data_list) == 1\n saveable_fn_map[obj] = {trackable_utils.SERIALIZE_TO_TENSORS_NAME: tracing_utils.trace_save_and_restore(obj)}\n else:\n saveable_fn_map[obj] = trace_saveable_util.trace_save_restore_function_map(obj, factory_data_list)\n return saveable_fn_map", "docstring": "Generates global and individual save/restore concrete functions.\n\nThe global functions record the ops to save and restore the entire object to\na file prefix, while the individual functions save and restore value tensors\nfor resources.\n\nThis function is intended to run on the output of\n`save_util_v1.get_checkpoint_factories_and_keys(object_names)`,\nwhich returns the generated map of `_CheckpointFactoryData`.\n\nArgs:\n checkpoint_factory_map: A dictionary mapping trackable objects to\n a list of `_CheckpointFactoryData`.\n\nReturns:\n Tuple of (\n saveable_fn_map: Maps obj -> factory name -> (concrete save, restore)\n )"} +{"repo": "transformers", "function": "class TFGPT2DoubleHeadsModelOutput(ModelOutput):\n logits: Optional[tf.Tensor] = None\n mc_logits: Optional[tf.Tensor] = None\n past_key_values: List[tf.Tensor] | None = None\n hidden_states: Tuple[tf.Tensor] | None = None\n attentions: Tuple[tf.Tensor] | None = None", "docstring": "Base class for outputs of models predicting if two sentences are consecutive or not.\n\nArgs:\n logits (`tf.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n mc_logits (`tf.Tensor` of shape `(batch_size, num_choices)`):\n Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).\n past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,\n sequence_length, embed_size_per_head)`).\n\n Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape\n `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attention weights after the attention softmax, used to compute the weighted average in the
self-attention\n heads."} +{"repo": "beam", "function": "class IncMeanTracker(WindowedTracker, MeanTracker):\n\n def __init__(self, window_mode, **kwargs):\n super().__init__(window_mode=window_mode, **kwargs)\n self._mean = 0\n\n def push(self, x):\n \"\"\"Pushes a new value and updates the incremental mean.\n\n Args:\n x: The new value to be pushed.\n \"\"\"\n if not math.isnan(x):\n self._n += 1\n delta = x - self._mean\n else:\n delta = 0\n if self._window_mode == WindowMode.SLIDING:\n if len(self._queue) >= self._window_size and (not math.isnan((old_x := self.pop()))):\n self._n -= 1\n delta += self._mean - old_x\n super().push(x)\n if self._n > 0:\n self._mean += delta / self._n\n else:\n self._mean = 0\n\n def get(self):\n \"\"\"Returns the current incremental mean.\n\n Returns:\n float: The current incremental mean value.\n Returns NaN if no valid (non-NaN) values have been pushed.\n \"\"\"\n if self._n < 1:\n return float('nan')\n return self._mean", "docstring": "Base class for incremental mean trackers.\n\nThis class implements incremental calculation of the mean, which is more\nefficient for streaming data as it updates the mean with each new data point\ninstead of recalculating from scratch.\n\nArgs:\n window_mode: A `WindowMode` enum specifying whether the window is `LANDMARK`\n or `SLIDING`.\n **kwargs: Keyword arguments passed to the parent class constructor."} +{"repo": "tensorflow", "function": "def freeze_graph_with_def_protos(input_graph_def: Optional[graph_pb2.GraphDef], input_saver_def: Optional[saver_pb2.SaverDef], input_checkpoint: Optional[str], output_node_names: str, restore_op_name: Optional[str], filename_tensor_name: Optional[str], output_graph: str, clear_devices: bool, initializer_nodes: str, variable_names_whitelist: str='', variable_names_denylist: str='', input_meta_graph_def: Optional[meta_graph_pb2.MetaGraphDef]=None, input_saved_model_dir: Optional[str]=None, saved_model_tags: Optional[List[str]]=None, checkpoint_version: int=saver_pb2.SaverDef.V2) -> graph_pb2.GraphDef:\n del restore_op_name, filename_tensor_name\n if not input_saved_model_dir and (not checkpoint_management.checkpoint_exists(input_checkpoint)):\n raise ValueError(\"Input checkpoint '\" + input_checkpoint + \"' doesn't exist!\")\n if not output_node_names:\n raise ValueError('You need to supply the name of a node to --output_node_names.')\n if clear_devices:\n if input_meta_graph_def:\n for node in input_meta_graph_def.graph_def.node:\n node.device = ''\n elif input_graph_def:\n for node in input_graph_def.node:\n node.device = ''\n if input_graph_def:\n _ = importer.import_graph_def(input_graph_def, name='')\n with session.Session() as sess:\n if input_saver_def:\n saver = saver_lib.Saver(saver_def=input_saver_def, write_version=checkpoint_version)\n saver.restore(sess, input_checkpoint)\n elif input_meta_graph_def:\n restorer = saver_lib.import_meta_graph(input_meta_graph_def, clear_devices=True)\n restorer.restore(sess, input_checkpoint)\n if initializer_nodes:\n sess.run(initializer_nodes.replace(' ', '').split(','))\n elif input_saved_model_dir:\n if saved_model_tags is None:\n saved_model_tags = []\n loader.load(sess, saved_model_tags, input_saved_model_dir)\n else:\n var_list = {}\n reader = py_checkpoint_reader.NewCheckpointReader(input_checkpoint)\n var_to_shape_map = reader.get_variable_to_shape_map()\n all_partition_variable_names = [tensor.name.split(':')[0] for op in sess.graph.get_operations() for tensor in op.values() if re.search('/part_\\\\d+/', tensor.name)]\n 
has_partition_var = False\n for key in var_to_shape_map:\n try:\n tensor = sess.graph.get_tensor_by_name(key + ':0')\n if any((key in name for name in all_partition_variable_names)):\n has_partition_var = True\n except KeyError:\n continue\n var_list[key] = tensor\n try:\n saver = saver_lib.Saver(var_list=var_list, write_version=checkpoint_version)\n except TypeError as e:\n if has_partition_var:\n raise ValueError('Models containing partition variables cannot be converted from checkpoint files. Please pass in a SavedModel using the flag --input_saved_model_dir.')\n elif _has_no_variables(sess):\n raise ValueError('No variables were found in this model. It is likely the model was frozen previously. You cannot freeze a graph twice.')\n else:\n raise e\n saver.restore(sess, input_checkpoint)\n if initializer_nodes:\n sess.run(initializer_nodes.replace(' ', '').split(','))\n variable_names_whitelist = variable_names_whitelist.replace(' ', '').split(',') if variable_names_whitelist else None\n variable_names_denylist = variable_names_denylist.replace(' ', '').split(',') if variable_names_denylist else None\n if input_meta_graph_def:\n output_graph_def = convert_to_constants.convert_variables_to_constants(sess, input_meta_graph_def.graph_def, output_node_names.replace(' ', '').split(','), variable_names_whitelist=variable_names_whitelist, variable_names_blacklist=variable_names_denylist)\n else:\n output_graph_def = convert_to_constants.convert_variables_to_constants(sess, input_graph_def, output_node_names.replace(' ', '').split(','), variable_names_whitelist=variable_names_whitelist, variable_names_blacklist=variable_names_denylist)\n if output_graph:\n with gfile.GFile(output_graph, 'wb') as f:\n f.write(output_graph_def.SerializeToString(deterministic=True))\n return output_graph_def", "docstring": "Converts all variables in a graph and checkpoint into constants.\n\nArgs:\n input_graph_def: A `GraphDef`.\n input_saver_def: A `SaverDef` (optional).\n input_checkpoint: The prefix of a V1 or V2 checkpoint, with V2 taking\n priority. Typically the result of `Saver.save()` or that of\n `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or\n V1/V2.\n output_node_names: The name(s) of the output nodes, comma separated.\n restore_op_name: Unused.\n filename_tensor_name: Unused.\n output_graph: String where to write the frozen `GraphDef`.\n clear_devices: A Bool whether to remove device specifications.\n initializer_nodes: Comma separated string of initializer nodes to run before\n freezing.\n variable_names_whitelist: The set of variable names to convert (optional, by\n default, all variables are converted).\n variable_names_denylist: The set of variable names to omit converting to\n constants (optional).\n input_meta_graph_def: A `MetaGraphDef` (optional),\n input_saved_model_dir: Path to the dir with TensorFlow 'SavedModel' file and\n variables (optional).\n saved_model_tags: Group of comma separated tag(s) of the MetaGraphDef to\n load, in string format (optional).\n checkpoint_version: Tensorflow variable file format (saver_pb2.SaverDef.V1\n or saver_pb2.SaverDef.V2)\n\nReturns:\n Location of the output_graph_def."} +{"repo": "tensorflow", "function": "def index_add(x, idx, y):\n return _index_update_helper(tf_np.ndarray._with_index_add, x, idx, y)", "docstring": "Pure equivalent of `x[idx] += y`.\n\nReturns the value of x that would result from the NumPy-style indexed\nassignment `x[idx] += y`. 
Because it's a pure function, `x` itself won't be\nchanged.\n\nArgs:\n x: an array with the values to be updated.\n idx: a Numpy-style index, consisting of `None`, integers, slice objects,\n ellipses, ndarrays with integer dtypes, or a tuple of the above.\n y: the array of updates. `y` must be broadcastable to the shape of the array\n that would be returned by `x[idx]`.\n\nReturns:\n The updated version of `x`."} +{"repo": "beam", "function": "class DataframeTransform(transforms.PTransform):\n\n def __init__(self, func, proxy=None, yield_elements='schemas', include_indexes=False):\n self._func = func\n self._proxy = proxy\n self._yield_elements = yield_elements\n self._include_indexes = include_indexes\n\n def expand(self, input_pcolls):\n from apache_beam.dataframe import convert\n input_dict: dict[Any, PCollection] = _flatten(input_pcolls)\n proxies = _flatten(self._proxy) if self._proxy is not None else {tag: None for tag in input_dict}\n input_frames: dict[Any, frame_base.DeferredFrame] = {k: convert.to_dataframe(pc, proxies[k]) for k, pc in input_dict.items()}\n frames_input = _substitute(input_pcolls, input_frames)\n if isinstance(frames_input, dict):\n result_frames = self._func(**frames_input)\n elif isinstance(frames_input, tuple):\n result_frames = self._func(*frames_input)\n else:\n result_frames = self._func(frames_input)\n result_frames_dict = _flatten(result_frames)\n keys = list(result_frames_dict.keys())\n result_frames_tuple = tuple((result_frames_dict[key] for key in keys))\n result_pcolls_tuple = convert.to_pcollection(*result_frames_tuple, label='Eval', always_return_tuple=True, yield_elements=self._yield_elements, include_indexes=self._include_indexes)\n result_pcolls_dict = dict(zip(keys, result_pcolls_tuple))\n return _substitute(result_frames, result_pcolls_dict)", "docstring": "A PTransform for applying function that takes and returns dataframes\nto one or more PCollections.\n\n:class:`DataframeTransform` will accept a PCollection with a `schema`_ and\nbatch it into :class:`~pandas.DataFrame` instances if necessary::\n\n (pcoll | beam.Select(key=..., foo=..., bar=...)\n | DataframeTransform(lambda df: df.group_by('key').sum()))\n\nIt is also possible to process a PCollection of :class:`~pandas.DataFrame`\ninstances directly, in this case a \"proxy\" must be provided. 
For example, if\n``pcoll`` is a PCollection of DataFrames, one could write::\n\n pcoll | DataframeTransform(lambda df: df.group_by('key').sum(), proxy=...)\n\nTo pass multiple PCollections, pass a tuple of PCollections which will be\npassed to the callable as positional arguments, or a dictionary of\nPCollections, in which case they will be passed as keyword arguments.\n\nArgs:\n yield_elements: (optional, default: \"schemas\") If set to ``\"pandas\"``,\n return PCollection(s) containing the raw Pandas objects\n (:class:`~pandas.DataFrame` or :class:`~pandas.Series` as appropriate).\n If set to ``\"schemas\"``, return an element-wise PCollection, where\n DataFrame and Series instances are expanded to one element per row.\n DataFrames are converted to `schema-aware`_ PCollections, where column\n values can be accessed by attribute.\n include_indexes: (optional, default: False) When\n ``yield_elements=\"schemas\"``, if ``include_indexes=True``, attempt to\n include index columns in the output schema for expanded DataFrames.\n Raises an error if any of the index levels are unnamed (name=None), or if\n any of the names are not unique among all column and index names.\n proxy: (optional) An empty :class:`~pandas.DataFrame` or\n :class:`~pandas.Series` instance with the same ``dtype`` and ``name``\n as the elements of the input PCollection. Required when input\n PCollection contains :class:`~pandas.DataFrame` or :class:`~pandas.Series`\n elements. Ignored when input PCollection has a `schema`_.\n\n.. _schema:\n https://beam.apache.org/documentation/programming-guide/#what-is-a-schema\n.. _schema-aware:\n https://beam.apache.org/documentation/programming-guide/#what-is-a-schema"} +{"repo": "etils", "function": "def reraise(e: Exception, prefix: Optional[_Str]=None, suffix: Optional[_Str]=None) -> NoReturn:\n __tracebackhide__ = True\n prefix = prefix() if callable(prefix) else prefix\n suffix = suffix() if callable(suffix) else suffix\n prefix = prefix or ''\n suffix = '\\n' + suffix if suffix else ''\n msg = f'{prefix}{e}{suffix}'\n\n class WrappedException(type(e)):\n \"\"\"Exception proxy with additional message.\"\"\"\n\n def __init__(self, msg):\n Exception.__init__(self, msg)\n\n def __getattr__(self, name: str):\n return getattr(e, name)\n __repr__ = BaseException.__repr__\n __str__ = BaseException.__str__\n WrappedException.__name__ = type(e).__name__\n WrappedException.__qualname__ = type(e).__qualname__\n WrappedException.__module__ = type(e).__module__\n new_exception = WrappedException(msg)\n raise new_exception.with_traceback(e.__traceback__) from e.__cause__", "docstring": "Reraise an exception with an additional message.\n\nBenefit: Contrary to `raise ... from ...` and\n`raise Exception().with_traceback(tb)`, this function will:\n\n* Keep the original exception type, attributes,...\n* Avoid multi-nested `During handling of the above exception, another\n exception occurred`.
Only the single original stacktrace is displayed.\n\nThis results in cleaner and more compact error messages.\n\nUsage:\n\n```\ntry:\n fn(x)\nexcept Exception as e:\n epy.reraise(e, prefix=f'Error for {x}: ')\n```\n\nArgs:\n e: Exception to reraise\n prefix: Prefix to add to the exception message.\n suffix: Suffix to add to the exception message."} +{"repo": "transformers", "function": "def batch_encode_candidates(self, text, **kwargs):\n kwargs['padding'] = PaddingStrategy.MAX_LENGTH\n batch_text = text\n batch_text_pair = kwargs.pop('text_pair', None)\n return_tensors = kwargs.pop('return_tensors', None)\n output_data = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []}\n for idx, candidate_text in enumerate(batch_text):\n if batch_text_pair is not None:\n candidate_text_pair = batch_text_pair[idx]\n else:\n candidate_text_pair = None\n encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)\n encoded_input_ids = encoded_candidates.get('input_ids')\n encoded_attention_mask = encoded_candidates.get('attention_mask')\n encoded_token_type_ids = encoded_candidates.get('token_type_ids')\n if encoded_input_ids is not None:\n output_data['input_ids'].append(encoded_input_ids)\n if encoded_attention_mask is not None:\n output_data['attention_mask'].append(encoded_attention_mask)\n if encoded_token_type_ids is not None:\n output_data['token_type_ids'].append(encoded_token_type_ids)\n output_data = {key: item for key, item in output_data.items() if len(item) != 0}\n return BatchEncoding(output_data, tensor_type=return_tensors)", "docstring": "Encode a batch of text or text pair. This method is similar to the regular __call__ method but has the following\ndifferences:\n\n 1. Handle additional num_candidate axis. (batch_size, num_candidates, text)\n 2. Always pad the sequences to *max_length*.\n 3. Must specify *max_length* in order to stack packs of candidates into a batch.\n\n - single sequence: `[CLS] X [SEP]`\n - pair of sequences: `[CLS] A [SEP] B [SEP]`\n\nArgs:\n text (`List[List[str]]`):\n The batch of sequences to be encoded. Each sequence must be in this format: (batch_size,\n num_candidates, text).\n text_pair (`List[List[str]]`, *optional*):\n The batch of sequences to be encoded.
Each sequence must be in this format: (batch_size,\n num_candidates, text).\n **kwargs:\n Keyword arguments of the __call__ method.\n\nReturns:\n [`BatchEncoding`]: Encoded text or text pair.\n\nExample:\n\n```python\n>>> from transformers import RealmTokenizerFast\n\n>>> # batch_size = 2, num_candidates = 2\n>>> text = [[\"Hello world!\", \"Nice to meet you!\"], [\"The cute cat.\", \"The adorable dog.\"]]\n\n>>> tokenizer = RealmTokenizerFast.from_pretrained(\"google/realm-cc-news-pretrained-encoder\")\n>>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors=\"pt\")\n```"} +{"repo": "tensorflow", "function": "def create_gpu_capa_map(match_list, generate_csv=False, filename='compute_capability'):\n gpu_capa = collections.OrderedDict()\n include = False\n gpu = ''\n cnt = 0\n mismatch_cnt = 0\n for match in match_list:\n if 'Products' in match:\n if not include:\n include = True\n continue\n elif 'www' in match:\n include = False\n break\n if include:\n if gpu:\n if gpu in gpu_capa:\n gpu_capa[gpu].append(match)\n else:\n gpu_capa[gpu] = [match]\n gpu = ''\n cnt += 1\n if len(list(gpu_capa.keys())) < cnt:\n mismatch_cnt += 1\n cnt = len(list(gpu_capa.keys()))\n else:\n gpu = match\n if generate_csv:\n f_name = filename + '.csv'\n write_csv_from_dict(f_name, gpu_capa)\n return gpu_capa", "docstring": "Generates a map between GPU types and corresponding compute capability.\n\nThis method is used for retrieving CUDA compute capability from the web only.\n\nArgs:\n match_list: List of all CUDA compute capability detected from the webpage.\n generate_csv: Boolean for creating csv file to store results.\n filename: String that is the name of the csv file (without `.csv` ending).\n\nReturns:\n OrderedDict that lists in the incoming order of all CUDA compute capability\n provided as `match_list`."} +{"repo": "transformers", "function": "def __call__(self, image: Union[str, 'Image.Image', List[Dict[str, Any]]], candidate_labels: Optional[Union[str, List[str]]]=None, **kwargs: Any) -> Union[List[Dict[str, Any]], List[List[Dict[str, Any]]]]:\n if 'text_queries' in kwargs:\n candidate_labels = kwargs.pop('text_queries')\n if isinstance(image, (str, Image.Image)):\n inputs = {'image': image, 'candidate_labels': candidate_labels}\n elif isinstance(image, (list, tuple)) and valid_images(image):\n return list(super().__call__(({'image': img, 'candidate_labels': labels} for img, labels in zip(image, candidate_labels)), **kwargs))\n else:\n '\\n Supports the following format\\n - {\"image\": image, \"candidate_labels\": candidate_labels}\\n - [{\"image\": image, \"candidate_labels\": candidate_labels}]\\n - Generator and datasets\\n This is a common pattern in other multimodal pipelines, so we support it here as well.\\n '\n inputs = image\n results = super().__call__(inputs, **kwargs)\n return results", "docstring": "Detect objects (bounding boxes & classes) in the image(s) passed as inputs.\n\nArgs:\n image (`str`, `PIL.Image` or `List[Dict[str, Any]]`):\n The pipeline handles three types of images:\n\n - A string containing an http url pointing to an image\n - A string containing a local path to an image\n - An image loaded in PIL directly\n\n You can use this parameter to send directly a list of images, or a dataset or a generator like so:\n\n ```python\n >>> from transformers import pipeline\n\n >>> detector = pipeline(model=\"google/owlvit-base-patch32\", task=\"zero-shot-object-detection\")\n >>> detector(\n ... [\n ... {\n ... 
\"image\": \"http://images.cocodataset.org/val2017/000000039769.jpg\",\n ... \"candidate_labels\": [\"cat\", \"couch\"],\n ... },\n ... {\n ... \"image\": \"http://images.cocodataset.org/val2017/000000039769.jpg\",\n ... \"candidate_labels\": [\"cat\", \"couch\"],\n ... },\n ... ]\n ... )\n [[{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.25, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}], [{'score': 0.287, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}}, {'score': 0.254, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}}, {'score': 0.121, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}}]]\n ```\n\n\n candidate_labels (`str` or `List[str]` or `List[List[str]]`):\n What the model should recognize in the image.\n\n threshold (`float`, *optional*, defaults to 0.1):\n The probability necessary to make a prediction.\n\n top_k (`int`, *optional*, defaults to None):\n The number of top predictions that will be returned by the pipeline. If the provided number is `None`\n or higher than the number of predictions available, it will default to the number of predictions.\n\n timeout (`float`, *optional*, defaults to None):\n The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and\n the call may block forever.\n\n\nReturn:\n A list of lists containing prediction results, one list per input image. Each list contains dictionaries\n with the following keys:\n\n - **label** (`str`) -- Text query corresponding to the found object.\n - **score** (`float`) -- Score corresponding to the object (between 0 and 1).\n - **box** (`Dict[str,int]`) -- Bounding box of the detected object in image's original size. It is a\n dictionary with `x_min`, `x_max`, `y_min`, `y_max` keys."} +{"repo": "mobly", "function": "class TestResultRecord:\n\n def __init__(self, t_name, t_class=None):\n self.test_name = t_name\n self.test_class = t_class\n self.begin_time = None\n self.end_time = None\n self.uid = None\n self.signature = None\n self.retry_parent = None\n self.parent = None\n self.termination_signal = None\n self.extra_errors = collections.OrderedDict()\n self.result = None\n\n @property\n def details(self):\n \"\"\"String description of the cause of the test's termination.\n\n Note a passed test can have this as well due to the explicit pass\n signal. If the test passed implicitly, this field would be None.\n \"\"\"\n if self.termination_signal:\n return self.termination_signal.details\n\n @property\n def termination_signal_type(self):\n \"\"\"Type name of the signal that caused the test's termination.\n\n Note a passed test can have this as well due to the explicit pass\n signal. 
If the test passed implicitly, this field would be None.\n \"\"\"\n if self.termination_signal:\n return self.termination_signal.type\n\n @property\n def stacktrace(self):\n \"\"\"The stacktrace string for the exception that terminated the test.\"\"\"\n if self.termination_signal:\n return self.termination_signal.stacktrace\n\n @property\n def extras(self):\n \"\"\"User defined extra information of the test result.\n\n Must be serializable.\n \"\"\"\n if self.termination_signal:\n return self.termination_signal.extras\n\n def test_begin(self):\n \"\"\"Call this when the test begins execution.\n\n Sets the begin_time of this record.\n \"\"\"\n self.begin_time = utils.get_current_epoch_time()\n self.signature = '%s-%s' % (self.test_name, self.begin_time)\n\n def _test_end(self, result, e):\n \"\"\"Marks the end of the test logic.\n\n Args:\n result: One of the TEST_RESULT enums in TestResultEnums.\n e: A test termination signal (usually an exception object). It can\n be any exception instance or of any subclass of\n mobly.signals.TestSignal.\n \"\"\"\n if self.begin_time is not None:\n self.end_time = utils.get_current_epoch_time()\n self.result = result\n if e:\n self.termination_signal = ExceptionRecord(e)\n\n def update_record(self):\n \"\"\"Updates the content of a record.\n\n Several display fields like \"details\" and \"stacktrace\" need to be\n updated based on the content of the record object.\n\n As the content of the record changes, call this method to update all\n the appropriate fields.\n \"\"\"\n if self.extra_errors:\n if self.result != TestResultEnums.TEST_RESULT_FAIL:\n self.result = TestResultEnums.TEST_RESULT_ERROR\n if not self.termination_signal and self.extra_errors:\n _, self.termination_signal = self.extra_errors.popitem(last=False)\n\n def test_pass(self, e=None):\n \"\"\"To mark the test as passed in this record.\n\n Args:\n e: An instance of mobly.signals.TestPass.\n \"\"\"\n self._test_end(TestResultEnums.TEST_RESULT_PASS, e)\n\n def test_fail(self, e=None):\n \"\"\"To mark the test as failed in this record.\n\n Only test_fail does an instance check because we want 'assert xxx' to also\n fail the test the same way assert_true does.\n\n Args:\n e: An exception object. It can be an instance of AssertionError or\n mobly.base_test.TestFailure.\n \"\"\"\n self._test_end(TestResultEnums.TEST_RESULT_FAIL, e)\n\n def test_skip(self, e=None):\n \"\"\"To mark the test as skipped in this record.\n\n Args:\n e: An instance of mobly.signals.TestSkip.\n \"\"\"\n self._test_end(TestResultEnums.TEST_RESULT_SKIP, e)\n\n def test_error(self, e=None):\n \"\"\"To mark the test as error in this record.\n\n Args:\n e: An exception object.\n \"\"\"\n self._test_end(TestResultEnums.TEST_RESULT_ERROR, e)\n\n def add_error(self, position, e):\n \"\"\"Add an extra error that happened during a test.\n\n If the test has passed or skipped, this will mark the test result as\n ERROR.\n\n If an error is added to the test record, the record's result is equivalent\n to the case where an uncaught exception happened.\n\n If the test record has not recorded any error, the newly added error\n would be the main error of the test record. Otherwise the newly added\n error is added to the record's extra errors.\n\n Args:\n position: string, where this error occurred, e.g.
'teardown_test'.\n e: An exception or a `signals.ExceptionRecord` object.\n \"\"\"\n if self.result != TestResultEnums.TEST_RESULT_FAIL:\n self.result = TestResultEnums.TEST_RESULT_ERROR\n if position in self.extra_errors:\n raise Error('An exception is already recorded with position \"%s\", cannot reuse.' % position)\n if isinstance(e, ExceptionRecord):\n self.extra_errors[position] = e\n else:\n self.extra_errors[position] = ExceptionRecord(e, position=position)\n\n def __str__(self):\n d = self.to_dict()\n kv_pairs = ['%s = %s' % (k, v) for k, v in d.items()]\n s = ', '.join(kv_pairs)\n return s\n\n def __repr__(self):\n \"\"\"This returns a short string representation of the test record.\"\"\"\n t = utils.epoch_to_human_time(self.begin_time)\n return f'{t} {self.test_name} {self.result}'\n\n def to_dict(self):\n \"\"\"Gets a dictionary representing the content of this class.\n\n Returns:\n A dictionary representing the content of this class.\n \"\"\"\n d = {}\n d[TestResultEnums.RECORD_NAME] = self.test_name\n d[TestResultEnums.RECORD_CLASS] = self.test_class\n d[TestResultEnums.RECORD_BEGIN_TIME] = self.begin_time\n d[TestResultEnums.RECORD_END_TIME] = self.end_time\n d[TestResultEnums.RECORD_RESULT] = self.result\n d[TestResultEnums.RECORD_UID] = self.uid\n d[TestResultEnums.RECORD_SIGNATURE] = self.signature\n d[TestResultEnums.RECORD_RETRY_PARENT] = self.retry_parent.signature if self.retry_parent else None\n d[TestResultEnums.RECORD_PARENT] = {'parent': self.parent[0].signature, 'type': self.parent[1].value} if self.parent else None\n d[TestResultEnums.RECORD_EXTRAS] = self.extras\n d[TestResultEnums.RECORD_DETAILS] = self.details\n d[TestResultEnums.RECORD_TERMINATION_SIGNAL_TYPE] = self.termination_signal_type\n d[TestResultEnums.RECORD_EXTRA_ERRORS] = {key: value.to_dict() for key, value in self.extra_errors.items()}\n d[TestResultEnums.RECORD_STACKTRACE] = self.stacktrace\n return d", "docstring": "A record that holds the information of a single test.\n\nThe record object holds all information of a test, including all the\nexceptions that occurred during the test.\n\nA test can terminate for two reasons:\n 1. the test function executes to the end and completes naturally.\n 2. the test is terminated by an exception, which we call\n \"termination signal\".\n\nThe termination signal is treated differently. Its contents are extracted\ninto first-tier attributes of the record object, like `details` and\n`stacktrace`, for easy consumption.\n\nNote the termination signal is not always an error, it can also be an explicit\npass signal or abort/skip signals.\n\nAttributes:\n test_name: string, the name of the test.\n begin_time: Epoch timestamp of when the test started.\n end_time: Epoch timestamp of when the test ended.\n uid: User-defined unique identifier of the test.\n signature: string, unique identifier of a test record, the value is\n generated by Mobly.\n retry_parent: [DEPRECATED] Use the `parent` field instead.\n parent: tuple[TestResultRecord, TestParentType], set for multiple iterations\n of a test. This is the test result record of the previous iteration.\n Parsers can use this field to construct the chain of execution for each test.\n termination_signal: ExceptionRecord, the main exception of the test.\n extra_errors: OrderedDict, all exceptions that occurred during the entire\n test lifecycle.
The order of occurrence is preserved.\n result: TestResultEnum.TEST_RESULT_*, PASS/FAIL/SKIP."} +{"repo": "tensorflow", "function": "def _build_ragged_tensor_from_value_ranges(starts, limits, step, values):\n if step is None:\n step = 1\n step = ops.convert_to_tensor(step, name='step')\n if step.dtype.is_integer:\n step = math_ops.cast(step, starts.dtype)\n else:\n raise TypeError('slice strides must be integers or None')\n value_indices = ragged_math_ops.range(starts, limits, step, row_splits_dtype=starts.dtype)\n if isinstance(values, ragged_tensor.RaggedTensor):\n gathered_values = ragged_gather_ops.gather(params=values, indices=value_indices.values)\n else:\n gathered_values = array_ops.gather(params=values, indices=value_indices.values)\n return value_indices.with_values(gathered_values)", "docstring": "Returns a `RaggedTensor` containing the specified sequences of values.\n\nReturns a RaggedTensor `output` where:\n\n```python\noutput.shape[0] = starts.shape[0]\noutput[i] = values[starts[i]:limits[i]:step]\n```\n\nRequires that `starts.shape == limits.shape` and\n`0 <= starts[i] <= limits[i] <= values.shape[0]`.\n\nArgs:\n starts: 1D integer Tensor specifying the start indices for the sequences of\n values to include.\n limits: 1D integer Tensor specifying the limit indices for the sequences of\n values to include.\n step: Integer value specifying the step size for strided slices.\n values: The set of values to select from.\n\nReturns:\n A `RaggedTensor`.\n\nRaises:\n ValueError: Until the prerequisite ops are checked in."} +{"repo": "tensorflow", "function": "def compress(element):\n element_spec = structure.type_spec_from_value(element)\n tensor_list = structure.to_tensor_list(element_spec, element)\n return ged_ops.compress_element(tensor_list)", "docstring": "Compress a dataset element.\n\nArgs:\n element: A nested structure of types supported by Tensorflow.\n\nReturns:\n A variant tensor representing the compressed element. This variant can be\n passed to `uncompress` to get back the original element."} +{"repo": "transformers", "function": "def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:\n old_embedding_dim = shape_list(old_embeddings)[1]\n init_range = getattr(self.config, 'initializer_range', 0.02)\n embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)\n new_embeddings = self.add_weight(name=old_embeddings.name.split(':')[0], shape=[new_num_tokens, old_embedding_dim], initializer=get_initializer(init_range), dtype=tf.float32)\n init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())\n new_embeddings.assign(init_embeddings)\n return new_embeddings", "docstring": "Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly\ninitialized vectors at the end. Reducing the size will remove vectors from the end\n\nArgs:\n old_embeddings (`tf.Variable`):\n Old embeddings to be resized.\n new_num_tokens (`int`, *optional*):\n New number of tokens in the embedding matrix.\n\n Increasing the size will add newly initialized vectors at the end. Reducing the size will remove\n vectors from the end. 
If not provided or `None`, just returns a pointer to the input tokens\n `tf.Variable` module of the model without doing anything.\n\nReturn:\n `tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is\n `None`"} +{"repo": "transformers", "function": "def create_dummy_object(name: str, backend_name: str) -> str:\n if name.isupper():\n return DUMMY_CONSTANT.format(name)\n elif name.islower():\n return DUMMY_FUNCTION.format(name, backend_name)\n else:\n return DUMMY_CLASS.format(name, backend_name)", "docstring": "Create the code for a dummy object.\n\nArgs:\n name (`str`): The name of the object.\n backend_name (`str`): The name of the backend required for that object.\n\nReturns:\n `str`: The code of the dummy object."} +{"repo": "yapf", "function": "def GetExcludePatternsForDir(dirname):\n ignore_patterns = []\n yapfignore_file = os.path.join(dirname, '.yapfignore')\n if os.path.exists(yapfignore_file):\n ignore_patterns += _GetExcludePatternsFromYapfIgnore(yapfignore_file)\n pyproject_toml_file = os.path.join(dirname, 'pyproject.toml')\n if os.path.exists(pyproject_toml_file):\n ignore_patterns += _GetExcludePatternsFromPyprojectToml(pyproject_toml_file)\n return ignore_patterns", "docstring": "Return patterns of files to exclude from ignorefile in a given directory.\n\nLooks for .yapfignore in the directory dirname.\n\nArguments:\n dirname: (unicode) The name of the directory.\n\nReturns:\n A List of file patterns to exclude if ignore file is found, otherwise empty\n List."} +{"repo": "keras", "function": "def densifying_unary(func):\n\n @functools.wraps(func)\n def sparse_wrapper(x, *args, **kwargs):\n if isinstance(x, jax_sparse.JAXSparse):\n x = x.todense()\n return func(x, *args, **kwargs)\n return sparse_wrapper", "docstring": "Decorator to add support for `JAXSparse` tensors (including `BCOO`) to a\nnon-zero-preserving element-wise unary operator.\n\nThere are requirements on the operator for this decorator to work correctly:\n\n- The operator must be element-wise\n- The operator must be unary (one input tensor and one output tensor)\n- The operator must return a tensor of the same shape.\n\nAdditional arguments to the function (besides the input tensor) are\nsupported. 
The returned result is a dense tensor.\n\nArgs:\n func: The unary operator to wrap.\nReturns:\n Wrapped function that supports `JAXSparse` tensors."} +{"repo": "transformers", "function": "def forward(self, input_ids: Optional[torch.LongTensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[Cache]=None, inputs_embeds: Optional[torch.FloatTensor]=None, labels: Optional[torch.LongTensor]=None, use_cache: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, cache_position: Optional[torch.LongTensor]=None, logits_to_keep: Union[int, torch.Tensor]=0, **kwargs: Unpack[KwargsForCausalLM]) -> CausalLMOutputWithPast:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n outputs: BaseModelOutputWithPast = self.model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs)\n hidden_states = outputs.last_hidden_state\n slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep\n logits = self.lm_head(hidden_states[:, slice_indices, :])\n loss = None\n if labels is not None:\n loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)\n return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\nExample:\n\n```python\n>>> from transformers import AutoTokenizer, Starcoder2ForCausalLM\n\n>>> model = Starcoder2ForCausalLM.from_pretrained(\"meta-starcoder2/Starcoder2-2-7b-hf\")\n>>> tokenizer = AutoTokenizer.from_pretrained(\"meta-starcoder2/Starcoder2-2-7b-hf\")\n\n>>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n>>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n>>> # Generate\n>>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n\"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n```"} +{"repo": "tensorflow", "function": "def abs(x, name=None):\n with ops.name_scope(name, 'Abs', [x]) as name:\n x = ops.convert_to_tensor(x, name='x')\n if x.dtype.is_complex:\n return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)\n return gen_math_ops._abs(x, name=name)", "docstring": "Computes the absolute value of a tensor.\n\nGiven a tensor of integer or floating-point values, this operation returns a\ntensor of the same type, where each element contains the absolute value of the\ncorresponding element in the input.\n\nGiven a tensor `x` of complex numbers, this operation returns a tensor of type\n`float32` or `float64` that is the absolute value of each element in `x`. For\na complex number \\\\(a + bj\\\\), its absolute value is computed as\n\\\\(\\sqrt{a^2 + b^2}\\\\).\n\nFor example:\n\n>>> # real number\n>>> x = tf.constant([-2.25, 3.25])\n>>> tf.abs(x)\n\n\n>>> # complex number\n>>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])\n>>> tf.abs(x)\n\n\nArgs:\n x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,\n `int32`, `int64`, `complex64` or `complex128`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,\n with absolute values. Note, for `complex64` or `complex128` input, the\n returned `Tensor` will be of type `float32` or `float64`, respectively."} +{"repo": "python-fire", "function": "def GetControlSequenceIndicator(self):\n return self._csi", "docstring": "Returns the control sequence indicator string.\n\nReturns:\n The control sequence indicator string or None if control sequences are not\n supported."} +{"repo": "tensorflow", "function": "def device(self, name):\n if isinstance(name, LogicalDevice):\n name = name.name\n elif pydev.is_device_spec(name):\n name = name.to_string()\n return _EagerDeviceContext(self, name)", "docstring": "Context-manager to force placement of operations and Tensors on a device.\n\nArgs:\n name: Name of the device or None to get default placement.\n\nReturns:\n Context manager that forces device placement.\n\nRaises:\n ValueError: If name is not a string or is an invalid device name.\n RuntimeError: If device scopes are not properly nested."} +{"repo": "tensorflow", "function": "class Conv3D(keras_layers.Conv3D, base.Layer):\n\n def __init__(self, filters, kernel_size, strides=(1, 1, 1), padding='valid', data_format='channels_last', dilation_rate=(1, 1, 1), activation=None, use_bias=True, kernel_initializer=None, bias_initializer=init_ops.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, trainable=True, name=None, **kwargs):\n super(Conv3D, self).__init__(filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint, trainable=trainable, name=name, **kwargs)", "docstring": "3D convolution layer (e.g. spatial convolution over volumes).\n\nThis layer creates a convolution kernel that is convolved\n(actually cross-correlated) with the layer input to produce a tensor of\noutputs. 
If `use_bias` is True (and a `bias_initializer` is provided),\na bias vector is created and added to the outputs. Finally, if\n`activation` is not `None`, it is applied to the outputs as well.\n\nArgs:\n filters: Integer, the dimensionality of the output space (i.e. the number\n of filters in the convolution).\n kernel_size: An integer or tuple/list of 3 integers, specifying the\n depth, height and width of the 3D convolution window.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n strides: An integer or tuple/list of 3 integers,\n specifying the strides of the convolution along the depth,\n height and width.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Specifying any stride value != 1 is incompatible with specifying\n any `dilation_rate` value != 1.\n padding: One of `\"valid\"` or `\"same\"` (case-insensitive).\n `\"valid\"` means no padding. `\"same\"` results in padding evenly to\n the left/right or up/down of the input such that output has the same\n height/width dimension as the input.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, depth, height, width, channels)` while `channels_first`\n corresponds to inputs with shape\n `(batch, channels, depth, height, width)`.\n dilation_rate: An integer or tuple/list of 3 integers, specifying\n the dilation rate to use for dilated convolution.\n Can be a single integer to specify the same value for\n all spatial dimensions.\n Currently, specifying any `dilation_rate` value != 1 is\n incompatible with specifying any stride value != 1.\n activation: Activation function. Set it to None to maintain a\n linear activation.\n use_bias: Boolean, whether the layer uses a bias.\n kernel_initializer: An initializer for the convolution kernel.\n bias_initializer: An initializer for the bias vector. If None, the default\n initializer will be used.\n kernel_regularizer: Optional regularizer for the convolution kernel.\n bias_regularizer: Optional regularizer for the bias vector.\n activity_regularizer: Optional regularizer function for the output.\n kernel_constraint: Optional projection function to be applied to the\n kernel after being updated by an `Optimizer` (e.g. used to implement\n norm constraints or value constraints for layer weights). The function\n must take as input the unprojected variable and must return the\n projected variable (which must have the same shape). 
Constraints are\n not safe to use when doing asynchronous distributed training.\n bias_constraint: Optional projection function to be applied to the\n bias after being updated by an `Optimizer`.\n trainable: Boolean, if `True` also add variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n name: A string, the name of the layer."} +{"repo": "pytype", "function": "def trace(src, options=None):\n options = options or config.Options.create()\n with config.verbosity_from(options):\n loader = load_pytd.create_loader(options)\n ret = analyze.infer_types(src=src, options=options, loader=loader)\n pytd_module = ret.ast\n raw_traces = []\n for op, symbol, data in ret.context.vm.opcode_traces:\n raw_traces.append((op, symbol, tuple((_to_pytd(d, loader, pytd_module) for d in data))))\n return source.Code(src, raw_traces, TypeTrace, options.input)", "docstring": "Generates type traces for the given source code.\n\nArgs:\n src: The source text.\n options: A pytype.config.Options object that can be used to specify options\n such as the target Python version.\n\nReturns:\n A source.Code object."} +{"repo": "etils", "function": "class _Common:\n cache: dict[int, _MutableProxyImpl] = dataclasses.field(default_factory=dict)\n resolved: dict[int, Any] = dataclasses.field(default_factory=dict)\n is_frozen: bool = False\n\n def get_proxy(self, value: Any) -> _MutableProxyImpl:\n \"\"\"Returns the proxy associated with the given value, or creates it.\"\"\"\n id_ = id(value)\n if id_ not in self.cache:\n self.cache[id_] = _MutableProxyImpl(obj=value, common=self)\n return self.cache[id_]", "docstring": "Shared variable across all nested children of an `unfrozen()` object.\n\nAttributes:\n cache: Global mapping `id(_MutableProxyImpl) -> _MutableProxyImpl` to avoid\n duplicating the same object proxy\n\n ```python\n a = a.unfrozen()\n a.x = x\n a.y = x # `a.x` and `a.y` point to the same object\n a = a.frozen()\n assert a.x is a.y\n ```\n\n resolved: Cache of the objects after they have been frozen (to avoid\n evaluating the object twice).\n is_frozen: Becomes `True` after `.frozen()` is called.
After this, all\n Mutable proxies are invalid"} +{"repo": "tensorflow", "function": "def _column_name_with_class_name(fc):\n return fc.__class__.__name__ + ':' + fc.name", "docstring": "Returns a unique name for the feature column used during deduping.\n\nWithout this, two FeatureColumns that have the same name and where\none wraps the other, such as an IndicatorColumn wrapping a\nSequenceCategoricalColumn, will fail to deserialize because they will have the\nsame name in columns_by_name, causing the wrong column to be returned.\n\nArgs:\n fc: A FeatureColumn.\n\nReturns:\n A unique name as a string."} +{"repo": "transformers", "function": "def to_json_string(self) -> str:\n dictionary = self.to_dict()\n return json.dumps(dictionary, indent=2, sort_keys=True) + '\\n'", "docstring": "Serializes this instance to a JSON string.\n\nReturns:\n `str`: String containing all the attributes that make up this feature_extractor instance in JSON format."} +{"repo": "tensorflow", "function": "def _replace_row_partitions(value, new_partitions):\n if isinstance(value, tensor.Tensor) or not new_partitions:\n return value\n elif isinstance(value, ragged_tensor.RaggedTensor):\n return ragged_tensor.RaggedTensor._from_row_partition(values=_replace_row_partitions(value.values, new_partitions[1:]), row_partition=new_partitions[0])\n else:\n assert isinstance(value, StructuredTensor)\n new_fields = dict(((k, _replace_row_partitions(v, new_partitions)) for k, v in value._fields.items()))\n return StructuredTensor._old_init(fields=new_fields, shape=value.shape, nrows=value.nrows(), row_partitions=tuple(new_partitions) + tuple(value.row_partitions[len(new_partitions):]))", "docstring": "Updates `value` to use `new_partitions` as its (outer) row partitions.\n\nThis is used to ensure that all fields in a `StructuredTensor` use identical\n`RowPartition` objects for the shared dimensions. In particular,\n`StructuredTensor.from_fields` first merges all of the row partitions from\nany fields, and then replaces the outer row partitions of all fields with\nthe merged row partitions (using this function).\n\nArgs:\n value: A `Tensor`, `RaggedTensor`, or `StructuredTensor`.\n new_partitions: A list of row-partitions that should be used by `value`.\n Must be equivalent to `value`'s current row partitions.\n\nReturns:\n A value that is equivalent to `value`, where outer row partitions have been\n replaced by `new_partitions`."} +{"repo": "keras", "function": "class TruePositives(_ConfusionMatrixConditionCount):\n\n def __init__(self, thresholds=None, name=None, dtype=None):\n super().__init__(confusion_matrix_cond=metrics_utils.ConfusionMatrix.TRUE_POSITIVES, thresholds=thresholds, name=name, dtype=dtype)", "docstring": "Calculates the number of true positives.\n\nIf `sample_weight` is given, calculates the sum of the weights of\ntrue positives. This metric creates one local variable, `true_positives`\nthat is used to keep track of the number of true positives.\n\nIf `sample_weight` is `None`, weights default to 1.\nUse `sample_weight` of 0 to mask values.\n\nArgs:\n thresholds: (Optional) Defaults to `0.5`. A float value, or a Python\n list/tuple of float threshold values in `[0, 1]`. A threshold is\n compared with prediction values to determine the truth value of\n predictions (i.e., above the threshold is `True`, below is `False`).\n If used with a loss function that sets `from_logits=True` (i.e. 
no\n sigmoid applied to predictions), `thresholds` should be set to 0.\n One metric value is generated for each threshold value.\n name: (Optional) string name of the metric instance.\n dtype: (Optional) data type of the metric result.\n\nExample:\n\n>>> m = keras.metrics.TruePositives()\n>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1])\n>>> m.result()\n2.0\n\n>>> m.reset_state()\n>>> m.update_state([0, 1, 1, 1], [1, 0, 1, 1], sample_weight=[0, 0, 1, 0])\n>>> m.result()\n1.0"} +{"repo": "tensorflow", "function": "def __init__(self, num_groups=2):\n if num_groups < 1:\n raise ValueError(f'Argument `num_groups` must be a positive integer. Received: num_groups={num_groups}')\n self._ready = threading.Condition(threading.Lock())\n self._num_groups = num_groups\n self._group_member_counts = [0] * self._num_groups", "docstring": "Initialize a group lock.\n\nArgs:\n num_groups: The number of groups that will be accessing the resource under\n consideration. Should be a positive number.\n\nReturns:\n A group lock that can then be used to synchronize code.\n\nRaises:\n ValueError: If num_groups is less than 1."} +{"repo": "transformers", "function": "def _truncate(self, processed_features: Union[dict[str, np.ndarray], BatchFeature], max_length: Optional[int]=None, pad_to_multiple_of: Optional[int]=None, truncation: Optional[bool]=None):\n if not truncation:\n return processed_features\n elif truncation and max_length is None:\n raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')\n required_input = processed_features[self.model_input_names[0]]\n if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):\n max_length = (max_length // pad_to_multiple_of + 1) * pad_to_multiple_of\n needs_to_be_truncated = len(required_input) > max_length\n if needs_to_be_truncated:\n processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]\n if 'attention_mask' in processed_features:\n processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]\n return processed_features", "docstring": "Truncate inputs to a predefined length or the max length in the batch\n\nArgs:\n processed_features (`Union[Dict[str, np.ndarray], BatchFeature]`):\n Dictionary of input values (`np.ndarray[float]`) / input vectors (`List[np.ndarray[float]]`) or batch\n of input values (`List[np.ndarray[int]]`) / input vectors (`List[np.ndarray[int]]`)\n max_length (`int`, *optional*):\n maximum length of the returned list and optionally padding length (see below)\n pad_to_multiple_of (`int`, *optional*):\n Integer; if set, will pad the sequence to a multiple of the provided value. 
This is especially useful to\n enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta), or on TPUs\n which benefit from having sequence lengths be a multiple of 128.\n truncation (`bool`, *optional*):\n Activates truncation to cut input sequences longer than `max_length` to `max_length`."} +{"repo": "tensorflow", "function": "def enable_output_all_intermediates(fn: _F) -> _F:\n\n def wrapper(*args, **kwargs):\n output_all_intermediates_old = control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE\n control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True\n try:\n return fn(*args, **kwargs)\n finally:\n control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = output_all_intermediates_old\n return wrapper", "docstring": "Force-enable outputting all intermediates from functional control flow ops.\n\nArgs:\n fn: the function to be wrapped\n\nReturns:\n The wrapped function"} +{"repo": "keras", "function": "def initialize(job_addresses=None, num_processes=None, process_id=None):\n if job_addresses is None and 'KERAS_DISTRIBUTION_JOB_ADDRESSES' in os.environ:\n job_addresses = os.environ['KERAS_DISTRIBUTION_JOB_ADDRESSES']\n if num_processes is None and 'KERAS_DISTRIBUTION_NUM_PROCESSES' in os.environ:\n num_processes = int(os.environ['KERAS_DISTRIBUTION_NUM_PROCESSES'])\n if process_id is None and 'KERAS_DISTRIBUTION_PROCESS_ID' in os.environ:\n process_id = int(os.environ['KERAS_DISTRIBUTION_PROCESS_ID'])\n distribution_lib.initialize(job_addresses, num_processes, process_id)", "docstring": "Initialize the distribution system for a multi-host/process setting.\n\nCalling `initialize` will prepare the backend for execution on multi-host\nGPU or TPUs. It should be called before any computations.\n\nNote that the parameters can also be injected via environment variables,\nwhich can be better controlled by the launch script at startup time.\nFor certain backends that also rely on the environment variables for\ntheir own configuration, Keras will properly forward them.\n\nArgs:\n job_addresses: string. Comma-separated IP addresses for all the jobs\n that will form the whole computation cluster. Note that for the JAX\n backend, only the address for job 0 (coordinator) is needed. For\n certain runtimes like Cloud TPU, this value can be `None`, and the\n backend will figure it out with the TPU environment variables. You\n can also configure this value via the environment variable\n `KERAS_DISTRIBUTION_JOB_ADDRESSES`.\n num_processes: int. The number of workers/processes that will form the\n whole computation cluster. For certain runtimes like Cloud TPU, this\n value can be `None`, and the backend will figure it out with the TPU\n environment variables. You can also configure this value via the\n environment variable `KERAS_DISTRIBUTION_NUM_PROCESSES`.\n process_id: int. The ID number of the current worker/process. The value\n should range from `0` to `num_processes - 1`. `0` indicates that\n the current worker/process is the master/coordinator job. You can\n also configure this value via the environment variable\n `KERAS_DISTRIBUTION_PROCESS_ID`.\n\n Example:\n Suppose there are two GPU processes, and process 0 is running at\n address `10.0.0.1:1234`, and process 1 is running at address\n `10.0.0.2:2345`. 
To configure such a cluster, you can run\n\n On process 0:\n ```python\n keras.distribute.initialize(\n job_addresses=\"10.0.0.1:1234,10.0.0.2:2345\",\n num_processes=2,\n process_id=0)\n ```\n\n On process 1:\n ```python\n keras.distribute.initialize(\n job_addresses=\"10.0.0.1:1234,10.0.0.2:2345\",\n num_processes=2,\n process_id=1)\n ```\n\n or via the environment variables:\n On process 0:\n ```python\n os.environ[\n \"KERAS_DISTRIBUTION_JOB_ADDRESSES\"] = \"10.0.0.1:1234,10.0.0.2:2345\"\n os.environ[\"KERAS_DISTRIBUTION_NUM_PROCESSES\"] = \"2\"\n os.environ[\"KERAS_DISTRIBUTION_PROCESS_ID\"] = \"0\"\n keras.distribute.initialize()\n ```\n\n On process 1:\n ```python\n os.environ[\n \"KERAS_DISTRIBUTION_JOB_ADDRESSES\"] = \"10.0.0.1:1234,10.0.0.2:2345\"\n os.environ[\"KERAS_DISTRIBUTION_NUM_PROCESSES\"] = \"2\"\n os.environ[\"KERAS_DISTRIBUTION_PROCESS_ID\"] = \"1\"\n keras.distribute.initialize()\n ```\n\n Also note that for the JAX backend, the `job_addresses` can be further\n reduced to just the master/coordinator address, which is\n `10.0.0.1:1234`."} +{"repo": "tensorflow", "function": "def enter_cond_section(self, section_id):\n assert section_id not in self.cond_entry\n assert section_id not in self.cond_leaves\n self.cond_leaves[section_id] = []", "docstring": "Enters a conditional section.\n\nConditional sections define an entry node, and one or more branches.\n\nArgs:\n section_id: Hashable, the same node that will be used in calls to the\n section_id arg passed to new_cond_branch"} +{"repo": "tensorflow", "function": "def compute(i, tas):\n elems_value_batchable = [ta.read(i) for ta in elems_batchable_ta]\n elems_value_flat = _elems_value_batchable_to_flat(elems_value_batchable, elems_flat_signature)\n elems_value = elems_unflatten(elems_value_flat)\n ag_ctx = autograph_ctx.control_status_ctx()\n autographed_fn = autograph.tf_convert(fn, ag_ctx)\n result_value = autographed_fn(elems_value)\n nest.assert_same_structure(fn_output_signature or elems, result_value)\n result_value_flat = nest.flatten(result_value)\n result_value_batchable = _result_value_flat_to_batchable(result_value_flat, result_flat_signature)\n tas = [ta.write(i, value) for ta, value in zip(tas, result_value_batchable)]\n return (i + 1, tas)", "docstring": "The loop body of map_fn.\n\nArgs:\n i: the loop counter\n tas: the flat TensorArray accumulator list\n\nReturns:\n (i + 1, tas): the updated counter + updated TensorArrays\n\nRaises:\n TypeError: if fn_output_signature and result_value structure don't match\n ValueError: if fn_output_signature and result_value lengths don't match"} +{"repo": "transformers", "function": "class TFBaseModelOutputWithNoAttention(ModelOutput):\n last_hidden_state: Optional[tf.Tensor] = None\n hidden_states: Optional[Tuple[tf.Tensor, ...]] = None", "docstring": "Base class for model's outputs, with potential hidden states.\n\nArgs:\n last_hidden_state (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Sequence of hidden-states at the output of the last layer of the model.\n hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for\n the output of each layer) of shape `(batch_size, num_channels, height, width)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs."} +{"repo": "transformers", "function": "def forward(self, 
hidden_states: torch.Tensor, attention_mask: torch.Tensor, output_attentions: bool=False, query_states: Optional[torch.Tensor]=None, relative_pos: Optional[torch.Tensor]=None, rel_embeddings: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:\n if query_states is None:\n qp = self.in_proj(hidden_states)\n query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1)\n else:\n ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0)\n qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)]\n q = torch.matmul(qkvw[0], query_states.t().to(dtype=qkvw[0].dtype))\n k = torch.matmul(qkvw[1], hidden_states.t().to(dtype=qkvw[1].dtype))\n v = torch.matmul(qkvw[2], hidden_states.t().to(dtype=qkvw[2].dtype))\n query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]]\n query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])\n value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])\n rel_att: int = 0\n scale_factor = 1 + len(self.pos_att_type)\n scale = scaled_size_sqrt(query_layer, scale_factor)\n query_layer = query_layer / scale.to(dtype=query_layer.dtype)\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n if self.relative_attention and rel_embeddings is not None and (relative_pos is not None):\n rel_embeddings = self.pos_dropout(rel_embeddings)\n rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)\n if rel_att is not None:\n attention_scores = attention_scores + rel_att\n if self.head_logits_proj is not None:\n attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\n attention_mask = attention_mask.bool()\n attention_scores = attention_scores.masked_fill(~attention_mask, torch.finfo(query_layer.dtype).min)\n attention_probs = nn.functional.softmax(attention_scores, dim=-1)\n attention_probs = self.dropout(attention_probs)\n if self.head_weights_proj is not None:\n attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\n context_layer = torch.matmul(attention_probs, value_layer)\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (-1,)\n context_layer = context_layer.view(new_context_layer_shape)\n if not output_attentions:\n return (context_layer, None)\n return (context_layer, attention_probs)", "docstring": "Call the module.\n\nArgs:\n hidden_states (`torch.FloatTensor`):\n Input states to the module, usually the output from the previous layer; it will be the Q, K and V in\n *Attention(Q,K,V)*\n\n attention_mask (`torch.BoolTensor`):\n An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum\n sequence length, in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*\n th token.\n\n output_attentions (`bool`, *optional*):\n Whether to return the attention matrix.\n\n query_states (`torch.FloatTensor`, *optional*):\n The *Q* state in *Attention(Q,K,V)*.\n\n relative_pos (`torch.LongTensor`):\n The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with\n values ranging in [*-max_relative_positions*, *max_relative_positions*].\n\n rel_embeddings (`torch.FloatTensor`):\n The embedding of relative distances. 
It's a tensor of shape [\\(2 \\times\n \\text{max_relative_positions}\\), *hidden_size*]."} +{"repo": "tensorflow", "function": "def load_library(library_location):\n if os.path.exists(library_location):\n if os.path.isdir(library_location):\n directory_contents = os.listdir(library_location)\n kernel_libraries = [os.path.join(library_location, f) for f in directory_contents if _is_shared_object(f)]\n else:\n kernel_libraries = [library_location]\n for lib in kernel_libraries:\n py_tf.TF_LoadLibrary(lib)\n else:\n raise OSError(errno.ENOENT, 'The file or folder to load kernel libraries from does not exist.', library_location)", "docstring": "Loads a TensorFlow plugin.\n\n\"library_location\" can be a path to a specific shared object, or a folder.\nIf it is a folder, all shared objects that are named \"libtfkernel*\" will be\nloaded. When the library is loaded, kernels registered in the library via the\n`REGISTER_*` macros are made available in the TensorFlow process.\n\nArgs:\n library_location: Path to the plugin or the folder of plugins.\n Relative or absolute filesystem path to a dynamic library file or folder.\n\nReturns:\n None\n\nRaises:\n OSError: When the file to be loaded is not found.\n RuntimeError: when unable to load the library."} +{"repo": "tensorflow", "function": "def _GetMaxSizeFromNestedMaximumIterations(value, while_ctxt):\n value_name = value.name\n curr_ctxt = ops.get_default_graph()._get_control_flow_context()\n curr_ctxt_name = curr_ctxt.name if curr_ctxt is not None else ''\n max_size = constant_op.constant(1)\n while while_ctxt not in (None, curr_ctxt):\n max_iter = while_ctxt.maximum_iterations\n if max_iter is None:\n raise ValueError(\"Cannot create a gradient accumulator for tensor '%s' inside XLA while_loop because maximum_iterations was not passed to the tf.while_loop call ('%s').\" % (value_name, while_ctxt.name))\n max_iter_ctxt = max_iter.op._get_control_flow_context()\n if util.IsContainingContext(curr_ctxt, max_iter_ctxt):\n max_size *= max_iter\n else:\n const_max_iter = tensor_util.constant_value(max_iter)\n if const_max_iter is None:\n raise ValueError(\"Cannot create a gradient accumulator for tensor '%s' inside XLA while_loop. maximum_iterations tensor '%s' for while_loop context '%s' must be statically known (e.g. a constant value or known shape dimension), or be defined at or outside the while loop context '%s' (currently defined in '%s').\" % (value_name, max_iter.name, while_ctxt.name, curr_ctxt_name, max_iter_ctxt.name))\n max_size *= const_max_iter\n while_ctxt = util.GetContainingWhileContext(while_ctxt.outer_context, stop_ctxt=curr_ctxt)\n return max_size", "docstring": "Calculate a max_size for use by stack ops inside an XLA while_loop.\n\nArgs:\n value: The value inside the while_loop forward context. Used for printing\n error messages.\n while_ctxt: The forward context inside which value resides. 
This does not\n always match the value's immediate context, as `value` may be inside e.g.\n a cond context inside the while_loop.\n\nReturns:\n A tensor containing the `max_size` to feed to a Stack initializer.\n\nRaises:\n ValueError: If `value` is nested inside a `while_loop` that either\n lacks a `maximum_iterations` parameter, or the `maximum_iterations`\n parameter:\n\n - is inside a `while_loop` that is a parent of the calling context, and\n - cannot be evaluated at graph build time to a constant."} +{"repo": "transformers", "function": "def _get_column_values(table, col_index):\n index_to_values = {}\n for row_index, row in table.iterrows():\n text = normalize_for_match(row[col_index].text)\n index_to_values[row_index] = list(_get_numeric_values(text))\n return index_to_values", "docstring": "Parses text in column and returns a dict mapping row_index to values. This is the _get_column_values function from\nnumber_annotation_utils.py of the original implementation\n\nArgs:\n table: Pandas dataframe\n col_index: integer, indicating the index of the column to get the numeric values of"} +{"repo": "tensorflow", "function": "def reset_from_seed(self, seed):\n state = create_rng_state(seed, self.algorithm)\n self._state_var.assign(state)", "docstring": "Resets the generator by a new seed.\n\nSee `from_seed` for the meaning of \"seed\".\n\nArgs:\n seed: the new seed."} +{"repo": "transformers", "function": "class FlaxTopPLogitsWarper(FlaxLogitsWarper):\n\n def __init__(self, top_p: float, filter_value: float=-float('Inf'), min_tokens_to_keep: int=1):\n if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):\n raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')\n if not isinstance(min_tokens_to_keep, int) or min_tokens_to_keep < 1:\n raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')\n self.top_p = top_p\n self.filter_value = filter_value\n self.min_tokens_to_keep = min_tokens_to_keep\n\n def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:\n topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])\n mask_scores = jnp.full_like(scores, self.filter_value)\n cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)\n score_mask = cumulative_probs < self.top_p\n score_mask = jnp.roll(score_mask, 1)\n score_mask |= score_mask.at[:, 0].set(True)\n score_mask = score_mask.at[:, :self.min_tokens_to_keep].set(True)\n topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)\n next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]\n return next_scores", "docstring": "[`FlaxLogitsWarper`] that performs top-p, i.e. 
restricting to the smallest set of most probable tokens whose cumulative probability adds up to `top_p` or higher.\n\nArgs:\n top_p (`float`):\n If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or\n higher are kept for generation.\n filter_value (`float`, *optional*, defaults to -inf):\n All filtered values will be set to this float value.\n min_tokens_to_keep (`int`, *optional*, defaults to 1):\n Minimum number of tokens that cannot be filtered."} +{"repo": "tensorflow", "function": "def string_length(input, name=None, unit='BYTE'):\n return gen_string_ops.string_length(input, unit=unit, name=name)", "docstring": "Computes the length of each string given in the input tensor.\n\n>>> strings = tf.constant(['Hello','TensorFlow', '\ud83d\ude42'])\n>>> tf.strings.length(strings).numpy() # default counts bytes\narray([ 5, 10, 4], dtype=int32)\n>>> tf.strings.length(strings, unit=\"UTF8_CHAR\").numpy()\narray([ 5, 10, 1], dtype=int32)\n\nArgs:\n input: A `Tensor` of type `string`. The strings for which to compute the\n length for each element.\n name: A name for the operation (optional).\n unit: An optional `string` from: `\"BYTE\", \"UTF8_CHAR\"`. Defaults to\n `\"BYTE\"`. The unit that is counted to compute string length. One of:\n `\"BYTE\"` (for the number of bytes in each string) or `\"UTF8_CHAR\"` (for\n the number of UTF-8 encoded Unicode code points in each string). Results\n are undefined if `unit=UTF8_CHAR` and the `input` strings do not contain\n structurally valid UTF-8.\n\nReturns:\n A `Tensor` of type `int32`, containing the length of the input string in\n the same element of the input tensor."} +{"repo": "beam", "function": "def get_dataset(categories: list, split: str='train'):\n labels = ['sadness', 'joy', 'love', 'anger', 'fear', 'surprise']\n label_map = {class_name: class_id for class_id, class_name in enumerate(labels)}\n labels_subset = np.array([label_map[class_name] for class_name in categories])\n emotion_dataset = load_dataset('emotion', download_mode='force_redownload')\n X, y = (np.array(emotion_dataset[split]['text']), np.array(emotion_dataset[split]['label']))\n subclass_idxs = [idx for idx, label in enumerate(y) if label in labels_subset]\n X_subset, y_subset = (X[subclass_idxs], y[subclass_idxs])\n return (X_subset.tolist(), y_subset.tolist())", "docstring": "Takes a list of categories and a split (train/test/dev) and returns the\ncorresponding subset of the dataset.\n\nArgs:\n categories (list): list of emotion categories to use\n split (str): The split of the dataset to use. 
Can be either \"train\", \"dev\", or \"test\".\n Defaults to train\n\nReturns:\n A list of text and a list of labels"} +{"repo": "transformers", "function": "class EncoderRepetitionPenaltyLogitsProcessor(LogitsProcessor):\n\n def __init__(self, penalty: float, encoder_input_ids: torch.LongTensor):\n if not isinstance(penalty, float) or not penalty > 0:\n raise ValueError(f'`penalty` has to be a strictly positive float, but is {penalty}')\n self.penalty = 1 / penalty\n self.encoder_input_ids = encoder_input_ids\n\n @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)\n def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:\n score = torch.gather(scores, 1, self.encoder_input_ids)\n score = torch.where(score < 0, score * self.penalty, score / self.penalty)\n scores_processed = scores.scatter(1, self.encoder_input_ids, score)\n return scores_processed", "docstring": "[`LogitsProcessor`] that works similarly to [`RepetitionPenaltyLogitsProcessor`], but with an *inverse* penalty\nthat is applied to the tokens present in the prompt. In other words, a penalty above 1.0 increases the odds of\nselecting tokens that were present in the prompt.\n\nIt was designed to avoid hallucination in input-grounded tasks, like summarization. Although originally intended\nfor encoder-decoder models, it can also be used with decoder-only models like LLMs.\n\nArgs:\n penalty (`float`):\n The parameter for repetition penalty. 1.0 means no penalty. Above 1.0 rewards prompt tokens. Between 0.0\n and 1.0 penalizes prompt tokens.\n encoder_input_ids (`torch.LongTensor`):\n The encoder_input_ids that should be repeated within the decoder ids.\n\nExamples:\n\n```python\n>>> from transformers import AutoModelForCausalLM, AutoTokenizer\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"bigscience/bloomz-560m\")\n>>> model = AutoModelForCausalLM.from_pretrained(\"bigscience/bloomz-560m\")\n\n>>> inputs = tokenizer([\"Alice and Bob. The third member's name was\"], return_tensors=\"pt\")\n>>> gen_out = model.generate(**inputs)\n>>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])\nAlice and Bob. The third member's name was not mentioned.\n\n>>> # With the `encoder_repetition_penalty` argument we can trigger this logits processor in `generate`, which can\n>>> # promote the use of prompt tokens (\"Bob\" in this example)\n>>> gen_out = model.generate(**inputs, encoder_repetition_penalty=1.2)\n>>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])\nAlice and Bob. The third member's name was Bob. The third member's name was Bob.\n```"} +{"repo": "tensorflow", "function": "def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):\n uniform_sampler = functools.partial(random_ops.random_uniform, seed=seed)\n\n def dummy_rng_step():\n random_seed.get_seed(seed)\n return _dropout(x=x, rate=rate, noise_shape=noise_shape, uniform_sampler=uniform_sampler, dummy_rng_step=dummy_rng_step, name=name, default_name='dropout')", "docstring": "Computes dropout: randomly sets elements to zero to prevent overfitting.\n\nWarning: You should consider using\n`tf.nn.experimental.stateless_dropout` instead of this function. The\ndifference between `tf.nn.experimental.stateless_dropout` and this\nfunction is analogous to the difference between\n`tf.random.stateless_uniform` and `tf.random.uniform`. Please see\n[Random number\ngeneration](https://www.tensorflow.org/guide/random_numbers) guide\nfor a detailed description of the various RNG systems in TF. 
As the\nguide states, legacy stateful RNG ops like `tf.random.uniform` and\n`tf.nn.dropout` are not deprecated yet but highly discouraged,\nbecause their states are hard to control.\n\nNote: The behavior of dropout has changed between TensorFlow 1.x and 2.x.\nWhen converting 1.x code, please use named arguments to ensure behavior stays\nconsistent.\n\nSee also: `tf.keras.layers.Dropout` for a dropout layer.\n\n[Dropout](https://arxiv.org/abs/1207.0580) is useful for regularizing DNN\nmodels. Input elements are randomly set to zero (and the other elements are\nrescaled). This encourages each node to be independently useful, as it cannot\nrely on the output of other nodes.\n\nMore precisely: With probability `rate` elements of `x` are set to `0`.\nThe remaining elements are scaled up by `1.0 / (1 - rate)`, so that the\nexpected value is preserved.\n\n>>> tf.random.set_seed(0)\n>>> x = tf.ones([3,5])\n>>> tf.nn.dropout(x, rate = 0.5, seed = 1).numpy()\narray([[2., 0., 0., 2., 2.],\n [2., 2., 2., 2., 2.],\n [2., 0., 2., 0., 2.]], dtype=float32)\n\n>>> tf.random.set_seed(0)\n>>> x = tf.ones([3,5])\n>>> tf.nn.dropout(x, rate = 0.8, seed = 1).numpy()\narray([[0., 0., 0., 5., 5.],\n [0., 5., 0., 5., 0.],\n [5., 0., 5., 0., 5.]], dtype=float32)\n\n>>> tf.nn.dropout(x, rate = 0.0) == x\n\n\n\nBy default, each element is kept or dropped independently. If `noise_shape`\nis specified, it must be\n[broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\nto the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`\nwill make independent decisions. This is useful for dropping whole\nchannels from an image or sequence. For example:\n\n>>> tf.random.set_seed(0)\n>>> x = tf.ones([3,10])\n>>> tf.nn.dropout(x, rate = 2/3, noise_shape=[1,10], seed=1).numpy()\narray([[0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],\n [0., 0., 0., 3., 3., 0., 3., 3., 3., 0.],\n [0., 0., 0., 3., 3., 0., 3., 3., 3., 0.]], dtype=float32)\n\nArgs:\n x: A floating point tensor.\n rate: A scalar `Tensor` with the same type as x. The probability\n that each element is dropped. For example, setting rate=0.1 would drop\n 10% of input elements.\n noise_shape: A 1-D integer `Tensor`, representing the\n shape for randomly generated keep/drop flags.\n seed: A Python integer. Used to create random seeds. See\n `tf.random.set_seed` for behavior.\n name: A name for this operation (optional).\n\nReturns:\n A Tensor of the same shape as `x`.\n\nRaises:\n ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating point\n tensor. 
`rate=1` is disallowed, because the output would be all zeros,\n which is likely not what was intended."} +{"repo": "fhir-py", "function": "def _encode_required_field(self, name: str, containing_type_builder: expressions.Builder, builder: expressions.Builder, element_definition: message.Message) -> Optional[validation_pb2.SqlRequirement]:\n element = cast(Any, element_definition)\n if not _is_elem_supported(element):\n return None\n field_name = _last_path_token(builder)\n min_size = element.min.value\n max_size = element.max.value\n element_count = builder.count()\n query_list = []\n if _fhir_path_data_types.is_collection(builder.return_type) and max_size.isdigit():\n query_list.append(element_count <= int(max_size))\n if min_size == 1:\n query_list.append(builder.exists())\n elif min_size > 0:\n query_list.append(element_count >= min_size)\n if not query_list:\n return None\n constraint_key = f'{name}-cardinality-is-valid'\n description = f'The length of {name} must be maximum {max_size} and minimum {min_size}.'\n fhir_path_builder = query_list[0]\n for query in query_list[1:]:\n fhir_path_builder = fhir_path_builder & query\n if constraint_key in self._options.skip_keys:\n return None\n type_codes = _utils.element_type_codes(element)\n if 'Reference' not in type_codes and (not _SKIP_TYPE_CODES.isdisjoint(type_codes)):\n return None\n result = self._encode_fhir_path_builder_constraint(fhir_path_builder, containing_type_builder)\n if result is None:\n return None\n element_definition_path = self._abs_path_invocation(containing_type_builder)\n constraint_key_column_name: str = _key_to_sql_column_name(_path_to_sql_column_name(constraint_key))\n column_name_base: str = _path_to_sql_column_name(element_definition_path)\n column_name = f'{column_name_base}_{constraint_key_column_name}'\n requirement = validation_pb2.SqlRequirement(column_name=column_name, sql_expression=result.sql, fhir_path_sql_expression=result.fhir_path_sql, severity=validation_pb2.ValidationSeverity.SEVERITY_ERROR, type=validation_pb2.ValidationType.VALIDATION_TYPE_CARDINALITY, element_path=element_definition_path, description=description, fhir_path_key=constraint_key, fhir_path_expression=result.builder.fhir_path, fields_referenced_by_expression=[field_name])\n return requirement", "docstring": "Returns `SqlRequirement` for the required field passed.\n\nArgs:\n name: name of the constraint key.\n containing_type_builder: The builder of the Structure definition for the\n required field.\n builder: The builder containing the element to encode required field for.\n element_definition: Element definition of the builder.\n\nReturns:\n A `SqlRequirement` representing the requirement generated from\n the element."} +{"repo": "transformers", "function": "def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n mask = np.zeros(output_size, dtype=np.int64)\n mask[:input_height, :input_width] = 1\n return mask", "docstring": "Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.\nArgs:\n image (`np.ndarray`):\n Image to make the pixel mask for.\n output_size (`Tuple[int, int]`):\n Output size of the mask."} +{"repo": "tensorflow", "function": "def stft(signals, frame_length, frame_step, fft_length=None, window_fn=window_ops.hann_window, pad_end=False, name=None):\n with ops.name_scope(name, 'stft', [signals, frame_length, 
frame_step]):\n signals = ops.convert_to_tensor(signals, name='signals')\n signals.shape.with_rank_at_least(1)\n frame_length = ops.convert_to_tensor(frame_length, name='frame_length')\n frame_length.shape.assert_has_rank(0)\n frame_step = ops.convert_to_tensor(frame_step, name='frame_step')\n frame_step.shape.assert_has_rank(0)\n if fft_length is None:\n fft_length = _enclosing_power_of_two(frame_length)\n else:\n fft_length = ops.convert_to_tensor(fft_length, name='fft_length')\n framed_signals = shape_ops.frame(signals, frame_length, frame_step, pad_end=pad_end)\n if window_fn is not None:\n window = window_fn(frame_length, dtype=framed_signals.dtype)\n framed_signals *= window\n return fft_ops.rfft(framed_signals, [fft_length])", "docstring": "Computes the [Short-time Fourier Transform][stft] of `signals`.\n\nImplemented with TPU/GPU-compatible ops and supports gradients.\n\nArgs:\n signals: A `[..., samples]` `float32`/`float64` `Tensor` of real-valued\n signals.\n frame_length: An integer scalar `Tensor`. The window length in samples.\n frame_step: An integer scalar `Tensor`. The number of samples to step.\n fft_length: An integer scalar `Tensor`. The size of the FFT to apply.\n If not provided, uses the smallest power of 2 enclosing `frame_length`.\n window_fn: A callable that takes a window length and a `dtype` keyword\n argument and returns a `[window_length]` `Tensor` of samples in the\n provided datatype. If set to `None`, no windowing is used.\n pad_end: Whether to pad the end of `signals` with zeros when the provided\n frame length and step produces a frame that lies partially past its end.\n name: An optional name for the operation.\n\nReturns:\n A `[..., frames, fft_unique_bins]` `Tensor` of `complex64`/`complex128`\n STFT values where `fft_unique_bins` is `fft_length // 2 + 1` (the unique\n components of the FFT).\n\nRaises:\n ValueError: If `signals` is not at least rank 1, `frame_length` is\n not scalar, or `frame_step` is not scalar.\n\n[stft]: https://en.wikipedia.org/wiki/Short-time_Fourier_transform"} +{"repo": "transformers", "function": "class JanusVisionConfig(PretrainedConfig):\n model_type = 'janus_vision_model'\n base_config_key = 'vision_config'\n\n def __init__(self, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, num_channels=3, patch_size=16, image_size=384, attention_dropout=0.0, layer_norm_eps=1e-06, hidden_act='gelu', mlp_ratio=4.0, attention_bias=True, hidden_dropout_rate=0.0, projection_dim=2048, projection_dropout=0.0, use_qk_norm=False, initializer_range=0.02, depth=2, num_image_tokens=576, **kwargs):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.patch_size = patch_size\n self.image_size = image_size\n self.attention_dropout = attention_dropout\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n self.mlp_ratio = mlp_ratio\n self.attention_bias = attention_bias\n self.hidden_dropout_rate = hidden_dropout_rate\n self.projection_dim = projection_dim\n self.projection_dropout = projection_dropout\n self.use_qk_norm = use_qk_norm\n self.initializer_range = initializer_range\n self.depth = depth\n self.num_image_tokens = num_image_tokens", "docstring": "This is the configuration class to store the configuration of a [`JanusVisionModel`]. 
It is used to instantiate a\n`JanusVisionModel` according to the specified arguments, defining the model architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\nArgs:\n hidden_size (`int`, *optional*, defaults to 1024):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 24):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 16):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_channels (`int`, *optional*, defaults to 3):\n The number of input channels.\n patch_size (`int`, *optional*, defaults to 16):\n The size (resolution) of each patch.\n image_size (`int`, *optional*, defaults to 384):\n The size (resolution) of each image.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n Dropout probability for attention weights.\n layer_norm_eps (`float`, *optional*, defaults to 1e-06):\n The epsilon used by the layer normalization layers.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"`, and `\"gelu_new\"` are supported.\n mlp_ratio (`float`, *optional*, defaults to 4.0):\n Ratio of MLP hidden dimensionality to embedding dimensionality.\n attention_bias (`bool`, *optional*, defaults to `True`):\n Whether to add a bias to the queries, keys, and values in the attention layers.\n hidden_dropout_rate (`float`, *optional*, defaults to 0.0):\n The dropout probability for fully connected layers in the encoder.\n projection_dim (`int`, *optional*, defaults to 2048):\n Dimensionality of the MLP projection head.\n projection_dropout (`float`, *optional*, defaults to 0.0):\n Dropout probability for the projection layer.\n use_qk_norm (`bool`, *optional*, defaults to `False`):\n Whether to normalize the query and key matrices.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated normal initializer for initializing all weight matrices.\n depth (`int`, *optional*, defaults to 2):\n Number of hidden layers in the aligner module.\n num_image_tokens (`int`, *optional*, defaults to 576):\n Number of image tokens."} +{"repo": "tensorflow", "function": "def calc_control_outputs(self, graph):\n control_outputs = {}\n for op in graph.get_operations():\n for control_input in op.control_inputs:\n if control_input not in control_outputs:\n control_outputs[control_input] = set()\n control_outputs[control_input].add(op)\n return control_outputs", "docstring": "Returns the map of control_outputs for a given graph.\n\nArgs:\n graph: The graph to parse.\n\nReturns:\n A map of the control outputs."} +{"repo": "tensorflow", "function": "def bessel_k0e(x, name=None):\n with ops.name_scope(name, 'bessel_k0e', [x]):\n return gen_special_math_ops.bessel_k0e(x)", "docstring": "Computes the Bessel k0e function of `x` element-wise.\n\nModified Bessel function of order 0.\n\n>>> tf.math.special.bessel_k0e([0.5, 1., 2., 4.]).numpy()\narray([1.52410939, 1.14446308, 0.84156822, 0.60929767], dtype=float32)\n\nArgs:\n x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,\n `float32`, `float64`.\n name: A name for the operation (optional).\n\nReturns:\n A `Tensor` or `SparseTensor`, respectively. 
Has the same type as `x`.\n\n@compatibility(scipy)\nEquivalent to scipy.special.k0e\n@end_compatibility"} +{"repo": "beam", "function": "def write_to_pubsub(pcoll, *, topic: str, format: str, schema: Optional[Any]=None, attributes: Optional[Iterable[str]]=None, attributes_map: Optional[str]=None, id_attribute: Optional[str]=None, timestamp_attribute: Optional[str]=None):\n input_schema = schemas.schema_from_element_type(pcoll.element_type)\n extra_fields: list[str] = []\n if isinstance(attributes, str):\n attributes = [attributes]\n if attributes:\n extra_fields.extend(attributes)\n if attributes_map:\n extra_fields.append(attributes_map)\n\n def attributes_extractor(row):\n if attributes_map:\n attribute_values = dict(getattr(row, attributes_map))\n else:\n attribute_values = {}\n if attributes:\n attribute_values.update({attr: getattr(row, attr) for attr in attributes})\n return attribute_values\n schema_names = set((f.name for f in input_schema.fields))\n missing_attribute_names = set(extra_fields) - schema_names\n if missing_attribute_names:\n raise ValueError(f'Attribute fields {missing_attribute_names} not found in schema fields {schema_names}')\n payload_schema = schema_pb2.Schema(fields=[field for field in input_schema.fields if field.name not in extra_fields])\n formatter = _create_formatter(format, schema, payload_schema)\n return pcoll | beam.Map(lambda row: beam.io.gcp.pubsub.PubsubMessage(formatter(row), attributes_extractor(row))) | beam.io.WriteToPubSub(topic, with_attributes=True, id_label=id_attribute, timestamp_attribute=timestamp_attribute)", "docstring": "Writes messages to Cloud Pub/Sub.\n\nArgs:\n topic: Cloud Pub/Sub topic in the form \"/topics//\".\n format: How to format the message payload. Currently supported\n formats are\n\n - RAW: Expects a message with a single field (excluding\n attribute-related fields) whose contents are used as the raw bytes\n of the pubsub message.\n - AVRO: Encodes records with a given Avro schema, which may be inferred\n from the input PCollection schema.\n - JSON: Formats records with a given JSON schema, which may be inferred\n from the input PCollection schema.\n\n schema: Schema specification for the given format.\n attributes: List of attribute keys whose values will be pulled out as\n PubSub message attributes. For example, if the format is `raw`\n and attributes is `[\"a\", \"b\"]` then elements of the form\n `Row(any_field=..., a=..., b=...)` will result in PubSub messages whose\n payload has the contents of any_field and whose attributes will be\n populated with the values of `a` and `b`.\n attributes_map: Name of a string-to-string map field from which to pull a set\n of attributes associated with this message. For example, if the format\n is `raw` and `attributes_map` is set to `\"attrs\"` then elements of the form\n `Row(any_field=..., attrs=...)` will result in PubSub messages whose\n payload has the contents of any_field and whose attributes will be\n populated with the values from attrs.\n If both `attributes` and `attributes_map` are set, the union of attributes\n from these two sources will be used to populate the PubSub message\n attributes.\n id_attribute: If set, will set an attribute for each Cloud Pub/Sub message\n with the given name and a unique value. 
This attribute can then be used\n in a ReadFromPubSub PTransform to deduplicate messages.\n timestamp_attribute: If set, will set an attribute for each Cloud Pub/Sub\n message with the given name and the message's publish time as the value."} +{"repo": "pytype", "function": "def add_pop_block_targets(bytecode: list[opcodes.Opcode]) -> None:\n if not bytecode:\n return\n for op in bytecode:\n op.block_target = None\n setup_except_op = (opcodes.SETUP_FINALLY, opcodes.SETUP_EXCEPT_311)\n todo = [(bytecode[0], ())]\n seen = set()\n while todo:\n op, block_stack = todo.pop()\n if op in seen:\n continue\n else:\n seen.add(op)\n if isinstance(op, opcodes.POP_BLOCK):\n assert block_stack, 'POP_BLOCK without block.'\n op.block_target = block_stack[-1].target\n block_stack = block_stack[0:-1]\n elif isinstance(op, opcodes.RAISE_VARARGS):\n for b in reversed(block_stack):\n if isinstance(b, setup_except_op):\n op.block_target = b.target\n break\n elif isinstance(op, opcodes.BREAK_LOOP):\n for i in reversed(range(len(block_stack))):\n b = block_stack[i]\n if isinstance(b, opcodes.SETUP_LOOP):\n op.block_target = b.target\n assert b.target != op\n todo.append((op.block_target, block_stack[0:i]))\n break\n elif isinstance(op, setup_except_op):\n todo.append((op.target, block_stack))\n block_stack += (op,)\n elif op.pushes_block():\n assert op.target, f'{op.name} without target'\n block_stack += (op,)\n elif op.does_jump() and op.target:\n if op.push_exc_block:\n setup_op = op.target\n while not isinstance(setup_op, setup_except_op):\n setup_op = setup_op.prev\n block_stack += (setup_op,)\n todo.append((op.target, block_stack))\n if not op.no_next():\n assert op.next, f'Bad instruction at end of bytecode: {op!r}.'\n todo.append((op.next, block_stack))", "docstring": "Modifies bytecode so that each POP_BLOCK has a block_target.\n\nThis is to achieve better initial ordering of try/except and try/finally code.\ntry:\n i = 1\n a[i]\nexcept IndexError:\n return i\nBy connecting a CFG edge from the end of the block (after the \"a[i]\") to the\nexcept handler, our basic block ordering algorithm knows that the except block\nneeds to be scheduled last, whereas if there only was an edge before the\n\"i = 1\", it would be able to schedule it too early and thus encounter an\nundefined variable. This is only for ordering. The actual analysis of the\ncode happens later, in vm.py.\n\nArgs:\n bytecode: An array of bytecodes."} +{"repo": "tensorflow", "function": "def reduce_sum(self, x):\n return self.reduce(lambda y: math_ops.reduce_sum(y, axis=0), x)", "docstring": "Performs a sum reduction on `x` across pfor iterations.\n\nNote that this currently may not work inside a control flow construct.\nArgs:\n x: an unvectorized Tensor.\n\nReturns:\n A Tensor that has same rank as `x`. 
The value is the sum of the values\n of `x` across the pfor iterations."} +{"repo": "tensorflow", "function": "def maximum(inputs, **kwargs):\n return Maximum(**kwargs)(inputs)", "docstring": "Functional interface to compute the element-wise maximum of a list of `inputs`.\n\nThis is equivalent to the `tf.keras.layers.Maximum` layer.\n\nFor example:\n\n```python\ninput1 = tf.keras.layers.Input(shape=(16,))\nx1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)\ninput2 = tf.keras.layers.Input(shape=(32,))\nx2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)\nmax_inp=tf.keras.layers.maximum([x1,x2]) #shape=(None, 8)\nout = tf.keras.layers.Dense(4)(max_inp)\nmodel = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n```\n\nArgs:\n inputs: A list of input tensors (at least 2) of the same shape.\n **kwargs: Standard layer keyword arguments.\n\nReturns:\n A tensor (of the same shape as the input tensors) with the element-wise\n maximum of the inputs.\n\nRaises:\n ValueError: If input tensors are of different shapes."} +{"repo": "tensorflow", "function": "def _dict_to_tensor(self, x, k1, k2, k3):\n return array_ops_stack.stack([array_ops_stack.stack([array_ops_stack.stack([x[i, j, k] for k in range(k3)]) for j in range(k2)]) for i in range(k1)])", "docstring": "Convert a dictionary to a tensor.\n\nArgs:\n x: A k1 * k2 * k3 dictionary.\n k1: First dimension of x.\n k2: Second dimension of x.\n k3: Third dimension of x.\n\nReturns:\n A k1 * k2 * k3 tensor."} +{"repo": "beam", "function": "def delete_bq_table(project, dataset_id, table_id):\n _LOGGER.info('Clean up a BigQuery table with project: %s, dataset: %s, table: %s.', project, dataset_id, table_id)\n client = bigquery.Client(project=project)\n table_ref = client.dataset(dataset_id).table(table_id)\n try:\n client.delete_table(table_ref)\n except gexc.NotFound:\n raise GcpTestIOError('BigQuery table does not exist: %s' % table_ref)", "docstring": "Delete a BigQuery table.\n\nArgs:\n project: Name of the project.\n dataset_id: Name of the dataset where the table is.\n table_id: Name of the table."} +{"repo": "transformers", "function": "class LlavaOnevisionConfig(PretrainedConfig):\n model_type = 'llava_onevision'\n attribute_map = {'image_token_id': 'image_token_index', 'video_token_id': 'video_token_index'}\n sub_configs = {'text_config': AutoConfig, 'vision_config': AutoConfig}\n\n def __init__(self, vision_config=None, text_config=None, image_token_index=151646, video_token_index=151647, projector_hidden_act='gelu', vision_feature_select_strategy='full', vision_feature_layer=-1, vision_aspect_ratio='anyres_max_9', image_grid_pinpoints=None, tie_word_embeddings=False, multimodal_projector_bias=True, **kwargs):\n self.image_token_index = image_token_index\n self.video_token_index = video_token_index\n self.projector_hidden_act = projector_hidden_act\n self.multimodal_projector_bias = multimodal_projector_bias\n if vision_feature_select_strategy not in ['default', 'full']:\n raise ValueError(f\"vision_feature_select_strategy should be one of 'default', 'full'. Got: {vision_feature_select_strategy}\")\n self.vision_feature_select_strategy = vision_feature_select_strategy\n self.vision_feature_layer = vision_feature_layer\n self.vision_aspect_ratio = vision_aspect_ratio\n image_grid_pinpoints = image_grid_pinpoints if image_grid_pinpoints is not None else [[384, 384], [384, 768], [384, 1152], [384, 1536], [384, 1920], [384, 2304], [768, 384], [768, 768], [768, 1152], [768, 1536], [768, 1920], [768, 2304], [1152, 384], [1152, 
768], [1152, 1152], [1152, 1536], [1152, 1920], [1152, 2304], [1536, 384], [1536, 768], [1536, 1152], [1536, 1536], [1536, 1920], [1536, 2304], [1920, 384], [1920, 768], [1920, 1152], [1920, 1536], [1920, 1920], [1920, 2304], [2304, 384], [2304, 768], [2304, 1152], [2304, 1536], [2304, 1920], [2304, 2304]]\n self.image_grid_pinpoints = image_grid_pinpoints\n if isinstance(vision_config, dict):\n vision_config['model_type'] = vision_config['model_type'] if 'model_type' in vision_config else 'siglip_vision_model'\n vision_config = CONFIG_MAPPING[vision_config['model_type']](**vision_config)\n elif vision_config is None:\n vision_config = CONFIG_MAPPING['siglip_vision_model'](hidden_size=1152, intermediate_size=4304, patch_size=14, image_size=384, num_hidden_layers=26, num_attention_heads=14, vision_use_head=False)\n self.vision_config = vision_config\n if isinstance(text_config, dict):\n text_config['model_type'] = text_config['model_type'] if 'model_type' in text_config else 'qwen2'\n text_config = CONFIG_MAPPING[text_config['model_type']](**text_config)\n elif text_config is None:\n text_config = CONFIG_MAPPING['qwen2']()\n self.text_config = text_config\n super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)", "docstring": "This is the configuration class to store the configuration of a [`LlavaOnevisionForConditionalGeneration`]. It is used to instantiate an\nLlava-NeXT model according to the specified arguments, defining the model architecture. Instantiating a configuration\nwith the defaults will yield a similar configuration to that of the [llava-hf/llava-onevision-qwen2-7b-ov-hf](https://huggingface.co/llava-hf/llava-onevision-qwen2-7b-ov-hf)\nmodel.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`):\n The config object or dictionary of the vision backbone.\n text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`):\n The config object or dictionary of the text backbone.\n image_token_index (`int`, *optional*, defaults to 151646):\n The image token index to encode the image prompt.\n video_token_index (`int`, *optional*, defaults to 151647):\n The video token index to encode the video prompt.\n projector_hidden_act (`str`, *optional*, defaults to `\"gelu\"`):\n The activation function used by the multimodal projector.\n vision_feature_select_strategy (`str`, *optional*, defaults to `\"full\"`):\n The feature selection strategy used to select the vision feature from the vision backbone.\n Can be one of `\"default\"` or `\"full\"`. If `\"default\"`, the CLS token is removed from the vision features.\n If `\"full\"`, the full vision features are used.\n vision_feature_layer (`Union[int, List[int]]`, *optional*, defaults to -1):\n The index of the layer to select the vision feature. If multiple indices are provided,\n the vision feature of the corresponding indices will be concatenated to form the\n vision features.\n vision_aspect_ratio (`str`, *optional*, defaults to `\"anyres_max_9\"`):\n Aspect ratio used when processing image features. The default value is \"anyres_max_9\".\n image_grid_pinpoints (`List`, *optional*):\n A list of possible resolutions to use for processing high resolution images. 
Each item in the list should be a tuple or list\n of the form `(height, width)`.\n tie_word_embeddings (`bool`, *optional*, defaults to `False`):\n Whether the model's input and output word embeddings should be tied.\n multimodal_projector_bias (`bool`, *optional*, defaults to `True`):\n Whether to use bias in the multimodal projector.\n\nExample:\n\n```python\n>>> from transformers import LlavaOnevisionForConditionalGeneration, LlavaOnevisionConfig, SiglipVisionConfig, Qwen2Config\n\n>>> # Initializing a Siglip-vision config\n>>> vision_config = SiglipVisionConfig()\n\n>>> # Initializing a Qwen2 config\n>>> text_config = Qwen2Config()\n\n>>> # Initializing a Llava-Next llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration\n>>> configuration = LlavaOnevisionConfig(vision_config, text_config)\n\n>>> # Initializing a model from the llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration\n>>> model = LlavaOnevisionForConditionalGeneration(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "beam", "function": "def __call__(self, request: beam.Row, *args, **kwargs):\n try:\n entity_id = request._asdict()[self.row_key]\n except KeyError:\n raise KeyError('Enrichment requests to Vertex AI Feature Store should contain a field: %s in the input `beam.Row` to join the input with fetched response. This is used as the `FeatureViewDataKey` to fetch feature values corresponding to this key.' % self.row_key)\n try:\n response = self.client.fetch_feature_values(request=aiplatform.gapic.FetchFeatureValuesRequest(data_key=aiplatform.gapic.FeatureViewDataKey(key=entity_id), feature_view=self.feature_view_path, data_format=aiplatform.gapic.FeatureViewDataFormat.PROTO_STRUCT))\n except NotFound:\n if self.exception_level == ExceptionLevel.WARN:\n _LOGGER.warning(_not_found_err_message(self.feature_store_name, self.feature_view_name, entity_id))\n return (request, beam.Row())\n elif self.exception_level == ExceptionLevel.RAISE:\n raise ValueError(_not_found_err_message(self.feature_store_name, self.feature_view_name, entity_id))\n response_dict = dict(response.proto_struct)\n return (request, beam.Row(**response_dict))", "docstring": "Fetches feature values for an entity-id from Vertex AI Feature Store.\n\nArgs:\n request: the input `beam.Row` to enrich."} +{"repo": "tensorflow", "function": "def __init__(self, cell):\n self._cell = cell", "docstring": "Creates a new SamplerCell.\n\nArgs:\n cell: A C pointer to a TFE_MonitoringSamplerCell."} +{"repo": "tensorflow", "function": "def has_deprecation_decorator(symbol):\n decorators, symbol = tf_decorator.unwrap(symbol)\n if contains_deprecation_decorator(decorators):\n return True\n if tf_inspect.isfunction(symbol):\n return False\n if not tf_inspect.isclass(symbol):\n return False\n if not hasattr(symbol, '__init__'):\n return False\n init_decorators, _ = tf_decorator.unwrap(symbol.__init__)\n return contains_deprecation_decorator(init_decorators)", "docstring": "Checks if a given object has a deprecation decorator.\n\nWe check if a deprecation decorator is in decorators, as well as\nwhether symbol is a class whose __init__ method has a deprecation\ndecorator.\n\nArgs:\n symbol: Python object.\n\nReturns:\n True if symbol has a deprecation decorator."} +{"repo": "beam", "function": "class SimpleSlidingQuantileTracker(WindowedTracker, QuantileTracker):\n\n def __init__(self, window_size, q):\n super().__init__(window_mode=WindowMode.SLIDING, window_size=window_size)\n QuantileTracker.__init__(self, q)\n\n def 
get(self):\n \"\"\"Calculates and returns the specified quantile of the current sliding\n window.\n\n Returns:\n float: The specified quantile of the values in the current sliding window.\n Returns NaN if the window is empty.\n \"\"\"\n with warnings.catch_warnings(record=False):\n warnings.simplefilter('ignore')\n return np.nanquantile(self._queue, self._q)", "docstring": "Sliding window quantile tracker using NumPy.\n\nThis tracker uses NumPy's `nanquantile` function to calculate the specified\nquantile of the values currently in the sliding window. It's a simple,\nnon-incremental approach.\n\nArgs:\n window_size: The size of the sliding window.\n q: The quantile to calculate, a float between 0 and 1 (inclusive)."} +{"repo": "tensorflow", "function": "def compute_gradients(self, loss, var_list=None, gate_gradients=GATE_OP, aggregation_method=None, colocate_gradients_with_ops=False, grad_loss=None):\n if callable(loss):\n with backprop.GradientTape() as tape:\n if var_list is not None:\n tape.watch(var_list)\n loss_value = loss()\n loss_value = self._scale_loss(loss_value)\n if var_list is None:\n var_list = tape.watched_variables()\n with ops.control_dependencies([loss_value]):\n grads = tape.gradient(loss_value, var_list, grad_loss)\n return list(zip(grads, var_list))\n if context.executing_eagerly():\n raise RuntimeError('`loss` passed to Optimizer.compute_gradients should be a function when eager execution is enabled.')\n loss = self._scale_loss(loss)\n if gate_gradients not in [Optimizer.GATE_NONE, Optimizer.GATE_OP, Optimizer.GATE_GRAPH]:\n raise ValueError('gate_gradients must be one of: Optimizer.GATE_NONE, Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s' % gate_gradients)\n self._assert_valid_dtypes([loss])\n if grad_loss is not None:\n self._assert_valid_dtypes([grad_loss])\n if var_list is None:\n var_list = variables.trainable_variables() + ops.get_collection(ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)\n else:\n var_list = nest.flatten(var_list)\n var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)\n processors = [_get_processor(v) for v in var_list]\n if not var_list:\n raise ValueError('No variables to optimize.')\n var_refs = [p.target() for p in processors]\n grads = gradients.gradients(loss, var_refs, grad_ys=grad_loss, gate_gradients=gate_gradients == Optimizer.GATE_OP, aggregation_method=aggregation_method, colocate_gradients_with_ops=colocate_gradients_with_ops)\n if gate_gradients == Optimizer.GATE_GRAPH:\n grads = control_flow_ops.tuple(grads)\n grads_and_vars = list(zip(grads, var_list))\n self._assert_valid_dtypes([v for g, v in grads_and_vars if g is not None and v.dtype != dtypes.resource])\n return grads_and_vars", "docstring": "Compute gradients of `loss` for the variables in `var_list`.\n\nThis is the first part of `minimize()`. It returns a list\nof (gradient, variable) pairs where \"gradient\" is the gradient\nfor \"variable\". 
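The `get()` implementation above amounts to `np.nanquantile` over whatever values currently sit in the window. A minimal standalone sketch of that computation (plain NumPy; the `deque` stands in for the tracker's internal queue and is an assumption, not the actual Beam class):

```python
# Sketch of the sliding-window quantile that SimpleSlidingQuantileTracker.get()
# computes: the q-quantile of the most recent `window_size` values, with NaNs
# ignored by nanquantile.
from collections import deque

import numpy as np

window_size, q = 3, 0.5          # window of 3 values, median
window = deque(maxlen=window_size)

for value in [10.0, 1.0, np.nan, 7.0, 3.0]:
    window.append(value)         # older values fall out automatically

print(np.nanquantile(list(window), q))  # quantile over [nan, 7.0, 3.0] -> 5.0
```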
Note that \"gradient\" can be a `Tensor`, an\n`IndexedSlices`, or `None` if there is no gradient for the\ngiven variable.\n\n@compatibility(TF2)\n`tf.keras.optimizers.Optimizer` in TF2 does not provide a\n`compute_gradients` method, and you should use a `tf.GradientTape` to\nobtain the gradients:\n\n```python\n@tf.function\ndef train_step(inputs):\n    batch_data, labels = inputs\n    with tf.GradientTape() as tape:\n        predictions = model(batch_data, training=True)\n        loss = tf.keras.losses.CategoricalCrossentropy(\n            reduction=tf.keras.losses.Reduction.NONE)(labels, predictions)\n    gradients = tape.gradient(loss, model.trainable_variables)\n    optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n```\n\nArgs:\n    loss: A Tensor containing the value to minimize or a callable taking\n        no arguments which returns the value to minimize. When eager execution\n        is enabled it must be a callable.\n    var_list: Optional list or tuple of `tf.Variable` to update to minimize\n        `loss`. Defaults to the list of variables collected in the graph\n        under the key `GraphKeys.TRAINABLE_VARIABLES`.\n    gate_gradients: How to gate the computation of gradients. Can be\n        `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.\n    aggregation_method: Specifies the method used to combine gradient terms.\n        Valid values are defined in the class `AggregationMethod`.\n    colocate_gradients_with_ops: If True, try colocating gradients with\n        the corresponding op.\n    grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.\n\nReturns:\n    A list of (gradient, variable) pairs. Variable is always present, but\n    gradient can be `None`.\n\nRaises:\n    TypeError: If `var_list` contains anything other than `Variable` objects.\n    ValueError: If some arguments are invalid.\n    RuntimeError: If called with eager execution enabled and `loss` is\n        not callable.\n\n@compatibility(eager)\nWhen eager execution is enabled, `gate_gradients`, `aggregation_method`,\nand `colocate_gradients_with_ops` are ignored.\n@end_compatibility"}
+{"repo": "mobly", "function": "def _sanitize_windows_filename(filename):\n    if re.match(WINDOWS_RESERVED_FILENAME_REGEX, filename):\n        return WINDOWS_RESERVED_FILENAME_PREFIX + filename\n    filename = _truncate_filename(filename, WINDOWS_MAX_FILENAME_LENGTH)\n    new_filename_chars = []\n    for char in filename:\n        if char in WINDOWS_RESERVED_CHARACTERS_REPLACEMENTS:\n            new_filename_chars.append(WINDOWS_RESERVED_CHARACTERS_REPLACEMENTS[char])\n        else:\n            new_filename_chars.append(char)\n    filename = ''.join(new_filename_chars)\n    if filename.endswith('.') or filename.endswith(' '):\n        filename = filename[:-1] + '_'\n    return filename", "docstring": "Sanitizes a filename for Windows.\n\nRefer to the following Windows documentation page for the rules:\nhttps://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#naming-conventions\n\nIf the filename matches one of Windows' reserved file namespaces, then the\n`WINDOWS_RESERVED_FILENAME_PREFIX` (i.e. \"mobly_\") prefix will be prepended\nto the filename to convert it into a valid Windows filename.\n\nArgs:\n    filename: string, the filename to sanitize for the Windows file system.\n\nReturns:\n    A filename that should be safe to use on Windows."}
+{"repo": "tensorflow", "function": "class RaggedTensorDynamicShape:\n\n    def __init__(self, partitioned_dim_sizes, inner_dim_sizes, dim_size_dtype=None):\n        \"\"\"Creates a RaggedTensorDynamicShape.\n\n        Args:\n            partitioned_dim_sizes: A `list` of 0-D or 1-D integer `Tensor`, one for\n                each partitioned dimension. 
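For contrast with the TF2 `GradientTape` recipe quoted in the docstring above, here is a minimal graph-mode sketch of the `(gradient, variable)`-pairs contract that `compute_gradients` returns (assumes TF1 compatibility mode; illustrative only, not the TensorFlow source):

```python
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.Variable(3.0, name='x')
loss = tf.square(x)  # dloss/dx = 2x

opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = opt.compute_gradients(loss)    # [(grad_tensor, x)]
train_op = opt.apply_gradients(grads_and_vars)  # the second half of minimize()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(x))  # 3.0 - 0.1 * 6.0 = 2.4
```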
If dimension `d` is uniform, then\n `partitioned_dim_sizes[d]` must be an integer scalar, specifying the\n size of all slices across dimension `d`. If dimension `d` is ragged,\n then `partitioned_dim_sizes[d]` must be an integer vector, specifying\n the size of each slice across dimension `d`.\n inner_dim_sizes: A 1-D integer `Tensor`, whose length is equal to the\n number of inner dimensions. `inner_dim_sizes[n]` is the size of all\n slices across the `n`th inner dimension (which is the\n `(len(partitioned_dim_sizes)+n)`th dimension in the overall tensor.\n dim_size_dtype: dtype for dimension sizes. If not specified, then it\n is chosen based on the dtypes of `partitioned_dim_sizes` and\n `inner_dim_sizes`.\n \"\"\"\n assert isinstance(partitioned_dim_sizes, (list, tuple))\n with ops.name_scope(None, 'RaggedTensorDynamicShape', (partitioned_dim_sizes, inner_dim_sizes)):\n partitioned_dim_sizes = tuple((ops.convert_to_tensor(size, name='partitioned_dimension_size_%d' % i) for i, size in enumerate(partitioned_dim_sizes)))\n inner_dim_sizes = ops.convert_to_tensor(inner_dim_sizes, name='inner_dim_sizes')\n if partitioned_dim_sizes:\n for axis, dimension_size in enumerate(partitioned_dim_sizes):\n if dimension_size.shape.ndims is None:\n raise ValueError('rank of partitioned_dim_sizes[%d] is unknown' % axis)\n dimension_size.shape.with_rank_at_most(1)\n if partitioned_dim_sizes[0].shape.ndims == 1:\n raise ValueError('outermost partitioned dimension must be uniform')\n if partitioned_dim_sizes[-1].shape.ndims == 0:\n raise ValueError('innermost partitioned dimension must be ragged')\n inner_dim_sizes.shape.assert_has_rank(1)\n if dim_size_dtype is None:\n dim_size_dtypes = set((p.dtype for p in partitioned_dim_sizes if p.shape.ndims == 1))\n if not dim_size_dtypes:\n dim_size_dtype = dtypes.int64\n elif len(dim_size_dtypes) == 1:\n dim_size_dtype = dim_size_dtypes.pop()\n else:\n if not ragged_config.auto_cast_partition_dtype():\n raise ValueError('partitioned_dim_sizes must have matching dtypes')\n dim_size_dtype = dtypes.int64\n partitioned_dim_sizes = tuple((math_ops.cast(p, dim_size_dtype) for p in partitioned_dim_sizes))\n inner_dim_sizes = math_ops.cast(inner_dim_sizes, dim_size_dtype)\n self._partitioned_dim_sizes = partitioned_dim_sizes\n self._inner_dim_sizes = inner_dim_sizes\n\n def __repr__(self):\n return 'RaggedTensorDynamicShape(partitioned_dim_sizes=%r, inner_dim_sizes=%r)' % (self._partitioned_dim_sizes, self._inner_dim_sizes)\n\n @staticmethod\n def from_dim_sizes(dim_sizes):\n \"\"\"Constructs a ragged shape from a list of dimension sizes.\n\n This list contains a single tensor for each dimension, where the tensor\n is a scalar if the dimension is uniform, or a vector if the dimension is\n ragged.\n\n Args:\n dim_sizes: List of int32 or int64 scalars or vectors.\n\n Returns:\n A RaggedTensorDynamicShape.\n \"\"\"\n with ops.name_scope(None, 'RaggedTensorDynamicShapeFromDimensionSizes', [dim_sizes]):\n dim_sizes = tuple((ops.convert_to_tensor(size, preferred_dtype=dtypes.int64, name='dim_sizes') for size in dim_sizes))\n inner_split = 0\n for dim, dim_size in enumerate(dim_sizes):\n if dim_size.shape.ndims == 1:\n inner_split = dim + 1\n elif dim_size.shape.ndims != 0:\n raise ValueError('Each dim_size must be a scalar or a vector')\n return RaggedTensorDynamicShape(dim_sizes[:inner_split], dim_sizes[inner_split:])\n\n @classmethod\n def from_tensor(cls, rt_input, dim_size_dtype=None):\n \"\"\"Constructs a ragged shape for a potentially ragged tensor.\"\"\"\n with 
ops.name_scope(None, 'RaggedTensorDynamicShapeFromTensor', [rt_input]):\n rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt_input)\n if not ragged_tensor.is_ragged(rt_input):\n return cls([], array_ops.shape(rt_input), dim_size_dtype=dim_size_dtype)\n else:\n partitioned_dim_sizes = (rt_input.nrows(),) + rt_input.nested_row_lengths()\n return RaggedTensorDynamicShape(partitioned_dim_sizes, array_ops.shape(rt_input.flat_values)[1:], dim_size_dtype=dim_size_dtype)\n\n def dimension_size(self, axis):\n \"\"\"Returns the size of slices across the specified dimension.\"\"\"\n if not isinstance(axis, int):\n raise TypeError('axis must be an integer')\n partitioned_ndims = len(self._partitioned_dim_sizes)\n if axis < partitioned_ndims:\n return self._partitioned_dim_sizes[axis]\n else:\n return self._inner_dim_sizes[axis - partitioned_ndims]\n\n def is_ragged(self, axis):\n \"\"\"Returns true if the indicated dimension is ragged.\"\"\"\n if not isinstance(axis, int):\n raise TypeError('axis must be an integer')\n rank = self.rank\n if axis < 0:\n raise ValueError('Negative axis values are not supported')\n elif rank is not None and axis >= rank:\n raise ValueError('Expected axis=%s < rank=%s' % (axis, rank))\n else:\n return axis > 0 and axis < len(self._partitioned_dim_sizes) and (self._partitioned_dim_sizes[axis].shape.ndims == 1)\n\n @property\n def rank(self):\n \"\"\"The number of dimensions in this shape, or None if unknown.\"\"\"\n inner_ndims = tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])\n if inner_ndims is None:\n return None\n else:\n return len(self._partitioned_dim_sizes) + inner_ndims\n\n @property\n def partitioned_dim_sizes(self):\n \"\"\"The partitioned dimension sizes for this shape.\n\n Returns:\n A `list` of 0-D or 1-D integer `Tensor`.\n \"\"\"\n return self._partitioned_dim_sizes\n\n @property\n def inner_dim_sizes(self):\n \"\"\"The inner dimension sizes for this shape.\n\n Returns:\n A 1-D integer `Tensor`.\n \"\"\"\n return self._inner_dim_sizes\n\n @property\n def num_partitioned_dimensions(self):\n \"\"\"The number of partitioned dimensions in this shape.\"\"\"\n return len(self._partitioned_dim_sizes)\n\n @property\n def num_inner_dimensions(self):\n \"\"\"The number of inner dimensions, or `None` if not statically known.\"\"\"\n return tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])\n\n @property\n def dim_size_dtype(self):\n \"\"\"DType used by this shape for dimension sizes.\"\"\"\n return self._inner_dim_sizes.dtype\n\n def broadcast_to_rank(self, rank):\n \"\"\"Adds leading size-1 dimensions to broadcast `self` to the given rank.\n\n E.g., if `shape1` is `[3, (D2), 4]`, then `shape1.broadcast_to_rank(5)`\n is `[1, 1, 3, (D2), 4]`.\n\n Args:\n rank: The rank for the returned shape.\n\n Returns:\n A RaggedTensorDynamicShape with `rank` dimensions, whose inner dimensions\n have the same size as `self` and whose outer dimensions have size `1`.\n\n Raises:\n ValueError: If `self.rank` is unknown or greater than `rank`.\n \"\"\"\n if self.rank is None:\n raise ValueError('Unable to broadcast: self.rank is unknown')\n dims_to_add = rank - self.rank\n if dims_to_add < 0:\n raise ValueError('Unable to broadcast: rank=%d must be greater than self.rank=%d.' 
% (rank, self.rank))\n        elif dims_to_add == 0:\n            return self\n        elif self._partitioned_dim_sizes:\n            partitioned_dims = (1,) * dims_to_add + self._partitioned_dim_sizes\n            return RaggedTensorDynamicShape(partitioned_dims, self.inner_dim_sizes, self.dim_size_dtype)\n        else:\n            inner_dims = array_ops.concat([array_ops.ones([dims_to_add], self.dim_size_dtype), self.inner_dim_sizes], axis=0)\n            return RaggedTensorDynamicShape([], inner_dims, self.dim_size_dtype)\n\n    def broadcast_dimension(self, axis, lengths):\n        \"\"\"Returns a shape that is broadcast-compatible with self & lengths.\n\n        * If dimension[axis] is uniform and lengths is a scalar, then check\n          that either lengths==1, dimension[axis]==1, or dimension[axis]==lengths,\n          and tile dimension[axis] with tf.where(dimension[axis]==1, lengths, 1)\n          repeats.\n\n        * If dimension[axis] is uniform and lengths is a vector, then check\n          that dimension[axis]==1, and raggedly tile dimension[axis] with\n          lengths repeats. (Tiling can be skipped if slice_lengths is\n          statically known to be 1.)\n\n        * If dimension[axis] is ragged and lengths is a scalar, then check\n          that lengths==1.\n\n        * If dimension[axis] is ragged and lengths is a vector, then check\n          that self.dimension_size(axis) == lengths.\n\n        Args:\n            axis: `int`. The dimension to broadcast.\n            lengths: 0-D or 1-D integer `Tensor`.\n\n        Returns:\n            A `RaggedTensorDynamicShape`.\n        \"\"\"\n        lengths = ragged_util.convert_to_int_tensor(lengths, name='lengths', dtype=self.dim_size_dtype)\n        if lengths.shape.ndims is None:\n            raise ValueError('lengths must have a known rank.')\n        elif lengths.shape.ndims > 1:\n            raise ValueError('lengths must be a scalar or vector')\n        else:\n            lengths_is_scalar = lengths.shape.ndims == 0\n        if self.is_ragged(axis):\n            if lengths_is_scalar:\n                condition = math_ops.equal(lengths, 1)\n            else:\n                condition = math_ops.reduce_all(math_ops.equal(lengths, self.dimension_size(axis)))\n        else:\n            axis_dim_size = self.dimension_size(axis)\n            if lengths_is_scalar:\n                condition = math_ops.equal(lengths, 1) | math_ops.equal(axis_dim_size, 1) | math_ops.equal(axis_dim_size, lengths)\n            else:\n                condition = math_ops.equal(axis_dim_size, 1)\n        broadcast_err = ['Unable to broadcast: dimension size mismatch in dimension', axis, 'lengths=', lengths, 'dim_size=', self.dimension_size(axis)]\n        broadcast_check = control_flow_assert.Assert(condition, data=broadcast_err, summarize=10)\n        with ops.control_dependencies([broadcast_check]):\n            if axis < self.num_partitioned_dimensions:\n                if self.is_ragged(axis):\n                    return RaggedTensorDynamicShape(self._partitioned_dim_sizes, array_ops.identity(self.inner_dim_sizes), self.dim_size_dtype)\n                else:\n                    return self._broadcast_uniform_partitioned_dimension(axis, lengths)\n            elif lengths_is_scalar:\n                return self._broadcast_inner_dimension_to_uniform(axis, lengths)\n            else:\n                if axis == 0:\n                    raise ValueError('Unable to broadcast: outermost dimension must be uniform.')\n                return self._broadcast_inner_dimension_to_ragged(axis, lengths)\n\n    def num_slices_in_dimension(self, axis):\n        \"\"\"Returns the total number of slices across the indicated dimension.\"\"\"\n        if axis < 0:\n            return constant_op.constant(1, dtype=self.dim_size_dtype)\n        elif self.is_ragged(axis):\n            return math_ops.reduce_sum(self._partitioned_dim_sizes[axis])\n        else:\n            return self.dimension_size(axis) * self.num_slices_in_dimension(axis - 1)\n\n    def _broadcast_uniform_partitioned_dimension(self, axis, lengths):\n        \"\"\"Broadcasts the partitioned dimension `axis` to match `lengths`.\"\"\"\n        axis_dim_size = self.dimension_size(axis)\n        partitioned_sizes = 
list(self._partitioned_dim_sizes[:axis])\n        if lengths.shape.ndims == 0:\n            lengths = array_ops.where(math_ops.equal(axis_dim_size, 1), lengths, axis_dim_size)\n            repeats = array_ops.where(math_ops.equal(axis_dim_size, 1), lengths, 1)\n            splits = array_ops_stack.stack([0, self.num_slices_in_dimension(axis)])\n        else:\n            splits = math_ops.range(array_ops.size(lengths, out_type=self.dim_size_dtype) + 1)\n            repeats = lengths\n        partitioned_sizes.append(lengths)\n        for dim_size in self._partitioned_dim_sizes[axis + 1:]:\n            if dim_size.shape.ndims == 0:\n                partitioned_sizes.append(dim_size)\n                splits *= dim_size\n            else:\n                partitioned_sizes.append(ragged_util.repeat_ranges(dim_size, splits, repeats))\n                splits = array_ops.gather(ragged_util.lengths_to_splits(dim_size), splits)\n        inner_sizes = self._inner_dim_sizes\n        return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes, self.dim_size_dtype)\n\n    def _broadcast_inner_dimension_to_uniform(self, axis, length):\n        \"\"\"Broadcasts the inner dimension `axis` to match `length`.\"\"\"\n        dim_size = self.dimension_size(axis)\n        axis_in_inner_dims = axis - self.num_partitioned_dimensions\n        partitioned_sizes = self._partitioned_dim_sizes\n        inner_sizes = array_ops.concat([self._inner_dim_sizes[:axis_in_inner_dims], [array_ops.where(math_ops.equal(dim_size, 1), length, dim_size)], self._inner_dim_sizes[axis_in_inner_dims + 1:]], axis=0)\n        return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes, self.dim_size_dtype)\n\n    def _broadcast_inner_dimension_to_ragged(self, axis, lengths):\n        axis_in_inner_dims = axis - self.num_partitioned_dimensions\n        partitioned_sizes = self._partitioned_dim_sizes + tuple([self._inner_dim_sizes[i] for i in range(axis_in_inner_dims)]) + (lengths,)\n        inner_sizes = self._inner_dim_sizes[axis_in_inner_dims + 1:]\n        return RaggedTensorDynamicShape(partitioned_sizes, inner_sizes)\n\n    def with_dim_size_dtype(self, dtype):\n        if dtype not in (dtypes.int32, dtypes.int64):\n            raise ValueError('dtype must be int32 or int64')\n        if self.dim_size_dtype == dtype:\n            return self\n        return RaggedTensorDynamicShape([math_ops.cast(p, dtype) for p in self._partitioned_dim_sizes], math_ops.cast(self._inner_dim_sizes, dtype))", "docstring": "A collection of tensors encoding the shape of a potentially ragged tensor.\n\nEach `RaggedTensorDynamicShape` consists of an ordered list of dimension\nsizes. There are two dimension types:\n\n    * \"Uniform dimensions\" are dimensions where all slices have the same\n      length. `RaggedTensorDynamicShape` records the size of each uniform\n      dimension using a single scalar integer.\n\n    * \"Ragged dimensions\" are dimensions whose slices may have different\n      lengths. `RaggedTensorDynamicShape` records the size of each ragged\n      dimension using an integer vector containing the slice lengths for all\n      the slices across that dimension.\n\nFurthermore, there are two ways a dimension might be encoded:\n\n    * \"Partitioned dimensions\" are dimensions that are encoded using a\n      `RaggedTensor`'s `nested_row_splits`. The outermost partitioned\n      dimension must be uniform, and the innermost partitioned dimension must\n      be ragged.\n\n    * \"Inner dimensions\" are dimensions that are encoded using a\n      `RaggedTensor`'s `flat_values`. 
Inner dimensions are always uniform.\n\nThe sizes of partitioned dimensions are recorded using `partitioned_dim_sizes`\nand `inner_dim_sizes`:\n\n    * `partitioned_dim_sizes` is a list of tensors (one for each partitioned\n      dimension).\n\n        * For uniform dimensions, the tensor is an integer scalar specifying the\n          size of all slices across that dimension.\n        * For ragged dimensions, the tensor is an integer vector specifying the\n          size of each slice across that dimension.\n\n    * `inner_dim_sizes` is a single integer vector, where each element\n      specifies the size of a single inner dimension.\n\nExamples:\n\nTensor                         | Ragged | Partitioned Dim Sizes  | Inner Dim\n                               : Rank   :                        : Sizes\n------------------------------ | ------ | ---------------------- | ----------\n`[[1, 2, 3], [4, 5, 6]]`       | 0      |                        | `2, 3`\n`[[1, 2], [], [3, 4, 5]]`      | 1      | `3, (2, 0, 3)`         |\n`[[[1, 2], [3, 4]], [[5, 6]]]` | 1      | `2, (2, 1)`            | 2\n`[[[1, 2], [3]], [[4, 5]]]`    | 2      | `2, (2, 1), (2, 1, 2)` |"}
+{"repo": "beam", "function": "def increment_max_models(self, increment: int):\n    if self._max_models is None:\n        self._max_models = 0\n    self._max_models += increment", "docstring": "Increments the number of models that this instance of a _ModelManager is\nable to hold. If it is never called, no limit is imposed.\nArgs:\n    increment: the amount by which we are incrementing the number of models."}
+{"repo": "tensorflow", "function": "def get_task_states(self, job_configs):\n    if self._context_handle:\n        job_names, task_nums = zip(*job_configs)\n        return pywrap_tfe.TFE_GetTaskStates(self._context_handle, job_names, task_nums)\n    else:\n        raise ValueError('Context is not initialized.')", "docstring": "Get task states from the Coordination Service.\n\nArgs:\n    job_configs: A list of tuples of job name and task number.\n\nReturns:\n    A list of TF_Status."}
+{"repo": "transformers", "function": "class FlavaModelOutput(ModelOutput):\n    image_embeddings: Optional[torch.FloatTensor] = None\n    image_output: Optional[BaseModelOutputWithPooling] = None\n    text_embeddings: Optional[torch.FloatTensor] = None\n    text_output: Optional[BaseModelOutputWithPooling] = None\n    multimodal_embeddings: Optional[torch.FloatTensor] = None\n    multimodal_output: Optional[BaseModelOutputWithPooling] = None\n\n    def to_tuple(self) -> Tuple[Any]:\n        return tuple((self[k] if k not in ['text_output', 'image_output', 'multimodal_output'] else getattr(self, k).to_tuple() for k in self.keys()))", "docstring": "Output from FlavaModel containing embeddings and outputs from individual encoders.\n\nNote that `image_embeddings` and `text_embeddings` returned are similar to pooled output returned from a\ntransformer. 
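The encoding described in the `RaggedTensorDynamicShape` docstring above can be checked against a real `tf.RaggedTensor`; this hedged sketch mirrors what `from_tensor()` extracts for the `[[1, 2], [], [3, 4, 5]]` row of the examples table:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2], [], [3, 4, 5]])

nrows = rt.nrows()                         # outermost uniform dimension: 3
row_lengths = rt.nested_row_lengths()      # ragged dimension sizes: ([2, 0, 3],)
inner_dims = tf.shape(rt.flat_values)[1:]  # no inner dims for a 2-D ragged tensor

print(int(nrows))                                 # 3
print([t.numpy().tolist() for t in row_lengths])  # [[2, 0, 3]]
print(inner_dims.numpy())                         # []
```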
If you want embeddings for contrastive loss or retrieval use a FLAVA model's `image_projection` and\n`text_projection` layers on `image_embeddings` and `text_embeddings` respectively.\n\nArgs:\n    image_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `pixel_values` are present):\n        The image embeddings which are basically the pooled output of [`FlavaImageModel`].\n    image_output (`BaseModelOutputWithPooling`, *optional*, returned when `pixel_values` are present):\n        The output of the [`FlavaImageModel`].\n    text_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` are present):\n        The text embeddings which are basically the pooled output of [`FlavaTextModel`].\n    text_output (`BaseModelOutputWithPooling`, *optional*, returned when `input_ids` are present):\n        The output of the [`FlavaTextModel`].\n    multimodal_embeddings (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):\n        The multimodal embeddings which are basically the pooled output of [`FlavaMultimodalModel`].\n    multimodal_output (`BaseModelOutputWithPooling`, returned when `input_ids` and `pixel_values` are present and `skip_multimodal_encoder` is `None` or `False`):\n        The output of the [`FlavaMultimodalModel`]."}
+{"repo": "tensorflow", "function": "def __init__(self, initial_value=None, trainable=None, collections=None, validate_shape=True, caching_device=None, name=None, variable_def=None, dtype=None, expected_shape=None, import_scope=None, constraint=None, use_resource=None, synchronization=variables.VariableSynchronization.AUTO, aggregation=variables.VariableAggregation.NONE, shape=None):", "docstring": "Creates a new variable with value `initial_value`.\n\nThe new variable is added to the graph collections listed in `collections`,\nwhich defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n\nIf `trainable` is `True` the variable is also added to the graph collection\n`GraphKeys.TRAINABLE_VARIABLES`.\n\nThis constructor creates both a `variable` Op and an `assign` Op to set the\nvariable to its initial value.\n\nArgs:\n    initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n        which is the initial value for the Variable. The initial value must have\n        a shape specified unless `validate_shape` is set to False. Can also be a\n        callable with no argument that returns the initial value when called. In\n        that case, `dtype` must be specified. (Note that initializer functions\n        from init_ops.py must first be bound to a shape before being used here.)\n    trainable: If `True`, also adds the variable to the graph collection\n        `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default\n        list of variables to use by the `Optimizer` classes. Defaults to `True`,\n        unless `synchronization` is set to `ON_READ`, in which case it defaults\n        to `False`.\n    collections: List of graph collections keys. The new variable is added to\n        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n    validate_shape: If `False`, allows the variable to be initialized with a\n        value of unknown shape. If `True`, the default, the shape of\n        `initial_value` must be known.\n    caching_device: Optional device string describing where the Variable\n        should be cached for reading. Defaults to the Variable's device. If not\n        `None`, caches on another device. 
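The `to_tuple` override in `FlavaModelOutput` above recursively tuple-izes the nested encoder outputs rather than returning them as objects. A minimal plain-Python sketch of that pattern (hypothetical toy classes, not the transformers API):

```python
from dataclasses import dataclass
from typing import Any, Optional, Tuple

@dataclass
class PooledOutput:
    last_hidden_state: Any = None
    pooler_output: Any = None

    def to_tuple(self) -> Tuple:
        return (self.last_hidden_state, self.pooler_output)

@dataclass
class ToyModelOutput:
    image_embeddings: Any = None
    image_output: Optional[PooledOutput] = None

    def to_tuple(self) -> Tuple:
        # Nested outputs are converted via their own to_tuple(), as in
        # FlavaModelOutput.to_tuple above; plain fields pass through.
        fields = [('image_embeddings', False), ('image_output', True)]
        return tuple(
            getattr(self, name).to_tuple()
            if nested and getattr(self, name) is not None
            else getattr(self, name)
            for name, nested in fields
        )

out = ToyModelOutput(image_embeddings=[1.0], image_output=PooledOutput('h', 'p'))
print(out.to_tuple())  # ([1.0], ('h', 'p'))
```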
Typical use is to cache on the device\n        where the Ops using the Variable reside, to deduplicate copying through\n        `Switch` and other conditional statements.\n    name: Optional name for the variable. Defaults to `'Variable'` and gets\n        uniquified automatically.\n    variable_def: `VariableDef` protocol buffer. If not `None`, recreates the\n        Variable object with its contents, referencing the variable's nodes in\n        the graph, which must already exist. The graph is not changed.\n        `variable_def` and the other arguments are mutually exclusive.\n    dtype: If set, initial_value will be converted to the given type. If\n        `None`, either the datatype will be kept (if `initial_value` is a\n        Tensor), or `convert_to_tensor` will decide.\n    expected_shape: A TensorShape. If set, initial_value is expected to have\n        this shape.\n    import_scope: Optional `string`. Name scope to add to the `Variable`. Only\n        used when initializing from protocol buffer.\n    constraint: An optional projection function to be applied to the variable\n        after being updated by an `Optimizer` (e.g. used to implement norm\n        constraints or value constraints for layer weights). The function must\n        take as input the unprojected Tensor representing the value of the\n        variable and return the Tensor for the projected value (which must have\n        the same shape). Constraints are not safe to use when doing asynchronous\n        distributed training.\n    use_resource: whether to use resource variables.\n    synchronization: Indicates when a distributed variable will be\n        aggregated. Accepted values are constants defined in the class\n        `tf.VariableSynchronization`. By default the synchronization is set to\n        `AUTO` and the current `DistributionStrategy` chooses when to\n        synchronize.\n    aggregation: Indicates how a distributed variable will be aggregated.\n        Accepted values are constants defined in the class\n        `tf.VariableAggregation`.\n    shape: (optional) The shape of this variable. If None, the shape of\n        `initial_value` will be used. When setting this argument to\n        `tf.TensorShape(None)` (representing an unspecified shape), the variable\n        can be assigned with values of different shapes.\n\nRaises:\n    ValueError: If both `variable_def` and initial_value are specified.\n    ValueError: If the initial value is not specified, or does not have a\n        shape and `validate_shape` is `True`.\n    RuntimeError: If eager execution is enabled."}
+{"repo": "beam", "function": "def _retry_on_appropriate_openai_error(exception):\n    return isinstance(exception, (RateLimitError, APIError))", "docstring": "Retry filter that returns True for rate limit (429) or server (5xx) errors.\n\nArgs:\n    exception: the returned exception encountered during the request/response\n    loop.\n\nReturns:\n    boolean indication whether or not the exception is a Server Error (5xx) or\n    a RateLimitError (429) error."}
+{"repo": "tensorflow", "function": "class MaxPooling1D(keras_layers.MaxPooling1D, base.Layer):\n\n    def __init__(self, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):\n        if strides is None:\n            raise ValueError('Argument `strides` must not be None.')\n        super(MaxPooling1D, self).__init__(pool_size=pool_size, strides=strides, padding=padding, data_format=data_format, name=name, **kwargs)", "docstring": "Max Pooling layer for 1D inputs.\n\nArgs:\n    pool_size: An integer or tuple/list of a single integer,\n        representing the size of the pooling window.\n    strides: An integer or tuple/list of a single integer, specifying the\n        strides of the pooling operation.\n    padding: A string. 
The padding method, either 'valid' or 'same'.\n Case-insensitive.\n data_format: A string, one of `channels_last` (default) or `channels_first`.\n The ordering of the dimensions in the inputs.\n `channels_last` corresponds to inputs with shape\n `(batch, length, channels)` while `channels_first` corresponds to\n inputs with shape `(batch, channels, length)`.\n name: A string, the name of the layer."} +{"repo": "tensorflow", "function": "def prepare_background_data(self):\n self.background_data = []\n background_dir = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME)\n if not gfile.Exists(background_dir):\n return self.background_data\n with tf.compat.v1.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.compat.v1.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(wav_filename_placeholder)\n wav_decoder = tf.audio.decode_wav(wav_loader, desired_channels=1)\n search_path = os.path.join(self.data_dir, BACKGROUND_NOISE_DIR_NAME, '*.wav')\n for wav_path in gfile.Glob(search_path):\n wav_data = sess.run(wav_decoder, feed_dict={wav_filename_placeholder: wav_path}).audio.flatten()\n self.background_data.append(wav_data)\n if not self.background_data:\n raise Exception('No background wav files were found in ' + search_path)", "docstring": "Searches a folder for background noise audio, and loads it into memory.\n\nIt's expected that the background audio samples will be in a subdirectory\nnamed '_background_noise_' inside the 'data_dir' folder, as .wavs that match\nthe sample rate of the training data, but can be much longer in duration.\n\nIf the '_background_noise_' folder doesn't exist at all, this isn't an\nerror, it's just taken to mean that no background noise augmentation should\nbe used. If the folder does exist, but it's empty, that's treated as an\nerror.\n\nReturns:\n List of raw PCM-encoded audio samples of background noise.\n\nRaises:\n Exception: If files aren't found in the folder."} +{"repo": "transformers", "function": "class VitsConfig(PretrainedConfig):\n model_type = 'vits'\n\n def __init__(self, vocab_size=38, hidden_size=192, num_hidden_layers=6, num_attention_heads=2, window_size=4, use_bias=True, ffn_dim=768, layerdrop=0.1, ffn_kernel_size=3, flow_size=192, spectrogram_bins=513, hidden_act='relu', hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.1, initializer_range=0.02, layer_norm_eps=1e-05, use_stochastic_duration_prediction=True, num_speakers=1, speaker_embedding_size=0, upsample_initial_channel=512, upsample_rates=[8, 8, 2, 2], upsample_kernel_sizes=[16, 16, 4, 4], resblock_kernel_sizes=[3, 7, 11], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], leaky_relu_slope=0.1, depth_separable_channels=2, depth_separable_num_layers=3, duration_predictor_flow_bins=10, duration_predictor_tail_bound=5.0, duration_predictor_kernel_size=3, duration_predictor_dropout=0.5, duration_predictor_num_flows=4, duration_predictor_filter_channels=256, prior_encoder_num_flows=4, prior_encoder_num_wavenet_layers=4, posterior_encoder_num_wavenet_layers=16, wavenet_kernel_size=5, wavenet_dilation_rate=1, wavenet_dropout=0.0, speaking_rate=1.0, noise_scale=0.667, noise_scale_duration=0.8, sampling_rate=16000, **kwargs):\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.window_size = window_size\n self.use_bias = use_bias\n self.ffn_dim = ffn_dim\n self.layerdrop = layerdrop\n self.ffn_kernel_size = ffn_kernel_size\n self.flow_size = 
flow_size\n self.spectrogram_bins = spectrogram_bins\n self.hidden_act = hidden_act\n self.hidden_dropout = hidden_dropout\n self.attention_dropout = attention_dropout\n self.activation_dropout = activation_dropout\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.use_stochastic_duration_prediction = use_stochastic_duration_prediction\n self.num_speakers = num_speakers\n self.speaker_embedding_size = speaker_embedding_size\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_rates = upsample_rates\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.leaky_relu_slope = leaky_relu_slope\n self.depth_separable_channels = depth_separable_channels\n self.depth_separable_num_layers = depth_separable_num_layers\n self.duration_predictor_flow_bins = duration_predictor_flow_bins\n self.duration_predictor_tail_bound = duration_predictor_tail_bound\n self.duration_predictor_kernel_size = duration_predictor_kernel_size\n self.duration_predictor_dropout = duration_predictor_dropout\n self.duration_predictor_num_flows = duration_predictor_num_flows\n self.duration_predictor_filter_channels = duration_predictor_filter_channels\n self.prior_encoder_num_flows = prior_encoder_num_flows\n self.prior_encoder_num_wavenet_layers = prior_encoder_num_wavenet_layers\n self.posterior_encoder_num_wavenet_layers = posterior_encoder_num_wavenet_layers\n self.wavenet_kernel_size = wavenet_kernel_size\n self.wavenet_dilation_rate = wavenet_dilation_rate\n self.wavenet_dropout = wavenet_dropout\n self.speaking_rate = speaking_rate\n self.noise_scale = noise_scale\n self.noise_scale_duration = noise_scale_duration\n self.sampling_rate = sampling_rate\n if len(upsample_kernel_sizes) != len(upsample_rates):\n raise ValueError(f'The length of `upsample_kernel_sizes` ({len(upsample_kernel_sizes)}) must match the length of `upsample_rates` ({len(upsample_rates)})')\n super().__init__(**kwargs)", "docstring": "This is the configuration class to store the configuration of a [`VitsModel`]. It is used to instantiate a VITS\nmodel according to the specified arguments, defining the model architecture. Instantiating a configuration with the\ndefaults will yield a similar configuration to that of the VITS\n[facebook/mms-tts-eng](https://huggingface.co/facebook/mms-tts-eng) architecture.\n\nConfiguration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\ndocumentation from [`PretrainedConfig`] for more information.\n\nArgs:\n vocab_size (`int`, *optional*, defaults to 38):\n Vocabulary size of the VITS model. 
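The tail of the `VitsConfig.__init__` above enforces that `upsample_rates` and `upsample_kernel_sizes` stay in lockstep, since each HiFi-GAN upsampling convolution needs both a stride and a kernel size. A standalone sketch of that check (hypothetical helper, not the transformers class):

```python
def check_upsample_args(upsample_rates, upsample_kernel_sizes):
    # One kernel size per upsampling stride; mirrors the ValueError raised
    # in VitsConfig.__init__.
    if len(upsample_kernel_sizes) != len(upsample_rates):
        raise ValueError(
            f'The length of `upsample_kernel_sizes` ({len(upsample_kernel_sizes)}) '
            f'must match the length of `upsample_rates` ({len(upsample_rates)})')

check_upsample_args([8, 8, 2, 2], [16, 16, 4, 4])   # OK: the defaults line up
# check_upsample_args([8, 8, 2], [16, 16, 4, 4])    # would raise ValueError
```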
Defines the number of different tokens that can be represented by the\n `inputs_ids` passed to the forward method of [`VitsModel`].\n hidden_size (`int`, *optional*, defaults to 192):\n Dimensionality of the text encoder layers.\n num_hidden_layers (`int`, *optional*, defaults to 6):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 2):\n Number of attention heads for each attention layer in the Transformer encoder.\n window_size (`int`, *optional*, defaults to 4):\n Window size for the relative positional embeddings in the attention layers of the Transformer encoder.\n use_bias (`bool`, *optional*, defaults to `True`):\n Whether to use bias in the key, query, value projection layers in the Transformer encoder.\n ffn_dim (`int`, *optional*, defaults to 768):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n layerdrop (`float`, *optional*, defaults to 0.1):\n The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)\n for more details.\n ffn_kernel_size (`int`, *optional*, defaults to 3):\n Kernel size of the 1D convolution layers used by the feed-forward network in the Transformer encoder.\n flow_size (`int`, *optional*, defaults to 192):\n Dimensionality of the flow layers.\n spectrogram_bins (`int`, *optional*, defaults to 513):\n Number of frequency bins in the target spectrogram.\n hidden_act (`str` or `function`, *optional*, defaults to `\"relu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n hidden_dropout (`float`, *optional*, defaults to 0.1):\n The dropout probability for all fully connected layers in the embeddings and encoder.\n attention_dropout (`float`, *optional*, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n activation_dropout (`float`, *optional*, defaults to 0.1):\n The dropout ratio for activations inside the fully connected layer.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-05):\n The epsilon used by the layer normalization layers.\n use_stochastic_duration_prediction (`bool`, *optional*, defaults to `True`):\n Whether to use the stochastic duration prediction module or the regular duration predictor.\n num_speakers (`int`, *optional*, defaults to 1):\n Number of speakers if this is a multi-speaker model.\n speaker_embedding_size (`int`, *optional*, defaults to 0):\n Number of channels used by the speaker embeddings. Is zero for single-speaker models.\n upsample_initial_channel (`int`, *optional*, defaults to 512):\n The number of input channels into the HiFi-GAN upsampling network.\n upsample_rates (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 2, 2]`):\n A tuple of integers defining the stride of each 1D convolutional layer in the HiFi-GAN upsampling network.\n The length of `upsample_rates` defines the number of convolutional layers and has to match the length of\n `upsample_kernel_sizes`.\n upsample_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[16, 16, 4, 4]`):\n A tuple of integers defining the kernel size of each 1D convolutional layer in the HiFi-GAN upsampling\n network. 
The length of `upsample_kernel_sizes` defines the number of convolutional layers and has to match\n        the length of `upsample_rates`.\n    resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 7, 11]`):\n        A tuple of integers defining the kernel sizes of the 1D convolutional layers in the HiFi-GAN\n        multi-receptive field fusion (MRF) module.\n    resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):\n        A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the\n        HiFi-GAN multi-receptive field fusion (MRF) module.\n    leaky_relu_slope (`float`, *optional*, defaults to 0.1):\n        The angle of the negative slope used by the leaky ReLU activation.\n    depth_separable_channels (`int`, *optional*, defaults to 2):\n        Number of channels to use in each depth-separable block.\n    depth_separable_num_layers (`int`, *optional*, defaults to 3):\n        Number of convolutional layers to use in each depth-separable block.\n    duration_predictor_flow_bins (`int`, *optional*, defaults to 10):\n        Number of channels to map using the unconstrained rational spline in the duration predictor model.\n    duration_predictor_tail_bound (`float`, *optional*, defaults to 5.0):\n        Value of the tail bin boundary when computing the unconstrained rational spline in the duration predictor\n        model.\n    duration_predictor_kernel_size (`int`, *optional*, defaults to 3):\n        Kernel size of the 1D convolution layers used in the duration predictor model.\n    duration_predictor_dropout (`float`, *optional*, defaults to 0.5):\n        The dropout ratio for the duration predictor model.\n    duration_predictor_num_flows (`int`, *optional*, defaults to 4):\n        Number of flow stages used by the duration predictor model.\n    duration_predictor_filter_channels (`int`, *optional*, defaults to 256):\n        Number of channels for the convolution layers used in the duration predictor model.\n    prior_encoder_num_flows (`int`, *optional*, defaults to 4):\n        Number of flow stages used by the prior encoder flow model.\n    prior_encoder_num_wavenet_layers (`int`, *optional*, defaults to 4):\n        Number of WaveNet layers used by the prior encoder flow model.\n    posterior_encoder_num_wavenet_layers (`int`, *optional*, defaults to 16):\n        Number of WaveNet layers used by the posterior encoder model.\n    wavenet_kernel_size (`int`, *optional*, defaults to 5):\n        Kernel size of the 1D convolution layers used in the WaveNet model.\n    wavenet_dilation_rate (`int`, *optional*, defaults to 1):\n        Dilation rates of the dilated 1D convolutional layers used in the WaveNet model.\n    wavenet_dropout (`float`, *optional*, defaults to 0.0):\n        The dropout ratio for the WaveNet layers.\n    speaking_rate (`float`, *optional*, defaults to 1.0):\n        Speaking rate. Larger values give faster synthesised speech.\n    noise_scale (`float`, *optional*, defaults to 0.667):\n        How random the speech prediction is. Larger values create more variation in the predicted speech.\n    noise_scale_duration (`float`, *optional*, defaults to 0.8):\n        How random the duration prediction is. 
Larger values create more variation in the predicted durations.\n sampling_rate (`int`, *optional*, defaults to 16000):\n The sampling rate at which the output audio waveform is digitalized expressed in hertz (Hz).\n\nExample:\n\n```python\n>>> from transformers import VitsModel, VitsConfig\n\n>>> # Initializing a \"facebook/mms-tts-eng\" style configuration\n>>> configuration = VitsConfig()\n\n>>> # Initializing a model (with random weights) from the \"facebook/mms-tts-eng\" style configuration\n>>> model = VitsModel(configuration)\n\n>>> # Accessing the model configuration\n>>> configuration = model.config\n```"} +{"repo": "transformers", "function": "def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, labels: np.ndarray | tf.Tensor | None=None, next_sentence_label: np.ndarray | tf.Tensor | None=None, training: Optional[bool]=False) -> Union[Tuple, TFMobileBertForPreTrainingOutput]:\n outputs = self.mobilebert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)\n sequence_output, pooled_output = outputs[:2]\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n total_loss = None\n if labels is not None and next_sentence_label is not None:\n d_labels = {'labels': labels}\n d_labels['next_sentence_label'] = next_sentence_label\n total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score))\n if not return_dict:\n output = (prediction_scores, seq_relationship_score) + outputs[2:]\n return (total_loss,) + output if total_loss is not None else output\n return TFMobileBertForPreTrainingOutput(loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions)", "docstring": "Return:\n\nExamples:\n\n```python\n>>> import tensorflow as tf\n>>> from transformers import AutoTokenizer, TFMobileBertForPreTraining\n\n>>> tokenizer = AutoTokenizer.from_pretrained(\"google/mobilebert-uncased\")\n>>> model = TFMobileBertForPreTraining.from_pretrained(\"google/mobilebert-uncased\")\n>>> input_ids = tf.constant(tokenizer.encode(\"Hello, my dog is cute\"))[None, :] # Batch size 1\n>>> outputs = model(input_ids)\n>>> prediction_scores, seq_relationship_scores = outputs[:2]\n```"} +{"repo": "tf-quant-finance", "function": "def calibration_from_swaptions(*, prices: types.RealTensor, expiries: types.RealTensor, floating_leg_start_times: types.RealTensor, floating_leg_end_times: types.RealTensor, fixed_leg_payment_times: types.RealTensor, floating_leg_daycount_fractions: types.RealTensor, fixed_leg_daycount_fractions: types.RealTensor, fixed_leg_coupon: types.RealTensor, reference_rate_fn: Callable[..., types.RealTensor], num_hjm_factors: types.RealTensor, mean_reversion: types.RealTensor, volatility: types.RealTensor, notional: types.RealTensor=None, is_payer_swaption: types.BoolTensor=None, swaption_valuation_method: 
vm.ValuationMethod=None, num_samples: types.IntTensor=1, random_type: random.RandomType=None, seed: types.IntTensor=None, skip: types.IntTensor=0, times: types.RealTensor=None, time_step: types.RealTensor=None, num_time_steps: types.IntTensor=None, curve_times: types.RealTensor=None, time_step_finite_difference: types.RealTensor=None, num_grid_points_finite_difference: types.IntTensor=101, volatility_based_calibration: bool=True, calibrate_correlation: bool=True, optimizer_fn: Callable[..., types.RealTensor]=None, mean_reversion_lower_bound: types.RealTensor=0.001, mean_reversion_upper_bound: types.RealTensor=0.5, volatility_lower_bound: types.RealTensor=1e-05, volatility_upper_bound: types.RealTensor=0.1, tolerance: types.RealTensor=1e-06, maximum_iterations: types.IntTensor=50, dtype: tf.DType=None, name: str=None) -> Tuple[CalibrationResult, types.BoolTensor, types.IntTensor]:\n del floating_leg_daycount_fractions\n name = name or 'hjm_swaption_calibration'\n with tf.name_scope(name):\n prices = tf.convert_to_tensor(prices, dtype=dtype, name='prices')\n dtype = dtype or prices.dtype\n expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')\n float_leg_start_times = tf.convert_to_tensor(floating_leg_start_times, dtype=dtype, name='float_leg_start_times')\n float_leg_end_times = tf.convert_to_tensor(floating_leg_end_times, dtype=dtype, name='float_leg_end_times')\n fixed_leg_payment_times = tf.convert_to_tensor(fixed_leg_payment_times, dtype=dtype, name='fixed_leg_payment_times')\n fixed_leg_daycount_fractions = tf.convert_to_tensor(fixed_leg_daycount_fractions, dtype=dtype, name='fixed_leg_daycount_fractions')\n fixed_leg_coupon = tf.convert_to_tensor(fixed_leg_coupon, dtype=dtype, name='fixed_leg_coupon')\n if times is None:\n times, _ = tf.unique(tf.reshape(expiries, [-1]))\n times = tf.sort(times, name='sort_times')\n else:\n times = tf.convert_to_tensor(times, dtype=dtype)\n if curve_times is None:\n tau = fixed_leg_payment_times - tf.expand_dims(expiries, axis=-1)\n curve_times, _ = tf.unique(tf.reshape(tau, [-1]))\n curve_times = tf.sort(curve_times)\n else:\n curve_times = tf.convert_to_tensor(curve_times, dtype=dtype)\n notional = tf.convert_to_tensor(notional, dtype=dtype, name='notional')\n vol_lb = tf.convert_to_tensor(volatility_lower_bound, dtype=dtype)\n vol_ub = tf.convert_to_tensor(volatility_upper_bound, dtype=dtype)\n mr_lb = tf.convert_to_tensor(mean_reversion_lower_bound, dtype=dtype)\n mr_ub = tf.convert_to_tensor(mean_reversion_upper_bound, dtype=dtype)\n theta_lb = tf.convert_to_tensor(0, dtype=dtype)\n theta_ub = tf.convert_to_tensor(_THETA_UB, dtype=dtype)\n mean_reversion = tf.convert_to_tensor(mean_reversion, dtype=dtype)\n volatility = tf.convert_to_tensor(volatility, dtype=dtype)\n swaption_valuation_method = swaption_valuation_method or vm.ValuationMethod.MONTE_CARLO\n if optimizer_fn is None:\n optimizer_fn = optimizer.conjugate_gradient_minimize\n\n def _price_to_normal_vol(x, swap_rate, annuity):\n vols = implied_vol(prices=x / annuity / notional, strikes=fixed_leg_coupon[..., 0], expiries=expiries, forwards=swap_rate, is_call_options=is_payer_swaption, underlying_distribution=UnderlyingDistribution.NORMAL, dtype=dtype)\n return vols\n if volatility_based_calibration:\n batch_shape = tf.shape(prices)[:-1]\n batch_size = tf.math.reduce_prod(batch_shape)\n num_instruments = tf.shape(prices)[-1]\n swap_rate, annuity = swap.ir_swap_par_rate_and_annuity(float_leg_start_times, float_leg_end_times, fixed_leg_payment_times, 
fixed_leg_daycount_fractions, reference_rate_fn)\n swap_rate = tf.reshape(swap_rate, [batch_size, batch_size, num_instruments])\n annuity = tf.reshape(annuity, [batch_size, batch_size, num_instruments])\n indices = tf.stack([tf.range(batch_size, dtype=tf.int32), tf.range(batch_size, dtype=tf.int32)], axis=-1)\n swap_rate = tf.gather_nd(swap_rate, indices)\n annuity = tf.gather_nd(annuity, indices)\n swap_rate = tf.reshape(swap_rate, tf.shape(prices))\n annuity = tf.reshape(annuity, tf.shape(prices))\n target_values = _price_to_normal_vol(prices, swap_rate, annuity)\n else:\n target_values = prices\n with tf.control_dependencies([target_values]):\n tf.debugging.assert_all_finite(target_values, 'Conversion to implied vols resulted in failure for input swaption prices.')\n target_lb = tf.constant(0.0, dtype=dtype)\n target_ub = tf.math.reduce_max(target_values)\n\n def _scale(x, lb, ub):\n return (x - lb) / (ub - lb)\n\n def _to_unconstrained(x, lb, ub):\n x = _scale(x, lb, ub)\n return -tf.math.log((1.0 - x) / x)\n\n def _to_constrained(x, lb, ub):\n x = tf.math.exp(x) / (1.0 + tf.math.exp(x))\n return x * (ub - lb) + lb\n if calibrate_correlation:\n num_thetas = num_hjm_factors * (num_hjm_factors - 1)\n init_corr = tf.range(0.1, num_thetas + 0.1, dtype=dtype) / num_thetas\n else:\n init_corr = []\n if mean_reversion.shape.rank > 1:\n init_corr = [[]] * mean_reversion.shape.rank\n initial_guess = tf.concat([_to_unconstrained(mean_reversion, mr_lb, mr_ub), _to_unconstrained(volatility, vol_lb, vol_ub), _to_unconstrained(init_corr, theta_lb, theta_ub)], axis=-1)\n scaled_target = _scale(target_values, target_lb, target_ub)\n\n @make_val_and_grad_fn\n def loss_function(x):\n \"\"\"Loss function for the optimization.\"\"\"\n x_mr = _to_constrained(x[..., :num_hjm_factors], mr_lb, mr_ub)\n x_vol = _to_constrained(x[..., num_hjm_factors:2 * num_hjm_factors], vol_lb, vol_ub)\n if calibrate_correlation:\n thetas = x[..., 2 * num_hjm_factors:]\n thetas = tfp.math.clip_by_value_preserve_gradient(thetas, -25.0, 25.0)\n x_corr = _correlation_matrix_using_hypersphere_decomposition(num_hjm_factors, _to_constrained(thetas, theta_lb, theta_ub))\n else:\n x_corr = None\n volatility_param = _make_hjm_volatility_fn(x_vol, dtype)\n model_values = swaption_price(expiries=expiries, fixed_leg_payment_times=fixed_leg_payment_times, fixed_leg_daycount_fractions=fixed_leg_daycount_fractions, fixed_leg_coupon=fixed_leg_coupon, reference_rate_fn=reference_rate_fn, num_hjm_factors=num_hjm_factors, mean_reversion=x_mr, volatility=volatility_param, corr_matrix=x_corr, notional=notional, is_payer_swaption=is_payer_swaption, valuation_method=swaption_valuation_method, num_samples=num_samples, random_type=random_type, seed=seed, skip=skip, times=times, time_step=time_step, num_time_steps=num_time_steps, curve_times=curve_times, time_step_finite_difference=time_step_finite_difference, num_grid_points_finite_difference=num_grid_points_finite_difference, dtype=dtype)\n if volatility_based_calibration:\n model_values = _price_to_normal_vol(model_values, swap_rate, annuity)\n model_values = tf.where(tf.math.is_nan(model_values), tf.constant(1e-07, dtype=dtype), model_values)\n value = tf.math.reduce_sum((_scale(model_values, target_lb, target_ub) - scaled_target) ** 2, axis=-1)\n return value\n optimization_result = optimizer_fn(loss_function, initial_position=initial_guess, tolerance=tolerance, max_iterations=maximum_iterations)\n calibrated_parameters = optimization_result.position\n mean_reversion_calibrated = 
_to_constrained(calibrated_parameters[..., :num_hjm_factors], mr_lb, mr_ub)\n volatility_calibrated = _to_constrained(calibrated_parameters[..., num_hjm_factors:2 * num_hjm_factors], vol_lb, vol_ub)\n if calibrate_correlation:\n correlation_calibrated = _correlation_matrix_using_hypersphere_decomposition(num_hjm_factors, _to_constrained(calibrated_parameters[..., 2 * num_hjm_factors:], theta_lb, theta_ub))\n else:\n correlation_calibrated = None\n return (CalibrationResult(mean_reversion=mean_reversion_calibrated, volatility=volatility_calibrated, corr_matrix=correlation_calibrated), optimization_result.converged, optimization_result.num_iterations)", "docstring": "Calibrates a batch of HJM models using European Swaption prices.\n\nThis function estimates the mean-reversion rates, volatility and correlation\nparameters of a multi factor HJM model using a set of European swaption\nprices as the target. The calibration is performed using least-squares\noptimization where the loss function minimizes the squared error between the\ntarget swaption prices (or volatilities) and the model implied swaption\nprices (or volatilities). The current calibration supports constant mean\nreversion, volatility and correlation parameters.\n\n#### Example\nThe example shows how to calibrate a Two factor HJM model with constant mean\nreversion rate and constant volatility.\n\n````python\nimport numpy as np\nimport tensorflow.compat.v2 as tf\nimport tf_quant_finance as tff\n\ndtype = tf.float64\n\nexpiries = np.array(\n [0.5, 0.5, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0, 10., 10.])\nfloat_leg_start_times = np.array([\n [0.5, 1.0, 1.5, 2.0, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5], # 6M x 2Y swap\n [0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0], # 6M x 5Y swap\n [1.0, 1.5, 2.0, 2.5, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0], # 1Y x 2Y swap\n [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5], # 1Y x 5Y swap\n [2.0, 2.5, 3.0, 3.5, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0], # 2Y x 2Y swap\n [2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5], # 2Y x 5Y swap\n [3.0, 3.5, 4.0, 4.5, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], # 3Y x 2Y swap\n [3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5], # 3Y x 5Y swap\n [4.0, 4.5, 5.0, 5.5, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0], # 4Y x 2Y swap\n [4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5], # 4Y x 5Y swap\n [5.0, 5.5, 6.0, 6.5, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0], # 5Y x 2Y swap\n [5.0, 5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5], # 5Y x 5Y swap\n [10.0, 10.5, 11.0, 11.5, 12.0, 12.0, 12.0, 12.0, 12.0,\n 12.0], # 10Y x 2Y swap\n [10.0, 10.5, 11.0, 11.5, 12.0, 12.5, 13.0, 13.5, 14.0,\n 14.5] # 10Y x 5Y swap\n])\nfloat_leg_end_times = float_leg_start_times + 0.5\nmax_maturities = np.array(\n [2.5, 5.5, 3.0, 6.0, 4., 7., 5., 8., 6., 9., 7., 10., 12., 15.])\nfor i in range(float_leg_end_times.shape[0]):\n float_leg_end_times[i] = np.clip(\n float_leg_end_times[i], 0.0, max_maturities[i])\n\nfixed_leg_payment_times = float_leg_end_times\nfloat_leg_daycount_fractions = (\n float_leg_end_times - float_leg_start_times)\nfixed_leg_daycount_fractions = float_leg_daycount_fractions\nfixed_leg_coupon = 0.01 * np.ones_like(fixed_leg_payment_times)\n\nzero_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)\nnotional = 1.0\nprices = np.array([\n 0.42919881, 0.98046542, 0.59045074, 1.34909391, 0.79491583,\n 1.81768802, 0.93210461, 2.13625342, 1.05114573, 2.40921088,\n 1.12941064, 2.58857507, 1.37029637, 3.15081683])\n\n(calibrated_mr, calibrated_vol, calibrated_corr), _, _ = (\ntff.models.hjm.calibration_from_swaptions(\n 
prices=prices,\n expiries=expiries,\n floating_leg_start_times=float_leg_start_times,\n floating_leg_end_times=float_leg_end_times,\n fixed_leg_payment_times=fixed_leg_payment_times,\n floating_leg_daycount_fractions=float_leg_daycount_fractions,\n fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n reference_rate_fn=zero_rate_fn,\n notional=100.,\n mean_reversion=[0.01, 0.01], # Initial guess for mean reversion rate\n volatility=[0.005, 0.004], # Initial guess for volatility\n volatility_based_calibration=True,\n calibrate_correlation=True,\n num_samples=2000,\n time_step=0.1,\n random_type=random.RandomType.STATELESS_ANTITHETIC,\n seed=[0,0],\n maximum_iterations=50,\n dtype=dtype))\n# Expected calibrated_mr: [0.00621303, 0.3601772]\n# Expected calibrated_vol: [0.00586125, 0.00384013]\n# Expected correlation: 0.65126492\n# Prices using calibrated model: [\n 0.42939121, 0.95362327, 0.59186236, 1.32622752, 0.79575526,\n 1.80457544, 0.93909176, 2.14336776, 1.04132595, 2.39385229,\n 1.11770416, 2.58809336, 1.39557389, 3.29306317]\n````\n\nArgs:\n prices: An N-D real `Tensor` of shape `batch_shape + [k]`. `batch_shape` is\n the shape of the batch of models to calibrate and `k` is the number of\n swaptions per calibration. The input represents the prices of swaptions\n used for calibration.\n expiries: A real `Tensor` of same shape and dtype as `prices`. The time to\n expiration of the swaptions.\n floating_leg_start_times: A real `Tensor` of the same dtype as `prices`. The\n times when accrual begins for each payment in the floating leg. The shape\n of this input should be `expiries.shape + [m]` where `m` denotes the\n number of floating payments in each leg.\n floating_leg_end_times: A real `Tensor` of the same dtype as `prices`. The\n times when accrual ends for each payment in the floating leg. The shape of\n this input should be `expiries.shape + [m]` where `m` denotes the number\n of floating payments in each leg.\n fixed_leg_payment_times: A real `Tensor` of the same dtype as `prices`. The\n payment times for each payment in the fixed leg. The shape of this input\n should be `expiries.shape + [n]` where `n` denotes the number of fixed\n payments in each leg.\n floating_leg_daycount_fractions: A real `Tensor` of the same dtype and\n compatible shape as `floating_leg_start_times`. The daycount fractions for\n each payment in the floating leg.\n fixed_leg_daycount_fractions: A real `Tensor` of the same dtype and\n compatible shape as `fixed_leg_payment_times`. The daycount fractions for\n each payment in the fixed leg.\n fixed_leg_coupon: A real `Tensor` of the same dtype and compatible shape as\n `fixed_leg_payment_times`. The fixed rate for each payment in the fixed\n leg.\n reference_rate_fn: A Python callable that accepts expiry time as a real\n `Tensor` and returns a `Tensor` of shape `input_shape`. Returns\n the continuously compounded zero rate at the present time for the input\n expiry time.\n num_hjm_factors: A Python scalar which corresponds to the number of factors\n in the batch of calibrated HJM models.\n mean_reversion: A real positive `Tensor` of same dtype as `prices` and shape\n `batch_shape + [num_hjm_factors]`. Corresponds to the initial values of\n the mean reversion rates of the factors for calibration.\n volatility: A real positive `Tensor` of the same `dtype` and shape as\n `mean_reversion`. 
Corresponds to the initial values of the volatility of\n the factors for calibration.\n notional: An optional `Tensor` of same dtype and compatible shape as\n `prices`, specifying the notional amount for the underlying swaps.\n Default value: `None`, in which case the notional is set to 1.\n is_payer_swaption: A boolean `Tensor` of a shape compatible with `expiries`.\n Indicates whether the prices correspond to payer (if True) or receiver (if\n False) swaptions. If not supplied, payer swaptions are assumed.\n swaption_valuation_method: An enum of type\n `valuation_method.ValuationMethod` specifying the method to be used for\n swaption valuation during calibration. Currently valuation is supported\n using the `MONTE_CARLO` and `FINITE_DIFFERENCE` methods. Valuation\n using finite difference is only supported for Gaussian HJM models, i.e.\n for models with constant mean-reversion rate and time-dependent\n volatility.\n Default value: `valuation_method.ValuationMethod.MONTE_CARLO`, in which\n case swaption valuation is done using Monte Carlo simulations.\n num_samples: Positive scalar `int32` `Tensor`. The number of simulation\n paths during Monte-Carlo valuation of swaptions. This input is ignored\n during analytic valuation.\n Default value: 1.\n random_type: Enum value of `RandomType`. The type of (quasi)-random number\n generator to use to generate the simulation paths. This input is relevant\n only for Monte-Carlo valuation and ignored during analytic valuation.\n Default value: `None` which maps to the standard pseudo-random numbers.\n seed: Seed for the random number generator. The seed is only relevant if\n `random_type` is one of `[STATELESS, PSEUDO, HALTON_RANDOMIZED,\n PSEUDO_ANTITHETIC, STATELESS_ANTITHETIC]`. For `PSEUDO`,\n `PSEUDO_ANTITHETIC` and `HALTON_RANDOMIZED` the seed should be a Python\n integer. For `STATELESS` and `STATELESS_ANTITHETIC` the seed must be\n supplied as an integer `Tensor` of shape `[2]`. This input is relevant\n only for Monte-Carlo valuation and ignored during analytic valuation.\n Default value: `None` which means no seed is set.\n skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or\n Halton sequence to skip. Used only when `random_type` is 'SOBOL',\n 'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.\n Default value: `0`.\n times: An optional rank 1 `Tensor` of increasing positive real values. The\n times at which Monte Carlo simulations are performed. Relevant when\n swaption valuation is done using Monte Carlo simulations.\n Default value: `None` in which case simulation times are computed based\n on either the `time_step` or `num_time_steps` inputs.\n time_step: Scalar real `Tensor`. Maximal distance between time grid points\n in the Euler scheme. Relevant when the Euler scheme is used for\n simulation. This input is ignored during analytic valuation.\n Default value: `None`.\n num_time_steps: An optional scalar integer `Tensor`. The total number of\n time steps during Monte Carlo simulations. The maximal distance between\n points in the grid is bounded by\n `times[-1] / (num_time_steps - times.shape[0])`.\n Either this or `time_step` should be supplied when the valuation method\n is Monte Carlo.\n Default value: `None`.\n curve_times: An optional rank 1 `Tensor` of positive real values. 
The\n maturities at which the spot discount curve is computed during simulations.\n Default value: `None` in which case `curve_times` is computed based on the\n swaption expiries and `fixed_leg_payment_times` inputs.\n time_step_finite_difference: Scalar real `Tensor`. Spacing between time\n grid points in the finite difference discretization. This input is only\n relevant for valuation using finite difference.\n Default value: `None`, in which case a `time_step` corresponding to 100\n discrete steps is used.\n num_grid_points_finite_difference: Scalar real `Tensor`. Number of spatial\n grid points for discretization. This input is only relevant for valuation\n using finite difference.\n Default value: 100.\n volatility_based_calibration: An optional Python boolean specifying whether\n calibration is performed using swaption implied volatilities. If the input\n is `True`, then the swaption prices are first converted to normal implied\n volatilities and calibration is performed by minimizing the error between\n the input implied volatilities and the model implied volatilities.\n Default value: True.\n calibrate_correlation: An optional Python boolean specifying if the\n correlation matrix between HJM factors should be calibrated. If the input\n is `False`, then the model is calibrated assuming that the HJM factors are\n uncorrelated.\n Default value: True.\n optimizer_fn: Optional Python callable which implements the algorithm used\n to minimize the objective function during calibration. It should have\n the following interface:\n result = optimizer_fn(value_and_gradients_function, initial_position,\n tolerance, max_iterations)\n `value_and_gradients_function` is a Python callable that accepts a point\n as a real `Tensor` and returns a tuple of `Tensor`s of real dtype\n containing the value of the function and its gradient at that point.\n `initial_position` is a real `Tensor` containing the starting point of\n the optimization, `tolerance` is a real scalar `Tensor` specifying the\n stopping tolerance for the procedure and `max_iterations` specifies the\n maximum number of iterations.\n `optimizer_fn` should return a namedtuple containing the items: `position`\n (a tensor containing the optimal value), `converged` (a boolean\n indicating whether the optimizer converged according to the specified\n criteria), `failed` (a boolean indicating if the optimization resulted\n in a failure), `num_iterations` (the number of iterations used), and\n `objective_value` (the value of the objective function at the optimal\n value). A sketch of a conforming `optimizer_fn` is shown after this\n docstring.\n Default value: `None`, in which case the conjugate gradient algorithm is\n used.\n mean_reversion_lower_bound: An optional scalar `Tensor` specifying the lower\n limit of the mean reversion rate during calibration.\n Default value: 0.001.\n mean_reversion_upper_bound: An optional scalar `Tensor` specifying the upper\n limit of the mean reversion rate during calibration.\n Default value: 0.5.\n volatility_lower_bound: An optional scalar `Tensor` specifying the lower\n limit of volatility during calibration.\n Default value: 0.00001 (0.1 basis points).\n volatility_upper_bound: An optional scalar `Tensor` specifying the upper\n limit of volatility during calibration.\n Default value: 0.1.\n tolerance: Scalar `Tensor` of real dtype. The absolute tolerance for\n terminating the iterations.\n Default value: 1e-6.\n maximum_iterations: Scalar positive int32 `Tensor`. 
The maximum number of\n iterations during the optimization.\n Default value: 50.\n dtype: The default dtype to use when converting values to `Tensor`s.\n Default value: `None` which means that default dtypes inferred by\n TensorFlow are used.\n name: Python string. The name to give to the ops created by this function.\n Default value: `None` which maps to the default name\n `hjm_swaption_calibration`.\n\nReturns:\n A tuple of three elements:\n * The first element is an instance of `CalibrationResult` whose parameters\n are calibrated to the input swaption prices.\n * A `Tensor` of optimization status for each batch element (whether the\n optimization algorithm has found the optimal point based on the specified\n convergence criteria).\n * A `Tensor` containing the number of iterations performed by the\n optimization algorithm."}
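The tail of the function above maps the optimizer's unconstrained parameters back into their `[lower, upper]` bounds via `_to_constrained` before assembling the `CalibrationResult`. The helper below is a minimal sketch of one common way to implement such a bound transform (a scaled sigmoid plus its logit inverse); the library's actual `_to_constrained` is not shown in this snippet and may differ.

````python
# Hypothetical sketch of a bound transform in the spirit of the
# `_to_constrained` helper used above; the real implementation is not
# shown in this snippet and may use a different squashing function.
import tensorflow.compat.v2 as tf


def to_constrained(x, lower_bound, upper_bound):
  """Maps unconstrained reals into (lower_bound, upper_bound)."""
  # Sigmoid squashes the real line into (0, 1); the affine rescaling
  # then moves the result into the requested interval.
  return lower_bound + (upper_bound - lower_bound) * tf.math.sigmoid(x)


def to_unconstrained(y, lower_bound, upper_bound):
  """Inverse transform: maps (lower_bound, upper_bound) back to reals."""
  z = (y - lower_bound) / (upper_bound - lower_bound)
  return tf.math.log(z) - tf.math.log(1.0 - z)  # logit of the rescaled value
````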
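The `optimizer_fn` contract is described only abstractly in the docstring. As a minimal sketch, assuming TensorFlow Probability is installed, an L-BFGS-based optimizer can be dropped in for the default conjugate gradient method, because `tfp.optimizer.lbfgs_minimize` already returns a namedtuple carrying the required `position`, `converged`, `failed`, `num_iterations` and `objective_value` fields.

````python
# A minimal sketch of a conforming `optimizer_fn`, assuming TensorFlow
# Probability is available. LBfgsOptimizerResults already exposes the
# `position`, `converged`, `failed`, `num_iterations` and
# `objective_value` fields the calibration routine expects.
import tensorflow_probability as tfp


def lbfgs_optimizer_fn(value_and_gradients_function, initial_position,
                       tolerance, max_iterations):
  return tfp.optimizer.lbfgs_minimize(
      value_and_gradients_function=value_and_gradients_function,
      initial_position=initial_position,
      tolerance=tolerance,
      max_iterations=max_iterations)

# Usage: tff.models.hjm.calibration_from_swaptions(
#     ..., optimizer_fn=lbfgs_optimizer_fn, ...)
````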