language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
scipy__scipy
scipy/signal/_czt.py
{ "start": 3259, "end": 8797 }
class ____: """ Create a callable chirp z-transform function. Transform to compute the frequency response around a spiral. Objects of this class are callables which can compute the chirp z-transform on their inputs. This object precalculates the constant chirps used in the given transform. Parameters ---------- n : int The size of the signal. m : int, optional The number of output points desired. Default is `n`. w : complex, optional The ratio between points in each step. This must be precise or the accumulated error will degrade the tail of the output sequence. Defaults to equally spaced points around the entire unit circle. a : complex, optional The starting point in the complex plane. Default is 1+0j. Returns ------- f : CZT Callable object ``f(x, axis=-1)`` for computing the chirp z-transform on `x`. See Also -------- czt : Convenience function for quickly calculating CZT. ZoomFFT : Class that creates a callable partial FFT function. Notes ----- The defaults are chosen such that ``f(x)`` is equivalent to ``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, m)`` is equivalent to ``fft.fft(x, m)``. If `w` does not lie on the unit circle, then the transform will be around a spiral with exponentially-increasing radius. Regardless, angle will increase linearly. For transforms that do lie on the unit circle, accuracy is better when using `ZoomFFT`, since any numerical error in `w` is accumulated for long data lengths, drifting away from the unit circle. The chirp z-transform can be faster than an equivalent FFT with zero padding. Try it with your own array sizes to see. However, the chirp z-transform is considerably less precise than the equivalent zero-padded FFT. As this CZT is implemented using the Bluestein algorithm, it can compute large prime-length Fourier transforms in O(N log N) time, rather than the O(N**2) time required by the direct DFT calculation. (`scipy.fft` also uses Bluestein's algorithm'.) 
(The name "chirp z-transform" comes from the use of a chirp in the Bluestein algorithm. It does not decompose signals into chirps, like other transforms with "chirp" in the name.) References ---------- .. [1] Leo I. Bluestein, "A linear filtering approach to the computation of the discrete Fourier transform," Northeast Electronics Research and Engineering Meeting Record 10, 218-219 (1968). .. [2] Rabiner, Schafer, and Rader, "The chirp z-transform algorithm and its application," Bell Syst. Tech. J. 48, 1249-1292 (1969). Examples -------- Compute multiple prime-length FFTs: >>> from scipy.signal import CZT >>> import numpy as np >>> a = np.random.rand(7) >>> b = np.random.rand(7) >>> c = np.random.rand(7) >>> czt_7 = CZT(n=7) >>> A = czt_7(a) >>> B = czt_7(b) >>> C = czt_7(c) Display the points at which the FFT is calculated: >>> czt_7.points() array([ 1.00000000+0.j , 0.62348980+0.78183148j, -0.22252093+0.97492791j, -0.90096887+0.43388374j, -0.90096887-0.43388374j, -0.22252093-0.97492791j, 0.62348980-0.78183148j]) >>> import matplotlib.pyplot as plt >>> plt.plot(czt_7.points().real, czt_7.points().imag, 'o') >>> plt.gca().add_patch(plt.Circle((0,0), radius=1, fill=False, alpha=.3)) >>> plt.axis('equal') >>> plt.show() """ def __init__(self, n, m=None, w=None, a=1+0j): m = _validate_sizes(n, m) k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2)) if w is None: # Nothing specified, default to FFT-like w = cmath.exp(-2j*pi/m) wk2 = np.exp(-(1j * pi * ((k**2) % (2*m))) / m) else: # w specified wk2 = w**(k**2/2.) a = 1.0 * a # at least float self.w, self.a = w, a self.m, self.n = m, n nfft = next_fast_len(n + m - 1) self._Awk2 = a**-k[:n] * wk2[:n] self._nfft = nfft self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft) self._wk2 = wk2[:m] self._yidx = slice(n-1, n+m-1) def __call__(self, x, *, axis=-1): """ Calculate the chirp z-transform of a signal. Parameters ---------- x : array The signal to transform. 
axis : int, optional Axis over which to compute the FFT. If not given, the last axis is used. Returns ------- out : ndarray An array of the same dimensions as `x`, but with the length of the transformed axis set to `m`. """ x = np.asarray(x) if x.shape[axis] != self.n: raise ValueError(f"CZT defined for length {self.n}, not " f"{x.shape[axis]}") # Calculate transpose coordinates, to allow operation on any given axis trnsp = np.arange(x.ndim) trnsp[[axis, -1]] = [-1, axis] x = x.transpose(*trnsp) y = ifft(self._Fwk2 * fft(x*self._Awk2, self._nfft)) y = y[..., self._yidx] * self._wk2 return y.transpose(*trnsp) def points(self): """ Return the points at which the chirp z-transform is computed. """ return czt_points(self.m, self.w, self.a)
CZT
python
sphinx-doc__sphinx
tests/roots/test-root/autodoc_target.py
{ "start": 730, "end": 1298 }
class ____(CustomDataDescriptor): """Descriptor class with custom metaclass docstring.""" __metaclass__ = CustomDataDescriptorMeta def _funky_classmethod(name, b, c, d, docstring=None): """Generates a classmethod for a class from a template by filling out some arguments.""" def template(cls, a, b, c, d=4, e=5, f=6): return a, b, c, d, e, f from functools import partial function = partial(template, b=b, c=c, d=d) function.__name__ = name function.__doc__ = docstring return classmethod(function)
CustomDataDescriptor2
python
numba__numba
numba/core/byteflow.py
{ "start": 1032, "end": 1908 }
class ____(object): """Kinds of block to make related code safer than just `str`. """ _members = frozenset({ 'LOOP', 'TRY', 'EXCEPT', 'FINALLY', 'WITH', 'WITH_FINALLY', }) def __init__(self, value): assert value in self._members self._value = value def __hash__(self): return hash((type(self), self._value)) def __lt__(self, other): if isinstance(other, BlockKind): return self._value < other._value else: raise TypeError('cannot compare to {!r}'.format(type(other))) def __eq__(self, other): if isinstance(other, BlockKind): return self._value == other._value else: raise TypeError('cannot compare to {!r}'.format(type(other))) def __repr__(self): return "BlockKind({})".format(self._value)
BlockKind
python
ray-project__ray
python/ray/data/datasource/file_datasink.py
{ "start": 6925, "end": 9136 }
class ____(_FileDatasink): """A datasink that writes one row to each file. Subclasses must implement ``write_row_to_file`` and call the superclass constructor. Examples: .. testcode:: import io from typing import Any, Dict import pyarrow from PIL import Image from ray.data.datasource import RowBasedFileDatasink class ImageDatasink(RowBasedFileDatasink): def __init__(self, path: str, *, column: str, file_format: str = "png"): super().__init__(path, file_format=file_format) self._file_format = file_format self._column = column def write_row_to_file(self, row: Dict[str, Any], file: "pyarrow.NativeFile"): image = Image.fromarray(row[self._column]) buffer = io.BytesIO() image.save(buffer, format=self._file_format) file.write(buffer.getvalue()) """ # noqa: E501 def write_row_to_file(self, row: Dict[str, Any], file: "pyarrow.NativeFile"): """Write a row to a file. Args: row: The row to write. file: The file to write the row to. """ raise NotImplementedError def write_block(self, block: BlockAccessor, block_index: int, ctx: TaskContext): for row_index, row in enumerate(block.iter_rows(public_row_format=False)): filename = self.filename_provider.get_filename_for_row( row, ctx.kwargs[WRITE_UUID_KWARG_NAME], ctx.task_idx, block_index, row_index, ) write_path = posixpath.join(self.path, filename) logger.debug(f"Writing {write_path} file.") def write_row_to_path(): with self.open_output_stream(write_path) as file: self.write_row_to_file(row, file) call_with_retry( write_row_to_path, description=f"write '{write_path}'", match=self._data_context.retried_io_errors, ) @DeveloperAPI
RowBasedFileDatasink
python
bokeh__bokeh
src/bokeh/models/misc/group_by.py
{ "start": 1749, "end": 2092 }
class ____(GroupBy): """ Group models by manually predefined groups. """ # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) groups = Required(List(List(Instance(Model))), help=""" Predefined groups of models. """)
GroupByModels
python
pypa__pip
src/pip/_internal/resolution/base.py
{ "start": 257, "end": 577 }
class ____: def resolve( self, root_reqs: list[InstallRequirement], check_supported_wheels: bool ) -> RequirementSet: raise NotImplementedError() def get_installation_order( self, req_set: RequirementSet ) -> list[InstallRequirement]: raise NotImplementedError()
BaseResolver
python
django__django
tests/m2m_regress/models.py
{ "start": 2462, "end": 2624 }
class ____(models.Model): primary_lines = models.ManyToManyField(Line, related_name="+") secondary_lines = models.ManyToManyField(Line, related_name="+")
Post
python
tensorflow__tensorflow
tensorflow/python/data/experimental/kernel_tests/service/distributed_save_test.py
{ "start": 1520, "end": 15652 }
class ____( data_service_test_base.TestBase, parameterized.TestCase): @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine(num_workers=[1, 3], num_elements=[0, 10, 1000]))) def testSaveLoad(self, num_workers, num_elements): cluster = data_service_test_base.TestCluster(num_workers=num_workers) snapshot_dir = data_service_test_base.TempDir() dataset = dataset_ops.Dataset.range(num_elements) self.evaluate(distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) _wait_for_snapshot(snapshot_dir.full_path) dataset = dataset_ops.Dataset.load(snapshot_dir.full_path) self.assertDatasetProduces( dataset, list(range(num_elements)), assert_items_equal=True) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine(compression=[None, "AUTO", "GZIP"]))) def testCompression(self, compression): cluster = data_service_test_base.TestCluster(num_workers=1) snapshot_dir = data_service_test_base.TempDir() dataset = dataset_ops.Dataset.range(10) self.evaluate(distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address(), compression=compression)) _wait_for_snapshot(snapshot_dir.full_path) dataset = dataset_ops.Dataset.load(snapshot_dir.full_path) self.assertDatasetProduces( dataset, list(range(10)), assert_items_equal=True) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine(num_workers=[1, 3], num_repetitions=[1, 5]))) def testRepeatedDataset(self, num_workers, num_repetitions): cluster = data_service_test_base.TestCluster(num_workers=num_workers) snapshot_dir = data_service_test_base.TempDir() dataset = dataset_ops.Dataset.range(1000) dataset = dataset.repeat(num_repetitions) self.evaluate(distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) _wait_for_snapshot(snapshot_dir.full_path) dataset = 
dataset_ops.Dataset.load(snapshot_dir.full_path) self.assertDatasetProduces( dataset, list(range(1000)) * num_repetitions, assert_items_equal=True) @combinations.generate(test_base.default_test_combinations()) def testChooseFromDatasets(self): cluster = data_service_test_base.TestCluster(num_workers=1) snapshot_dir = data_service_test_base.TempDir() datasets = [ dataset_ops.Dataset.from_tensor_slices(["a", "a", "a", "a", "a"]), dataset_ops.Dataset.from_tensor_slices(["b", "b", "b", "b", "b"]), dataset_ops.Dataset.from_tensor_slices(["c", "c", "c", "c", "c"])] choice_dataset = dataset_ops.Dataset.range(3).repeat() dataset = dataset_ops.Dataset.choose_from_datasets(datasets, choice_dataset) self.evaluate(distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) dataset = dataset_ops.Dataset.load(snapshot_dir.full_path) self.assertDatasetProduces( dataset, [b"a", b"b", b"c"] * 5, assert_items_equal=True) @combinations.generate(test_base.default_test_combinations()) def testChooseFromRepeatedDatasets(self): cluster = data_service_test_base.TestCluster(num_workers=1) snapshot_dir = data_service_test_base.TempDir() datasets = [ dataset_ops.Dataset.from_tensors("a").repeat(5), dataset_ops.Dataset.from_tensors("b").repeat(5), dataset_ops.Dataset.from_tensors("c").repeat(10)] choice_dataset = dataset_ops.Dataset.range(3).repeat() dataset = dataset_ops.Dataset.choose_from_datasets( datasets, choice_dataset, stop_on_empty_dataset=False) self.evaluate(distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) _wait_for_snapshot(snapshot_dir.full_path) dataset = dataset_ops.Dataset.load(snapshot_dir.full_path) self.assertDatasetProduces( dataset, [b"a", b"b", b"c"] * 5 + [b"c"] * 5, assert_items_equal=True) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine(num_workers=[1, 3]))) def testWriteMultipleDatasets(self, num_workers): cluster = 
data_service_test_base.TestCluster(num_workers=num_workers) snapshot_dir = data_service_test_base.TempDir() dataset1 = dataset_ops.Dataset.range(100) datasets = [ dataset_ops.Dataset.from_tensors("a").repeat(5), dataset_ops.Dataset.from_tensors("b").repeat(5), dataset_ops.Dataset.from_tensors("c").repeat(5)] choice_dataset = dataset_ops.Dataset.range(3).repeat() dataset2 = dataset_ops.Dataset.choose_from_datasets( datasets, choice_dataset) snapshot_path1 = os.path.join(snapshot_dir.full_path, "snapshot1") snapshot_path2 = os.path.join(snapshot_dir.full_path, "snapshot2") self.evaluate( distributed_save_op.distributed_save( dataset1, snapshot_path1, cluster.dispatcher_address())) self.evaluate( distributed_save_op.distributed_save( dataset2, snapshot_path2, cluster.dispatcher_address())) _wait_for_snapshot(snapshot_path1) _wait_for_snapshot(snapshot_path2) dataset1 = dataset_ops.Dataset.load(snapshot_path1) self.assertDatasetProduces( dataset1, list(range(100)), assert_items_equal=True) self.assertDatasetProduces( dataset2, [b"a", b"b", b"c"] * 5, assert_items_equal=True) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine( num_workers=[1, 3], snapshot_max_chunk_size_bytes=[1, 100]))) def testLoadWithCustomReaderFunc( self, num_workers, snapshot_max_chunk_size_bytes): cluster = data_service_test_base.TestCluster( num_workers=num_workers, snapshot_max_chunk_size_bytes=snapshot_max_chunk_size_bytes) snapshot_dir = data_service_test_base.TempDir() dataset = dataset_ops.Dataset.range(10) self.evaluate( distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) _wait_for_snapshot(snapshot_dir.full_path) def custom_reader_func(datasets): datasets = datasets.shuffle(3) return datasets.interleave( lambda x: x, num_parallel_calls=dataset_ops.AUTOTUNE) dataset = dataset_ops.Dataset.load( snapshot_dir.full_path, reader_func=custom_reader_func) self.assertDatasetProduces( dataset, 
list(range(10)), assert_items_equal=True) @combinations.generate( combinations.times( test_base.default_test_combinations(), combinations.combine( num_workers=[1, 3], repeated_load=[1, 5], sharding_policy=[ data_service_ops.ShardingPolicy.OFF, data_service_ops.ShardingPolicy.DYNAMIC]))) def testDistributedLoad(self, num_workers, repeated_load, sharding_policy): cluster = data_service_test_base.TestCluster(num_workers=num_workers) snapshot_dir = data_service_test_base.TempDir() dataset = dataset_ops.Dataset.range(10) self.evaluate( distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) _wait_for_snapshot(snapshot_dir.full_path) dataset = dataset_ops.Dataset.load(snapshot_dir.full_path) if repeated_load > 1: dataset = dataset.repeat(repeated_load) dataset = dataset.apply( data_service_ops.distribute( processing_mode=sharding_policy, service=cluster.dispatcher_address())) expected = list(range(10)) * repeated_load if sharding_policy == data_service_ops.ShardingPolicy.OFF: expected *= num_workers self.assertDatasetProduces(dataset, expected, assert_items_equal=True) @combinations.generate(test_base.default_test_combinations()) def testImbalancedZipAndRepeat(self): smaller_num_elements = 200 larger_num_elements = 1000 repetitions = 3 cluster = data_service_test_base.TestCluster(num_workers=1) snapshot_dir = data_service_test_base.TempDir() dataset1 = dataset_ops.Dataset.range(smaller_num_elements) dataset2 = dataset_ops.Dataset.range(larger_num_elements) dataset = dataset_ops.Dataset.zip((dataset1, dataset2)) dataset = dataset.repeat(repetitions) self.evaluate( distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) _wait_for_snapshot(snapshot_dir.full_path) dataset = dataset_ops.Dataset.load(snapshot_dir.full_path) expected = repetitions * ( list(zip(range(smaller_num_elements), range(smaller_num_elements)))) self.assertDatasetProduces(dataset, expected, 
assert_items_equal=True) @combinations.generate(test_base.default_test_combinations()) def testSnapshotDoesNotExist(self): cluster = data_service_test_base.TestCluster(num_workers=1) snapshot_dir = data_service_test_base.TempDir() with self.assertRaises(errors.NotFoundError): dataset = dataset_ops.Dataset.load(snapshot_dir.full_path) dataset = dataset.apply( data_service_ops.distribute( data_service_ops.ShardingPolicy.OFF, cluster.dispatcher_address())) self.getDatasetOutput(dataset) @combinations.generate(test_base.default_test_combinations()) def testDuplicateSnapshot(self): cluster = data_service_test_base.TestCluster(num_workers=1) snapshot_dir = data_service_test_base.TempDir() dataset = dataset_ops.Dataset.range(10) with self.assertRaisesRegex( errors.AlreadyExistsError, "already started or completed"): self.evaluate( distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) self.evaluate( distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) @combinations.generate(test_base.default_test_combinations()) def testWorkerFailure(self): cluster = data_service_test_base.TestCluster(num_workers=1) snapshot_dir = data_service_test_base.TempDir() components = np.array([1.0, 2.0, 3.0, np.nan, 5.0]).astype(np.float32) dataset = dataset_ops.Dataset.from_tensor_slices(components) dataset = dataset.map(lambda x: array_ops.check_numerics(x, "message")) self.evaluate( distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) _wait_for_error(snapshot_dir.full_path) with self.assertRaisesRegex( ValueError, "The save job failed to write it."): dataset = dataset_ops.Dataset.load(snapshot_dir.full_path) self.getDatasetOutput(dataset) @combinations.generate(test_base.default_test_combinations()) def testBadDispatcherAddress(self): dataset = dataset_ops.Dataset.range(10) with self.assertRaisesRegex(ValueError, "must be a string"): 
self.evaluate(distributed_save_op.distributed_save(dataset, "", 1)) with self.assertRaisesRegex(ValueError, "must not be empty"): self.evaluate(distributed_save_op.distributed_save(dataset, "", "")) @combinations.generate(test_base.default_test_combinations()) def testBadCardinality(self): cluster = data_service_test_base.TestCluster(num_workers=1) snapshot_dir = data_service_test_base.TempDir() dataset = dataset_ops.Dataset.range(10).repeat() with self.assertRaisesRegex( errors.InvalidArgumentError, "Saving an infinite dataset is not allowed"): self.evaluate(distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address())) @combinations.generate(test_base.default_test_combinations()) def testBadElementSpec(self): cluster = data_service_test_base.TestCluster(num_workers=1) snapshot_dir = data_service_test_base.TempDir() dataset = dataset_ops.Dataset.range(10) self.evaluate(distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address(), compression="AUTO")) _wait_for_snapshot(snapshot_dir.full_path) with self.assertRaisesRegex( ValueError, "User specified element_spec bad_element_spec, but the actual " "element_spec is TensorSpec"): _ = dataset_ops.Dataset.load(snapshot_dir.full_path, element_spec="bad_element_spec") @combinations.generate(test_base.default_test_combinations()) def testBadCompression(self): cluster = data_service_test_base.TestCluster(num_workers=1) snapshot_dir = data_service_test_base.TempDir() dataset = dataset_ops.Dataset.range(10) self.evaluate(distributed_save_op.distributed_save( dataset, snapshot_dir.full_path, cluster.dispatcher_address(), compression="AUTO")) _wait_for_snapshot(snapshot_dir.full_path) with self.assertRaisesRegex( ValueError, "User specified compression ZLIB, but the actual compression is " "SNAPPY."): _ = dataset_ops.Dataset.load(snapshot_dir.full_path, compression="ZLIB") @combinations.generate(test_base.default_test_combinations()) def 
testRequiresFaultTolerantMode(self): cluster = data_service_test_base.TestCluster( num_workers=1, fault_tolerant_mode=False) snapshot_dir = data_service_test_base.TempDir() with self.assertRaisesRegex( errors.InvalidArgumentError, "tf.data distributed snapshot requires running tf.data service in the " "fault tolerant mode."): self.evaluate(distributed_save_op.distributed_save( dataset_ops.Dataset.range(10), snapshot_dir.full_path, cluster.dispatcher_address(), compression="AUTO"))
DistributedSaveTest
python
dagster-io__dagster
python_modules/dagster-pipes/dagster_pipes/__init__.py
{ "start": 2907, "end": 3016 }
class ____(TypedDict): """A range of partition keys.""" start: str end: str
PipesPartitionKeyRange
python
huggingface__transformers
src/transformers/models/gemma3/modeling_gemma3.py
{ "start": 31093, "end": 37419 }
class ____(nn.Module): def __init__(self, config: Gemma3Config): super().__init__() self.mm_input_projection_weight = nn.Parameter( torch.zeros(config.vision_config.hidden_size, config.text_config.hidden_size) ) self.mm_soft_emb_norm = Gemma3RMSNorm( config.vision_config.hidden_size, eps=config.vision_config.layer_norm_eps ) self.patches_per_image = int(config.vision_config.image_size // config.vision_config.patch_size) self.tokens_per_side = int(config.mm_tokens_per_image**0.5) self.kernel_size = self.patches_per_image // self.tokens_per_side self.avg_pool = nn.AvgPool2d(kernel_size=self.kernel_size, stride=self.kernel_size) def forward(self, vision_outputs: torch.Tensor): batch_size, _, seq_length = vision_outputs.shape reshaped_vision_outputs = vision_outputs.transpose(1, 2) reshaped_vision_outputs = reshaped_vision_outputs.reshape( batch_size, seq_length, self.patches_per_image, self.patches_per_image ) reshaped_vision_outputs = reshaped_vision_outputs.contiguous() pooled_vision_outputs = self.avg_pool(reshaped_vision_outputs) pooled_vision_outputs = pooled_vision_outputs.flatten(2) pooled_vision_outputs = pooled_vision_outputs.transpose(1, 2) normed_vision_outputs = self.mm_soft_emb_norm(pooled_vision_outputs) projected_vision_outputs = torch.matmul(normed_vision_outputs, self.mm_input_projection_weight) return projected_vision_outputs.type_as(vision_outputs) def token_type_ids_mask_function( token_type_ids: Optional[torch.Tensor], image_group_ids: Optional[torch.Tensor], ) -> Optional[Callable]: """ This function adds the correct offsets to the `q_idx` and `kv_idx` as the torch API can only accept lengths, not start and end indices. 
""" # Do not return an additional mask in this case if token_type_ids is None: return None def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool: # If it's 1 for both query and key/value, we are in an image block # NOTE: static cache shape goes beyond input seq length, while token_type_ids.shape[1] == input seq length # Since vmap doesn't support `if statement` we workaround it with `torch.where` safe_q_idx = torch.where(q_idx < token_type_ids.shape[1], q_idx, 0) safe_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], kv_idx, 0) token_type_ids_at_q_idx = token_type_ids[batch_idx, safe_q_idx] token_type_ids_at_q_idx = torch.where(q_idx < token_type_ids.shape[1], token_type_ids_at_q_idx, 0) token_type_ids_at_kv_idx = token_type_ids[batch_idx, safe_kv_idx] token_type_ids_at_kv_idx = torch.where(kv_idx < token_type_ids.shape[1], token_type_ids_at_kv_idx, 0) image_group_ids_at_q_idx = image_group_ids[batch_idx, safe_q_idx] image_group_ids_at_q_idx = torch.where(q_idx < image_group_ids.shape[1], image_group_ids_at_q_idx, -1) image_group_ids_at_kv_idx = image_group_ids[batch_idx, safe_kv_idx] image_group_ids_at_kv_idx = torch.where(kv_idx < image_group_ids.shape[1], image_group_ids_at_kv_idx, -1) is_image_block = (token_type_ids_at_q_idx == 1) & (token_type_ids_at_kv_idx == 1) same_image_block = image_group_ids_at_q_idx == image_group_ids_at_kv_idx # This is bidirectional attention whenever we are dealing with image tokens return is_image_block & same_image_block return inner_mask def create_causal_mask_mapping( config: PreTrainedConfig, input_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor], cache_position: torch.Tensor, past_key_values: Optional[Cache], position_ids: Optional[torch.Tensor], token_type_ids: Optional[torch.Tensor] = None, pixel_values: Optional[torch.FloatTensor] = None, is_training: bool = False, **kwargs, ) -> dict: """ Overwrites the base `create_masks_for_generate` with `token_type_ids` masking to create the 
causal mask mapping for all kinds of forward passes. Gemma3 uses a bidirectional mask for images. Uses `pixel_values` as an optional input to disambiguate edge cases. """ if is_training and token_type_ids is None: raise ValueError("`token_type_ids` is required as a model input when training") mask_kwargs = { "config": config.get_text_config(), "input_embeds": input_embeds, "attention_mask": attention_mask, "cache_position": cache_position, "past_key_values": past_key_values, "position_ids": position_ids, } # NOTE: this `may_have_image_input` logic is not flawless, it fails when we're using a cache eagerly initialized # (e.g. compiled prefill) AND `pixel_values` are not provided (i.e. the image data is provided through other # means). Determining prefill in that case requires checking data values, which is not compile-compatible. may_have_image_input = past_key_values is None or not past_key_values.is_initialized or pixel_values is not None if token_type_ids is not None and may_have_image_input: # We need to pass an additional mask function to account for token type ids, and it needs to be an `or` (to # undo the causal masking) # First find where a new image block starts: 1 if image and previous not image # The images cannot attend to future images, but can attend to all prev images and to itself bidirectionally is_image = (token_type_ids == 1).to(cache_position.device) is_previous_image = nn.functional.pad(is_image, (1, 0), value=0)[:, :-1] new_image_start = is_image & ~is_previous_image image_group_ids = torch.cumsum(new_image_start.int(), dim=1) - 1 image_group_ids = torch.where(is_image, image_group_ids, -1) mask_kwargs["or_mask_function"] = token_type_ids_mask_function( token_type_ids.to(cache_position.device), image_group_ids ) return create_masks_for_generate(**mask_kwargs) @auto_docstring( custom_intro=""" The Base Gemma3 model which consists of a vision backbone and a language model without language modeling head., """ )
Gemma3MultiModalProjector
python
redis__redis-py
tests/test_data_structure.py
{ "start": 154, "end": 2662 }
class ____: def test_add_items(self): wlist = WeightedList() wlist.add("item1", 3.0) wlist.add("item2", 2.0) wlist.add("item3", 4.0) wlist.add("item4", 4.0) assert wlist.get_top_n(4) == [ ("item3", 4.0), ("item4", 4.0), ("item1", 3.0), ("item2", 2.0), ] def test_remove_items(self): wlist = WeightedList() wlist.add("item1", 3.0) wlist.add("item2", 2.0) wlist.add("item3", 4.0) wlist.add("item4", 4.0) assert wlist.remove("item2") == 2.0 assert wlist.remove("item4") == 4.0 assert wlist.get_top_n(4) == [("item3", 4.0), ("item1", 3.0)] def test_get_by_weight_range(self): wlist = WeightedList() wlist.add("item1", 3.0) wlist.add("item2", 2.0) wlist.add("item3", 4.0) wlist.add("item4", 4.0) assert wlist.get_by_weight_range(2.0, 3.0) == [("item1", 3.0), ("item2", 2.0)] def test_update_weights(self): wlist = WeightedList() wlist.add("item1", 3.0) wlist.add("item2", 2.0) wlist.add("item3", 4.0) wlist.add("item4", 4.0) assert wlist.get_top_n(4) == [ ("item3", 4.0), ("item4", 4.0), ("item1", 3.0), ("item2", 2.0), ] wlist.update_weight("item2", 5.0) assert wlist.get_top_n(4) == [ ("item2", 5.0), ("item3", 4.0), ("item4", 4.0), ("item1", 3.0), ] def test_thread_safety(self) -> None: """Test thread safety with concurrent operations""" wl = WeightedList() def worker(worker_id): for i in range(100): # Add items wl.add(f"item_{worker_id}_{i}", random.uniform(0, 100)) # Read operations try: length = len(wl) if length > 0: wl.get_top_n(min(5, length)) wl.get_by_weight_range(20, 80) except Exception as e: print(f"Error in worker {worker_id}: {e}") sleep(0.001) # Small delay # Run multiple workers concurrently with ThreadPoolExecutor(max_workers=5) as executor: futures = [executor.submit(worker, i) for i in range(5)] concurrent.futures.wait(futures) assert len(wl) == 500
TestWeightedList
python
google__jax
jax/_src/core.py
{ "start": 140240, "end": 140691 }
class ____(NamedTuple): print_shapes: bool = True source_info: bool = False name_stack: bool = False custom_pp_eqn_rules: bool = True print_effects: bool = False def _encode_digits_alphabetic(n: int) -> str: if n == -1: return '*' s = '' while len(s) == 0 or n: n, i = n // 26, n % 26 s = chr(97 + i % 26) + s return s # A JaxprPpContext allows us to globally uniquify variable names within nested # Jaxprs.
JaxprPpSettings
python
huggingface__transformers
src/transformers/models/timesformer/modeling_timesformer.py
{ "start": 1397, "end": 2593 }
class ____(nn.Module): """Image to Patch Embedding""" def __init__(self, config): super().__init__() image_size = config.image_size patch_size = config.patch_size image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.image_size = image_size self.patch_size = patch_size self.num_patches = num_patches self.projection = nn.Conv2d(config.num_channels, config.hidden_size, kernel_size=patch_size, stride=patch_size) def forward(self, pixel_values): batch_size, num_frames, num_channels, height, width = pixel_values.shape pixel_values = pixel_values.reshape(batch_size * num_frames, num_channels, height, width) embeddings = self.projection(pixel_values) patch_width = embeddings.size(-1) embeddings = embeddings.flatten(2).transpose(1, 2) return embeddings, num_frames, patch_width
TimesformerPatchEmbeddings
python
hyperopt__hyperopt
hyperopt/rdists.py
{ "start": 7005, "end": 8674 }
class ____:
    """Stats for Y = q * round(exp(X) / q) where X ~ N(mu, sigma)"""

    def __init__(self, mu, sigma, q):
        self.mu = float(mu)
        self.sigma = float(sigma)
        self.q = q
        # -- distfn for using the CDF
        self._norm_cdf = scipy.stats.norm(loc=self.mu, scale=self.sigma).cdf

    def in_domain(self, x):
        # Domain: nonnegative values lying (within float tolerance) on the
        # grid of multiples of q.
        on_grid = np.isclose(x, safe_int_cast(np.round(x / self.q)) * self.q)
        return np.logical_and(x >= 0, on_grid)

    def pmf(self, x):
        xs = np.atleast_1d(x)
        mask = self.in_domain(xs)
        xs_ok = xs[mask]
        out = np.zeros_like(xs, dtype=float)
        # P(Y == y) = CDF(log(y + q/2)) - CDF(log(y - q/2)); at y == 0 the
        # lower edge is -inf, so only the upper CDF term applies.
        probs = self._norm_cdf(np.log(xs_ok + 0.5 * self.q))
        nonzero = xs_ok != 0
        probs[nonzero] -= self._norm_cdf(np.log(xs_ok[nonzero] - 0.5 * self.q))
        out[mask] = probs
        if isinstance(x, np.ndarray):
            return out
        return float(out[0])

    def logpmf(self, x):
        probs = self.pmf(np.atleast_1d(x))
        assert np.all(probs >= 0)
        # log(0) -> -inf explicitly, avoiding a runtime warning.
        probs[probs == 0] = -np.inf
        probs[probs > 0] = np.log(probs[probs > 0])
        if isinstance(x, np.ndarray):
            return probs
        return float(probs)

    def rvs(self, size=()):
        draws = mtrand.normal(loc=self.mu, scale=self.sigma, size=size)
        return safe_int_cast(np.round(np.exp(draws) / self.q)) * self.q


def safe_int_cast(obj):
    """Truncate ndarray / list / scalar inputs to integer form."""
    if isinstance(obj, np.ndarray):
        return obj.astype("int")
    if isinstance(obj, list):
        return [int(i) for i in obj]
    return int(obj)


# -- non-empty last line for flake8
qlognormal_gen
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 50954, "end": 51224 }
class ____(BaseModel):
    # Fraction of missing points above which HNSW healing is skipped;
    # 0.0 turns healing off entirely (see the field description).
    healing_threshold: Optional[float] = Field(
        default=0.3,
        description="Enable HNSW healing if the ratio of missing points is no more than this value. To disable healing completely, set this value to `0.0`.",
    )
HnswGlobalConfig
python
plotly__plotly.py
plotly/graph_objs/scattersmith/marker/_colorbar.py
{ "start": 233, "end": 61749 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "scattersmith.marker" _path_str = "scattersmith.marker.colorbar" _valid_props = { "bgcolor", "bordercolor", "borderwidth", "dtick", "exponentformat", "labelalias", "len", "lenmode", "minexponent", "nticks", "orientation", "outlinecolor", "outlinewidth", "separatethousands", "showexponent", "showticklabels", "showtickprefix", "showticksuffix", "thickness", "thicknessmode", "tick0", "tickangle", "tickcolor", "tickfont", "tickformat", "tickformatstopdefaults", "tickformatstops", "ticklabeloverflow", "ticklabelposition", "ticklabelstep", "ticklen", "tickmode", "tickprefix", "ticks", "ticksuffix", "ticktext", "ticktextsrc", "tickvals", "tickvalssrc", "tickwidth", "title", "x", "xanchor", "xpad", "xref", "y", "yanchor", "ypad", "yref", } @property def bgcolor(self): """ Sets the color of padded area. The 'bgcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["bgcolor"] @bgcolor.setter def bgcolor(self, val): self["bgcolor"] = val @property def bordercolor(self): """ Sets the axis line color. The 'bordercolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["bordercolor"] @bordercolor.setter def bordercolor(self, val): self["bordercolor"] = val @property def borderwidth(self): """ Sets the width (in px) or the border enclosing this color bar. 
The 'borderwidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["borderwidth"] @borderwidth.setter def borderwidth(self, val): self["borderwidth"] = val @property def dtick(self): """ Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" The 'dtick' property accepts values of any type Returns ------- Any """ return self["dtick"] @dtick.setter def dtick(self, val): self["dtick"] = val @property def exponentformat(self): """ Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T (10^12). 
*SI extended* covers instead the full SI range from "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI extended* is used and the exponent is beyond the above ranges, the formatting rule will automatically be switched to the power notation. The 'exponentformat' property is an enumeration that may be specified as: - One of the following enumeration values: ['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended'] Returns ------- Any """ return self["exponentformat"] @exponentformat.setter def exponentformat(self, val): self["exponentformat"] = val @property def labelalias(self): """ Replacement text for specific tick or hover labels. For example using {US: 'USA', CA: 'Canada'} changes US to USA and CA to Canada. The labels we would have shown must match the keys exactly, after adding any tickprefix or ticksuffix. For negative numbers the minus sign symbol used (U+2212) is wider than the regular ascii dash. That means you need to use −1 instead of -1. labelalias can be used with any axis type, and both keys (if needed) and values (if desired) can include html- like tags or MathJax. The 'labelalias' property accepts values of any type Returns ------- Any """ return self["labelalias"] @labelalias.setter def labelalias(self, val): self["labelalias"] = val @property def len(self): """ Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. The 'len' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["len"] @len.setter def len(self, val): self["len"] = val @property def lenmode(self): """ Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. 
The 'lenmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['fraction', 'pixels'] Returns ------- Any """ return self["lenmode"] @lenmode.setter def lenmode(self, val): self["lenmode"] = val @property def minexponent(self): """ Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". The 'minexponent' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["minexponent"] @minexponent.setter def minexponent(self, val): self["minexponent"] = val @property def nticks(self): """ Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". The 'nticks' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [0, 9223372036854775807] Returns ------- int """ return self["nticks"] @nticks.setter def nticks(self, val): self["nticks"] = val @property def orientation(self): """ Sets the orientation of the colorbar. The 'orientation' property is an enumeration that may be specified as: - One of the following enumeration values: ['h', 'v'] Returns ------- Any """ return self["orientation"] @orientation.setter def orientation(self, val): self["orientation"] = val @property def outlinecolor(self): """ Sets the axis line color. The 'outlinecolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 
'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["outlinecolor"] @outlinecolor.setter def outlinecolor(self, val): self["outlinecolor"] = val @property def outlinewidth(self): """ Sets the width (in px) of the axis line. The 'outlinewidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["outlinewidth"] @outlinewidth.setter def outlinewidth(self, val): self["outlinewidth"] = val @property def separatethousands(self): """ If "true", even 4-digit integers are separated The 'separatethousands' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["separatethousands"] @separatethousands.setter def separatethousands(self, val): self["separatethousands"] = val @property def showexponent(self): """ If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. The 'showexponent' property is an enumeration that may be specified as: - One of the following enumeration values: ['all', 'first', 'last', 'none'] Returns ------- Any """ return self["showexponent"] @showexponent.setter def showexponent(self, val): self["showexponent"] = val @property def showticklabels(self): """ Determines whether or not the tick labels are drawn. The 'showticklabels' property must be specified as a bool (either True, or False) Returns ------- bool """ return self["showticklabels"] @showticklabels.setter def showticklabels(self, val): self["showticklabels"] = val @property def showtickprefix(self): """ If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. 
The 'showtickprefix' property is an enumeration that may be specified as: - One of the following enumeration values: ['all', 'first', 'last', 'none'] Returns ------- Any """ return self["showtickprefix"] @showtickprefix.setter def showtickprefix(self, val): self["showtickprefix"] = val @property def showticksuffix(self): """ Same as `showtickprefix` but for tick suffixes. The 'showticksuffix' property is an enumeration that may be specified as: - One of the following enumeration values: ['all', 'first', 'last', 'none'] Returns ------- Any """ return self["showticksuffix"] @showticksuffix.setter def showticksuffix(self, val): self["showticksuffix"] = val @property def thickness(self): """ Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. The 'thickness' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["thickness"] @thickness.setter def thickness(self, val): self["thickness"] = val @property def thicknessmode(self): """ Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. The 'thicknessmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['fraction', 'pixels'] Returns ------- Any """ return self["thicknessmode"] @thicknessmode.setter def thicknessmode(self, val): self["thicknessmode"] = val @property def tick0(self): """ Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. 
If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. The 'tick0' property accepts values of any type Returns ------- Any """ return self["tick0"] @tick0.setter def tick0(self, val): self["tick0"] = val @property def tickangle(self): """ Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. The 'tickangle' property is a angle (in degrees) that may be specified as a number between -180 and 180. Numeric values outside this range are converted to the equivalent value (e.g. 270 is converted to -90). Returns ------- int|float """ return self["tickangle"] @tickangle.setter def tickangle(self, val): self["tickangle"] = val @property def tickcolor(self): """ Sets the tick color. The 'tickcolor' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["tickcolor"] @tickcolor.setter def tickcolor(self, val): self["tickcolor"] = val @property def tickfont(self): """ Sets the color bar's tick label font The 'tickfont' property is an instance of Tickfont that may be specified as: - An instance of :class:`plotly.graph_objs.scattersmith.marker.colorbar.Tickfont` - A dict of string/value properties that will be passed to the Tickfont constructor Returns ------- plotly.graph_objs.scattersmith.marker.colorbar.Tickfont """ return self["tickfont"] @tickfont.setter def tickfont(self, val): self["tickfont"] = val @property def tickformat(self): """ Sets the tick label formatting rule using d3 formatting mini- languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-format/tree/v1.4.5#d3-format. 
And for dates see: https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" The 'tickformat' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["tickformat"] @tickformat.setter def tickformat(self, val): self["tickformat"] = val @property def tickformatstops(self): """ The 'tickformatstops' property is a tuple of instances of Tickformatstop that may be specified as: - A list or tuple of instances of plotly.graph_objs.scattersmith.marker.colorbar.Tickformatstop - A list or tuple of dicts of string/value properties that will be passed to the Tickformatstop constructor Returns ------- tuple[plotly.graph_objs.scattersmith.marker.colorbar.Tickformatstop] """ return self["tickformatstops"] @tickformatstops.setter def tickformatstops(self, val): self["tickformatstops"] = val @property def tickformatstopdefaults(self): """ When used in a template (as layout.template.data.scattersmith.m arker.colorbar.tickformatstopdefaults), sets the default property values to use for elements of scattersmith.marker.colorbar.tickformatstops The 'tickformatstopdefaults' property is an instance of Tickformatstop that may be specified as: - An instance of :class:`plotly.graph_objs.scattersmith.marker.colorbar.Tickformatstop` - A dict of string/value properties that will be passed to the Tickformatstop constructor Returns ------- plotly.graph_objs.scattersmith.marker.colorbar.Tickformatstop """ return self["tickformatstopdefaults"] @tickformatstopdefaults.setter def tickformatstopdefaults(self, val): self["tickformatstopdefaults"] = val @property def ticklabeloverflow(self): """ Determines how we handle tick labels that would overflow either the graph div or 
the domain of the axis. The default value for inside tick labels is *hide past domain*. In other cases the default is *hide past div*. The 'ticklabeloverflow' property is an enumeration that may be specified as: - One of the following enumeration values: ['allow', 'hide past div', 'hide past domain'] Returns ------- Any """ return self["ticklabeloverflow"] @ticklabeloverflow.setter def ticklabeloverflow(self, val): self["ticklabeloverflow"] = val @property def ticklabelposition(self): """ Determines where tick labels are drawn relative to the ticks. Left and right options are used when `orientation` is "h", top and bottom when `orientation` is "v". The 'ticklabelposition' property is an enumeration that may be specified as: - One of the following enumeration values: ['outside', 'inside', 'outside top', 'inside top', 'outside left', 'inside left', 'outside right', 'inside right', 'outside bottom', 'inside bottom'] Returns ------- Any """ return self["ticklabelposition"] @ticklabelposition.setter def ticklabelposition(self, val): self["ticklabelposition"] = val @property def ticklabelstep(self): """ Sets the spacing between tick labels as compared to the spacing between ticks. A value of 1 (default) means each tick gets a label. A value of 2 means shows every 2nd label. A larger value n means only every nth tick is labeled. `tick0` determines which labels are shown. Not implemented for axes with `type` "log" or "multicategory", or when `tickmode` is "array". The 'ticklabelstep' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 9223372036854775807] Returns ------- int """ return self["ticklabelstep"] @ticklabelstep.setter def ticklabelstep(self, val): self["ticklabelstep"] = val @property def ticklen(self): """ Sets the tick length (in px). 
The 'ticklen' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["ticklen"] @ticklen.setter def ticklen(self, val): self["ticklen"] = val @property def tickmode(self): """ Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). The 'tickmode' property is an enumeration that may be specified as: - One of the following enumeration values: ['auto', 'linear', 'array'] Returns ------- Any """ return self["tickmode"] @tickmode.setter def tickmode(self, val): self["tickmode"] = val @property def tickprefix(self): """ Sets a tick label prefix. The 'tickprefix' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["tickprefix"] @tickprefix.setter def tickprefix(self, val): self["tickprefix"] = val @property def ticks(self): """ Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. The 'ticks' property is an enumeration that may be specified as: - One of the following enumeration values: ['outside', 'inside', ''] Returns ------- Any """ return self["ticks"] @ticks.setter def ticks(self, val): self["ticks"] = val @property def ticksuffix(self): """ Sets a tick label suffix. 
The 'ticksuffix' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["ticksuffix"] @ticksuffix.setter def ticksuffix(self, val): self["ticksuffix"] = val @property def ticktext(self): """ Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. The 'ticktext' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray """ return self["ticktext"] @ticktext.setter def ticktext(self, val): self["ticktext"] = val @property def ticktextsrc(self): """ Sets the source reference on Chart Studio Cloud for `ticktext`. The 'ticktextsrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["ticktextsrc"] @ticktextsrc.setter def ticktextsrc(self, val): self["ticktextsrc"] = val @property def tickvals(self): """ Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. The 'tickvals' property is an array that may be specified as a tuple, list, numpy array, or pandas Series Returns ------- numpy.ndarray """ return self["tickvals"] @tickvals.setter def tickvals(self, val): self["tickvals"] = val @property def tickvalssrc(self): """ Sets the source reference on Chart Studio Cloud for `tickvals`. The 'tickvalssrc' property must be specified as a string or as a plotly.grid_objs.Column object Returns ------- str """ return self["tickvalssrc"] @tickvalssrc.setter def tickvalssrc(self, val): self["tickvalssrc"] = val @property def tickwidth(self): """ Sets the tick width (in px). 
The 'tickwidth' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["tickwidth"] @tickwidth.setter def tickwidth(self, val): self["tickwidth"] = val @property def title(self): """ The 'title' property is an instance of Title that may be specified as: - An instance of :class:`plotly.graph_objs.scattersmith.marker.colorbar.Title` - A dict of string/value properties that will be passed to the Title constructor Returns ------- plotly.graph_objs.scattersmith.marker.colorbar.Title """ return self["title"] @title.setter def title(self, val): self["title"] = val @property def x(self): """ Sets the x position with respect to `xref` of the color bar (in plot fraction). When `xref` is "paper", defaults to 1.02 when `orientation` is "v" and 0.5 when `orientation` is "h". When `xref` is "container", defaults to 1 when `orientation` is "v" and 0.5 when `orientation` is "h". Must be between 0 and 1 if `xref` is "container" and between "-2" and 3 if `xref` is "paper". The 'x' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["x"] @x.setter def x(self, val): self["x"] = val @property def xanchor(self): """ Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. Defaults to "left" when `orientation` is "v" and "center" when `orientation` is "h". The 'xanchor' property is an enumeration that may be specified as: - One of the following enumeration values: ['left', 'center', 'right'] Returns ------- Any """ return self["xanchor"] @xanchor.setter def xanchor(self, val): self["xanchor"] = val @property def xpad(self): """ Sets the amount of padding (in px) along the x direction. 
The 'xpad' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["xpad"] @xpad.setter def xpad(self, val): self["xpad"] = val @property def xref(self): """ Sets the container `x` refers to. "container" spans the entire `width` of the plot. "paper" refers to the width of the plotting area only. The 'xref' property is an enumeration that may be specified as: - One of the following enumeration values: ['container', 'paper'] Returns ------- Any """ return self["xref"] @xref.setter def xref(self, val): self["xref"] = val @property def y(self): """ Sets the y position with respect to `yref` of the color bar (in plot fraction). When `yref` is "paper", defaults to 0.5 when `orientation` is "v" and 1.02 when `orientation` is "h". When `yref` is "container", defaults to 0.5 when `orientation` is "v" and 1 when `orientation` is "h". Must be between 0 and 1 if `yref` is "container" and between "-2" and 3 if `yref` is "paper". The 'y' property is a number and may be specified as: - An int or float Returns ------- int|float """ return self["y"] @y.setter def y(self, val): self["y"] = val @property def yanchor(self): """ Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. Defaults to "middle" when `orientation` is "v" and "bottom" when `orientation` is "h". The 'yanchor' property is an enumeration that may be specified as: - One of the following enumeration values: ['top', 'middle', 'bottom'] Returns ------- Any """ return self["yanchor"] @yanchor.setter def yanchor(self, val): self["yanchor"] = val @property def ypad(self): """ Sets the amount of padding (in px) along the y direction. 
The 'ypad' property is a number and may be specified as: - An int or float in the interval [0, inf] Returns ------- int|float """ return self["ypad"] @ypad.setter def ypad(self, val): self["ypad"] = val @property def yref(self): """ Sets the container `y` refers to. "container" spans the entire `height` of the plot. "paper" refers to the height of the plotting area only. The 'yref' property is an enumeration that may be specified as: - One of the following enumeration values: ['container', 'paper'] Returns ------- Any """ return self["yref"] @yref.setter def yref(self, val): self["yref"] = val @property def _prop_descriptions(self): return """\ bgcolor Sets the color of padded area. bordercolor Sets the axis line color. borderwidth Sets the width (in px) or the border enclosing this color bar. dtick Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". 
To set ticks every 4 years, set `dtick` to "M48" exponentformat Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T (10^12). *SI extended* covers instead the full SI range from "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI extended* is used and the exponent is beyond the above ranges, the formatting rule will automatically be switched to the power notation. labelalias Replacement text for specific tick or hover labels. For example using {US: 'USA', CA: 'Canada'} changes US to USA and CA to Canada. The labels we would have shown must match the keys exactly, after adding any tickprefix or ticksuffix. For negative numbers the minus sign symbol used (U+2212) is wider than the regular ascii dash. That means you need to use −1 instead of -1. labelalias can be used with any axis type, and both keys (if needed) and values (if desired) can include html-like tags or MathJax. len Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. lenmode Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. minexponent Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". nticks Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". orientation Sets the orientation of the colorbar. outlinecolor Sets the axis line color. outlinewidth Sets the width (in px) of the axis line. 
separatethousands If "true", even 4-digit integers are separated showexponent If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. showticklabels Determines whether or not the tick labels are drawn. showtickprefix If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. showticksuffix Same as `showtickprefix` but for tick suffixes. thickness Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. thicknessmode Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. tick0 Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. tickangle Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. tickcolor Sets the tick color. tickfont Sets the color bar's tick label font tickformat Sets the tick label formatting rule using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for dates see: https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format. 
We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" tickformatstops A tuple of :class:`plotly.graph_objects.scattersmith.ma rker.colorbar.Tickformatstop` instances or dicts with compatible properties tickformatstopdefaults When used in a template (as layout.template.data.scatte rsmith.marker.colorbar.tickformatstopdefaults), sets the default property values to use for elements of scattersmith.marker.colorbar.tickformatstops ticklabeloverflow Determines how we handle tick labels that would overflow either the graph div or the domain of the axis. The default value for inside tick labels is *hide past domain*. In other cases the default is *hide past div*. ticklabelposition Determines where tick labels are drawn relative to the ticks. Left and right options are used when `orientation` is "h", top and bottom when `orientation` is "v". ticklabelstep Sets the spacing between tick labels as compared to the spacing between ticks. A value of 1 (default) means each tick gets a label. A value of 2 means shows every 2nd label. A larger value n means only every nth tick is labeled. `tick0` determines which labels are shown. Not implemented for axes with `type` "log" or "multicategory", or when `tickmode` is "array". ticklen Sets the tick length (in px). tickmode Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). tickprefix Sets a tick label prefix. ticks Determines whether ticks are drawn or not. 
If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. ticksuffix Sets a tick label suffix. ticktext Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. ticktextsrc Sets the source reference on Chart Studio Cloud for `ticktext`. tickvals Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. tickvalssrc Sets the source reference on Chart Studio Cloud for `tickvals`. tickwidth Sets the tick width (in px). title :class:`plotly.graph_objects.scattersmith.marker.colorb ar.Title` instance or dict with compatible properties x Sets the x position with respect to `xref` of the color bar (in plot fraction). When `xref` is "paper", defaults to 1.02 when `orientation` is "v" and 0.5 when `orientation` is "h". When `xref` is "container", defaults to 1 when `orientation` is "v" and 0.5 when `orientation` is "h". Must be between 0 and 1 if `xref` is "container" and between "-2" and 3 if `xref` is "paper". xanchor Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. Defaults to "left" when `orientation` is "v" and "center" when `orientation` is "h". xpad Sets the amount of padding (in px) along the x direction. xref Sets the container `x` refers to. "container" spans the entire `width` of the plot. "paper" refers to the width of the plotting area only. y Sets the y position with respect to `yref` of the color bar (in plot fraction). When `yref` is "paper", defaults to 0.5 when `orientation` is "v" and 1.02 when `orientation` is "h". When `yref` is "container", defaults to 0.5 when `orientation` is "v" and 1 when `orientation` is "h". Must be between 0 and 1 if `yref` is "container" and between "-2" and 3 if `yref` is "paper". 
yanchor Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. Defaults to "middle" when `orientation` is "v" and "bottom" when `orientation` is "h". ypad Sets the amount of padding (in px) along the y direction. yref Sets the container `y` refers to. "container" spans the entire `height` of the plot. "paper" refers to the height of the plotting area only. """ def __init__( self, arg=None, bgcolor=None, bordercolor=None, borderwidth=None, dtick=None, exponentformat=None, labelalias=None, len=None, lenmode=None, minexponent=None, nticks=None, orientation=None, outlinecolor=None, outlinewidth=None, separatethousands=None, showexponent=None, showticklabels=None, showtickprefix=None, showticksuffix=None, thickness=None, thicknessmode=None, tick0=None, tickangle=None, tickcolor=None, tickfont=None, tickformat=None, tickformatstops=None, tickformatstopdefaults=None, ticklabeloverflow=None, ticklabelposition=None, ticklabelstep=None, ticklen=None, tickmode=None, tickprefix=None, ticks=None, ticksuffix=None, ticktext=None, ticktextsrc=None, tickvals=None, tickvalssrc=None, tickwidth=None, title=None, x=None, xanchor=None, xpad=None, xref=None, y=None, yanchor=None, ypad=None, yref=None, **kwargs, ): """ Construct a new ColorBar object Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scattersmith.marker.ColorBar` bgcolor Sets the color of padded area. bordercolor Sets the axis line color. borderwidth Sets the width (in px) or the border enclosing this color bar. dtick Sets the step in-between ticks on this axis. Use with `tick0`. Must be a positive number, or special strings available to "log" and "date" axes. If the axis `type` is "log", then ticks are set every 10^(n*dtick) where n is the tick number. For example, to set a tick mark at 1, 10, 100, 1000, ... set dtick to 1. To set tick marks at 1, 100, 10000, ... 
set dtick to 2. To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to log_10(5), or 0.69897000433. "log" has several special values; "L<f>", where `f` is a positive number, gives ticks linearly spaced in value (but not position). For example `tick0` = 0.1, `dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus small digits between, use "D1" (all digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and "D2". If the axis `type` is "date", then you must convert the time to milliseconds. For example, to set the interval between ticks to one day, set `dtick` to 86400000.0. "date" also has special values "M<n>" gives ticks spaced by a number of months. `n` must be a positive integer. To set ticks on the 15th of every third month, set `tick0` to "2000-01-15" and `dtick` to "M3". To set ticks every 4 years, set `dtick` to "M48" exponentformat Determines a formatting rule for the tick exponents. For example, consider the number 1,000,000,000. If "none", it appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If "power", 1x10^9 (with 9 in a super script). If "SI", 1G. If "B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T (10^12). *SI extended* covers instead the full SI range from "quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI extended* is used and the exponent is beyond the above ranges, the formatting rule will automatically be switched to the power notation. labelalias Replacement text for specific tick or hover labels. For example using {US: 'USA', CA: 'Canada'} changes US to USA and CA to Canada. The labels we would have shown must match the keys exactly, after adding any tickprefix or ticksuffix. For negative numbers the minus sign symbol used (U+2212) is wider than the regular ascii dash. That means you need to use −1 instead of -1. labelalias can be used with any axis type, and both keys (if needed) and values (if desired) can include html-like tags or MathJax. 
len Sets the length of the color bar This measure excludes the padding of both ends. That is, the color bar length is this length minus the padding on both ends. lenmode Determines whether this color bar's length (i.e. the measure in the color variation direction) is set in units of plot "fraction" or in *pixels. Use `len` to set the value. minexponent Hide SI prefix for 10^n if |n| is below this number. This only has an effect when `tickformat` is "SI" or "B". nticks Specifies the maximum number of ticks for the particular axis. The actual number of ticks will be chosen automatically to be less than or equal to `nticks`. Has an effect only if `tickmode` is set to "auto". orientation Sets the orientation of the colorbar. outlinecolor Sets the axis line color. outlinewidth Sets the width (in px) of the axis line. separatethousands If "true", even 4-digit integers are separated showexponent If "all", all exponents are shown besides their significands. If "first", only the exponent of the first tick is shown. If "last", only the exponent of the last tick is shown. If "none", no exponents appear. showticklabels Determines whether or not the tick labels are drawn. showtickprefix If "all", all tick labels are displayed with a prefix. If "first", only the first tick is displayed with a prefix. If "last", only the last tick is displayed with a suffix. If "none", tick prefixes are hidden. showticksuffix Same as `showtickprefix` but for tick suffixes. thickness Sets the thickness of the color bar This measure excludes the size of the padding, ticks and labels. thicknessmode Determines whether this color bar's thickness (i.e. the measure in the constant color direction) is set in units of plot "fraction" or in "pixels". Use `thickness` to set the value. tick0 Sets the placement of the first tick on this axis. Use with `dtick`. If the axis `type` is "log", then you must take the log of your starting tick (e.g. 
to set the starting tick to 100, set the `tick0` to 2) except when `dtick`=*L<f>* (see `dtick` for more info). If the axis `type` is "date", it should be a date string, like date data. If the axis `type` is "category", it should be a number, using the scale where each category is assigned a serial number from zero in the order it appears. tickangle Sets the angle of the tick labels with respect to the horizontal. For example, a `tickangle` of -90 draws the tick labels vertically. tickcolor Sets the tick color. tickfont Sets the color bar's tick label font tickformat Sets the tick label formatting rule using d3 formatting mini-languages which are very similar to those in Python. For numbers, see: https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for dates see: https://github.com/d3/d3-time- format/tree/v2.2.3#locale_format. We add two items to d3's date formatter: "%h" for half of the year as a decimal number as well as "%{n}f" for fractional seconds with n digits. For example, *2016-10-13 09:15:23.456* with tickformat "%H~%M~%S.%2f" would display "09~15~23.46" tickformatstops A tuple of :class:`plotly.graph_objects.scattersmith.ma rker.colorbar.Tickformatstop` instances or dicts with compatible properties tickformatstopdefaults When used in a template (as layout.template.data.scatte rsmith.marker.colorbar.tickformatstopdefaults), sets the default property values to use for elements of scattersmith.marker.colorbar.tickformatstops ticklabeloverflow Determines how we handle tick labels that would overflow either the graph div or the domain of the axis. The default value for inside tick labels is *hide past domain*. In other cases the default is *hide past div*. ticklabelposition Determines where tick labels are drawn relative to the ticks. Left and right options are used when `orientation` is "h", top and bottom when `orientation` is "v". ticklabelstep Sets the spacing between tick labels as compared to the spacing between ticks. 
A value of 1 (default) means each tick gets a label. A value of 2 means shows every 2nd label. A larger value n means only every nth tick is labeled. `tick0` determines which labels are shown. Not implemented for axes with `type` "log" or "multicategory", or when `tickmode` is "array". ticklen Sets the tick length (in px). tickmode Sets the tick mode for this axis. If "auto", the number of ticks is set via `nticks`. If "linear", the placement of the ticks is determined by a starting position `tick0` and a tick step `dtick` ("linear" is the default value if `tick0` and `dtick` are provided). If "array", the placement of the ticks is set via `tickvals` and the tick text is `ticktext`. ("array" is the default value if `tickvals` is provided). tickprefix Sets a tick label prefix. ticks Determines whether ticks are drawn or not. If "", this axis' ticks are not drawn. If "outside" ("inside"), this axis' are drawn outside (inside) the axis lines. ticksuffix Sets a tick label suffix. ticktext Sets the text displayed at the ticks position via `tickvals`. Only has an effect if `tickmode` is set to "array". Used with `tickvals`. ticktextsrc Sets the source reference on Chart Studio Cloud for `ticktext`. tickvals Sets the values at which ticks on this axis appear. Only has an effect if `tickmode` is set to "array". Used with `ticktext`. tickvalssrc Sets the source reference on Chart Studio Cloud for `tickvals`. tickwidth Sets the tick width (in px). title :class:`plotly.graph_objects.scattersmith.marker.colorb ar.Title` instance or dict with compatible properties x Sets the x position with respect to `xref` of the color bar (in plot fraction). When `xref` is "paper", defaults to 1.02 when `orientation` is "v" and 0.5 when `orientation` is "h". When `xref` is "container", defaults to 1 when `orientation` is "v" and 0.5 when `orientation` is "h". Must be between 0 and 1 if `xref` is "container" and between "-2" and 3 if `xref` is "paper". 
xanchor Sets this color bar's horizontal position anchor. This anchor binds the `x` position to the "left", "center" or "right" of the color bar. Defaults to "left" when `orientation` is "v" and "center" when `orientation` is "h". xpad Sets the amount of padding (in px) along the x direction. xref Sets the container `x` refers to. "container" spans the entire `width` of the plot. "paper" refers to the width of the plotting area only. y Sets the y position with respect to `yref` of the color bar (in plot fraction). When `yref` is "paper", defaults to 0.5 when `orientation` is "v" and 1.02 when `orientation` is "h". When `yref` is "container", defaults to 0.5 when `orientation` is "v" and 1 when `orientation` is "h". Must be between 0 and 1 if `yref` is "container" and between "-2" and 3 if `yref` is "paper". yanchor Sets this color bar's vertical position anchor This anchor binds the `y` position to the "top", "middle" or "bottom" of the color bar. Defaults to "middle" when `orientation` is "v" and "bottom" when `orientation` is "h". ypad Sets the amount of padding (in px) along the y direction. yref Sets the container `y` refers to. "container" spans the entire `height` of the plot. "paper" refers to the height of the plotting area only. 
Returns ------- ColorBar """ super().__init__("colorbar") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.scattersmith.marker.ColorBar constructor must be a dict or an instance of :class:`plotly.graph_objs.scattersmith.marker.ColorBar`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("bgcolor", arg, bgcolor) self._set_property("bordercolor", arg, bordercolor) self._set_property("borderwidth", arg, borderwidth) self._set_property("dtick", arg, dtick) self._set_property("exponentformat", arg, exponentformat) self._set_property("labelalias", arg, labelalias) self._set_property("len", arg, len) self._set_property("lenmode", arg, lenmode) self._set_property("minexponent", arg, minexponent) self._set_property("nticks", arg, nticks) self._set_property("orientation", arg, orientation) self._set_property("outlinecolor", arg, outlinecolor) self._set_property("outlinewidth", arg, outlinewidth) self._set_property("separatethousands", arg, separatethousands) self._set_property("showexponent", arg, showexponent) self._set_property("showticklabels", arg, showticklabels) self._set_property("showtickprefix", arg, showtickprefix) self._set_property("showticksuffix", arg, showticksuffix) self._set_property("thickness", arg, thickness) self._set_property("thicknessmode", arg, thicknessmode) self._set_property("tick0", arg, tick0) self._set_property("tickangle", arg, tickangle) self._set_property("tickcolor", arg, tickcolor) self._set_property("tickfont", arg, tickfont) self._set_property("tickformat", arg, tickformat) self._set_property("tickformatstops", arg, tickformatstops) self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults) 
self._set_property("ticklabeloverflow", arg, ticklabeloverflow) self._set_property("ticklabelposition", arg, ticklabelposition) self._set_property("ticklabelstep", arg, ticklabelstep) self._set_property("ticklen", arg, ticklen) self._set_property("tickmode", arg, tickmode) self._set_property("tickprefix", arg, tickprefix) self._set_property("ticks", arg, ticks) self._set_property("ticksuffix", arg, ticksuffix) self._set_property("ticktext", arg, ticktext) self._set_property("ticktextsrc", arg, ticktextsrc) self._set_property("tickvals", arg, tickvals) self._set_property("tickvalssrc", arg, tickvalssrc) self._set_property("tickwidth", arg, tickwidth) self._set_property("title", arg, title) self._set_property("x", arg, x) self._set_property("xanchor", arg, xanchor) self._set_property("xpad", arg, xpad) self._set_property("xref", arg, xref) self._set_property("y", arg, y) self._set_property("yanchor", arg, yanchor) self._set_property("ypad", arg, ypad) self._set_property("yref", arg, yref) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
ColorBar
python
tensorflow__tensorflow
tensorflow/python/training/session_manager_test.py
{ "start": 28319, "end": 34338 }
class ____(test.TestCase): @classmethod def setUpClass(cls): super(ObsoleteSessionManagerTest, cls).setUpClass() resource_variables_toggle.disable_resource_variables() def testPrepareSessionSucceeds(self): with ops.Graph().as_default(): v = variable_v1.VariableV1([1.0, 2.0, 3.0], name="v") sm = session_manager.SessionManager( ready_op=variables.assert_variables_initialized()) sess = sm.prepare_session( "", init_op=variables.global_variables_initializer()) self.assertAllClose([1.0, 2.0, 3.0], sess.run(v)) def testPrepareSessionSucceedsWithInitFeedDict(self): with ops.Graph().as_default(): p = array_ops.placeholder(dtypes.float32, shape=(3,)) v = variable_v1.VariableV1(p, name="v") sm = session_manager.SessionManager( ready_op=variables.assert_variables_initialized()) sess = sm.prepare_session( "", init_op=variables.global_variables_initializer(), init_feed_dict={p: [1.0, 2.0, 3.0]}) self.assertAllClose([1.0, 2.0, 3.0], sess.run(v)) def testPrepareSessionSucceedsWithInitFn(self): with ops.Graph().as_default(): v = variable_v1.VariableV1([125], name="v") sm = session_manager.SessionManager( ready_op=variables.assert_variables_initialized()) sess = sm.prepare_session( "", init_fn=lambda sess: sess.run(v.initializer)) self.assertAllClose([125], sess.run(v)) def testPrepareSessionFails(self): checkpoint_dir = os.path.join(self.get_temp_dir(), "prepare_session") checkpoint_dir2 = os.path.join(self.get_temp_dir(), "prepare_session2") try: gfile.DeleteRecursively(checkpoint_dir) gfile.DeleteRecursively(checkpoint_dir2) except errors.OpError: pass # Ignore gfile.MakeDirs(checkpoint_dir) with ops.Graph().as_default(): v = variable_v1.VariableV1([1.0, 2.0, 3.0], name="v") sm = session_manager.SessionManager( ready_op=variables.assert_variables_initialized()) saver = saver_lib.Saver({"v": v}) sess = sm.prepare_session( "", init_op=variables.global_variables_initializer(), saver=saver, checkpoint_dir=checkpoint_dir) self.assertAllClose([1.0, 2.0, 3.0], sess.run(v)) 
checkpoint_filename = os.path.join(checkpoint_dir, "prepare_session_checkpoint") saver.save(sess, checkpoint_filename) # Create a new Graph and SessionManager and recover. with ops.Graph().as_default(): # Renames the checkpoint directory. os.rename(checkpoint_dir, checkpoint_dir2) gfile.MakeDirs(checkpoint_dir) v = variable_v1.VariableV1([6.0, 7.0, 8.0], name="v") with self.cached_session(): self.assertEqual(False, variable_v1.is_variable_initialized(v).eval()) session_manager.SessionManager( ready_op=variables.assert_variables_initialized()) saver = saver_lib.Saver({"v": v}) # This should fail as there's no checkpoint within 2 seconds. with self.assertRaisesRegex( RuntimeError, "no init_op or init_fn or local_init_op was given"): sess = sm.prepare_session( "", init_op=None, saver=saver, checkpoint_dir=checkpoint_dir, wait_for_checkpoint=True, max_wait_secs=2) # Rename the checkpoint directory back. gfile.DeleteRecursively(checkpoint_dir) os.rename(checkpoint_dir2, checkpoint_dir) # This should succeed as there's checkpoint. sess = sm.prepare_session( "", init_op=None, saver=saver, checkpoint_dir=checkpoint_dir, wait_for_checkpoint=True, max_wait_secs=2) self.assertEqual( True, variable_v1.is_variable_initialized( sess.graph.get_tensor_by_name("v:0")).eval(session=sess)) def testRecoverSession(self): # Create a checkpoint. 
checkpoint_dir = os.path.join(self.get_temp_dir(), "recover_session") try: gfile.DeleteRecursively(checkpoint_dir) except errors.OpError: pass # Ignore gfile.MakeDirs(checkpoint_dir) with ops.Graph().as_default(): v = variable_v1.VariableV1(1, name="v") sm = session_manager.SessionManager( ready_op=variables.assert_variables_initialized()) saver = saver_lib.Saver({"v": v}) sess, initialized = sm.recover_session( "", saver=saver, checkpoint_dir=checkpoint_dir) self.assertFalse(initialized) sess.run(v.initializer) self.assertEqual(1, sess.run(v)) saver.save(sess, os.path.join(checkpoint_dir, "recover_session_checkpoint")) # Create a new Graph and SessionManager and recover. with ops.Graph().as_default(): v = variable_v1.VariableV1(2, name="v") with self.cached_session(): self.assertEqual(False, variable_v1.is_variable_initialized(v).eval()) sm2 = session_manager.SessionManager( ready_op=variables.assert_variables_initialized()) saver = saver_lib.Saver({"v": v}) sess, initialized = sm2.recover_session( "", saver=saver, checkpoint_dir=checkpoint_dir) self.assertTrue(initialized) self.assertEqual( True, variable_v1.is_variable_initialized( sess.graph.get_tensor_by_name("v:0")).eval(session=sess)) self.assertEqual(1, sess.run(v)) def testWaitForSessionReturnsNoneAfterTimeout(self): with ops.Graph().as_default(): variable_v1.VariableV1(1, name="v") sm = session_manager.SessionManager( ready_op=variables.assert_variables_initialized(), recovery_wait_secs=1) # Set max_wait_secs to allow us to try a few times. with self.assertRaises(errors.DeadlineExceededError): sm.wait_for_session(master="", max_wait_secs=3) if __name__ == "__main__": test.main()
ObsoleteSessionManagerTest
python
giampaolo__psutil
tests/test_testutils.py
{ "start": 6358, "end": 8697 }
class ____(PsutilTestCase): def test_reap_children(self): subp = self.spawn_subproc() p = psutil.Process(subp.pid) assert p.is_running() reap_children() assert not p.is_running() assert not tests._pids_started assert not tests._subprocesses_started def test_spawn_children_pair(self): child, grandchild = self.spawn_children_pair() assert child.pid != grandchild.pid assert child.is_running() assert grandchild.is_running() children = psutil.Process().children() assert children == [child] children = psutil.Process().children(recursive=True) assert len(children) == 2 assert child in children assert grandchild in children assert child.ppid() == os.getpid() assert grandchild.ppid() == child.pid terminate(child) assert not child.is_running() assert grandchild.is_running() terminate(grandchild) assert not grandchild.is_running() @pytest.mark.skipif(not POSIX, reason="POSIX only") def test_spawn_zombie(self): _parent, zombie = self.spawn_zombie() assert zombie.status() == psutil.STATUS_ZOMBIE def test_terminate(self): # by subprocess.Popen p = self.spawn_subproc() terminate(p) self.assert_pid_gone(p.pid) terminate(p) # by psutil.Process p = psutil.Process(self.spawn_subproc().pid) terminate(p) self.assert_pid_gone(p.pid) terminate(p) # by psutil.Popen cmd = [ PYTHON_EXE, "-c", "import time; [time.sleep(0.1) for x in range(100)];", ] p = psutil.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=PYTHON_EXE_ENV, ) terminate(p) self.assert_pid_gone(p.pid) terminate(p) # by PID pid = self.spawn_subproc().pid terminate(pid) self.assert_pid_gone(p.pid) terminate(pid) # zombie if POSIX: parent, zombie = self.spawn_zombie() terminate(parent) terminate(zombie) self.assert_pid_gone(parent.pid) self.assert_pid_gone(zombie.pid)
TestProcessUtils
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/data_time.py
{ "start": 1966, "end": 22895 }
class ____: _instance_queryer: CachingInstanceQueryer _asset_graph: BaseAssetGraph def __init__(self, instance_queryer: CachingInstanceQueryer): self._instance_queryer = instance_queryer @property def instance_queryer(self) -> CachingInstanceQueryer: return self._instance_queryer @property def asset_graph(self) -> BaseAssetGraph: return self.instance_queryer.asset_graph #################### # PARTITIONED DATA TIME #################### def _calculate_data_time_partitioned( self, asset_key: AssetKey, cursor: int, partitions_def: TimeWindowPartitionsDefinition, ) -> Optional[datetime.datetime]: """Returns the time up until which all available data has been consumed for this asset. At a high level, this algorithm works as follows: First, calculate the subset of partitions that have been materialized up until this point in time (ignoring the cursor). This is done using the get_materialized_partitions query, Next, we calculate the set of partitions that are net-new since the cursor. This is done by comparing the count of materializations before after the cursor to the total count of materializations. Finally, we calculate the minimum time window of the net-new partitions. This time window did not exist at the time of the cursor, so we know that we have all data up until the beginning of that time window, or all data up until the end of the first filled time window in the total set, whichever is less. 
""" # the total set of materialized partitions partition_subset = partitions_def.empty_subset().with_partition_keys( partition_key for partition_key in self._instance_queryer.get_materialized_partitions(asset_key) if partitions_def.has_partition_key(partition_key) ) if not isinstance(partition_subset, TimeWindowPartitionsSubset): check.failed(f"Invalid partition subset {type(partition_subset)}") sorted_time_windows = sorted(partition_subset.included_time_windows) # no time windows, no data if len(sorted_time_windows) == 0: return None first_filled_time_window = sorted_time_windows[0] first_available_time_window = partitions_def.get_first_partition_window() if first_available_time_window is None: return None # if the first partition has not been filled if first_available_time_window.start < first_filled_time_window.start: return None # there are no events for this asset after the cursor asset_record = self._instance_queryer.get_asset_record(asset_key) if ( asset_record is not None and asset_record.asset_entry is not None and asset_record.asset_entry.last_materialization_record is not None and asset_record.asset_entry.last_materialization_record.storage_id <= cursor ): return first_filled_time_window.end # get a per-partition count of the new materializations partitions = self._instance_queryer.get_materialized_partitions(asset_key) prev_partitions = self._instance_queryer.get_materialized_partitions( asset_key, before_cursor=cursor + 1 ) net_new_partitions = { partition_key for partition_key in (partitions - prev_partitions) if partitions_def.has_partition_key(partition_key) } # there are new materializations, but they don't fill any new partitions if not net_new_partitions: return first_filled_time_window.end # the oldest time window that was newly filled oldest_net_new_time_window = min( partitions_def.time_window_for_partition_key(partition_key) for partition_key in net_new_partitions ) # only factor in the oldest net new time window if it breaks the current 
first filled time window return min( oldest_net_new_time_window.start, first_filled_time_window.end, ) def _calculate_data_time_by_key_time_partitioned( self, asset_key: AssetKey, cursor: int, partitions_def: TimeWindowPartitionsDefinition, ) -> Mapping[AssetKey, Optional[datetime.datetime]]: """Returns the data time (i.e. the time up to which the asset has incorporated all available data) for a time-partitioned asset. This method takes into account all partitions that were materialized for this asset up to the provided cursor. """ partition_data_time = self._calculate_data_time_partitioned( asset_key=asset_key, cursor=cursor, partitions_def=partitions_def, ) root_keys = ( KeysAssetSelection(selected_keys=[asset_key]) .upstream() .sources() .resolve(self.asset_graph) ) return {key: partition_data_time for key in root_keys} #################### # UNPARTITIONED DATA TIME #################### def _upstream_records_by_key( self, asset_key: AssetKey, record_id: int, record_tags_dict: Mapping[str, str] ) -> Mapping[AssetKey, "EventLogRecord"]: upstream_records: dict[AssetKey, EventLogRecord] = {} for parent_key in self.asset_graph.get(asset_key).parent_keys: if not ( self.asset_graph.has(parent_key) and self.asset_graph.get(parent_key).is_executable ): continue input_event_pointer_tag = get_input_event_pointer_tag(parent_key) if input_event_pointer_tag not in record_tags_dict: # if the input event id was not recorded (materialized pre-1.1.0), just grab # the most recent event for this parent which happened before the current record before_cursor = record_id elif record_tags_dict[input_event_pointer_tag] != "NULL": # get the upstream event which was consumed when producing this materialization event before_cursor = int(record_tags_dict[input_event_pointer_tag]) + 1 else: before_cursor = None if before_cursor is not None: parent_record = ( self._instance_queryer.get_latest_materialization_or_observation_record( AssetKeyPartitionKey(parent_key), before_cursor=before_cursor 
) ) if parent_record is not None: upstream_records[parent_key] = parent_record return upstream_records @cached_method def _calculate_data_time_by_key_unpartitioned( self, *, asset_key: AssetKey, record_id: int, record_timestamp: float, record_tags: tuple[tuple[str, str]], current_time: datetime.datetime, ) -> Mapping[AssetKey, Optional[datetime.datetime]]: # find the upstream times of each of the parents of this asset record_tags_dict = dict(record_tags) upstream_records_by_key = self._upstream_records_by_key( asset_key, record_id, record_tags_dict ) if not upstream_records_by_key: if not self.asset_graph.has_materializable_parents(asset_key): return { asset_key: datetime.datetime.fromtimestamp( record_timestamp, tz=datetime.timezone.utc ) } else: return {} data_time_by_key: dict[AssetKey, Optional[datetime.datetime]] = {} for parent_key, parent_record in upstream_records_by_key.items(): # recurse to find the data times of this parent for upstream_key, data_time in self._calculate_data_time_by_key( asset_key=parent_key, record_id=parent_record.storage_id, record_timestamp=parent_record.event_log_entry.timestamp, record_tags=make_hashable( ( parent_record.asset_materialization.tags if parent_record.asset_materialization else ( parent_record.event_log_entry.asset_observation.tags if parent_record.event_log_entry.asset_observation else None ) ) or {} ), current_time=current_time, ).items(): # if root data is missing, this overrides other values if data_time is None: data_time_by_key[upstream_key] = None else: cur_data_time = data_time_by_key.get(upstream_key, data_time) data_time_by_key[upstream_key] = ( min(cur_data_time, data_time) if cur_data_time is not None else None ) return data_time_by_key #################### # OBSERVABLE SOURCE DATA TIME #################### @cached_method def _calculate_data_time_by_key_observable_source( self, *, asset_key: AssetKey, record_id: int, record_tags: tuple[tuple[str, str]], current_time: datetime.datetime, ) -> 
Mapping[AssetKey, Optional[datetime.datetime]]: data_version_value = dict(record_tags).get(DATA_VERSION_TAG) if data_version_value is None: return {asset_key: None} data_version = DataVersion(data_version_value) next_version_record = self._instance_queryer.next_version_record( asset_key=asset_key, data_version=data_version, after_cursor=record_id ) if next_version_record is None: # the most recent available version has been pulled in return {asset_key: current_time} # otherwise, we have all available data up to the point in time that the new version arrived next_version_timestamp = next_version_record.event_log_entry.timestamp return { asset_key: datetime.datetime.fromtimestamp( next_version_timestamp, tz=datetime.timezone.utc ) } #################### # CORE DATA TIME #################### @cached_method def _calculate_data_time_by_key( self, *, asset_key: AssetKey, record_id: Optional[int], record_timestamp: Optional[float], record_tags: tuple[tuple[str, str]], # for hashability current_time: datetime.datetime, ) -> Mapping[AssetKey, Optional[datetime.datetime]]: if record_id is None: return {key: None for key in self.asset_graph.get_materializable_roots(asset_key)} record_timestamp = check.not_none(record_timestamp) partitions_def = self.asset_graph.get(asset_key).partitions_def if isinstance(partitions_def, TimeWindowPartitionsDefinition): return self._calculate_data_time_by_key_time_partitioned( asset_key=asset_key, cursor=record_id, partitions_def=partitions_def, ) elif self.asset_graph.get(asset_key).is_observable: return self._calculate_data_time_by_key_observable_source( asset_key=asset_key, record_id=record_id, record_tags=record_tags, current_time=current_time, ) else: return self._calculate_data_time_by_key_unpartitioned( asset_key=asset_key, record_id=record_id, record_timestamp=record_timestamp, record_tags=record_tags, current_time=current_time, ) #################### # IN PROGRESS DATA TIME #################### @cached_method def 
_get_in_progress_run_ids(self, current_time: datetime.datetime) -> Sequence[str]: return [ record.dagster_run.run_id for record in self.instance_queryer.instance.get_run_records( filters=RunsFilter( statuses=[ status for status in DagsterRunStatus if status not in FINISHED_STATUSES ], # ignore old runs that may be stuck in an unfinished state created_after=current_time - datetime.timedelta(days=1), ), limit=25, ) ] @cached_method def _get_in_progress_data_time_in_run( self, *, run_id: str, asset_key: AssetKey, current_time: datetime.datetime ) -> Optional[datetime.datetime]: """Returns the upstream data times that a given asset key will be expected to have at the completion of the given run. """ planned_keys = self._instance_queryer.get_planned_materializations_for_run(run_id=run_id) materialized_keys = self._instance_queryer.get_current_materializations_for_run( run_id=run_id ) # if key is not pending materialization within the run, then downstream assets will generally # be expected to consume the current version of the asset if asset_key not in planned_keys or asset_key in materialized_keys: return self.get_current_data_time(asset_key, current_time=current_time) # if you're here, then this asset is planned, but not materialized. 
in the worst case, this # asset's data time will be equal to the current time once it finishes materializing if not self.asset_graph.has_materializable_parents(asset_key): return current_time data_time = current_time for parent_key in self.asset_graph.get(asset_key).parent_keys: if parent_key not in self.asset_graph.materializable_asset_keys: continue parent_data_time = self._get_in_progress_data_time_in_run( run_id=run_id, asset_key=parent_key, current_time=current_time ) if parent_data_time is None: return None data_time = min(data_time, parent_data_time) return data_time def get_in_progress_data_time( self, asset_key: AssetKey, current_time: datetime.datetime ) -> Optional[datetime.datetime]: """Returns a mapping containing the maximum upstream data time that the input asset will have once all in-progress runs complete. """ data_time: Optional[datetime.datetime] = None for run_id in self._get_in_progress_run_ids(current_time=current_time): if not self._instance_queryer.is_asset_planned_for_run(run_id=run_id, asset=asset_key): continue run_data_time = self._get_in_progress_data_time_in_run( run_id=run_id, asset_key=asset_key, current_time=current_time ) if run_data_time is not None: data_time = max(run_data_time, data_time or run_data_time) return data_time #################### # FAILED DATA TIME #################### def get_ignored_failure_data_time( self, asset_key: AssetKey, current_time: datetime.datetime ) -> Optional[datetime.datetime]: """Returns the data time that this asset would have if the most recent run successfully completed. If the most recent run did not fail, then this will return the current data time for this asset. 
""" current_data_time = self.get_current_data_time(asset_key, current_time=current_time) asset_record = self._instance_queryer.get_asset_record(asset_key) # no latest run if asset_record is None or asset_record.asset_entry.last_run_id is None: return current_data_time run_id = asset_record.asset_entry.last_run_id latest_run_record = self._instance_queryer._get_run_record_by_id( # noqa: SLF001 run_id=run_id ) # latest run did not fail if ( latest_run_record is None or latest_run_record.dagster_run.status != DagsterRunStatus.FAILURE ): return current_data_time # run failed, but asset was materialized successfully latest_materialization = asset_record.asset_entry.last_materialization if ( latest_materialization is not None and latest_materialization.run_id == latest_run_record.dagster_run.run_id ): return current_data_time run_failure_time = datetime_from_timestamp( latest_run_record.end_time or latest_run_record.create_timestamp.timestamp(), ) return self._get_in_progress_data_time_in_run( run_id=run_id, asset_key=asset_key, current_time=run_failure_time ) #################### # MAIN METHODS #################### def get_data_time_by_key_for_record( self, record: EventLogRecord, current_time: Optional[datetime.datetime] = None, ) -> Mapping[AssetKey, Optional[datetime.datetime]]: """Method to enable calculating the timestamps of materializations or observations of upstream assets which were relevant to a given AssetMaterialization. These timestamps can be calculated relative to any upstream asset keys. The heart of this functionality is a recursive method which takes a given asset materialization and finds the most recent materialization of each of its parents which happened *before* that given materialization event. 
""" event = record.asset_materialization or record.asset_observation if record.asset_key is None or event is None: raise DagsterInvariantViolationError( "Can only calculate data times for records with a materialization / observation " "event and an asset_key." ) return self._calculate_data_time_by_key( asset_key=record.asset_key, record_id=record.storage_id, record_timestamp=record.event_log_entry.timestamp, record_tags=make_hashable(event.tags or {}), current_time=current_time or get_current_datetime(), ) def get_current_data_time( self, asset_key: AssetKey, current_time: datetime.datetime ) -> Optional[datetime.datetime]: latest_record = self.instance_queryer.get_latest_materialization_or_observation_record( AssetKeyPartitionKey(asset_key) ) if latest_record is None: return None data_times = set(self.get_data_time_by_key_for_record(latest_record, current_time).values()) if None in data_times or not data_times: return None return min(cast("AbstractSet[datetime.datetime]", data_times), default=None) def _get_source_data_time( self, asset_key: AssetKey, current_time: datetime.datetime ) -> Optional[datetime.datetime]: latest_record = self.instance_queryer.get_latest_materialization_or_observation_record( AssetKeyPartitionKey(asset_key) ) if latest_record is None: return None observation = latest_record.asset_observation if observation is None: check.failed( "when invoked on a source asset, " "get_latest_materialization_or_observation_record should always return an " "observation" ) data_time = observation.metadata.get(DATA_TIME_METADATA_KEY) if data_time is None: return None else: return datetime.datetime.fromtimestamp( cast("float", data_time.value), datetime.timezone.utc ) def get_minutes_overdue( self, asset_key: AssetKey, evaluation_time: datetime.datetime, ) -> Optional[FreshnessMinutes]: asset = self.asset_graph.get(asset_key) if asset.legacy_freshness_policy is None: raise DagsterInvariantViolationError( "Cannot calculate minutes late for asset without a 
FreshnessPolicy" ) if asset.is_observable: current_data_time = self._get_source_data_time(asset_key, current_time=evaluation_time) else: current_data_time = self.get_current_data_time(asset_key, current_time=evaluation_time) return asset.legacy_freshness_policy.minutes_overdue( data_time=current_data_time, evaluation_time=evaluation_time, )
CachingDataTimeResolver
python
matplotlib__matplotlib
lib/matplotlib/colors.py
{ "start": 1977, "end": 3044 }
class ____(dict): def __init__(self, mapping): super().__init__(mapping) self.cache = {} def __setitem__(self, key, value): super().__setitem__(key, value) self.cache.clear() def __delitem__(self, key): super().__delitem__(key) self.cache.clear() _colors_full_map = {} # Set by reverse priority order. _colors_full_map.update(XKCD_COLORS) _colors_full_map.update({k.replace('grey', 'gray'): v for k, v in XKCD_COLORS.items() if 'grey' in k}) _colors_full_map.update(CSS4_COLORS) _colors_full_map.update(TABLEAU_COLORS) _colors_full_map.update({k.replace('gray', 'grey'): v for k, v in TABLEAU_COLORS.items() if 'gray' in k}) _colors_full_map.update(BASE_COLORS) _colors_full_map = _ColorMapping(_colors_full_map) _REPR_PNG_SIZE = (512, 64) _BIVAR_REPR_PNG_SIZE = 256 def get_named_colors_mapping(): """Return the global mapping of names to named colors.""" return _colors_full_map
_ColorMapping
python
pallets__click
examples/complex/complex/cli.py
{ "start": 93, "end": 666 }
class ____: def __init__(self): self.verbose = False self.home = os.getcwd() def log(self, msg, *args): """Logs a message to stderr.""" if args: msg %= args click.echo(msg, file=sys.stderr) def vlog(self, msg, *args): """Logs a message to stderr only if verbose is enabled.""" if self.verbose: self.log(msg, *args) pass_environment = click.make_pass_decorator(Environment, ensure=True) cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), "commands"))
Environment
python
huggingface__transformers
src/transformers/models/dia/configuration_dia.py
{ "start": 9904, "end": 14305 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`DiaModel`]. It is used to instantiate a Dia model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [nari-labs/Dia-1.6B](https://huggingface.co/nari-labs/Dia-1.6B) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: encoder_config (`DiaEncoderConfig`, *optional*): Configuration for the encoder part of the model. If not provided, a default `DiaEncoderConfig` will be used. decoder_config (`DiaDecoderConfig`, *optional*): Configuration for the decoder part of the model. If not provided, a default `DiaDecoderConfig` will be used. norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the normalization layers. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Indicating that this model uses an encoder-decoder architecture. pad_token_id (`int`, *optional*, defaults to 1025): Padding token id. eos_token_id (`int`, *optional*, defaults to 1024): End of stream token id. bos_token_id (`int`, *optional*, defaults to 1026): Beginning of stream token id. delay_pattern (`list[int]`, *optional*, defaults to `[0, 8, 9, 10, 11, 12, 13, 14, 15]`): The delay pattern for the decoder. The length of this list must match `decoder_config.num_channels`. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
Example: ```python >>> from transformers import DiaConfig, DiaModel >>> # Initializing a DiaConfig with default values >>> configuration = DiaConfig() >>> # Initializing a DiaModel (with random weights) from the configuration >>> model = DiaModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "dia" keys_to_ignore_at_inference = ["past_key_values"] sub_configs = {"encoder_config": DiaEncoderConfig, "decoder_config": DiaDecoderConfig} def __init__( self, encoder_config: Optional[DiaEncoderConfig] = None, decoder_config: Optional[DiaDecoderConfig] = None, norm_eps: float = 1e-5, is_encoder_decoder: bool = True, pad_token_id: int = 1025, eos_token_id: int = 1024, bos_token_id: int = 1026, delay_pattern: Optional[list[int]] = None, initializer_range: float = 0.02, use_cache: bool = True, **kwargs, ): if isinstance(encoder_config, dict): encoder_config = DiaEncoderConfig(**encoder_config) if isinstance(decoder_config, dict): decoder_config = DiaDecoderConfig(**decoder_config) self.encoder_config = encoder_config if encoder_config is not None else DiaEncoderConfig() self.decoder_config = decoder_config if decoder_config is not None else DiaDecoderConfig() self.norm_eps = norm_eps self.delay_pattern = delay_pattern if delay_pattern is not None else [0, 8, 9, 10, 11, 12, 13, 14, 15] self.initializer_range = initializer_range self.use_cache = use_cache assert self.decoder_config.num_channels == len(self.delay_pattern), ( "Number of channels must match delay pattern length." ) super().__init__( pad_token_id=pad_token_id, eos_token_id=eos_token_id, bos_token_id=bos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, ) def get_text_config(self, *args, **kwargs): """Defaulting to audio config as it's the decoder in this case which is usually the text backbone""" return self.decoder_config __all__ = ["DiaConfig", "DiaEncoderConfig", "DiaDecoderConfig"]
DiaConfig
python
huggingface__transformers
src/transformers/models/mimi/modeling_mimi.py
{ "start": 45732, "end": 52627 }
class ____(nn.Module): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MimiTransformerLayer`] Args: config: MimiConfig """ def __init__(self, config: MimiConfig): super().__init__() self.layers = nn.ModuleList( [MimiTransformerLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self._attn_implementation = config._attn_implementation self.gradient_checkpointing = False self.config = config def forward( self, hidden_states: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[tuple, BaseModelOutputWithPast]: """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Embedded representation that will be contextualized by the model attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`Cache`, *optional*): It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache). If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
) use_cache = False if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + hidden_states.shape[1], device=hidden_states.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=hidden_states, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, )
MimiTransformerModel
python
great-expectations__great_expectations
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_north_carolina_zip.py
{ "start": 1798, "end": 4187 }
class ____(ColumnMapExpectation): """Expect values in this column to be valid North Carolina zipcodes. See https://pypi.org/project/zipcodes/ for more information. """ # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "valid_north_carolina_zip": ["27248", "27612", "28077", "28901"], "invalid_north_carolina_zip": ["-10000", "1234", "99999", "25487"], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "valid_north_carolina_zip"}, "out": {"success": True}, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "invalid_north_carolina_zip"}, "out": {"success": False}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.valid_north_carolina_zip" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", # "experimental", "beta", or "production" "tags": [ "hackathon", "typed-entities", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@luismdiaz01", "@derekma73", # Don't forget to add your github handle here! ], "requirements": ["zipcodes"], } if __name__ == "__main__": ExpectColumnValuesToBeValidNorthCarolinaZip().print_diagnostic_checklist()
ExpectColumnValuesToBeValidNorthCarolinaZip
python
tensorflow__tensorflow
tensorflow/python/distribute/distribute_lib.py
{ "start": 12851, "end": 16272 }
class ____(object): """Context manager when you are in `update()` or `update_non_slot()`.""" __slots__ = ["_replica_id", "_old_replica_id"] def __init__(self, replica_id): self._replica_id = replica_id self._old_replica_id = None def __enter__(self): self._old_replica_id = get_update_replica_id() _update_replica_id.current = self._replica_id def __exit__(self, exception_type, exception_value, traceback): del exception_type, exception_value, traceback _update_replica_id.current = self._old_replica_id # ------------------------------------------------------------------------------ # Internal API for validating the current thread mode def _require_cross_replica_or_default_context_extended(extended, error_message=None): """Verify in cross-replica context.""" context = _get_per_thread_mode() cross_replica = context.cross_replica_context if cross_replica is not None and cross_replica.extended is extended: return if context is _get_default_replica_mode(): return strategy = extended._container_strategy() # pylint: disable=protected-access # We have an error to report, figure out the right message. if context.strategy is not strategy: _wrong_strategy_scope(strategy, context) assert cross_replica is None if not error_message: error_message = ("Method requires being in cross-replica context, use " "get_replica_context().merge_call()") raise RuntimeError(error_message) def _wrong_strategy_scope(strategy, context): # Figure out the right error message. if not has_strategy(): raise RuntimeError( 'Need to be inside "with strategy.scope()" for %s' % (strategy,)) else: raise RuntimeError( "Mixing different tf.distribute.Strategy objects: %s is not %s" % (context.strategy, strategy)) def require_replica_context(replica_ctx): """Verify in `replica_ctx` replica context.""" context = _get_per_thread_mode() if context.replica_context is replica_ctx: return # We have an error to report, figure out the right message. 
if context.replica_context is None: raise RuntimeError("Need to be inside `call_for_each_replica()`") if context.strategy is replica_ctx.strategy: # Two different ReplicaContexts with the same tf.distribute.Strategy. raise RuntimeError("Mismatching ReplicaContext.") raise RuntimeError( "Mismatching tf.distribute.Strategy objects: %s is not %s." % (context.strategy, replica_ctx.strategy)) def _require_strategy_scope_strategy(strategy): """Verify in a `strategy.scope()` in this thread.""" context = _get_per_thread_mode() if context.strategy is strategy: return _wrong_strategy_scope(strategy, context) def _require_strategy_scope_extended(extended): """Verify in a `distribution_strategy.scope()` in this thread.""" context = _get_per_thread_mode() if context.strategy.extended is extended: return # Report error. strategy = extended._container_strategy() # pylint: disable=protected-access _wrong_strategy_scope(strategy, context) _creating_default_strategy_singleton = False # ------------------------------------------------------------------------------ # Internal API for setting the current thread mode as being either in a # replica or cross-replica context for a particular tf.distribute.Strategy.
UpdateContext
python
openai__openai-python
src/openai/types/beta/realtime/conversation_item_with_reference.py
{ "start": 1014, "end": 3288 }
class ____(BaseModel): id: Optional[str] = None """ For an item of type (`message` | `function_call` | `function_call_output`) this field allows the client to assign the unique ID of the item. It is not required because the server will generate one if not provided. For an item of type `item_reference`, this field is required and is a reference to any item that has previously existed in the conversation. """ arguments: Optional[str] = None """The arguments of the function call (for `function_call` items).""" call_id: Optional[str] = None """ The ID of the function call (for `function_call` and `function_call_output` items). If passed on a `function_call_output` item, the server will check that a `function_call` item with the same ID exists in the conversation history. """ content: Optional[List[Content]] = None """The content of the message, applicable for `message` items. - Message items of role `system` support only `input_text` content - Message items of role `user` support `input_text` and `input_audio` content - Message items of role `assistant` support `text` content. """ name: Optional[str] = None """The name of the function being called (for `function_call` items).""" object: Optional[Literal["realtime.item"]] = None """Identifier for the API object being returned - always `realtime.item`.""" output: Optional[str] = None """The output of the function call (for `function_call_output` items).""" role: Optional[Literal["user", "assistant", "system"]] = None """ The role of the message sender (`user`, `assistant`, `system`), only applicable for `message` items. """ status: Optional[Literal["completed", "incomplete", "in_progress"]] = None """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. 
""" type: Optional[Literal["message", "function_call", "function_call_output", "item_reference"]] = None """ The type of the item (`message`, `function_call`, `function_call_output`, `item_reference`). """
ConversationItemWithReference
python
langchain-ai__langchain
libs/langchain_v1/langchain/agents/middleware/tool_call_limit.py
{ "start": 4892, "end": 18846 }
class ____( AgentMiddleware[ToolCallLimitState[ResponseT], ContextT], Generic[ResponseT, ContextT], ): """Track tool call counts and enforces limits during agent execution. This middleware monitors the number of tool calls made and can terminate or restrict execution when limits are exceeded. It supports both thread-level (persistent across runs) and run-level (per invocation) call counting. Configuration: - `exit_behavior`: How to handle when limits are exceeded - `'continue'`: Block exceeded tools, let execution continue (default) - `'error'`: Raise an exception - `'end'`: Stop immediately with a `ToolMessage` + AI message for the single tool call that exceeded the limit (raises `NotImplementedError` if there are other pending tool calls (due to parallel tool calling). Examples: !!! example "Continue execution with blocked tools (default)" ```python from langchain.agents.middleware.tool_call_limit import ToolCallLimitMiddleware from langchain.agents import create_agent # Block exceeded tools but let other tools and model continue limiter = ToolCallLimitMiddleware( thread_limit=20, run_limit=10, exit_behavior="continue", # default ) agent = create_agent("openai:gpt-4o", middleware=[limiter]) ``` !!! example "Stop immediately when limit exceeded" ```python # End execution immediately with an AI message limiter = ToolCallLimitMiddleware(run_limit=5, exit_behavior="end") agent = create_agent("openai:gpt-4o", middleware=[limiter]) ``` !!! 
example "Raise exception on limit" ```python # Strict limit with exception handling limiter = ToolCallLimitMiddleware( tool_name="search", thread_limit=5, exit_behavior="error" ) agent = create_agent("openai:gpt-4o", middleware=[limiter]) try: result = await agent.invoke({"messages": [HumanMessage("Task")]}) except ToolCallLimitExceededError as e: print(f"Search limit exceeded: {e}") ``` """ state_schema = ToolCallLimitState # type: ignore[assignment] def __init__( self, *, tool_name: str | None = None, thread_limit: int | None = None, run_limit: int | None = None, exit_behavior: ExitBehavior = "continue", ) -> None: """Initialize the tool call limit middleware. Args: tool_name: Name of the specific tool to limit. If `None`, limits apply to all tools. thread_limit: Maximum number of tool calls allowed per thread. `None` means no limit. run_limit: Maximum number of tool calls allowed per run. `None` means no limit. exit_behavior: How to handle when limits are exceeded. - `'continue'`: Block exceeded tools with error messages, let other tools continue. Model decides when to end. - `'error'`: Raise a `ToolCallLimitExceededError` exception - `'end'`: Stop execution immediately with a `ToolMessage` + AI message for the single tool call that exceeded the limit. Raises `NotImplementedError` if there are multiple parallel tool calls to other tools or multiple pending tool calls. Raises: ValueError: If both limits are `None`, if `exit_behavior` is invalid, or if `run_limit` exceeds `thread_limit`. """ super().__init__() if thread_limit is None and run_limit is None: msg = "At least one limit must be specified (thread_limit or run_limit)" raise ValueError(msg) valid_behaviors = ("continue", "error", "end") if exit_behavior not in valid_behaviors: msg = f"Invalid exit_behavior: {exit_behavior!r}. 
Must be one of {valid_behaviors}" raise ValueError(msg) if thread_limit is not None and run_limit is not None and run_limit > thread_limit: msg = ( f"run_limit ({run_limit}) cannot exceed thread_limit ({thread_limit}). " "The run limit should be less than or equal to the thread limit." ) raise ValueError(msg) self.tool_name = tool_name self.thread_limit = thread_limit self.run_limit = run_limit self.exit_behavior = exit_behavior @property def name(self) -> str: """The name of the middleware instance. Includes the tool name if specified to allow multiple instances of this middleware with different tool names. """ base_name = self.__class__.__name__ if self.tool_name: return f"{base_name}[{self.tool_name}]" return base_name def _would_exceed_limit(self, thread_count: int, run_count: int) -> bool: """Check if incrementing the counts would exceed any configured limit. Args: thread_count: Current thread call count. run_count: Current run call count. Returns: True if either limit would be exceeded by one more call. """ return (self.thread_limit is not None and thread_count + 1 > self.thread_limit) or ( self.run_limit is not None and run_count + 1 > self.run_limit ) def _matches_tool_filter(self, tool_call: ToolCall) -> bool: """Check if a tool call matches this middleware's tool filter. Args: tool_call: The tool call to check. Returns: True if this middleware should track this tool call. """ return self.tool_name is None or tool_call["name"] == self.tool_name def _separate_tool_calls( self, tool_calls: list[ToolCall], thread_count: int, run_count: int ) -> tuple[list[ToolCall], list[ToolCall], int, int]: """Separate tool calls into allowed and blocked based on limits. Args: tool_calls: List of tool calls to evaluate. thread_count: Current thread call count. run_count: Current run call count. Returns: Tuple of `(allowed_calls, blocked_calls, final_thread_count, final_run_count)`. 
""" allowed_calls: list[ToolCall] = [] blocked_calls: list[ToolCall] = [] temp_thread_count = thread_count temp_run_count = run_count for tool_call in tool_calls: if not self._matches_tool_filter(tool_call): continue if self._would_exceed_limit(temp_thread_count, temp_run_count): blocked_calls.append(tool_call) else: allowed_calls.append(tool_call) temp_thread_count += 1 temp_run_count += 1 return allowed_calls, blocked_calls, temp_thread_count, temp_run_count @hook_config(can_jump_to=["end"]) def after_model( self, state: ToolCallLimitState[ResponseT], runtime: Runtime[ContextT], # noqa: ARG002 ) -> dict[str, Any] | None: """Increment tool call counts after a model call and check limits. Args: state: The current agent state. runtime: The langgraph runtime. Returns: State updates with incremented tool call counts. If limits are exceeded and exit_behavior is `'end'`, also includes a jump to end with a `ToolMessage` and AI message for the single exceeded tool call. Raises: ToolCallLimitExceededError: If limits are exceeded and `exit_behavior` is `'error'`. NotImplementedError: If limits are exceeded, `exit_behavior` is `'end'`, and there are multiple tool calls. 
""" # Get the last AIMessage to check for tool calls messages = state.get("messages", []) if not messages: return None # Find the last AIMessage last_ai_message = None for message in reversed(messages): if isinstance(message, AIMessage): last_ai_message = message break if not last_ai_message or not last_ai_message.tool_calls: return None # Get the count key for this middleware instance count_key = self.tool_name if self.tool_name else "__all__" # Get current counts thread_counts = state.get("thread_tool_call_count", {}).copy() run_counts = state.get("run_tool_call_count", {}).copy() current_thread_count = thread_counts.get(count_key, 0) current_run_count = run_counts.get(count_key, 0) # Separate tool calls into allowed and blocked allowed_calls, blocked_calls, new_thread_count, new_run_count = self._separate_tool_calls( last_ai_message.tool_calls, current_thread_count, current_run_count ) # Update counts to include only allowed calls for thread count # (blocked calls don't count towards thread-level tracking) # But run count includes blocked calls since they were attempted in this run thread_counts[count_key] = new_thread_count run_counts[count_key] = new_run_count + len(blocked_calls) # If no tool calls are blocked, just update counts if not blocked_calls: if allowed_calls: return { "thread_tool_call_count": thread_counts, "run_tool_call_count": run_counts, } return None # Get final counts for building messages final_thread_count = thread_counts[count_key] final_run_count = run_counts[count_key] # Handle different exit behaviors if self.exit_behavior == "error": # Use hypothetical thread count to show which limit was exceeded hypothetical_thread_count = final_thread_count + len(blocked_calls) raise ToolCallLimitExceededError( thread_count=hypothetical_thread_count, run_count=final_run_count, thread_limit=self.thread_limit, run_limit=self.run_limit, tool_name=self.tool_name, ) # Build tool message content (sent to model - no thread/run details) tool_msg_content = 
_build_tool_message_content(self.tool_name) # Inject artificial error ToolMessages for blocked tool calls artificial_messages: list[ToolMessage | AIMessage] = [ ToolMessage( content=tool_msg_content, tool_call_id=tool_call["id"], name=tool_call.get("name"), status="error", ) for tool_call in blocked_calls ] if self.exit_behavior == "end": # Check if there are tool calls to other tools that would continue executing other_tools = [ tc for tc in last_ai_message.tool_calls if self.tool_name is not None and tc["name"] != self.tool_name ] if other_tools: tool_names = ", ".join({tc["name"] for tc in other_tools}) msg = ( f"Cannot end execution with other tool calls pending. " f"Found calls to: {tool_names}. Use 'continue' or 'error' behavior instead." ) raise NotImplementedError(msg) # Build final AI message content (displayed to user - includes thread/run details) # Use hypothetical thread count (what it would have been if call wasn't blocked) # to show which limit was actually exceeded hypothetical_thread_count = final_thread_count + len(blocked_calls) final_msg_content = _build_final_ai_message_content( hypothetical_thread_count, final_run_count, self.thread_limit, self.run_limit, self.tool_name, ) artificial_messages.append(AIMessage(content=final_msg_content)) return { "thread_tool_call_count": thread_counts, "run_tool_call_count": run_counts, "jump_to": "end", "messages": artificial_messages, } # For exit_behavior="continue", return error messages to block exceeded tools return { "thread_tool_call_count": thread_counts, "run_tool_call_count": run_counts, "messages": artificial_messages, } @hook_config(can_jump_to=["end"]) async def aafter_model( self, state: ToolCallLimitState[ResponseT], runtime: Runtime[ContextT], ) -> dict[str, Any] | None: """Async increment tool call counts after a model call and check limits. Args: state: The current agent state. runtime: The langgraph runtime. Returns: State updates with incremented tool call counts. 
If limits are exceeded and exit_behavior is `'end'`, also includes a jump to end with a `ToolMessage` and AI message for the single exceeded tool call. Raises: ToolCallLimitExceededError: If limits are exceeded and `exit_behavior` is `'error'`. NotImplementedError: If limits are exceeded, `exit_behavior` is `'end'`, and there are multiple tool calls. """ return self.after_model(state, runtime)
ToolCallLimitMiddleware
python
doocs__leetcode
solution/2100-2199/2179.Count Good Triplets in an Array/Solution2.py
{ "start": 0, "end": 128 }
class ____: __slots__ = ("l", "r", "v") def __init__(self): self.l = 0 self.r = 0 self.v = 0
Node
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/sagemaker.py
{ "start": 64891, "end": 69495 }
class ____(SageMakerBaseOperator): """ Stops a SageMaker pipeline execution. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:SageMakerStopPipelineOperator` :param config: The configuration to start the pipeline execution. :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param pipeline_exec_arn: Amazon Resource Name of the pipeline execution to stop. :param wait_for_completion: If true, this operator will only complete once the pipeline is fully stopped. :param check_interval: How long to wait between checks for pipeline status when waiting for completion. :param verbose: Whether to print steps details when waiting for completion. Defaults to true, consider turning off for pipelines that have thousands of steps. :param fail_if_not_running: raises an exception if the pipeline stopped or succeeded before this was run :param deferrable: Run operator in the deferrable mode. :return str: Returns the status of the pipeline execution after the operation has been done. 
""" template_fields: Sequence[str] = aws_template_fields( "pipeline_exec_arn", ) def __init__( self, *, pipeline_exec_arn: str, wait_for_completion: bool = False, check_interval: int = CHECK_INTERVAL_SECOND, waiter_max_attempts: int = 9999, verbose: bool = True, fail_if_not_running: bool = False, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), **kwargs, ): super().__init__(config={}, **kwargs) self.pipeline_exec_arn = pipeline_exec_arn self.wait_for_completion = wait_for_completion self.check_interval = check_interval self.waiter_max_attempts = waiter_max_attempts self.verbose = verbose self.fail_if_not_running = fail_if_not_running self.deferrable = deferrable def execute(self, context: Context) -> str: status = self.hook.stop_pipeline( pipeline_exec_arn=self.pipeline_exec_arn, fail_if_not_running=self.fail_if_not_running, ) self.log.info( "Stop requested for pipeline execution with ARN %s. Status is now %s", self.pipeline_exec_arn, status, ) if status not in self.hook.pipeline_non_terminal_states: # pipeline already stopped return status # else, eventually wait for completion if self.deferrable: self.defer( trigger=SageMakerPipelineTrigger( waiter_type=SageMakerPipelineTrigger.Type.STOPPED, pipeline_execution_arn=self.pipeline_exec_arn, waiter_delay=self.check_interval, waiter_max_attempts=self.waiter_max_attempts, aws_conn_id=self.aws_conn_id, ), method_name="execute_complete", ) elif self.wait_for_completion: status = self.hook.check_status( self.pipeline_exec_arn, "PipelineExecutionStatus", lambda p: self.hook.describe_pipeline_exec(p, self.verbose), self.check_interval, non_terminal_states=self.hook.pipeline_non_terminal_states, max_ingestion_time=self.waiter_max_attempts * self.check_interval, )["PipelineExecutionStatus"] return status def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str: validated_event = validate_execute_complete_event(event) if validated_event["status"] != 
"success": raise AirflowException(f"Failure during pipeline execution: {validated_event}") # theoretically we should do a `describe` call to know this, # but if we reach this point, this is the only possible status return "Stopped"
SageMakerStopPipelineOperator
python
walkccc__LeetCode
solutions/2150. Find All Lonely Numbers in the Array/2150.py
{ "start": 0, "end": 253 }
class ____: def findLonely(self, nums: list[int]) -> list[int]: count = collections.Counter(nums) return [num for num, freq in count.items() if freq == 1 and count[num - 1] == 0 and count[num + 1] == 0]
Solution
python
pytorch__pytorch
torch/testing/_internal/common_distributed.py
{ "start": 57469, "end": 58522 }
class ____(torch._dynamo.test_case.TestCase): """ Test harness for single-process dynamo distributed tests, initializes dist process group. Prefer this for simple tests, as it's easier to debug. """ @classmethod def setUpClass(cls): super().setUpClass() # _exit_stack is set up in TestCase cls._exit_stack.enter_context( patch.dict( os.environ, { "MASTER_ADDR": "localhost", "MASTER_PORT": "12355", }, ) ) cls.rank = 0 device = torch.accelerator.current_accelerator().type cls.device = f"{device}:{cls.rank}" cls.device_ids = None if device in cls.device else [cls.rank] c10d.init_process_group( c10d.get_default_backend_for_device(device), rank=cls.rank, world_size=1 ) @classmethod def tearDownClass(cls): c10d.destroy_process_group() super().tearDownClass()
DynamoDistributedSingleProcTestCase
python
huggingface__transformers
src/transformers/models/umt5/modeling_umt5.py
{ "start": 21047, "end": 21784 }
class ____(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config: UMT5Config): super().__init__() self.dense = nn.Linear(config.d_model, config.d_model) self.dropout = nn.Dropout(p=config.classifier_dropout) self.out_proj = nn.Linear(config.d_model, config.num_labels) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states @auto_docstring
UMT5ClassificationHead
python
PrefectHQ__prefect
src/prefect/events/filters.py
{ "start": 7120, "end": 7457 }
class ____(EventDataFilter): id: Optional[list[UUID]] = Field( default=None, description="Only include events with one of these IDs" ) def includes(self, event: Event) -> bool: if self.id: if not any(event.id == id for id in self.id): return False return True
EventIDFilter
python
PyCQA__pylint
tests/regrtest_data/max_inferable_limit_for_classes/other_funcs.py
{ "start": 316, "end": 357 }
class ____(ReturnsRows): ...
Selectable
python
great-expectations__great_expectations
great_expectations/core/metric_function_types.py
{ "start": 1294, "end": 4940 }
class ____(enum.Enum): """Enum type, whose members depict the nature of return value of a metric implementation function (defined for a specified "ExecutionEngine" subclass) that is a (partial) Callable to be executed once execution plan is complete. The available types are: - `MAP_FN` -- metric implementation function returns a mapping transformation for "Domain" values that evaluates to a quantity (rather than a condition statement, or a series, etc.). - `MAP_SERIES` -- metric implementation function returns a mapping transformation for "Domain" values that evaluates to a series-valued (e.g., Pandas.Series) result (rather than a Callable for deferred execution). - `WINDOW_FN` -- metric implementation function returns specified windowing operation over "Domain" values (currently applicable only to "SparkDFExecutionEngine"). - `MAP_CONDITION_FN` -- metric implementation function returns a mapping transformation for "Domain" values that evaluates to a Callable (partial) computational component (as part of deferred execution plan) that expresses the specified condition (i.e., a logical operation). - `MAP_CONDITION_SERIES` -- metric implementation function returns a mapping transformation for "Domain" values that evaluates to a Callable (partial) computational component (as part of deferred execution plan) that expresses the specified condition (i.e., a logical operation) as a series-valued (e.g., Pandas.Series) result. - `WINDOW_CONDITION_FN` -- metric implementation function returns a windowing operation over "Domain" values that evaluates to a Callable (partial) computational component (as part of deferred execution plan) that expresses the specified condition (i.e., a logical operation). - `AGGREGATE_FN` -- metric implementation function returns an aggregation transformation over "Domain" values that evaluates to a Callable (partial) computational component (as part of deferred execution plan) that expresses the specified aggregated quantity. 
""" # noqa: E501 # FIXME CoP MAP_FN = "map_fn" # pertains to "PandasExecutionEngine" MAP_SERIES = "map_series" # pertains to "PandasExecutionEngine" WINDOW_FN = "window_fn" # currently pertains only to "SparkDFExecutionEngine" MAP_CONDITION_FN = ( "map_condition_fn" # pertains to "SqlAlchemyExecutionEngine" and "SparkDFExecutionEngine" ) MAP_CONDITION_SERIES = "map_condition_series" # pertains to "PandasExecutionEngine" WINDOW_CONDITION_FN = "window_condition_fn" # pertains to "SqlAlchemyExecutionEngine" and "SparkDFExecutionEngine" # noqa: E501 # FIXME CoP AGGREGATE_FN = ( "aggregate_fn" # pertains to "SqlAlchemyExecutionEngine" and "SparkDFExecutionEngine" ) @property def metric_suffix(self) -> str: """Examines the "name" property of this "Enum" and returns corresponding suffix for metric registration/usage. Returns: (str) designated metric name suffix """ # noqa: E501 # FIXME CoP if self.name in [ "MAP_FN", "MAP_SERIES", "WINDOW_FN", ]: return MetricPartialFunctionTypeSuffixes.MAP.value if self.name in [ "MAP_CONDITION_FN", "MAP_CONDITION_SERIES", "WINDOW_CONDITION_FN", ]: return MetricPartialFunctionTypeSuffixes.CONDITION.value if self.name == "AGGREGATE_FN": return MetricPartialFunctionTypeSuffixes.AGGREGATE_FUNCTION.value return ""
MetricPartialFunctionTypes
python
tiangolo__fastapi
docs_src/dataclasses/tutorial002.py
{ "start": 114, "end": 549 }
class ____: name: str price: float tags: List[str] = field(default_factory=list) description: Union[str, None] = None tax: Union[float, None] = None app = FastAPI() @app.get("/items/next", response_model=Item) async def read_next_item(): return { "name": "Island In The Moon", "price": 12.99, "description": "A place to be playin' and havin' fun", "tags": ["breater"], }
Item
python
chroma-core__chroma
chromadb/db/migrations.py
{ "start": 1731, "end": 1871 }
class ____(Exception): def __init__(self, alg: str): super().__init__(f"Invalid hash algorithm specified: {alg}")
InvalidHashError
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 362203, "end": 362534 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") node = sgqlc.types.Field("IssueTimelineItems", graphql_name="node")
IssueTimelineItemsEdge
python
openai__openai-python
src/openai/types/audio/speech_create_params.py
{ "start": 282, "end": 1780 }
class ____(TypedDict, total=False): input: Required[str] """The text to generate audio for. The maximum length is 4096 characters.""" model: Required[Union[str, SpeechModel]] """ One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. """ voice: Required[ Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar"]] ] """The voice to use when generating the audio. Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). """ instructions: str """Control the voice of your generated audio with additional instructions. Does not work with `tts-1` or `tts-1-hd`. """ response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] """The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`. """ speed: float """The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. """ stream_format: Literal["sse", "audio"] """The format to stream the audio in. Supported formats are `sse` and `audio`. `sse` is not supported for `tts-1` or `tts-1-hd`. """
SpeechCreateParams
python
run-llama__llama_index
llama-index-integrations/embeddings/llama-index-embeddings-huggingface-optimum-intel/llama_index/embeddings/huggingface_optimum_intel/base.py
{ "start": 425, "end": 6229 }
class ____(BaseEmbedding): folder_name: str = Field(description="Folder name to load from.") max_length: int = Field(description="Maximum length of input.") pooling: str = Field(description="Pooling strategy. One of ['cls', 'mean'].") normalize: bool = Field(default=True, description="Normalize embeddings or not.") query_instruction: Optional[str] = Field( description="Instruction to prepend to query text." ) text_instruction: Optional[str] = Field( description="Instruction to prepend to text." ) cache_folder: Optional[str] = Field( description="Cache folder for huggingface files.", default=None ) _model: Any = PrivateAttr() _tokenizer: Any = PrivateAttr() _device: Any = PrivateAttr() def __init__( self, folder_name: str, pooling: str = "cls", max_length: Optional[int] = None, normalize: bool = True, query_instruction: Optional[str] = None, text_instruction: Optional[str] = None, cache_folder: Optional[str] = None, model: Optional[Any] = None, tokenizer: Optional[Any] = None, embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, callback_manager: Optional[CallbackManager] = None, device: Optional[str] = None, ): try: from optimum.intel import IPEXModel except ImportError: raise ImportError( "Optimum-Intel requires the following dependencies; please install with " "`pip install optimum[exporters] " "optimum-intel neural-compressor intel_extension_for_pytorch`" ) device = device or infer_torch_device() model = model or IPEXModel.from_pretrained(folder_name).to(device) tokenizer = tokenizer or AutoTokenizer.from_pretrained(folder_name) if max_length is None: try: max_length = int(model.config.max_position_embeddings) except Exception: raise ValueError( "Unable to find max_length from model config. " "Please provide max_length." 
) try: max_length = min(max_length, int(tokenizer.model_max_length)) except Exception as exc: print(f"An error occurred while retrieving tokenizer max length: {exc}") if pooling not in ["cls", "mean"]: raise ValueError(f"Pooling {pooling} not supported.") super().__init__( embed_batch_size=embed_batch_size, callback_manager=callback_manager, folder_name=folder_name, max_length=max_length, pooling=pooling, normalize=normalize, query_instruction=query_instruction, text_instruction=text_instruction, ) self._model = model self._tokenizer = tokenizer self._device = device @classmethod def class_name(cls) -> str: return "IntelEmbedding" def _mean_pooling(self, model_output: Any, attention_mask: Any) -> Any: """Mean Pooling - Take attention mask into account for correct averaging.""" import torch # First element of model_output contains all token embeddings token_embeddings = model_output[0] input_mask_expanded = ( attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() ) return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( input_mask_expanded.sum(1), min=1e-9 ) def _cls_pooling(self, model_output: list) -> Any: """Use the CLS token as the pooling token.""" if isinstance(model_output, dict): token_embeddings = model_output["last_hidden_state"] else: token_embeddings = model_output[0] return token_embeddings[:, 0] def _embed(self, sentences: List[str]) -> List[List[float]]: """Embed sentences.""" encoded_input = self._tokenizer( sentences, padding=True, max_length=self.max_length, truncation=True, return_tensors="pt", ) import torch with torch.inference_mode(), torch.autocast(device_type=self._device): model_output = self._model(**encoded_input.to(self._device)) if self.pooling == "cls": embeddings = self._cls_pooling(model_output) else: embeddings = self._mean_pooling( model_output, encoded_input["attention_mask"].to(self._device) ) if self.normalize: embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) return 
embeddings.tolist() def _get_query_embedding(self, query: str) -> List[float]: """Get query embedding.""" query = format_query(query, self.model_name, self.query_instruction) return self._embed([query])[0] async def _aget_query_embedding(self, query: str) -> List[float]: """Get query embedding async.""" return self._get_query_embedding(query) async def _aget_text_embedding(self, text: str) -> List[float]: """Get text embedding async.""" return self._get_text_embedding(text) def _get_text_embedding(self, text: str) -> List[float]: """Get text embedding.""" text = format_text(text, self.model_name, self.text_instruction) return self._embed([text])[0] def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]: """Get text embeddings.""" texts = [ format_text(text, self.model_name, self.text_instruction) for text in texts ] return self._embed(texts)
IntelEmbedding
python
huggingface__transformers
src/transformers/models/janus/modular_janus.py
{ "start": 2848, "end": 7234 }
class ____(SiglipVisionConfig): r""" This is the configuration class to store the configuration of a [`JanusVisionModel`]. It is used to instantiate a `JanusVisionModel` according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): The number of input channels. patch_size (`int`, *optional*, defaults to 16): The size (resolution) of each patch. image_size (`int`, *optional*, defaults to 384): The size (resolution) of each image. attention_dropout (`float`, *optional*, defaults to 0.0): Dropout probability for attention weights. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"`, and `"gelu_new"` are supported. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of MLP hidden dimensionality to embedding dimensionality. attention_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys, and values in the attention layers. hidden_dropout_rate (`float`, *optional*, defaults to 0.0): The dropout probability for fully connected layers in the encoder. projection_dim (`int`, *optional*, defaults to 2048): Dimensionality of the MLP projection head. 
projection_dropout (`float`, *optional*, defaults to 0.0): Dropout probability for the projection layer. use_qk_norm (`bool`, *optional*, defaults to `False`): Whether to normalize the query and key matrices. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated normal initializer for initializing all weight matrices. depth (`int`, *optional*, defaults to 2): Number of hidden layers in the aligner module. num_image_tokens (`int`, *optional*, defaults to 576): Number of image tokens. """ model_type = "janus_vision_model" base_config_key = "vision_config" def __init__( self, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, num_channels=3, patch_size=16, image_size=384, attention_dropout=0.0, layer_norm_eps=1e-6, hidden_act="gelu", mlp_ratio=4.0, attention_bias=True, hidden_dropout_rate=0.0, projection_dim=2048, projection_dropout=0.0, use_qk_norm=False, initializer_range=0.02, depth=2, num_image_tokens=576, **kwargs, ): super().__init__( hidden_size=hidden_size, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, num_channels=num_channels, patch_size=patch_size, image_size=image_size, attention_dropout=attention_dropout, layer_norm_eps=layer_norm_eps, hidden_act=hidden_act, **kwargs, ) del self.intermediate_size self.mlp_ratio = mlp_ratio self.attention_bias = attention_bias self.hidden_dropout_rate = hidden_dropout_rate self.projection_dim = projection_dim self.projection_dropout = projection_dropout self.use_qk_norm = use_qk_norm self.initializer_range = initializer_range self.depth = depth self.num_image_tokens = num_image_tokens
JanusVisionConfig
python
airbytehq__airbyte
airbyte-integrations/connectors/source-braintree/source_braintree/schemas/cards.py
{ "start": 1451, "end": 1587 }
class ____(CreditCard): """ https://developer.paypal.com/braintree/docs/reference/response/samsung-pay-card """
SamsungPayCard
python
astropy__astropy
astropy/visualization/interval.py
{ "start": 4504, "end": 6051 }
class ____(BaseInterval): """ Interval based on a keeping a specified fraction of pixels (can be asymmetric). Parameters ---------- lower_percentile : float or None The lower percentile below which to ignore pixels. If None, then defaults to 0. upper_percentile : float or None The upper percentile above which to ignore pixels. If None, then defaults to 100. n_samples : int, optional Maximum number of values to use. If this is specified, and there are more values in the dataset as this, then values are randomly sampled from the array (with replacement). """ def __init__(self, lower_percentile=None, upper_percentile=None, n_samples=None): self.lower_percentile = ( lower_percentile if lower_percentile is not None else 0.0 ) self.upper_percentile = ( upper_percentile if upper_percentile is not None else 100.0 ) self.n_samples = n_samples def get_limits(self, values): values = self._process_values(values) # If needed, limit the number of samples. We sample with replacement # since this is much faster. if self.n_samples is not None and values.size > self.n_samples: values = np.random.choice(values, self.n_samples) # Determine values at percentiles vmin, vmax = np.percentile( values, (self.lower_percentile, self.upper_percentile) ) return vmin, vmax
AsymmetricPercentileInterval
python
google__pytype
pytype/pyc/opcodes.py
{ "start": 4561, "end": 5309 }
class ____(Opcode): """An opcode with one argument. Attributes: arg: The integer opcode argument read in from the bytecode argval: A decoded version of arg, performing the same steps the cpython interpreter does to convert arg into a python value. """ __slots__ = ("arg", "argval") def __init__(self, index, line, endline, col, endcol, arg, argval): super().__init__(index, line, endline, col, endcol) self.arg = arg self.argval = argval def __str__(self): out = f"{self.basic_str()} {self.argval}" if self.annotation: return f"{out} # type: {self.annotation}" else: return out # -------------------------------------------------------- # Fake opcodes used internally
OpcodeWithArg
python
airbytehq__airbyte
airbyte-ci/connectors/live-tests/src/live_tests/commons/evaluation_modes.py
{ "start": 117, "end": 870 }
class ____(Enum): """ Tests may be run in "diagnostic" mode or "strict" mode. When run in "diagnostic" mode, `AssertionError`s won't fail the test, but we will continue to surface any errors to the test report. In "strict" mode, tests pass/fail as usual. In live tests, diagnostic mode is used for tests that don't affect the overall functionality of the connector but that test an ideal state of the connector. Currently this is applicable to validation tests only. The diagnostic mode can be made available to a test using the @pytest.mark.allow_diagnostic_mode decorator, and passing in the --test-evaluation-mode=diagnostic flag. """ DIAGNOSTIC = "diagnostic" STRICT = "strict"
TestEvaluationMode
python
openai__openai-python
tests/api_resources/responses/test_input_items.py
{ "start": 446, "end": 2542 }
class ____: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_list(self, client: OpenAI) -> None: input_item = client.responses.input_items.list( response_id="response_id", ) assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: input_item = client.responses.input_items.list( response_id="response_id", after="after", include=["file_search_call.results"], limit=0, order="asc", ) assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.responses.input_items.with_raw_response.list( response_id="response_id", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" input_item = response.parse() assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.responses.input_items.with_streaming_response.list( response_id="response_id", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" input_item = response.parse() assert_matches_type(SyncCursorPage[ResponseItem], input_item, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_list(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `response_id` but received ''"): client.responses.input_items.with_raw_response.list( response_id="", )
TestInputItems
python
matplotlib__matplotlib
lib/matplotlib/dates.py
{ "start": 55103, "end": 56149 }
class ____(RRuleLocator): """ Make ticks on occurrences of each weekday. """ def __init__(self, byweekday=1, interval=1, tz=None): """ Parameters ---------- byweekday : int or list of int, default: all days Ticks will be placed on every weekday in *byweekday*. Default is every day. Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA, SU, the constants from :mod:`dateutil.rrule`, which have been imported into the :mod:`matplotlib.dates` namespace. interval : int, default: 1 The interval between each iteration. For example, if ``interval=2``, mark every second occurrence. tz : str or `~datetime.tzinfo`, default: :rc:`timezone` Ticks timezone. If a string, *tz* is passed to `dateutil.tz`. """ rule = rrulewrapper(DAILY, byweekday=byweekday, interval=interval, **self.hms0d) super().__init__(rule, tz=tz)
WeekdayLocator
python
pandas-dev__pandas
pandas/tests/util/test_deprecate_nonkeyword_arguments.py
{ "start": 3417, "end": 3965 }
class ____: @deprecate_nonkeyword_arguments(WARNING_CATEGORY, allowed_args=["self", "bar"]) def baz(self, bar=None, foobar=None): ... def test_foo_signature(): assert str(inspect.signature(Foo.baz)) == "(self, bar=None, *, foobar=None)" def test_class(): msg = ( rf"Starting with pandas version {WARNING_CATEGORY.version()} all arguments " r"of Foo\.baz except for the argument \'bar\' will be keyword-only" ) with tm.assert_produces_warning(WARNING_CATEGORY, match=msg): Foo().baz("qux", "quox")
Foo
python
scipy__scipy
scipy/signal/_upfirdn.py
{ "start": 2626, "end": 7976 }
class ____: """Helper for resampling.""" def __init__(self, h, x_dtype, up, down): h = np.asarray(h) if h.ndim != 1 or h.size == 0: raise ValueError('h must be 1-D with non-zero length') self._output_type = np.result_type(h.dtype, x_dtype, np.float32) h = np.asarray(h, self._output_type) self._up = int(up) self._down = int(down) if self._up < 1 or self._down < 1: raise ValueError('Both up and down must be >= 1') # This both transposes, and "flips" each phase for filtering self._h_trans_flip = _pad_h(h, self._up) self._h_trans_flip = np.ascontiguousarray(self._h_trans_flip) self._h_len_orig = len(h) def apply_filter(self, x, axis=-1, mode='constant', cval=0): """Apply the prepared filter to the specified axis of N-D signal x.""" output_len = _output_len(self._h_len_orig, x.shape[axis], self._up, self._down) # Explicit use of np.int64 for output_shape dtype avoids OverflowError # when allocating large array on platforms where intp is 32 bits. output_shape = np.asarray(x.shape, dtype=np.int64) output_shape[axis] = output_len out = np.zeros(output_shape, dtype=self._output_type, order='C') axis = axis % x.ndim mode = _check_mode(mode) _apply(np.asarray(x, self._output_type), self._h_trans_flip, out, self._up, self._down, axis, mode, cval) return out def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0): """Upsample, FIR filter, and downsample. Parameters ---------- h : array_like 1-D FIR (finite-impulse response) filter coefficients. x : array_like Input signal array. up : int, optional Upsampling rate. Default is 1. down : int, optional Downsampling rate. Default is 1. axis : int, optional The axis of the input data array along which to apply the linear filter. The filter is applied to each subarray along this axis. Default is -1. mode : str, optional The signal extension mode to use. The set ``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to modes provided by `numpy.pad`. 
``"smooth"`` implements a smooth extension by extending based on the slope of the last 2 points at each end of the array. ``"antireflect"`` and ``"antisymmetric"`` are anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode `"line"` extends the signal based on a linear trend defined by the first and last points along the ``axis``. .. versionadded:: 1.4.0 cval : float, optional The constant value to use when ``mode == "constant"``. .. versionadded:: 1.4.0 Returns ------- y : ndarray The output signal array. Dimensions will be the same as `x` except for along `axis`, which will change size according to the `h`, `up`, and `down` parameters. Notes ----- The algorithm is an implementation of the block diagram shown on page 129 of the Vaidyanathan text [1]_ (Figure 4.3-8d). The direct approach of upsampling by factor of P with zero insertion, FIR filtering of length ``N``, and downsampling by factor of Q is O(N*Q) per output sample. The polyphase implementation used here is O(N/P). .. versionadded:: 0.18 References ---------- .. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks, Prentice Hall, 1993. Examples -------- Simple operations: >>> import numpy as np >>> from scipy.signal import upfirdn >>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter array([ 1., 2., 3., 2., 1.]) >>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion array([ 1., 0., 0., 2., 0., 0., 3.]) >>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.]) >>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5]) >>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3 array([ 0., 3., 6., 9.]) >>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3 array([ 0. , 1. , 2.5, 4. , 5.5, 7. 
, 8.5]) Apply a single filter to multiple signals: >>> x = np.reshape(np.arange(8), (4, 2)) >>> x array([[0, 1], [2, 3], [4, 5], [6, 7]]) Apply along the last dimension of ``x``: >>> h = [1, 1] >>> upfirdn(h, x, 2) array([[ 0., 0., 1., 1.], [ 2., 2., 3., 3.], [ 4., 4., 5., 5.], [ 6., 6., 7., 7.]]) Apply along the 0th dimension of ``x``: >>> upfirdn(h, x, 2, axis=0) array([[ 0., 1.], [ 0., 1.], [ 2., 3.], [ 2., 3.], [ 4., 5.], [ 4., 5.], [ 6., 7.], [ 6., 7.]]) """ xp = array_namespace(h, x) x = np.asarray(x) ufd = _UpFIRDn(h, x.dtype, up, down) # This is equivalent to (but faster than) using np.apply_along_axis return xp.asarray(ufd.apply_filter(x, axis, mode, cval))
_UpFIRDn
python
google__pytype
pytype/overlays/typing_overlay.py
{ "start": 19290, "end": 19750 }
class ____(abstract.AnnotationClass): """Implementation of typing.Optional.""" def _build_value(self, node, inner, ellipses): self.ctx.errorlog.invalid_ellipses(self.ctx.vm.frames, ellipses, self.name) if len(inner) != 1: error = "typing.Optional can only contain one type parameter" self.ctx.errorlog.invalid_annotation(self.ctx.vm.frames, self, error) return abstract.Union((self.ctx.convert.none_type,) + inner, self.ctx)
Optional
python
Lightning-AI__lightning
tests/tests_fabric/strategies/launchers/test_multiprocessing_integration.py
{ "start": 726, "end": 2331 }
class ____(nn.Module): def __init__(self): super().__init__() self.layer = nn.Linear(2, 2) self.tied_layer = nn.Linear(2, 2) self.tied_layer.weight = self.layer.weight self.register_buffer("buffer", torch.ones(3)) @RunIf(skip_windows=True) @pytest.mark.flaky(reruns=3) @pytest.mark.parametrize("strategy", ["ddp_spawn", "ddp_fork"]) def test_memory_sharing_disabled(strategy): """Test that the multiprocessing launcher disables memory sharing on model parameters and buffers to avoid race conditions on model updates.""" if strategy == "ddp_fork" and sys.platform == "darwin": pytest.skip("ddp_fork is unsafe / unsupported on macOS due to fork + ObjC runtime issues") tensor = torch.rand(4) model = SimpleModel() assert not tensor.is_shared() assert not model.layer.weight.is_shared() assert model.layer.weight.data_ptr() == model.tied_layer.weight.data_ptr() fabric = Fabric(accelerator="cpu", devices=2, strategy=strategy) fabric.launch(_test_memory_sharing_disabled, tensor, model) def _test_memory_sharing_disabled(fabric, tensor, model): is_spawn = fabric.strategy.launcher._start_method == "spawn" if is_spawn: assert tensor.is_shared() assert not model.layer.weight.is_shared() assert not model.tied_layer.weight.is_shared() assert not model.buffer.is_shared() # weights remain tied assert model.layer.weight.data_ptr() == model.tied_layer.weight.data_ptr() assert torch.equal(model.layer.weight.data, model.tied_layer.weight.data) fabric.barrier()
SimpleModel
python
bokeh__bokeh
src/bokeh/models/scales.py
{ "start": 1475, "end": 2403 }
class ____(Transform): ''' Base class for ``Scale`` models that represent an invertible computation to be carried out on the client-side. JavaScript implementations should implement the following methods: .. code-block compute(x: number): number { # compute and return the transform of a single value } v_compute(xs: Arrayable<number>): Arrayable<number> { # compute and return the transform of an array of values } invert(sx: number): number { # compute and return the inverse transform of a single value } v_invert(sxs: Arrayable<number>): Arrayable<number> { # compute and return the inverse transform of an array of values } ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs)
Scale
python
pypa__hatch
tests/utils/test_structures.py
{ "start": 121, "end": 1698 }
class ____: def test_restoration(self): num_env_vars = len(os.environ) with EnvVars(): os.environ.clear() assert len(os.environ) == num_env_vars def test_set(self): env_var = get_random_name() with EnvVars({env_var: "foo"}): assert os.environ.get(env_var) == "foo" assert env_var not in os.environ def test_include(self): env_var = get_random_name() pattern = f"{env_var[:-2]}*" with EnvVars({env_var: "foo"}): num_env_vars = len(os.environ) with EnvVars(include=[get_random_name(), pattern]): assert len(os.environ) == 1 assert os.environ.get(env_var) == "foo" assert len(os.environ) == num_env_vars def test_exclude(self): env_var = get_random_name() pattern = f"{env_var[:-2]}*" with EnvVars({env_var: "foo"}): with EnvVars(exclude=[get_random_name(), pattern]): assert env_var not in os.environ assert os.environ.get(env_var) == "foo" def test_precedence(self): env_var1 = get_random_name() env_var2 = get_random_name() pattern = f"{env_var1[:-2]}*" with EnvVars({env_var1: "foo"}): num_env_vars = len(os.environ) with EnvVars({env_var2: "bar"}, include=[pattern], exclude=[pattern, env_var2]): assert len(os.environ) == 1 assert os.environ.get(env_var2) == "bar" assert len(os.environ) == num_env_vars
TestEnvVars
python
tiangolo__fastapi
docs_src/body_nested_models/tutorial002.py
{ "start": 110, "end": 413 }
class ____(BaseModel): name: str description: Union[str, None] = None price: float tax: Union[float, None] = None tags: List[str] = [] @app.put("/items/{item_id}") async def update_item(item_id: int, item: Item): results = {"item_id": item_id, "item": item} return results
Item
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1214550, "end": 1215639 }
class ____(sgqlc.types.Type, Node): """Represents a 'marked_as_duplicate' event on a given issue or pull request. """ __schema__ = github_schema __field_names__ = ("actor", "canonical", "created_at", "duplicate", "is_cross_repository") actor = sgqlc.types.Field(Actor, graphql_name="actor") """Identifies the actor who performed the event.""" canonical = sgqlc.types.Field("IssueOrPullRequest", graphql_name="canonical") """The authoritative issue or pull request which has been duplicated by another. """ created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" duplicate = sgqlc.types.Field("IssueOrPullRequest", graphql_name="duplicate") """The issue or pull request which has been marked as a duplicate of another. """ is_cross_repository = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isCrossRepository") """Canonical and duplicate belong to different repositories."""
MarkedAsDuplicateEvent
python
getsentry__sentry
tests/sentry/digests/test_notifications.py
{ "start": 4303, "end": 5987 }
class ____(TestCase): def test_old_style_key(self) -> None: assert split_key(f"mail:p:{self.project.id}") == ( self.project, ActionTargetType.ISSUE_OWNERS, None, None, ) def test_new_style_key_no_identifier(self) -> None: assert split_key(f"mail:p:{self.project.id}:{ActionTargetType.ISSUE_OWNERS.value}:") == ( self.project, ActionTargetType.ISSUE_OWNERS, None, None, ) def test_new_style_key_identifier(self) -> None: identifier = 123 assert split_key( f"mail:p:{self.project.id}:{ActionTargetType.ISSUE_OWNERS.value}:{identifier}" ) == (self.project, ActionTargetType.ISSUE_OWNERS, identifier, None) def test_fallthrough_choice(self) -> None: identifier = 123 fallthrough_choice = FallthroughChoiceType.ALL_MEMBERS assert split_key( f"mail:p:{self.project.id}:{ActionTargetType.ISSUE_OWNERS.value}:{identifier}:{fallthrough_choice.value}" ) == (self.project, ActionTargetType.ISSUE_OWNERS, identifier, fallthrough_choice) def test_no_fallthrough_choice(self) -> None: identifier = 123 assert split_key( f"mail:p:{self.project.id}:{ActionTargetType.ISSUE_OWNERS.value}:{identifier}:" ) == (self.project, ActionTargetType.ISSUE_OWNERS, identifier, None) def test_handles_none_identifier_gracefully(self) -> None: assert split_key( f"mail:p:{self.project.id}:{ActionTargetType.ISSUE_OWNERS.value}:None:" ) == (self.project, ActionTargetType.ISSUE_OWNERS, "None", None)
SplitKeyTestCase
python
scipy__scipy
scipy/stats/tests/test_stats.py
{ "start": 305808, "end": 308042 }
class ____: def test_trim_mean(self, xp): # don't use pre-sorted arrays idx = np.array([3, 5, 0, 1, 2, 4]) a2 = np.arange(24).reshape(6, 4)[idx, :] a3 = np.arange(24).reshape(6, 4, order='F')[idx, :] xp_assert_equal(stats.trim_mean(xp.asarray(a3), 2/6.), xp.asarray([2.5, 8.5, 14.5, 20.5])) xp_assert_equal(stats.trim_mean(xp.asarray(a2), 2/6.), xp.asarray([10., 11., 12., 13.])) idx4 = np.array([1, 0, 3, 2]) a4 = np.arange(24).reshape(4, 6)[idx4, :] xp_assert_equal(stats.trim_mean(xp.asarray(a4), 2/6.), xp.asarray([9., 10., 11., 12., 13., 14.])) # shuffled arange(24) as array_like a = [7, 11, 12, 21, 16, 6, 22, 1, 5, 0, 18, 10, 17, 9, 19, 15, 23, 20, 2, 14, 4, 13, 8, 3] xp_assert_equal(stats.trim_mean(xp.asarray(a), 2/6.), xp.asarray(11.5)) xp_assert_equal(stats.trim_mean(xp.asarray([5, 4, 3, 1, 2, 0]), 2/6.), xp.asarray(2.5)) # check axis argument rng = np.random.default_rng(3417115752) a = rng.integers(20, size=(5, 6, 4, 7)) a = xp.asarray(a) for axis in [0, 1, 2, 3, -1]: res1 = stats.trim_mean(a, 2/6., axis=axis) res2 = stats.trim_mean(xp.moveaxis(a, axis, 0), 2/6.) xp_assert_equal(res1, res2) res1 = stats.trim_mean(a, 2/6., axis=None) res2 = stats.trim_mean(xp_ravel(a), 2/6.) xp_assert_equal(res1, res2) with pytest.raises(ValueError, match="Proportion too big."): stats.trim_mean(a, 0.6) @pytest.mark.skip_xp_backends('jax.numpy', reason="lazy -> no _axis_nan_policy") @pytest.mark.skip_xp_backends('dask.array', reason="lazy -> no _axis_nan_policy") def test_empty_input(self, xp): # empty input with pytest.warns(SmallSampleWarning, match='too small'): xp_assert_equal(stats.trim_mean(xp.asarray([]), 0.0), xp.asarray(xp.nan)) with pytest.warns(SmallSampleWarning, match='too small'): xp_assert_equal(stats.trim_mean(xp.asarray([]), 0.6), xp.asarray(xp.nan)) @make_xp_test_case(stats.sigmaclip)
TestTrimMean
python
kamyu104__LeetCode-Solutions
Python/find-k-th-smallest-pair-distance.py
{ "start": 81, "end": 805 }
class ____(object): def smallestDistancePair(self, nums, k): """ :type nums: List[int] :type k: int :rtype: int """ # Sliding window solution def possible(guess, nums, k): count, left = 0, 0 for right, num in enumerate(nums): while num-nums[left] > guess: left += 1 count += right-left return count >= k nums.sort() left, right = 0, nums[-1]-nums[0]+1 while left < right: mid = left + (right-left)/2 if possible(mid, nums, k): right = mid else: left = mid+1 return left
Solution
python
ethereum__web3.py
web3/_utils/ens.py
{ "start": 1294, "end": 2686 }
class ____: def __init__(self, name_addr_pairs: dict[str, ChecksumAddress]) -> None: self.registry = dict(name_addr_pairs) async def address(self, name: str) -> ChecksumAddress: return self.registry.get(name, None) @contextmanager def ens_addresses( w3: Union["Web3", "AsyncWeb3[Any]"], name_addr_pairs: dict[str, ChecksumAddress] ) -> Iterator[None]: original_ens = w3.ens if w3.provider.is_async: w3.ens = cast(AsyncENS, AsyncStaticENS(name_addr_pairs)) else: w3.ens = cast(ENS, StaticENS(name_addr_pairs)) yield w3.ens = original_ens @contextmanager def contract_ens_addresses( contract: "Contract", name_addr_pairs: dict[str, ChecksumAddress] ) -> Iterator[None]: """ Use this context manager to temporarily resolve name/address pairs supplied as the argument. For example: with contract_ens_addresses(mycontract, [('resolve-as-1s.eth', '0x111...111')]): # any contract call or transaction in here would only resolve the above ENS pair """ with ens_addresses(contract.w3, name_addr_pairs): yield # --- async --- # async def async_validate_name_has_address( async_ens: AsyncENS, name: str ) -> ChecksumAddress: addr = await async_ens.address(name) if not addr: raise NameNotFound(f"Could not find address for name {name!r}") return addr
AsyncStaticENS
python
wandb__wandb
tests/system_tests/test_artifacts/test_object_references.py
{ "start": 23569, "end": 35883 }
class ____: @fixture( params=[ table.__name__, image.__name__, point_cloud.__name__, bokeh.__name__, html.__name__, video.__name__, joined_table.__name__, audio_ref_https.__name__, audio_ref_s3.__name__, audio_ref_gs.__name__, ] ) def orig_obj(self, request) -> WBValue: return request.getfixturevalue(request.param) def test_media_obj_referential_equality( self, user, api, anon_storage_handlers, orig_obj, worker_id ): """General consistency check on media object references. In detail, this will check the following: - Add the object to an artifact Validate that "getting" this asset returns an object that is equal to the first - Add a reference to this asset in an intermediate artifact Validate that "getting" this reference asset returns an object that is equal to the first Validate that the symbolic links are proper - Add a reference to the intermediate reference in yet a third artifact Validate that "getting" this new reference asset returns an object that is equal to the first Validate that the intermediate object is not downloaded - there are no "leftover" assets (eg. classes.json) Validate that the symbolic links are proper """ # Name these temporary artifacts by worker ID to guard against race # conditions between parallel pytest-xdist processes. 
orig_name = f"orig-artifact-{worker_id}" mid_name = f"mid-artifact-{worker_id}" down_name = f"down-artifact-{worker_id}" with wandb.init() as run: orig_artifact = wandb.Artifact(orig_name, "database") orig_artifact.add(orig_obj, "obj1") run.log_artifact(orig_artifact) orig_artifact_ref = api.artifact(f"{orig_name}:latest") orig_dir = orig_artifact_ref._default_root() obj1 = orig_artifact_ref.get("obj1") if isinstance(orig_obj, (wandb.Table, wandb.JoinedTable)): orig_obj._eq_debug(obj1, True) else: assert orig_obj == obj1 assert (Path(orig_dir) / f"obj1.{orig_obj._log_type}.json").is_file() with wandb.init() as run: orig_artifact_ref = run.use_artifact(f"{orig_name}:latest") mid_artifact = wandb.Artifact(mid_name, "database") mid_obj = orig_artifact_ref.get("obj1") mid_artifact.add(mid_obj, "obj2") run.log_artifact(mid_artifact) mid_artifact_ref = api.artifact(f"{mid_name}:latest") mid_dir = mid_artifact_ref._default_root() obj2 = mid_artifact_ref.get("obj2") if isinstance(orig_obj, (wandb.Table, wandb.JoinedTable)): orig_obj._eq_debug(obj2, True) else: assert orig_obj == obj2 with wandb.init() as run: mid_artifact_ref = run.use_artifact(f"{mid_name}:latest") down_artifact = wandb.Artifact(down_name, "database") down_obj = mid_artifact_ref.get("obj2") down_artifact.add(down_obj, "obj3") run.log_artifact(down_artifact) down_artifact_ref = api.artifact(f"{down_name}:latest") obj3 = down_artifact_ref.get("obj3") if isinstance(orig_obj, (wandb.Table, wandb.JoinedTable)): orig_obj._eq_debug(obj3, True) else: assert orig_obj == obj3 assert not Path(mid_dir).is_dir() def test_joined_table_referential( user, api, make_image, image_path_1, cleanup_temp_subdirs ): src_image_1 = make_image(image_path_1) src_image_2 = make_image(image_path_1) src_image_3 = make_image(image_path_1) src_image_4 = make_image(image_path_1) src_table_1 = wandb.Table(["id", "image"], [[1, src_image_1], [2, src_image_2]]) src_table_2 = wandb.Table(["id", "image"], [[1, src_image_3], [2, src_image_4]]) 
src_jt_1 = wandb.JoinedTable(src_table_1, src_table_2, "id") with wandb.init() as run: orig_artifact = wandb.Artifact("art1", "database") orig_artifact.add(src_jt_1, "src_jt_1") run.log_artifact(orig_artifact) with wandb.init() as run: art1 = run.use_artifact("art1:latest") src_jt_1 = art1.get("src_jt_1") src_jt_2 = wandb.JoinedTable(src_jt_1._table1, src_jt_1._table2, "id") art2 = wandb.Artifact("art2", "database") art2.add(src_jt_2, "src_jt_2") run.log_artifact(art2) cleanup_temp_subdirs() art2 = api.artifact("art2:latest") src_jt_2 = art2.get("src_jt_2") src_jt_1._eq_debug(src_jt_2, True) assert src_jt_1 == src_jt_2 def test_joined_table_add_by_path( user, api, make_image, image_path_1, cleanup_temp_subdirs ): artifact_name_1 = "tables_1" artifact_name_2 = "tables_2" artifact_type = "database" src_image_1 = make_image(image_path_1) src_image_2 = make_image(image_path_1) src_image_3 = make_image(image_path_1) src_image_4 = make_image(image_path_1) src_table_1 = wandb.Table(["id", "image"], [[1, src_image_1], [2, src_image_2]]) src_table_2 = wandb.Table(["id", "image"], [[1, src_image_3], [2, src_image_4]]) table_name_1 = "src_table_1" table_name_2 = "src_table_2" with wandb.init() as run: tables = wandb.Artifact(artifact_name_1, artifact_type) tables.add(src_table_1, table_name_1) tables.add(src_table_2, table_name_2) # Should be able to add by name directly jt = wandb.JoinedTable( f"{table_name_1}.table.json", f"{table_name_2}.table.json", "id" ) tables.add(jt, "jt") # Make sure it errors when you are not referencing the correct table names bad_table_name = "bad_table_name" jt_bad = wandb.JoinedTable( f"{bad_table_name}.table.json", f"{bad_table_name}.table.json", "id" ) with raises(ValueError): tables.add(jt_bad, "jt_bad") run.log_artifact(tables) cleanup_temp_subdirs() with wandb.init() as run: tables_2 = wandb.Artifact(artifact_name_2, artifact_type) upstream = run.use_artifact(f"{artifact_name_1}:latest") # Able to add by reference jt = wandb.JoinedTable( 
upstream.get_entry(table_name_1), upstream.get_entry(table_name_2), "id" ) tables_2.add(jt, "jt") run.log_artifact(tables_2) cleanup_temp_subdirs() tables_2 = api.artifact(f"{artifact_name_2}:latest") jt_2 = tables_2.get("jt") assert ( wandb.JoinedTable(upstream.get(table_name_1), upstream.get(table_name_2), "id") == jt_2 ) def test_image_reference_with_preferred_path( user, api, cleanup_temp_subdirs, image_path_1, image_path_2 ): orig_path_1 = str(image_path_1) orig_path_2 = str(image_path_2) desired_artifact_path = "images/sample.png" with wandb.init() as run: artifact = wandb.Artifact("artifact_1", type="test_artifact") # manually add the image to a desired path artifact.add_file(orig_path_1, desired_artifact_path) # create an image that uses this image (it should be smart enough not to add the image twice) image_1 = wandb.Image(orig_path_1) image_2 = wandb.Image(orig_path_2) # this one does not have the path preadded # add the image to the table table = wandb.Table(["image"], data=[[image_1], [image_2]]) # add the table to the artifact artifact.add(table, "table") run.log_artifact(artifact) cleanup_temp_subdirs() with wandb.init() as run: artifact_1 = run.use_artifact("artifact_1:latest") original_table = artifact_1.get("table") artifact = wandb.Artifact("artifact_2", type="test_artifact") # add the image by reference image_1 = wandb.Image(original_table.data[0][0]) image_2 = wandb.Image(original_table.data[1][0]) # add the image to the table table = wandb.Table(["image"], data=[[image_1], [image_2]]) # add the table to the artifact artifact.add(table, "table") run.log_artifact(artifact) cleanup_temp_subdirs() artifact_2 = api.artifact("artifact_2:latest") artifact_2.download() # This test just checks that all this logic does not fail def test_simple_partition_table(user, api): table_name = "dataset" table_parts_dir = "dataset_parts" artifact_name = "simple_dataset" artifact_type = "dataset" columns = ["A", "B", "C"] data = [[i, i * i, 2**i] for i in range(5)] 
# Add Data with wandb.init() as run: artifact = wandb.Artifact(artifact_name, type=artifact_type) for i, row in enumerate(data): table = wandb.Table(columns=columns, data=[row]) artifact.add(table, f"{table_parts_dir}/{i}") partition_table = wandb.data_types.PartitionedTable(parts_path=table_parts_dir) artifact.add(partition_table, table_name) run.log_artifact(artifact) # test partition_table = api.artifact(f"{artifact_name}:latest").get(table_name) for ndx, row in partition_table.iterrows(): assert row == data[ndx] def test_distributed_artifact_simple(user, api): # table_name = "dataset" artifact_name = f"simple_dist_dataset_{round(time.time())}" group_name = f"test_group_{np.random.rand()}" artifact_type = "distributed_dataset" count = 2 images = [] image_paths = [] # Add Data for i in range(count): with wandb.init(group=group_name) as run: artifact = wandb.Artifact(artifact_name, type=artifact_type) image = wandb.Image(np.random.randint(0, 255, (10, 10))) path = f"image_{i}" images.append(image) image_paths.append(path) artifact.add(image, path) run.upsert_artifact(artifact) # Finish with wandb.init(group=group_name) as run: artifact = wandb.Artifact(artifact_name, type=artifact_type) run.finish_artifact(artifact) # test artifact = api.artifact(f"{artifact_name}:latest") assert len(artifact.manifest.entries.keys()) == count * 2 # for image, path in zip(images, image_paths): # assert image == artifact.get(path) @fixture def cleanup_temp_subdirs(tmp_path: Path) -> Callable[[], None]: """A function to clean up temporary folders created by tests in this module.""" # Check that the current working directory is the same or a subdirectory # of the tmp_path fixture. This *should* be ensured by the # `filesystem_isolate` fixture, but verify. 
cwd = Path.cwd().resolve() try: cwd.relative_to(tmp_path) except ValueError: fail( f"Current working directory ({cwd!s}) is not a subpath of temporary " f"test directory ({tmp_path!s})" ) cleaned_subdirs = ["wandb", "artifacts", "upstream"] def _cleanup() -> None: for subdir in cleaned_subdirs: with suppress(FileNotFoundError): shutil.rmtree(cwd / subdir) return _cleanup @fixture(scope="session") def anon_storage_handlers(): from wandb.sdk.artifacts.storage_handlers.gcs_handler import GCSHandler from wandb.sdk.artifacts.storage_handlers.s3_handler import S3Handler def init_boto(self): if self._s3 is not None: return self._s3 self._botocore = botocore self._s3 = boto3.session.Session().resource( "s3", config=botocore.client.Config(signature_version=botocore.UNSIGNED) ) return self._s3 def init_gcs(self): if self._client is not None: return self._client self._client = google.cloud.storage.Client.create_anonymous_client() return self._client # Use MonkeyPatch.context(), as this fixture can/should be session-scoped, # while the `monkeypatch` fixture is strictly function-scoped. with MonkeyPatch.context() as patcher: patcher.setattr(S3Handler, "init_boto", init_boto) patcher.setattr(GCSHandler, "init_gcs", init_gcs) yield
TestMediaObjectReferentialEquality
python
google__pytype
pytype/overlays/flax_overlay.py
{ "start": 709, "end": 1003 }
class ____(overlay.Overlay): """A custom overlay for the 'flax.struct' module.""" def __init__(self, ctx): member_map = { "dataclass": Dataclass.make, } ast = ctx.loader.import_name("flax.struct") super().__init__(ctx, "flax.struct", member_map, ast)
DataclassOverlay
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 22570, "end": 23146 }
class ____(sgqlc.types.Enum): """The possible roles for enterprise membership. Enumeration Choices: * `MEMBER`: The user is a member of an organization in the enterprise. * `OWNER`: The user is an owner of an organization in the enterprise. * `UNAFFILIATED`: The user is not an owner of the enterprise, and not a member or owner of any organizations in the enterprise; only for EMU-enabled enterprises. """ __schema__ = github_schema __choices__ = ("MEMBER", "OWNER", "UNAFFILIATED")
EnterpriseUserAccountMembershipRole
python
getsentry__sentry
src/sentry/issues/endpoints/project_event_details.py
{ "start": 5396, "end": 6865 }
class ____(ProjectEndpoint): owner = ApiOwner.ISSUES publish_status = { "GET": ApiPublishStatus.EXPERIMENTAL, } def get(self, request: Request, project: Project, event_id: str) -> Response: event = eventstore.backend.get_event_by_id(project.id, event_id) if not event: return Response({"detail": "Event not found"}, status=404) event_dict = event.as_dict() if isinstance(event_dict["datetime"], datetime): event_dict["datetime"] = event_dict["datetime"].isoformat() try: scrub_ip_addresses = project.organization.get_option( "sentry:require_scrub_ip_address", False ) or project.get_option("sentry:scrub_ip_address", False) if scrub_ip_addresses: if "spans" in event_dict: for span in event_dict["spans"]: if "sentry_tags" not in span: continue if "user.ip" in span["sentry_tags"]: del span["sentry_tags"]["user.ip"] if "user" in span["sentry_tags"] and span["sentry_tags"]["user"].startswith( "ip:" ): span["sentry_tags"]["user"] = "ip:[ip]" except Exception as e: sentry_sdk.capture_exception(e) return Response(event_dict, status=200)
EventJsonEndpoint
python
django-mptt__django-mptt
mptt/querysets.py
{ "start": 55, "end": 1133 }
class ____(models.query.QuerySet): def as_manager(cls): # Address the circular dependency between `Queryset` and `Manager`. from mptt.managers import TreeManager manager = TreeManager.from_queryset(cls)() manager._built_with_as_manager = True return manager as_manager.queryset_only = True as_manager = classmethod(as_manager) def get_descendants(self, *args, **kwargs): """ Alias to `mptt.managers.TreeManager.get_queryset_descendants`. """ return self.model._tree_manager.get_queryset_descendants(self, *args, **kwargs) get_descendants.queryset_only = True def get_ancestors(self, *args, **kwargs): """ Alias to `mptt.managers.TreeManager.get_queryset_ancestors`. """ return self.model._tree_manager.get_queryset_ancestors(self, *args, **kwargs) get_ancestors.queryset_only = True def get_cached_trees(self): """ Alias to `mptt.utils.get_cached_trees`. """ return utils.get_cached_trees(self)
TreeQuerySet
python
weaviate__weaviate-python-client
weaviate/collections/classes/config.py
{ "start": 12225, "end": 12469 }
class ____(_GenerativeProvider): generative: Union[GenerativeSearches, _EnumLikeStr] = Field( default=GenerativeSearches.OLLAMA, frozen=True, exclude=True ) model: Optional[str] apiEndpoint: Optional[str]
_GenerativeOllama
python
fsspec__filesystem_spec
fsspec/parquet.py
{ "start": 477, "end": 13775 }
class ____(AbstractBufferedFile): def _fetch_range(self, start, end): raise NotImplementedError def open_parquet_files( path: list[str], mode: Literal["rb"] = "rb", fs: None | fsspec.AbstractFileSystem = None, metadata=None, columns: None | list[str] = None, row_groups: None | list[int] = None, storage_options: None | dict = None, engine: str = "auto", max_gap: int = 64_000, max_block: int = 256_000_000, footer_sample_size: int = 1_000_000, filters: None | list[list[list[str]]] = None, **kwargs, ): """ Return a file-like object for a single Parquet file. The specified parquet `engine` will be used to parse the footer metadata, and determine the required byte ranges from the file. The target path will then be opened with the "parts" (`KnownPartsOfAFile`) caching strategy. Note that this method is intended for usage with remote file systems, and is unlikely to improve parquet-read performance on local file systems. Parameters ---------- path: str Target file path. mode: str, optional Mode option to be passed through to `fs.open`. Default is "rb". metadata: Any, optional Parquet metadata object. Object type must be supported by the backend parquet engine. For now, only the "fastparquet" engine supports an explicit `ParquetFile` metadata object. If a metadata object is supplied, the remote footer metadata will not need to be transferred into local memory. fs: AbstractFileSystem, optional Filesystem object to use for opening the file. If nothing is specified, an `AbstractFileSystem` object will be inferred. engine : str, default "auto" Parquet engine to use for metadata parsing. Allowed options include "fastparquet", "pyarrow", and "auto". The specified engine must be installed in the current environment. If "auto" is specified, and both engines are installed, "fastparquet" will take precedence over "pyarrow". columns: list, optional List of all column names that may be read from the file. 
row_groups : list, optional List of all row-groups that may be read from the file. This may be a list of row-group indices (integers), or it may be a list of `RowGroup` metadata objects (if the "fastparquet" engine is used). storage_options : dict, optional Used to generate an `AbstractFileSystem` object if `fs` was not specified. max_gap : int, optional Neighboring byte ranges will only be merged when their inter-range gap is <= `max_gap`. Default is 64KB. max_block : int, optional Neighboring byte ranges will only be merged when the size of the aggregated range is <= `max_block`. Default is 256MB. footer_sample_size : int, optional Number of bytes to read from the end of the path to look for the footer metadata. If the sampled bytes do not contain the footer, a second read request will be required, and performance will suffer. Default is 1MB. filters : list[list], optional List of filters to apply to prevent reading row groups, of the same format as accepted by the loading engines. Ignored if ``row_groups`` is specified. 
**kwargs : Optional key-word arguments to pass to `fs.open` """ # Make sure we have an `AbstractFileSystem` object # to work with if fs is None: path0 = path if isinstance(path, (list, tuple)): path = path[0] fs, path = url_to_fs(path, **(storage_options or {})) else: path0 = path # For now, `columns == []` not supported, is the same # as all columns if columns is not None and len(columns) == 0: columns = None # Set the engine engine = _set_engine(engine) if isinstance(path0, (list, tuple)): paths = path0 elif "*" in path: paths = fs.glob(path) elif path0.endswith("/"): # or fs.isdir(path): paths = [ _ for _ in fs.find(path, withdirs=False, detail=False) if _.endswith((".parquet", ".parq")) ] else: paths = [path] data = _get_parquet_byte_ranges( paths, fs, metadata=metadata, columns=columns, row_groups=row_groups, engine=engine, max_gap=max_gap, max_block=max_block, footer_sample_size=footer_sample_size, filters=filters, ) # Call self.open with "parts" caching options = kwargs.pop("cache_options", {}).copy() return [ AlreadyBufferedFile( fs=None, path=fn, mode=mode, cache_type="parts", cache_options={ **options, "data": data.get(fn, {}), }, size=max(_[1] for _ in data.get(fn, {})), **kwargs, ) for fn in data ] def open_parquet_file(*args, **kwargs): """Create files tailed to reading specific parts of parquet files Please see ``open_parquet_files`` for details of the arguments. The difference is, this function always returns a single ``AleadyBufferedFile``, whereas `open_parquet_files`` always returns a list of files, even if there are one or zero matching parquet files. """ return open_parquet_files(*args, **kwargs)[0] def _get_parquet_byte_ranges( paths, fs, metadata=None, columns=None, row_groups=None, max_gap=64_000, max_block=256_000_000, footer_sample_size=1_000_000, engine="auto", filters=None, ): """Get a dictionary of the known byte ranges needed to read a specific column/row-group selection from a Parquet dataset. 
Each value in the output dictionary is intended for use as the `data` argument for the `KnownPartsOfAFile` caching strategy of a single path. """ # Set engine if necessary if isinstance(engine, str): engine = _set_engine(engine) # Pass to specialized function if metadata is defined if metadata is not None: # Use the provided parquet metadata object # to avoid transferring/parsing footer metadata return _get_parquet_byte_ranges_from_metadata( metadata, fs, engine, columns=columns, row_groups=row_groups, max_gap=max_gap, max_block=max_block, filters=filters, ) # Get file sizes asynchronously file_sizes = fs.sizes(paths) # Populate global paths, starts, & ends result = {} data_paths = [] data_starts = [] data_ends = [] add_header_magic = True if columns is None and row_groups is None and filters is None: # We are NOT selecting specific columns or row-groups. # # We can avoid sampling the footers, and just transfer # all file data with cat_ranges for i, path in enumerate(paths): result[path] = {} data_paths.append(path) data_starts.append(0) data_ends.append(file_sizes[i]) add_header_magic = False # "Magic" should already be included else: # We ARE selecting specific columns or row-groups. # # Gather file footers. # We just take the last `footer_sample_size` bytes of each # file (or the entire file if it is smaller than that) footer_starts = [] footer_ends = [] for i, path in enumerate(paths): footer_ends.append(file_sizes[i]) sample_size = max(0, file_sizes[i] - footer_sample_size) footer_starts.append(sample_size) footer_samples = fs.cat_ranges(paths, footer_starts, footer_ends) # Check our footer samples and re-sample if necessary. 
missing_footer_starts = footer_starts.copy() large_footer = 0 for i, path in enumerate(paths): footer_size = int.from_bytes(footer_samples[i][-8:-4], "little") real_footer_start = file_sizes[i] - (footer_size + 8) if real_footer_start < footer_starts[i]: missing_footer_starts[i] = real_footer_start large_footer = max(large_footer, (footer_size + 8)) if large_footer: warnings.warn( f"Not enough data was used to sample the parquet footer. " f"Try setting footer_sample_size >= {large_footer}." ) for i, block in enumerate( fs.cat_ranges( paths, missing_footer_starts, footer_starts, ) ): footer_samples[i] = block + footer_samples[i] footer_starts[i] = missing_footer_starts[i] # Calculate required byte ranges for each path for i, path in enumerate(paths): # Use "engine" to collect data byte ranges path_data_starts, path_data_ends = engine._parquet_byte_ranges( columns, row_groups=row_groups, footer=footer_samples[i], footer_start=footer_starts[i], filters=filters, ) data_paths += [path] * len(path_data_starts) data_starts += path_data_starts data_ends += path_data_ends result.setdefault(path, {})[(footer_starts[i], file_sizes[i])] = ( footer_samples[i] ) # Merge adjacent offset ranges data_paths, data_starts, data_ends = merge_offset_ranges( data_paths, data_starts, data_ends, max_gap=max_gap, max_block=max_block, sort=False, # Should already be sorted ) # Start by populating `result` with footer samples for i, path in enumerate(paths): result[path] = {(footer_starts[i], footer_ends[i]): footer_samples[i]} # Transfer the data byte-ranges into local memory _transfer_ranges(fs, result, data_paths, data_starts, data_ends) # Add b"PAR1" to header if necessary if add_header_magic: _add_header_magic(result) return result def _get_parquet_byte_ranges_from_metadata( metadata, fs, engine, columns=None, row_groups=None, max_gap=64_000, max_block=256_000_000, filters=None, ): """Simplified version of `_get_parquet_byte_ranges` for the case that an engine-specific `metadata` object 
is provided, and the remote footer metadata does not need to be transferred before calculating the required byte ranges. """ # Use "engine" to collect data byte ranges data_paths, data_starts, data_ends = engine._parquet_byte_ranges( columns, row_groups=row_groups, metadata=metadata, filters=filters ) # Merge adjacent offset ranges data_paths, data_starts, data_ends = merge_offset_ranges( data_paths, data_starts, data_ends, max_gap=max_gap, max_block=max_block, sort=False, # Should be sorted ) # Transfer the data byte-ranges into local memory result = {fn: {} for fn in list(set(data_paths))} _transfer_ranges(fs, result, data_paths, data_starts, data_ends) # Add b"PAR1" to header _add_header_magic(result) return result def _transfer_ranges(fs, blocks, paths, starts, ends): # Use cat_ranges to gather the data byte_ranges ranges = (paths, starts, ends) for path, start, stop, data in zip(*ranges, fs.cat_ranges(*ranges)): blocks[path][(start, stop)] = data def _add_header_magic(data): # Add b"PAR1" to file headers for path in list(data.keys()): add_magic = True for k in data[path]: if k[0] == 0 and k[1] >= 4: add_magic = False break if add_magic: data[path][(0, 4)] = b"PAR1" def _set_engine(engine_str): # Define a list of parquet engines to try if engine_str == "auto": try_engines = ("fastparquet", "pyarrow") elif not isinstance(engine_str, str): raise ValueError( "Failed to set parquet engine! 
" "Please pass 'fastparquet', 'pyarrow', or 'auto'" ) elif engine_str not in ("fastparquet", "pyarrow"): raise ValueError(f"{engine_str} engine not supported by `fsspec.parquet`") else: try_engines = [engine_str] # Try importing the engines in `try_engines`, # and choose the first one that succeeds for engine in try_engines: try: if engine == "fastparquet": return FastparquetEngine() elif engine == "pyarrow": return PyarrowEngine() except ImportError: pass # Raise an error if a supported parquet engine # was not found raise ImportError( f"The following parquet engines are not installed " f"in your python environment: {try_engines}." f"Please install 'fastparquert' or 'pyarrow' to " f"utilize the `fsspec.parquet` module." )
AlreadyBufferedFile
python
astropy__astropy
astropy/io/registry/interface.py
{ "start": 290, "end": 4232 }
class ____: """Base class for the worker object used in unified read() or write() methods. This lightweight object is created for each `read()` or `write()` call via ``read`` / ``write`` descriptors on the data object class. The key driver is to allow complete format-specific documentation of available method options via a ``help()`` method, e.g. ``Table.read.help('fits')``. Subclasses must define a ``__call__`` method which is what actually gets called when the data object ``read()`` or ``write()`` method is called. For the canonical example see the `~astropy.table.Table` class implementation (in particular the ``connect.py`` module there). Parameters ---------- instance : object Descriptor calling instance or None if no instance cls : type Descriptor calling class (either owner class or instance class) method_name : str Method name, e.g. 'read' or 'write' registry : ``_UnifiedIORegistryBase`` or None, optional The IO registry. """ def __init__(self, instance, cls, method_name, registry=None): if registry is None: from astropy.io.registry.compat import default_registry as registry self._registry = registry self._instance = instance self._cls = cls self._method_name = method_name # 'read' or 'write' @property def registry(self): """Unified I/O registry instance.""" return self._registry def help(self, format=None, out=None): """Output help documentation for the specified unified I/O ``format``. By default the help output is printed to the console via ``pydoc.pager``. Instead one can supplied a file handle object as ``out`` and the output will be written to that handle. Parameters ---------- format : str Unified I/O format name, e.g. 
'fits' or 'ascii.ecsv' out : None or file-like Output destination (default is stdout via a pager) """ cls = self._cls method_name = self._method_name # Get reader or writer function associated with the registry get_func = ( self._registry.get_reader if method_name == "read" else self._registry.get_writer ) try: if format: read_write_func = get_func(format, cls) except IORegistryError as err: reader_doc = "ERROR: " + str(err) else: if format: # Format-specific header = ( f"{cls.__name__}.{method_name}(format='{format}') documentation\n" ) doc = read_write_func.__doc__ else: # General docs header = f"{cls.__name__}.{method_name} general documentation\n" doc = getattr(cls, method_name).__doc__ reader_doc = re.sub(".", "=", header) reader_doc += header reader_doc += re.sub(".", "=", header) reader_doc += os.linesep if doc is not None: reader_doc += inspect.cleandoc(doc) if out is None: pydoc.pager(reader_doc) else: out.write(reader_doc) def list_formats(self, out=None): """Print a list of available formats to console (or ``out`` filehandle). out : None or file handle object Output destination (default is stdout via a pager) """ tbl = self._registry.get_formats(self._cls, self._method_name.capitalize()) del tbl["Data class"] if out is None: tbl.pprint(max_lines=-1, max_width=-1) else: out.write("\n".join(tbl.pformat(max_lines=-1, max_width=-1))) return out # -----------------------------------------------------------------------------
UnifiedReadWrite
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dataproc_metastore.py
{ "start": 13784, "end": 15467 }
class ____: @mock.patch("airflow.providers.google.cloud.operators.dataproc_metastore.DataprocMetastoreHook") @mock.patch( "airflow.providers.google.cloud.operators.dataproc_metastore" ".DataprocMetastoreRestoreServiceOperator._wait_for_restore_service" ) def test_assert_valid_hook_call(self, mock_wait, mock_hook) -> None: task = DataprocMetastoreRestoreServiceOperator( task_id=TASK_ID, region=GCP_LOCATION, project_id=GCP_PROJECT_ID, service_id=TEST_SERVICE_ID, backup_id=TEST_BACKUP_ID, backup_region=GCP_LOCATION, backup_project_id=GCP_PROJECT_ID, backup_service_id=TEST_SERVICE_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN, ) mock_wait.return_value = None task.execute(context=mock.MagicMock()) mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN) mock_hook.return_value.restore_service.assert_called_once_with( region=GCP_LOCATION, project_id=GCP_PROJECT_ID, service_id=TEST_SERVICE_ID, backup_id=TEST_BACKUP_ID, backup_region=GCP_LOCATION, backup_project_id=GCP_PROJECT_ID, backup_service_id=TEST_SERVICE_ID, restore_type=None, request_id=None, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, )
TestDataprocMetastoreRestoreServiceOperator
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 269342, "end": 271131 }
class ____(GeneratedAirbyteSource): @public def __init__( self, name: str, pardot_business_unit_id: str, client_id: str, client_secret: str, refresh_token: str, start_date: Optional[str] = None, is_sandbox: Optional[bool] = None, ): """Airbyte Source for Pardot. Args: name (str): The name of the destination. pardot_business_unit_id (str): Pardot Business ID, can be found at Setup > Pardot > Pardot Account Setup client_id (str): The Consumer Key that can be found when viewing your app in Salesforce client_secret (str): The Consumer Secret that can be found when viewing your app in Salesforce refresh_token (str): Salesforce Refresh Token used for Airbyte to access your Salesforce account. If you don't know what this is, follow this guide to retrieve it. start_date (Optional[str]): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated. Leave blank to skip this filter is_sandbox (Optional[bool]): Whether or not the app is in a Salesforce sandbox. If you do not know what this, assume it is false. """ self.pardot_business_unit_id = check.str_param( pardot_business_unit_id, "pardot_business_unit_id" ) self.client_id = check.str_param(client_id, "client_id") self.client_secret = check.str_param(client_secret, "client_secret") self.refresh_token = check.str_param(refresh_token, "refresh_token") self.start_date = check.opt_str_param(start_date, "start_date") self.is_sandbox = check.opt_bool_param(is_sandbox, "is_sandbox") super().__init__("Pardot", name)
PardotSource
python
huggingface__transformers
src/transformers/models/glm/modular_glm.py
{ "start": 5011, "end": 5277 }
class ____(LlamaAttention): def __init__(self, config: GlmConfig, layer_idx: Optional[int] = None): super().__init__(config, layer_idx) self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
GlmAttention
python
python-pillow__Pillow
src/PIL/MspImagePlugin.py
{ "start": 1110, "end": 1915 }
class ____(ImageFile.ImageFile): format = "MSP" format_description = "Windows Paint" def _open(self) -> None: # Header assert self.fp is not None s = self.fp.read(32) if not _accept(s): msg = "not an MSP file" raise SyntaxError(msg) # Header checksum checksum = 0 for i in range(0, 32, 2): checksum = checksum ^ i16(s, i) if checksum != 0: msg = "bad MSP checksum" raise SyntaxError(msg) self._mode = "1" self._size = i16(s, 4), i16(s, 6) if s.startswith(b"DanM"): self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 32, "1")] else: self.tile = [ImageFile._Tile("MSP", (0, 0) + self.size, 32)]
MspImageFile
python
getsentry__sentry
src/sentry/utils/kvstore/redis.py
{ "start": 248, "end": 1015 }
class ____(KVStorage[str, T]): """ This class provides a key/value store backed by Redis (either a single node or cluster.) """ def __init__(self, client: StrictRedis[T] | RedisCluster[T]) -> None: self.client: StrictRedis[T] | RedisCluster[T] = client def get(self, key: str) -> T | None: return self.client.get(key.encode("utf8")) def set(self, key: str, value: T, ttl: timedelta | None = None) -> None: self.client.set(key.encode("utf8"), value, ex=ttl) def delete(self, key: str) -> None: self.client.delete(key.encode("utf8")) def bootstrap(self, automatic_expiry: bool = True) -> None: pass # nothing to do def destroy(self) -> None: self.client.flushdb()
RedisKVStorage
python
great-expectations__great_expectations
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_georgia_zip.py
{ "start": 1743, "end": 4078 }
class ____(ColumnMapExpectation): """Expect values in this column to be valid Georgia zipcodes. See https://pypi.org/project/zipcodes/ for more information. """ # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "valid_georgia_zip": ["30002", "30157", "30612", "31999"], "invalid_georgia_zip": ["-10000", "1234", "99999", "25487"], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "valid_georgia_zip"}, "out": {"success": True}, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "invalid_georgia_zip"}, "out": {"success": False}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.valid_georgia_zip" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", # "experimental", "beta", or "production" "tags": [ "hackathon", "typed-entities", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@luismdiaz01", "@derekma73", # Don't forget to add your github handle here! ], "requirements": ["zipcodes"], } if __name__ == "__main__": ExpectColumnValuesToBeValidGeorgiaZip().print_diagnostic_checklist()
ExpectColumnValuesToBeValidGeorgiaZip
python
python-visualization__folium
folium/plugins/polyline_text_path.py
{ "start": 161, "end": 2304 }
class ____(JSCSSMixin, MacroElement): """ Shows a text along a PolyLine. Parameters ---------- polyline: folium.features.PolyLine object The folium.features.PolyLine object to attach the text to. text: string The string to be attached to the polyline. repeat: bool, default False Specifies if the text should be repeated along the polyline. center: bool, default False Centers the text according to the polyline's bounding box below: bool, default False Show text below the path offset: int, default 0 Set an offset to position text relative to the polyline. orientation: int, default 0 Rotate text to a specified angle. attributes: dict Object containing the attributes applied to the text tag. Check valid attributes here: https://developer.mozilla.org/en-US/docs/Web/SVG/Element/text#attributes Example: {'fill': '#007DEF', 'font-weight': 'bold', 'font-size': '24'} See https://github.com/makinacorpus/Leaflet.TextPath for more information. """ _template = Template( """ {% macro script(this, kwargs) %} {{ this.polyline.get_name() }}.setText( {{ this.text|tojson }}, {{ this.options|tojavascript }} ); {% endmacro %} """ ) default_js = [ ( "polylinetextpath", "https://cdn.jsdelivr.net/npm/leaflet-textpath@1.2.3/leaflet.textpath.min.js", ) ] def __init__( self, polyline, text, repeat=False, center=False, below=False, offset=0, orientation=0, attributes=None, **kwargs ): super().__init__() self._name = "PolyLineTextPath" self.polyline = polyline self.text = text self.options = remove_empty( repeat=repeat, center=center, below=below, offset=offset, orientation=orientation, attributes=attributes, **kwargs )
PolyLineTextPath
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/classes1.py
{ "start": 245, "end": 271 }
class ____(app.C): ...
D
python
gevent__gevent
src/gevent/tests/test__semaphore.py
{ "start": 11152, "end": 11325 }
class ____(greentest.TestCase): def test_c_extension(self): self.assertEqual(Semaphore.__module__, 'gevent._gevent_c_semaphore')
TestCExt
python
ZoranPandovski__al-go-rithms
data_structures/Tree/python/tree_utils.py
{ "start": 38, "end": 179 }
class ____(object): def __init__(self, data=None, left=None, right=None): self.data = data self.left = left self.right = right
TreeNode
python
more-itertools__more-itertools
tests/test_more.py
{ "start": 166882, "end": 167731 }
class ____(TestCase): def test_basic(self): iterable = 'abcdefg' r = 4 for index, expected in enumerate( combinations_with_replacement(iterable, r) ): actual = mi.nth_combination_with_replacement(iterable, r, index) self.assertEqual(actual, expected) def test_long(self): actual = mi.nth_combination_with_replacement(range(90), 4, 2000000) expected = (22, 65, 68, 81) self.assertEqual(actual, expected) def test_invalid_r(self): for r in (-1, 3): with self.assertRaises(ValueError): mi.nth_combination_with_replacement([], r, 0) def test_invalid_index(self): with self.assertRaises(IndexError): mi.nth_combination_with_replacement('abcdefg', 3, -85)
NthCombinationWithReplacementTests
python
pytorch__pytorch
torch/_inductor/codegen/cuda/cutlass_python_evt.py
{ "start": 3226, "end": 4199 }
class ____(DefaultHandler): def __init__(self, parent_handler: "CutlassEVTCodegen"): self.parent_handler = parent_handler def _default(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any: # Handle op dispatch here if hasattr(self.parent_handler, name): fn = getattr(self.parent_handler, name) line = fn(*args, **kwargs) if name in ("load", "store"): return OpsValue(line) else: var = self.parent_handler._tmp_var() line = DelayReplaceLine( var, lambda: "D" if var == self.parent_handler.last_stored_var_name else var, f"{var} = {line}", ) self.parent_handler.body.writeline(line) return OpsValue(var) else: raise NotImplementedError(name)
_AssignmentFormatter
python
PyCQA__pylint
tests/functional/ext/docparams/parameter/missing_param_doc_required.py
{ "start": 655, "end": 869 }
class ____: """Example usage of "For the parameters, see" in init docstring""" def __init__(self, x, y): """docstring foo constructor For the parameters, see :func:`bla` """
ClassFoo
python
doocs__leetcode
solution/3300-3399/3379.Transformed Array/Solution.py
{ "start": 0, "end": 240 }
class ____: def constructTransformedArray(self, nums: List[int]) -> List[int]: ans = [] n = len(nums) for i, x in enumerate(nums): ans.append(nums[(i + x + n) % n] if x else 0) return ans
Solution
python
coleifer__peewee
tests/regressions.py
{ "start": 66726, "end": 68101 }
class ____(ModelTestCase): def test_thread_safe_meta(self): d1 = get_in_memory_db() d2 = get_in_memory_db() class Meta: database = d1 model_metadata_class = ThreadSafeDatabaseMetadata attrs = {'Meta': Meta} for i in range(1, 30): attrs['f%d' % i] = IntegerField() M = type('M', (TestModel,), attrs) sql = ('SELECT "t1"."f1", "t1"."f2", "t1"."f3", "t1"."f4" ' 'FROM "m" AS "t1"') query = M.select(M.f1, M.f2, M.f3, M.f4) def swap_db(): for i in range(100): self.assertEqual(M._meta.database, d1) self.assertSQL(query, sql) with d2.bind_ctx([M]): self.assertEqual(M._meta.database, d2) self.assertSQL(query, sql) self.assertEqual(M._meta.database, d1) self.assertSQL(query, sql) # From a separate thread, swap the database and verify it works # correctly. threads = [threading.Thread(target=swap_db) for i in range(20)] for t in threads: t.start() for t in threads: t.join() # In the main thread the original database has not been altered. self.assertEqual(M._meta.database, d1) self.assertSQL(query, sql)
TestThreadSafeMetaRegression
python
kubernetes-client__python
kubernetes/client/models/v1_flow_schema_spec.py
{ "start": 383, "end": 7897 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'distinguisher_method': 'V1FlowDistinguisherMethod', 'matching_precedence': 'int', 'priority_level_configuration': 'V1PriorityLevelConfigurationReference', 'rules': 'list[V1PolicyRulesWithSubjects]' } attribute_map = { 'distinguisher_method': 'distinguisherMethod', 'matching_precedence': 'matchingPrecedence', 'priority_level_configuration': 'priorityLevelConfiguration', 'rules': 'rules' } def __init__(self, distinguisher_method=None, matching_precedence=None, priority_level_configuration=None, rules=None, local_vars_configuration=None): # noqa: E501 """V1FlowSchemaSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._distinguisher_method = None self._matching_precedence = None self._priority_level_configuration = None self._rules = None self.discriminator = None if distinguisher_method is not None: self.distinguisher_method = distinguisher_method if matching_precedence is not None: self.matching_precedence = matching_precedence self.priority_level_configuration = priority_level_configuration if rules is not None: self.rules = rules @property def distinguisher_method(self): """Gets the distinguisher_method of this V1FlowSchemaSpec. # noqa: E501 :return: The distinguisher_method of this V1FlowSchemaSpec. # noqa: E501 :rtype: V1FlowDistinguisherMethod """ return self._distinguisher_method @distinguisher_method.setter def distinguisher_method(self, distinguisher_method): """Sets the distinguisher_method of this V1FlowSchemaSpec. 
:param distinguisher_method: The distinguisher_method of this V1FlowSchemaSpec. # noqa: E501 :type: V1FlowDistinguisherMethod """ self._distinguisher_method = distinguisher_method @property def matching_precedence(self): """Gets the matching_precedence of this V1FlowSchemaSpec. # noqa: E501 `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default. # noqa: E501 :return: The matching_precedence of this V1FlowSchemaSpec. # noqa: E501 :rtype: int """ return self._matching_precedence @matching_precedence.setter def matching_precedence(self, matching_precedence): """Sets the matching_precedence of this V1FlowSchemaSpec. `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default. # noqa: E501 :param matching_precedence: The matching_precedence of this V1FlowSchemaSpec. # noqa: E501 :type: int """ self._matching_precedence = matching_precedence @property def priority_level_configuration(self): """Gets the priority_level_configuration of this V1FlowSchemaSpec. # noqa: E501 :return: The priority_level_configuration of this V1FlowSchemaSpec. # noqa: E501 :rtype: V1PriorityLevelConfigurationReference """ return self._priority_level_configuration @priority_level_configuration.setter def priority_level_configuration(self, priority_level_configuration): """Sets the priority_level_configuration of this V1FlowSchemaSpec. 
:param priority_level_configuration: The priority_level_configuration of this V1FlowSchemaSpec. # noqa: E501 :type: V1PriorityLevelConfigurationReference """ if self.local_vars_configuration.client_side_validation and priority_level_configuration is None: # noqa: E501 raise ValueError("Invalid value for `priority_level_configuration`, must not be `None`") # noqa: E501 self._priority_level_configuration = priority_level_configuration @property def rules(self): """Gets the rules of this V1FlowSchemaSpec. # noqa: E501 `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema. # noqa: E501 :return: The rules of this V1FlowSchemaSpec. # noqa: E501 :rtype: list[V1PolicyRulesWithSubjects] """ return self._rules @rules.setter def rules(self, rules): """Sets the rules of this V1FlowSchemaSpec. `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema. # noqa: E501 :param rules: The rules of this V1FlowSchemaSpec. 
# noqa: E501 :type: list[V1PolicyRulesWithSubjects] """ self._rules = rules def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1FlowSchemaSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1FlowSchemaSpec): return True return self.to_dict() != other.to_dict()
V1FlowSchemaSpec
python
google__jax
tests/pallas/tpu_sparsecore_pallas_test.py
{ "start": 60825, "end": 61575 }
class ____(PallasSCTest): def test_basic(self): self.skip_if_tc_tiling() num_steps = 16 x = jnp.arange(num_steps * 8).reshape(-1, 8) @self.vector_subcore_kernel( out_shape=x, in_specs=(pl.BlockSpec(memory_space=pltpu.HBM),), out_specs=pl.BlockSpec(memory_space=pltpu.HBM), ) def kernel(x_hbm_ref, o_hbm_ref): @functools.partial( pltpu.emit_pipeline, grid=(num_steps // 2,), in_specs=pl.BlockSpec((2, 8), lambda i: (i, 0)), out_specs=pl.BlockSpec((2, 8), lambda i: (i, 0)), ) def pipeline(x_ref, o_ref): o_ref[...] = x_ref[...] + 1 pipeline(x_hbm_ref, o_hbm_ref) np.testing.assert_array_equal(kernel(x), x + 1)
PipelineTest
python
ijl__orjson
test/test_numpy.py
{ "start": 319, "end": 33844 }
class ____: def test_numpy_array_d1_uintp(self): low = numpy.iinfo(numpy.uintp).min high = numpy.iinfo(numpy.uintp).max assert orjson.dumps( numpy.array([low, high], numpy.uintp), option=orjson.OPT_SERIALIZE_NUMPY, ) == f"[{low},{high}]".encode("ascii") def test_numpy_array_d1_intp(self): low = numpy.iinfo(numpy.intp).min high = numpy.iinfo(numpy.intp).max assert orjson.dumps( numpy.array([low, high], numpy.intp), option=orjson.OPT_SERIALIZE_NUMPY, ) == f"[{low},{high}]".encode("ascii") def test_numpy_array_d1_i64(self): assert ( orjson.dumps( numpy.array([-9223372036854775807, 9223372036854775807], numpy.int64), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[-9223372036854775807,9223372036854775807]" ) def test_numpy_array_d1_u64(self): assert ( orjson.dumps( numpy.array([0, 18446744073709551615], numpy.uint64), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[0,18446744073709551615]" ) def test_numpy_array_d1_i8(self): assert ( orjson.dumps( numpy.array([-128, 127], numpy.int8), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[-128,127]" ) def test_numpy_array_d1_u8(self): assert ( orjson.dumps( numpy.array([0, 255], numpy.uint8), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[0,255]" ) def test_numpy_array_d1_i32(self): assert ( orjson.dumps( numpy.array([-2147483647, 2147483647], numpy.int32), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[-2147483647,2147483647]" ) def test_numpy_array_d1_i16(self): assert ( orjson.dumps( numpy.array([-32768, 32767], numpy.int16), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[-32768,32767]" ) def test_numpy_array_d1_u16(self): assert ( orjson.dumps( numpy.array([0, 65535], numpy.uint16), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[0,65535]" ) def test_numpy_array_d1_u32(self): assert ( orjson.dumps( numpy.array([0, 4294967295], numpy.uint32), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[0,4294967295]" ) def test_numpy_array_d1_f32(self): assert ( orjson.dumps( numpy.array([1.0, 3.4028235e38], numpy.float32), option=orjson.OPT_SERIALIZE_NUMPY, ) == 
b"[1.0,3.4028235e38]" ) def test_numpy_array_d1_f16(self): assert ( orjson.dumps( numpy.array([-1.0, 0.0009765625, 1.0, 65504.0], numpy.float16), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[-1.0,0.0009765625,1.0,65504.0]" ) def test_numpy_array_f16_roundtrip(self): ref = [ -1.0, -2.0, 0.000000059604645, 0.000060975552, 0.00006103515625, 0.0009765625, 0.33325195, 0.99951172, 1.0, 1.00097656, 65504.0, ] obj = numpy.array(ref, numpy.float16) # type: ignore serialized = orjson.dumps( obj, option=orjson.OPT_SERIALIZE_NUMPY, ) deserialized = numpy.array(orjson.loads(serialized), numpy.float16) # type: ignore assert numpy.array_equal(obj, deserialized) def test_numpy_array_f16_edge(self): assert ( orjson.dumps( numpy.array( [ numpy.inf, -numpy.inf, numpy.nan, -0.0, 0.0, numpy.pi, ], numpy.float16, ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[null,null,null,-0.0,0.0,3.140625]" ) def test_numpy_array_f32_edge(self): assert ( orjson.dumps( numpy.array( [ numpy.inf, -numpy.inf, numpy.nan, -0.0, 0.0, numpy.pi, ], numpy.float32, ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[null,null,null,-0.0,0.0,3.1415927]" ) def test_numpy_array_f64_edge(self): assert ( orjson.dumps( numpy.array( [ numpy.inf, -numpy.inf, numpy.nan, -0.0, 0.0, numpy.pi, ], numpy.float64, ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[null,null,null,-0.0,0.0,3.141592653589793]" ) def test_numpy_array_d1_f64(self): assert ( orjson.dumps( numpy.array([1.0, 1.7976931348623157e308], numpy.float64), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[1.0,1.7976931348623157e308]" ) def test_numpy_array_d1_bool(self): assert ( orjson.dumps( numpy.array([True, False, False, True]), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[true,false,false,true]" ) def test_numpy_array_d1_datetime64_years(self): assert ( orjson.dumps( numpy.array( [ numpy.datetime64("1"), numpy.datetime64("970"), numpy.datetime64("1920"), numpy.datetime64("1971"), numpy.datetime64("2021"), numpy.datetime64("2022"), numpy.datetime64("2023"), 
numpy.datetime64("9999"), ], ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'["0001-01-01T00:00:00","0970-01-01T00:00:00","1920-01-01T00:00:00","1971-01-01T00:00:00","2021-01-01T00:00:00","2022-01-01T00:00:00","2023-01-01T00:00:00","9999-01-01T00:00:00"]' ) def test_numpy_array_d1_datetime64_months(self): assert ( orjson.dumps( numpy.array( [ numpy.datetime64("2021-01"), numpy.datetime64("2022-01"), numpy.datetime64("2023-01"), ], ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'["2021-01-01T00:00:00","2022-01-01T00:00:00","2023-01-01T00:00:00"]' ) def test_numpy_array_d1_datetime64_days(self): assert ( orjson.dumps( numpy.array( [ numpy.datetime64("2021-01-01"), numpy.datetime64("2021-01-01"), numpy.datetime64("2021-01-01"), ], ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'["2021-01-01T00:00:00","2021-01-01T00:00:00","2021-01-01T00:00:00"]' ) def test_numpy_array_d1_datetime64_hours(self): assert ( orjson.dumps( numpy.array( [ numpy.datetime64("2021-01-01T00"), numpy.datetime64("2021-01-01T01"), numpy.datetime64("2021-01-01T02"), ], ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'["2021-01-01T00:00:00","2021-01-01T01:00:00","2021-01-01T02:00:00"]' ) def test_numpy_array_d1_datetime64_minutes(self): assert ( orjson.dumps( numpy.array( [ numpy.datetime64("2021-01-01T00:00"), numpy.datetime64("2021-01-01T00:01"), numpy.datetime64("2021-01-01T00:02"), ], ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'["2021-01-01T00:00:00","2021-01-01T00:01:00","2021-01-01T00:02:00"]' ) def test_numpy_array_d1_datetime64_seconds(self): assert ( orjson.dumps( numpy.array( [ numpy.datetime64("2021-01-01T00:00:00"), numpy.datetime64("2021-01-01T00:00:01"), numpy.datetime64("2021-01-01T00:00:02"), ], ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'["2021-01-01T00:00:00","2021-01-01T00:00:01","2021-01-01T00:00:02"]' ) def test_numpy_array_d1_datetime64_milliseconds(self): assert ( orjson.dumps( numpy.array( [ numpy.datetime64("2021-01-01T00:00:00"), numpy.datetime64("2021-01-01T00:00:00.172"), 
numpy.datetime64("2021-01-01T00:00:00.567"), ], ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'["2021-01-01T00:00:00","2021-01-01T00:00:00.172000","2021-01-01T00:00:00.567000"]' ) def test_numpy_array_d1_datetime64_microseconds(self): assert ( orjson.dumps( numpy.array( [ numpy.datetime64("2021-01-01T00:00:00"), numpy.datetime64("2021-01-01T00:00:00.172"), numpy.datetime64("2021-01-01T00:00:00.567891"), ], ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'["2021-01-01T00:00:00","2021-01-01T00:00:00.172000","2021-01-01T00:00:00.567891"]' ) def test_numpy_array_d1_datetime64_nanoseconds(self): assert ( orjson.dumps( numpy.array( [ numpy.datetime64("2021-01-01T00:00:00"), numpy.datetime64("2021-01-01T00:00:00.172"), numpy.datetime64("2021-01-01T00:00:00.567891234"), ], ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'["2021-01-01T00:00:00","2021-01-01T00:00:00.172000","2021-01-01T00:00:00.567891"]' ) def test_numpy_array_d1_datetime64_picoseconds(self): try: orjson.dumps( numpy.array( [ numpy.datetime64("2021-01-01T00:00:00"), numpy.datetime64("2021-01-01T00:00:00.172"), numpy.datetime64("2021-01-01T00:00:00.567891234567"), ], ), option=orjson.OPT_SERIALIZE_NUMPY, ) raise AssertionError() except TypeError as exc: assert str(exc) == "unsupported numpy.datetime64 unit: picoseconds" def test_numpy_array_d2_i64(self): assert ( orjson.dumps( numpy.array([[1, 2, 3], [4, 5, 6]], numpy.int64), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[[1,2,3],[4,5,6]]" ) def test_numpy_array_d2_f64(self): assert ( orjson.dumps( numpy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], numpy.float64), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[[1.0,2.0,3.0],[4.0,5.0,6.0]]" ) def test_numpy_array_d3_i8(self): assert ( orjson.dumps( numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], numpy.int8), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[[[1,2],[3,4]],[[5,6],[7,8]]]" ) def test_numpy_array_d3_u8(self): assert ( orjson.dumps( numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], numpy.uint8), 
option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[[[1,2],[3,4]],[[5,6],[7,8]]]" ) def test_numpy_array_d3_i32(self): assert ( orjson.dumps( numpy.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], numpy.int32), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[[[1,2],[3,4]],[[5,6],[7,8]]]" ) def test_numpy_array_d3_i64(self): assert ( orjson.dumps( numpy.array([[[1, 2], [3, 4], [5, 6], [7, 8]]], numpy.int64), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[[[1,2],[3,4],[5,6],[7,8]]]" ) def test_numpy_array_d3_f64(self): assert ( orjson.dumps( numpy.array( [[[1.0, 2.0], [3.0, 4.0]], [[5.0, 6.0], [7.0, 8.0]]], numpy.float64, ), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[[[1.0,2.0],[3.0,4.0]],[[5.0,6.0],[7.0,8.0]]]" ) def test_numpy_array_fortran(self): array = numpy.array([[1, 2], [3, 4]], order="F") assert array.flags["F_CONTIGUOUS"] is True with pytest.raises(orjson.JSONEncodeError): orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY) assert orjson.dumps( array, default=numpy_default, option=orjson.OPT_SERIALIZE_NUMPY, ) == orjson.dumps(array.tolist()) def test_numpy_array_non_contiguous_message(self): array = numpy.array([[1, 2], [3, 4]], order="F") assert array.flags["F_CONTIGUOUS"] is True try: orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY) raise AssertionError() except TypeError as exc: assert ( str(exc) == "numpy array is not C contiguous; use ndarray.tolist() in default" ) def test_numpy_array_unsupported_dtype(self): array = numpy.array([[1, 2], [3, 4]], numpy.csingle) # type: ignore with pytest.raises(orjson.JSONEncodeError) as cm: orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY) assert "unsupported datatype in numpy array" in str(cm) def test_numpy_array_d1(self): array = numpy.array([1]) assert ( orjson.loads( orjson.dumps( array, option=orjson.OPT_SERIALIZE_NUMPY, ), ) == array.tolist() ) def test_numpy_array_d2(self): array = numpy.array([[1]]) assert ( orjson.loads( orjson.dumps( array, option=orjson.OPT_SERIALIZE_NUMPY, ), ) == array.tolist() ) def 
test_numpy_array_d3(self): array = numpy.array([[[1]]]) assert ( orjson.loads( orjson.dumps( array, option=orjson.OPT_SERIALIZE_NUMPY, ), ) == array.tolist() ) def test_numpy_array_d4(self): array = numpy.array([[[[1]]]]) assert ( orjson.loads( orjson.dumps( array, option=orjson.OPT_SERIALIZE_NUMPY, ), ) == array.tolist() ) def test_numpy_array_4_stride(self): array = numpy.random.rand(4, 4, 4, 4) assert ( orjson.loads( orjson.dumps( array, option=orjson.OPT_SERIALIZE_NUMPY, ), ) == array.tolist() ) def test_numpy_array_dimension_zero(self): array = numpy.array(0) assert array.ndim == 0 with pytest.raises(orjson.JSONEncodeError): orjson.dumps(array, option=orjson.OPT_SERIALIZE_NUMPY) array = numpy.empty((0, 4, 2)) assert ( orjson.loads( orjson.dumps( array, option=orjson.OPT_SERIALIZE_NUMPY, ), ) == array.tolist() ) array = numpy.empty((4, 0, 2)) assert ( orjson.loads( orjson.dumps( array, option=orjson.OPT_SERIALIZE_NUMPY, ), ) == array.tolist() ) array = numpy.empty((2, 4, 0)) assert ( orjson.loads( orjson.dumps( array, option=orjson.OPT_SERIALIZE_NUMPY, ), ) == array.tolist() ) def test_numpy_array_dimension_max(self): array = numpy.random.rand( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ) assert array.ndim == 32 assert ( orjson.loads( orjson.dumps( array, option=orjson.OPT_SERIALIZE_NUMPY, ), ) == array.tolist() ) def test_numpy_scalar_int8(self): assert orjson.dumps(numpy.int8(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0" assert ( orjson.dumps(numpy.int8(127), option=orjson.OPT_SERIALIZE_NUMPY) == b"127" ) assert ( orjson.dumps(numpy.int8(-128), option=orjson.OPT_SERIALIZE_NUMPY) == b"-128" ) def test_numpy_scalar_int16(self): assert orjson.dumps(numpy.int16(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0" assert ( orjson.dumps(numpy.int16(32767), option=orjson.OPT_SERIALIZE_NUMPY) == b"32767" ) assert ( orjson.dumps(numpy.int16(-32768), option=orjson.OPT_SERIALIZE_NUMPY) == b"-32768" ) def 
test_numpy_scalar_int32(self): assert orjson.dumps(numpy.int32(1), option=orjson.OPT_SERIALIZE_NUMPY) == b"1" assert ( orjson.dumps(numpy.int32(2147483647), option=orjson.OPT_SERIALIZE_NUMPY) == b"2147483647" ) assert ( orjson.dumps(numpy.int32(-2147483648), option=orjson.OPT_SERIALIZE_NUMPY) == b"-2147483648" ) def test_numpy_scalar_int64(self): assert ( orjson.dumps( numpy.int64(-9223372036854775808), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"-9223372036854775808" ) assert ( orjson.dumps( numpy.int64(9223372036854775807), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"9223372036854775807" ) def test_numpy_scalar_uint8(self): assert orjson.dumps(numpy.uint8(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0" assert ( orjson.dumps(numpy.uint8(255), option=orjson.OPT_SERIALIZE_NUMPY) == b"255" ) def test_numpy_scalar_uint16(self): assert orjson.dumps(numpy.uint16(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0" assert ( orjson.dumps(numpy.uint16(65535), option=orjson.OPT_SERIALIZE_NUMPY) == b"65535" ) def test_numpy_scalar_uint32(self): assert orjson.dumps(numpy.uint32(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0" assert ( orjson.dumps(numpy.uint32(4294967295), option=orjson.OPT_SERIALIZE_NUMPY) == b"4294967295" ) def test_numpy_scalar_uint64(self): assert orjson.dumps(numpy.uint64(0), option=orjson.OPT_SERIALIZE_NUMPY) == b"0" assert ( orjson.dumps( numpy.uint64(18446744073709551615), option=orjson.OPT_SERIALIZE_NUMPY, ) == b"18446744073709551615" ) def test_numpy_scalar_float16(self): assert ( orjson.dumps(numpy.float16(1.0), option=orjson.OPT_SERIALIZE_NUMPY) == b"1.0" ) def test_numpy_scalar_float32(self): assert ( orjson.dumps(numpy.float32(1.0), option=orjson.OPT_SERIALIZE_NUMPY) == b"1.0" ) def test_numpy_scalar_float64(self): assert ( orjson.dumps(numpy.float64(123.123), option=orjson.OPT_SERIALIZE_NUMPY) == b"123.123" ) def test_numpy_bool(self): assert ( orjson.dumps( {"a": numpy.bool_(True), "b": numpy.bool_(False)}, option=orjson.OPT_SERIALIZE_NUMPY, ) == 
b'{"a":true,"b":false}' ) def test_numpy_datetime_year(self): assert ( orjson.dumps(numpy.datetime64("2021"), option=orjson.OPT_SERIALIZE_NUMPY) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_month(self): assert ( orjson.dumps(numpy.datetime64("2021-01"), option=orjson.OPT_SERIALIZE_NUMPY) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_day(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01"), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_hour(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00"), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_minute(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00"), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_second(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00"), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_milli(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172"), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'"2021-01-01T00:00:00.172000"' ) def test_numpy_datetime_micro(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172576"), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'"2021-01-01T00:00:00.172576"' ) def test_numpy_datetime_nano(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172576789"), option=orjson.OPT_SERIALIZE_NUMPY, ) == b'"2021-01-01T00:00:00.172576"' ) def test_numpy_datetime_naive_utc_year(self): assert ( orjson.dumps( numpy.datetime64("2021"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC, ) == b'"2021-01-01T00:00:00+00:00"' ) def test_numpy_datetime_naive_utc_month(self): assert ( orjson.dumps( numpy.datetime64("2021-01"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC, ) == b'"2021-01-01T00:00:00+00:00"' ) def test_numpy_datetime_naive_utc_day(self): assert ( orjson.dumps( 
numpy.datetime64("2021-01-01"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC, ) == b'"2021-01-01T00:00:00+00:00"' ) def test_numpy_datetime_naive_utc_hour(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC, ) == b'"2021-01-01T00:00:00+00:00"' ) def test_numpy_datetime_naive_utc_minute(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC, ) == b'"2021-01-01T00:00:00+00:00"' ) def test_numpy_datetime_naive_utc_second(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC, ) == b'"2021-01-01T00:00:00+00:00"' ) def test_numpy_datetime_naive_utc_milli(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC, ) == b'"2021-01-01T00:00:00.172000+00:00"' ) def test_numpy_datetime_naive_utc_micro(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172576"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC, ) == b'"2021-01-01T00:00:00.172576+00:00"' ) def test_numpy_datetime_naive_utc_nano(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172576789"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC, ) == b'"2021-01-01T00:00:00.172576+00:00"' ) def test_numpy_datetime_naive_utc_utc_z_year(self): assert ( orjson.dumps( numpy.datetime64("2021"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z, ) == b'"2021-01-01T00:00:00Z"' ) def test_numpy_datetime_naive_utc_utc_z_month(self): assert ( orjson.dumps( numpy.datetime64("2021-01"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z, ) == b'"2021-01-01T00:00:00Z"' ) def test_numpy_datetime_naive_utc_utc_z_day(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC | 
orjson.OPT_UTC_Z, ) == b'"2021-01-01T00:00:00Z"' ) def test_numpy_datetime_naive_utc_utc_z_hour(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z, ) == b'"2021-01-01T00:00:00Z"' ) def test_numpy_datetime_naive_utc_utc_z_minute(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z, ) == b'"2021-01-01T00:00:00Z"' ) def test_numpy_datetime_naive_utc_utc_z_second(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z, ) == b'"2021-01-01T00:00:00Z"' ) def test_numpy_datetime_naive_utc_utc_z_milli(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z, ) == b'"2021-01-01T00:00:00.172000Z"' ) def test_numpy_datetime_naive_utc_utc_z_micro(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172576"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z, ) == b'"2021-01-01T00:00:00.172576Z"' ) def test_numpy_datetime_naive_utc_utc_z_nano(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172576789"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NAIVE_UTC | orjson.OPT_UTC_Z, ) == b'"2021-01-01T00:00:00.172576Z"' ) def test_numpy_datetime_omit_microseconds_year(self): assert ( orjson.dumps( numpy.datetime64("2021"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_omit_microseconds_month(self): assert ( orjson.dumps( numpy.datetime64("2021-01"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_omit_microseconds_day(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01"), option=orjson.OPT_SERIALIZE_NUMPY | 
orjson.OPT_OMIT_MICROSECONDS, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_omit_microseconds_hour(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_omit_microseconds_minute(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_omit_microseconds_second(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_omit_microseconds_milli(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_omit_microseconds_micro(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172576"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_omit_microseconds_nano(self): assert ( orjson.dumps( numpy.datetime64("2021-01-01T00:00:00.172576789"), option=orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_OMIT_MICROSECONDS, ) == b'"2021-01-01T00:00:00"' ) def test_numpy_datetime_nat(self): with pytest.raises(orjson.JSONEncodeError): orjson.dumps(numpy.datetime64("NaT"), option=orjson.OPT_SERIALIZE_NUMPY) with pytest.raises(orjson.JSONEncodeError): orjson.dumps([numpy.datetime64("NaT")], option=orjson.OPT_SERIALIZE_NUMPY) def test_numpy_repeated(self): data = numpy.array([[[1, 2], [3, 4], [5, 6], [7, 8]]], numpy.int64) # type: ignore for _ in range(3): assert ( orjson.dumps( data, option=orjson.OPT_SERIALIZE_NUMPY, ) == b"[[[1,2],[3,4],[5,6],[7,8]]]" ) @pytest.mark.skipif(numpy is None, reason="numpy is not installed")
TestNumpy
python
Lightning-AI__lightning
src/lightning/pytorch/utilities/model_helpers.py
{ "start": 3843, "end": 5544 }
class ____(classmethod, Generic[_T, _P, _R_co]): """Drop-in replacement for @classmethod, but raises an exception when the decorated method is called on an instance instead of a class type.""" method: Callable[Concatenate[type[_T], _P], _R_co] def __init__(self, method: Callable[Concatenate[type[_T], _P], _R_co]) -> None: super().__init__(method) self.method = method @override def __get__(self, instance: _T, cls: Optional[type[_T]] = None) -> Callable[_P, _R_co]: # type: ignore[override] # The wrapper ensures that the method can be inspected, but not called on an instance @functools.wraps(self.method) def wrapper(*args: Any, **kwargs: Any) -> _R_co: # Workaround for https://github.com/pytorch/pytorch/issues/67146 is_scripting = any(os.path.join("torch", "jit") in frameinfo.filename for frameinfo in inspect.stack()) cls_type = cls if cls is not None else type(instance) if instance is not None and not is_scripting: raise TypeError( f"The classmethod `{cls_type.__name__}.{self.method.__name__}` cannot be called on an instance." " Please call it on the class type and make sure the return value is used." ) return self.method(cls_type, *args, **kwargs) wrapper.__func__ = self.method return wrapper if TYPE_CHECKING: # trick static type checkers into thinking it's a @classmethod # https://github.com/microsoft/pyright/issues/5865 _restricted_classmethod = classmethod else: _restricted_classmethod = _restricted_classmethod_impl
_restricted_classmethod_impl
python
PyCQA__pylint
pylint/extensions/confusing_elif.py
{ "start": 488, "end": 2049 }
class ____(BaseChecker): """Checks if "elif" is used right after an indented block that finishes with "if" or "elif" itself. """ name = "confusing_elif" msgs = { "R5601": ( "Consecutive elif with differing indentation level, consider creating a function to separate the inner" " elif", "confusing-consecutive-elif", "Used when an elif statement follows right after an indented block which itself ends with if or elif. " "It may not be obvious if the elif statement was willingly or mistakenly unindented. " "Extracting the indented if statement into a separate function might avoid confusion and prevent " "errors.", ) } @only_required_for_messages("confusing-consecutive-elif") def visit_if(self, node: nodes.If) -> None: body_ends_with_if = isinstance( node.body[-1], nodes.If ) and self._has_no_else_clause(node.body[-1]) if node.has_elif_block() and body_ends_with_if: self.add_message("confusing-consecutive-elif", node=node.orelse[0]) @staticmethod def _has_no_else_clause(node: nodes.If) -> bool: orelse = node.orelse while orelse and isinstance(orelse[0], nodes.If): orelse = orelse[0].orelse if not orelse or isinstance(orelse[0], nodes.If): return True return False def register(linter: PyLinter) -> None: linter.register_checker(ConfusingConsecutiveElifChecker(linter))
ConfusingConsecutiveElifChecker
python
dagster-io__dagster
python_modules/libraries/dagster-airlift/dagster_airlift/in_airflow/partition_utils.py
{ "start": 443, "end": 4868 }
class ____(NamedTuple): partitioning_type: PartitionDefinitionType partition_keys: Sequence[str] # Eventually we can add more of these for different partitioning types additional_info: Optional[TimeWindowPartitioningInformation] @staticmethod def from_asset_node_graphql( asset_nodes: Sequence[Mapping[str, Any]], ) -> Optional["PartitioningInformation"]: assets_partitioned = [_asset_is_partitioned(asset_node) for asset_node in asset_nodes] if any(assets_partitioned) and not all(assets_partitioned): raise Exception( "Found some unpartitioned assets and some partitioned assets in the same task. " "For a given task, all assets must have the same partitions definition. " ) partition_keys_per_asset = [ set(asset_node["partitionKeys"]) for asset_node in asset_nodes if asset_node["isPartitioned"] ] if not all_sets_equal(partition_keys_per_asset): raise Exception( "Found differing partition keys across assets in this task. " "For a given task, all assets must have the same partitions definition. " ) # Now we can proceed with the assumption that all assets are partitioned and have the same partition keys. # This, we only look at the first asset node. asset_node = next(iter(asset_nodes)) if not asset_node["isPartitioned"]: return None partitioning_type = PartitionDefinitionType(asset_node["partitionDefinition"]["type"]) return PartitioningInformation( partitioning_type=partitioning_type, partition_keys=asset_node["partitionKeys"], additional_info=_build_additional_info_for_type(asset_node, partitioning_type), ) @property def time_window_partitioning_info(self) -> TimeWindowPartitioningInformation: if self.partitioning_type != PartitionDefinitionType.TIME_WINDOW: raise Exception( f"Partitioning type is {self.partitioning_type}, but expected {PartitionDefinitionType.TIME_WINDOW}" ) if self.additional_info is None: raise Exception( f"Partitioning type is {self.partitioning_type}, but no additional info was provided." 
) return self.additional_info def _build_additional_info_for_type( asset_node: Mapping[str, Any], partitioning_type: PartitionDefinitionType ) -> Optional[TimeWindowPartitioningInformation]: if partitioning_type != PartitionDefinitionType.TIME_WINDOW: return None return TimeWindowPartitioningInformation(fmt=asset_node["partitionDefinition"]["fmt"]) def all_sets_equal(list_of_sets): if not list_of_sets: return True return len(set.union(*list_of_sets)) == len(set.intersection(*list_of_sets)) def translate_logical_date_to_partition_key( logical_date: datetime, partitioning_info: PartitioningInformation ) -> str: if not partitioning_info.partitioning_type == PartitionDefinitionType.TIME_WINDOW: raise Exception( "Only time-window partitioned assets or non-partitioned assets are supported out of the box." ) fmt = partitioning_info.time_window_partitioning_info.fmt partitions_and_datetimes = [ (_get_partition_datetime(partition_key, fmt), partition_key) for partition_key in partitioning_info.partition_keys ] matching_partition = next( ( partition_key for datetime, partition_key in partitions_and_datetimes if datetime.timestamp() == logical_date.timestamp() ), None, ) if matching_partition is None: raise Exception(f"No partition key found for logical date {logical_date}") return matching_partition def _asset_is_partitioned(asset_node: Mapping[str, Any]) -> bool: return asset_node["isPartitioned"] def _get_partition_datetime(partition_key: str, fmt: str) -> datetime: try: return _add_default_utc_timezone_if_none(datetime.strptime(partition_key, fmt)) except ValueError: raise Exception(f"Could not parse partition key {partition_key} with format {fmt}.") def _add_default_utc_timezone_if_none(dt: datetime) -> datetime: return dt.replace(tzinfo=tz.utc) if dt.tzinfo is None else dt
PartitioningInformation
python
django__django
django/contrib/sessions/models.py
{ "start": 164, "end": 1250 }
class ____(AbstractBaseSession): """ Django provides full support for anonymous sessions. The session framework lets you store and retrieve arbitrary data on a per-site-visitor basis. It stores data on the server side and abstracts the sending and receiving of cookies. Cookies contain a session ID -- not the data itself. The Django sessions framework is entirely cookie-based. It does not fall back to putting session IDs in URLs. This is an intentional design decision. Not only does that behavior make URLs ugly, it makes your site vulnerable to session-ID theft via the "Referer" header. For complete documentation on using Sessions in your code, consult the sessions documentation that is shipped with Django (also available on the Django web site). """ objects = SessionManager() @classmethod def get_session_store_class(cls): from django.contrib.sessions.backends.db import SessionStore return SessionStore class Meta(AbstractBaseSession.Meta): db_table = "django_session"
Session
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py
{ "start": 1458, "end": 2727 }
class ____(Benchmark): r""" Sargan objective function. This class defines the Sargan [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Sargan}}(x) = \sum_{i=1}^{n} n \left (x_i^2 + 0.4 \sum_{i \neq j}^{n} x_ix_j \right) Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1, ..., n` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ change_dimensionality = True def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.custom_bounds = [(-5, 5), (-5, 5)] self.global_optimum = [[0.0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 x0 = x[:-1] x1 = roll(x, -1)[:-1] return sum(self.N * (x ** 2 + 0.4 * sum(x0 * x1)))
Sargan
python
spyder-ide__spyder
external-deps/spyder-remote-services/spyder_remote_services/services/files/handlers.py
{ "start": 4979, "end": 5386 }
class ____(BaseFSHandler): @web.authenticated @authorized async def get(self): detail_arg = self.get_argument("detail", default="true").lower() detail = detail_arg == "true" path = self.get_path_argument("path") async with self.stream_json() as write_json: for result in self.fs_ls(path, detail=detail): await write_json(result)
LsHandler
python
scrapy__scrapy
scrapy/core/downloader/handlers/file.py
{ "start": 322, "end": 672 }
class ____: lazy = False @defers def download_request(self, request: Request, spider: Spider) -> Response: filepath = file_uri_to_path(request.url) body = Path(filepath).read_bytes() respcls = responsetypes.from_args(filename=filepath, body=body) return respcls(url=request.url, body=body)
FileDownloadHandler
python
django__django
tests/dbshell/test_postgresql.py
{ "start": 255, "end": 6285 }
class ____(SimpleTestCase): def settings_to_cmd_args_env(self, settings_dict, parameters=None): if parameters is None: parameters = [] settings_dict.setdefault("OPTIONS", {}) return DatabaseClient.settings_to_cmd_args_env(settings_dict, parameters) def test_basic(self): self.assertEqual( self.settings_to_cmd_args_env( { "NAME": "dbname", "USER": "someuser", "PASSWORD": "somepassword", "HOST": "somehost", "PORT": "444", } ), ( ["psql", "-U", "someuser", "-h", "somehost", "-p", "444", "dbname"], {"PGPASSWORD": "somepassword"}, ), ) def test_nopass(self): self.assertEqual( self.settings_to_cmd_args_env( { "NAME": "dbname", "USER": "someuser", "HOST": "somehost", "PORT": "444", } ), ( ["psql", "-U", "someuser", "-h", "somehost", "-p", "444", "dbname"], None, ), ) def test_ssl_certificate(self): self.assertEqual( self.settings_to_cmd_args_env( { "NAME": "dbname", "USER": "someuser", "HOST": "somehost", "PORT": "444", "OPTIONS": { "sslmode": "verify-ca", "sslrootcert": "root.crt", "sslcert": "client.crt", "sslkey": "client.key", }, } ), ( ["psql", "-U", "someuser", "-h", "somehost", "-p", "444", "dbname"], { "PGSSLCERT": "client.crt", "PGSSLKEY": "client.key", "PGSSLMODE": "verify-ca", "PGSSLROOTCERT": "root.crt", }, ), ) def test_service(self): self.assertEqual( self.settings_to_cmd_args_env({"OPTIONS": {"service": "django_test"}}), (["psql"], {"PGSERVICE": "django_test"}), ) def test_passfile(self): self.assertEqual( self.settings_to_cmd_args_env( { "NAME": "dbname", "USER": "someuser", "HOST": "somehost", "PORT": "444", "OPTIONS": { "passfile": "~/.custompgpass", }, } ), ( ["psql", "-U", "someuser", "-h", "somehost", "-p", "444", "dbname"], {"PGPASSFILE": "~/.custompgpass"}, ), ) self.assertEqual( self.settings_to_cmd_args_env( { "OPTIONS": { "service": "django_test", "passfile": "~/.custompgpass", }, } ), ( ["psql"], {"PGSERVICE": "django_test", "PGPASSFILE": "~/.custompgpass"}, ), ) def test_column(self): self.assertEqual( self.settings_to_cmd_args_env( { "NAME": 
"dbname", "USER": "some:user", "PASSWORD": "some:password", "HOST": "::1", "PORT": "444", } ), ( ["psql", "-U", "some:user", "-h", "::1", "-p", "444", "dbname"], {"PGPASSWORD": "some:password"}, ), ) def test_accent(self): username = "rôle" password = "sésame" self.assertEqual( self.settings_to_cmd_args_env( { "NAME": "dbname", "USER": username, "PASSWORD": password, "HOST": "somehost", "PORT": "444", } ), ( ["psql", "-U", username, "-h", "somehost", "-p", "444", "dbname"], {"PGPASSWORD": password}, ), ) def test_parameters(self): self.assertEqual( self.settings_to_cmd_args_env({"NAME": "dbname"}, ["--help"]), (["psql", "--help", "dbname"], None), ) @skipUnless(connection.vendor == "postgresql", "Requires a PostgreSQL connection") def test_sigint_handler(self): """SIGINT is ignored in Python and passed to psql to abort queries.""" def _mock_subprocess_run(*args, **kwargs): handler = signal.getsignal(signal.SIGINT) self.assertEqual(handler, signal.SIG_IGN) sigint_handler = signal.getsignal(signal.SIGINT) # The default handler isn't SIG_IGN. self.assertNotEqual(sigint_handler, signal.SIG_IGN) with mock.patch("subprocess.run", new=_mock_subprocess_run): connection.client.runshell([]) # dbshell restores the original handler. self.assertEqual(sigint_handler, signal.getsignal(signal.SIGINT)) def test_crash_password_does_not_leak(self): # The password doesn't leak in an exception that results from a client # crash. args, env = self.settings_to_cmd_args_env({"PASSWORD": "somepassword"}, []) if env: env = {**os.environ, **env} fake_client = Path(__file__).with_name("fake_client.py") args[0:1] = [sys.executable, str(fake_client)] with self.assertRaises(subprocess.CalledProcessError) as ctx: subprocess.run(args, check=True, env=env) self.assertNotIn("somepassword", str(ctx.exception))
PostgreSqlDbshellCommandTestCase
python
django-extensions__django-extensions
tests/collisions/models.py
{ "start": 552, "end": 777 }
class ____(models.Model): # name conflict with testapp.Name real_name = models.CharField(max_length=50) number_of_users_having_this_name = models.IntegerField() class Meta: app_label = "collisions"
Name
python
huggingface__transformers
src/transformers/models/pegasus/modeling_pegasus.py
{ "start": 19244, "end": 26988 }
class ____(PegasusPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`PegasusEncoderLayer`]. Args: config: PegasusConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: PegasusConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) self.embed_positions = PegasusSinusoidalPositionalEmbedding( config.max_position_embeddings, embed_dim, self.padding_idx, ) self.layers = nn.ModuleList([PegasusEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def resize_position_embeddings(self, new_num_position_embeddings: int): """ Resizes position embeddings matrix of the model if `new_num_position_embeddings != config.max_position_embeddings`. Arguments: new_num_position_embeddings (`int`): The number of new position embeddings. If position embeddings are learned, increasing the size will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will add correct vectors at the end following the position encoding algorithm, whereas reducing the size will remove vectors from the end. 
""" logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...") self.config.max_position_embeddings = new_num_position_embeddings self.embed_positions = PegasusSinusoidalPositionalEmbedding( self.config.max_position_embeddings, self.config.d_model, self.padding_idx, ) init.copy_(self.embed_positions.weight, self.embed_positions.create_weight()) self.embed_positions.to(self.device) def get_position_embeddings(self) -> nn.Embedding: """ Returns the position embeddings matrix """ return self.embed_positions def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) attention_mask = create_bidirectional_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, ) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) to_drop = False if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: # skip the layer to_drop = True if to_drop: layer_outputs = (None, None) else: 
layer_outputs = encoder_layer( hidden_states, attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions )
PegasusEncoder
python
dagster-io__dagster
python_modules/libraries/dagster-looker/dagster_looker/lkml/liquid_utils.py
{ "start": 471, "end": 1339 }
class ____(Tag): """Defines a custom Liquid tag to match Looker's condition tag, treats the condition as always true when rendering the output SQL. https://jg-rp.github.io/liquid/guides/custom-tags#add-a-tag. """ name = TAG_CONDITION end = TAG_ENDCONDITION def __init__(self, env: Environment): super().__init__(env) self.parser = get_parser(self.env) def parse(self, stream: TokenStream) -> Node: expect(stream, TOKEN_TAG, value=TAG_CONDITION) stream.next_token() # Skip open condition tag stream.next_token() # Skip condition filter name block = self.parser.parse_block(stream, (TAG_ENDCONDITION, TOKEN_EOF)) expect(stream, TOKEN_TAG, value=TAG_ENDCONDITION) return block TAG_DATE_START = sys.intern("date_start") TAG_DATE_END = sys.intern("date_end")
ConditionTag
python
django__django
tests/test_utils/test_transactiontestcase.py
{ "start": 2361, "end": 2727 }
class ____(TransactionTestCase): available_apps = ["test_utils"] fixtures = ["person.json"] @classmethod def setUpClass(cls): super().setUpClass() cls.elvis = Person.objects.get(name="Elvis Presley") def test_fixture_loaded_during_class_setup(self): self.assertIsInstance(self.elvis, Person)
FixtureAvailableInSetUpClassTest
python
tensorflow__tensorflow
tensorflow/python/framework/extension_type_field.py
{ "start": 7118, "end": 16397 }
class ____(enum.Enum): """Enum to indicate what kind of value is being converted. Used by `_convert_fields` and `_convert_value` and their helper methods. """ VALUE = 1 # Converting an ExtensionType field SPEC = 2 # Converting an ExtensionType.Spec field DEFAULT = 3 # Converting a default value for __init__ def convert_fields(fields, field_values): """Type-checks and converts each field in `field_values` (in place). Args: fields: A list of `ExtensionTypeField` objects. field_values: A `dict` mapping field names to values. Must contain an entry for each field. I.e., `set(field_values.keys())` must be equal to `set([f.name for f in fields])`. Raises: ValueError: If the keys of `field_values` do not match the names of the fields in `fields`. TypeError: If any value in `field_values` does not have the type indicated by the corresponding `ExtensionTypeField` object. """ _convert_fields(fields, field_values, context=_ConversionContext.VALUE) def convert_fields_for_spec(fields, field_values): """Type-checks and converts field values for a TypeSpec (in place). This is similar to `convert_fields`, except that we expect a `TypeSpec` for tensor-like types. In particular, if the `value_type` of a field is `tf.Tensor` or a `CompositeTensor` subclass, then the corresponding value in `fields` is expected to contain a `TypeSpec` (rather than a value described by that `TypeSpec`). Args: fields: A list of `ExtensionTypeField` objects. field_values: A `dict` mapping field names to values. Must contain an entry for each field. I.e., `set(field_values.keys())` must be equal to `set([f.name for f in fields])`. Raises: ValueError: If the keys of `field_values` do not match the names of the fields in `fields`. TypeError: If any value in `field_values` does not have the type indicated by the corresponding `ExtensionTypeField` object. 
""" _convert_fields(fields, field_values, context=_ConversionContext.SPEC) def _convert_fields(fields, field_values, context): """Type-checks and converts each field in `field_values` (in place). Args: fields: A list of `ExtensionTypeField` objects. field_values: A `dict` mapping field names to values. Must contain an entry for each field. I.e., `set(field_values.keys())` must be equal to `set([f.name for f in fields])`. context: _ConversionContext, indicates what kind of value we are converting. Raises: ValueError: If the keys of `field_values` do not match the names of the fields in `fields`. TypeError: If any value in `field_values` does not have the type indicated by the corresponding `ExtensionTypeField` object. """ converted = {} if len(fields) != len(field_values): _report_field_mismatches(fields, field_values) for field in fields: if field.name not in field_values: _report_field_mismatches(fields, field_values) field_value = field_values[field.name] converted[field.name] = _convert_value(field_value, field.value_type, (field.name,), context) field_values.update(converted) def _convert_value(value, expected_type, path, context=_ConversionContext.VALUE): """Type-checks and converts a value. Args: value: The value to type-check. expected_type: The expected type for the value. path: Tuple of `str` naming the value (used for exception messages). context: _ConversionContext, indicates what kind of value we are converting. Returns: A copy of `value`, converted to the expected type. Raises: TypeError: If `value` can not be converted to the expected type. 
""" assert isinstance(path, tuple) if expected_type is None: expected_type = _NoneType if expected_type is tensor.Tensor: return _convert_tensor(value, path, context) elif (isinstance(expected_type, type) and _issubclass(expected_type, composite_tensor.CompositeTensor)): return _convert_composite_tensor(value, expected_type, path, context) elif expected_type is tensor_shape.TensorShape: try: return tensor_shape.as_shape(value) except TypeError as e: raise TypeError(f"{''.join(path)}: expected 'tf.TensorShape', got " f'{type(value).__name__!r}') from e elif expected_type is dtypes.DType: try: return dtypes.as_dtype(value) except TypeError as e: raise TypeError(f"{''.join(path)}: expected 'tf.DType', got " f'{type(value).__name__!r}') from e elif expected_type in (int, float, bool, str, bytes, _NoneType): if not isinstance(value, expected_type): raise TypeError(f'{"".join(path)}: expected {expected_type.__name__!r}, ' f'got {type(value).__name__!r}') return value elif type_annotations.is_generic_tuple(expected_type): return _convert_tuple(value, expected_type, path, context) elif type_annotations.is_generic_mapping(expected_type): return _convert_mapping(value, expected_type, path, context) elif type_annotations.is_generic_union(expected_type): return _convert_union(value, expected_type, path, context) else: raise TypeError(f'{"".join(path)}: Unsupported type annotation ' f'{expected_type!r}') def _convert_tensor(value, path, context): """Converts `value` to a `Tensor`.""" if context == _ConversionContext.SPEC: if not (isinstance(value, type_spec.TypeSpec) and value.value_type is tensor.Tensor): raise TypeError( f'{"".join(path)}: expected a TensorSpec, got ' f'{type(value).__name__!r}') return value if not isinstance(value, tensor.Tensor): if context == _ConversionContext.DEFAULT: # TODO(edloper): Convert the value to a numpy array? 
(Note: we can't just # use `np.array(value)`, since the default dtypes for TF and numpy are # different -- e.g., int->np.int64 but int->tf.int32. return value try: value = ops.convert_to_tensor(value) except (ValueError, TypeError) as e: raise TypeError(f'{"".join(path)}: expected a Tensor, ' f'got {type(value).__name__!r}') from e return value def _convert_composite_tensor(value, expected_type, path, context): """Converts `value` to a value of type `expected_type`.""" if context == _ConversionContext.SPEC: if not (isinstance(value, type_spec.TypeSpec) and _issubclass(value.value_type, expected_type)): raise TypeError(f'{"".join(path)}: expected a TypeSpec for ' f'{expected_type.__name__!r}, got ' f'{type(value).__name__!r}') return value if not isinstance(value, expected_type): raise TypeError(f'{"".join(path)}: expected {expected_type.__name__!r}, ' f'got {type(value).__name__!r}') return value def _convert_tuple(value, expected_type, path, context): """Converts `value` to a tuple with type `expected_type`.""" if not isinstance(value, typing.Sequence): raise TypeError(f'{"".join(path)}: expected tuple, got ' f'{type(value).__name__!r}') element_types = type_annotations.get_generic_type_args(expected_type) if len(element_types) == 2 and element_types[1] is Ellipsis: return tuple([ _convert_value(v, element_types[0], path + (f'[{i}]',), context) for (i, v) in enumerate(value) ]) else: if len(value) != len(element_types): raise TypeError(f'{"".join(path)}: expected tuple with length ' f'{len(element_types)}, got {type(value).__name__!r})') return tuple([ _convert_value(v, t, path + (f'[{i}]',), context) for (i, (v, t)) in enumerate(zip(value, element_types)) ]) def _convert_mapping(value, expected_type, path, context): """Converts `value` to a mapping with type `expected_type`.""" if not isinstance(value, typing.Mapping): raise TypeError(f'{"".join(path)}: expected mapping, got ' f'{type(value).__name__!r}') key_type, value_type = 
type_annotations.get_generic_type_args(expected_type) return immutable_dict.ImmutableDict([ (_convert_value(k, key_type, path + ('[<key>]',), context), _convert_value(v, value_type, path + (f'[{k!r}]',), context)) for (k, v) in value.items() ]) def _convert_union(value, expected_type, path, context): """Converts `value` to a value with any of the types in `expected_type`.""" for type_option in type_annotations.get_generic_type_args(expected_type): try: return _convert_value(value, type_option, path, context) except TypeError: pass raise TypeError(f'{"".join(path)}: expected {expected_type!r}, got ' f'{type(value).__name__!r}') def _report_field_mismatches(fields, field_values): """Raises an exception with mismatches between fields and field_values.""" expected = set(f.name for f in fields) actual = set(field_values) extra = actual - expected if extra: raise ValueError(f'Got unexpected fields: {extra}') missing = expected - actual if missing: raise ValueError(f'Missing required fields: {missing}')
_ConversionContext
python
nedbat__coveragepy
tests/test_plugins.py
{ "start": 11775, "end": 23483 }
class ____(FileTracerTest): """Tests of file tracer plugin happy paths.""" def test_plugin1(self) -> None: self.make_file( "simple.py", """\ import try_xyz a = 1 b = 2 """, ) self.make_file( "try_xyz.py", """\ c = 3 d = 4 """, ) cov = coverage.Coverage() CheckUniqueFilenames.hook(cov, "_should_trace") CheckUniqueFilenames.hook(cov, "_check_include_omit_etc") cov.set_option("run:plugins", ["tests.plugin1"]) # Import the Python file, executing it. self.start_import_stop(cov, "simple") _, statements, missing, _ = cov.analysis("simple.py") assert statements == [1, 2, 3] assert missing == [] zzfile = os.path.abspath(os.path.join("/src", "try_ABC.zz")) _, statements, _, _ = cov.analysis(zzfile) assert statements == [105, 106, 107, 205, 206, 207] def make_render_and_caller(self) -> None: """Make the render.py and caller.py files we need.""" # plugin2 emulates a dynamic tracing plugin: the caller's locals # are examined to determine the source file and line number. # The plugin is in tests/plugin2.py. self.make_file( "render.py", """\ def render(filename, linenum): # This function emulates a template renderer. The plugin # will examine the `filename` and `linenum` locals to # determine the source file and line number. fiddle_around = 1 # not used, just chaff. return "[{} @ {}]".format(filename, linenum) def helper(x): # This function is here just to show that not all code in # this file will be part of the dynamic tracing. return x+1 """, ) self.make_file( "caller.py", """\ import sys from render import helper, render assert render("foo_7.html", 4) == "[foo_7.html @ 4]" # Render foo_7.html again to try the CheckUniqueFilenames asserts. render("foo_7.html", 4) assert helper(42) == 43 assert render("bar_4.html", 2) == "[bar_4.html @ 2]" assert helper(76) == 77 # quux_5.html will be omitted from the results. assert render("quux_5.html", 3) == "[quux_5.html @ 3]" """, ) # will try to read the actual source files, so make some # source files. 
def lines(n: int) -> str: """Make a string with n lines of text.""" return "".join("line %d\n" % i for i in range(n)) self.make_file("bar_4.html", lines(4)) self.make_file("foo_7.html", lines(7)) def test_plugin2(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(omit=["*quux*"]) CheckUniqueFilenames.hook(cov, "_should_trace") CheckUniqueFilenames.hook(cov, "_check_include_omit_etc") cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") # The way plugin2 works, a file named foo_7.html will be claimed to # have 7 lines in it. If render() was called with line number 4, # then the plugin will claim that lines 4 and 5 were executed. _, statements, missing, _ = cov.analysis("foo_7.html") assert statements == [1, 2, 3, 4, 5, 6, 7] assert missing == [1, 2, 3, 6, 7] assert "foo_7.html" in line_counts(cov.get_data()) _, statements, missing, _ = cov.analysis("bar_4.html") assert statements == [1, 2, 3, 4] assert missing == [1, 4] assert "bar_4.html" in line_counts(cov.get_data()) assert "quux_5.html" not in line_counts(cov.get_data()) def test_plugin2_with_branch(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(branch=True, omit=["*quux*"]) CheckUniqueFilenames.hook(cov, "_should_trace") CheckUniqueFilenames.hook(cov, "_check_include_omit_etc") cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") # The way plugin2 works, a file named foo_7.html will be claimed to # have 7 lines in it. If render() was called with line number 4, # then the plugin will claim that lines 4 and 5 were executed. analysis = cov._analyze("foo_7.html") assert analysis.statements == {1, 2, 3, 4, 5, 6, 7} # Plugins don't do branch coverage yet. 
assert analysis.has_arcs is True assert analysis.arc_possibilities == [] assert analysis.missing == {1, 2, 3, 6, 7} def test_plugin2_with_text_report(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(branch=True, omit=["*quux*"]) cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") repout = io.StringIO() total = cov.report(file=repout, include=["*.html"], omit=["uni*.html"], show_missing=True) report = repout.getvalue().splitlines() expected = [ "Name Stmts Miss Branch BrPart Cover Missing", "--------------------------------------------------------", "bar_4.html 4 2 0 0 50% 1, 4", "foo_7.html 7 5 0 0 29% 1-3, 6-7", "--------------------------------------------------------", "TOTAL 11 7 0 0 36%", ] assert expected == report assert math.isclose(total, 4 / 11 * 100) def test_plugin2_with_html_report(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(branch=True, omit=["*quux*"]) cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") total = cov.html_report(include=["*.html"], omit=["uni*.html"]) assert math.isclose(total, 4 / 11 * 100) self.assert_exists("htmlcov/index.html") self.assert_exists("htmlcov/bar_4_html.html") self.assert_exists("htmlcov/foo_7_html.html") def test_plugin2_with_xml_report(self) -> None: self.make_render_and_caller() cov = coverage.Coverage(branch=True, omit=["*quux*"]) cov.set_option("run:plugins", ["tests.plugin2"]) self.start_import_stop(cov, "caller") total = cov.xml_report(include=["*.html"], omit=["uni*.html"]) assert math.isclose(total, 4 / 11 * 100) dom = ElementTree.parse("coverage.xml") classes = {} for elt in dom.findall(".//class"): classes[elt.get("name")] = elt assert classes["bar_4.html"].attrib == { "branch-rate": "1", "complexity": "0", "filename": "bar_4.html", "line-rate": "0.5", "name": "bar_4.html", } assert classes["foo_7.html"].attrib == { "branch-rate": "1", "complexity": "0", "filename": "foo_7.html", "line-rate": 
"0.2857", "name": "foo_7.html", } def test_defer_to_python(self) -> None: # A plugin that measures, but then wants built-in python reporting. self.make_file( "fairly_odd_plugin.py", """\ # A plugin that claims all the odd lines are executed, and none of # the even lines, and then punts reporting off to the built-in # Python reporting. import coverage.plugin class Plugin(coverage.CoveragePlugin): def file_tracer(self, filename): return OddTracer(filename) def file_reporter(self, filename): return "python" class OddTracer(coverage.plugin.FileTracer): def __init__(self, filename): self.filename = filename def source_filename(self): return self.filename def line_number_range(self, frame): lineno = frame.f_lineno if lineno % 2: return (lineno, lineno) else: return (-1, -1) def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """, ) self.make_file( "unsuspecting.py", """\ a = 1 b = 2 c = 3 d = 4 e = 5 f = 6 """, ) cov = coverage.Coverage(include=["unsuspecting.py"]) cov.set_option("run:plugins", ["fairly_odd_plugin"]) self.start_import_stop(cov, "unsuspecting") repout = io.StringIO() total = cov.report(file=repout, show_missing=True) report = repout.getvalue().splitlines() expected = [ "Name Stmts Miss Cover Missing", "-----------------------------------------------", "unsuspecting.py 6 3 50% 2, 4, 6", "-----------------------------------------------", "TOTAL 6 3 50%", ] assert expected == report assert total == 50 def test_find_unexecuted(self) -> None: self.make_file( "unexecuted_plugin.py", """\ import os import coverage.plugin class Plugin(coverage.CoveragePlugin): def file_tracer(self, filename): if filename.endswith("foo.py"): return MyTracer(filename) def file_reporter(self, filename): return MyReporter(filename) def find_executable_files(self, src_dir): # Check that src_dir is the right value files = os.listdir(src_dir) assert "foo.py" in files assert "unexecuted_plugin.py" in files return ["chimera.py"] class MyTracer(coverage.plugin.FileTracer): def 
__init__(self, filename): self.filename = filename def source_filename(self): return self.filename def line_number_range(self, frame): return (999, 999) class MyReporter(coverage.FileReporter): def lines(self): return {99, 999, 9999} def coverage_init(reg, options): reg.add_file_tracer(Plugin()) """, ) self.make_file("foo.py", "a = 1") cov = coverage.Coverage(source=["."]) cov.set_option("run:plugins", ["unexecuted_plugin"]) self.start_import_stop(cov, "foo") # The file we executed claims to have run line 999. _, statements, missing, _ = cov.analysis("foo.py") assert statements == [99, 999, 9999] assert missing == [99, 9999] # The completely missing file is in the results. _, statements, missing, _ = cov.analysis("chimera.py") assert statements == [99, 999, 9999] assert missing == [99, 999, 9999] # But completely new filenames are not in the results. assert len(cov.get_data().measured_files()) == 3 with pytest.raises(NoSource): cov.analysis("fictional.py")
GoodFileTracerTest
python
pytorch__pytorch
torch/distributed/fsdp/_trace_utils.py
{ "start": 1354, "end": 2296 }
class ____(NamedTuple): """ This is used for ``_ExecutionInfo.module_to_param_usage_infos`` to record execution information. The ``dict`` maps modules to a list of these ``_ParamUsageInfo`` instances, where each instance represents a group of parameters used together. Specifically, for each module key in the ``dict``, each instance of this class represents either: (1) the module and some sublist of its ``named_parameters()`` used together in execution (see ``_patched_create_proxy()``), or (2) a submodule and all of ``submodule.named_parameters()`` (see ``_patched_call_module()``). Type (1) corresponds to directly using parameters in ops without calling ``forward()``, and type (2) corresponds to calling ``forward()``. The mapped-to lists in the ``dict`` follow the execution order. """ module: nn.Module named_params: list[tuple[str, nn.Parameter]]
_ParamUsageInfo