language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
google__pytype
pytype/blocks/blocks.py
{ "start": 1185, "end": 2536 }
class ____: """A block is a node in a directed graph. It has incoming and outgoing edges (jumps). Incoming jumps always jump to the first instruction of our bytecode, and outgoing jumps always jump from the last instruction. There are no jump instructions in the middle of a byte code block. A block implements most of the "sequence" interface, i.e., it can be used as if it was a Python list of bytecode instructions. Attributes: id: Block id code: A bytecode object (a list of instances of opcodes.Opcode). incoming: Incoming edges. These are blocks that jump to the first instruction in our code object. outgoing: Outgoing edges. These are the targets jumped to by the last instruction in our code object. """ def __init__(self, code: list[opcodes.Opcode]): self.id = code[0].index self.code = code self.incoming: set[Self] = set() self.outgoing: set[Self] = set() def connect_outgoing(self, target: Self): """Add an outgoing edge.""" self.outgoing.add(target) target.incoming.add(self) def __str__(self): return "<Block %d>" % self.id def __repr__(self): return "<Block %d: %r>" % (self.id, self.code) def __getitem__(self, index_or_slice): return self.code.__getitem__(index_or_slice) def __iter__(self): return self.code.__iter__()
Block
python
py-pdf__pypdf
pypdf/_protocols.py
{ "start": 885, "end": 945 }
class ____(PdfObjectProtocol): pass
XmpInformationProtocol
python
pytorch__pytorch
torch/distributed/rpc/rref_proxy.py
{ "start": 2380, "end": 2705 }
class ____: def __init__(self, rref, rpc_api, timeout=UNSET_RPC_TIMEOUT): self.rref = rref self.rpc_api = rpc_api self.rpc_timeout = timeout def __getattr__(self, func_name): return partial( _invoke_rpc, self.rref, self.rpc_api, func_name, self.rpc_timeout )
RRefProxy
python
realpython__materials
python-protocol/shapes_v1.py
{ "start": 216, "end": 452 }
class ____(Shape): def __init__(self, radius) -> None: self.radius = radius def get_area(self) -> float: return pi * self.radius**2 def get_perimeter(self) -> float: return 2 * pi * self.radius
Circle
python
openai__openai-python
src/openai/types/responses/tool.py
{ "start": 6227, "end": 8222 }
class ____(BaseModel): type: Literal["image_generation"] """The type of the image generation tool. Always `image_generation`.""" background: Optional[Literal["transparent", "opaque", "auto"]] = None """Background type for the generated image. One of `transparent`, `opaque`, or `auto`. Default: `auto`. """ input_fidelity: Optional[Literal["high", "low"]] = None """ Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`. """ input_image_mask: Optional[ImageGenerationInputImageMask] = None """Optional mask for inpainting. Contains `image_url` (string, optional) and `file_id` (string, optional). """ model: Optional[Literal["gpt-image-1", "gpt-image-1-mini"]] = None """The image generation model to use. Default: `gpt-image-1`.""" moderation: Optional[Literal["auto", "low"]] = None """Moderation level for the generated image. Default: `auto`.""" output_compression: Optional[int] = None """Compression level for the output image. Default: 100.""" output_format: Optional[Literal["png", "webp", "jpeg"]] = None """The output format of the generated image. One of `png`, `webp`, or `jpeg`. Default: `png`. """ partial_images: Optional[int] = None """ Number of partial images to generate in streaming mode, from 0 (default value) to 3. """ quality: Optional[Literal["low", "medium", "high", "auto"]] = None """The quality of the generated image. One of `low`, `medium`, `high`, or `auto`. Default: `auto`. """ size: Optional[Literal["1024x1024", "1024x1536", "1536x1024", "auto"]] = None """The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`, or `auto`. Default: `auto`. """
ImageGeneration
python
TheAlgorithms__Python
maths/matrix_exponentiation.py
{ "start": 301, "end": 3250 }
class ____: def __init__(self, arg): if isinstance(arg, list): # Initializes a matrix identical to the one provided. self.t = arg self.n = len(arg) else: # Initializes a square matrix of the given size and set values to zero. self.n = arg self.t = [[0 for _ in range(self.n)] for _ in range(self.n)] def __mul__(self, b): matrix = Matrix(self.n) for i in range(self.n): for j in range(self.n): for k in range(self.n): matrix.t[i][j] += self.t[i][k] * b.t[k][j] return matrix def modular_exponentiation(a, b): matrix = Matrix([[1, 0], [0, 1]]) while b > 0: if b & 1: matrix *= a a *= a b >>= 1 return matrix def fibonacci_with_matrix_exponentiation(n, f1, f2): """ Returns the nth number of the Fibonacci sequence that starts with f1 and f2 Uses the matrix exponentiation >>> fibonacci_with_matrix_exponentiation(1, 5, 6) 5 >>> fibonacci_with_matrix_exponentiation(2, 10, 11) 11 >>> fibonacci_with_matrix_exponentiation(13, 0, 1) 144 >>> fibonacci_with_matrix_exponentiation(10, 5, 9) 411 >>> fibonacci_with_matrix_exponentiation(9, 2, 3) 89 """ # Trivial Cases if n == 1: return f1 elif n == 2: return f2 matrix = Matrix([[1, 1], [1, 0]]) matrix = modular_exponentiation(matrix, n - 2) return f2 * matrix.t[0][0] + f1 * matrix.t[0][1] def simple_fibonacci(n, f1, f2): """ Returns the nth number of the Fibonacci sequence that starts with f1 and f2 Uses the definition >>> simple_fibonacci(1, 5, 6) 5 >>> simple_fibonacci(2, 10, 11) 11 >>> simple_fibonacci(13, 0, 1) 144 >>> simple_fibonacci(10, 5, 9) 411 >>> simple_fibonacci(9, 2, 3) 89 """ # Trivial Cases if n == 1: return f1 elif n == 2: return f2 n -= 2 while n > 0: f2, f1 = f1 + f2, f2 n -= 1 return f2 def matrix_exponentiation_time(): setup = """ from random import randint from __main__ import fibonacci_with_matrix_exponentiation """ code = "fibonacci_with_matrix_exponentiation(randint(1,70000), 1, 1)" exec_time = timeit.timeit(setup=setup, stmt=code, number=100) print("With matrix exponentiation the average execution time is ", 
exec_time / 100) return exec_time def simple_fibonacci_time(): setup = """ from random import randint from __main__ import simple_fibonacci """ code = "simple_fibonacci(randint(1,70000), 1, 1)" exec_time = timeit.timeit(setup=setup, stmt=code, number=100) print( "Without matrix exponentiation the average execution time is ", exec_time / 100 ) return exec_time def main(): matrix_exponentiation_time() simple_fibonacci_time() if __name__ == "__main__": main()
Matrix
python
tiangolo__fastapi
scripts/contributors.py
{ "start": 1843, "end": 1909 }
class ____(BaseModel): repository: PRsRepository
PRsResponseData
python
walkccc__LeetCode
solutions/3216. Lexicographically Smallest String After a Swap/3216.py
{ "start": 0, "end": 285 }
class ____: def getSmallestString(self, s: str) -> str: chars = list(s) for i, (a, b) in enumerate(itertools.pairwise(chars)): if ord(a) % 2 == ord(b) % 2 and a > b: chars[i], chars[i + 1] = chars[i + 1], chars[i] return ''.join(chars) return s
Solution
python
facebookresearch__faiss
tests/test_partition.py
{ "start": 1086, "end": 2444 }
class ____(unittest.TestCase, PartitionTests): def do_partition(self, n, q, maxval=None, seed=None): if seed is None: for i in range(50): self.do_partition(n, q, maxval, i + 1234) rs = np.random.RandomState(seed) if maxval is None: vals = rs.rand(n).astype('float32') else: vals = rs.randint(maxval, size=n).astype('float32') ids = (rs.permutation(n) + 12345).astype('int64') dic = dict(zip(ids, vals)) vals_orig = vals.copy() sp = faiss.swig_ptr if isinstance(q, int): faiss.CMax_float_partition_fuzzy( sp(vals), sp(ids), n, q, q, None ) else: q_min, q_max = q q = pointer_to_minus1() faiss.CMax_float_partition_fuzzy( sp(vals), sp(ids), n, q_min, q_max, sp(q) ) q = q[0] assert q_min <= q <= q_max o = vals_orig.argsort() thresh = vals_orig[o[q]] n_eq = (vals_orig[o[:q]] == thresh).sum() for i in range(q): self.assertEqual(vals[i], dic[ids[i]]) self.assertLessEqual(vals[i], thresh) if vals[i] == thresh: n_eq -= 1 self.assertEqual(n_eq, 0)
TestPartitioningFloat
python
scikit-learn__scikit-learn
sklearn/tests/test_base.py
{ "start": 26735, "end": 34034 }
class ____(_Empty, BaseEstimator): pass @pytest.mark.parametrize("estimator", [BaseEstimator(), EmptyEstimator()]) def test_estimator_empty_instance_dict(estimator): """Check that ``__getstate__`` returns an empty ``dict`` with an empty instance. Python 3.11+ changed behaviour by returning ``None`` instead of raising an ``AttributeError``. Non-regression test for gh-25188. """ state = estimator.__getstate__() expected = {"_sklearn_version": sklearn.__version__} assert state == expected # this should not raise pickle.loads(pickle.dumps(BaseEstimator())) def test_estimator_getstate_using_slots_error_message(): """Using a `BaseEstimator` with `__slots__` is not supported.""" class WithSlots: __slots__ = ("x",) class Estimator(BaseEstimator, WithSlots): pass msg = ( "You cannot use `__slots__` in objects inheriting from " "`sklearn.base.BaseEstimator`" ) with pytest.raises(TypeError, match=msg): Estimator().__getstate__() with pytest.raises(TypeError, match=msg): pickle.dumps(Estimator()) @pytest.mark.parametrize( "constructor_name, minversion", [ ("dataframe", "1.5.0"), ("pyarrow", "12.0.0"), ("polars", "0.20.23"), ], ) def test_dataframe_protocol(constructor_name, minversion): """Uses the dataframe exchange protocol to get feature names.""" data = [[1, 4, 2], [3, 3, 6]] columns = ["col_0", "col_1", "col_2"] df = _convert_container( data, constructor_name, columns_name=columns, minversion=minversion ) class NoOpTransformer(TransformerMixin, BaseEstimator): def fit(self, X, y=None): validate_data(self, X) return self def transform(self, X): return validate_data(self, X, reset=False) no_op = NoOpTransformer() no_op.fit(df) assert_array_equal(no_op.feature_names_in_, columns) X_out = no_op.transform(df) if constructor_name != "pyarrow": # pyarrow does not work with `np.asarray` # https://github.com/apache/arrow/issues/34886 assert_allclose(df, X_out) bad_names = ["a", "b", "c"] df_bad = _convert_container(data, constructor_name, columns_name=bad_names) with 
pytest.raises(ValueError, match="The feature names should match"): no_op.transform(df_bad) @config_context(enable_metadata_routing=True) def test_transformer_fit_transform_with_metadata_in_transform(): """Test that having a transformer with metadata for transform raises a warning when calling fit_transform.""" class CustomTransformer(BaseEstimator, TransformerMixin): def fit(self, X, y=None, prop=None): return self def transform(self, X, prop=None): return X # passing the metadata to `fit_transform` should raise a warning since it # could potentially be consumed by `transform` with pytest.warns(UserWarning, match="`transform` method which consumes metadata"): CustomTransformer().set_transform_request(prop=True).fit_transform( [[1]], [1], prop=1 ) # not passing a metadata which can potentially be consumed by `transform` should # not raise a warning with warnings.catch_warnings(record=True) as record: CustomTransformer().set_transform_request(prop=True).fit_transform([[1]], [1]) assert len(record) == 0 @config_context(enable_metadata_routing=True) def test_outlier_mixin_fit_predict_with_metadata_in_predict(): """Test that having an OutlierMixin with metadata for predict raises a warning when calling fit_predict.""" class CustomOutlierDetector(BaseEstimator, OutlierMixin): def fit(self, X, y=None, prop=None): return self def predict(self, X, prop=None): return X # passing the metadata to `fit_predict` should raise a warning since it # could potentially be consumed by `predict` with pytest.warns(UserWarning, match="`predict` method which consumes metadata"): CustomOutlierDetector().set_predict_request(prop=True).fit_predict( [[1]], [1], prop=1 ) # not passing a metadata which can potentially be consumed by `predict` should # not raise a warning with warnings.catch_warnings(record=True) as record: CustomOutlierDetector().set_predict_request(prop=True).fit_predict([[1]], [1]) assert len(record) == 0 def test_get_params_html(): """Check the behaviour of the 
`_get_params_html` method.""" est = MyEstimator(empty="test") assert est._get_params_html() == {"l1": 0, "empty": "test"} assert est._get_params_html().non_default == ("empty",) def make_estimator_with_param(default_value): class DynamicEstimator(BaseEstimator): def __init__(self, param=default_value): self.param = param return DynamicEstimator @pytest.mark.parametrize( "default_value, test_value", [ ((), (1,)), ((), [1]), ((), np.array([1])), ((1, 2), (3, 4)), ((1, 2), [3, 4]), ((1, 2), np.array([3, 4])), (None, 1), (None, []), (None, lambda x: x), (np.nan, 1.0), (np.nan, np.array([np.nan])), ("abc", "def"), ("abc", ["abc"]), (True, False), (1, 2), (1, [1]), (1, np.array([1])), (1.0, 2.0), (1.0, [1.0]), (1.0, np.array([1.0])), ([1, 2], [3]), (np.array([1]), [2, 3]), (None, KFold()), (None, get_scorer("accuracy")), ], ) def test_param_is_non_default(default_value, test_value): """Check that we detect non-default parameters with various types. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/31525 """ estimator = make_estimator_with_param(default_value)(param=test_value) non_default = estimator._get_params_html().non_default assert "param" in non_default def test_param_is_non_default_when_pandas_NA(): """Check that we detect pandas.Na as non-default parameter. 
Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/32312 """ pd = pytest.importorskip("pandas") estimator = make_estimator_with_param(default_value=0)(param=pd.NA) non_default = estimator._get_params_html().non_default assert "param" in non_default @pytest.mark.parametrize( "default_value, test_value", [ (None, None), ((), ()), ((), []), ((), np.array([])), ((1, 2, 3), (1, 2, 3)), ((1, 2, 3), [1, 2, 3]), ((1, 2, 3), np.array([1, 2, 3])), (np.nan, np.nan), ("abc", "abc"), (True, True), (1, 1), (1.0, 1.0), (2, 2.0), ], ) def test_param_is_default(default_value, test_value): """Check that we detect the default parameters and values in an array-like will be reported as default as well. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/31525 """ estimator = make_estimator_with_param(default_value)(param=test_value) non_default = estimator._get_params_html().non_default assert "param" not in non_default
EmptyEstimator
python
openai__openai-python
src/openai/types/beta/threads/runs/code_interpreter_tool_call_delta.py
{ "start": 1025, "end": 1479 }
class ____(BaseModel): index: int """The index of the tool call in the tool calls array.""" type: Literal["code_interpreter"] """The type of tool call. This is always going to be `code_interpreter` for this type of tool call. """ id: Optional[str] = None """The ID of the tool call.""" code_interpreter: Optional[CodeInterpreter] = None """The Code Interpreter tool call definition."""
CodeInterpreterToolCallDelta
python
modin-project__modin
modin/tests/core/storage_formats/pandas/test_internals.py
{ "start": 16902, "end": 45185 }
class ____: """Test draining virtual partition call queues. Test creating a virtual partition made of block partitions and/or one or more layers of virtual partitions, draining the top-level partition's call queue, and getting the result. In all these test cases, the full_axis argument doesn't matter for correctness because it only affects `apply`, which is not used here. Still, virtual partition users are not supposed to create full-axis virtual partitions out of other full-axis virtual partitions, so set full_axis to False everywhere. """ def test_from_virtual_partitions_with_call_queues( self, axis, virtual_partition_class, ): # reverse the dataframe along the virtual partition axis. def reverse(df): return df.iloc[::-1, :] if axis == 0 else df.iloc[:, ::-1] level_zero_blocks_first = [ block_partition_class(put(pandas.DataFrame([0]))), block_partition_class(put(pandas.DataFrame([1]))), ] level_one_virtual_first = virtual_partition_class( level_zero_blocks_first, full_axis=False ) level_one_virtual_first = level_one_virtual_first.add_to_apply_calls(reverse) level_zero_blocks_second = [ block_partition_class(put(pandas.DataFrame([2]))), block_partition_class(put(pandas.DataFrame([3]))), ] level_one_virtual_second = virtual_partition_class( level_zero_blocks_second, full_axis=False ) level_one_virtual_second = level_one_virtual_second.add_to_apply_calls(reverse) level_two_virtual = virtual_partition_class( [level_one_virtual_first, level_one_virtual_second], full_axis=False ) level_two_virtual.drain_call_queue() if axis == 0: expected_df = pandas.DataFrame([1, 0, 3, 2], index=[0, 0, 0, 0]) else: expected_df = pandas.DataFrame([[1, 0, 3, 2]], columns=[0, 0, 0, 0]) df_equals( level_two_virtual.to_pandas(), expected_df, ) def test_from_block_and_virtual_partition_with_call_queues( self, axis, virtual_partition_class ): # make a function that reverses the dataframe along the virtual # partition axis. # for testing axis == 0, start with two 2-rows-by-1-column blocks. 
for # axis == 1, start with two 1-rows-by-2-column blocks. def reverse(df): return df.iloc[::-1, :] if axis == 0 else df.iloc[:, ::-1] block_data = [[0, 1], [2, 3]] if axis == 0 else [[[0, 1]], [[2, 3]]] level_zero_blocks = [ block_partition_class(put(pandas.DataFrame(block_data[0]))), block_partition_class(put(pandas.DataFrame(block_data[1]))), ] level_zero_blocks[0] = level_zero_blocks[0].add_to_apply_calls(reverse) level_one_virtual = virtual_partition_class( level_zero_blocks[1], full_axis=False ) level_one_virtual = level_one_virtual.add_to_apply_calls(reverse) level_two_virtual = virtual_partition_class( [level_zero_blocks[0], level_one_virtual], full_axis=False ) level_two_virtual.drain_call_queue() if axis == 0: expected_df = pandas.DataFrame([1, 0, 3, 2], index=[1, 0, 1, 0]) else: expected_df = pandas.DataFrame([[1, 0, 3, 2]], columns=[1, 0, 1, 0]) df_equals(level_two_virtual.to_pandas(), expected_df) def test_virtual_partition_call_queues_at_three_levels( self, axis, virtual_partition_class ): block = block_partition_class(put(pandas.DataFrame([1]))) level_one_virtual = virtual_partition_class([block], full_axis=False) level_one_virtual = level_one_virtual.add_to_apply_calls( lambda df: pandas.concat([df, pandas.DataFrame([2])]) ) level_two_virtual = virtual_partition_class( [level_one_virtual], full_axis=False ) level_two_virtual = level_two_virtual.add_to_apply_calls( lambda df: pandas.concat([df, pandas.DataFrame([3])]) ) level_three_virtual = virtual_partition_class( [level_two_virtual], full_axis=False ) level_three_virtual = level_three_virtual.add_to_apply_calls( lambda df: pandas.concat([df, pandas.DataFrame([4])]) ) level_three_virtual.drain_call_queue() df_equals( level_three_virtual.to_pandas(), pd.DataFrame([1, 2, 3, 4], index=[0, 0, 0, 0]), ) @pytest.mark.parametrize( "virtual_partition_class", (virtual_column_partition_class, virtual_row_partition_class), ids=["partitions_spanning_all_columns", "partitions_spanning_all_rows"], ) def 
test_virtual_partition_apply_not_returning_pandas_dataframe( virtual_partition_class, ): # see https://github.com/modin-project/modin/issues/4811 partition = virtual_partition_class( block_partition_class(put(pandas.DataFrame())), full_axis=False ) apply_result = partition.apply(lambda df: 1).get() assert apply_result == 1 @pytest.mark.skipif( Engine.get() != "Ray", reason="Only ray.wait() does not take duplicate object refs.", ) def test_virtual_partition_dup_object_ref(): # See https://github.com/modin-project/modin/issues/5045 frame_c = pd.DataFrame(np.zeros((100, 20), dtype=np.float32, order="C")) frame_c = [frame_c] * 20 df = pd.concat(frame_c) partition = df._query_compiler._modin_frame._partitions.flatten()[0] obj_refs = partition.list_of_blocks assert len(obj_refs) != len( set(obj_refs) ), "Test setup did not contain duplicate objects" # The below call to wait() should not crash partition.wait() __test_reorder_labels_cache_axis_positions = [ pytest.param(lambda index: None, id="no_reordering"), pytest.param(lambda index: np.arange(len(index) - 1, -1, -1), id="reordering_only"), pytest.param( lambda index: [0, 1, 2, len(index) - 3, len(index) - 2, len(index) - 1], id="projection_only", ), pytest.param( lambda index: np.repeat(np.arange(len(index)), repeats=3), id="size_grow" ), ] @pytest.mark.parametrize("row_positions", __test_reorder_labels_cache_axis_positions) @pytest.mark.parametrize("col_positions", __test_reorder_labels_cache_axis_positions) @pytest.mark.parametrize( "partitioning_scheme", [ pytest.param( lambda df: { "row_lengths": [df.shape[0]], "column_widths": [df.shape[1]], }, id="single_partition", ), pytest.param( lambda df: { "row_lengths": [32, max(0, df.shape[0] - 32)], "column_widths": [32, max(0, df.shape[1] - 32)], }, id="two_unbalanced_partitions", ), pytest.param( lambda df: { "row_lengths": [df.shape[0] // NPartitions.get()] * NPartitions.get(), "column_widths": [df.shape[1] // NPartitions.get()] * NPartitions.get(), }, 
id="perfect_partitioning", ), pytest.param( lambda df: { "row_lengths": [2**i for i in range(NPartitions.get())], "column_widths": [2**i for i in range(NPartitions.get())], }, id="unbalanced_partitioning_equals_npartition", ), pytest.param( lambda df: { "row_lengths": [2] * (df.shape[0] // 2), "column_widths": [2] * (df.shape[1] // 2), }, id="unbalanced_partitioning", ), ], ) def test_reorder_labels_cache( row_positions, col_positions, partitioning_scheme, ): pandas_df = pandas.DataFrame(test_data_values[0]) md_df = construct_modin_df_by_scheme(pandas_df, partitioning_scheme(pandas_df)) md_df = md_df._query_compiler._modin_frame result = md_df._reorder_labels( row_positions(md_df.index), col_positions(md_df.columns) ) validate_partitions_cache(result) def test_reorder_labels_dtypes(): pandas_df = pandas.DataFrame( { "a": [1, 2, 3, 4], "b": [1.0, 2.4, 3.4, 4.5], "c": ["a", "b", "c", "d"], "d": pd.to_datetime([1, 2, 3, 4], unit="D"), } ) md_df = construct_modin_df_by_scheme( pandas_df, partitioning_scheme={ "row_lengths": [len(pandas_df)], "column_widths": [ len(pandas_df) // 2, len(pandas_df) // 2 + len(pandas_df) % 2, ], }, ) md_df = md_df._query_compiler._modin_frame result = md_df._reorder_labels( row_positions=None, col_positions=np.arange(len(md_df.columns) - 1, -1, -1) ) df_equals(result.dtypes, result.to_pandas().dtypes) @pytest.mark.parametrize( "left_partitioning, right_partitioning, ref_with_cache_available, ref_with_no_cache", # Note: this test takes into consideration that `MinRowPartitionSize == 32`, # `MinColumnPartitionSize == 32` and `NPartitions == 4` [ ( [2], [2], 1, # the num_splits is computed like (2 + 2 = 4 / chunk_size = 1 split) 2, # the num_splits is just splits sum (1 + 1 == 2) ), ( [24], [54], 3, # the num_splits is computed like (24 + 54 = 78 / chunk_size = 3 splits) 2, # the num_splits is just splits sum (1 + 1 == 2) ), ( [2], [299], 4, # the num_splits is bounded by NPartitions (2 + 299 = 301 / chunk_size = 10 splits -> bound by 4) 2, # 
the num_splits is just splits sum (1 + 1 == 2) ), ( [32, 32], [128], 4, # the num_splits is bounded by NPartitions (32 + 32 + 128 = 192 / chunk_size = 6 splits -> bound by 4) 3, # the num_splits is just splits sum (2 + 1 == 3) ), ( [128] * 7, [128] * 6, 4, # the num_splits is bounded by NPartitions (128 * 7 + 128 * 6 = 1664 / chunk_size = 52 splits -> bound by 4) 4, # the num_splits is just splits sum bound by NPartitions (7 + 6 = 13 splits -> 4 splits) ), ], ) @pytest.mark.parametrize( "modify_config", [{NPartitions: 4, MinRowPartitionSize: 32, MinColumnPartitionSize: 32}], indirect=True, ) def test_merge_partitioning( left_partitioning, right_partitioning, ref_with_cache_available, ref_with_no_cache, modify_config, ): from modin.core.storage_formats.pandas.utils import merge_partitioning left_df = pandas.DataFrame( [np.arange(sum(left_partitioning)) for _ in range(sum(left_partitioning))] ) right_df = pandas.DataFrame( [np.arange(sum(right_partitioning)) for _ in range(sum(right_partitioning))] ) left = construct_modin_df_by_scheme( left_df, {"row_lengths": left_partitioning, "column_widths": left_partitioning} )._query_compiler._modin_frame right = construct_modin_df_by_scheme( right_df, {"row_lengths": right_partitioning, "column_widths": right_partitioning}, )._query_compiler._modin_frame assert left.row_lengths == left.column_widths == left_partitioning assert right.row_lengths == right.column_widths == right_partitioning res = merge_partitioning(left, right, axis=0) assert res == ref_with_cache_available res = merge_partitioning(left, right, axis=1) assert res == ref_with_cache_available ( left._row_lengths_cache, left._column_widths_cache, right._row_lengths_cache, right._column_widths_cache, ) = [None] * 4 res = merge_partitioning(left, right, axis=0) assert res == ref_with_no_cache # Verifying that no computations are being triggered assert all( cache is None for cache in ( left._row_lengths_cache, left._column_widths_cache, right._row_lengths_cache, 
right._column_widths_cache, ) ) res = merge_partitioning(left, right, axis=1) assert res == ref_with_no_cache # Verifying that no computations are being triggered assert all( cache is None for cache in ( left._row_lengths_cache, left._column_widths_cache, right._row_lengths_cache, right._column_widths_cache, ) ) def test_merge_with_bad_partitioning(): # https://github.com/modin-project/modin/pull/7229 left_partitioning = [256] right_partitioning = [32, 32, 32, 32] left_df = pandas.DataFrame( [np.arange(sum(left_partitioning)) for _ in range(sum(left_partitioning))] ) right_df = pandas.DataFrame( [np.arange(sum(right_partitioning)) for _ in range(sum(right_partitioning))] ) left = construct_modin_df_by_scheme( left_df, {"row_lengths": left_partitioning, "column_widths": left_partitioning} ) right = construct_modin_df_by_scheme( right_df, {"row_lengths": right_partitioning, "column_widths": right_partitioning}, ) left_frame = left._query_compiler._modin_frame right_frame = right._query_compiler._modin_frame assert left_frame.row_lengths == left_frame.column_widths == left_partitioning assert right_frame.row_lengths == right_frame.column_widths == right_partitioning # just a dummy value return_value = pd.DataFrame([1, 2, 3, 4])._query_compiler with mock.patch.object( left._query_compiler, "repartition", return_value=return_value ) as repartition: _ = left.merge(right) repartition.assert_called_once_with(axis=0) def test_groupby_with_empty_partition(): # see #5461 for details md_df = construct_modin_df_by_scheme( pandas_df=pandas.DataFrame({"a": [1, 1, 2, 2], "b": [3, 4, 5, 6]}), partitioning_scheme={"row_lengths": [2, 2], "column_widths": [2]}, ) md_res = md_df.query("a > 1", engine="python") grp_obj = md_res.groupby("a") # check index error due to partitioning mismatching grp_obj.count() md_df = construct_modin_df_by_scheme( pandas_df=pandas.DataFrame({"a": [1, 1, 2, 2], "b": [3, 4, 5, 6]}), partitioning_scheme={"row_lengths": [2, 2], "column_widths": [2]}, ) md_res 
= md_df.query("a > 1", engine="python") grp_obj = md_res.groupby(md_res["a"]) grp_obj.count() @pytest.mark.parametrize("set_num_partitions", [2], indirect=True) def test_repartitioning(set_num_partitions): """ This test verifies that 'keep_partitioning=False' doesn't actually preserve partitioning. For more details see: https://github.com/modin-project/modin/issues/5621 """ assert NPartitions.get() == 2 pandas_df = pandas.DataFrame( {"a": [1, 1, 2, 2], "b": [3, 4, 5, 6], "c": [1, 2, 3, 4], "d": [4, 5, 6, 7]} ) modin_df = construct_modin_df_by_scheme( pandas_df=pandas.DataFrame( {"a": [1, 1, 2, 2], "b": [3, 4, 5, 6], "c": [1, 2, 3, 4], "d": [4, 5, 6, 7]} ), partitioning_scheme={"row_lengths": [4], "column_widths": [2, 2]}, ) modin_frame = modin_df._query_compiler._modin_frame assert modin_frame._partitions.shape == (1, 2) assert modin_frame.column_widths == [2, 2] res = modin_frame.apply_full_axis( axis=1, func=lambda df: df, keep_partitioning=False, new_index=[0, 1, 2, 3], new_columns=["a", "b", "c", "d"], ) assert res._partitions.shape == (1, 1) assert res.column_widths == [4] df_equals(res._partitions[0, 0].to_pandas(), pandas_df) df_equals(res.to_pandas(), pandas_df) @pytest.mark.parametrize("col_name", ["numeric_col", "non_numeric_col"]) @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("num_pivots", [3, 2, 1]) @pytest.mark.parametrize("all_pivots_are_unique", [True, False]) def test_split_partitions_kernel( col_name, ascending, num_pivots, all_pivots_are_unique ): """ This test verifies proper work of the `split_partitions_using_pivots_for_sort` function used in partitions reshuffling. The function being tested splits the passed dataframe into parts according to the 'pivots' indicating boundary values for the parts. Parameters ---------- col_name : {"numeric_col", "non_numeric_col"} The tested function takes a key column name to which the pivot values belong. The function may behave differently depending on the type of that column. 
ascending : {True, False} The split parts are returned either in ascending or descending order. This parameter helps us to test both of the cases. num_pivots : {3, 2, 1} The function's behavior may depend on the number of boundary values being passed. all_pivots_are_unique : {True, False} Duplicate pivot values cause empty partitions to be produced. This parameter helps to verify that the function still behaves correctly in such cases. """ random_state = np.random.RandomState(42) df = pandas.DataFrame( { "numeric_col": range(9), "non_numeric_col": list("abcdefghi"), } ) min_val, max_val = df[col_name].iloc[0], df[col_name].iloc[-1] # Selecting random boundary values for the key column pivots = random_state.choice(df[col_name], num_pivots, replace=False) if not all_pivots_are_unique: # Making the 'pivots' contain only duplicate values pivots = np.repeat(pivots[0], num_pivots) # The tested function assumes that we pass pivots in the ascending order pivots = np.sort(pivots) # Randomly reordering rows in the dataframe df = df.reindex(random_state.permutation(df.index)) bins = ShuffleSortFunctions.split_partitions_using_pivots_for_sort( df, [ ColumnInfo( name=col_name, is_numeric=pandas.api.types.is_numeric_dtype(df.dtypes[col_name]), pivots=pivots, ) ], ascending=ascending, ) # Building reference bounds to make the result verification simpler bounds = np.concatenate([[min_val], pivots, [max_val]]) if not ascending: # If the order is descending we want bounds to be in the descending order as well: # Ex: bounds = [0, 2, 5, 10] for ascending and [10, 5, 2, 0] for descending. 
bounds = bounds[::-1] for idx, part in enumerate(bins): if ascending: # Check that each part is in the range of 'bound[i] <= part <= bound[i + 1]' # Example, if the `pivots` were [2, 5] and the min/max values for the colum are min=0, max=10 # Then each part satisfies: 0 <= part[0] <= 2; 2 <= part[1] <= 5; 5 <= part[2] <= 10 assert ( (bounds[idx] <= part[col_name]) & (part[col_name] <= bounds[idx + 1]) ).all() else: # Check that each part is in the range of 'bound[i + 1] <= part <= bound[i]' # Example, if the `pivots` were [2, 5] and the min/max values for the colum are min=0, max=10 # Then each part satisfies: 5 <= part[0] <= 10; 2 <= part[1] <= 5; 0 <= part[2] <= 2 assert ( (bounds[idx + 1] <= part[col_name]) & (part[col_name] <= bounds[idx]) ).all() @pytest.mark.parametrize("col_name", ["numeric_col", "non_numeric_col"]) @pytest.mark.parametrize("ascending", [True, False]) def test_split_partitions_with_empty_pivots(col_name, ascending): """ This test verifies that the splitting function performs correctly when an empty pivots list is passed. The expected behavior is to return a single split consisting of the exact copy of the input dataframe. """ df = pandas.DataFrame( { "numeric_col": range(9), "non_numeric_col": list("abcdefghi"), } ) result = ShuffleSortFunctions.split_partitions_using_pivots_for_sort( df, [ ColumnInfo( name=col_name, is_numeric=pandas.api.types.is_numeric_dtype(df.dtypes[col_name]), pivots=[], ) ], ascending=ascending, ) # We're expecting to recieve a single split here assert isinstance(result, tuple) assert len(result) == 1 assert result[0].equals(df) @pytest.mark.parametrize("ascending", [True, False]) def test_shuffle_partitions_with_empty_pivots(ascending): """ This test verifies that the `PartitionMgr.shuffle_partitions` method can handle empty pivots list. 
""" modin_frame = pd.DataFrame( np.array([["hello", "goodbye"], ["hello", "Hello"]]) )._query_compiler._modin_frame assert modin_frame._partitions.shape == (1, 1) column_name = modin_frame.columns[1] shuffle_functions = ShuffleSortFunctions( # These are the parameters we pass in the `.sort_by()` implementation modin_frame, columns=column_name, ascending=ascending, ideal_num_new_partitions=1, ) new_partitions = modin_frame._partition_mgr_cls.shuffle_partitions( modin_frame._partitions, index=0, shuffle_functions=shuffle_functions, final_shuffle_func=lambda df: df.sort_values(column_name), ) ref = modin_frame.to_pandas().sort_values(column_name) res = new_partitions[0, 0].get() assert new_partitions.shape == (1, 1) assert ref.equals(res) @pytest.mark.parametrize("ascending", [True, False]) def test_split_partition_preserve_names(ascending): """ This test verifies that the dataframes being split by ``split_partitions_using_pivots_for_sort`` preserve their index/column names. """ df = pandas.DataFrame( { "numeric_col": range(9), "non_numeric_col": list("abcdefghi"), } ) index_name = "custom_name" df.index.name = index_name df.columns.name = index_name # Pivots that contain empty bins pivots = [2, 2, 5, 7] splits = ShuffleSortFunctions.split_partitions_using_pivots_for_sort( df, [ColumnInfo(name="numeric_col", is_numeric=True, pivots=pivots)], ascending=ascending, ) for part in splits: assert part.index.name == index_name assert part.columns.name == index_name @pytest.mark.parametrize("has_cols_metadata", [True, False]) @pytest.mark.parametrize("has_dtypes_metadata", [True, False]) def test_merge_preserves_metadata(has_cols_metadata, has_dtypes_metadata): df1 = pd.DataFrame({"a": [1, 1, 2, 2], "b": list("abcd")}) df2 = pd.DataFrame({"a": [4, 2, 1, 3], "b": list("bcaf"), "c": [3, 2, 1, 0]}) modin_frame = df1._query_compiler._modin_frame if has_cols_metadata: # Verify that there were initially materialized metadata assert modin_frame.has_materialized_columns else: 
modin_frame._columns_cache = None if has_dtypes_metadata: # Verify that there were initially materialized metadata assert modin_frame.has_materialized_dtypes else: modin_frame.set_dtypes_cache(None) res = df1.merge(df2, on="b")._query_compiler._modin_frame if has_cols_metadata: assert res.has_materialized_columns if has_dtypes_metadata: assert res.has_materialized_dtypes else: # Verify that no materialization was triggered assert not res.has_materialized_dtypes assert not modin_frame.has_materialized_dtypes else: # Verify that no materialization was triggered assert not res.has_materialized_columns assert not res.has_materialized_dtypes assert not modin_frame.has_materialized_columns if not has_dtypes_metadata: assert not modin_frame.has_materialized_dtypes def test_binary_op_preserve_dtypes(): df = pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]}) def setup_cache(df, has_cache=True): if has_cache: _ = df.dtypes assert df._query_compiler.frame_has_materialized_dtypes else: df._query_compiler.set_frame_dtypes_cache(None) assert not df._query_compiler.frame_has_materialized_dtypes return df def assert_cache(df, has_cache=True): assert not (has_cache ^ df._query_compiler.frame_has_materialized_dtypes) # Check when `other` is a non-distributed object assert_cache(setup_cache(df) + 2.0) assert_cache(setup_cache(df) + {"a": 2.0, "b": 4}) assert_cache(setup_cache(df) + [2.0, 4]) assert_cache(setup_cache(df) + np.array([2.0, 4])) # Check when `other` is a dataframe other = pd.DataFrame({"b": [3, 4, 5], "c": [4.0, 5.0, 6.0]}) assert_cache(setup_cache(df) + setup_cache(other, has_cache=True)) assert_cache(setup_cache(df) + setup_cache(other, has_cache=False), has_cache=False) # Check when `other` is a series other = pd.Series({"b": 3.0, "c": 4.0}) assert_cache(setup_cache(df) + setup_cache(other, has_cache=True)) assert_cache(setup_cache(df) + setup_cache(other, has_cache=False), has_cache=False) @pytest.mark.parametrize("axis", [0, 1]) def 
test_concat_dont_materialize_opposite_axis(axis): data = {"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]} df1, df2 = pd.DataFrame(data), pd.DataFrame(data) def assert_no_cache(df, axis): if axis: assert not df._query_compiler.frame_has_materialized_columns else: assert not df._query_compiler.frame_has_materialized_index def remove_cache(df, axis): if axis: df._query_compiler.set_frame_columns_cache(None) else: df._query_compiler.set_frame_index_cache(None) assert_no_cache(df, axis) return df df1, df2 = remove_cache(df1, axis), remove_cache(df2, axis) df_concated = pd.concat((df1, df2), axis=axis) assert_no_cache(df1, axis) assert_no_cache(df2, axis) assert_no_cache(df_concated, axis) def test_setitem_bool_preserve_dtypes(): df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [3, 4, 5, 6]}) indexer = pd.Series([True, False, True, False]) assert df._query_compiler.frame_has_materialized_dtypes # slice(None) as a col_loc df.loc[indexer] = 2.0 assert df._query_compiler.frame_has_materialized_dtypes # list as a col_loc df.loc[indexer, ["a", "b"]] = 2.0 assert df._query_compiler.frame_has_materialized_dtypes # scalar as a col_loc df.loc[indexer, "a"] = 2.0 assert df._query_compiler.frame_has_materialized_dtypes def test_setitem_unhashable_preserve_dtypes(): df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) assert df._query_compiler.frame_has_materialized_dtypes df2 = pd.DataFrame([[9, 9], [5, 5]]) assert df2._query_compiler.frame_has_materialized_dtypes df[[1, 2]] = df2 assert df._query_compiler.frame_has_materialized_dtypes @pytest.mark.parametrize("modify_config", [{RangePartitioning: True}], indirect=True) def test_groupby_size_shuffling(modify_config): # verifies that 'groupby.size()' works with reshuffling implementation # https://github.com/modin-project/modin/issues/6367 df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [3, 4, 5, 6]}) modin_frame = df._query_compiler._modin_frame with mock.patch.object( modin_frame, "_apply_func_to_range_partitioning", 
wraps=modin_frame._apply_func_to_range_partitioning, ) as shuffling_method: try_cast_to_pandas(df.groupby("a").size()) shuffling_method.assert_called() @pytest.mark.parametrize( "kwargs", [dict(axis=0, labels=[]), dict(axis=1, labels=["a"]), dict(axis=1, labels=[])], ) def test_reindex_preserve_dtypes(kwargs): df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [3, 4, 5, 6]}) reindexed_df = df.reindex(**kwargs) assert reindexed_df._query_compiler.frame_has_materialized_dtypes
TestDrainVirtualPartitionCallQueue
python
h5py__h5py
h5py/tests/common.py
{ "start": 983, "end": 9278 }
class ____(ut.TestCase): """ Base class for unit tests. """ @classmethod def setUpClass(cls): cls.tempdir = tempfile.mkdtemp(prefix='h5py-test_') @classmethod def tearDownClass(cls): shutil.rmtree(cls.tempdir) def mktemp(self, suffix='.hdf5', prefix='tmp', dir=None): if dir is None: dir = self.tempdir return tempfile.mktemp(suffix, make_name(prefix), dir=dir) def mktemp_mpi(self, comm=None, suffix='.hdf5', prefix='', dir=None): if comm is None: from mpi4py import MPI comm = MPI.COMM_WORLD fname = None if comm.Get_rank() == 0: fname = self.mktemp(suffix, prefix, dir) fname = comm.bcast(fname, 0) return fname def setUp(self): self.f = h5py.File(self.mktemp(), 'w') def tearDown(self): try: if self.f: self.f.close() except: pass def assertSameElements(self, a, b): for x in a: match = False for y in b: if x == y: match = True if not match: raise AssertionError("Item '%s' appears in a but not b" % x) for x in b: match = False for y in a: if x == y: match = True if not match: raise AssertionError("Item '%s' appears in b but not a" % x) def assertArrayEqual(self, dset, arr, message=None, precision=None, check_alignment=True): """ Make sure dset and arr have the same shape, dtype and contents, to within the given precision, optionally ignoring differences in dtype alignment. Note that dset may be a NumPy array or an HDF5 dataset. 
""" if precision is None: precision = 1e-5 if message is None: message = '' else: message = ' (%s)' % message if np.isscalar(dset) or np.isscalar(arr): assert np.isscalar(dset) and np.isscalar(arr), \ 'Scalar/array mismatch ("%r" vs "%r")%s' % (dset, arr, message) dset = np.asarray(dset) arr = np.asarray(arr) assert dset.shape == arr.shape, \ "Shape mismatch (%s vs %s)%s" % (dset.shape, arr.shape, message) if dset.dtype != arr.dtype: if check_alignment: normalized_dset_dtype = dset.dtype normalized_arr_dtype = arr.dtype else: normalized_dset_dtype = repack_fields(dset.dtype) normalized_arr_dtype = repack_fields(arr.dtype) assert normalized_dset_dtype == normalized_arr_dtype, \ "Dtype mismatch (%s vs %s)%s" % (normalized_dset_dtype, normalized_arr_dtype, message) if not check_alignment: if normalized_dset_dtype != dset.dtype: dset = repack_fields(np.asarray(dset)) if normalized_arr_dtype != arr.dtype: arr = repack_fields(np.asarray(arr)) if arr.dtype.names is not None: for n in arr.dtype.names: message = '[FIELD %s] %s' % (n, message) self.assertArrayEqual(dset[n], arr[n], message=message, precision=precision, check_alignment=check_alignment) elif arr.dtype.kind in ('i', 'f'): assert np.all(np.abs(dset[...] - arr[...]) < precision), \ "Arrays differ by more than %.3f%s" % (precision, message) elif arr.dtype.kind == 'O': for v1, v2 in zip(dset.flat, arr.flat, strict=True): self.assertArrayEqual(v1, v2, message=message, precision=precision, check_alignment=check_alignment) else: assert np.all(dset[...] == arr[...]), \ "Arrays are not equal (dtype %s) %s" % (arr.dtype.str, message) def assertNumpyBehavior(self, dset, arr, s, skip_fast_reader=False): """ Apply slicing arguments "s" to both dset and arr. Succeeds if the results of the slicing are identical, or the exception raised is of the same type for both. "arr" must be a Numpy array; "dset" may be a NumPy array or dataset. 
""" exc = None try: arr_result = arr[s] except Exception as e: exc = type(e) s_fast = s if isinstance(s, tuple) else (s,) if exc is None: self.assertArrayEqual(dset[s], arr_result) if not skip_fast_reader: with phil: self.assertArrayEqual( dset._fast_reader.read(s_fast), arr_result, ) else: with self.assertRaises(exc): dset[s] if not skip_fast_reader: with self.assertRaises(exc), phil: dset._fast_reader.read(s_fast) NUMPY_RELEASE_VERSION = tuple([int(i) for i in np.__version__.split(".")[0:2]]) @contextmanager def closed_tempfile(suffix='', text=None): """ Context manager which yields the path to a closed temporary file with the suffix `suffix`. The file will be deleted on exiting the context. An additional argument `text` can be provided to have the file contain `text`. """ with tempfile.NamedTemporaryFile( 'w+t', suffix=suffix, delete=False ) as test_file: file_name = test_file.name if text is not None: test_file.write(text) test_file.flush() yield file_name shutil.rmtree(file_name, ignore_errors=True) def insubprocess(f): """Runs a test in its own subprocess""" @wraps(f) def wrapper(request, *args, **kwargs): curr_test = inspect.getsourcefile(f) + "::" + request.node.name # get block around test name insub = "IN_SUBPROCESS_" + curr_test for c in "/\\,:.": insub = insub.replace(c, "_") defined = os.environ.get(insub, None) if defined: # We're already running in a subprocess return f(request, *args, **kwargs) else: # Spawn a new interpreter and run pytest in it env = os.environ.copy() env[insub] = '1' env.update(getattr(f, 'subproc_env', {})) with closed_tempfile() as stdout: with open(stdout, 'w+t') as fh: rtn = subprocess.call([sys.executable, '-m', 'pytest', curr_test], stdout=fh, stderr=fh, env=env) with open(stdout, 'rt') as fh: out = fh.read() assert rtn == 0, "\n" + out return wrapper def subproc_env(d): """Set environment variables for the @insubprocess decorator""" def decorator(f): f.subproc_env = d return f return decorator MAIN_THREAD_ID = 
threading.get_ident() def make_name(template_or_prefix: str = "foo", /) -> str: """Return a static name, to be used e.g. as dataset name. When running in pytest-run-parallel, append a thread ID to the name. This allows running tests on shared resources, e.g. two threads can attempt to write to separate datasets on the same File at the same time (even though the actual writes will be serialized by the `phil` lock). Calling this function twice from the same thread will return the same name. Parameters ---------- template_or_prefix Either a prefix to which potentially append the thread ID, or a template containing exactly one "{}" to be replaced with the thread ID. """ tid = threading.get_ident() suffix = "" if tid == MAIN_THREAD_ID else f"-{tid}" if "{}" in template_or_prefix: return template_or_prefix.format(suffix) else: return template_or_prefix + suffix def is_main_thread() -> bool: """Return True if the test calling this function is being executed in the main thread; False otherwise. This can be used to detect when a test is running in pytest-run-parallel. that spawns multiple separate threads to run the tests. """ tid = threading.get_ident() return tid == MAIN_THREAD_ID
TestCase
python
django__django
django/db/models/fields/json.py
{ "start": 23465, "end": 23551 }
class ____(KeyTransformTextLookupMixin, lookups.EndsWith): pass
KeyTransformEndsWith
python
mlflow__mlflow
mlflow/server/jobs/__init__.py
{ "start": 679, "end": 1010 }
class ____(RuntimeError): """ Raise `TransientError` in a job to trigger job retry """ def __init__(self, origin_error: Exception): super().__init__() self._origin_error = origin_error @property def origin_error(self) -> Exception: return self._origin_error @dataclass
TransientError
python
kamyu104__LeetCode-Solutions
Python/maximum-number-of-robots-within-budget.py
{ "start": 93, "end": 923 }
class ____(object): def maximumRobots(self, chargeTimes, runningCosts, budget): """ :type chargeTimes: List[int] :type runningCosts: List[int] :type budget: int :rtype: int """ result = left = curr = 0 dq = collections.deque() for right in xrange(len(chargeTimes)): while dq and chargeTimes[dq[-1]] <= chargeTimes[right]: dq.pop() dq.append(right) curr += runningCosts[right] if chargeTimes[dq[0]]+(right-left+1)*curr > budget: if dq[0] == left: dq.popleft() curr -= runningCosts[left] left += 1 return right-left+1 # Time: O(n) # Space: O(n) import collections # sliding window, two pointers, mono deque
Solution
python
docker__docker-py
tests/unit/utils_test.py
{ "start": 15338, "end": 15925 }
class ____(unittest.TestCase): def test_parse_bytes_valid(self): assert parse_bytes("512MB") == 536870912 assert parse_bytes("512M") == 536870912 assert parse_bytes("512m") == 536870912 def test_parse_bytes_invalid(self): with pytest.raises(DockerException): parse_bytes("512MK") with pytest.raises(DockerException): parse_bytes("512L") with pytest.raises(DockerException): parse_bytes("127.0.0.1K") def test_parse_bytes_float(self): assert parse_bytes("1.5k") == 1536
ParseBytesTest
python
airbytehq__airbyte
airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py
{ "start": 2334, "end": 2855 }
class ____(RkiCovidStream): """Docs: https://api.corona-zahlen.org/germany/age-groups""" primary_key = None def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]: yield response.json().get("data") def path( self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None ) -> str: return "germany/age-groups" # class that contains main source states | full-refresh
GermanyAgeGroups
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 854047, "end": 854599 }
class ____(sgqlc.types.Type, Node): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("database_id", "issue", "pinned_by", "repository") database_id = sgqlc.types.Field(Int, graphql_name="databaseId") issue = sgqlc.types.Field(sgqlc.types.non_null(Issue), graphql_name="issue") pinned_by = sgqlc.types.Field(sgqlc.types.non_null(Actor), graphql_name="pinnedBy") repository = sgqlc.types.Field( sgqlc.types.non_null("Repository"), graphql_name="repository" )
PinnedIssue
python
pandas-dev__pandas
asv_bench/benchmarks/sparse.py
{ "start": 1375, "end": 1589 }
class ____: def setup(self): N = 1000 self.sparse = scipy.sparse.rand(N, N, 0.005) def time_from_scipy(self): pd.DataFrame.sparse.from_spmatrix(self.sparse)
SparseDataFrameConstructor
python
django-guardian__django-guardian
example_project/posts/migrations/0001_initial.py
{ "start": 43, "end": 869 }
class ____(migrations.Migration): dependencies = [] operations = [ migrations.CreateModel( name="Post", fields=[ ("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)), ("title", models.CharField(max_length=64, verbose_name=b"title")), ("slug", models.SlugField(max_length=64)), ("content", models.TextField(verbose_name=b"content")), ("created_at", models.DateTimeField(auto_now_add=True, db_index=True)), ], options={ "get_latest_by": "created_at", "default_permissions": ("add", "change", "delete"), "permissions": (("view_post", "Can view post"),), }, ), ]
Migration
python
google__pytype
pytype/tools/traces/source_test.py
{ "start": 187, "end": 267 }
class ____(source.AbstractTrace): """Fake trace class for testing."""
_FakeTrace
python
getsentry__sentry
tests/sentry/api/test_paginator.py
{ "start": 6555, "end": 15591 }
class ____(TestCase): def test_ascending(self) -> None: joined = timezone.now() # The DateTime pager only has accuracy up to 1000th of a second. # Everything can't be added within less than 10 microseconds of each # other. This is handled by the pager (see test_rounding_offset), but # this case shouldn't rely on it. res1 = self.create_user("foo@example.com", date_joined=joined) res2 = self.create_user("bar@example.com", date_joined=joined + timedelta(seconds=1)) res3 = self.create_user("baz@example.com", date_joined=joined + timedelta(seconds=2)) res4 = self.create_user("qux@example.com", date_joined=joined + timedelta(seconds=3)) queryset = User.objects.all() paginator = DateTimePaginator(queryset, "date_joined") result1 = paginator.get_result(limit=2, cursor=None) assert len(result1) == 2, result1 assert result1[0] == res1 assert result1[1] == res2 assert result1.next assert not result1.prev result2 = paginator.get_result(limit=2, cursor=result1.next) assert len(result2) == 2, result2 assert result2[0] == res3 assert result2[1] == res4 assert not result2.next assert result2.prev result3 = paginator.get_result(limit=1, cursor=result2.prev) assert len(result3) == 1, result3 assert result3[0] == res2 assert result3.next assert result3.prev result4 = paginator.get_result(limit=1, cursor=result3.prev) assert len(result4) == 1, result4 assert result4[0] == res1 assert result4.next assert not result4.prev def test_descending(self) -> None: joined = timezone.now() res1 = self.create_user("foo@example.com", date_joined=joined) res2 = self.create_user("bar@example.com", date_joined=joined + timedelta(seconds=1)) res3 = self.create_user("baz@example.com", date_joined=joined + timedelta(seconds=2)) queryset = User.objects.all() paginator = DateTimePaginator(queryset, "-date_joined") result1 = paginator.get_result(limit=1, cursor=None) assert len(result1) == 1, result1 assert result1[0] == res3 assert result1.next assert not result1.prev result2 = 
paginator.get_result(limit=2, cursor=result1.next) assert len(result2) == 2, result2 assert result2[0] == res2 assert result2[1] == res1 assert not result2.next assert result2.prev result3 = paginator.get_result(limit=2, cursor=result2.prev) assert len(result3) == 1, result3 assert result3[0] == res3 assert result3.next assert not result3.prev def test_prev_descending_with_new(self) -> None: joined = timezone.now() res1 = self.create_user("foo@example.com", date_joined=joined) res2 = self.create_user("bar@example.com", date_joined=joined + timedelta(seconds=1)) queryset = User.objects.all() paginator = DateTimePaginator(queryset, "-date_joined") result1 = paginator.get_result(limit=10, cursor=None) assert len(result1) == 2, result1 assert result1[0] == res2 assert result1[1] == res1 res3 = self.create_user("baz@example.com", date_joined=joined + timedelta(seconds=2)) res4 = self.create_user("qux@example.com", date_joined=joined + timedelta(seconds=3)) result2 = paginator.get_result(limit=10, cursor=result1.prev) assert len(result2) == 2, result2 assert result2[0] == res4 assert result2[1] == res3 result3 = paginator.get_result(limit=10, cursor=result2.prev) assert len(result3) == 0, result3 result4 = paginator.get_result(limit=10, cursor=result1.next) assert len(result4) == 0, result4 def test_rounding_offset(self) -> None: joined = timezone.now() res1 = self.create_user("foo@example.com", date_joined=joined) res2 = self.create_user("bar@example.com", date_joined=joined + timedelta(microseconds=1)) res3 = self.create_user("baz@example.com", date_joined=joined + timedelta(microseconds=2)) res4 = self.create_user("qux@example.com", date_joined=joined + timedelta(microseconds=3)) queryset = User.objects.all() paginator = DateTimePaginator(queryset, "date_joined") result1 = paginator.get_result(limit=3, cursor=None) assert len(result1) == 3, result1 assert result1[0] == res1 assert result1[1] == res2 assert result1[2] == res3 result2 = paginator.get_result(limit=10, 
cursor=result1.next) assert len(result2) == 1, result2 assert result2[0] == res4 result3 = paginator.get_result(limit=2, cursor=result2.prev) assert len(result3) == 2, result3 assert result3[0] == res2 assert result3[1] == res3 result4 = paginator.get_result(limit=1, cursor=result3.prev) assert len(result4) == 1, result4 assert result4[0] == res1 result5 = paginator.get_result(limit=10, cursor=result4.prev) assert len(result5) == 0, list(result5) def test_same_row_updated(self) -> None: joined = timezone.now() res1 = self.create_user("foo@example.com", date_joined=joined) queryset = User.objects.all() paginator = DateTimePaginator(queryset, "-date_joined") result1 = paginator.get_result(limit=3, cursor=None) assert len(result1) == 1, result1 assert result1[0] == res1 # Prev page should return no results result2 = paginator.get_result(limit=3, cursor=result1.prev) assert len(result2) == 0, result2 # If the same row has an updated join date then it should # show up on the prev page res1.update(date_joined=joined + timedelta(seconds=1)) result3 = paginator.get_result(limit=3, cursor=result1.prev) assert len(result3) == 1, result3 assert result3[0] == res1 # Make sure updates work as expected with extra rows res1.update(date_joined=res1.date_joined + timedelta(seconds=1)) res2 = self.create_user( "bar@example.com", date_joined=res1.date_joined + timedelta(seconds=1) ) res3 = self.create_user( "baz@example.com", date_joined=res1.date_joined + timedelta(seconds=2) ) res4 = self.create_user( "bat@example.com", date_joined=res1.date_joined + timedelta(seconds=3) ) result4 = paginator.get_result(limit=1, cursor=result3.prev) assert len(result4) == 1, result4 assert result4[0] == res1 result5 = paginator.get_result(limit=3, cursor=result3.prev) assert len(result5) == 3, result5 assert result5[0] == res3 assert result5[1] == res2 assert result5[2] == res1 result6 = paginator.get_result(limit=3, cursor=result5.prev) assert len(result6) == 1, result6 assert result6[0] == res4 
res4.update(date_joined=res4.date_joined + timedelta(seconds=1)) result7 = paginator.get_result(limit=3, cursor=result6.prev) assert len(result7) == 1, result7 assert result7[0] == res4 def test_reverse_bisect_left() -> None: assert reverse_bisect_left([], 0) == 0 assert reverse_bisect_left([1], -1) == 1 assert reverse_bisect_left([1], 0) == 1 assert reverse_bisect_left([1], 1) == 0 assert reverse_bisect_left([1], 2) == 0 assert reverse_bisect_left([2, 1], -1) == 2 assert reverse_bisect_left([2, 1], 0) == 2 assert reverse_bisect_left([2, 1], 1) == 1 assert reverse_bisect_left([2, 1], 2) == 0 assert reverse_bisect_left([2, 1], 3) == 0 assert reverse_bisect_left([3, 2, 1], -1) == 3 assert reverse_bisect_left([3, 2, 1], 0) == 3 assert reverse_bisect_left([3, 2, 1], 1) == 2 assert reverse_bisect_left([3, 2, 1], 2) == 1 assert reverse_bisect_left([3, 2, 1], 3) == 0 assert reverse_bisect_left([3, 2, 1], 4) == 0 assert reverse_bisect_left([4, 3, 2, 1], -1) == 4 assert reverse_bisect_left([4, 3, 2, 1], 0) == 4 assert reverse_bisect_left([4, 3, 2, 1], 1) == 3 assert reverse_bisect_left([4, 3, 2, 1], 2) == 2 assert reverse_bisect_left([4, 3, 2, 1], 3) == 1 assert reverse_bisect_left([4, 3, 2, 1], 4) == 0 assert reverse_bisect_left([4, 3, 2, 1], 5) == 0 assert reverse_bisect_left([1, 1], 0) == 2 assert reverse_bisect_left([1, 1], 1) == 0 assert reverse_bisect_left([1, 1], 2) == 0 assert reverse_bisect_left([2, 1, 1], 0) == 3 assert reverse_bisect_left([2, 1, 1], 1) == 1 assert reverse_bisect_left([2, 1, 1], 2) == 0 assert reverse_bisect_left([2, 2, 1], 0) == 3 assert reverse_bisect_left([2, 2, 1], 1) == 2 assert reverse_bisect_left([2, 2, 1], 2) == 0 assert reverse_bisect_left([3, 2, 1], 2, hi=10) == 1
DateTimePaginatorTest
python
conda__conda
conda/exceptions.py
{ "start": 40321, "end": 40531 }
class ____(CondaEnvException): def __init__(self, msg: str, *args, **kwargs): msg = f"Provided environment.yaml is invalid: {msg}" super().__init__(msg, *args, **kwargs)
EnvironmentFileInvalid
python
pyqtgraph__pyqtgraph
pyqtgraph/multiprocess/parallelizer.py
{ "start": 9714, "end": 12444 }
class ____(object): def __init__(self, parallelizer, process, tasks, kwds): self.proc = process self.par = parallelizer self.tasks = tasks for k, v in kwds.items(): setattr(self, k, v) def __iter__(self): ## we could fix this up such that tasks are retrieved from the parent process one at a time.. for i, task in enumerate(self.tasks): self.index = i #print os.getpid(), 'starting task', i self._taskStarted(os.getpid(), i, _callSync='off') yield task if self.proc is not None: #print os.getpid(), 'no more tasks' self.proc.close() def process(self): """ Process requests from parent. Usually it is not necessary to call this unless you would like to receive messages (such as exit requests) during an iteration. """ if self.proc is not None: self.proc.processRequests() def numWorkers(self): """ Return the number of parallel workers """ return self.par.workers #class Parallelizer: #""" #Use:: #p = Parallelizer() #with p(4) as i: #p.finish(do_work(i)) #print p.results() #""" #def __init__(self): #pass #def __call__(self, n): #self.replies = [] #self.conn = None ## indicates this is the parent process #return Session(self, n) #def finish(self, data): #if self.conn is None: #self.replies.append((self.i, data)) #else: ##print "send", self.i, data #self.conn.send((self.i, data)) #os._exit(0) #def result(self): #print self.replies #class Session: #def __init__(self, par, n): #self.par = par #self.n = n #def __enter__(self): #self.childs = [] #for i in range(1, self.n): #c1, c2 = multiprocessing.Pipe() #pid = os.fork() #if pid == 0: ## child #self.par.i = i #self.par.conn = c2 #self.childs = None #c1.close() #return i #else: #self.childs.append(c1) #c2.close() #self.par.i = 0 #return 0 #def __exit__(self, *exc_info): #if exc_info[0] is not None: #sys.excepthook(*exc_info) #if self.childs is not None: #self.par.replies.extend([conn.recv() for conn in self.childs]) #else: #self.par.finish(None)
Tasker
python
python-attrs__attrs
tests/test_slots.py
{ "start": 448, "end": 923 }
class ____: x = attr.ib(validator=attr.validators.instance_of(int)) y = attr.ib() def method(self): return self.x @classmethod def classmethod(cls): return "clsmethod" @staticmethod def staticmethod(): return "staticmethod" def my_class(self): return __class__ def my_super(self): """Just to test out the no-arg super.""" return super().__repr__() @attr.s(slots=True, unsafe_hash=True)
C1
python
walkccc__LeetCode
solutions/483. Smallest Good Base/483.py
{ "start": 0, "end": 237 }
class ____: def smallestGoodBase(self, n: str) -> str: n = int(n) for m in range(int(math.log(n, 2)), 1, -1): k = int(n**m**-1) if (k**(m + 1) - 1) // (k - 1) == n: return str(k) return str(n - 1)
Solution
python
apache__airflow
providers/neo4j/tests/unit/neo4j/hooks/test_neo4j.py
{ "start": 961, "end": 9795 }
class ____: @pytest.mark.parametrize( ("conn_extra", "expected_uri"), [ ({}, "bolt://host:7687"), ({"neo4j_scheme": False}, "bolt://host:7687"), ({"certs_self_signed": True, "neo4j_scheme": False}, "bolt+ssc://host:7687"), ({"certs_trusted_ca": True, "neo4j_scheme": False}, "bolt+s://host:7687"), ({"certs_self_signed": True, "neo4j_scheme": True}, "neo4j+ssc://host:7687"), ({"certs_trusted_ca": True, "neo4j_scheme": True}, "neo4j+s://host:7687"), ], ) def test_get_uri_neo4j_scheme(self, conn_extra, expected_uri): connection = Connection( conn_type="neo4j", login="login", password="password", host="host", schema="schema", extra=conn_extra, ) # Use the environment variable mocking to test saving the configuration as a URI and # to avoid mocking Airflow models class with mock.patch.dict("os.environ", AIRFLOW_CONN_NEO4J_DEFAULT=connection.get_uri()): neo4j_hook = Neo4jHook() uri = neo4j_hook.get_uri(connection) assert uri == expected_uri @mock.patch("airflow.providers.neo4j.hooks.neo4j.GraphDatabase") def test_run_with_schema(self, mock_graph_database): connection = Connection( conn_type="neo4j", login="login", password="password", host="host", schema="schema" ) mock_sql = mock.MagicMock(name="sql") # Use the environment variable mocking to test saving the configuration as a URI and # to avoid mocking Airflow models class with mock.patch.dict("os.environ", AIRFLOW_CONN_NEO4J_DEFAULT=connection.get_uri()): neo4j_hook = Neo4jHook() op_result = neo4j_hook.run(mock_sql) mock_graph_database.assert_has_calls( [ mock.call.driver("bolt://host:7687", auth=("login", "password"), encrypted=False), mock.call.driver().session(database="schema"), mock.call.driver().session().__enter__(), mock.call.driver().session().__enter__().run(mock_sql), mock.call.driver().session().__enter__().run().data(), mock.call.driver().session().__exit__(None, None, None), ] ) session = mock_graph_database.driver.return_value.session.return_value.__enter__.return_value assert op_result == 
session.run.return_value.data.return_value @mock.patch("airflow.providers.neo4j.hooks.neo4j.GraphDatabase") def test_run_with_schema_and_params(self, mock_graph_database): connection = Connection( conn_type="neo4j", login="login", password="password", host="host", schema="schema" ) mock_sql = mock.MagicMock(name="sql") mock_parameters = mock.MagicMock(name="parameters") # Use the environment variable mocking to test saving the configuration as a URI and # to avoid mocking Airflow models class with mock.patch.dict("os.environ", AIRFLOW_CONN_NEO4J_DEFAULT=connection.get_uri()): neo4j_hook = Neo4jHook() op_result = neo4j_hook.run(mock_sql, mock_parameters) mock_graph_database.assert_has_calls( [ mock.call.driver("bolt://host:7687", auth=("login", "password"), encrypted=False), mock.call.driver().session(database="schema"), mock.call.driver().session().__enter__(), mock.call.driver().session().__enter__().run(mock_sql, mock_parameters), mock.call.driver().session().__enter__().run().data(), mock.call.driver().session().__exit__(None, None, None), ] ) session = mock_graph_database.driver.return_value.session.return_value.__enter__.return_value assert op_result == session.run.return_value.data.return_value @mock.patch("airflow.providers.neo4j.hooks.neo4j.GraphDatabase") def test_run_without_schema(self, mock_graph_database): connection = Connection( conn_type="neo4j", login="login", password="password", host="host", schema=None ) mock_sql = mock.MagicMock(name="sql") # Use the environment variable mocking to test saving the configuration as a URI and # to avoid mocking Airflow models class with mock.patch.dict("os.environ", AIRFLOW_CONN_NEO4J_DEFAULT=connection.get_uri()): neo4j_hook = Neo4jHook() op_result = neo4j_hook.run(mock_sql) mock_graph_database.assert_has_calls( [ mock.call.driver("bolt://host:7687", auth=("login", "password"), encrypted=False), mock.call.driver().session(), mock.call.driver().session().__enter__(), 
mock.call.driver().session().__enter__().run(mock_sql), mock.call.driver().session().__enter__().run().data(), mock.call.driver().session().__exit__(None, None, None), ] ) session = mock_graph_database.driver.return_value.session.return_value.__enter__.return_value assert op_result == session.run.return_value.data.return_value @mock.patch("airflow.providers.neo4j.hooks.neo4j.GraphDatabase") def test_run_without_schema_and_params(self, mock_graph_database): connection = Connection( conn_type="neo4j", login="login", password="password", host="host", schema=None ) mock_sql = mock.MagicMock(name="sql") mock_parameters = mock.MagicMock(name="parameters") # Use the environment variable mocking to test saving the configuration as a URI and # to avoid mocking Airflow models class with mock.patch.dict("os.environ", AIRFLOW_CONN_NEO4J_DEFAULT=connection.get_uri()): neo4j_hook = Neo4jHook() op_result = neo4j_hook.run(mock_sql, mock_parameters) mock_graph_database.assert_has_calls( [ mock.call.driver("bolt://host:7687", auth=("login", "password"), encrypted=False), mock.call.driver().session(), mock.call.driver().session().__enter__(), mock.call.driver().session().__enter__().run(mock_sql, mock_parameters), mock.call.driver().session().__enter__().run().data(), mock.call.driver().session().__exit__(None, None, None), ] ) session = mock_graph_database.driver.return_value.session.return_value.__enter__.return_value assert op_result == session.run.return_value.data.return_value @pytest.mark.parametrize( ("conn_extra", "should_provide_encrypted", "expected_encrypted"), [ ({}, True, False), ({"neo4j_scheme": False, "encrypted": True}, True, True), ({"certs_self_signed": False, "neo4j_scheme": False, "encrypted": False}, True, False), ({"certs_trusted_ca": False, "neo4j_scheme": False, "encrypted": False}, True, False), ({"certs_self_signed": False, "neo4j_scheme": True, "encrypted": False}, True, False), ({"certs_trusted_ca": False, "neo4j_scheme": True, "encrypted": False}, True, 
False), ({"certs_self_signed": True, "neo4j_scheme": False, "encrypted": False}, False, None), ({"certs_trusted_ca": True, "neo4j_scheme": False, "encrypted": False}, False, None), ({"certs_self_signed": True, "neo4j_scheme": True, "encrypted": False}, False, None), ({"certs_trusted_ca": True, "neo4j_scheme": True, "encrypted": False}, False, None), ], ) @mock.patch("airflow.providers.neo4j.hooks.neo4j.GraphDatabase.driver") def test_encrypted_provided( self, mock_graph_database, conn_extra, should_provide_encrypted, expected_encrypted ): connection = Connection( conn_type="neo4j", login="login", password="password", host="host", schema="schema", extra=conn_extra, ) with mock.patch.dict("os.environ", AIRFLOW_CONN_NEO4J_DEFAULT=connection.get_uri()): neo4j_hook = Neo4jHook() with neo4j_hook.get_conn(): if should_provide_encrypted: assert "encrypted" in mock_graph_database.call_args.kwargs assert mock_graph_database.call_args.kwargs["encrypted"] == expected_encrypted else: assert "encrypted" not in mock_graph_database.call_args.kwargs
TestNeo4jHookConn
python
kamyu104__LeetCode-Solutions
Python/number-of-students-doing-homework-at-a-given-time.py
{ "start": 48, "end": 358 }
class ____(object): def busyStudent(self, startTime, endTime, queryTime): """ :type startTime: List[int] :type endTime: List[int] :type queryTime: int :rtype: int """ return sum(s <= queryTime <= e for s, e in itertools.izip(startTime, endTime))
Solution
python
django__django
django/db/models/functions/text.py
{ "start": 11393, "end": 11465 }
class ____(Transform): function = "TRIM" lookup_name = "trim"
Trim
python
django__django
tests/view_tests/tests/test_static.py
{ "start": 6724, "end": 7805 }
class ____(StaticTests): """ Test case to make sure the static URL pattern helper works as expected """ def setUp(self): super().setUp() self._old_views_urlpatterns = urls.urlpatterns[:] urls.urlpatterns += static("media/", document_root=media_dir) def tearDown(self): super().tearDown() urls.urlpatterns = self._old_views_urlpatterns def test_prefix(self): self.assertEqual(static("test")[0].pattern.regex.pattern, "^test(?P<path>.*)$") @override_settings(DEBUG=False) def test_debug_off(self): """No URLs are served if DEBUG=False.""" self.assertEqual(static("test"), []) def test_empty_prefix(self): with self.assertRaisesMessage( ImproperlyConfigured, "Empty static prefix not permitted" ): static("") def test_special_prefix(self): """No URLs are served if prefix contains a netloc part.""" self.assertEqual(static("http://example.org"), []) self.assertEqual(static("//example.org"), [])
StaticHelperTest
python
pyca__cryptography
src/cryptography/x509/extensions.py
{ "start": 11836, "end": 13081 }
class ____: def __init__( self, access_method: ObjectIdentifier, access_location: GeneralName ) -> None: if not isinstance(access_method, ObjectIdentifier): raise TypeError("access_method must be an ObjectIdentifier") if not isinstance(access_location, GeneralName): raise TypeError("access_location must be a GeneralName") self._access_method = access_method self._access_location = access_location def __repr__(self) -> str: return ( f"<AccessDescription(access_method={self.access_method}, " f"access_location={self.access_location})>" ) def __eq__(self, other: object) -> bool: if not isinstance(other, AccessDescription): return NotImplemented return ( self.access_method == other.access_method and self.access_location == other.access_location ) def __hash__(self) -> int: return hash((self.access_method, self.access_location)) @property def access_method(self) -> ObjectIdentifier: return self._access_method @property def access_location(self) -> GeneralName: return self._access_location
AccessDescription
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/layout/containers.py
{ "start": 96991, "end": 100179 }
class ____(Container): """ Container class that dynamically returns any Container. :param get_container: Callable that returns a :class:`.Container` instance or any widget with a ``__pt_container__`` method. """ def __init__(self, get_container: Callable[[], AnyContainer]) -> None: self.get_container = get_container def _get_container(self) -> Container: """ Return the current container object. We call `to_container`, because `get_container` can also return a widget with a ``__pt_container__`` method. """ obj = self.get_container() return to_container(obj) def reset(self) -> None: self._get_container().reset() def preferred_width(self, max_available_width: int) -> Dimension: return self._get_container().preferred_width(max_available_width) def preferred_height(self, width: int, max_available_height: int) -> Dimension: return self._get_container().preferred_height(width, max_available_height) def write_to_screen( self, screen: Screen, mouse_handlers: MouseHandlers, write_position: WritePosition, parent_style: str, erase_bg: bool, z_index: int | None, ) -> None: self._get_container().write_to_screen( screen, mouse_handlers, write_position, parent_style, erase_bg, z_index ) def is_modal(self) -> bool: return False def get_key_bindings(self) -> KeyBindingsBase | None: # Key bindings will be collected when `layout.walk()` finds the child # container. return None def get_children(self) -> list[Container]: # Here we have to return the current active container itself, not its # children. Otherwise, we run into issues where `layout.walk()` will # never see an object of type `Window` if this contains a window. We # can't/shouldn't proxy the "isinstance" check. return [self._get_container()] def to_container(container: AnyContainer) -> Container: """ Make sure that the given object is a :class:`.Container`. 
""" if isinstance(container, Container): return container elif hasattr(container, "__pt_container__"): return to_container(container.__pt_container__()) else: raise ValueError(f"Not a container object: {container!r}") def to_window(container: AnyContainer) -> Window: """ Make sure that the given argument is a :class:`.Window`. """ if isinstance(container, Window): return container elif hasattr(container, "__pt_container__"): return to_window(cast("MagicContainer", container).__pt_container__()) else: raise ValueError(f"Not a Window object: {container!r}.") def is_container(value: object) -> TypeGuard[AnyContainer]: """ Checks whether the given value is a container object (for use in assert statements). """ if isinstance(value, Container): return True if hasattr(value, "__pt_container__"): return is_container(cast("MagicContainer", value).__pt_container__()) return False
DynamicContainer
python
apache__thrift
lib/py/src/protocol/TJSONProtocol.py
{ "start": 16843, "end": 18855 }
class ____(TJSONProtocolBase): """Simple, readable, write-only JSON protocol. Useful for interacting with scripting languages. """ def readMessageBegin(self): raise NotImplementedError() def readMessageEnd(self): raise NotImplementedError() def readStructBegin(self): raise NotImplementedError() def readStructEnd(self): raise NotImplementedError() def writeMessageBegin(self, name, request_type, seqid): self.resetWriteContext() def writeMessageEnd(self): pass def writeStructBegin(self, name): self.writeJSONObjectStart() def writeStructEnd(self): self.writeJSONObjectEnd() def writeFieldBegin(self, name, ttype, fid): self.writeJSONString(name) def writeFieldEnd(self): pass def writeMapBegin(self, ktype, vtype, size): self.writeJSONObjectStart() def writeMapEnd(self): self.writeJSONObjectEnd() def _writeCollectionBegin(self, etype, size): self.writeJSONArrayStart() def _writeCollectionEnd(self): self.writeJSONArrayEnd() writeListBegin = _writeCollectionBegin writeListEnd = _writeCollectionEnd writeSetBegin = _writeCollectionBegin writeSetEnd = _writeCollectionEnd def writeByte(self, byte): checkIntegerLimits(byte, 8) self.writeJSONNumber(byte) def writeI16(self, i16): checkIntegerLimits(i16, 16) self.writeJSONNumber(i16) def writeI32(self, i32): checkIntegerLimits(i32, 32) self.writeJSONNumber(i32) def writeI64(self, i64): checkIntegerLimits(i64, 64) self.writeJSONNumber(i64) def writeBool(self, boolean): self.writeJSONNumber(1 if boolean is True else 0) def writeDouble(self, dbl): self.writeJSONNumber(dbl) def writeString(self, string): self.writeJSONString(string) def writeBinary(self, binary): self.writeJSONBase64(binary)
TSimpleJSONProtocol
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_lookup.py
{ "start": 19245, "end": 19387 }
class ____: def __init__(self, nxt: typing.Optional["B"]): self.nxt = nxt def __repr__(self): return f"A({self.nxt})"
A
python
astropy__astropy
astropy/time/formats.py
{ "start": 73721, "end": 74394 }
class ____(TimeNumeric): """ Base class for support of Besselian and Julian epoch dates. """ _default_scale = "tt" # As of astropy 3.2, this is no longer 'utc'. def set_jds(self, val1, val2): self._check_scale(self._scale) # validate scale. epoch_to_jd = getattr(erfa, self.epoch_to_jd) jd1, jd2 = epoch_to_jd(val1 + val2) self.jd1, self.jd2 = day_frac(jd1, jd2) def to_value(self, **kwargs): jd_to_epoch = getattr(erfa, self.jd_to_epoch) value = jd_to_epoch(self.jd1, self.jd2) return super().to_value(jd1=value, jd2=np.float64(0.0), **kwargs) value = property(to_value)
TimeEpochDate
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/model_query_cache.py
{ "start": 703, "end": 804 }
class ____: def foo(self, x): return 0 def bar(self, x): return 1 @decorated
X
python
spack__spack
lib/spack/spack/spec.py
{ "start": 41256, "end": 46585 }
class ____: """Descriptor used to forward queries from Spec to Package""" def __init__( self, attribute_name: str, default_handler: Optional[Callable[["Spec"], Any]] = None, _indirect: bool = False, ) -> None: """Create a new descriptor. Parameters: attribute_name: name of the attribute to be searched for in the Package instance default_handler: default function to be called if the attribute was not found in the Package instance _indirect: temporarily added to redirect a query to another package. """ self.attribute_name = attribute_name self.default = default_handler self.indirect = _indirect def __get__(self, instance: "SpecBuildInterface", cls): """Retrieves the property from Package using a well defined chain of responsibility. The order of call is: 1. if the query was through the name of a virtual package try to search for the attribute ``{virtual_name}_{attribute_name}`` in Package 2. try to search for attribute ``{attribute_name}`` in Package 3. try to call the default handler The first call that produces a value will stop the chain. If no call can handle the request then AttributeError is raised with a message indicating that no relevant attribute exists. If a call returns None, an AttributeError is raised with a message indicating a query failure, e.g. that library files were not found in a 'libs' query. """ # TODO: this indirection exist solely for `spec["python"].command` to actually return # spec["python-venv"].command. It should be removed when `python` is a virtual. 
if self.indirect and instance.indirect_spec: pkg = instance.indirect_spec.package else: pkg = instance.wrapped_obj.package try: query = instance.last_query except AttributeError: # There has been no query yet: this means # a spec is trying to access its own attributes _ = instance.wrapped_obj[instance.wrapped_obj.name] # NOQA: ignore=F841 query = instance.last_query callbacks_chain = [] # First in the chain : specialized attribute for virtual packages if query.isvirtual: specialized_name = "{0}_{1}".format(query.name, self.attribute_name) callbacks_chain.append(lambda: getattr(pkg, specialized_name)) # Try to get the generic method from Package callbacks_chain.append(lambda: getattr(pkg, self.attribute_name)) # Final resort : default callback if self.default is not None: _default = self.default # make mypy happy callbacks_chain.append(lambda: _default(instance.wrapped_obj)) # Trigger the callbacks in order, the first one producing a # value wins value = None message = None for f in callbacks_chain: try: value = f() # A callback can return None to trigger an error indicating # that the query failed. if value is None: msg = "Query of package '{name}' for '{attrib}' failed\n" msg += "\tprefix : {spec.prefix}\n" msg += "\tspec : {spec}\n" msg += "\tqueried as : {query.name}\n" msg += "\textra parameters : {query.extra_parameters}" message = msg.format( name=pkg.name, attrib=self.attribute_name, spec=instance, query=instance.last_query, ) else: return value break except AttributeError: pass # value is 'None' if message is not None: # Here we can use another type of exception. If we do that, the # unit test 'test_getitem_exceptional_paths' in the file # lib/spack/spack/test/spec_dag.py will need to be updated to match # the type. raise AttributeError(message) # 'None' value at this point means that there are no appropriate # properties defined and no default handler, or that all callbacks # raised AttributeError. 
In this case, we raise AttributeError with an # appropriate message. fmt = "'{name}' package has no relevant attribute '{query}'\n" fmt += "\tspec : '{spec}'\n" fmt += "\tqueried as : '{spec.last_query.name}'\n" fmt += "\textra parameters : '{spec.last_query.extra_parameters}'\n" message = fmt.format(name=pkg.name, query=self.attribute_name, spec=instance) raise AttributeError(message) def __set__(self, instance, value): cls_name = type(instance).__name__ msg = "'{0}' object attribute '{1}' is read-only" raise AttributeError(msg.format(cls_name, self.attribute_name)) # Represents a query state in a BuildInterface object QueryState = collections.namedtuple("QueryState", ["name", "extra_parameters", "isvirtual"])
ForwardQueryToPackage
python
PrefectHQ__prefect
tests/test_flow_engine.py
{ "start": 14775, "end": 28087 }
class ____: async def test_flow_retry_with_error_in_flow(self): run_count = 0 @flow(retries=1) async def foo(): nonlocal run_count run_count += 1 if run_count == 1: raise ValueError() return "hello" assert await foo() == "hello" assert run_count == 2 async def test_flow_retry_with_error_in_flow_sync(self): run_count = 0 @flow(retries=1) def foo(): nonlocal run_count run_count += 1 if run_count == 1: raise ValueError() return "hello" assert foo() == "hello" assert run_count == 2 async def test_flow_retry_with_error_in_flow_and_successful_task(self): task_run_count = 0 flow_run_count = 0 @task(persist_result=True) async def my_task(): nonlocal task_run_count task_run_count += 1 return "hello" @flow(retries=1, persist_result=True) async def foo(): nonlocal flow_run_count flow_run_count += 1 state = await my_task(return_state=True) if flow_run_count == 1: raise ValueError() return await state.result() assert await foo() == "hello" assert flow_run_count == 2 assert task_run_count == 1 def test_flow_retry_with_no_error_in_flow_and_one_failed_task(self): task_run_count = 0 flow_run_count = 0 @task def my_task(): nonlocal task_run_count task_run_count += 1 # Fail on the first flow run but not the retry if flow_run_count == 1: raise ValueError() return "hello" @flow(retries=1) def foo(): nonlocal flow_run_count flow_run_count += 1 return my_task() assert foo() == "hello" assert flow_run_count == 2 assert task_run_count == 2, "Task should be reset and run again" def test_flow_retry_with_error_in_flow_and_one_failed_task(self): task_run_count = 0 flow_run_count = 0 @task def my_task(): nonlocal task_run_count task_run_count += 1 # Fail on the first flow run but not the retry if flow_run_count == 1: raise ValueError() return "hello" @flow(retries=1) def my_flow(): nonlocal flow_run_count flow_run_count += 1 fut = my_task() # It is important that the flow run fails after the task run is created if flow_run_count == 1: raise ValueError() return fut assert my_flow() == "hello" 
assert flow_run_count == 2 assert task_run_count == 2, "Task should be reset and run again" @pytest.mark.xfail async def test_flow_retry_with_branched_tasks(self, sync_prefect_client): flow_run_count = 0 @task def identity(value): return value @flow(retries=1) def my_flow(): nonlocal flow_run_count flow_run_count += 1 # Raise on the first run but use 'foo' if flow_run_count == 1: identity("foo") raise ValueError() else: # On the second run, switch to 'bar' result = identity("bar") return result my_flow() assert flow_run_count == 2 # The state is pulled from the API and needs to be decoded document = await (await my_flow().result()).result() result = sync_prefect_client.retrieve_data(document) assert result == "bar" # AssertionError: assert 'foo' == 'bar' # Wait, what? Because tasks are identified by dynamic key which is a simple # increment each time the task is called, if there branching is different # after a flow run retry, the stale value will be pulled from the cache. async def test_flow_retry_with_no_error_in_flow_and_one_failed_child_flow( self, sync_prefect_client ): child_run_count = 0 flow_run_count = 0 @flow async def child_flow(): nonlocal child_run_count child_run_count += 1 # Fail on the first flow run but not the retry if flow_run_count == 1: raise ValueError() return "hello" @flow(retries=1) async def parent_flow(): nonlocal flow_run_count flow_run_count += 1 return await child_flow() state = await parent_flow(return_state=True) assert await state.result() == "hello" assert flow_run_count == 2 assert child_run_count == 2, "Child flow should be reset and run again" # Ensure that the tracking task run for the subflow is reset and tracked task_runs = sync_prefect_client.read_task_runs( flow_run_filter=FlowRunFilter( id={"any_": [state.state_details.flow_run_id]} ) ) state_types = {task_run.state_type for task_run in task_runs} assert state_types == {StateType.COMPLETED} # There should only be the child flow run's task assert len(task_runs) == 1 async 
def test_flow_retry_with_error_in_flow_and_one_successful_child_flow(self): child_run_count = 0 flow_run_count = 0 @flow(persist_result=True) async def child_flow(): nonlocal child_run_count child_run_count += 1 return "hello" @flow(retries=1, persist_result=True) async def parent_flow(): nonlocal flow_run_count flow_run_count += 1 child_result = await child_flow() # Fail on the first flow run but not the retry if flow_run_count == 1: raise ValueError() return child_result assert await parent_flow() == "hello" assert flow_run_count == 2 assert child_run_count == 1, "Child flow should not run again" async def test_flow_retry_with_error_in_flow_and_one_failed_child_flow( self, sync_prefect_client: SyncPrefectClient ): child_flow_run_count = 0 flow_run_count = 0 @flow def child_flow(): nonlocal child_flow_run_count child_flow_run_count += 1 # Fail on the first flow run but not the retry if flow_run_count == 1: raise ValueError() return "hello" @flow(retries=1) def parent_flow(): nonlocal flow_run_count flow_run_count += 1 state = child_flow(return_state=True) # It is important that the flow run fails after the child flow run is created if flow_run_count == 1: raise ValueError() return state parent_state = parent_flow(return_state=True) child_state = await parent_state.result() assert await child_state.result() == "hello" assert flow_run_count == 2 assert child_flow_run_count == 2, "Child flow should run again" child_flow_run = sync_prefect_client.read_flow_run( child_state.state_details.flow_run_id ) child_flow_runs = sync_prefect_client.read_flow_runs( flow_filter=FlowFilter(id={"any_": [child_flow_run.flow_id]}), sort=FlowRunSort.EXPECTED_START_TIME_ASC, ) assert len(child_flow_runs) == 2 # The original flow run has its failed state preserved assert child_flow_runs[0].state.is_failed() # The final flow run is the one returned by the parent flow assert child_flow_runs[-1] == child_flow_run async def test_flow_retry_with_failed_child_flow_with_failed_task(self): 
child_task_run_count = 0 child_flow_run_count = 0 flow_run_count = 0 @task async def child_task(): nonlocal child_task_run_count child_task_run_count += 1 # Fail on the first task run but not the retry if child_task_run_count == 1: raise ValueError() return "hello" @flow async def child_flow(): nonlocal child_flow_run_count child_flow_run_count += 1 return await child_task() @flow(retries=1) async def parent_flow(): nonlocal flow_run_count flow_run_count += 1 state = await child_flow() return state assert await parent_flow() == "hello" assert flow_run_count == 2 assert child_flow_run_count == 2, "Child flow should run again" assert child_task_run_count == 2, "Child tasks should run again with child flow" def test_flow_retry_with_error_in_flow_and_one_failed_task_with_retries(self): task_run_retry_count = 0 task_run_count = 0 flow_run_count = 0 @task(retries=1) def my_task(): nonlocal task_run_count, task_run_retry_count task_run_count += 1 task_run_retry_count += 1 # Always fail on the first flow run if flow_run_count == 1: raise ValueError("Fail on first flow run") # Only fail the first time this task is called within a given flow run # This ensures that we will always retry this task so we can ensure # retry logic is preserved if task_run_retry_count == 1: raise ValueError("Fail on first task run") return "hello" @flow(retries=1) def foo(): nonlocal flow_run_count, task_run_retry_count task_run_retry_count = 0 flow_run_count += 1 fut = my_task() # It is important that the flow run fails after the task run is created if flow_run_count == 1: raise ValueError() return fut assert foo() == "hello" assert flow_run_count == 2 assert task_run_count == 4, "Task should use all of its retries every time" async def test_flow_retry_with_error_in_flow_and_one_failed_task_with_retries_cannot_exceed_retries( self, ): task_run_count = 0 flow_run_count = 0 @task(retries=2) async def my_task(): nonlocal task_run_count task_run_count += 1 raise ValueError("This task always fails") 
@flow(retries=1) async def my_flow(): nonlocal flow_run_count flow_run_count += 1 fut = await my_task() # It is important that the flow run fails after the task run is created if flow_run_count == 1: raise ValueError() return fut with pytest.raises(ValueError, match="This task always fails"): fut = await my_flow() flow_result = await fut.result() await flow_result.result() assert flow_run_count == 2 assert task_run_count == 6, "Task should use all of its retries every time" async def test_flow_with_failed_child_flow_with_retries(self): child_flow_run_count = 0 flow_run_count = 0 @flow(retries=1) def child_flow(): nonlocal child_flow_run_count child_flow_run_count += 1 # Fail on first try. if child_flow_run_count == 1: raise ValueError() return "hello" @flow def parent_flow(): nonlocal flow_run_count flow_run_count += 1 state = child_flow() return state assert parent_flow() == "hello" assert flow_run_count == 1, "Parent flow should only run once" assert child_flow_run_count == 2, "Child flow should run again" async def test_parent_flow_retries_failed_child_flow_with_retries(self): child_flow_retry_count = 0 child_flow_run_count = 0 flow_run_count = 0 @flow(retries=1) def child_flow(): nonlocal child_flow_run_count, child_flow_retry_count child_flow_run_count += 1 child_flow_retry_count += 1 # Fail during first parent flow run, but not on parent retry. if flow_run_count == 1: raise ValueError() # Fail on first try after parent retry. if child_flow_retry_count == 1: raise ValueError() return "hello" @flow(retries=1) def parent_flow(): nonlocal flow_run_count, child_flow_retry_count child_flow_retry_count = 0 flow_run_count += 1 state = child_flow() return state assert parent_flow() == "hello" assert flow_run_count == 2, "Parent flow should exhaust retries" assert child_flow_run_count == 4, ( "Child flow should run 2 times for each parent run" )
TestFlowRetries
python
kamyu104__LeetCode-Solutions
Python/count-elements-with-maximum-frequency.py
{ "start": 63, "end": 337 }
class ____(object): def maxFrequencyElements(self, nums): """ :type nums: List[int] :rtype: int """ cnt = collections.Counter(nums) mx = max(cnt.itervalues()) return sum(v for v in cnt.itervalues() if v == mx)
Solution
python
django__django
django/db/migrations/operations/models.py
{ "start": 25815, "end": 26142 }
class ____(AlterTogetherOptionOperation): """ Change the value of unique_together to the target one. Input value of unique_together must be a set of tuples. """ option_name = "unique_together" def __init__(self, name, unique_together): super().__init__(name, unique_together)
AlterUniqueTogether
python
scikit-learn__scikit-learn
sklearn/tests/test_base.py
{ "start": 1901, "end": 2066 }
class ____(NaNTag): def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.allow_nan = False return tags
OverrideTag
python
kamyu104__LeetCode-Solutions
Python/flatten-a-multilevel-doubly-linked-list.py
{ "start": 199, "end": 837 }
class ____(object): def flatten(self, head): """ :type head: Node :rtype: Node """ curr = head while curr: if curr.child: curr_next = curr.next curr.child.prev = curr curr.next = curr.child last_child = curr while last_child.next: last_child = last_child.next if curr_next: last_child.next = curr_next curr_next.prev = last_child curr.child = None curr = curr.next return head
Solution
python
pytorch__pytorch
torch/_dynamo/variables/misc.py
{ "start": 75199, "end": 75816 }
class ____(ConstantLikeVariable): _error_prefix = "np.dtype[...]" def as_proxy(self): """Similar to how numpy dtype descriptors (e.g. np.float32 ) are handled by NumpyVariable: np.dtype() objects are serialized as strings, torch._numpy wrappers will normalize to the torch dtype. This also handles unsupported things nicely (i.e. structured arrays and object arrays). """ return self.value.type.__name__ np_constant_collections_map = { tnp.finfo: NumpyTypeInfoVariable, tnp.iinfo: NumpyTypeInfoVariable, tnp.dtype: NumpyDTypeVariable, }
NumpyDTypeVariable
python
pydata__xarray
xarray/tests/test_strategies.py
{ "start": 9230, "end": 10237 }
class ____: """ These tests are for checking that the examples given in the docs page on testing actually work. """ @given(st.data(), variables(dims=dimension_names(min_dims=1))) def test_mean(self, data, var): """ Test that given a Variable of at least one dimension, the mean of the Variable is always equal to the mean of the underlying array. """ with set_options(use_numbagg=False): # specify arbitrary reduction along at least one dimension reduction_dims = data.draw(unique_subset_of(var.dims, min_size=1)) # create expected result (using nanmean because arrays with Nans will be generated) reduction_axes = tuple(var.get_axis_num(dim) for dim in reduction_dims) expected = np.nanmean(var.data, axis=reduction_axes) # assert property is always satisfied result = var.mean(dim=reduction_dims).data npt.assert_equal(expected, result)
TestReduction
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_H.py
{ "start": 6251, "end": 7687 }
class ____(Benchmark): r""" HelicalValley objective function. This class defines the HelicalValley [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{HelicalValley}}({x}) = 100{[z-10\Psi(x_1,x_2)]^2 +(\sqrt{x_1^2+x_2^2}-1)^2}+x_3^2 Where, in this exercise: .. math:: 2\pi\Psi(x,y) = \begin{cases} \arctan(y/x) & \textrm{for} x > 0 \\ \pi + \arctan(y/x) & \textrm{for } x < 0 \end{cases} with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2, 3`. *Global optimum*: :math:`f(x) = 0` for :math:`x = [1, 0, 0]` .. [1] Fletcher, R. & Powell, M. A Rapidly Convergent Descent Method for Minimization, Computer Journal, 1963, 62, 163-168 TODO: Jamil equation is different to original reference. The above paper can be obtained from http://galton.uchicago.edu/~lekheng/courses/302/classics/ fletcher-powell.pdf """ def __init__(self, dimensions=3): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-10.] * self.N, [10.] * self.N)) self.global_optimum = [[1.0, 0.0, 0.0]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 r = sqrt(x[0] ** 2 + x[1] ** 2) theta = 1 / (2. * pi) * arctan2(x[1], x[0]) return x[2] ** 2 + 100 * ((x[2] - 10 * theta) ** 2 + (r - 1) ** 2)
HelicalValley
python
python__mypy
mypy/test/data.py
{ "start": 877, "end": 1008 }
class ____(NamedTuple): module: str content: str target_path: str # File delete operation: delete module file.
UpdateFile
python
getsentry__sentry
src/sentry/feedback/lib/types.py
{ "start": 44, "end": 593 }
class ____(TypedDict): """Use for weak type checking of user report data. Keys correspond to fields of the UserReport model.""" event_id: str comments: str name: NotRequired[str] # defaults to "" email: NotRequired[str] # defaults to "" # required for the model, but functions usually infer this from an explicit Project argument. project_id: NotRequired[int] environment_id: NotRequired[int] # defaults to "production". group_id: NotRequired[int] level: NotRequired[str] # defaults to "info".
UserReportDict
python
django-extensions__django-extensions
tests/management/commands/shell_plus_tests/test_collision_resolver.py
{ "start": 911, "end": 950 }
class ____(BaseCR): pass
CRNoFunction
python
scipy__scipy
scipy/signal/tests/test_filter_design.py
{ "start": 27600, "end": 30105 }
class ____: def test_basic(self, xp): _, h = freqs_zpk( xp.asarray([1.0]), xp.asarray([1.0]), xp.asarray([1.0]), worN=8 ) assert_array_almost_equal(h, xp.ones(8)) def test_output(self, xp): # 1st order low-pass filter: H(s) = 1 / (s + 1) w = xp.asarray([0.1, 1, 10, 100]) z = xp.asarray([]) p = xp.asarray([-1.0]) k = 1 w, H = freqs_zpk(z, p, k, worN=w) s = w * 1j expected = 1 / (s + 1) assert_array_almost_equal(xp.real(H), xp.real(expected)) assert_array_almost_equal(xp.imag(H), xp.imag(expected)) def test_freq_range(self, xp): # Test that freqresp() finds a reasonable frequency range. # 1st order low-pass filter: H(s) = 1 / (s + 1) # Expected range is from 0.01 to 10. z = xp.asarray([]) p = xp.asarray([-1.]) k = 1 n = 10 expected_w = _logspace(-2, 1, n, xp=xp) w, H = freqs_zpk(z, p, k, worN=n) assert_array_almost_equal(w, expected_w) def test_vs_freqs(self, xp): b, a = cheby1(4, 5, 100., analog=True, output='ba') z, p, k = cheby1(4, 5, 100., analog=True, output='zpk') w1, h1 = map(xp.asarray, freqs(b, a)) z, p, k = map(xp.asarray, (z, p, k)) w2, h2 = freqs_zpk(z, p, k) xp_assert_close(w1, w2) xp_assert_close(h1, h2, rtol=1e-6) def test_backward_compat(self, xp): # For backward compatibility, test if None act as a wrapper for default # Also, keep testing `k` a length-one list: it is documented as a scalar, # but the implementation was allowing for a one-element array-likes w1, h1 = freqs_zpk(xp.asarray([1.0]), xp.asarray([1.0]), [1.0]) w2, h2 = freqs_zpk(xp.asarray([1.0]), xp.asarray([1.0]), [1.0], None) assert_array_almost_equal(w1, w2) assert_array_almost_equal(h1, h2) def test_w_or_N_types(self): # Measure at 8 equally-spaced points for N in (8, np.int8(8), np.int16(8), np.int32(8), np.int64(8), np.array(8)): w, h = freqs_zpk([], [], 1, worN=N) assert len(w) == 8 assert_array_almost_equal(h, np.ones(8)) # Measure at frequency 8 rad/sec for w in (8.0, 8.0+0j): w_out, h = freqs_zpk([], [], 1, worN=w) assert_array_almost_equal(w_out, [8]) 
assert_array_almost_equal(h, [1]) @make_xp_test_case(freqz)
TestFreqs_zpk
python
run-llama__llama_index
llama-index-integrations/voice_agents/llama-index-voice-agents-openai/llama_index/voice_agents/openai/types.py
{ "start": 2087, "end": 2176 }
class ____(BaseVoiceAgentEvent): session: ConversationSession
ConversationSessionUpdate
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py
{ "start": 4615, "end": 5173 }
class ____: """Demo.""" @overload def bar(self, x: int) -> int: ... @overload def bar(self, x: str) -> str: ... def bar(self, x: int | str) -> int | str: return x # end # no error @overload def foo(x: int) -> int: ... @overload def foo(x: str) -> str: ... def foo(x: int | str) -> int | str: if not isinstance(x, (int, str)): raise TypeError return x # end # no error def foo(self, x: int) -> int: ... def bar(self, x: str) -> str: ... def baz(self, x: int | str) -> int | str: return x # end # E301
Foo
python
django__django
tests/messages_tests/tests.py
{ "start": 406, "end": 1844 }
class ____(SimpleTestCase): def test_eq(self): msg_1 = Message(constants.INFO, "Test message 1") msg_2 = Message(constants.INFO, "Test message 2") msg_3 = Message(constants.WARNING, "Test message 1") self.assertEqual(msg_1, msg_1) self.assertEqual(msg_1, mock.ANY) self.assertNotEqual(msg_1, msg_2) self.assertNotEqual(msg_1, msg_3) self.assertNotEqual(msg_2, msg_3) @override_settings( MESSAGE_TAGS={ constants.WARNING: "caution", constants.ERROR: "", 12: "custom", } ) def test_repr(self): tests = [ (constants.INFO, "thing", "", "Message(level=20, message='thing')"), ( constants.WARNING, "careful", "tag1 tag2", "Message(level=30, message='careful', extra_tags='tag1 tag2')", ), ( constants.ERROR, "oops", "tag", "Message(level=40, message='oops', extra_tags='tag')", ), (12, "custom", "", "Message(level=12, message='custom')"), ] for level, message, extra_tags, expected in tests: with self.subTest(level=level, message=message): msg = Message(level, message, extra_tags=extra_tags) self.assertEqual(repr(msg), expected)
MessageTests
python
falconry__falcon
tests/asgi/_asgi_test_app.py
{ "start": 8913, "end": 10562 }
class ____: _SUPPORTED_KEYS = frozenset( {'default_close_reasons', 'error_close_code', 'max_receive_queue'} ) def __init__(self, ws_options): self._ws_options = ws_options async def on_get(self, req, resp): resp.media = { key: getattr(self._ws_options, key) for key in self._SUPPORTED_KEYS } async def on_patch(self, req, resp): update = await req.get_media() for key, value in update.items(): if key not in self._SUPPORTED_KEYS: raise falcon.HTTPInvalidParam('unsupported option', key) setattr(self._ws_options, key, value) resp.status = falcon.HTTP_NO_CONTENT def create_app(): app = falcon.asgi.App() bucket = Bucket() lifespan_handler = LifespanHandler() app.add_route('/', Things()) app.add_route('/bucket', bucket) app.add_route('/bucket/drops', bucket, suffix='drops') app.add_route('/events', Events()) app.add_route('/forms', Multipart()) app.add_route('/jars', TestJar()) app.add_route('/feeds/{feed_id}', Feed()) app.add_route('/wsoptions', WSOptions(app.ws_options)) app.add_middleware(lifespan_handler) async def _on_ws_error(req, resp, error, params, ws=None): if not ws: raise if ws.unaccepted: await ws.accept() if not ws.closed: await ws.send_text(error.__class__.__name__) await ws.close() app.add_error_handler(falcon.errors.OperationNotAllowed, _on_ws_error) app.add_error_handler(ValueError, _on_ws_error) return app application = create_app()
WSOptions
python
pyodide__pyodide
src/py/_pyodide/_core_docs.py
{ "start": 22268, "end": 22729 }
class ____(JsProxy, Generic[T_co]): """A JsProxy of a JavaScript iterator. An object is a :py:class:`JsAsyncIterator` if it has a :js:meth:`~Iterator.next` method and either has a :js:data:`Symbol.iterator` or has no :js:data:`Symbol.asyncIterator`. """ _js_type_flags = ["IS_ITERATOR"] def __next__(self) -> T_co: raise NotImplementedError def __iter__(self) -> Iterator[T_co]: raise NotImplementedError
JsIterator
python
django__django
tests/model_fields/test_binaryfield.py
{ "start": 146, "end": 2190 }
class ____(TestCase): binary_data = b"\x00\x46\xfe" def test_set_and_retrieve(self): data_set = ( self.binary_data, bytearray(self.binary_data), memoryview(self.binary_data), ) for bdata in data_set: with self.subTest(data=repr(bdata)): dm = DataModel(data=bdata) dm.save() dm = DataModel.objects.get(pk=dm.pk) self.assertEqual(bytes(dm.data), bytes(bdata)) # Resave (=update) dm.save() dm = DataModel.objects.get(pk=dm.pk) self.assertEqual(bytes(dm.data), bytes(bdata)) # Test default value self.assertEqual(bytes(dm.short_data), b"\x08") def test_max_length(self): dm = DataModel(short_data=self.binary_data * 4) with self.assertRaises(ValidationError): dm.full_clean() def test_editable(self): field = models.BinaryField() self.assertIs(field.editable, False) field = models.BinaryField(editable=True) self.assertIs(field.editable, True) field = models.BinaryField(editable=False) self.assertIs(field.editable, False) def test_filter(self): dm = DataModel.objects.create(data=self.binary_data) DataModel.objects.create(data=b"\xef\xbb\xbf") self.assertSequenceEqual(DataModel.objects.filter(data=self.binary_data), [dm]) def test_filter_bytearray(self): dm = DataModel.objects.create(data=self.binary_data) DataModel.objects.create(data=b"\xef\xbb\xbf") self.assertSequenceEqual( DataModel.objects.filter(data=bytearray(self.binary_data)), [dm] ) def test_filter_memoryview(self): dm = DataModel.objects.create(data=self.binary_data) DataModel.objects.create(data=b"\xef\xbb\xbf") self.assertSequenceEqual( DataModel.objects.filter(data=memoryview(self.binary_data)), [dm] )
BinaryFieldTests
python
kamyu104__LeetCode-Solutions
Python/the-k-strongest-values-in-an-array.py
{ "start": 33, "end": 585 }
class ____(object): def getStrongest(self, arr, k): """ :type arr: List[int] :type k: int :rtype: List[int] """ arr.sort() m = arr[(len(arr)-1)//2] result = [] left, right = 0, len(arr)-1 while len(result) < k: if m-arr[left] > arr[right]-m: result.append(arr[left]) left += 1 else: result.append(arr[right]) right -= 1 return result # Time: O(nlogn) # Space: O(1)
Solution
python
OmkarPathak__pygorithm
pygorithm/math/matrix_operations.py
{ "start": 168, "end": 7104 }
class ____(object): ''' Matrix class for performing various transformations Matrix operations can be performed on two matrices with any number of dimensions ''' def __init__(self, matrix_one = None, matrix_two=None): ''' :param matrix_one: matrix with nxn dimensions :param matrix_two: matrix with nxn dimensions .. code-block:: python: matrix_one = [[1, 2], [1, 3], [1, 4]] (a 3x2 matrix) ''' self.matrix_one = matrix_one self.matrix_two = matrix_two def add(self): ''' function for adding the two matrices .. note:: Matrix addition requires both the matrices to be of same size. That is both the matrices should be of nxn dimensional. ''' # check if both the matrices are of same shape if not (len(self.matrix_one) == len(self.matrix_two)) or not (len(self.matrix_one[0]) == len(self.matrix_two[0])): raise Exception('Both Matrices should be of same dimensions') added_matrix = [[0 for i in range(len(self.matrix_one))] for j in range(len(self.matrix_two))] # iterate through rows for row in range(len(self.matrix_one)): # iterate through columns for column in range(len(self.matrix_one[0])): added_matrix[row][column] = self.matrix_one[row][column] + self.matrix_two[row][column] return added_matrix def subtract(self): ''' function for subtracting the two matrices .. note:: Matrix subtraction requires both the matrices to be of same size. That is both the matrices should be of nxn dimensional. 
''' # check if both the matrices are of same shape if not (len(self.matrix_one) == len(self.matrix_two)) or not (len(self.matrix_one[0]) == len(self.matrix_two[0])): raise Exception('Both Matrices should be of same dimensions') subtracted_matrix = [[0 for i in range(len(self.matrix_one))] for j in range(len(self.matrix_two))] # iterate through rows for row in range(len(self.matrix_one)): # iterate through columns for column in range(len(self.matrix_one[0])): subtracted_matrix[row][column] = self.matrix_one[row][column] - self.matrix_two[row][column] return subtracted_matrix def multiply(self): ''' function for multiplying the two matrices .. note:: Matrix multiplication can be carried out even on matrices with different dimensions. ''' multiplied_matrix = [[0 for i in range(len(self.matrix_two[0]))] for j in range(len(self.matrix_one))] # iterate through rows for row_one in range(len(self.matrix_one)): # iterate through columns matrix_two for column in range(len(self.matrix_two[0])): # iterate through rows of matrix_two for row_two in range(len(self.matrix_two)): multiplied_matrix[row_one][column] += self.matrix_one[row_one][row_two] * self.matrix_two[row_two][column] return multiplied_matrix def transpose(self): ''' The transpose of a matrix is a new matrix whose rows are the columns of the original. (This makes the columns of the new matrix the rows of the original) ''' transpose_matrix = [[0 for i in range(len(self.matrix_one))] for j in range(len(self.matrix_one[0]))] # iterate through rows for row in range(len(self.matrix_one)): # iterate through columns for column in range(len(self.matrix_one[0])): transpose_matrix[column][row] = self.matrix_one[row][column] return transpose_matrix def rotate(self): ''' Given a matrix, clockwise rotate elements in it. .. 
code-block:: python: **Examples:** Input 1 2 3 4 5 6 7 8 9 Output: 4 1 2 7 5 3 8 9 6 For detailed information visit: https://github.com/keon/algorithms/blob/master/matrix/matrix_rotation.txt ''' top = 0 bottom = len(self.matrix_one) - 1 left = 0 right = len(self.matrix_one[0]) - 1 while left < right and top < bottom: # Store the first element of next row, this element will replace first element of # current row prev = self.matrix_one[top + 1][left] # Move elements of top row one step right for i in range(left, right + 1): curr = self.matrix_one[top][i] self.matrix_one[top][i] = prev prev = curr top += 1 # Move elements of rightmost column one step downwards for i in range(top, bottom+1): curr = self.matrix_one[i][right] self.matrix_one[i][right] = prev prev = curr right -= 1 # Move elements of bottom row one step left for i in range(right, left-1, -1): curr = self.matrix_one[bottom][i] self.matrix_one[bottom][i] = prev prev = curr bottom -= 1 # Move elements of leftmost column one step upwards for i in range(bottom, top-1, -1): curr = self.matrix_one[i][left] self.matrix_one[i][left] = prev prev = curr left += 1 return self.matrix_one def count_unique_paths(self, m, n): ''' Count the number of unique paths from a[0][0] to a[m-1][n-1] We are allowed to move either right or down from a cell in the matrix. Approaches- (i) Recursion - Recurse starting from a[m-1][n-1], upwards and leftwards, add the path count of both recursions and return count. (ii) Dynamic Programming- Start from a[0][0].Store the count in a count matrix. Return count[m-1][n-1] Time Complexity = O(mn), Space Complexity = O(mn) :param m: number of rows :param n: number of columns ''' if m < 1 or n < 1: return count = [[None for j in range(n)] for i in range(m)] # Taking care of the edge cases- matrix of size 1xn or mx1 for i in range(n): count[0][i] = 1 for j in range(m): count[j][0] = 1 for i in range(1, m): for j in range(1, n): count[i][j] = count[i-1][j] + count[i][j-1] return count[m-1][n-1]
Matrix
python
allegroai__clearml
clearml/backend_api/services/v2_23/dataviews.py
{ "start": 45131, "end": 46235 }
class ____(NonStrictDataModel): """ :param rules: Rules list :type rules: Sequence[MappingRule] """ _schema = { "properties": { "rules": { "description": "Rules list", "items": {"$ref": "#/definitions/mapping_rule"}, "type": ["array", "null"], } }, "type": "object", } def __init__(self, rules=None, **kwargs): super(Mapping, self).__init__(**kwargs) self.rules = rules @schema_property("rules") def rules(self): return self._property_rules @rules.setter def rules(self, value): if value is None: self._property_rules = None return self.assert_isinstance(value, "rules", (list, tuple)) if any(isinstance(v, dict) for v in value): value = [ MappingRule.from_dict(v) if isinstance(v, dict) else v for v in value ] else: self.assert_isinstance(value, "rules", MappingRule, is_array=True) self._property_rules = value
Mapping
python
Textualize__textual
tests/css/test_nested_css.py
{ "start": 306, "end": 1267 }
class ____(App): CSS = """ Screen { & > #foo { background: red; #egg { background: green; } .paul { background: blue; } &.jessica { color: magenta; } } } """ def compose(self) -> ComposeResult: with Vertical(id="foo", classes="jessica"): yield Label("Hello", id="egg") yield Label("World", classes="paul") async def test_nest_app(): """Test nested CSS works as expected.""" app = NestedApp() async with app.run_test(): assert app.query_one("#foo").styles.background == Color.parse("red") assert app.query_one("#foo").styles.color == Color.parse("magenta") assert app.query_one("#egg").styles.background == Color.parse("green") assert app.query_one("#foo .paul").styles.background == Color.parse("blue")
NestedApp
python
ray-project__ray
python/ray/serve/config.py
{ "start": 1126, "end": 6103 }
class ____: """Rich context provided to custom autoscaling policies. This class provides comprehensive information about a deployment's current state, metrics, and configuration that can be used by custom autoscaling policies to make intelligent scaling decisions. The context includes deployment metadata, current replica state, built-in and custom metrics, capacity bounds, policy state, and timing information. Note: The aggregated_metrics and raw_metrics fields support lazy evaluation. You can pass callables that will be evaluated only when accessed, with results cached for subsequent accesses. """ def __init__( self, deployment_id: DeploymentID, deployment_name: str, app_name: Optional[str], current_num_replicas: int, target_num_replicas: int, running_replicas: List[ReplicaID], total_num_requests: Union[float, Callable[[], float]], total_queued_requests: Optional[Union[float, Callable[[], float]]], aggregated_metrics: Optional[ Union[ Dict[str, Dict[ReplicaID, float]], Callable[[], Dict[str, Dict[ReplicaID, float]]], ] ], raw_metrics: Optional[ Union[ Dict[str, Dict[ReplicaID, TimeSeries]], Callable[[], Dict[str, Dict[ReplicaID, TimeSeries]]], ] ], capacity_adjusted_min_replicas: int, capacity_adjusted_max_replicas: int, policy_state: Dict[str, Any], last_scale_up_time: Optional[float], last_scale_down_time: Optional[float], current_time: Optional[float], config: Optional[Any], ): # Deployment information self.deployment_id = deployment_id #: Unique identifier for the deployment. self.deployment_name = deployment_name #: Name of the deployment. self.app_name = app_name #: Name of the application containing this deployment. # Current state self.current_num_replicas = ( current_num_replicas #: Current number of running replicas. ) self.target_num_replicas = ( target_num_replicas #: Target number of replicas set by the autoscaler. ) self.running_replicas = ( running_replicas #: List of currently running replica IDs. 
) # Built-in metrics self._total_num_requests_value = ( total_num_requests #: Total number of requests across all replicas. ) self._total_queued_requests_value = ( total_queued_requests #: Number of requests currently queued. ) # Custom metrics - store potentially lazy callables privately self._aggregated_metrics_value = aggregated_metrics self._raw_metrics_value = raw_metrics # Capacity and bounds self.capacity_adjusted_min_replicas = capacity_adjusted_min_replicas #: Minimum replicas adjusted for cluster capacity. self.capacity_adjusted_max_replicas = capacity_adjusted_max_replicas #: Maximum replicas adjusted for cluster capacity. # Policy state self.policy_state = ( policy_state #: Persistent state dictionary for the autoscaling policy. ) # Timing self.last_scale_up_time = ( last_scale_up_time #: Timestamp of last scale-up action. ) self.last_scale_down_time = ( last_scale_down_time #: Timestamp of last scale-down action. ) self.current_time = current_time #: Current timestamp. # Config self.config = config #: Autoscaling configuration for this deployment. 
@cached_property def aggregated_metrics(self) -> Optional[Dict[str, Dict[ReplicaID, float]]]: if callable(self._aggregated_metrics_value): return self._aggregated_metrics_value() return self._aggregated_metrics_value @cached_property def raw_metrics(self) -> Optional[Dict[str, Dict[ReplicaID, TimeSeries]]]: if callable(self._raw_metrics_value): return self._raw_metrics_value() return self._raw_metrics_value @cached_property def total_num_requests(self) -> float: if callable(self._total_num_requests_value): return self._total_num_requests_value() return self._total_num_requests_value @cached_property def total_queued_requests(self) -> float: if callable(self._total_queued_requests_value): return self._total_queued_requests_value() return self._total_queued_requests_value @property def total_running_requests(self) -> float: # NOTE: for non-additive aggregation functions, total_running_requests is not # accurate, consider this is an approximation. return self.total_num_requests - self.total_queued_requests @PublicAPI(stability="alpha")
AutoscalingContext
python
huggingface__transformers
src/transformers/models/plbart/modular_plbart.py
{ "start": 15364, "end": 17240 }
class ____(BigBirdPegasusForSequenceClassification): def forward(**super_kwargs): r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`] or [`PLBartMultiTokenizer`] depending on the checkpoint. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) PLBart uses a specific language id token as the starting token for `decoder_input_ids` generation that varies according to source and target language, *e.g.* 50003 for *en_XX*, and 50001 for *java*. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (: obj:*torch.LongTensor* of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ super().forward(**super_kwargs)
PLBartForSequenceClassification
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/summary_ops/summary_ops_test.py
{ "start": 42214, "end": 47535 }
class ____(test_util.TensorFlowTestCase): def testWriter_savedAsModuleProperty_loadInEagerMode(self): with context.eager_mode(): class Model(module.Module): def __init__(self, model_dir): self._writer = summary_ops.create_file_writer_v2( model_dir, experimental_trackable=True) @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=[], dtype=dtypes.int64) ]) def train(self, step): with self._writer.as_default(): summary_ops.write('tag', 'foo', step=step) return constant_op.constant(0) logdir = self.get_temp_dir() to_export = Model(logdir) pre_save_files = set(events_from_multifile_logdir(logdir)) export_dir = os.path.join(logdir, 'export') saved_model_save.save( to_export, export_dir, signatures={'train': to_export.train}) # Reset context to ensure we don't share any resources with saving code. context._reset_context() # pylint: disable=protected-access with context.eager_mode(): restored = saved_model_load.load(export_dir) restored.train(1) restored.train(2) post_restore_files = set(events_from_multifile_logdir(logdir)) restored2 = saved_model_load.load(export_dir) restored2.train(3) restored2.train(4) files_to_events = events_from_multifile_logdir(logdir) post_restore2_files = set(files_to_events) self.assertLen(files_to_events, 3) def unwrap_singleton(iterable): self.assertLen(iterable, 1) return next(iter(iterable)) restore_file = unwrap_singleton(post_restore_files - pre_save_files) restore2_file = unwrap_singleton(post_restore2_files - post_restore_files) restore_events = files_to_events[restore_file] restore2_events = files_to_events[restore2_file] self.assertLen(restore_events, 3) self.assertEqual(1, restore_events[1].step) self.assertEqual(2, restore_events[2].step) self.assertLen(restore2_events, 3) self.assertEqual(3, restore2_events[1].step) self.assertEqual(4, restore2_events[2].step) def testWriter_savedAsModuleProperty_loadInGraphMode(self): with context.eager_mode(): class Model(module.Module): def __init__(self, model_dir): 
self._writer = summary_ops.create_file_writer_v2( model_dir, experimental_trackable=True) @def_function.function(input_signature=[ tensor_spec.TensorSpec(shape=[], dtype=dtypes.int64) ]) def train(self, step): with self._writer.as_default(): summary_ops.write('tag', 'foo', step=step) return constant_op.constant(0) logdir = self.get_temp_dir() to_export = Model(logdir) pre_save_files = set(events_from_multifile_logdir(logdir)) export_dir = os.path.join(logdir, 'export') saved_model_save.save( to_export, export_dir, signatures={'train': to_export.train}) # Reset context to ensure we don't share any resources with saving code. context._reset_context() # pylint: disable=protected-access def load_and_run_model(sess, input_values): """Load and run the SavedModel signature in the TF 1.x style.""" model = saved_model_loader.load(sess, [tag_constants.SERVING], export_dir) signature = model.signature_def['train'] inputs = list(signature.inputs.values()) assert len(inputs) == 1, inputs outputs = list(signature.outputs.values()) assert len(outputs) == 1, outputs input_tensor = sess.graph.get_tensor_by_name(inputs[0].name) output_tensor = sess.graph.get_tensor_by_name(outputs[0].name) for v in input_values: sess.run(output_tensor, feed_dict={input_tensor: v}) with context.graph_mode(), ops.Graph().as_default(): # Since writer shared_name is fixed, within a single session, all loads of # this SavedModel will refer to a single writer resouce, so it will be # initialized only once and write to a single file. with self.session() as sess: load_and_run_model(sess, [1, 2]) load_and_run_model(sess, [3, 4]) post_restore_files = set(events_from_multifile_logdir(logdir)) # New session will recreate the resource and write to a second file. 
with self.session() as sess: load_and_run_model(sess, [5, 6]) files_to_events = events_from_multifile_logdir(logdir) post_restore2_files = set(files_to_events) self.assertLen(files_to_events, 3) def unwrap_singleton(iterable): self.assertLen(iterable, 1) return next(iter(iterable)) restore_file = unwrap_singleton(post_restore_files - pre_save_files) restore2_file = unwrap_singleton(post_restore2_files - post_restore_files) restore_events = files_to_events[restore_file] restore2_events = files_to_events[restore2_file] self.assertLen(restore_events, 5) self.assertEqual(1, restore_events[1].step) self.assertEqual(2, restore_events[2].step) self.assertEqual(3, restore_events[3].step) self.assertEqual(4, restore_events[4].step) self.assertLen(restore2_events, 3) self.assertEqual(5, restore2_events[1].step) self.assertEqual(6, restore2_events[2].step)
SummaryWriterSavedModelTest
python
realpython__materials
python-selenium/src/bandcamp/web/base.py
{ "start": 438, "end": 696 }
class ____: def __init__(self, driver: WebDriver) -> None: self._driver = driver self._driver.set_window_size(*DEFAULT_WINDOW_SIZE) self._driver.implicitly_wait(5) self._wait = WebDriverWait(driver, MAX_WAIT_SECONDS)
WebPage
python
tensorflow__tensorflow
tensorflow/python/keras/layers/core.py
{ "start": 16188, "end": 20181 }
class ____(Layer): """Layer that reshapes inputs into the given shape. Input shape: Arbitrary, although all dimensions in the input shape must be known/fixed. Use the keyword argument `input_shape` (tuple of integers, does not include the samples/batch size axis) when using this layer as the first layer in a model. Output shape: `(batch_size,) + target_shape` Example: >>> # as first layer in a Sequential model >>> model = tf.keras.Sequential() >>> model.add(tf.keras.layers.Reshape((3, 4), input_shape=(12,))) >>> # model.output_shape == (None, 3, 4), `None` is the batch size. >>> model.output_shape (None, 3, 4) >>> # as intermediate layer in a Sequential model >>> model.add(tf.keras.layers.Reshape((6, 2))) >>> model.output_shape (None, 6, 2) >>> # also supports shape inference using `-1` as dimension >>> model.add(tf.keras.layers.Reshape((-1, 2, 2))) >>> model.output_shape (None, 3, 2, 2) """ def __init__(self, target_shape, **kwargs): """Creates a `tf.keras.layers.Reshape` layer instance. Args: target_shape: Target shape. Tuple of integers, does not include the samples dimension (batch size). **kwargs: Any additional layer keyword arguments. """ super(Reshape, self).__init__(**kwargs) self.target_shape = tuple(target_shape) def _fix_unknown_dimension(self, input_shape, output_shape): """Find and replace a missing dimension in an output shape. This is a near direct port of the internal Numpy function `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c` Args: input_shape: Shape of array being reshaped output_shape: Desired shape of the array with at most a single -1 which indicates a dimension that should be derived from the input shape. Returns: The new output shape with a -1 replaced with its computed value. Raises: ValueError: If the total array size of the output_shape is different than the input_shape, or more than one unknown dimension is specified. 
""" output_shape = list(output_shape) msg = ('total size of new array must be unchanged, ' 'input_shape = {}, output_shape = {}' .format(input_shape, output_shape)) known, unknown = 1, None for index, dim in enumerate(output_shape): if dim < 0: if unknown is None: unknown = index else: raise ValueError('Can only specify one unknown dimension.') else: known *= dim original = np.prod(input_shape, dtype=int) if unknown is not None: if known == 0 or original % known != 0: raise ValueError(msg) output_shape[unknown] = original // known elif original != known: raise ValueError(msg) return output_shape def compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if None in input_shape[1:]: output_shape = [input_shape[0]] # input shape (partially) unknown? replace -1's with None's output_shape += tuple(s if s != -1 else None for s in self.target_shape) else: output_shape = [input_shape[0]] output_shape += self._fix_unknown_dimension(input_shape[1:], self.target_shape) return tensor_shape.TensorShape(output_shape) def call(self, inputs): result = array_ops.reshape( inputs, (array_ops.shape(inputs)[0],) + self.target_shape) if not context.executing_eagerly(): # Set the static shape for the result since it might lost during array_ops # reshape, eg, some `None` dim in the result could be inferred. result.set_shape(self.compute_output_shape(inputs.shape)) return result def get_config(self): config = {'target_shape': self.target_shape} base_config = super(Reshape, self).get_config() return dict(list(base_config.items()) + list(config.items()))
Reshape
python
huggingface__transformers
src/transformers/utils/quantization_config.py
{ "start": 87204, "end": 89776 }
class ____(QuantizationConfigMixin): """ This is a wrapper class about `spqr` parameters. Refer to the original publication for more details. Args: bits (`int`, *optional*, defaults to 3): Specifies the bit count for the weights and first order zero-points and scales. Currently only bits = 3 is supported. beta1 (`int`, *optional*, defaults to 16): SpQR tile width. Currently only beta1 = 16 is supported. beta2 (`int`, *optional*, defaults to 16): SpQR tile height. Currently only beta2 = 16 is supported. shapes (`Optional`, *optional*): A dictionary holding the shape of each object. We need this because it's impossible to deduce the exact size of the parameters just from bits, beta1, beta2. modules_to_not_convert (`Optional[list[str]]`, *optional*): Optionally, provides a list of full paths of `nn.Linear` weight parameters that shall not be quantized. Defaults to None. kwargs (`dict[str, Any]`, *optional*): Additional parameters from which to initialize the configuration object. """ def __init__( self, bits: int = 3, beta1: int = 16, beta2: int = 16, shapes: dict[str, int] | None = None, modules_to_not_convert: list[str] | None = None, **kwargs, ): if shapes is None: shapes = {} self.shapes = shapes self.quant_method = QuantizationMethod.SPQR self.bits = bits self.beta1 = beta1 self.beta2 = beta2 self.modules_to_not_convert = modules_to_not_convert self.post_init() def post_init(self): r""" Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. 
""" if not isinstance(self.bits, int): raise TypeError("bits must be an int") if not isinstance(self.beta1, int): raise TypeError("beta1 must be an int") if not isinstance(self.beta2, int): raise TypeError("beta2 must be an int") if self.bits != 3: raise ValueError("SpQR currently only supports bits = 3") if self.beta1 != 16: raise ValueError("SpQR currently only supports beta1 = 16") if self.beta2 != 16: raise ValueError("SpQR currently only supports beta2 = 16") if not isinstance(self.shapes, dict): raise TypeError("shapes must be a dict") @dataclass
SpQRConfig
python
pikepdf__pikepdf
src/pikepdf/form.py
{ "start": 6812, "end": 10023 }
class ____(_FieldWrapper): """Represents an editable text field.""" @property def is_multiline(self) -> bool: """Is this a multiline text field? If True, text will be wrapped and newlines will be allowed. If False, text will not be wrapped and newlines are stripped. """ return bool(self._field.flags & FormFieldFlag.tx_multiline) @property def is_combed(self) -> bool: """Is this a combed text field? If True, the field will be split into equal-length segments, based on ``max_length``, containing one character each. """ return bool(self._field.flags & FormFieldFlag.tx_comb) @property def is_rich_text(self) -> bool: """Is this a rich text field? Rich text functionality is not currently implemented, but this flag is presented for your information. """ return bool(self._field.flags & FormFieldFlag.tx_rich_text) @property def is_password(self) -> bool: """Is this a password field? Password fields are not currently implemented, but this flag is presented for your information. """ return bool(self._field.flags & FormFieldFlag.tx_password) @property def is_file_select(self) -> bool: """Is this a file select field? File select fields are not currently implemented, but this flag is presented for your information. 
""" return bool(self._field.flags & FormFieldFlag.tx_file_select) @property def spell_check_enabled(self) -> bool: """Should spell-checking be enabled in this field?""" return not self._field.flags & FormFieldFlag.tx_do_not_spell_check @property def scrolling_enabled(self) -> bool: """Should scrolling (horizontal or vertical) be allowed in this field?""" return not self._field.flags & FormFieldFlag.tx_do_not_scroll @property def max_length(self) -> int | None: """The maximum length of the text in this field.""" return self._field.get_inheritable_field_value("/MaxLen") @property def default_value(self) -> str: """The default (placeholder) value of the text field.""" return self._field.default_value_as_string @property def value(self) -> str: """The value of the text field.""" return self._field.value_as_string @value.setter def value(self, value: str): # Coerce the value into something acceptable if it isn't if not self.is_multiline: value = value.replace('\n', '') max_length = self.max_length if ( not self._form.ignore_max_length and max_length is not None and len(value) > max_length ): value = value[:max_length] log.warning('Value is too long for text field and is being truncated') # Set the value self._field.set_value(value, self._form.generate_appearances is None) # Generate appearance streams if requested. if self._form.generate_appearances is not None: self._form.generate_appearances.generate_text(self._field)
TextField
python
FactoryBoy__factory_boy
tests/test_declarations.py
{ "start": 6899, "end": 8301 }
class ____(unittest.TestCase): def test_invalid_path(self): with self.assertRaises(ValueError): declarations._FactoryWrapper('UnqualifiedSymbol') with self.assertRaises(ValueError): declarations._FactoryWrapper(42) def test_class(self): w = declarations._FactoryWrapper(datetime.date) self.assertEqual(datetime.date, w.get()) def test_path(self): w = declarations._FactoryWrapper('datetime.date') self.assertEqual(datetime.date, w.get()) def test_lazyness(self): f = declarations._FactoryWrapper('factory.declarations.Sequence') self.assertEqual(None, f.factory) factory_class = f.get() self.assertEqual(declarations.Sequence, factory_class) def test_cache(self): """Ensure that _FactoryWrapper tries to import only once.""" orig_date = datetime.date w = declarations._FactoryWrapper('datetime.date') self.assertEqual(None, w.factory) factory_class = w.get() self.assertEqual(orig_date, factory_class) try: # Modify original value datetime.date = None # Repeat import factory_class = w.get() self.assertEqual(orig_date, factory_class) finally: # IMPORTANT: restore attribute. datetime.date = orig_date
FactoryWrapperTestCase
python
mlflow__mlflow
tests/utils/test_async_logging_queue.py
{ "start": 8109, "end": 14315 }
class ____:
    """Minimal consumer used as the sink for AsyncLoggingQueue tests.

    Records every metric/tag/param it receives; the artificial sleep keeps
    consumption slow enough that batches actually queue up asynchronously.
    """

    def __init__(self) -> None:
        # Everything consumed so far, in arrival order.
        self.metrics = []
        self.tags = []
        self.params = []

    def consume_queue_data(self, run_id, metrics, tags, params):
        # Simulate a slow backend so submissions outpace consumption.
        time.sleep(0.5)
        self.metrics.extend(metrics or [])
        self.params.extend(params or [])
        self.tags.extend(tags or [])


def test_async_logging_queue_pickle():
    """The queue must pickle both before and after activation; a deserialized
    copy must come back idle with a fresh (non-None) lock and empty queue,
    yet remain fully usable once re-activated."""
    run_id = "test_run_id"
    consumer = Consumer()
    with generate_async_logging_queue(consumer) as async_logging_queue:
        # Pickle the queue without activating it.
        buffer = io.BytesIO()
        pickle.dump(async_logging_queue, buffer)
        deserialized_queue = pickle.loads(buffer.getvalue())  # Type: AsyncLoggingQueue

        # activate the queue and then try to pickle it
        async_logging_queue.activate()
        run_operations = [
            async_logging_queue.log_batch_async(
                run_id=run_id,
                metrics=[Metric("metric", val, timestamp=time.time(), step=1)],
                tags=[],
                params=[],
            )
            for val in range(0, 10)
        ]

        # Pickle the queue
        buffer = io.BytesIO()
        pickle.dump(async_logging_queue, buffer)
        deserialized_queue = pickle.loads(buffer.getvalue())  # Type: AsyncLoggingQueue
        # The copy must start empty and idle, with its own lock object.
        assert deserialized_queue._queue.empty()
        assert deserialized_queue._lock is not None
        assert deserialized_queue._status is QueueStatus.IDLE

        # Drain the original queue before checking what the consumer saw.
        for run_operation in run_operations:
            run_operation.wait()

        assert len(consumer.metrics) == 10

        # try to log using deserialized queue after activating it.
        deserialized_queue.activate()
        assert deserialized_queue.is_active()

        run_operations = []
        for val in range(0, 10):
            run_operations.append(
                deserialized_queue.log_batch_async(
                    run_id=run_id,
                    metrics=[Metric("metric", val, timestamp=time.time(), step=1)],
                    tags=[],
                    params=[],
                )
            )

        for run_operation in run_operations:
            run_operation.wait()

        # The deserialized queue logs into its own consumer instance, reached
        # through the bound logging function (__self__ is that consumer).
        assert len(deserialized_queue._logging_func.__self__.metrics) == 10
        deserialized_queue.shut_down_async_logging()


def _send_metrics_tags_params(run_data_queueing_processor, run_id, run_operations=None):
    """Queue every batch from _get_run_data() onto the processor, appending the
    returned operations to *run_operations* (mutated in place when supplied).

    NOTE(review): metrics_sent/tags_sent/params_sent are accumulated but never
    returned or read — confirm callers do not need them.
    """
    if run_operations is None:
        run_operations = []
    metrics_sent = []
    tags_sent = []
    params_sent = []

    for params, tags, metrics in _get_run_data():
        run_operations.append(
            run_data_queueing_processor.log_batch_async(
                run_id=run_id, metrics=metrics, tags=tags, params=params
            )
        )
        # Random pause between submissions varies interleaving with the worker.
        time.sleep(random.randint(1, 3))
        metrics_sent += metrics
        tags_sent += tags
        params_sent += params


def _get_run_data(
    total_batches=TOTAL_BATCHES,
    params_per_batch=PARAMS_PER_BATCH,
    tags_per_batch=TAGS_PER_BATCH,
    metrics_per_batch=METRIC_PER_BATCH,
):
    """Yield ``total_batches`` tuples of synthetic (params, tags, metrics)."""
    for num in range(0, total_batches):
        # Short random fragment keeps param/tag keys unique across batches.
        guid8 = str(uuid.uuid4())[:8]
        params = [
            Param(f"batch param-{guid8}-{val}", value=str(time.time()))
            for val in range(params_per_batch)
        ]
        tags = [
            RunTag(f"batch tag-{guid8}-{val}", value=str(time.time()))
            for val in range(tags_per_batch)
        ]
        metrics = [
            Metric(
                key=f"batch metrics async-{num}",
                value=val,
                timestamp=int(time.time() * 1000),
                step=0,
            )
            for val in range(metrics_per_batch)
        ]
        yield params, tags, metrics


def _assert_sent_received_data(
    metrics_sent, params_sent, tags_sent, received_metrics, received_params, received_tags
):
    """Field-by-field comparison of sent run data against what the sink received.

    NOTE(review): all three loops start at index 1, so element 0 of each list
    is never compared — confirm this is intentional.
    """
    for num in range(1, len(metrics_sent)):
        assert metrics_sent[num].key == received_metrics[num].key
        assert metrics_sent[num].value == received_metrics[num].value
        assert metrics_sent[num].timestamp == received_metrics[num].timestamp
        assert metrics_sent[num].step == received_metrics[num].step

    for num in range(1, len(tags_sent)):
        assert tags_sent[num].key == received_tags[num].key
        assert tags_sent[num].value == received_tags[num].value

    for num in range(1, len(params_sent)):
        assert params_sent[num].key == received_params[num].key
        assert params_sent[num].value == received_params[num].value


def test_batch_split(monkeypatch):
    """With the per-batch caps monkeypatched down, each submission that still
    fits within every cap must be consumed as exactly one batch (no split)."""
    monkeypatch.setattr(mlflow.utils.async_logging.async_logging_queue, "_MAX_ITEMS_PER_BATCH", 10)
    monkeypatch.setattr(mlflow.utils.async_logging.async_logging_queue, "_MAX_PARAMS_PER_BATCH", 6)
    monkeypatch.setattr(mlflow.utils.async_logging.async_logging_queue, "_MAX_TAGS_PER_BATCH", 8)

    run_data = RunData()
    with generate_async_logging_queue(run_data) as async_logging_queue:
        async_logging_queue.activate()
        run_id = "test_run_id"
        # 2 submissions of 3 params + 3 tags + 3 metrics = 9 items, under every cap.
        for params, tags, metrics in _get_run_data(2, 3, 3, 3):
            async_logging_queue.log_batch_async(
                run_id=run_id, metrics=metrics, tags=tags, params=params
            )
        async_logging_queue.flush()
    assert run_data.batch_count == 2

    run_data = RunData()
    with generate_async_logging_queue(run_data) as async_logging_queue:
        async_logging_queue.activate()
        run_id = "test_run_id"
        # Params-only submissions (4 <= 6 per batch).
        for params, tags, metrics in _get_run_data(2, 4, 0, 0):
            async_logging_queue.log_batch_async(
                run_id=run_id, metrics=metrics, tags=tags, params=params
            )
        async_logging_queue.flush()
    assert run_data.batch_count == 2

    run_data = RunData()
    with generate_async_logging_queue(run_data) as async_logging_queue:
        async_logging_queue.activate()
        run_id = "test_run_id"
        # Tags-only submissions (5 <= 8 per batch).
        for params, tags, metrics in _get_run_data(2, 0, 5, 0):
            async_logging_queue.log_batch_async(
                run_id=run_id, metrics=metrics, tags=tags, params=params
            )
        async_logging_queue.flush()
    assert run_data.batch_count == 2
Consumer
python
redis__redis-py
tests/test_maint_notifications_handling.py
{ "start": 21886, "end": 93103 }
class ____(TestMaintenanceNotificationsBase): """Integration tests for maintenance notifications handling with real connection pool.""" def _validate_connection_handlers(self, conn, pool_handler, config): """Helper method to validate connection handlers are properly set.""" # Test that the node moving handler function is correctly set parser_handler = conn._parser.node_moving_push_handler_func assert parser_handler is not None assert hasattr(parser_handler, "__self__") assert hasattr(parser_handler, "__func__") assert parser_handler.__self__.connection is conn assert parser_handler.__self__.pool is pool_handler.pool assert parser_handler.__self__._lock is pool_handler._lock assert ( parser_handler.__self__._processed_notifications is pool_handler._processed_notifications ) assert parser_handler.__func__ is pool_handler.handle_notification.__func__ # Test that the maintenance handler function is correctly set maintenance_handler = conn._parser.maintenance_push_handler_func assert maintenance_handler is not None assert hasattr(maintenance_handler, "__self__") assert hasattr(maintenance_handler, "__func__") # The maintenance handler should be bound to the connection's # maintenance notification connection handler assert ( maintenance_handler.__self__ is conn._maint_notifications_connection_handler ) assert ( maintenance_handler.__func__ is conn._maint_notifications_connection_handler.handle_notification.__func__ ) # Validate that the connection's maintenance handler has the same config object assert conn._maint_notifications_connection_handler.config is config def _validate_current_timeout(self, expected_timeout, error_msg=None): """Helper method to validate the current timeout for the calling thread.""" actual_timeout = None # Get the actual thread ID from the current thread current_thread_id = threading.current_thread().ident for sock in self.mock_sockets: if current_thread_id in sock.thread_timeouts: actual_timeout = sock.thread_timeouts[current_thread_id] break 
assert actual_timeout == expected_timeout, ( f"{error_msg or ''}" f"Expected timeout ({expected_timeout}), " f"but found timeout: {actual_timeout}. " f"All thread timeouts: {[sock.thread_timeouts for sock in self.mock_sockets]}", ) def _validate_disconnected(self, expected_count): """Helper method to validate all socket timeouts""" disconnected_sockets_count = 0 for sock in self.mock_sockets: if sock.closed: disconnected_sockets_count += 1 assert disconnected_sockets_count == expected_count def _validate_connected(self, expected_count): """Helper method to validate all socket timeouts""" connected_sockets_count = 0 for sock in self.mock_sockets: if sock.connected: connected_sockets_count += 1 assert connected_sockets_count == expected_count def test_client_initialization(self): """Test that Redis client is created with maintenance notifications configuration.""" # Create a pool and Redis client with maintenance notifications test_redis_client = Redis( protocol=3, # Required for maintenance notifications maint_notifications_config=self.config, ) pool_handler = test_redis_client.connection_pool.connection_kwargs.get( "maint_notifications_pool_handler" ) assert pool_handler is not None assert pool_handler.config == self.config conn = test_redis_client.connection_pool.get_connection() assert conn.should_reconnect() is False assert conn.orig_host_address == "localhost" assert conn.orig_socket_timeout is None self._validate_connection_handlers(conn, pool_handler, self.config) def test_maint_handler_init_for_existing_connections(self): """Test that maintenance notification handlers are properly set on existing and new connections when configuration is enabled after client creation.""" # Create a Redis client with disabled maintenance notifications configuration disabled_config = MaintNotificationsConfig(enabled=False) test_redis_client = Redis( protocol=3, # Required for maintenance notifications maint_notifications_config=disabled_config, ) # Extract an existing 
connection before enabling maintenance notifications existing_conn = test_redis_client.connection_pool.get_connection() # Verify that maintenance notifications are initially disabled assert existing_conn._parser.node_moving_push_handler_func is None assert existing_conn._maint_notifications_connection_handler is None assert existing_conn._parser.maintenance_push_handler_func is None # Create a new enabled configuration and set up pool handler enabled_config = MaintNotificationsConfig( enabled=True, proactive_reconnect=True, relaxed_timeout=30 ) test_redis_client.connection_pool.update_maint_notifications_config( enabled_config ) pool_handler = ( test_redis_client.connection_pool._maint_notifications_pool_handler ) # Validate the existing connection after enabling maintenance notifications # Both existing and new connections should now have full handler setup self._validate_connection_handlers(existing_conn, pool_handler, enabled_config) # Create a new connection and validate it has full handlers new_conn = test_redis_client.connection_pool.get_connection() self._validate_connection_handlers(new_conn, pool_handler, enabled_config) self._validate_connection_handlers(existing_conn, pool_handler, enabled_config) # Clean up connections test_redis_client.connection_pool.release(existing_conn) test_redis_client.connection_pool.release(new_conn) @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_connection_pool_creation_with_maintenance_notifications(self, pool_class): """Test that connection pools are created with maintenance notifications configuration.""" # Create a pool and Redis client with maintenance notifications max_connections = 3 if pool_class == BlockingConnectionPool else 10 test_redis_client = self._get_client( pool_class, max_connections=max_connections ) test_pool = test_redis_client.connection_pool try: assert ( test_pool.connection_kwargs.get("maint_notifications_config") == self.config ) # Pool should have 
maintenance notifications enabled assert test_pool.maint_notifications_enabled() is True # Create and set a pool handler test_pool.update_maint_notifications_config(self.config) pool_handler = test_pool._maint_notifications_pool_handler # Validate that the handler is properly set on the pool assert ( test_pool.connection_kwargs.get("maint_notifications_pool_handler") == pool_handler ) assert ( test_pool.connection_kwargs.get("maint_notifications_config") == pool_handler.config ) # Verify that the pool handler has the correct configuration assert pool_handler.pool == test_pool assert pool_handler.config == self.config finally: if hasattr(test_pool, "disconnect"): test_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_redis_operations_with_mock_sockets(self, pool_class): """ Test basic Redis operations work with mocked sockets and proper response parsing. Basically with test - the mocked socket is validated. """ # Create a pool and Redis client with maintenance notifications test_redis_client = self._get_client(pool_class, max_connections=5) try: # Perform Redis operations that should work with our improved mock responses result_set = test_redis_client.set("hello", "world") result_get = test_redis_client.get("hello") # Verify operations completed successfully assert result_set is True assert result_get == b"world" # Verify socket interactions assert len(self.mock_sockets) >= 1 assert self.mock_sockets[0].connected assert len(self.mock_sockets[0].sent_data) >= 2 # HELLO, SET, GET commands # Verify that the connection has maintenance notification handler connection = test_redis_client.connection_pool.get_connection() assert hasattr(connection, "_maint_notifications_connection_handler") test_redis_client.connection_pool.release(connection) finally: if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() def test_pool_handler_with_migrating_notification(self): 
"""Test that pool handler correctly handles migrating notifications.""" # Create a pool and Redis client with maintenance notifications test_redis_client = self._get_client(ConnectionPool) test_pool = test_redis_client.connection_pool try: # Create and set a pool handler pool_handler = MaintNotificationsPoolHandler(test_pool, self.config) # Create a migrating notification (not handled by pool handler) migrating_notification = NodeMigratingNotification(id=1, ttl=5) # Mock the required functions with ( patch.object( pool_handler, "remove_expired_notifications" ) as mock_remove_expired, patch.object( pool_handler, "handle_node_moving_notification" ) as mock_handle_moving, patch("redis.maint_notifications.logging.error") as mock_logging_error, ): # Pool handler should return None for migrating notifications (not its responsibility) pool_handler.handle_notification(migrating_notification) # Validate that remove_expired_notifications has been called once mock_remove_expired.assert_called_once() # Validate that handle_node_moving_notification hasn't been called mock_handle_moving.assert_not_called() # Validate that logging.error has been called once mock_logging_error.assert_called_once() finally: if hasattr(test_pool, "disconnect"): test_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_migration_related_notifications_handling_integration(self, pool_class): """ Test full integration of migration-related notifications (MIGRATING/MIGRATED) handling. This test validates the complete migration lifecycle: 1. Executes 5 Redis commands sequentially 2. Injects MIGRATING push message before command 2 (SET key_receive_migrating) 3. Validates socket timeout is updated to relaxed value (30s) after MIGRATING 4. Executes commands 3-4 while timeout remains relaxed 5. Injects MIGRATED push message before command 5 (SET key_receive_migrated) 6. Validates socket timeout is restored after MIGRATED 7. 
Tests both ConnectionPool and BlockingConnectionPool implementations 8. Uses proper RESP3 push message format for realistic protocol simulation """ # Create a pool and Redis client with maintenance notifications test_redis_client = self._get_client(pool_class, max_connections=10) try: # Command 1: Initial command key1 = "key1" value1 = "value1" result1 = test_redis_client.set(key1, value1) # Validate Command 1 result assert result1 is True, "Command 1 (SET key1) failed" # Command 2: This SET command will receive MIGRATING push message before response key_migrating = "key_receive_migrating" value_migrating = "value2" result2 = test_redis_client.set(key_migrating, value_migrating) # Validate Command 2 result assert result2 is True, "Command 2 (SET key_receive_migrating) failed" # Step 4: Validate timeout was updated to relaxed value after MIGRATING self._validate_current_timeout(30, "Right after MIGRATING is received. ") # Command 3: Another command while timeout is still relaxed result3 = test_redis_client.get(key1) # Validate Command 3 result expected_value3 = value1.encode() assert result3 == expected_value3, ( f"Command 3 (GET key1) failed. Expected {expected_value3}, got {result3}" ) # Command 4: Execute command (step 5) result4 = test_redis_client.get(key_migrating) # Validate Command 4 result expected_value4 = value_migrating.encode() assert result4 == expected_value4, ( f"Command 4 (GET key_receive_migrating) failed. 
Expected {expected_value4}, got {result4}" ) # Step 6: Validate socket timeout is still relaxed during commands 3-4 self._validate_current_timeout( 30, "Execute a command with a connection extracted from the pool (after it has received MIGRATING)", ) # Command 5: This SET command will receive # MIGRATED push message before actual response key_migrated = "key_receive_migrated" value_migrated = "value3" result5 = test_redis_client.set(key_migrated, value_migrated) # Validate Command 5 result assert result5 is True, "Command 5 (SET key_receive_migrated) failed" # Step 8: Validate socket timeout is reversed back to original after MIGRATED self._validate_current_timeout(None) # Verify maintenance notifications were processed correctly # The key is that we have at least 1 socket and all operations succeeded assert len(self.mock_sockets) >= 1, ( f"Expected at least 1 socket for operations, got {len(self.mock_sockets)}" ) finally: if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_migrating_notification_with_disabled_relaxed_timeout(self, pool_class): """ Test maintenance notifications handling when relaxed timeout is disabled. This test validates that when relaxed_timeout is disabled (-1): 1. MIGRATING, MIGRATED, FAILING_OVER, and FAILED_OVER notifications are received and processed 2. No timeout updates are applied to connections 3. Socket timeouts remain unchanged during all maintenance notifications 4. Tests both ConnectionPool and BlockingConnectionPool implementations 5. 
Tests the complete lifecycle: MIGRATING -> MIGRATED -> FAILING_OVER -> FAILED_OVER """ # Create config with disabled relaxed timeout disabled_config = MaintNotificationsConfig( enabled=True, relaxed_timeout=-1, # This means the relaxed timeout is Disabled ) # Create a pool and Redis client with disabled relaxed timeout config test_redis_client = self._get_client( pool_class, max_connections=5, maint_notifications_config=disabled_config ) try: # Command 1: Initial command key1 = "key1" value1 = "value1" result1 = test_redis_client.set(key1, value1) # Validate Command 1 result assert result1 is True, "Command 1 (SET key1) failed" # Command 2: This SET command will receive MIGRATING push message before response key_migrating = "key_receive_migrating" value_migrating = "value2" result2 = test_redis_client.set(key_migrating, value_migrating) # Validate Command 2 result assert result2 is True, "Command 2 (SET key_receive_migrating) failed" # Validate timeout was NOT updated (relaxed is disabled) # Should remain at default timeout (None), not relaxed to 30s self._validate_current_timeout(None) # Command 3: Another command to verify timeout remains unchanged result3 = test_redis_client.get(key1) # Validate Command 3 result expected_value3 = value1.encode() assert result3 == expected_value3, ( f"Command 3 (GET key1) failed. 
Expected: {expected_value3}, Got: {result3}" ) # Command 4: This SET command will receive MIGRATED push message before response key_migrated = "key_receive_migrated" value_migrated = "value3" result4 = test_redis_client.set(key_migrated, value_migrated) # Validate Command 4 result assert result4 is True, "Command 4 (SET key_receive_migrated) failed" # Validate timeout is still NOT updated after MIGRATED (relaxed is disabled) self._validate_current_timeout(None) # Command 5: This SET command will receive FAILING_OVER push message before response key_failing_over = "key_receive_failing_over" value_failing_over = "value4" result5 = test_redis_client.set(key_failing_over, value_failing_over) # Validate Command 5 result assert result5 is True, "Command 5 (SET key_receive_failing_over) failed" # Validate timeout is still NOT updated after FAILING_OVER (relaxed is disabled) self._validate_current_timeout(None) # Command 6: Another command to verify timeout remains unchanged during failover result6 = test_redis_client.get(key_failing_over) # Validate Command 6 result expected_value6 = value_failing_over.encode() assert result6 == expected_value6, ( f"Command 6 (GET key_receive_failing_over) failed. Expected: {expected_value6}, Got: {result6}" ) # Command 7: This SET command will receive FAILED_OVER push message before response key_failed_over = "key_receive_failed_over" value_failed_over = "value5" result7 = test_redis_client.set(key_failed_over, value_failed_over) # Validate Command 7 result assert result7 is True, "Command 7 (SET key_receive_failed_over) failed" # Validate timeout is still NOT updated after FAILED_OVER (relaxed is disabled) self._validate_current_timeout(None) # Command 8: Final command to verify timeout remains unchanged after all notifications result8 = test_redis_client.get(key_failed_over) # Validate Command 8 result expected_value8 = value_failed_over.encode() assert result8 == expected_value8, ( f"Command 8 (GET key_receive_failed_over) failed. 
Expected: {expected_value8}, Got: {result8}" ) # Verify maintenance notifications were processed correctly # The key is that we have at least 1 socket and all operations succeeded assert len(self.mock_sockets) >= 1, ( f"Expected at least 1 socket for operations, got {len(self.mock_sockets)}" ) finally: if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_failing_over_related_notifications_handling_integration(self, pool_class): """ Test full integration of failing-over-related notifications (FAILING_OVER/FAILED_OVER) handling. This test validates the complete FAILING_OVER -> FAILED_OVER lifecycle: 1. Executes 5 Redis commands sequentially 2. Injects FAILING_OVER push message before command 2 (SET key_receive_failing_over) 3. Validates socket timeout is updated to relaxed value (30s) after FAILING_OVER 4. Executes commands 3-4 while timeout remains relaxed 5. Injects FAILED_OVER push message before command 5 (SET key_receive_failed_over) 6. Validates socket timeout is restored after FAILED_OVER 7. Tests both ConnectionPool and BlockingConnectionPool implementations 8. 
Uses proper RESP3 push message format for realistic protocol simulation """ # Create a pool and Redis client with maintenance notifications test_redis_client = self._get_client(pool_class, max_connections=10) try: # Command 1: Initial command key1 = "key1" value1 = "value1" result1 = test_redis_client.set(key1, value1) # Validate Command 1 result assert result1 is True, "Command 1 (SET key1) failed" # Command 2: This SET command will receive FAILING_OVER push message before response key_failing_over = "key_receive_failing_over" value_failing_over = "value4" result2 = test_redis_client.set(key_failing_over, value_failing_over) # Validate Command 2 result assert result2 is True, "Command 2 (SET key_receive_failing_over) failed" # Step 4: Validate timeout was updated to relaxed value after MIGRATING self._validate_current_timeout(30, "Right after FAILING_OVER is received. ") # Command 3: Another command while timeout is still relaxed result3 = test_redis_client.get(key1) # Validate Command 3 result expected_value3 = value1.encode() assert result3 == expected_value3, ( f"Command 3 (GET key1) failed. Expected {expected_value3}, got {result3}" ) # Command 4: Execute command (step 5) result4 = test_redis_client.get(key_failing_over) # Validate Command 4 result expected_value4 = value_failing_over.encode() assert result4 == expected_value4, ( f"Command 4 (GET key_receive_failing_over) failed. 
Expected {expected_value4}, got {result4}" ) # Step 6: Validate socket timeout is still relaxed during commands 3-4 self._validate_current_timeout( 30, "Execute a command with a connection extracted from the pool (after it has received FAILING_OVER)", ) # Command 5: This SET command will receive # FAILED_OVER push message before actual response key_failed_over = "key_receive_failed_over" value_migrated = "value3" result5 = test_redis_client.set(key_failed_over, value_migrated) # Validate Command 5 result assert result5 is True, "Command 5 (SET key_receive_failed_over) failed" # Step 8: Validate socket timeout is reversed back to original after FAILED_OVER self._validate_current_timeout(None) # Verify maintenance notifications were processed correctly # The key is that we have at least 1 socket and all operations succeeded assert len(self.mock_sockets) >= 1, ( f"Expected at least 1 socket for operations, got {len(self.mock_sockets)}" ) finally: if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_moving_related_notifications_handling_integration(self, pool_class): """ Test full integration of moving-related notifications (MOVING) handling with Redis commands. This test validates the complete MOVING notification lifecycle: 1. Creates multiple connections in the pool 2. Executes a Redis command that triggers a MOVING push message 3. Validates that pool configuration is updated with temporary address and timeout - for new connections creation 4. Validates that existing connections are marked for disconnection 5. 
Tests both ConnectionPool and BlockingConnectionPool implementations """ # Create a pool and Redis client with maintenance notifications and pool handler test_redis_client = self._get_client(pool_class, max_connections=10) try: # Create several connections and return them in the pool connections = [] for _ in range(10): connection = test_redis_client.connection_pool.get_connection() connections.append(connection) for connection in connections: test_redis_client.connection_pool.release(connection) # Take 5 connections to be "in use" in_use_connections = [] for _ in range(5): connection = test_redis_client.connection_pool.get_connection() in_use_connections.append(connection) # Validate all connections are connected prior MOVING notification self._validate_disconnected(0) # Run command that will receive and handle MOVING notification key_moving = "key_receive_moving_0" value_moving = "value3_0" # the connection used for the command is expected to be reconnected to the new address # before it is returned to the pool result2 = test_redis_client.set(key_moving, value_moving) # Validate Command 2 result assert result2 is True, "Command 2 (SET key_receive_moving) failed" # Validate pool and connections settings were updated according to MOVING notification expected_notification_hash = hash(MOVING_NOTIFICATION) Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.MOVING, expected_maintenance_notification_hash=expected_notification_hash, expected_host_address=AFTER_MOVING_ADDRESS.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) self._validate_disconnected(5) self._validate_connected(6) Helpers.validate_in_use_connections_state( in_use_connections, 
expected_state=MaintenanceState.MOVING, expected_host_address=AFTER_MOVING_ADDRESS.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=self.config.relaxed_timeout, expected_current_peername=DEFAULT_ADDRESS.split(":")[ 0 ], # the in use connections reconnect when they complete their current task ) Helpers.validate_free_connections_state( pool=test_redis_client.connection_pool, expected_state=MaintenanceState.MOVING, expected_host_address=AFTER_MOVING_ADDRESS.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, should_be_connected_count=1, connected_to_tmp_address=True, ) # Wait for MOVING timeout to expire and the moving completed handler to run sleep(MOVING_TIMEOUT + 0.5) Helpers.validate_in_use_connections_state( in_use_connections, expected_state=MaintenanceState.NONE, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_socket_timeout=None, expected_socket_connect_timeout=None, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=None, expected_current_peername=DEFAULT_ADDRESS.split(":")[0], ) Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.NONE, expected_maintenance_notification_hash=None, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), expected_socket_timeout=None, expected_socket_connect_timeout=None, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], 
expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) Helpers.validate_free_connections_state( pool=test_redis_client.connection_pool, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_socket_timeout=None, expected_socket_connect_timeout=None, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, should_be_connected_count=1, connected_to_tmp_address=True, expected_state=MaintenanceState.NONE, ) finally: if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_moving_none_notifications_handling_integration(self, pool_class): """ Test full integration of moving-related notifications (MOVING) handling with Redis commands. This test validates the complete MOVING notification lifecycle, when the push notification doesn't contain host and port: 1. Creates multiple connections in the pool 2. Executes a Redis command that triggers a MOVING with "null" push message 3. Validates that pool configuration is updated with temporary address and timeout - for new connections creation 4. Validates that existing connections are marked for disconnection after ttl/2 seconds 5. 
Tests both ConnectionPool and BlockingConnectionPool implementations """ # Create a pool and Redis client with maintenance notifications and pool handler test_redis_client = self._get_client(pool_class, max_connections=10) try: # Create several connections and return them in the pool connections = [] for _ in range(10): connection = test_redis_client.connection_pool.get_connection() connections.append(connection) for connection in connections: test_redis_client.connection_pool.release(connection) # Take 5 connections to be "in use" in_use_connections = [] for _ in range(5): connection = test_redis_client.connection_pool.get_connection() in_use_connections.append(connection) # Validate all connections are connected prior MOVING notification self._validate_disconnected(0) # Run command that will receive and handle MOVING notification key_moving = "key_receive_moving_none_0" value_moving = "value3_0" # the connection used for the command is expected to be reconnected to the new address # before it is returned to the pool result2 = test_redis_client.set(key_moving, value_moving) # Validate Command 2 result assert result2 is True, "Command 2 (SET key_receive_moving) failed" # Validate pool and connections settings were updated according to MOVING notification Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.MOVING, expected_maintenance_notification_hash=hash(MOVING_NONE_NOTIFICATION), expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) self._validate_disconnected(0) self._validate_connected(10) Helpers.validate_in_use_connections_state( in_use_connections, expected_should_reconnect=False, 
expected_state=MaintenanceState.MOVING, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=self.config.relaxed_timeout, expected_current_peername=DEFAULT_ADDRESS.split(":")[ 0 ], # the in use connections reconnect when they complete their current task ) Helpers.validate_free_connections_state( pool=test_redis_client.connection_pool, expected_state=MaintenanceState.MOVING, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, should_be_connected_count=5, connected_to_tmp_address=False, ) # Wait for half of MOVING timeout to expire and the proactive reconnect to run sleep(MOVING_TIMEOUT / 2 + 0.2) Helpers.validate_in_use_connections_state( in_use_connections, expected_should_reconnect=True, expected_state=MaintenanceState.MOVING, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=self.config.relaxed_timeout, expected_current_peername=DEFAULT_ADDRESS.split(":")[ 0 ], # the in use connections reconnect when they complete their current task ) self._validate_disconnected(5) self._validate_connected(5) # Wait for MOVING timeout to expire and the moving completed handler to run sleep(MOVING_TIMEOUT / 2 + 0.2) Helpers.validate_in_use_connections_state( in_use_connections, 
expected_state=MaintenanceState.NONE, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_socket_timeout=None, expected_socket_connect_timeout=None, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=None, expected_current_peername=DEFAULT_ADDRESS.split(":")[0], ) Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.NONE, expected_maintenance_notification_hash=None, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), expected_socket_timeout=None, expected_socket_connect_timeout=None, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) Helpers.validate_free_connections_state( pool=test_redis_client.connection_pool, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_socket_timeout=None, expected_socket_connect_timeout=None, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, should_be_connected_count=0, connected_to_tmp_address=True, expected_state=MaintenanceState.NONE, ) finally: if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_create_new_conn_while_moving_not_expired(self, pool_class): """ Test creating new connections while MOVING notification is active (not expired). This test validates that: 1. After MOVING notification is processed, new connections are created with temporary address 2. New connections inherit the relaxed timeout settings 3. 
Pool configuration is properly applied to newly created connections """ # Create a pool and Redis client with maintenance notifications and pool handler test_redis_client = self._get_client(pool_class, max_connections=10) try: # Create several connections and return them in the pool connections = [] for _ in range(5): connection = test_redis_client.connection_pool.get_connection() connections.append(connection) for connection in connections: test_redis_client.connection_pool.release(connection) # Take 3 connections to be "in use" in_use_connections = [] for _ in range(3): connection = test_redis_client.connection_pool.get_connection() in_use_connections.append(connection) # Validate all connections are connected prior MOVING notification self._validate_disconnected(0) # Run command that will receive and handle MOVING notification key_moving = "key_receive_moving_0" value_moving = "value3_0" result = test_redis_client.set(key_moving, value_moving) # Validate command result assert result is True, "SET key_receive_moving command failed" # Validate pool and connections settings were updated according to MOVING notification Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.MOVING, expected_maintenance_notification_hash=hash(MOVING_NOTIFICATION), expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_host_address=AFTER_MOVING_ADDRESS.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, ) # Now get several more connections to force creation of new ones # This should create new connections with the temporary address old_connections = [] for _ in range(2): connection = test_redis_client.connection_pool.get_connection() old_connections.append(connection) new_connection = 
test_redis_client.connection_pool.get_connection() # Validate that new connections are created with temporary address and relaxed timeout # and when connecting those configs are used # get_connection() returns a connection that is already connected assert new_connection.host == AFTER_MOVING_ADDRESS.split(":")[0] assert new_connection.socket_timeout is self.config.relaxed_timeout # New connections should be connected to the temporary address assert new_connection._get_socket() is not None assert new_connection._get_socket().connected is True assert ( new_connection._get_socket().getpeername()[0] == AFTER_MOVING_ADDRESS.split(":")[0] ) assert ( new_connection._get_socket().gettimeout() == self.config.relaxed_timeout ) finally: if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_create_new_conn_after_moving_expires(self, pool_class): """ Test creating new connections after MOVING notification expires. This test validates that: 1. After MOVING timeout expires, new connections use original address 2. Pool configuration is reset to original values 3. 
New connections don't inherit temporary settings """ # Create a pool and Redis client with maintenance notifications and pool handler test_redis_client = self._get_client(pool_class, max_connections=10) try: # Create several connections and return them in the pool connections = [] for _ in range(5): connection = test_redis_client.connection_pool.get_connection() connections.append(connection) for connection in connections: test_redis_client.connection_pool.release(connection) # Take 3 connections to be "in use" in_use_connections = [] for _ in range(3): connection = test_redis_client.connection_pool.get_connection() in_use_connections.append(connection) # Run command that will receive and handle MOVING notification key_moving = "key_receive_moving_0" value_moving = "value3_0" result = test_redis_client.set(key_moving, value_moving) # Validate command result assert result is True, "SET key_receive_moving command failed" # Wait for MOVING timeout to expire sleep(MOVING_TIMEOUT + 0.5) # Now get several new connections after expiration old_connections = [] for _ in range(2): connection = test_redis_client.connection_pool.get_connection() old_connections.append(connection) new_connection = test_redis_client.connection_pool.get_connection() # Validate that new connections are created with original address (no temporary settings) assert new_connection.orig_host_address == DEFAULT_ADDRESS.split(":")[0] assert new_connection.orig_socket_timeout is None # New connections should be connected to the original address assert new_connection._get_socket() is not None assert new_connection._get_socket().connected is True # Socket timeout should be None (original timeout) assert new_connection._get_socket().gettimeout() is None finally: if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_receive_migrated_after_moving(self, pool_class): """ 
Test receiving MIGRATED notification after MOVING notification. This test validates the complete MOVING -> MIGRATED lifecycle: 1. MOVING notification is processed and temporary settings are applied 2. MIGRATED notification is received during command execution 3. Temporary settings are cleared after MIGRATED 4. Pool configuration is restored to original values Note: When MIGRATED comes after MOVING and MOVING hasn't yet expired, it should not decrease timeouts (future refactoring consideration). """ # Create a pool and Redis client with maintenance notifications and pool handler test_redis_client = self._get_client(pool_class, max_connections=10) try: # Create several connections and return them in the pool connections = [] for _ in range(5): connection = test_redis_client.connection_pool.get_connection() connections.append(connection) for connection in connections: test_redis_client.connection_pool.release(connection) # Take 3 connections to be "in use" in_use_connections = [] for _ in range(3): connection = test_redis_client.connection_pool.get_connection() in_use_connections.append(connection) # Validate all connections are connected prior MOVING notification self._validate_disconnected(0) # Step 1: Run command that will receive and handle MOVING notification key_moving = "key_receive_moving_0" value_moving = "value3_0" result_moving = test_redis_client.set(key_moving, value_moving) # Validate MOVING command result assert result_moving is True, "SET key_receive_moving command failed" # Validate pool and connections settings were updated according to MOVING notification Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.MOVING, expected_maintenance_notification_hash=hash(MOVING_NOTIFICATION), expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, 
expected_host_address=AFTER_MOVING_ADDRESS.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, ) # TODO validate current socket timeout # Step 2: Run command that will receive and handle MIGRATED notification # This should clear the temporary settings key_migrated = "key_receive_migrated_0" value_migrated = "migrated_value" result_migrated = test_redis_client.set(key_migrated, value_migrated) # Validate MIGRATED command result assert result_migrated is True, "SET key_receive_migrated command failed" # Step 3: Validate that MIGRATED notification was processed but MOVING settings remain # (MIGRATED doesn't automatically clear MOVING settings - they are separate notifications) # MOVING settings should still be active # MOVING timeout should still be active Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.MOVING, expected_maintenance_notification_hash=hash(MOVING_NOTIFICATION), expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_host_address=AFTER_MOVING_ADDRESS.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, ) # Step 4: Create new connections after MIGRATED to verify they still use MOVING settings # (since MOVING settings are still active) new_connections = [] for _ in range(2): connection = test_redis_client.connection_pool.get_connection() new_connections.append(connection) # Validate that new connections are created with MOVING settings (still active) for connection in new_connections: assert connection.host == AFTER_MOVING_ADDRESS.split(":")[0] # Note: New connections may not inherit the exact relaxed timeout value # but they should have the temporary host address # New connections should be connected if 
connection._get_socket() is not None: assert connection._get_socket().connected is True # Release the new connections for connection in new_connections: test_redis_client.connection_pool.release(connection) # Validate free connections state with MOVING settings still active # Note: We'll validate with the pool's current settings rather than individual connection settings # since new connections may have different timeout values but still use the temporary address finally: if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_overlapping_moving_notifications(self, pool_class): """ Test handling of overlapping/duplicate MOVING notifications (e.g., two MOVING notifications before the first expires). Ensures that the second MOVING notification updates the pool and connections as expected, and that expiry/cleanup works. """ global AFTER_MOVING_ADDRESS test_redis_client = self._get_client(pool_class, max_connections=5) try: # Create and release some connections in_use_connections = [] for _ in range(3): in_use_connections.append( test_redis_client.connection_pool.get_connection() ) for conn in in_use_connections: test_redis_client.connection_pool.release(conn) # Take 2 connections to be in use in_use_connections = [] for _ in range(2): conn = test_redis_client.connection_pool.get_connection() in_use_connections.append(conn) # Trigger first MOVING notification key_moving1 = "key_receive_moving_0" value_moving1 = "value3_0" result1 = test_redis_client.set(key_moving1, value_moving1) assert result1 is True Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.MOVING, expected_maintenance_notification_hash=hash(MOVING_NOTIFICATION), expected_host_address=AFTER_MOVING_ADDRESS.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), 
expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) # Validate all connections reflect the first MOVING notification Helpers.validate_in_use_connections_state( in_use_connections, expected_state=MaintenanceState.MOVING, expected_host_address=AFTER_MOVING_ADDRESS.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=self.config.relaxed_timeout, expected_current_peername=DEFAULT_ADDRESS.split(":")[0], ) Helpers.validate_free_connections_state( pool=test_redis_client.connection_pool, should_be_connected_count=1, connected_to_tmp_address=True, expected_state=MaintenanceState.MOVING, expected_host_address=AFTER_MOVING_ADDRESS.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) # Reconnect in use connections for conn in in_use_connections: conn.disconnect() conn.connect() # Before the first MOVING expires, trigger a second MOVING notification (simulate new address) # Validate the orig properties are not changed! 
second_moving_address = "5.6.7.8:6380" orig_after_moving = AFTER_MOVING_ADDRESS # Temporarily modify the global constant for this test AFTER_MOVING_ADDRESS = second_moving_address second_moving_notification = NodeMovingNotification( id=1, new_node_host=second_moving_address.split(":")[0], new_node_port=int(second_moving_address.split(":")[1]), ttl=MOVING_TIMEOUT, ) try: key_moving2 = "key_receive_moving_1" value_moving2 = "value3_1" result2 = test_redis_client.set(key_moving2, value_moving2) assert result2 is True Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.MOVING, expected_maintenance_notification_hash=hash( second_moving_notification ), expected_host_address=second_moving_address.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) # Validate all connections reflect the second MOVING notification Helpers.validate_in_use_connections_state( in_use_connections, expected_state=MaintenanceState.MOVING, expected_host_address=second_moving_address.split(":")[0], expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=self.config.relaxed_timeout, expected_current_peername=orig_after_moving.split(":")[0], ) Helpers.validate_free_connections_state( test_redis_client.connection_pool, should_be_connected_count=1, connected_to_tmp_address=True, tmp_address=second_moving_address.split(":")[0], expected_state=MaintenanceState.MOVING, expected_host_address=second_moving_address.split(":")[0], 
expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) finally: AFTER_MOVING_ADDRESS = orig_after_moving # Wait for both MOVING timeouts to expire sleep(MOVING_TIMEOUT + 0.5) Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.NONE, expected_maintenance_notification_hash=None, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), expected_socket_timeout=None, expected_socket_connect_timeout=None, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) finally: if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() @pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool]) def test_thread_safety_concurrent_notification_handling(self, pool_class): """ Test thread-safety under concurrent maintenance notification handling. Simulates multiple threads triggering MOVING notifications and performing operations concurrently. 
""" import threading test_redis_client = self._get_client(pool_class, max_connections=5) results = [] errors = [] def worker(idx): try: key = f"key_receive_moving_{idx}" value = f"value3_{idx}" result = test_redis_client.set(key, value) results.append(result) except Exception as e: errors.append(e) threads = [threading.Thread(target=worker, args=(i,)) for i in range(5)] for t in threads: t.start() for t in threads: t.join() assert all(results), f"Not all threads succeeded: {results}" assert not errors, f"Errors occurred in threads: {errors}" # After all threads, MOVING notification should have been handled safely Helpers.validate_conn_kwargs( pool=test_redis_client.connection_pool, expected_maintenance_state=MaintenanceState.MOVING, expected_maintenance_notification_hash=hash(MOVING_NOTIFICATION), expected_host_address=AFTER_MOVING_ADDRESS.split(":")[0], expected_port=int(DEFAULT_ADDRESS.split(":")[1]), expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) if hasattr(test_redis_client.connection_pool, "disconnect"): test_redis_client.connection_pool.disconnect() @pytest.mark.parametrize( "pool_class,enable_cache", [ (ConnectionPool, False), (ConnectionPool, True), (BlockingConnectionPool, False), (BlockingConnectionPool, True), ], ) def test_moving_migrating_migrated_moved_state_transitions( self, pool_class, enable_cache ): """ Test moving configs are not lost if the per connection notifications get picked up after moving is handled. Sequence of notifications: MOVING, MIGRATING, MIGRATED, FAILING_OVER, FAILED_OVER, MOVED. Note: FAILING_OVER and FAILED_OVER notifications do not change the connection state when already in MOVING state. Checks the state after each notification for all connections and for new connections created during each state. 
""" # Setup test_redis_client = self._get_client( pool_class, max_connections=5, enable_cache=enable_cache, ) pool = test_redis_client.connection_pool # Create and release some connections in_use_connections = [] for _ in range(3): in_use_connections.append(pool.get_connection()) pool_handler = in_use_connections[0]._maint_notifications_pool_handler while len(in_use_connections) > 0: pool.release(in_use_connections.pop()) # Take 2 connections to be in use in_use_connections = [] for _ in range(2): conn = pool.get_connection() in_use_connections.append(conn) # 1. MOVING notification tmp_address = "22.23.24.25" moving_notification = NodeMovingNotification( id=1, new_node_host=tmp_address, new_node_port=6379, ttl=1 ) pool_handler.handle_notification(moving_notification) Helpers.validate_in_use_connections_state( in_use_connections, expected_state=MaintenanceState.MOVING, expected_host_address=tmp_address, expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=self.config.relaxed_timeout, expected_current_peername=DEFAULT_ADDRESS.split(":")[0], ) Helpers.validate_free_connections_state( pool=pool, should_be_connected_count=0, connected_to_tmp_address=False, expected_state=MaintenanceState.MOVING, expected_host_address=tmp_address, expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) # 2. 
MIGRATING notification (simulate direct connection handler call) for conn in in_use_connections: conn._maint_notifications_connection_handler.handle_notification( NodeMigratingNotification(id=2, ttl=1) ) Helpers.validate_in_use_connections_state( in_use_connections, expected_state=MaintenanceState.MOVING, expected_host_address=tmp_address, expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=self.config.relaxed_timeout, expected_current_peername=DEFAULT_ADDRESS.split(":")[0], ) # 3. MIGRATED notification (simulate direct connection handler call) for conn in in_use_connections: conn._maint_notifications_connection_handler.handle_notification( NodeMigratedNotification(id=2) ) # State should not change for connections that are in MOVING state Helpers.validate_in_use_connections_state( in_use_connections, expected_state=MaintenanceState.MOVING, expected_host_address=tmp_address, expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=self.config.relaxed_timeout, expected_current_peername=DEFAULT_ADDRESS.split(":")[0], ) # 4. 
FAILING_OVER notification (simulate direct connection handler call) for conn in in_use_connections: conn._maint_notifications_connection_handler.handle_notification( NodeFailingOverNotification(id=3, ttl=1) ) # State should not change for connections that are in MOVING state Helpers.validate_in_use_connections_state( in_use_connections, expected_state=MaintenanceState.MOVING, expected_host_address=tmp_address, expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=self.config.relaxed_timeout, expected_current_peername=DEFAULT_ADDRESS.split(":")[0], ) # 5. FAILED_OVER notification (simulate direct connection handler call) for conn in in_use_connections: conn._maint_notifications_connection_handler.handle_notification( NodeFailedOverNotification(id=3) ) # State should not change for connections that are in MOVING state Helpers.validate_in_use_connections_state( in_use_connections, expected_state=MaintenanceState.MOVING, expected_host_address=tmp_address, expected_socket_timeout=self.config.relaxed_timeout, expected_socket_connect_timeout=self.config.relaxed_timeout, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=self.config.relaxed_timeout, expected_current_peername=DEFAULT_ADDRESS.split(":")[0], ) # 6. 
MOVED notification (simulate timer expiry) pool_handler.handle_node_moved_notification(moving_notification) Helpers.validate_in_use_connections_state( in_use_connections, expected_state=MaintenanceState.NONE, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_socket_timeout=None, expected_socket_connect_timeout=None, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, expected_current_socket_timeout=None, expected_current_peername=DEFAULT_ADDRESS.split(":")[0], ) Helpers.validate_free_connections_state( pool=pool, should_be_connected_count=0, connected_to_tmp_address=False, expected_state=MaintenanceState.NONE, expected_host_address=DEFAULT_ADDRESS.split(":")[0], expected_socket_timeout=None, expected_socket_connect_timeout=None, expected_orig_host_address=DEFAULT_ADDRESS.split(":")[0], expected_orig_socket_timeout=None, expected_orig_socket_connect_timeout=None, ) # New connection after MOVED new_conn_none = pool.get_connection() assert new_conn_none.maintenance_state == MaintenanceState.NONE pool.release(new_conn_none) # Cleanup for conn in in_use_connections: pool.release(conn) if hasattr(pool, "disconnect"): pool.disconnect()
TestMaintenanceNotificationsHandlingSingleProxy
python
allegroai__clearml
clearml/backend_api/services/v2_23/events.py
{ "start": 76200, "end": 82798 }
class ____(Request): """ Return the debug image per metric and variant for the provided iteration :param task: Task ID :type task: str :param metric: Metric name :type metric: str :param variant: Metric variant :type variant: str :param iteration: The iteration to bring debug image from. If not specified then the latest reported image is retrieved :type iteration: int :param refresh: If set then scroll state will be refreshed to reflect the latest changes in the debug images :type refresh: bool :param scroll_id: Scroll ID from the previous call to get_debug_image_sample or empty :type scroll_id: str :param navigate_current_metric: If set then subsequent navigation with next_debug_image_sample is done on the debug images for the passed metric only. Otherwise for all the metrics :type navigate_current_metric: bool :param model_events: If set then the retrieving model debug images. Otherwise task debug images :type model_events: bool """ _service = "events" _action = "get_debug_image_sample" _version = "2.23" _schema = { "definitions": {}, "properties": { "iteration": { "description": "The iteration to bring debug image from. If not specified then the latest reported image is retrieved", "type": "integer", }, "metric": {"description": "Metric name", "type": "string"}, "model_events": { "default": False, "description": "If set then the retrieving model debug images. Otherwise task debug images", "type": "boolean", }, "navigate_current_metric": { "default": True, "description": "If set then subsequent navigation with next_debug_image_sample is done on the debug images for the passed metric only. 
Otherwise for all the metrics", "type": "boolean", }, "refresh": { "description": "If set then scroll state will be refreshed to reflect the latest changes in the debug images", "type": "boolean", }, "scroll_id": { "description": "Scroll ID from the previous call to get_debug_image_sample or empty", "type": "string", }, "task": {"description": "Task ID", "type": "string"}, "variant": {"description": "Metric variant", "type": "string"}, }, "required": ["task", "metric", "variant"], "type": "object", } def __init__( self, task: str, metric: str, variant: str, iteration: Optional[int] = None, refresh: Optional[bool] = None, scroll_id: Optional[str] = None, navigate_current_metric: Optional[bool] = True, model_events: Optional[bool] = False, **kwargs: Any ) -> None: super(GetDebugImageSampleRequest, self).__init__(**kwargs) self.task = task self.metric = metric self.variant = variant self.iteration = iteration self.refresh = refresh self.scroll_id = scroll_id self.navigate_current_metric = navigate_current_metric self.model_events = model_events @schema_property("task") def task(self) -> str: return self._property_task @task.setter def task(self, value: str) -> None: if value is None: self._property_task = None return self.assert_isinstance(value, "task", six.string_types) self._property_task = value @schema_property("metric") def metric(self) -> str: return self._property_metric @metric.setter def metric(self, value: str) -> None: if value is None: self._property_metric = None return self.assert_isinstance(value, "metric", six.string_types) self._property_metric = value @schema_property("variant") def variant(self) -> str: return self._property_variant @variant.setter def variant(self, value: str) -> None: if value is None: self._property_variant = None return self.assert_isinstance(value, "variant", six.string_types) self._property_variant = value @schema_property("iteration") def iteration(self) -> Optional[int]: return self._property_iteration @iteration.setter def 
iteration(self, value: Optional[int]) -> None: if value is None: self._property_iteration = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "iteration", six.integer_types) self._property_iteration = value @schema_property("refresh") def refresh(self) -> Optional[bool]: return self._property_refresh @refresh.setter def refresh(self, value: Optional[bool]) -> None: if value is None: self._property_refresh = None return self.assert_isinstance(value, "refresh", (bool,)) self._property_refresh = value @schema_property("scroll_id") def scroll_id(self) -> Optional[str]: return self._property_scroll_id @scroll_id.setter def scroll_id(self, value: Optional[str]) -> None: if value is None: self._property_scroll_id = None return self.assert_isinstance(value, "scroll_id", six.string_types) self._property_scroll_id = value @schema_property("navigate_current_metric") def navigate_current_metric(self) -> Optional[bool]: return self._property_navigate_current_metric @navigate_current_metric.setter def navigate_current_metric(self, value: Optional[bool]) -> None: if value is None: self._property_navigate_current_metric = None return self.assert_isinstance(value, "navigate_current_metric", (bool,)) self._property_navigate_current_metric = value @schema_property("model_events") def model_events(self) -> Optional[bool]: return self._property_model_events @model_events.setter def model_events(self, value: Optional[bool]) -> None: if value is None: self._property_model_events = None return self.assert_isinstance(value, "model_events", (bool,)) self._property_model_events = value
GetDebugImageSampleRequest
python
pytorch__pytorch
torch/utils/data/datapipes/iter/grouping.py
{ "start": 2857, "end": 4831 }
class ____(IterDataPipe): r""" Undos batching of data (functional name: ``unbatch``). In other words, it flattens the data up to the specified level within a batched DataPipe. Args: datapipe: Iterable DataPipe being un-batched unbatch_level: Defaults to ``1`` (only flattening the top level). If set to ``2``, it will flatten the top two levels, and ``-1`` will flatten the entire DataPipe. Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]]) >>> dp1 = source_dp.unbatch() >>> list(dp1) [[0, 1], [2], [3, 4], [5], [6]] >>> dp2 = source_dp.unbatch(unbatch_level=2) >>> list(dp2) [0, 1, 2, 3, 4, 5, 6] """ def __init__(self, datapipe: IterDataPipe, unbatch_level: int = 1) -> None: self.datapipe = datapipe self.unbatch_level = unbatch_level def __iter__(self): for element in self.datapipe: yield from self._dive(element, unbatch_level=self.unbatch_level) def _dive(self, element, unbatch_level): if unbatch_level < -1: raise ValueError("unbatch_level must be -1 or >= 0") if unbatch_level == -1: if isinstance(element, (list, DataChunk)): for item in element: yield from self._dive(item, unbatch_level=-1) else: yield element elif unbatch_level == 0: yield element else: if isinstance(element, (list, DataChunk)): for item in element: yield from self._dive(item, unbatch_level=unbatch_level - 1) else: raise IndexError( f"unbatch_level {self.unbatch_level} exceeds the depth of the DataPipe" ) @functional_datapipe("groupby")
UnBatcherIterDataPipe
python
scrapy__scrapy
tests/test_scheduler.py
{ "start": 1611, "end": 2159 }
class ____: def __init__(self): self.slots = {} def get_slot_key(self, request): if Downloader.DOWNLOAD_SLOT in request.meta: return request.meta[Downloader.DOWNLOAD_SLOT] return urlparse_cached(request).hostname or "" def increment(self, slot_key): slot = self.slots.setdefault(slot_key, MockSlot(active=[])) slot.active.append(1) def decrement(self, slot_key): slot = self.slots.get(slot_key) slot.active.pop() def close(self): pass
MockDownloader
python
getsentry__sentry-python
tests/integrations/django/myapp/management/commands/mycrash.py
{ "start": 54, "end": 187 }
class ____(BaseCommand): def add_arguments(self, parser): pass def handle(self, *args, **options): 1 / 0
Command
python
huggingface__transformers
tests/models/opt/test_modeling_opt.py
{ "start": 13707, "end": 14632 }
class ____(unittest.TestCase): @slow def test_inference_no_head(self): model = OPTModel.from_pretrained("facebook/opt-350m").to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) with torch.no_grad(): output = model(input_ids=input_ids).last_hidden_state expected_shape = torch.Size((1, 11, 512)) self.assertEqual(output.shape, expected_shape) # expected value works for CPU, as well as GPU (with TF32 disabled) expected_slice = torch.tensor( [ [-0.28726277, -1.9241608, -0.3058734], [-1.2737825, -0.13332152, -0.18766522], [0.41159445, 0.1191957, -1.3107123], ], device=torch_device, ) assert_tensors_close(output[0, :3, :3], expected_slice, atol=5e-5) @require_torch @slow
OPTModelIntegrationTests
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/solids.py
{ "start": 19923, "end": 20095 }
class ____(graphene.ObjectType): class Meta: interfaces = (GrapheneError,) name = "SolidStepStatusUnavailableError"
GrapheneSolidStepStatsUnavailableError
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/call10.py
{ "start": 496, "end": 946 }
class ____(str): ... def test_kwargs2( a: Mapping[str, Any], b: Mapping[Any, Hashable], c: dict[StrSubclass, Hashable], d: int, e: Mapping[int, Hashable], f: tuple[str, ...], ): test_kwargs(**a) test_kwargs(**b) test_kwargs(**c) # This should generate an error test_kwargs(**d) # This should generate an error test_kwargs(**e) # This should generate an error test_kwargs(**f)
StrSubclass
python
keras-team__keras
keras/src/layers/pooling/global_average_pooling_test.py
{ "start": 174, "end": 2815 }
class ____(testing.TestCase): @parameterized.parameters( ("channels_last", False, (3, 5, 4), (3, 4)), ("channels_last", True, (3, 5, 4), (3, 1, 4)), ("channels_first", False, (3, 5, 4), (3, 5)), ) def test_global_average_pooling1d( self, data_format, keepdims, input_shape, output_shape, ): self.run_layer_test( layers.GlobalAveragePooling1D, init_kwargs={ "data_format": data_format, "keepdims": keepdims, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=True, assert_built_after_instantiation=True, ) @parameterized.parameters( ("channels_last", False, (3, 5, 6, 4), (3, 4)), ("channels_last", True, (3, 5, 6, 4), (3, 1, 1, 4)), ("channels_first", False, (3, 5, 6, 4), (3, 5)), ) def test_global_average_pooling2d( self, data_format, keepdims, input_shape, output_shape, ): self.run_layer_test( layers.GlobalAveragePooling2D, init_kwargs={ "data_format": data_format, "keepdims": keepdims, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, assert_built_after_instantiation=True, ) @parameterized.parameters( ("channels_last", False, (3, 5, 6, 5, 4), (3, 4)), ("channels_last", True, (3, 5, 6, 5, 4), (3, 1, 1, 1, 4)), ("channels_first", False, (3, 5, 6, 5, 4), (3, 5)), ) def test_global_average_pooling3d( self, data_format, keepdims, input_shape, output_shape, ): self.run_layer_test( layers.GlobalAveragePooling3D, init_kwargs={ "data_format": data_format, "keepdims": keepdims, }, input_shape=input_shape, expected_output_shape=output_shape, expected_num_trainable_weights=0, expected_num_non_trainable_weights=0, expected_num_losses=0, supports_masking=False, assert_built_after_instantiation=True, )
GlobalAveragePoolingBasicTest
python
dagster-io__dagster
python_modules/libraries/dagster-aws/dagster_aws/secretsmanager/resources.py
{ "start": 4758, "end": 12745 }
class ____(ResourceWithBoto3Configuration): """Resource that provides a dict which maps selected SecretsManager secrets to their string values. Also optionally sets chosen secrets as environment variables. Example: .. code-block:: python import os from dagster import build_op_context, job, op, ResourceParam from dagster_aws.secretsmanager import SecretsManagerSecretsResource @op def example_secretsmanager_secrets_op(secrets: SecretsManagerSecretsResource): return secrets.fetch_secrets().get("my-secret-name") @op def example_secretsmanager_secrets_op_2(secrets: SecretsManagerSecretsResource): with secrets.secrets_in_environment(): return os.getenv("my-other-secret-name") @job def example_job(): example_secretsmanager_secrets_op() example_secretsmanager_secrets_op_2() Definitions( jobs=[example_job], resources={ 'secrets': SecretsManagerSecretsResource( region_name='us-west-1', secrets_tag="dagster", add_to_environment=True, ) } ) Note that your ops must also declare that they require this resource with or it will not be initialized for the execution of their compute functions. """ secrets: list[str] = Field( default=[], description="An array of AWS Secrets Manager secrets arns to fetch." ) secrets_tag: Optional[str] = Field( default=None, description="AWS Secrets Manager secrets with this tag will be fetched and made available.", ) @classmethod def _is_dagster_maintained(cls) -> bool: return True @contextmanager def secrets_in_environment( self, secrets: Optional[list[str]] = None, secrets_tag: Optional[str] = None, ) -> Generator[dict[str, str], None, None]: """Yields a dict which maps selected SecretsManager secrets to their string values. Also sets chosen secrets as environment variables. Args: secrets (Optional[List[str]]): An array of AWS Secrets Manager secrets arns to fetch. Note that this will override the secrets specified in the resource config. secrets_tag (Optional[str]): AWS Secrets Manager secrets with this tag will be fetched and made available. 
Note that this will override the secrets_tag specified in the resource config. """ secrets_manager = construct_secretsmanager_client( max_attempts=self.max_attempts, region_name=self.region_name, profile_name=self.profile_name, ) secrets_tag_to_fetch = secrets_tag if secrets_tag is not None else self.secrets_tag secrets_to_fetch = secrets if secrets is not None else self.secrets secret_arns = merge_dicts( ( get_tagged_secrets(secrets_manager, [secrets_tag_to_fetch]) if secrets_tag_to_fetch else {} ), get_secrets_from_arns(secrets_manager, secrets_to_fetch), ) secrets_map = { name: secrets_manager.get_secret_value(SecretId=arn).get("SecretString") for name, arn in secret_arns.items() } with environ(secrets_map): yield secrets_map def fetch_secrets( self, secrets: Optional[list[str]] = None, secrets_tag: Optional[str] = None, ) -> dict[str, str]: """Fetches secrets from AWS Secrets Manager and returns them as a dict. Args: secrets (Optional[List[str]]): An array of AWS Secrets Manager secrets arns to fetch. Note that this will override the secrets specified in the resource config. secrets_tag (Optional[str]): AWS Secrets Manager secrets with this tag will be fetched and made available. Note that this will override the secrets_tag specified in the resource config. """ with self.secrets_in_environment(secrets=secrets, secrets_tag=secrets_tag) as secret_values: return secret_values LEGACY_SECRETSMANAGER_SECRETS_SCHEMA = { **cast("Shape", SecretsManagerSecretsResource.to_config_schema().as_field().config_type).fields, "add_to_environment": LegacyDagsterField( bool, default_value=False, description="Whether to add the secrets to the environment. Defaults to False.", ), } @beta @dagster_maintained_resource @resource(config_schema=LEGACY_SECRETSMANAGER_SECRETS_SCHEMA) @contextmanager def secretsmanager_secrets_resource(context): """Resource that provides a dict which maps selected SecretsManager secrets to their string values. 
Also optionally sets chosen secrets as environment variables. Example: .. code-block:: python import os from dagster import build_op_context, job, op from dagster_aws.secretsmanager import secretsmanager_secrets_resource @op(required_resource_keys={'secrets'}) def example_secretsmanager_secrets_op(context): return context.resources.secrets.get("my-secret-name") @op(required_resource_keys={'secrets'}) def example_secretsmanager_secrets_op_2(context): return os.getenv("my-other-secret-name") @job(resource_defs={'secrets': secretsmanager_secrets_resource}) def example_job(): example_secretsmanager_secrets_op() example_secretsmanager_secrets_op_2() example_job.execute_in_process( run_config={ 'resources': { 'secrets': { 'config': { 'region_name': 'us-west-1', 'secrets_tag': 'dagster', 'add_to_environment': True, } } } } ) Note that your ops must also declare that they require this resource with `required_resource_keys`, or it will not be initialized for the execution of their compute functions. You may configure this resource as follows: .. code-block:: YAML resources: secretsmanager: config: region_name: "us-west-1" # Optional[str]: Specifies a custom region for the SecretsManager session. Default is chosen # through the ordinary boto credential chain. profile_name: "dev" # Optional[str]: Specifies a custom profile for SecretsManager session. Default is default # profile as specified in ~/.aws/credentials file secrets: ["arn:aws:secretsmanager:region:aws_account_id:secret:appauthexample-AbCdEf"] # Optional[List[str]]: Specifies a list of secret ARNs to pull from SecretsManager. secrets_tag: "dagster" # Optional[str]: Specifies a tag, all secrets which have the tag set will be pulled # from SecretsManager. add_to_environment: true # Optional[bool]: Whether to set the selected secrets as environment variables. Defaults # to false. 
""" add_to_environment = context.resource_config.get("add_to_environment", False) if add_to_environment: with SecretsManagerSecretsResource.from_resource_context( context ).secrets_in_environment() as secrets: yield secrets else: yield SecretsManagerSecretsResource.from_resource_context(context).fetch_secrets()
SecretsManagerSecretsResource
python
tensorflow__tensorflow
tensorflow/python/summary/tb_summary.py
{ "start": 918, "end": 14995 }
class ____(Exception): def __init__(self, summary_api): self.error_message = f"{_TENSORBOARD_NOT_INSTALLED_ERROR} {summary_api}" super().__init__(self.error_message) @tf_export("summary.audio", v1=[]) def audio( name, data, sample_rate, step=None, max_outputs=3, encoding=None, description=None, ): """Write an audio summary. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A `Tensor` representing audio data with shape `[k, t, c]`, where `k` is the number of audio clips, `t` is the number of frames, and `c` is the number of channels. Elements should be floating-point values in `[-1.0, 1.0]`. Any of the dimensions may be statically unknown (i.e., `None`). sample_rate: An `int` or rank-0 `int32` `Tensor` that represents the sample rate, in Hz. Must be positive. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this many audio clips will be emitted at each step. When more than `max_outputs` many clips are provided, the first `max_outputs` many clips will be used and the rest silently discarded. encoding: Optional constant `str` for the desired encoding. Only "wav" is currently supported, but this is not guaranteed to remain the default, so if you want "wav" in particular, set this explicitly. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None. 
""" try: from tensorboard.summary.v2 import audio as audio_v2 # pylint: disable=g-import-not-at-top, g-importing-member except ImportError as exc: raise TBNotInstalledError("tf.summary.audio") from exc return audio_v2( name=name, data=data, sample_rate=sample_rate, step=step, max_outputs=max_outputs, encoding=encoding, description=description, ) @tf_export("summary.histogram", v1=[]) def histogram(name, data, step=None, buckets=None, description=None): """Write a histogram summary. See also `tf.summary.scalar`, `tf.summary.SummaryWriter`. Writes a histogram to the current default summary writer, for later analysis in TensorBoard's 'Histograms' and 'Distributions' dashboards (data written using this API will appear in both places). Like `tf.summary.scalar` points, each histogram is associated with a `step` and a `name`. All the histograms with the same `name` constitute a time series of histograms. The histogram is calculated over all the elements of the given `Tensor` without regard to its shape or rank. This example writes 2 histograms: ```python w = tf.summary.create_file_writer('test/logs') with w.as_default(): tf.summary.histogram("activations", tf.random.uniform([100, 50]), step=0) tf.summary.histogram("initial_weights", tf.random.normal([1000]), step=0) ``` A common use case is to examine the changing activation patterns (or lack thereof) at specific layers in a neural network, over time. ```python w = tf.summary.create_file_writer('test/logs') with w.as_default(): for step in range(100): # Generate fake "activations". activations = [ tf.random.normal([1000], mean=step, stddev=1), tf.random.normal([1000], mean=step, stddev=10), tf.random.normal([1000], mean=step, stddev=100), ] tf.summary.histogram("layer1/activate", activations[0], step=step) tf.summary.histogram("layer2/activate", activations[1], step=step) tf.summary.histogram("layer3/activate", activations[2], step=step) ``` Arguments: name: A name for this summary. 
The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A `Tensor` of any shape. The histogram is computed over its elements, which must be castable to `float64`. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. buckets: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then all buckets' left and right endpoints are the same and only the last bucket has nonzero count. Defaults to 30 if not specified. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None. """ try: from tensorboard.summary.v2 import histogram as histogram_v2 # pylint: disable=g-import-not-at-top, g-importing-member except ImportError as exc: raise TBNotInstalledError("tf.summary.histogram") from exc return histogram_v2( name=name, data=data, step=step, buckets=buckets, description=description ) @tf_export("summary.image", v1=[]) def image(name, data, step=None, max_outputs=3, description=None): """Write an image summary. See also `tf.summary.scalar`, `tf.summary.SummaryWriter`. Writes a collection of images to the current default summary writer. Data appears in TensorBoard's 'Images' dashboard. Like `tf.summary.scalar` points, each collection of images is associated with a `step` and a `name`. All the image collections with the same `name` constitute a time series of image collections. 
This example writes 2 random grayscale images: ```python w = tf.summary.create_file_writer('test/logs') with w.as_default(): image1 = tf.random.uniform(shape=[8, 8, 1]) image2 = tf.random.uniform(shape=[8, 8, 1]) tf.summary.image("grayscale_noise", [image1, image2], step=0) ``` To avoid clipping, data should be converted to one of the following: - floating point values in the range [0,1], or - uint8 values in the range [0,255] ```python # Convert the original dtype=int32 `Tensor` into `dtype=float64`. rgb_image_float = tf.constant([ [[1000, 0, 0], [0, 500, 1000]], ]) / 1000 tf.summary.image("picture", [rgb_image_float], step=0) # Convert original dtype=uint8 `Tensor` into proper range. rgb_image_uint8 = tf.constant([ [[1, 1, 0], [0, 0, 1]], ], dtype=tf.uint8) * 255 tf.summary.image("picture", [rgb_image_uint8], step=1) ``` Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A `Tensor` representing pixel data with shape `[k, h, w, c]`, where `k` is the number of images, `h` and `w` are the height and width of the images, and `c` is the number of channels, which should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA). Any of the dimensions may be statically unknown (i.e., `None`). Floating point data will be clipped to the range [0,1]. Other data types will be clipped into an allowed range for safe casting to uint8, using `tf.image.convert_image_dtype`. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this many images will be emitted at each step. When more than `max_outputs` many images are provided, the first `max_outputs` many images will be used and the rest silently discarded. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. 
Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None. """ try: from tensorboard.summary.v2 import image as image_v2 # pylint: disable=g-import-not-at-top, g-importing-member except ImportError as exc: raise TBNotInstalledError("tf.summary.image") from exc return image_v2( name=name, data=data, step=step, max_outputs=max_outputs, description=description, ) @tf_export("summary.scalar", v1=[]) def scalar(name, data, step=None, description=None): """Write a scalar summary. See also `tf.summary.image`, `tf.summary.histogram`, `tf.summary.SummaryWriter`. Writes simple numeric values for later analysis in TensorBoard. Writes go to the current default summary writer. Each summary point is associated with an integral `step` value. This enables the incremental logging of time series data. A common usage of this API is to log loss during training to produce a loss curve. For example: ```python test_summary_writer = tf.summary.create_file_writer('test/logdir') with test_summary_writer.as_default(): tf.summary.scalar('loss', 0.345, step=1) tf.summary.scalar('loss', 0.234, step=2) tf.summary.scalar('loss', 0.123, step=3) ``` Multiple independent time series may be logged by giving each series a unique `name` value. See [Get started with TensorBoard](https://www.tensorflow.org/tensorboard/get_started) for more examples of effective usage of `tf.summary.scalar`. In general, this API expects that data points are logged with a monotonically increasing step value. Duplicate points for a single step or points logged out of order by step are not guaranteed to display as desired in TensorBoard. Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. 
data: A real numeric scalar value, convertible to a `float32` Tensor. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None. """ try: from tensorboard.summary.v2 import scalar as scalar_v2 # pylint: disable=g-import-not-at-top, g-importing-member except ImportError as exc: raise TBNotInstalledError("tf.summary.scalar") from exc return scalar_v2(name=name, data=data, step=step, description=description) @tf_export("summary.text", v1=[]) def text(name, data, step=None, description=None): r"""Write a text summary. See also `tf.summary.scalar`, `tf.summary.SummaryWriter`, `tf.summary.image`. Writes text Tensor values for later visualization and analysis in TensorBoard. Writes go to the current default summary writer. Like `tf.summary.scalar` points, text points are each associated with a `step` and a `name`. All the points with the same `name` constitute a time series of text values. For Example: ```python test_summary_writer = tf.summary.create_file_writer('test/logdir') with test_summary_writer.as_default(): tf.summary.text('first_text', 'hello world!', step=0) tf.summary.text('first_text', 'nice to meet you!', step=1) ``` The text summary can also contain Markdown, and TensorBoard will render the text as such. 
```python with test_summary_writer.as_default(): text_data = ''' | *hello* | *there* | |---------|---------| | this | is | | a | table | ''' text_data = '\n'.join(l.strip() for l in text_data.splitlines()) tf.summary.text('markdown_text', text_data, step=0) ``` Since text is Tensor valued, each text point may be a Tensor of string values. rank-1 and rank-2 Tensors are rendered as tables in TensorBoard. For higher ranked Tensors, you'll see just a 2D slice of the data. To avoid this, reshape the Tensor to at most rank-2 prior to passing it to this function. Demo notebook at ["Displaying text data in TensorBoard"](https://www.tensorflow.org/tensorboard/text_summaries). Arguments: name: A name for this summary. The summary tag used for TensorBoard will be this name prefixed by any active name scopes. data: A UTF-8 string Tensor value. step: Explicit `int64`-castable monotonic step value for this summary. If omitted, this defaults to `tf.summary.experimental.get_step()`, which must not be None. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. Returns: True on success, or false if no summary was emitted because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and `tf.summary.experimental.get_step()` is None. """ try: from tensorboard.summary.v2 import text as text_v2 # pylint: disable=g-import-not-at-top, g-importing-member except ImportError as exc: raise TBNotInstalledError("tf.summary.text") from exc return text_v2(name=name, data=data, step=step, description=description)
TBNotInstalledError
python
getsentry__sentry
src/sentry/sentry_apps/api/endpoints/sentry_app_installations.py
{ "start": 1323, "end": 1503 }
class ____(serializers.Serializer): slug = SentrySerializerSlugField(required=True, max_length=SENTRY_APP_SLUG_MAX_LENGTH) @control_silo_endpoint
SentryAppInstallationsSerializer
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_hex_color.py
{ "start": 490, "end": 1603 }
class ____(ColumnMapMetricProvider): # This is the id string that will be used to reference your metric. condition_metric_name = "column_values.valid_hexcolor" # This method implements the core logic for the PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): def matches_hexcolor_regex(x): return bool(re.match(HEX_COLOR_REGEX, str(x))) return column.apply(lambda x: matches_hexcolor_regex(x) if x else False) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # raise NotImplementedError # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # raise NotImplementedError # This class defines the Expectation itself
ColumnValuesToBeValidHexColor
python
faif__python-patterns
patterns/behavioral/observer.py
{ "start": 1930, "end": 2074 }
class ____: def update(self, subject: Data) -> None: print(f"HexViewer: Subject {subject.name} has data 0x{subject.data:x}")
HexViewer
python
PrefectHQ__prefect
src/integrations/prefect-aws/prefect_aws/glue_job.py
{ "start": 247, "end": 2199 }
class ____(BaseModel, JobRun): """Execute a Glue Job""" job_name: str = Field( ..., title="AWS Glue Job Name", description="The name of the job definition to use.", ) job_id: str = Field( ..., title="AWS Glue Job ID", description="The ID of the job run.", ) job_watch_poll_interval: float = Field( default=60.0, description=( "The amount of time to wait between AWS API calls while monitoring the " "state of an Glue Job." ), ) _error_states = ["FAILED", "STOPPED", "ERROR", "TIMEOUT"] aws_credentials: AwsCredentials = Field( title="AWS Credentials", default_factory=AwsCredentials, description="The AWS credentials to use to connect to Glue.", ) client: _GlueJobClient = Field(default=None, description="") async def fetch_result(self) -> str: """fetch glue job state""" job = self._get_job_run() return job["JobRun"]["JobRunState"] def wait_for_completion(self) -> None: """ Wait for the job run to complete and get exit code """ self.logger.info(f"watching job {self.job_name} with run id {self.job_id}") while True: job = self._get_job_run() job_state = job["JobRun"]["JobRunState"] if job_state in self._error_states: # Generate a dynamic exception type from the AWS name self.logger.error(f"job failed: {job['JobRun']['ErrorMessage']}") raise RuntimeError(job["JobRun"]["ErrorMessage"]) elif job_state == "SUCCEEDED": self.logger.info(f"job succeeded: {self.job_id}") break time.sleep(self.job_watch_poll_interval) def _get_job_run(self): """get glue job""" return self.client.get_job_run(JobName=self.job_name, RunId=self.job_id)
GlueJobRun
python
kamyu104__LeetCode-Solutions
Python/determine-the-minimum-sum-of-a-k-avoiding-array.py
{ "start": 61, "end": 425 }
class ____(object): def minimumSum(self, n, k): """ :type n: int :type k: int :rtype: int """ def arithmetic_progression_sum(a, d, n): return (a+(a+(n-1)*d))*n//2 a = min(k//2, n) b = n-a return arithmetic_progression_sum(1, 1, a)+arithmetic_progression_sum(k, 1, b)
Solution
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 456824, "end": 457465 }
class ____(sgqlc.types.Interface): """Metadata for an audit entry with action team.*""" __schema__ = github_schema __field_names__ = ("team", "team_name", "team_resource_path", "team_url") team = sgqlc.types.Field("Team", graphql_name="team") """The team associated with the action""" team_name = sgqlc.types.Field(String, graphql_name="teamName") """The name of the team""" team_resource_path = sgqlc.types.Field(URI, graphql_name="teamResourcePath") """The HTTP path for this team""" team_url = sgqlc.types.Field(URI, graphql_name="teamUrl") """The HTTP URL for this team"""
TeamAuditEntryData
python
pytorch__pytorch
torch/ao/nn/quantized/modules/activation.py
{ "start": 5885, "end": 7293 }
class ____(torch.nn.Softmax): r"""This is the quantized version of :class:`~torch.nn.Softmax`. Args: dim: A dimension along which Softmax will be computed (so every slice along dim will sum to 1). scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor """ def __init__(self, dim=None, scale=1.0, zero_point=0): super().__init__() self.dim = dim self.scale = scale self.zero_point = zero_point def forward(self, input): dim = self.dim if dim is None: stacklevel = 3 # Note: adding the mypy ignore on _get_softmax_dim seems less bad # than making `_get_softmax_dim` an official API. dim = torch.nn.functional._get_softmax_dim( # type: ignore[attr-defined] "softmax", input.dim(), stacklevel ) return torch.ops.quantized.softmax(input, dim, self.scale, self.zero_point) def _get_name(self): return "QuantizedSoftmax" @staticmethod def from_float(mod, use_precomputed_fake_quant=False): scale, zero_point = mod.activation_post_process.calculate_qparams() return Softmax(mod.dim, float(scale), int(zero_point)) @classmethod def from_reference(cls, mod, scale, zero_point): return cls(mod.dim, float(scale), int(zero_point))
Softmax
python
EpistasisLab__tpot
tpot/tpot_estimator/templates/tpottemplates.py
{ "start": 1863, "end": 19046 }
class ____(TPOTEstimator): def __init__( self, search_space = "linear", scorers=['neg_mean_squared_error'], scorers_weights=[1], cv = 10, #remove this and use a value based on dataset size? other_objective_functions=[], #tpot.objectives.estimator_objective_functions.number_of_nodes_objective], other_objective_functions_weights = [], objective_function_names = None, bigger_is_better = True, categorical_features = None, memory = None, preprocessing = False, max_time_mins=60, max_eval_time_mins=10, n_jobs = 1, validation_strategy = "none", validation_fraction = .2, early_stop = None, warm_start = False, periodic_checkpoint_folder = None, verbose = 2, memory_limit = None, client = None, random_state=None, allow_inner_regressors=None, **tpotestimator_kwargs, ): ''' An sklearn baseestimator that uses genetic programming to optimize a regression pipeline. For more parameters, see the TPOTEstimator class. Parameters ---------- search_space : (String, tpot.search_spaces.SearchSpace) - String : The default search space to use for the optimization. | String | Description | | :--- | :----: | | linear | A linear pipeline with the structure of "Selector->(transformers+Passthrough)->(classifiers/regressors+Passthrough)->final classifier/regressor." For both the transformer and inner estimator layers, TPOT may choose one or more transformers/classifiers, or it may choose none. The inner classifier/regressor layer is optional. | | linear-light | Same search space as linear, but without the inner classifier/regressor layer and with a reduced set of faster running estimators. | | graph | TPOT will optimize a pipeline in the shape of a directed acyclic graph. The nodes of the graph can include selectors, scalers, transformers, or classifiers/regressors (inner classifiers/regressors can optionally be not included). This will return a custom GraphPipeline rather than an sklearn Pipeline. More details in Tutorial 6. 
| | graph-light | Same as graph search space, but without the inner classifier/regressors and with a reduced set of faster running estimators. | | mdr |TPOT will search over a series of feature selectors and Multifactor Dimensionality Reduction models to find a series of operators that maximize prediction accuracy. The TPOT MDR configuration is specialized for genome-wide association studies (GWAS), and is described in detail online here. Note that TPOT MDR may be slow to run because the feature selection routines are computationally expensive, especially on large datasets. | - SearchSpace : The search space to use for the optimization. This should be an instance of a SearchSpace. The search space to use for the optimization. This should be an instance of a SearchSpace. TPOT has groups of search spaces found in the following folders, tpot.search_spaces.nodes for the nodes in the pipeline and tpot.search_spaces.pipelines for the pipeline structure. scorers : (list, scorer) A scorer or list of scorers to be used in the cross-validation process. see https://scikit-learn.org/stable/modules/model_evaluation.html scorers_weights : list A list of weights to be applied to the scorers during the optimization process. classification : bool If True, the problem is treated as a classification problem. If False, the problem is treated as a regression problem. Used to determine the CV strategy. cv : int, cross-validator - (int): Number of folds to use in the cross-validation process. By uses the sklearn.model_selection.KFold cross-validator for regression and StratifiedKFold for classification. In both cases, shuffled is set to True. - (sklearn.model_selection.BaseCrossValidator): A cross-validator to use in the cross-validation process. - max_depth (int): The maximum depth from any node to the root of the pipelines to be generated. other_objective_functions : list, default=[] A list of other objective functions to apply to the pipeline. 
The function takes a single parameter for the graphpipeline estimator and returns either a single score or a list of scores. other_objective_functions_weights : list, default=[] A list of weights to be applied to the other objective functions. objective_function_names : list, default=None A list of names to be applied to the objective functions. If None, will use the names of the objective functions. bigger_is_better : bool, default=True If True, the objective function is maximized. If False, the objective function is minimized. Use negative weights to reverse the direction. categorical_features : list or None Categorical columns to inpute and/or one hot encode during the preprocessing step. Used only if preprocessing is not False. categorical_features: list or None Categorical columns to inpute and/or one hot encode during the preprocessing step. Used only if preprocessing is not False. - None : If None, TPOT will automatically use object columns in pandas dataframes as objects for one hot encoding in preprocessing. - List of categorical features. If X is a dataframe, this should be a list of column names. If X is a numpy array, this should be a list of column indices memory: Memory object or string, default=None If supplied, pipeline will cache each transformer after calling fit with joblib.Memory. This feature is used to avoid computing the fit transformers within a pipeline if the parameters and input data are identical with another fitted pipeline during optimization process. - String 'auto': TPOT uses memory caching with a temporary directory and cleans it up upon shutdown. - String path of a caching directory TPOT uses memory caching with the provided directory and TPOT does NOT clean the caching directory up upon shutdown. If the directory does not exist, TPOT will create it. - Memory object: TPOT uses the instance of joblib.Memory for memory caching, and TPOT does NOT clean the caching directory up upon shutdown. - None: TPOT does not use memory caching. 
preprocessing : bool or BaseEstimator/Pipeline, EXPERIMENTAL A pipeline that will be used to preprocess the data before CV. Note that the parameters for these steps are not optimized. Add them to the search space to be optimized. - bool : If True, will use a default preprocessing pipeline which includes imputation followed by one hot encoding. - Pipeline : If an instance of a pipeline is given, will use that pipeline as the preprocessing pipeline. max_time_mins : float, default=float("inf") Maximum time to run the optimization. If none or inf, will run until the end of the generations. max_eval_time_mins : float, default=60*5 Maximum time to evaluate a single individual. If none or inf, there will be no time limit per evaluation. n_jobs : int, default=1 Number of processes to run in parallel. validation_strategy : str, default='none' EXPERIMENTAL The validation strategy to use for selecting the final pipeline from the population. TPOT may overfit the cross validation score. A second validation set can be used to select the final pipeline. - 'auto' : Automatically determine the validation strategy based on the dataset shape. - 'reshuffled' : Use the same data for cross validation and final validation, but with different splits for the folds. This is the default for small datasets. - 'split' : Use a separate validation set for final validation. Data will be split according to validation_fraction. This is the default for medium datasets. - 'none' : Do not use a separate validation set for final validation. Select based on the original cross-validation score. This is the default for large datasets. validation_fraction : float, default=0.2 EXPERIMENTAL The fraction of the dataset to use for the validation set when validation_strategy is 'split'. Must be between 0 and 1. early_stop : int, default=None Number of generations without improvement before early stopping. All objectives must have converged within the tolerance for this to be triggered. 
In general a value of around 5-20 is good. warm_start : bool, default=False If True, will use the continue the evolutionary algorithm from the last generation of the previous run. periodic_checkpoint_folder : str, default=None Folder to save the population to periodically. If None, no periodic saving will be done. If provided, training will resume from this checkpoint. verbose : int, default=1 How much information to print during the optimization process. Higher values include the information from lower values. 0. nothing 1. progress bar 3. best individual 4. warnings >=5. full warnings trace 6. evaluations progress bar. (Temporary: This used to be 2. Currently, using evaluation progress bar may prevent some instances were we terminate a generation early due to it reaching max_time_mins in the middle of a generation OR a pipeline failed to be terminated normally and we need to manually terminate it.) memory_limit : str, default=None Memory limit for each job. See Dask [LocalCluster documentation](https://distributed.dask.org/en/stable/api.html#distributed.Client) for more information. client : dask.distributed.Client, default=None A dask client to use for parallelization. If not None, this will override the n_jobs and memory_limit parameters. If None, will create a new client with num_workers=n_jobs and memory_limit=memory_limit. random_state : int, None, default=None A seed for reproducability of experiments. This value will be passed to numpy.random.default_rng() to create an instnce of the genrator to pass to other classes - int Will be used to create and lock in Generator instance with 'numpy.random.default_rng()' - None Will be used to create Generator for 'numpy.random.default_rng()' where a fresh, unpredictable entropy will be pulled from the OS allow_inner_regressors : bool, default=True If True, the search space will include ensembled regressors. 
Attributes ---------- fitted_pipeline_ : GraphPipeline A fitted instance of the GraphPipeline that inherits from sklearn BaseEstimator. This is fitted on the full X, y passed to fit. evaluated_individuals : A pandas data frame containing data for all evaluated individuals in the run. Columns: - *objective functions : The first few columns correspond to the passed in scorers and objective functions - Parents : A tuple containing the indexes of the pipelines used to generate the pipeline of that row. If NaN, this pipeline was generated randomly in the initial population. - Variation_Function : Which variation function was used to mutate or crossover the parents. If NaN, this pipeline was generated randomly in the initial population. - Individual : The internal representation of the individual that is used during the evolutionary algorithm. This is not an sklearn BaseEstimator. - Generation : The generation the pipeline first appeared. - Pareto_Front : The nondominated front that this pipeline belongs to. 0 means that its scores is not strictly dominated by any other individual. To save on computational time, the best frontier is updated iteratively each generation. The pipelines with the 0th pareto front do represent the exact best frontier. However, the pipelines with pareto front >= 1 are only in reference to the other pipelines in the final population. All other pipelines are set to NaN. - Instance : The unfitted GraphPipeline BaseEstimator. - *validation objective functions : Objective function scores evaluated on the validation set. - Validation_Pareto_Front : The full pareto front calculated on the validation set. This is calculated for all pipelines with Pareto_Front equal to 0. Unlike the Pareto_Front which only calculates the frontier and the final population, the Validation Pareto Front is calculated for all pipelines tested on the validation set. 
pareto_front : The same pandas dataframe as evaluated individuals, but containing only the frontier pareto front pipelines. ''' self.search_space = search_space self.scorers = scorers self.scorers_weights = scorers_weights self.cv = cv self.other_objective_functions = other_objective_functions self.other_objective_functions_weights = other_objective_functions_weights self.objective_function_names = objective_function_names self.bigger_is_better = bigger_is_better self.categorical_features = categorical_features self.memory = memory self.preprocessing = preprocessing self.max_time_mins = max_time_mins self.max_eval_time_mins = max_eval_time_mins self.n_jobs = n_jobs self.validation_strategy = validation_strategy self.validation_fraction = validation_fraction self.early_stop = early_stop self.warm_start = warm_start self.periodic_checkpoint_folder = periodic_checkpoint_folder self.verbose = verbose self.memory_limit = memory_limit self.client = client self.random_state = random_state self.allow_inner_regressors = allow_inner_regressors self.tpotestimator_kwargs = tpotestimator_kwargs self.initialized = False def fit(self, X, y): if not self.initialized: get_search_space_params = {"n_classes": None, "n_samples":len(y), "n_features":X.shape[1], "random_state":self.random_state} search_space = get_template_search_spaces(self.search_space, classification=False, inner_predictors=self.allow_inner_regressors, **get_search_space_params) super(TPOTRegressor,self).__init__( search_space=search_space, scorers=self.scorers, scorers_weights=self.scorers_weights, cv=self.cv, other_objective_functions=self.other_objective_functions, #tpot.objectives.estimator_objective_functions.number_of_nodes_objective], other_objective_functions_weights = self.other_objective_functions_weights, objective_function_names = self.objective_function_names, bigger_is_better = self.bigger_is_better, categorical_features = self.categorical_features, memory = self.memory, preprocessing = 
self.preprocessing, max_time_mins=self.max_time_mins, max_eval_time_mins=self.max_eval_time_mins, n_jobs=self.n_jobs, validation_strategy = self.validation_strategy, validation_fraction = self.validation_fraction, early_stop = self.early_stop, warm_start = self.warm_start, periodic_checkpoint_folder = self.periodic_checkpoint_folder, verbose = self.verbose, classification=False, memory_limit = self.memory_limit, client = self.client, random_state=self.random_state, **self.tpotestimator_kwargs) self.initialized = True return super().fit(X,y)
TPOTRegressor
python
pytorch__pytorch
test/dynamo/test_guard_serialization.py
{ "start": 9199, "end": 14199 }
class ____(torch._inductor.test_case.TestCase): def _tracefunc(self, frame, event, arg): if event != "call": return if self._frame_state is not None: return self._frame_state = _FrameState( f_locals=dict(frame.f_locals), f_globals=frame.f_globals, f_code=frame.f_code, f_builtins=frame.f_builtins, ) def _test_serialization(self, guard_type, fn, *args, **kwargs): # kwargs might contain a callable that generates kwargs torch._dynamo.reset() kwarg_gen_fn = kwargs.get("_gen_fn") if kwarg_gen_fn is not None: kwargs = kwarg_gen_fn() self._frame_state = None sys.settrace(self._tracefunc) if isinstance(fn, torch.nn.Module): fn = fn.forward try: fn(*args, **kwargs) finally: sys.settrace(None) assert self._frame_state is not None # Set f_locals from regenerated kwargs to handle exhausted input iterators # NB: This is super janky and might cause unforeseen problems if kwarg_gen_fn is not None: kwargs = kwarg_gen_fn() for key in self._frame_state.f_locals: if key in kwargs and isinstance(kwargs[key], Iterator): self._frame_state.f_locals[key] = kwargs[key] def guard_filter_fn(guards): ret = [ g.guard_type == guard_type or guard_type in g.derived_guard_types for g in guards ] self.assertTrue(any(ret)) return ret ref_gm = None loaded_gm = None def transform(instructions: list, code_options: dict[str, object]): """ The goal is here is not to reimplement dynamo, but just to have a simplified version to extract the state from symbolic convert. Should not work on all cases, but should work on simple functions in this test file. 
""" nonlocal ref_gm nonlocal loaded_gm torch._dynamo.convert_frame.initial_global_state = ( torch._C._dynamo.guards.GlobalStateGuard() ) tracer = InstructionTranslator( instructions, self._frame_state.f_code, self._frame_state.f_locals, self._frame_state.f_globals, self._frame_state.f_builtins, fn.__closure__ or (), torch.overrides._get_current_function_mode_stack(), code_options, torch._dynamo.lookup_backend("eager"), one_graph=False, export=False, export_constraints=None, frame_state=None, speculation_log=SpeculationLog(), exn_vt_stack=ExceptionStack(), distributed_state=None, package=None, ) with ( compile_context( CompileContext(CompileId(frame_id=0, frame_compile_id=0)) ), tracing(tracer.output.tracing_context), tracer.set_current_tx(), get_metrics_context(), dynamo_timed(""), ): tracer.run() ref_gm = CheckFunctionManager( self._frame_state.f_code, tracer.output, guard_filter_fn=guard_filter_fn, ).guard_manager check_fn_manager = CheckFunctionManager( self._frame_state.f_code, tracer.output, guard_filter_fn=guard_filter_fn, save_guards=True, ) guards_state = check_fn_manager.guards_state self._cached_guards_state = guards_state self._cached_f_code = self._frame_state.f_code self.assertIsNotNone(guards_state) guards_state = torch._dynamo.package.load_guards_state(guards_state) loaded_gm = torch._dynamo.package.load_guard_manager( guards_state, self._frame_state.f_code, self._frame_state.f_globals, ) try: transform_code_object(self._frame_state.f_code, transform) finally: torch._dynamo.convert_frame.initial_global_state = None self._frame_state = None self.assertIsNotNone(ref_gm) self.assertIsNotNone(loaded_gm) return ref_gm, loaded_gm def _test_check_fn(self, ref, loaded, inputs, expected): self.assertIsInstance(inputs, dict) self.assertEqual(ref.check(inputs), expected) self.assertEqual(ref.check(inputs), loaded.check(inputs)) @torch._dynamo.config.patch({"strict_precompile": True})
TestGuardSerializationBase
python
Lightning-AI__lightning
src/lightning/pytorch/demos/boring_classes.py
{ "start": 8374, "end": 8913 }
class ____(BoringModel): """ .. warning:: This is meant for testing/debugging and is experimental. """ def __init__(self) -> None: super().__init__() self.automatic_optimization = False def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT: opt = self.optimizers() assert isinstance(opt, (Optimizer, LightningOptimizer)) loss = self.step(batch) opt.zero_grad() self.manual_backward(loss) opt.step() return loss
ManualOptimBoringModel
python
google__jax
jax/_src/effects.py
{ "start": 3587, "end": 5041 }
class ____: def __init__(self): self._effect_types: set[type[Effect]] = set() def __repr__(self): return f"EffectTypeSet({self._effect_types})" def add_type(self, effect_type: type[Effect]): self._effect_types.add(effect_type) def contains(self, eff: Effect) -> bool: return any(isinstance(eff, eff_type) for eff_type in self._effect_types) def filter_in(self, effects: Iterable[Effect]) -> list[Effect]: return [eff for eff in effects if self.contains(eff)] def filter_not_in(self, effects: Iterable[Effect]) -> list[Effect]: return [eff for eff in effects if not self.contains(eff)] no_effects: Effects = frozenset() ordered_effects: EffectTypeSet = EffectTypeSet() # By default, ordered effects are not allowed in multi-device computations, # because we cannot ensure a total order. Optionally, an effect can be # declared as shardable, which means that effects will appear in program order # but for a given program point we may see several side effects on the # participating devices, and there is no guarantee of their relative ordering. shardable_ordered_effects: EffectTypeSet = EffectTypeSet() lowerable_effects: EffectTypeSet = EffectTypeSet() control_flow_allowed_effects: EffectTypeSet = EffectTypeSet() custom_derivatives_allowed_effects: EffectTypeSet = EffectTypeSet() remat_allowed_effects: EffectTypeSet = EffectTypeSet() partial_eval_kept_effects: EffectTypeSet = EffectTypeSet()
EffectTypeSet
python
doocs__leetcode
solution/1000-1099/1018.Binary Prefix Divisible By 5/Solution.py
{ "start": 0, "end": 214 }
class ____: def prefixesDivBy5(self, nums: List[int]) -> List[bool]: ans = [] x = 0 for v in nums: x = (x << 1 | v) % 5 ans.append(x == 0) return ans
Solution
python
coleifer__peewee
peewee.py
{ "start": 68977, "end": 71602 }
class ____(SelectBase): def __init__(self, lhs, op, rhs): super(CompoundSelectQuery, self).__init__() self.lhs = lhs self.op = op self.rhs = rhs @property def _returning(self): return self.lhs._returning @database_required def exists(self, database): query = Select((self.limit(1),), (SQL('1'),)).bind(database) return bool(query.scalar()) def _get_query_key(self): return (self.lhs.get_query_key(), self.rhs.get_query_key()) def _wrap_parens(self, ctx, subq): csq_setting = ctx.state.compound_select_parentheses if not csq_setting or csq_setting == CSQ_PARENTHESES_NEVER: return False elif csq_setting == CSQ_PARENTHESES_ALWAYS: return True elif csq_setting == CSQ_PARENTHESES_UNNESTED: if ctx.state.in_expr or ctx.state.in_function: # If this compound select query is being used inside an # expression, e.g., an IN or EXISTS(). return False # If the query on the left or right is itself a compound select # query, then we do not apply parentheses. However, if it is a # regular SELECT query, we will apply parentheses. return not isinstance(subq, CompoundSelectQuery) def __sql__(self, ctx): if ctx.scope == SCOPE_COLUMN: return self.apply_column(ctx) # Call parent method to handle any CTEs. super(CompoundSelectQuery, self).__sql__(ctx) outer_parens = ctx.subquery or (ctx.scope == SCOPE_SOURCE) with ctx(parentheses=outer_parens): # Should the left-hand query be wrapped in parentheses? lhs_parens = self._wrap_parens(ctx, self.lhs) with ctx.scope_normal(parentheses=lhs_parens, subquery=False): ctx.sql(self.lhs) ctx.literal(' %s ' % self.op) with ctx.push_alias(): # Should the right-hand query be wrapped in parentheses? rhs_parens = self._wrap_parens(ctx, self.rhs) with ctx.scope_normal(parentheses=rhs_parens, subquery=False): ctx.sql(self.rhs) # Apply ORDER BY, LIMIT, OFFSET. We use the "values" scope so that # entity names are not fully-qualified. This is a bit of a hack, as # we're relying on the logic in Column.__sql__() to not fully # qualify column names. 
with ctx.scope_values(): self._apply_ordering(ctx) return self.apply_alias(ctx)
CompoundSelectQuery
python
pytest-dev__pytest
doc/en/example/nonpython/conftest.py
{ "start": 1469, "end": 1549 }
class ____(Exception): """Custom exception for error reporting."""
YamlException
python
faif__python-patterns
patterns/structural/proxy.py
{ "start": 1382, "end": 2503 }
class ____(Subject): def __init__(self) -> None: self._real_subject = RealSubject() def do_the_job(self, user: str) -> None: """ logging and controlling access are some examples of proxy usages. """ print(f"[log] Doing the job for {user} is requested.") if user == "admin": self._real_subject.do_the_job(user) else: print("[log] I can do the job just for `admins`.") def client(job_doer: Union[RealSubject, Proxy], user: str) -> None: job_doer.do_the_job(user) def main(): """ >>> proxy = Proxy() >>> real_subject = RealSubject() >>> client(proxy, 'admin') [log] Doing the job for admin is requested. I am doing the job for admin >>> client(proxy, 'anonymous') [log] Doing the job for anonymous is requested. [log] I can do the job just for `admins`. >>> client(real_subject, 'admin') I am doing the job for admin >>> client(real_subject, 'anonymous') I am doing the job for anonymous """ if __name__ == "__main__": import doctest doctest.testmod()
Proxy
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/layout/processors.py
{ "start": 10070, "end": 10671 }
class ____(Processor): """ Processor that masks the input. (For passwords.) :param char: (string) Character to be used. "*" by default. """ def __init__(self, char: str = "*") -> None: self.char = char def apply_transformation(self, ti: TransformationInput) -> Transformation: fragments: StyleAndTextTuples = cast( StyleAndTextTuples, [ (style, self.char * len(text), *handler) for style, text, *handler in ti.fragments ], ) return Transformation(fragments)
PasswordProcessor
python
dagster-io__dagster
python_modules/dagster/dagster/_config/config_type.py
{ "start": 4092, "end": 4468 }
class ____(ConfigType): def __init__( self, key: str, given_name: Optional[str], scalar_kind: ConfigScalarKind, **kwargs: typing.Any, ): self.scalar_kind = check.inst_param(scalar_kind, "scalar_kind", ConfigScalarKind) super().__init__(key, kind=ConfigTypeKind.SCALAR, given_name=given_name, **kwargs)
ConfigScalar
python
matplotlib__matplotlib
lib/matplotlib/backends/backend_gtk3.py
{ "start": 15979, "end": 18656 }
class ____(ToolContainerBase, Gtk.Box): _icon_extension = '-symbolic.svg' def __init__(self, toolmanager): ToolContainerBase.__init__(self, toolmanager) Gtk.Box.__init__(self) self.set_property('orientation', Gtk.Orientation.HORIZONTAL) self._message = Gtk.Label() self._message.set_justify(Gtk.Justification.RIGHT) self.pack_end(self._message, False, False, 0) self.show_all() self._groups = {} self._toolitems = {} def add_toolitem(self, name, group, position, image_file, description, toggle): if toggle: button = Gtk.ToggleToolButton() else: button = Gtk.ToolButton() button.set_label(name) if image_file is not None: image = Gtk.Image.new_from_gicon( Gio.Icon.new_for_string(image_file), Gtk.IconSize.LARGE_TOOLBAR) button.set_icon_widget(image) if position is None: position = -1 self._add_button(button, group, position) signal = button.connect('clicked', self._call_tool, name) button.set_tooltip_text(description) button.show_all() self._toolitems.setdefault(name, []) self._toolitems[name].append((button, signal)) def _add_button(self, button, group, position): if group not in self._groups: if self._groups: self._add_separator() toolbar = Gtk.Toolbar() toolbar.set_style(Gtk.ToolbarStyle.ICONS) self.pack_start(toolbar, False, False, 0) toolbar.show_all() self._groups[group] = toolbar self._groups[group].insert(button, position) def _call_tool(self, btn, name): self.trigger_tool(name) def toggle_toolitem(self, name, toggled): if name not in self._toolitems: return for toolitem, signal in self._toolitems[name]: toolitem.handler_block(signal) toolitem.set_active(toggled) toolitem.handler_unblock(signal) def remove_toolitem(self, name): for toolitem, _signal in self._toolitems.pop(name, []): for group in self._groups: if toolitem in self._groups[group]: self._groups[group].remove(toolitem) def _add_separator(self): sep = Gtk.Separator() sep.set_property("orientation", Gtk.Orientation.VERTICAL) self.pack_start(sep, False, True, 0) sep.show_all() def set_message(self, s): 
self._message.set_label(s) @backend_tools._register_tool_class(FigureCanvasGTK3)
ToolbarGTK3
python
huggingface__transformers
src/transformers/models/gptj/modeling_gptj.py
{ "start": 16427, "end": 17163 }
class ____(nn.Module): def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim super().__init__() embed_dim = config.n_embd self.fc_in = nn.Linear(embed_dim, intermediate_size) self.fc_out = nn.Linear(intermediate_size, embed_dim) self.act = ACT2FN[config.activation_function] self.dropout = nn.Dropout(config.resid_pdrop) def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor: hidden_states = self.fc_in(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.fc_out(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
GPTJMLP
python
dagster-io__dagster
python_modules/dagster/dagster/components/resolved/core_models.py
{ "start": 9721, "end": 10365 }
class ____(Resolvable): """Resolvable object representing only a configurable asset key.""" key: Optional[ResolvedAssetKey] = None key_prefix: Annotated[ Optional[CoercibleToAssetKeyPrefix], Resolver.default(description="Prefix the existing asset key with the provided value."), ] = None def resolve_asset_spec(context: ResolutionContext, model): return AssetSpec(**resolve_fields(model, AssetSpecKwargs, context)) ResolvedAssetSpec: TypeAlias = Annotated[ AssetSpec, Resolver( resolve_asset_spec, model_field_type=AssetSpecKwargs.model(), ), ] @record
AssetSpecKeyUpdateKwargs