Columns:
  language    stringclasses   1 value
  repo        stringclasses   346 values
  path        stringlengths   6-201
  class_span  dict
  source      stringlengths   21-2.38M
  target      stringlengths   1-96
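Each record pairs the body of a Python class whose name has been masked with `____` (the `source` column) against the original class name (the `target` column); `class_span` records the character offsets of that class inside its source file. For illustration only (not part of the dataset), the sketch below shows one way such a dataset could be loaded and iterated; the dataset identifier is a placeholder and the use of the Hugging Face `datasets` library is an assumption, not something stated by this dump.

from datasets import load_dataset  # assumes the Hugging Face `datasets` library

# "example-org/masked-class-names" is a hypothetical identifier, not the real dataset name.
ds = load_dataset("example-org/masked-class-names", split="train")

for record in ds.select(range(3)):
    masked_source = record["source"]   # class body with its name replaced by ____
    class_name = record["target"]      # the original class name, e.g. "Repartition"
    span = record["class_span"]        # {"start": ..., "end": ...} character offsets
    assert "____" in masked_source
    print(record["repo"], record["path"], span["start"], span["end"], "->", class_name)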
python
pytorch__pytorch
test/inductor/test_max_autotune.py
{ "start": 118154, "end": 133292 }
class ____(TestCase): @classmethod def setUpClass(cls): super().setUpClass() cls._stack = contextlib.ExitStack() cls._stack.enter_context( config.patch( { "max_autotune": True, "prologue_fusion": True, "benchmark_epilogue_fusion": False, "shape_padding": False, "max_autotune_gemm_backends": "TRITON", "test_configs.max_mm_configs": 4, # significantly speeds up tests } ) ) def check_code(self, code_str, num_kernels, num_allocs, num_deallocs): FileCheck().check(get_func_call()).check_count( get_kernel_launch(), num_kernels, exactly=True, ).run(code_str) if num_allocs is not None: FileCheck().check(get_func_call()).check_count( "empty_strided", num_allocs, exactly=True ).run(code_str) # skip the deallocation check when using cpp_wrapper; most deallocations happen # outside of our control via RAIIAtenTensorHandle if num_deallocs is not None and not config.cpp_wrapper: FileCheck().check(get_func_call()).check_count( "del", num_deallocs, exactly=True ).run(code_str) @skipIfXpu( msg="Triton issue exposed by new driver, will be resolved after next triton update." ) @parametrize("sizes", ((64, 128, 256), (128, 128, 128), (63, 120, 250))) def test_upcast(self, sizes): M, K, N = sizes x = torch.rand([M, K], dtype=torch.float16, device=GPU_TYPE) y = torch.rand([K, N], dtype=torch.float, device=GPU_TYPE) def foo(x, y): return x.to(y.dtype) @ y out, code = run_and_get_code(torch.compile(foo), x, y) self.assertEqual(out, foo(x, y), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=2) if config.triton.native_matmul: # native matmul preserves zero mask - need to optimize; see codegen/triton.py FileCheck().check("a =").check("tl.where").check("tl.dot").run(code[0]) else: # upcast preserves zero mask FileCheck().check("a =").check_not("tl.where").check("tl.dot").run(code[0]) @unittest.skip("Triton bug in compilation") def test_gather_fusion(self): M, K, N = (64, 128, 256) x = torch.rand([M, K], dtype=torch.float16, device=GPU_TYPE) y = torch.rand([K, N], dtype=torch.float16, device=GPU_TYPE) index = torch.randperm(M, device=GPU_TYPE) def foo(x, y, index): return (x[index]) @ y out, code = run_and_get_code(torch.compile(foo), x, y, index) self.assertEqual(out, foo(x, y, index), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=3) # should be done in low precision ( FileCheck() .check("for k_idx") .check_not("to(tl.float32)") .check("dot") .run(code[0]) ) @unittest.skipIf(TEST_WITH_ROCM, "FP8 is not supported on ROCM") @unittest.skipIf( not PLATFORM_SUPPORTS_FP8, "FP8 is only supported on H100+, SM 8.9 and MI300+ devices", ) @config.patch({"triton.native_matmul": False}) def test_low_precision(self): M = K = N = 128 x = torch.rand([M, K], device=GPU_TYPE).to(torch.float8_e4m3fn) y = torch.rand([K, N], dtype=torch.bfloat16, device=GPU_TYPE) def foo(x, y): return x.to(y.dtype) @ y out, code = run_and_get_code(torch.compile(foo), x, y) self.assertEqual(out, foo(x, y), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=2) # should be done in low precision, no arithmetic ( FileCheck() .check("for k_idx") .check_not("to(tl.float32)") .check("dot") .run(code[0]) ) def foo(x, y): return (x.to(y.dtype) + 1) @ y out, code = run_and_get_code(torch.compile(foo), x, y) self.assertEqual(out, foo(x, y), atol=0.05, rtol=0.05) # should not be done in low precision, two kernels self.check_code(code[0], num_kernels=2, num_allocs=2, num_deallocs=3) @unittest.skipIf( config.triton.native_matmul, "generated code is 
different in native matmul", ) def test_downcast(self): # per heuristics, dont fuse a downcast into a mm because it would lead to more reads inside kernel M, K, N = (64, 128, 256) x = torch.rand([M, K], dtype=torch.float, device=GPU_TYPE) y = torch.rand([K, N], dtype=torch.float16, device=GPU_TYPE) def foo(x, y): return x.to(y.dtype) @ y out, code = run_and_get_code(torch.compile(foo), x, y) self.assertEqual(out, foo(x, y), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=2, num_allocs=2, num_deallocs=3) @parametrize("sizes", ((64, 128, 256), (64, 64, 64), (64, 120, 64))) @unittest.skipIf( config.triton.native_matmul, "generated code is different in native matmul", ) def test_multiple_fusions(self, sizes): M, K, N = sizes def foo(x, y): return ((x - 1.1) @ (y + 1.1)) * 1.1 x = torch.rand([M, K], dtype=torch.float, device=GPU_TYPE) y = torch.rand([K, N], dtype=torch.float, device=GPU_TYPE) out, code = run_and_get_code(torch.compile(foo), x, y) self.assertEqual(out, foo(x, y), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=2) # check that we do not CSE any variables between prologues, epilogues FileCheck().check("def triton").check_count("= 1.1", 3, exactly=True).check( "tl.store" ).run(code[0]) @config.patch( { "max_autotune_gemm_backends": "Triton", "benchmark_epilogue_fusion": True, "max_epilogue_benchmarked_choices": 3, } ) @skipIfXpu( msg="The fusion not happened because it do not speedup on XPU, see issue #146568" ) def test_pending_fusions_multiple(self): def multi_use(x, y): return (x @ x.T) * (y @ y.T) x = torch.rand([128, 16], device=GPU_TYPE) y = torch.rand([128, 32], device=GPU_TYPE) out, code = run_and_get_code(torch.compile(multi_use), x, y) FileCheck().check(get_func_call()).check_count( get_kernel_launch(), 2, exactly=True ).run(code[0]) self.assertEqual(out, multi_use(x, y), atol=0.05, rtol=0.05) def resolve_pending(x): return (x @ x).relu() x = torch.rand([128, 128], device=GPU_TYPE) out, code = run_and_get_code(torch.compile(resolve_pending), x) FileCheck().check(get_func_call()).check_count( get_kernel_launch(), 1, exactly=True ).run(code[0]) self.assertEqual(out, resolve_pending(x), atol=0.05, rtol=0.05) @config.patch( { "max_autotune_gemm_backends": "Triton", "benchmark_epilogue_fusion": True, "max_epilogue_benchmarked_choices": 3, } ) @skipIfXpu( msg="The fusion not happened because it do not speedup on XPU, see issue #146568" ) def test_pending_fusion_pro_and_epi(self): def test_multiple_fusions(x): y = x.to(torch.float) return (y @ y).relu() x = torch.rand([128, 128], dtype=torch.float16, device=GPU_TYPE) out, code = run_and_get_code(torch.compile(test_multiple_fusions), x) FileCheck().check(get_func_call()).check_count( get_kernel_launch(), 1, exactly=True ).run(code[0]) self.assertEqual(out, test_multiple_fusions(x), atol=0.05, rtol=0.05) @skipIfXpu( msg="Triton issue exposed by new driver, will be resolved after next triton update." 
) @parametrize("sizes", ((64, 128, 256), (128, 128, 128), (63, 120, 250))) def test_multiple_inputs(self, sizes): M, K, N = sizes def foo(x, y, z): return (x + y).to(torch.float) @ z x = torch.rand([M, K], dtype=torch.float16, device=GPU_TYPE) y = torch.rand([M, K], dtype=torch.float16, device=GPU_TYPE) z = torch.rand([K, N], dtype=torch.float, device=GPU_TYPE) out_eager = foo(x, y, z) out, code = run_and_get_code(torch.compile(foo), x, y, z) self.assertEqual(out, out_eager, atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=3) def test_storage_offset_prologue(self): def foo(a): q = a[:64, :] k = a[64:, :] return torch.mm(q + 2, k - 2) inp = torch.randn(128, 64, device=GPU_TYPE) out, code = run_and_get_code(torch.compile(foo), inp) self.assertEqual(out, foo(inp), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=1) @config.patch(realize_reads_threshold=1, realize_opcount_threshold=1) @parametrize("sizes", ((64, 128, 256), (128, 128, 128), (63, 120, 250))) @unittest.skipIf( config.triton.native_matmul, "generated code is different in native matmul", ) def test_prologue_multiple_nodes(self, sizes): M, K, N = sizes def foo(x, y): return ((((x * 2) - 1) / 2) @ (y * 4)) * 3.0 x = torch.rand([M, K], dtype=torch.float, device=GPU_TYPE) y = torch.rand([K, N], dtype=torch.float, device=GPU_TYPE) out, code = run_and_get_code(torch.compile(foo), x, y) self.assertEqual(out, foo(x, y), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=2) @parametrize("K", (63, 64)) def test_broadcast_x(self, K): def foo(x, y): return (x.expand([1, y.shape[0]]) + 1) @ y x = torch.rand([1, 1], dtype=torch.float, device=GPU_TYPE) y = torch.rand([K, 128], dtype=torch.float, device=GPU_TYPE) out, code = run_and_get_code(torch.compile(foo, dynamic=True), x, y) self.assertEqual(out, foo(x, y), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=2) def test_broadcast_y(self): def foo(x, y): return x @ y M = 20 N = K = 1 x = torch.rand([M, K], dtype=torch.float, device=GPU_TYPE) y = torch.rand([K, N], dtype=torch.float, device=GPU_TYPE) torch._dynamo.mark_dynamic(x, 0) out, code = run_and_get_code(torch.compile(foo, dynamic=True), x, y) self.assertEqual(out, foo(x, y), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=2) @unittest.skipIf( config.triton.native_matmul, "generated code is different in native matmul", ) def test_preserves_zero_analysis(self): fns = ( (lambda x: x.relu(), False), # preserves zero (lambda x: x + 1, True), # does not ( lambda x: torch.hypot(x, x), True, ), # not handled in analysis, conservatively assume does not preserve ) def foo(x, y, fn): return fn(x) @ y for fn, should_mask in fns: x = torch.rand([64, 127], dtype=torch.float, device=GPU_TYPE) y = torch.rand([127, 64], dtype=torch.float, device=GPU_TYPE) out, code = run_and_get_code(torch.compile(foo), x, y, fn) self.assertEqual(out, foo(x, y, fn), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=1, num_allocs=1, num_deallocs=2) if should_mask: f = FileCheck().check("k_idx").check("a =").check_same("tl.where") else: f = FileCheck().check("k_idx").check("a =").check_not("tl.where") f.check("tl.dot").run(code[0]) @config.patch(realize_reads_threshold=1, realize_opcount_threshold=1) @parametrize("benchmark_fusion", (True, False)) def test_prologue_read_into_both_inputs(self, benchmark_fusion): M = K = 256 # not supported today. 
it could be, but typically the pointwise nodes would get # inlined into separate nodes. def foo(x): y = (x + 1) * 2 return y @ (y - 2) with config.patch(benchmark_epilogue_fusion=benchmark_fusion): x = torch.rand([M, K], dtype=torch.float, device=GPU_TYPE) out, code = run_and_get_code(torch.compile(foo), x) self.assertEqual(out, foo(x), atol=0.05, rtol=0.05) # not guaranteed to fuse, but still checking correctness if not benchmark_fusion: self.check_code( code[0], num_kernels=2, num_allocs=None, num_deallocs=None ) @config.patch(realize_reads_threshold=1, realize_opcount_threshold=1) @config.patch(allow_buffer_reuse=False) @unittest.skipIf( config.triton.native_matmul, "generated code is different in native matmul", ) def test_mismatched_prologue_group(self): def foo(x, y, z): a = (x + 2) * 2 b = a * y return b @ z x = torch.rand([1, 256], device=GPU_TYPE) y = torch.rand([256, 256], device=GPU_TYPE) z = torch.rand([256, 128], device=GPU_TYPE) out, code = run_and_get_code(torch.compile(foo), x, y, z) self.assertEqual(out, foo(x, y, z), atol=0.05, rtol=0.05) # there's one more dealloc than there should be because of a buffer reuse. TODO: # not sure why disabling buffer reuse doesn't stop self.check_code(code[0], num_kernels=2, num_allocs=2, num_deallocs=4) # XPU have not enabled pad_mm in fx_passes, so there is always one kernel. @skipIfXpu @config.patch(shape_padding=True) @config.patch(force_shape_pad=True) @parametrize("sizes", ((250, 245, 128), (250, 256, 128), (256, 128, 62))) @unittest.skipIf( config.triton.native_matmul, "generated code is different in native matmul", ) def test_prologue_masked_load(self, sizes): M, K, N = sizes def foo(x, y): return x @ y x = torch.rand([250, 245], device=GPU_TYPE) y = torch.rand([245, 128], device=GPU_TYPE) # we should not attempt prologue fusion if it turns an aligned load # into an unaligned load out, code = run_and_get_code(torch.compile(foo), x, y) self.assertEqual(out, foo(x, y), atol=0.05, rtol=0.05) self.check_code(code[0], num_kernels=3, num_allocs=3, num_deallocs=4) if __name__ == "__main__": from torch._inductor.utils import is_big_gpu # Set env to make it work in CI. if HAS_GPU and HAS_CPU and is_big_gpu(): run_tests()
TestPrologueFusion
python
ray-project__ray
python/ray/data/_internal/logical/operators/all_to_all_operator.py
{ "start": 4217, "end": 5874 }
class ____(AbstractAllToAll, LogicalOperatorSupportsPredicatePassThrough):
    """Logical operator for repartition."""

    def __init__(
        self,
        input_op: LogicalOperator,
        num_outputs: int,
        shuffle: bool,
        keys: Optional[List[str]] = None,
        sort: bool = False,
    ):
        if shuffle:
            sub_progress_bar_names = [
                ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME,
                ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME,
            ]
        else:
            sub_progress_bar_names = [
                ShuffleTaskSpec.SPLIT_REPARTITION_SUB_PROGRESS_BAR_NAME,
            ]
        super().__init__(
            "Repartition",
            input_op,
            num_outputs=num_outputs,
            sub_progress_bar_names=sub_progress_bar_names,
        )
        self._shuffle = shuffle
        self._keys = keys
        self._sort = sort

    def infer_metadata(self) -> "BlockMetadata":
        assert len(self._input_dependencies) == 1, len(self._input_dependencies)
        assert isinstance(self._input_dependencies[0], LogicalOperator)
        return self._input_dependencies[0].infer_metadata()

    def infer_schema(
        self,
    ) -> Optional["Schema"]:
        assert len(self._input_dependencies) == 1, len(self._input_dependencies)
        assert isinstance(self._input_dependencies[0], LogicalOperator)
        return self._input_dependencies[0].infer_schema()

    def predicate_passthrough_behavior(self) -> PredicatePassThroughBehavior:
        # Repartition doesn't affect filtering correctness
        return PredicatePassThroughBehavior.PASSTHROUGH
Repartition
python
getsentry__sentry
src/sentry/models/grouphashmetadata.py
{ "start": 3065, "end": 8088 }
class ____(Model): __relocation_scope__ = RelocationScope.Excluded # IMPORTANT: # If you make changes to this schema, increment GROUPHASH_METADATA_SCHEMA_VERSION above so # existing records will get updated with the new data. # GENERAL grouphash = models.OneToOneField( "sentry.GroupHash", related_name="_metadata", on_delete=models.CASCADE ) # When the grouphash was created. Will be null for grouphashes created before we started # collecting metadata. date_added = models.DateTimeField(default=timezone.now, null=True) # The date the metadata was last updated. date_updated = models.DateTimeField(default=timezone.now, null=True) # The version of the metadata schema which produced the data. Useful for backfilling when we add # to or change the data we collect and want to update existing records. schema_version = models.CharField(null=True) # The platform of the event when generated the metadata. Likely different than the project # platform, as event platforms are normalized to a handful of known values, whereas project # platforms are all over the place. platform = models.CharField(null=True) # The event ID of the event which generated the metadata. event_id = models.CharField(max_length=32, null=True) # HASHING # Most recent config to produce this hash latest_grouping_config = models.CharField(null=True) # The primary grouping method (message, stacktrace, fingerprint, etc.) hash_basis: models.Field[HashBasis | None, HashBasis | None] = models.CharField( choices=HashBasis, null=True ) # Metadata about the inputs to the hashing process and the hashing process itself (what # fingerprinting rules were matched? did we parameterize the message? etc.). For the specific # data stored, see the class definitions of the `HashingMetadata` subtypes. hashing_metadata = LegacyTextJSONField(null=True) # SEER # When this hash was sent to Seer. This will be different than `date_added` if we send it to # Seer as part of a backfill rather than during ingest. seer_date_sent = models.DateTimeField(null=True) # Id of the event whose stacktrace was sent to Seer seer_event_sent = models.CharField(max_length=32, null=True) # The version of the Seer model used to process this hash value seer_model = models.CharField(null=True) # The `GroupHash` record representing the match Seer sent back as a match (if any) seer_matched_grouphash = FlexibleForeignKey( "sentry.GroupHash", related_name="seer_matchees", on_delete=models.SET_NULL, null=True ) # The similarity between this hash's stacktrace and the parent (matched) hash's stacktrace seer_match_distance = models.FloatField(null=True) class Meta: app_label = "sentry" db_table = "sentry_grouphashmetadata" @property def group_id(self) -> int | None: return self.grouphash.group_id @property def hash(self) -> str: return self.grouphash.hash __repr__ = sane_repr("grouphash_id", "group_id", "hash", "seer_matched_grouphash") __str__ = __repr__ def get_best_guess_schema_version(self) -> str: """ Temporary hack to let us record metrics for grouphash metadata records created before we were storing schema version. Once we hit May of 2025 or so, we should have backfilled or aged out any such records, and can probably delete this. Note: This is "best guess" because the schema transitions didn't happen right at midnight on the dates in question, even though the logic below implies that they did. For our purposes it's close enough, though, especially since this isn't sticking around forever. 
""" if self.schema_version: return self.schema_version # Any metadata record without a creation date is one created between the time we enabled # backfill for existing grouphashes and the time we added schema versioning, which was # during the version 7 era. if not self.date_added: return "7" if self.date_added < datetime(2024, 10, 1, tzinfo=tz.utc): return "0" elif self.date_added < datetime(2024, 10, 3, tzinfo=tz.utc): return "1" elif self.date_added < datetime(2024, 11, 1, tzinfo=tz.utc): return "2" elif self.date_added < datetime(2024, 11, 18, tzinfo=tz.utc): return "3" elif self.date_added < datetime(2025, 1, 24, tzinfo=tz.utc): return "4" elif self.date_added < datetime(2025, 2, 7, tzinfo=tz.utc): return "5" elif self.date_added < datetime(2025, 2, 11, tzinfo=tz.utc): return "6" # Schema version 8 introduced schema versioning, so anything that version or above will know # its version and have short-circuited at the top of this function else: return "7"
GroupHashMetadata
python
walkccc__LeetCode
solutions/221. Maximal Square/221.py
{ "start": 0, "end": 521 }
class ____:
  def maximalSquare(self, matrix: list[list[str]]) -> int:
    m = len(matrix)
    n = len(matrix[0])
    dp = [[0] * n for _ in range(m)]
    maxLength = 0

    for i in range(m):
      for j in range(n):
        if i == 0 or j == 0 or matrix[i][j] == '0':
          dp[i][j] = 1 if matrix[i][j] == '1' else 0
        else:
          dp[i][j] = min(dp[i - 1][j - 1], dp[i - 1][j], dp[i][j - 1]) + 1
        maxLength = max(maxLength, dp[i][j])

    return maxLength * maxLength
Solution
python
nedbat__coveragepy
tests/test_process.py
{ "start": 59958, "end": 62935 }
class ____(CoverageTest): """Show that we can configure {[run]source} during process-level coverage. There are three interesting variables, for a total of eight tests: 1. -m versus a simple script argument (for example, `python myscript`), 2. filtering for the top-level (main.py) or second-level (sub.py) module, and 3. whether the files are in a package or not. """ @pytest.mark.parametrize("dashm", ["-m", ""]) @pytest.mark.parametrize("package", ["pkg", ""]) @pytest.mark.parametrize("source", ["main", "sub"]) @pytest.mark.xdist_group(name="needs_pth") def test_pth_and_source_work_together( self, dashm: str, package: str, source: str, _create_pth_file: None, ) -> None: """Run the test for a particular combination of factors. The arguments are all strings: * `dashm`: Either "" (run the program as a file) or "-m" (run the program as a module). * `package`: Either "" (put the source at the top level) or a package name to use to hold the source. * `source`: Either "main" or "sub", which file to use as the ``--source`` argument. """ def fullname(modname: str) -> str: """What is the full module name for `modname` for this test?""" if package and dashm: return ".".join((package, modname)) else: return modname def path(basename: str) -> str: """Where should `basename` be created for this test?""" return os.path.join(package, basename) # Main will run sub.py. self.make_file( path("main.py"), """\ import %s a = 2 b = 3 """ % fullname("sub"), ) if package: self.make_file(path("__init__.py"), "") # sub.py will write a few lines. self.make_file( path("sub.py"), """\ f = open("out.txt", "w", encoding="utf-8") f.write("Hello, world!") f.close() """, ) self.make_file( "coverage.ini", """\ [run] source = %s """ % fullname(source), ) self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") if dashm: cmd = "python -m %s" % fullname("main") else: cmd = "python %s" % path("main.py") self.run_command(cmd) with open("out.txt", encoding="utf-8") as f: assert f.read() == "Hello, world!" # Read the data from .coverage self.assert_exists(".coverage") data = coverage.CoverageData() data.read() summary = line_counts(data) assert summary[source + ".py"] == 3 assert len(summary) == 1
ProcessStartupWithSourceTest
python
pytorch__pytorch
torch/ao/quantization/quantizer/xnnpack_quantizer.py
{ "start": 8443, "end": 16304 }
class ____(Quantizer): """ !!! DEPRECATED !!! XNNPACKQuantizer is a marked as deprecated. It will be removed in the future. It has been moved to executorch.backends.xnnpack.quantizer.xnnpack_quantizer.XNNPACKQuantizer. Please use the new quantizer instead. """ supported_config_and_operators = _get_supported_config_and_operators() STATIC_QAT_ONLY_OPS = [ "conv_bn_relu", "conv_bn", "conv_transpose_bn_relu", "conv_transpose_bn", ] # static quantization ops (both PTQ and QAT) # Preserve the order that fusions come before singular ops STATIC_OPS = [ "linear_relu", "linear", "conv_relu", "conv", "conv_transpose_relu", "adaptive_avg_pool2d", # TODO: move this to BoltNNQuantizer? "gru_io_only", "add_relu", "add", "mul_relu", "mul", "cat", ] DYNAMIC_OPS = [ "linear", ] def __init__(self) -> None: super().__init__() self.global_config: QuantizationConfig | None = None self.operator_type_config: dict[ torch._ops.OpOverloadPacket, QuantizationConfig | None ] = {} self.module_type_config: dict[Callable, QuantizationConfig | None] = {} self.module_name_config: dict[str, QuantizationConfig | None] = {} @classmethod def get_supported_quantization_configs(cls) -> list[QuantizationConfig]: op_configs: set[QuantizationConfig] = { spec for spec, _ in cls.supported_config_and_operators } return list(op_configs) @classmethod def get_supported_operator_for_quantization_config( cls, quantization_config: QuantizationConfig | None ) -> list[OperatorPatternType]: if quantization_config is None: all_ops = [] for _, ops in cls.supported_config_and_operators: all_ops.extend(ops) return all_ops for config, ops in cls.supported_config_and_operators: # note: this assumes each entry in cls.supported_spec_and_operators # corresponds to one spec, e.g. we don't have # [(spec1, op_list1), (spec1, op_list2), (spec2, op_list3)] # where the first and second entry have the same spec but did not # merge the op list if config == quantization_config: return ops return [] def set_global(self, quantization_config: QuantizationConfig) -> XNNPACKQuantizer: self.global_config = quantization_config return self def set_operator_type( self, operator_type: torch._ops.OpOverloadPacket, quantization_config: QuantizationConfig, ) -> XNNPACKQuantizer: self.operator_type_config[operator_type] = quantization_config return self def set_module_type( self, module_type: Callable, quantization_config: QuantizationConfig ): """Set quantization_config for a submodule with type: `module_type`, for example: quantizer.set_module_name(Sub) or quantizer.set_module_name(nn.Linear), it will quantize all supported operator/operator patterns in the submodule with this module type with the given `quantization_config` """ self.module_type_config[module_type] = quantization_config return self def set_module_name( self, module_name: str, quantization_config: QuantizationConfig | None ): """Set quantization_config for a submodule with name: `module_name`, for example: quantizer.set_module_name("blocks.sub"), it will quantize all supported operator/operator patterns in the submodule with this module name with the given `quantization_config` """ if quantization_config is None: raise AssertionError("quantization_config == None is not supported yet") self.module_name_config[module_name] = quantization_config return self def transform_for_annotation( self, model: torch.fx.GraphModule ) -> torch.fx.GraphModule: """Transforms scalar values to tensor attributes""" return _convert_scalars_to_attrs(model) def annotate(self, model: torch.fx.GraphModule) -> torch.fx.GraphModule: 
"""just handling global spec for now""" # hacked for handling dynamic linear quant. will fix later. if self.global_config and self.global_config.input_activation.is_dynamic: # type: ignore[union-attr] model = self._annotate_for_dynamic_quantization_config(model) else: model = self._annotate_for_static_quantization_config(model) propagate_annotation(model) return model def _annotate_all_static_patterns( self, model: torch.fx.GraphModule, quantization_config: QuantizationConfig | None, filter_fn: Callable[[Node], bool] | None = None, ) -> torch.fx.GraphModule: # TODO: implement the support for None to be canceling out previous annotations if quantization_config is None: return model if quantization_config.is_qat: for op in self.STATIC_QAT_ONLY_OPS: OP_TO_ANNOTATOR[op](model, quantization_config, filter_fn) for op in self.STATIC_OPS: OP_TO_ANNOTATOR[op](model, quantization_config, filter_fn) return model def _annotate_all_dynamic_patterns( self, model: torch.fx.GraphModule, quantization_config: QuantizationConfig | None, filter_fn: Callable[[Node], bool] | None = None, ) -> torch.fx.GraphModule: # TODO: implement the support for None to be canceling out previous annotations if quantization_config is None: return model for op in self.DYNAMIC_OPS: OP_TO_ANNOTATOR[op](model, quantization_config, filter_fn) return model def _annotate_for_static_quantization_config( self, model: torch.fx.GraphModule ) -> torch.fx.GraphModule: module_name_list = list(self.module_name_config.keys()) for module_name, config in self.module_name_config.items(): self._annotate_all_static_patterns( model, config, _get_module_name_filter(module_name) ) tp_list = list(self.module_type_config.keys()) for module_type, config in self.module_type_config.items(): self._annotate_all_static_patterns( model, config, _get_module_type_filter(module_type) ) self._annotate_all_static_patterns( model, self.global_config, _get_not_module_type_or_name_filter(tp_list, module_name_list), ) return model def _annotate_for_dynamic_quantization_config( self, model: torch.fx.GraphModule ) -> torch.fx.GraphModule: module_name_list = list(self.module_name_config.keys()) for module_name, config in self.module_name_config.items(): self._annotate_all_dynamic_patterns( model, config, _get_module_name_filter(module_name) ) tp_list = list(self.module_type_config.keys()) for module_type, config in self.module_type_config.items(): self._annotate_all_dynamic_patterns( model, config, _get_module_type_filter(module_type) ) self._annotate_all_dynamic_patterns( model, self.global_config, _get_not_module_type_or_name_filter(tp_list, module_name_list), ) return model def validate(self, model: torch.fx.GraphModule) -> None: pass @classmethod def get_supported_operators(cls) -> list[OperatorConfig]: return cls.supported_config_and_operators
XNNPACKQuantizer
python
openai__openai-python
src/openai/types/responses/response_input_file.py
{ "start": 222, "end": 717 }
class ____(BaseModel):
    type: Literal["input_file"]
    """The type of the input item. Always `input_file`."""

    file_data: Optional[str] = None
    """The content of the file to be sent to the model."""

    file_id: Optional[str] = None
    """The ID of the file to be sent to the model."""

    file_url: Optional[str] = None
    """The URL of the file to be sent to the model."""

    filename: Optional[str] = None
    """The name of the file to be sent to the model."""
ResponseInputFile
python
urllib3__urllib3
src/urllib3/contrib/emscripten/fetch.py
{ "start": 2851, "end": 2900 }
class ____(_RequestError): pass
_StreamingError
python
getsentry__sentry
src/sentry/utils/json.py
{ "start": 1922, "end": 5843 }
class ____(JSONEncoder): # Our variant of JSONEncoderForHTML that also accounts for apostrophes # See: https://github.com/simplejson/simplejson/blob/master/simplejson/encoder.py def encode(self, o: object) -> str: # Override JSONEncoder.encode because it has hacks for # performance that make things more complicated. chunks = self.iterencode(o, True) return "".join(chunks) def iterencode(self, o: object, _one_shot: bool = False) -> Generator[str]: chunks = super().iterencode(o, _one_shot) for chunk in chunks: chunk = chunk.replace("&", "\\u0026") chunk = chunk.replace("<", "\\u003c") chunk = chunk.replace(">", "\\u003e") chunk = chunk.replace("'", "\\u0027") yield chunk _default_encoder = JSONEncoder( # upstream: (', ', ': ') # Ours eliminates whitespace. separators=(",", ":"), # upstream: False # True makes nan, inf, -inf serialize as null in compliance with ECMA-262. ignore_nan=True, default=better_default_encoder, ) _default_escaped_encoder = JSONEncoderForHTML( separators=(",", ":"), ignore_nan=True, default=better_default_encoder, ) # NoReturn here is to make this a mypy error to pass kwargs, since they are currently silently dropped def dump(value: Any, fp: IO[str], **kwargs: NoReturn) -> None: for chunk in _default_encoder.iterencode(value): fp.write(chunk) # NoReturn here is to make this a mypy error to pass kwargs, since they are currently silently dropped def dumps(value: Any, escape: bool = False, **kwargs: NoReturn) -> str: # Legacy use. Do not use. Use dumps_htmlsafe if escape: return _default_escaped_encoder.encode(value) return _default_encoder.encode(value) # NoReturn here is to make this a mypy error to pass kwargs, since they are currently silently dropped def load(fp: IO[str] | IO[bytes], **kwargs: NoReturn) -> Any: return loads(fp.read()) # NoReturn here is to make this a mypy error to pass kwargs, since they are currently silently dropped def loads(value: str | bytes, use_rapid_json: bool = False, **kwargs: NoReturn) -> Any: if use_rapid_json is True: return rapidjson.loads(value) else: return _default_decoder.decode(value) # dumps JSON with `orjson` or the default function depending on `option_name` # TODO: remove this when orjson experiment is successful def dumps_experimental(option_name: str, data: Any) -> str: from sentry.options.rollout import in_random_rollout if in_random_rollout(option_name): return orjson.dumps(data).decode() else: return dumps(data) def dumps_htmlsafe(value: object) -> SafeString: return mark_safe(_default_escaped_encoder.encode(value)) @overload def prune_empty_keys(obj: None) -> None: ... @overload def prune_empty_keys(obj: Mapping[TKey, TValue | None]) -> dict[TKey, TValue]: ... def prune_empty_keys(obj: Mapping[TKey, TValue | None] | None) -> dict[TKey, TValue] | None: if obj is None: return None # eliminate None values for serialization to compress the keyspace # and save (seriously) ridiculous amounts of bytes # # Do not coerce empty arrays/dicts or other "falsy" values here to None, # but rather deal with them case-by-case before calling `prune_empty_keys` # (e.g. in `Interface.to_json`). Rarely, but sometimes, there's a slight # semantic difference between empty containers and a missing value. One # example would be `event.logentry.formatted`, where `{}` means "this # message has no params" and `None` means "this message is already # formatted". return {k: v for k, v in obj.items() if v is not None} __all__ = ( "JSONDecodeError", "JSONEncoder", "dump", "dumps", "dumps_htmlsafe", "load", "loads", "prune_empty_keys", )
JSONEncoderForHTML
python
hynek__structlog
src/structlog/_output.py
{ "start": 3395, "end": 5754 }
class ____: """ Write events into a file. Args: file: File to print to. (default: `sys.stdout`) >>> from structlog import WriteLogger >>> WriteLogger().info("hello") hello Useful if you follow `current logging best practices <logging-best-practices>`. Also very useful for testing and examples since `logging` is finicky in doctests. A little faster and a little less versatile than `structlog.PrintLogger`. .. versionadded:: 22.1.0 """ def __init__(self, file: TextIO | None = None): self._file = file or sys.stdout self._write = self._file.write self._flush = self._file.flush self._lock = _get_lock_for_file(self._file) def __getstate__(self) -> str: """ Our __getattr__ magic makes this necessary. """ if self._file is stdout: return "stdout" if self._file is stderr: return "stderr" raise PicklingError( "Only WriteLoggers to sys.stdout and sys.stderr can be pickled." ) def __setstate__(self, state: Any) -> None: """ Our __getattr__ magic makes this necessary. """ if state == "stdout": self._file = stdout else: self._file = stderr self._lock = _get_lock_for_file(self._file) def __deepcopy__(self, memodict: dict[str, object]) -> WriteLogger: """ Create a new WriteLogger with the same attributes. Similar to pickling. """ if self._file not in (sys.stdout, sys.stderr): raise copy.error( "Only WriteLoggers to sys.stdout and sys.stderr " "can be deepcopied." ) newself = self.__class__(self._file) newself._write = newself._file.write newself._flush = newself._file.flush newself._lock = _get_lock_for_file(newself._file) return newself def __repr__(self) -> str: return f"<WriteLogger(file={self._file!r})>" def msg(self, message: str) -> None: """ Write and flush *message*. """ with self._lock: self._write(message + "\n") self._flush() log = debug = info = warn = warning = msg fatal = failure = err = error = critical = exception = msg
WriteLogger
python
google__jax
docs/autodidax.py
{ "start": 73279, "end": 73326 }
class ____(NamedTuple): pass
LambdaBindingRecipe
python
facebook__pyre-check
client/commands/tests/source_code_context_test.py
{ "start": 339, "end": 4564 }
class ____(testslide.TestCase): def test_source_code_context_for_position(self) -> None: self.assertEqual( SourceCodeContext.from_source_and_position( source="\n".join(f"line {i}" for i in range(1, 10)), position=lsp.LspPosition(line=2, character=5), ), "line 1\nline 2\nline 3\nline 4\nline 5\nline 6\nline 7\nline 8\nline 9", ) self.assertEqual( SourceCodeContext.from_source_and_position( source="\n".join(f"line {i}" for i in range(1, 10)), position=lsp.LspPosition(line=2, character=5), max_lines_before_or_after=2, ), "line 1\nline 2\nline 3\nline 4\nline 5", ) self.assertEqual( SourceCodeContext.from_source_and_position( source="\n".join(f"line {i}" for i in range(1, 10)), position=lsp.LspPosition(line=2, character=5), max_lines_before_or_after=3000, ), "line 1\nline 2\nline 3\nline 4\nline 5\nline 6\nline 7\nline 8\nline 9", ) self.assertEqual( SourceCodeContext.from_source_and_position( source="\n".join(f"line {i}" for i in range(1, 10)), position=lsp.LspPosition(line=50, character=5), ), None, ) def test_character_at_position(self) -> None: self.assertEqual( SourceCodeContext.character_at_position( "", lsp.LspPosition(line=0, character=1) ), None, ) self.assertEqual( SourceCodeContext.character_at_position( " ", lsp.LspPosition(line=1, character=0) ), None, ) self.assertEqual( SourceCodeContext.character_at_position( " ", lsp.LspPosition(line=0, character=0) ), " ", ) self.assertEqual( SourceCodeContext.character_at_position( "\nt", lsp.LspPosition(line=1, character=0) ), "t", ) def test_text_at_range(self) -> None: test_text = """ import bar def foo() -> None: print("Hello") """ self.assertEqual( SourceCodeContext.text_at_range( test_text, lsp.LspRange( start=lsp.LspPosition(line=1, character=0), end=lsp.LspPosition(line=1, character=6), ), ), "import", ) self.assertEqual( SourceCodeContext.text_at_range( test_text, lsp.LspRange( start=lsp.LspPosition(line=1, character=0), end=lsp.LspPosition(line=2, character=0), ), ), "import bar\n", ) self.assertEqual( SourceCodeContext.text_at_range( test_text, lsp.LspRange( start=lsp.LspPosition(line=1, character=0), end=lsp.LspPosition(line=3, character=3), ), ), "import bar\n\ndef", ) self.assertEqual( SourceCodeContext.text_at_range( test_text, lsp.LspRange( start=lsp.LspPosition(line=-1, character=0), end=lsp.LspPosition(line=1, character=6), ), ), None, ) self.assertEqual( SourceCodeContext.text_at_range( test_text, lsp.LspRange( start=lsp.LspPosition(line=1, character=0), end=lsp.LspPosition(line=1, character=27), ), ), None, ) self.assertEqual( SourceCodeContext.text_at_range( test_text, lsp.LspRange( start=lsp.LspPosition(line=0, character=6), end=lsp.LspPosition(line=0, character=4), ), ), None, )
SourceCodeContextTest
python
Textualize__rich
examples/repr.py
{ "start": 35, "end": 589 }
class ____:
    def __init__(self, name, eats=None, fly=True, extinct=False):
        self.name = name
        self.eats = list(eats) if eats else []
        self.fly = fly
        self.extinct = extinct


# Note that the repr is still generated without Rich
# Try commenting out the following line
from rich import print

BIRDS = {
    "gull": Bird("gull", eats=["fish", "chips", "ice cream", "sausage rolls"]),
    "penguin": Bird("penguin", eats=["fish"], fly=False),
    "dodo": Bird("dodo", eats=["fruit"], fly=False, extinct=True),
}

print(BIRDS)
Bird
python
kamyu104__LeetCode-Solutions
Python/design-most-recently-used-queue.py
{ "start": 581, "end": 1808 }
class ____(object):  # 0-indexed.
    def __init__(self, n):
        MAX_CALLS = 2000
        self.__bit = [0]*(n+MAX_CALLS+1)  # Extra one for dummy node.
        for i in xrange(1, len(self.__bit)):
            self.__bit[i] = (1 if i-1 < n else 0) + self.__bit[i-1]
        for i in reversed(xrange(1, len(self.__bit))):
            last_i = i - (i & -i)
            self.__bit[i] -= self.__bit[last_i]

    def add(self, i, val):
        i += 1  # Extra one for dummy node.
        while i < len(self.__bit):
            self.__bit[i] += val
            i += (i & -i)

    def query(self, i):
        i += 1  # Extra one for dummy node.
        ret = 0
        while i > 0:
            ret += self.__bit[i]
            i -= (i & -i)
        return ret

    def binary_lift(self, k):
        floor_log2_n = (len(self.__bit)-1).bit_length()-1
        pow_i = 2**floor_log2_n
        total = pos = 0  # 1-indexed
        for i in reversed(xrange(floor_log2_n+1)):  # O(logN)
            if pos+pow_i < len(self.__bit) and not (total+self.__bit[pos+pow_i] >= k):
                total += self.__bit[pos+pow_i]
                pos += pow_i
            pow_i >>= 1
        return (pos+1)-1  # 0-indexed


# fenwick / bit solution
BIT
python
getsentry__sentry
tests/sentry/integrations/discord/test_utils.py
{ "start": 1404, "end": 4710 }
class ____(TestCase): guild_id = "guild-id" channel_id = "channel-id" channel_type = 0 # text integration_id = 1234 guild_name = "server name" @mock.patch("sentry.integrations.discord.utils.channel.DiscordClient.get_channel") def test_happy_path(self, mock_get_channel: mock.MagicMock) -> None: mock_get_channel.return_value = {"guild_id": self.guild_id, "type": self.channel_type} validate_channel_id(self.channel_id, self.guild_id, self.guild_name) @mock.patch("sentry.integrations.discord.utils.channel.DiscordClient.get_channel") def test_404(self, mock_get_channel: mock.MagicMock) -> None: mock_get_channel.side_effect = ApiError(code=404, text="") with raises(ValidationError): validate_channel_id(self.channel_id, self.guild_id, self.guild_name) @mock.patch("sentry.integrations.discord.utils.channel.DiscordClient.get_channel") def test_403(self, mock_get_channel: mock.MagicMock) -> None: mock_get_channel.side_effect = ApiError(code=403, text="") with raises(ValidationError): validate_channel_id(self.channel_id, self.guild_id, self.guild_name) @mock.patch("sentry.integrations.discord.utils.channel.DiscordClient.get_channel") def test_400(self, mock_get_channel: mock.MagicMock) -> None: mock_get_channel.side_effect = ApiError(code=400, text="") with raises(ValidationError): validate_channel_id(self.channel_id, self.guild_id, self.guild_name) @mock.patch("sentry.integrations.discord.utils.channel.DiscordClient.get_channel") def test_api_error(self, mock_get_channel: mock.MagicMock) -> None: mock_get_channel.side_effect = ApiError(code=401, text="") with raises(IntegrationError): validate_channel_id(self.channel_id, self.guild_id, self.guild_name) @mock.patch("sentry.integrations.discord.utils.channel.DiscordClient.get_channel") def test_bad_response(self, mock_get_channel: mock.MagicMock) -> None: mock_get_channel.return_value = "" with raises(IntegrationError): validate_channel_id(self.channel_id, self.guild_id, self.guild_name) @mock.patch("sentry.integrations.discord.utils.channel.DiscordClient.get_channel") def test_not_guild_member(self, mock_get_channel: mock.MagicMock) -> None: mock_get_channel.return_value = {"guild_id": "not-my-guild", "type": self.channel_type} with raises(ValidationError): validate_channel_id(self.channel_id, self.guild_id, self.guild_name) @mock.patch("sentry.integrations.discord.utils.channel.DiscordClient.get_channel") def test_timeout(self, mock_get_channel: mock.MagicMock) -> None: mock_get_channel.side_effect = Timeout("foo") with raises(ApiTimeoutError): validate_channel_id(self.channel_id, self.guild_id, self.guild_name) @mock.patch("sentry.integrations.discord.utils.channel.DiscordClient.get_channel") def test_not_supported_type(self, mock_get_channel: mock.MagicMock) -> None: mock_get_channel.return_value = {"guild_id": self.guild_id, "type": ChannelType.DM.value} with raises(ValidationError): validate_channel_id(self.channel_id, self.guild_id, self.guild_name)
ValidateChannelTest
python
cython__cython
Cython/Compiler/Visitor.py
{ "start": 16429, "end": 26290 }
class ____(EnvTransform): """ Base class for transformations that want to intercept on specific builtin functions or methods of builtin types, including special methods triggered by Python operators. Must run after declaration analysis when entries were assigned. Naming pattern for handler methods is as follows: * builtin functions: _handle_(general|simple|any)_function_NAME * builtin methods: _handle_(general|simple|any)_method_TYPENAME_METHODNAME """ # only visit call nodes and Python operations def visit_GeneralCallNode(self, node): self._process_children(node) function = node.function if not function.type.is_pyobject: return node arg_tuple = node.positional_args if not isinstance(arg_tuple, ExprNodes.TupleNode): return node keyword_args = node.keyword_args if keyword_args and not isinstance(keyword_args, ExprNodes.DictNode): # can't handle **kwargs return node args = arg_tuple.args return self._dispatch_to_handler(node, function, args, keyword_args) def visit_SimpleCallNode(self, node): self._process_children(node) function = node.function if function.type.is_pyobject: arg_tuple = node.arg_tuple if not isinstance(arg_tuple, ExprNodes.TupleNode): return node args = arg_tuple.args else: args = node.args return self._dispatch_to_handler(node, function, args, None) def visit_PrimaryCmpNode(self, node): if node.cascade: # not currently handled below self._process_children(node) return node return self._visit_binop_node(node) def visit_BinopNode(self, node): return self._visit_binop_node(node) def _visit_binop_node(self, node): self._process_children(node) # FIXME: could special case 'not_in' special_method_name = find_special_method_for_binary_operator(node.operator) if special_method_name: operand1, operand2 = node.operand1, node.operand2 if special_method_name == '__contains__': operand1, operand2 = operand2, operand1 elif special_method_name == '__div__': if Future.division in self.current_env().context.future_directives: special_method_name = '__truediv__' obj_type = operand1.type if obj_type.is_builtin_type and not obj_type.is_exception_type: type_name = obj_type.name else: type_name = "object" # safety measure node = self._dispatch_to_method_handler( special_method_name, None, False, type_name, node, None, [operand1, operand2], None) return node def visit_UnopNode(self, node): self._process_children(node) special_method_name = find_special_method_for_unary_operator(node.operator) if special_method_name: operand = node.operand obj_type = operand.type if obj_type.is_builtin_type and not obj_type.is_exception_type: type_name = obj_type.name else: type_name = "object" # safety measure node = self._dispatch_to_method_handler( special_method_name, None, False, type_name, node, None, [operand], None) return node ### dispatch to specific handlers def _find_handler(self, match_name, has_kwargs): if not match_name.isascii(): # Classes with unicode names won't have specific handlers. 
return None call_type = 'general' if has_kwargs else 'simple' handler = getattr(self, f'_handle_{call_type}_{match_name}', None) if handler is None: handler = getattr(self, f'_handle_any_{match_name}', None) return handler def _delegate_to_assigned_value(self, node, function, arg_list, kwargs): assignment = function.cf_state[0] value = assignment.rhs if value.is_name: if not value.entry or len(value.entry.cf_assignments) > 1: # the variable might have been reassigned => play safe return node elif value.is_attribute and value.obj.is_name: if not value.obj.entry or len(value.obj.entry.cf_assignments) > 1: # the underlying variable might have been reassigned => play safe return node else: return node return self._dispatch_to_handler( node, value, arg_list, kwargs) def _dispatch_to_handler(self, node, function, arg_list, kwargs): if function.is_name: # we only consider functions that are either builtin # Python functions or builtins that were already replaced # into a C function call (defined in the builtin scope) if not function.entry: return node entry = function.entry is_builtin = ( entry.is_builtin or entry is self.current_env().builtin_scope().lookup_here(function.name)) if not is_builtin: if function.cf_state and function.cf_state.is_single: # we know the value of the variable # => see if it's usable instead return self._delegate_to_assigned_value( node, function, arg_list, kwargs) if (arg_list and entry.is_cmethod and entry.scope and entry.scope.parent_type.is_builtin_type and not entry.scope.parent_type.is_exception_type): if entry.scope.parent_type is arg_list[0].type: # Optimised (unbound) method of a builtin type => try to "de-optimise". return self._dispatch_to_method_handler( entry.name, self_arg=None, is_unbound_method=True, type_name=entry.scope.parent_type.name, node=node, function=function, arg_list=arg_list, kwargs=kwargs) return node function_handler = self._find_handler( f"function_{function.name}", kwargs) if function_handler is None: return self._handle_function(node, function.name, function, arg_list, kwargs) if kwargs: return function_handler(node, function, arg_list, kwargs) else: return function_handler(node, function, arg_list) elif function.is_attribute: attr_name = function.attribute if function.type.is_pyobject: self_arg = function.obj elif node.self and function.entry: entry = function.entry.as_variable if not entry or not entry.is_builtin: return node # C implementation of a Python builtin method - see if we find further matches self_arg = node.self arg_list = arg_list[1:] # drop CloneNode of self argument else: return node obj_type = self_arg.type is_unbound_method = False # Exceptions aren't necessarily exact types so could have unknown methods if obj_type.is_builtin_type and not obj_type.is_exception_type: if obj_type is Builtin.type_type and self_arg.is_name and arg_list and arg_list[0].type.is_pyobject: # calling an unbound method like 'list.append(L,x)' # (ignoring 'type.mro()' here ...) type_name = self_arg.name self_arg = None is_unbound_method = True else: type_name = obj_type.name if type_name == 'str': # We traditionally used the type name 'unicode' for 'str' dispatch methods. 
type_name = 'unicode' else: type_name = "object" # safety measure return self._dispatch_to_method_handler( attr_name, self_arg, is_unbound_method, type_name, node, function, arg_list, kwargs) else: return node def _dispatch_to_method_handler(self, attr_name, self_arg, is_unbound_method, type_name, node, function, arg_list, kwargs): method_handler = self._find_handler( f"method_{type_name}_{attr_name}", kwargs) if method_handler is None: if (attr_name in TypeSlots.special_method_names or attr_name in ['__new__', '__class__']): method_handler = self._find_handler( f"slot{attr_name}", kwargs) if method_handler is None: return self._handle_method( node, type_name, attr_name, function, arg_list, is_unbound_method, kwargs) if self_arg is not None: arg_list = [self_arg] + list(arg_list) if kwargs: result = method_handler( node, function, arg_list, is_unbound_method, kwargs) else: result = method_handler( node, function, arg_list, is_unbound_method) return result def _handle_function(self, node, function_name, function, arg_list, kwargs): """Fallback handler""" return node def _handle_method(self, node, type_name, attr_name, function, arg_list, is_unbound_method, kwargs): """Fallback handler""" return node
MethodDispatcherTransform
python
mlflow__mlflow
mlflow/types/responses_helpers.py
{ "start": 1499, "end": 2056 }
class ____(BaseModel):
    model_config = ConfigDict(extra="allow")
    type: str

    @model_validator(mode="after")
    def check_type(self) -> "Annotation":
        if self.type == "file_citation":
            AnnotationFileCitation(**self.model_dump())
        elif self.type == "url_citation":
            AnnotationURLCitation(**self.model_dump())
        elif self.type == "file_path":
            AnnotationFilePath(**self.model_dump())
        else:
            raise ValueError(f"Invalid annotation type: {self.type}")
        return self
Annotation
python
py-pdf__pypdf
pypdf/generic/_data_structures.py
{ "start": 40675, "end": 54338 }
class ____(DecodedStreamObject): """ In order to be fast, this data structure can contain either: * raw data in ._data * parsed stream operations in ._operations. At any time, ContentStream object can either have both of those fields defined, or one field defined and the other set to None. These fields are "rebuilt" lazily, when accessed: * when .get_data() is called, if ._data is None, it is rebuilt from ._operations. * when .operations is called, if ._operations is None, it is rebuilt from ._data. Conversely, these fields can be invalidated: * when .set_data() is called, ._operations is set to None. * when .operations is set, ._data is set to None. """ def __init__( self, stream: Any, pdf: Any, forced_encoding: Union[None, str, list[str], dict[int, str]] = None, ) -> None: self.pdf = pdf self._operations: list[tuple[Any, bytes]] = [] # stream may be a StreamObject or an ArrayObject containing # StreamObjects to be concatenated together. if stream is None: super().set_data(b"") else: stream = stream.get_object() if isinstance(stream, ArrayObject): data = b"" for s in stream: s_resolved = s.get_object() if isinstance(s_resolved, NullObject): continue if not isinstance(s_resolved, StreamObject): # No need to emit an exception here for now - the PDF structure # seems to already be broken beforehand in these cases. logger_warning( f"Expected StreamObject, got {type(s_resolved).__name__} instead. Data might be wrong.", __name__ ) else: data += s_resolved.get_data() if len(data) == 0 or data[-1] != b"\n": data += b"\n" super().set_data(bytes(data)) else: stream_data = stream.get_data() assert stream_data is not None super().set_data(stream_data) self.forced_encoding = forced_encoding def replicate( self, pdf_dest: PdfWriterProtocol, ) -> "ContentStream": d__ = cast( "ContentStream", self._reference_clone(self.__class__(None, None), pdf_dest, False), ) d__._data = self._data try: decoded_self = self.decoded_self if decoded_self is None: self.decoded_self = None else: self.decoded_self = cast( "DecodedStreamObject", decoded_self.replicate(pdf_dest) ) except Exception: pass for k, v in self.items(): d__[k.replicate(pdf_dest)] = ( v.replicate(pdf_dest) if hasattr(v, "replicate") else v ) return d__ d__.set_data(self._data) d__.pdf = pdf_dest d__._operations = list(self._operations) d__.forced_encoding = self.forced_encoding return d__ def clone( self, pdf_dest: Any, force_duplicate: bool = False, ignore_fields: Optional[Sequence[Union[str, int]]] = (), ) -> "ContentStream": """ Clone object into pdf_dest. Args: pdf_dest: force_duplicate: ignore_fields: Returns: The cloned ContentStream """ try: if self.indirect_reference.pdf == pdf_dest and not force_duplicate: # type: ignore return self except Exception: pass visited: set[tuple[int, int]] = set() d__ = cast( "ContentStream", self._reference_clone( self.__class__(None, None), pdf_dest, force_duplicate ), ) if ignore_fields is None: ignore_fields = [] d__._clone(self, pdf_dest, force_duplicate, ignore_fields, visited) return d__ def _clone( self, src: DictionaryObject, pdf_dest: PdfWriterProtocol, force_duplicate: bool, ignore_fields: Optional[Sequence[Union[str, int]]], visited: set[tuple[int, int]], ) -> None: """ Update the object from src. 
Args: src: pdf_dest: force_duplicate: ignore_fields: """ src_cs = cast("ContentStream", src) super().set_data(src_cs._data) self.pdf = pdf_dest self._operations = list(src_cs._operations) self.forced_encoding = src_cs.forced_encoding # no need to call DictionaryObjection or anything # like super(DictionaryObject,self)._clone(src, pdf_dest, force_duplicate, ignore_fields, visited) def _parse_content_stream(self, stream: StreamType) -> None: # 7.8.2 Content Streams stream.seek(0, 0) operands: list[Union[int, str, PdfObject]] = [] while True: peek = read_non_whitespace(stream) if peek in (b"", 0): break stream.seek(-1, 1) if peek.isalpha() or peek in (b"'", b'"'): operator = read_until_regex(stream, NameObject.delimiter_pattern) if operator == b"BI": # begin inline image - a completely different parsing # mechanism is required, of course... thanks buddy... assert operands == [] ii = self._read_inline_image(stream) self._operations.append((ii, b"INLINE IMAGE")) else: self._operations.append((operands, operator)) operands = [] elif peek == b"%": # If we encounter a comment in the content stream, we have to # handle it here. Typically, read_object will handle # encountering a comment -- but read_object assumes that # following the comment must be the object we're trying to # read. In this case, it could be an operator instead. while peek not in (b"\r", b"\n", b""): peek = stream.read(1) else: operands.append(read_object(stream, None, self.forced_encoding)) def _read_inline_image(self, stream: StreamType) -> dict[str, Any]: # begin reading just after the "BI" - begin image # first read the dictionary of settings. settings = DictionaryObject() while True: tok = read_non_whitespace(stream) stream.seek(-1, 1) if tok == b"I": # "ID" - begin of image data break key = read_object(stream, self.pdf) tok = read_non_whitespace(stream) stream.seek(-1, 1) value = read_object(stream, self.pdf) settings[key] = value # left at beginning of ID tmp = stream.read(3) assert tmp[:2] == b"ID" filtr = settings.get("/F", settings.get("/Filter", "not set")) savpos = stream.tell() if isinstance(filtr, list): filtr = filtr[0] # used forencoding if "AHx" in filtr or "ASCIIHexDecode" in filtr: data = extract_inline_AHx(stream) elif "A85" in filtr or "ASCII85Decode" in filtr: data = extract_inline_A85(stream) elif "RL" in filtr or "RunLengthDecode" in filtr: data = extract_inline_RL(stream) elif "DCT" in filtr or "DCTDecode" in filtr: data = extract_inline_DCT(stream) elif filtr == "not set": cs = settings.get("/CS", "") if isinstance(cs, list): cs = cs[0] if "RGB" in cs: lcs = 3 elif "CMYK" in cs: lcs = 4 else: bits = settings.get( "/BPC", 8 if cs in {"/I", "/G", "/Indexed", "/DeviceGray"} else -1, ) if bits > 0: lcs = bits / 8.0 else: data = extract_inline_default(stream) lcs = -1 if lcs > 0: data = stream.read( ceil(cast(int, settings["/W"]) * lcs) * cast(int, settings["/H"]) ) # Move to the `EI` if possible. ei = read_non_whitespace(stream) stream.seek(-1, 1) else: data = extract_inline_default(stream) ei = stream.read(3) stream.seek(-1, 1) if ei[:2] != b"EI" or ei[2:3] not in WHITESPACES: # Deal with wrong/missing `EI` tags. Example: Wrong dimensions specified above. stream.seek(savpos, 0) data = extract_inline_default(stream) ei = stream.read(3) stream.seek(-1, 1) if ei[:2] != b"EI" or ei[2:3] not in WHITESPACES: # pragma: no cover # Check the same condition again. This should never fail as # edge cases are covered by `extract_inline_default` above, # but check this ot make sure that we are behind the `EI` afterwards. 
raise PdfStreamError( f"Could not extract inline image, even using fallback. Expected 'EI', got {ei!r}" ) return {"settings": settings, "data": data} # This overrides the parent method def get_data(self) -> bytes: if not self._data: new_data = BytesIO() for operands, operator in self._operations: if operator == b"INLINE IMAGE": new_data.write(b"BI") dict_text = BytesIO() operands["settings"].write_to_stream(dict_text) new_data.write(dict_text.getvalue()[2:-2]) new_data.write(b"ID ") new_data.write(operands["data"]) new_data.write(b"EI") else: for op in operands: op.write_to_stream(new_data) new_data.write(b" ") new_data.write(operator) new_data.write(b"\n") self._data = new_data.getvalue() return self._data # This overrides the parent method def set_data(self, data: bytes) -> None: super().set_data(data) self._operations = [] @property def operations(self) -> list[tuple[Any, bytes]]: if not self._operations and self._data: self._parse_content_stream(BytesIO(self._data)) self._data = b"" return self._operations @operations.setter def operations(self, operations: list[tuple[Any, bytes]]) -> None: self._operations = operations self._data = b"" def isolate_graphics_state(self) -> None: if self._operations: self._operations.insert(0, ([], b"q")) self._operations.append(([], b"Q")) elif self._data: self._data = b"q\n" + self._data + b"\nQ\n" # This overrides the parent method def write_to_stream( self, stream: StreamType, encryption_key: Union[None, str, bytes] = None ) -> None: if not self._data and self._operations: self.get_data() # this ensures ._data is rebuilt super().write_to_stream(stream, encryption_key) def read_object( stream: StreamType, pdf: Optional[PdfReaderProtocol], forced_encoding: Union[None, str, list[str], dict[int, str]] = None, ) -> Union[PdfObject, int, str, ContentStream]: tok = stream.read(1) stream.seek(-1, 1) # reset to start if tok == b"/": return NameObject.read_from_stream(stream, pdf) if tok == b"<": # hexadecimal string OR dictionary peek = stream.read(2) stream.seek(-2, 1) # reset to start if peek == b"<<": return DictionaryObject.read_from_stream(stream, pdf, forced_encoding) return read_hex_string_from_stream(stream, forced_encoding) if tok == b"[": return ArrayObject.read_from_stream(stream, pdf, forced_encoding) if tok in (b"t", b"f"): return BooleanObject.read_from_stream(stream) if tok == b"(": return read_string_from_stream(stream, forced_encoding) if tok == b"e" and stream.read(6) == b"endobj": return NullObject() if tok == b"n": return NullObject.read_from_stream(stream) if tok == b"%": # comment skip_over_comment(stream) tok = read_non_whitespace(stream) stream.seek(-1, 1) return read_object(stream, pdf, forced_encoding) if tok in b"0123456789+-.": # number object OR indirect reference peek = stream.read(20) stream.seek(-len(peek), 1) # reset to start if IndirectPattern.match(peek) is not None: assert pdf is not None, "mypy" return IndirectObject.read_from_stream(stream, pdf) return NumberObject.read_from_stream(stream) pos = stream.tell() stream.seek(-20, 1) stream_extract = stream.read(80) stream.seek(pos) read_until_whitespace(stream) raise PdfReadError( f"Invalid Elementary Object starting with {tok!r} @{pos}: {stream_extract!r}" )
ContentStream
python
gevent__gevent
src/greentest/3.10/test_threading.py
{ "start": 44391, "end": 50029 }
class ____(BaseTestCase): # A RuntimeError should be raised if Thread.start() is called # multiple times. def test_start_thread_again(self): thread = threading.Thread() thread.start() self.assertRaises(RuntimeError, thread.start) thread.join() def test_joining_current_thread(self): current_thread = threading.current_thread() self.assertRaises(RuntimeError, current_thread.join); def test_joining_inactive_thread(self): thread = threading.Thread() self.assertRaises(RuntimeError, thread.join) def test_daemonize_active_thread(self): thread = threading.Thread() thread.start() self.assertRaises(RuntimeError, setattr, thread, "daemon", True) thread.join() def test_releasing_unacquired_lock(self): lock = threading.Lock() self.assertRaises(RuntimeError, lock.release) def test_recursion_limit(self): # Issue 9670 # test that excessive recursion within a non-main thread causes # an exception rather than crashing the interpreter on platforms # like Mac OS X or FreeBSD which have small default stack sizes # for threads script = """if True: import threading def recurse(): return recurse() def outer(): try: recurse() except RecursionError: pass w = threading.Thread(target=outer) w.start() w.join() print('end of main thread') """ expected_output = "end of main thread\n" p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() data = stdout.decode().replace('\r', '') self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode()) self.assertEqual(data, expected_output) def test_print_exception(self): script = r"""if True: import threading import time running = False def run(): global running running = True while running: time.sleep(0.01) 1/0 t = threading.Thread(target=run) t.start() while not running: time.sleep(0.01) running = False t.join() """ rc, out, err = assert_python_ok("-c", script) self.assertEqual(out, b'') err = err.decode() self.assertIn("Exception in thread", err) self.assertIn("Traceback (most recent call last):", err) self.assertIn("ZeroDivisionError", err) self.assertNotIn("Unhandled exception", err) def test_print_exception_stderr_is_none_1(self): script = r"""if True: import sys import threading import time running = False def run(): global running running = True while running: time.sleep(0.01) 1/0 t = threading.Thread(target=run) t.start() while not running: time.sleep(0.01) sys.stderr = None running = False t.join() """ rc, out, err = assert_python_ok("-c", script) self.assertEqual(out, b'') err = err.decode() self.assertIn("Exception in thread", err) self.assertIn("Traceback (most recent call last):", err) self.assertIn("ZeroDivisionError", err) self.assertNotIn("Unhandled exception", err) def test_print_exception_stderr_is_none_2(self): script = r"""if True: import sys import threading import time running = False def run(): global running running = True while running: time.sleep(0.01) 1/0 sys.stderr = None t = threading.Thread(target=run) t.start() while not running: time.sleep(0.01) running = False t.join() """ rc, out, err = assert_python_ok("-c", script) self.assertEqual(out, b'') self.assertNotIn("Unhandled exception", err.decode()) def test_bare_raise_in_brand_new_thread(self): def bare_raise(): raise class Issue27558(threading.Thread): exc = None def run(self): try: bare_raise() except Exception as exc: self.exc = exc thread = Issue27558() thread.start() thread.join() self.assertIsNotNone(thread.exc) self.assertIsInstance(thread.exc, RuntimeError) # explicitly break the reference cycle to not 
leak a dangling thread thread.exc = None def test_multithread_modify_file_noerror(self): # See issue25872 def modify_file(): with open(os_helper.TESTFN, 'w', encoding='utf-8') as fp: fp.write(' ') traceback.format_stack() self.addCleanup(os_helper.unlink, os_helper.TESTFN) threads = [ threading.Thread(target=modify_file) for i in range(100) ] for t in threads: t.start() t.join()
ThreadingExceptionTests
python
kamyu104__LeetCode-Solutions
Python/put-boxes-into-the-warehouse-ii.py
{ "start": 33, "end": 547 }
class ____(object):
    def maxBoxesInWarehouse(self, boxes, warehouse):
        """
        :type boxes: List[int]
        :type warehouse: List[int]
        :rtype: int
        """
        boxes.sort(reverse=True)
        left, right = 0, len(warehouse)-1
        for h in boxes:
            if h <= warehouse[left]:
                left += 1
            elif h <= warehouse[right]:
                right -= 1
            if left > right:
                break
        return left + (len(warehouse)-1-right)
Solution
python
pytorch__pytorch
test/torch_np/numpy_tests/core/test_indexing.py
{ "start": 21683, "end": 23026 }
class ____(TestCase):
    @xpassIfTorchDynamo_np  # (
    #     reason="XXX: low-prio to support assigning complex values on floating arrays"
    # )
    def test_boolean_index_cast_assign(self):
        # Setup the boolean index and float arrays.
        shape = (8, 63)
        bool_index = np.zeros(shape).astype(bool)
        bool_index[0, 1] = True
        zero_array = np.zeros(shape)

        # Assigning float is fine.
        zero_array[bool_index] = np.array([1])
        assert_equal(zero_array[0, 1], 1)

        # np.ComplexWarning moved to np.exceptions in numpy>=2.0.0
        # np.exceptions only available in numpy>=1.25.0
        has_exceptions_ns = hasattr(np, "exceptions")
        ComplexWarning = (
            np.exceptions.ComplexWarning if has_exceptions_ns else np.ComplexWarning
        )

        # Fancy indexing works, although we get a cast warning.
        assert_warns(
            ComplexWarning, zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])
        )
        assert_equal(zero_array[0, 1], 2)  # No complex part

        # Cast complex to float, throwing away the imaginary portion.
        assert_warns(ComplexWarning, zero_array.__setitem__, bool_index, np.array([1j]))
        assert_equal(zero_array[0, 1], 0)

    @xfail  # (reason="XXX: requires broadcast() and broadcast_to()")
TestFancyIndexingCast
python
doocs__leetcode
solution/1900-1999/1954.Minimum Garden Perimeter to Collect Enough Apples/Solution2.py
{ "start": 0, "end": 310 }
class ____:
    def minimumPerimeter(self, neededApples: int) -> int:
        l, r = 1, 100000
        while l < r:
            mid = (l + r) >> 1
            if 2 * mid * (mid + 1) * (2 * mid + 1) >= neededApples:
                r = mid
            else:
                l = mid + 1
        return l * 8
Solution
python
automl__auto-sklearn
autosklearn/metalearning/metafeatures/metafeature.py
{ "start": 1258, "end": 1412 }
class ____(AbstractMetaFeature):
    def __init__(self):
        super(HelperFunction, self).__init__()
        self.type_ = "HELPERFUNCTION"
HelperFunction
python
jupyterlab__jupyterlab
jupyterlab/tests/test_jupyterlab.py
{ "start": 4664, "end": 31250 }
class ____(AppHandlerTest): def test_install_extension(self): assert install_extension(self.mock_extension) is True path = pjoin(self.app_dir, "extensions", "*.tgz") assert glob.glob(path) extensions = get_app_info()["extensions"] name = self.pkg_names["extension"] assert name in extensions assert check_extension(name) def test_install_twice(self): assert install_extension(self.mock_extension) is True path = pjoin(self.app_dir, "extensions", "*.tgz") assert install_extension(self.mock_extension) is True assert glob.glob(path) extensions = get_app_info()["extensions"] name = self.pkg_names["extension"] assert name in extensions assert check_extension(name) def test_install_mime_renderer(self): install_extension(self.mock_mimeextension) name = self.pkg_names["mimeextension"] assert name in get_app_info()["extensions"] assert check_extension(name) assert uninstall_extension(name) is True assert name not in get_app_info()["extensions"] assert not check_extension(name) def test_install_incompatible(self): with pytest.raises(ValueError) as excinfo: install_extension(self.mock_incompat) assert "Conflicting Dependencies" in str(excinfo.value) assert not check_extension(self.pkg_names["incompat"]) def test_install_failed(self): path = self.mock_package with pytest.raises(ValueError): install_extension(path) with open(pjoin(path, "package.json")) as fid: data = json.load(fid) extensions = get_app_info()["extensions"] name = data["name"] assert name not in extensions assert not check_extension(name) def test_validation(self): path = self.mock_extension os.remove(pjoin(path, "index.js")) with pytest.raises(ValueError): install_extension(path) assert not check_extension(self.pkg_names["extension"]) path = self.mock_mimeextension os.remove(pjoin(path, "index.js")) with pytest.raises(ValueError): install_extension(path) assert not check_extension(self.pkg_names["mimeextension"]) def test_uninstall_extension(self): assert install_extension(self.mock_extension) is True name = self.pkg_names["extension"] assert check_extension(name) assert uninstall_extension(self.pkg_names["extension"]) is True path = pjoin(self.app_dir, "extensions", "*.tgz") assert not glob.glob(path) extensions = get_app_info()["extensions"] assert name not in extensions assert not check_extension(name) def test_uninstall_all_extensions(self): install_extension(self.mock_extension) install_extension(self.mock_mimeextension) ext_name = self.pkg_names["extension"] mime_ext_name = self.pkg_names["mimeextension"] assert check_extension(ext_name) is True assert check_extension(mime_ext_name) is True assert uninstall_extension(all_=True) is True extensions = get_app_info()["extensions"] assert ext_name not in extensions assert mime_ext_name not in extensions @pytest.mark.slow def test_uninstall_core_extension(self): assert uninstall_extension("@jupyterlab/console-extension") is True app_dir = self.app_dir build() with open(pjoin(app_dir, "staging", "package.json")) as fid: data = json.load(fid) extensions = data["jupyterlab"]["extensions"] assert "@jupyterlab/console-extension" not in extensions assert not check_extension("@jupyterlab/console-extension") assert install_extension("@jupyterlab/console-extension") is True build() with open(pjoin(app_dir, "staging", "package.json")) as fid: data = json.load(fid) extensions = data["jupyterlab"]["extensions"] assert "@jupyterlab/console-extension" in extensions assert check_extension("@jupyterlab/console-extension") def test_install_and_uninstall_pinned(self): """ You should be able to install 
different versions of the same extension with different pinned names and uninstall them with those names. """ NAMES = ["test-1", "test-2"] # noqa assert install_extension(self.pinned_packages[0], pin=NAMES[0]) assert install_extension(self.pinned_packages[1], pin=NAMES[1]) extensions = get_app_info()["extensions"] assert NAMES[0] in extensions assert NAMES[1] in extensions assert check_extension(NAMES[0]) assert check_extension(NAMES[1]) # Uninstall assert uninstall_extension(NAMES[0]) assert uninstall_extension(NAMES[1]) extensions = get_app_info()["extensions"] assert NAMES[0] not in extensions assert NAMES[1] not in extensions assert not check_extension(NAMES[0]) assert not check_extension(NAMES[1]) @pytest.mark.skipif( platform.system() == "Windows", reason="running npm pack fails on windows CI" ) def test_install_and_uninstall_pinned_folder(self): """ Same as above test, but installs from a local folder instead of from npm. """ # Download each version of the package from NPM: base_dir = Path(self.tempdir()) # The archive file names are printed to stdout when run `npm pack` packages = [ subprocess.run( # noqa S603 ["npm", "pack", name], # noqa S607 stdout=subprocess.PIPE, text=True, check=True, cwd=str(base_dir), ).stdout.strip() for name in self.pinned_packages ] shutil.unpack_archive(str(base_dir / packages[0]), str(base_dir / "1")) shutil.unpack_archive(str(base_dir / packages[1]), str(base_dir / "2")) # Change pinned packages to be these directories now, so we install from these folders self.pinned_packages = [str(base_dir / "1" / "package"), str(base_dir / "2" / "package")] self.test_install_and_uninstall_pinned() def test_link_extension(self): path = self.mock_extension name = self.pkg_names["extension"] link_package(path) linked = get_app_info()["linked_packages"] assert name not in linked assert name in get_app_info()["extensions"] assert check_extension(name) assert unlink_package(path) is True linked = get_app_info()["linked_packages"] assert name not in linked assert name not in get_app_info()["extensions"] assert not check_extension(name) def test_link_package(self): path = self.mock_package name = self.pkg_names["package"] assert link_package(path) is True linked = get_app_info()["linked_packages"] assert name in linked assert name not in get_app_info()["extensions"] assert check_extension(name) assert unlink_package(path) linked = get_app_info()["linked_packages"] assert name not in linked assert not check_extension(name) def test_unlink_package(self): target = self.mock_package assert link_package(target) is True assert unlink_package(target) is True linked = get_app_info()["linked_packages"] name = self.pkg_names["package"] assert name not in linked assert not check_extension(name) def test_list_extensions(self): assert install_extension(self.mock_extension) is True list_extensions() def test_app_dir(self): app_dir = self.tempdir() options = AppOptions(app_dir=app_dir) assert install_extension(self.mock_extension, app_options=options) is True path = pjoin(app_dir, "extensions", "*.tgz") assert glob.glob(path) extensions = get_app_info(app_options=options)["extensions"] ext_name = self.pkg_names["extension"] assert ext_name in extensions assert check_extension(ext_name, app_options=options) assert uninstall_extension(self.pkg_names["extension"], app_options=options) is True path = pjoin(app_dir, "extensions", "*.tgz") assert not glob.glob(path) extensions = get_app_info(app_options=options)["extensions"] assert ext_name not in extensions assert not 
check_extension(ext_name, app_options=options) assert link_package(self.mock_package, app_options=options) is True linked = get_app_info(app_options=options)["linked_packages"] pkg_name = self.pkg_names["package"] assert pkg_name in linked assert check_extension(pkg_name, app_options=options) assert unlink_package(self.mock_package, app_options=options) is True linked = get_app_info(app_options=options)["linked_packages"] assert pkg_name not in linked assert not check_extension(pkg_name, app_options=options) def test_app_dir_use_sys_prefix(self): app_dir = self.tempdir() options = AppOptions(app_dir=app_dir) if os.path.exists(self.app_dir): os.removedirs(self.app_dir) assert install_extension(self.mock_extension) is True path = pjoin(app_dir, "extensions", "*.tgz") assert not glob.glob(path) extensions = get_app_info(app_options=options)["extensions"] ext_name = self.pkg_names["extension"] assert ext_name in extensions assert check_extension(ext_name, app_options=options) def test_app_dir_disable_sys_prefix(self): app_dir = self.tempdir() options = AppOptions(app_dir=app_dir, use_sys_dir=False) if os.path.exists(self.app_dir): os.removedirs(self.app_dir) assert install_extension(self.mock_extension) is True path = pjoin(app_dir, "extensions", "*.tgz") assert not glob.glob(path) extensions = get_app_info(app_options=options)["extensions"] ext_name = self.pkg_names["extension"] assert ext_name not in extensions assert not check_extension(ext_name, app_options=options) def test_app_dir_shadowing(self): app_dir = self.tempdir() sys_dir = self.app_dir app_options = AppOptions(app_dir=app_dir) if os.path.exists(sys_dir): os.removedirs(sys_dir) assert install_extension(self.mock_extension) is True sys_path = pjoin(sys_dir, "extensions", "*.tgz") assert glob.glob(sys_path) app_path = pjoin(app_dir, "extensions", "*.tgz") assert not glob.glob(app_path) extensions = get_app_info(app_options=app_options)["extensions"] ext_name = self.pkg_names["extension"] assert ext_name in extensions assert check_extension(ext_name, app_options=app_options) assert install_extension(self.mock_extension, app_options=app_options) is True assert glob.glob(app_path) extensions = get_app_info(app_options=app_options)["extensions"] assert ext_name in extensions assert check_extension(ext_name, app_options=app_options) assert uninstall_extension(self.pkg_names["extension"], app_options=app_options) is True assert not glob.glob(app_path) assert glob.glob(sys_path) extensions = get_app_info(app_options=app_options)["extensions"] assert ext_name in extensions assert check_extension(ext_name, app_options=app_options) assert uninstall_extension(self.pkg_names["extension"], app_options=app_options) is True assert not glob.glob(app_path) assert not glob.glob(sys_path) extensions = get_app_info(app_options=app_options)["extensions"] assert ext_name not in extensions assert not check_extension(ext_name, app_options=app_options) @pytest.mark.slow def test_build(self): assert install_extension(self.mock_extension) is True build() # check staging directory. entry = pjoin(self.app_dir, "staging", "build", "index.out.js") with open(entry) as fid: data = fid.read() assert self.pkg_names["extension"] in data # check static directory. 
entry = pjoin(self.app_dir, "static", "index.out.js") with open(entry) as fid: data = fid.read() assert self.pkg_names["extension"] in data @pytest.mark.slow @pytest.mark.skipif(not os.path.exists(DEV_DIR), reason="Not in git checkout") def test_build_splice_packages(self): app_options = AppOptions(splice_source=True) assert install_extension(self.mock_extension) is True build(app_options=app_options) assert "-spliced" in get_app_version(app_options) # check staging directory. entry = pjoin(self.app_dir, "staging", "build", "index.out.js") with open(entry) as fid: data = fid.read() assert self.pkg_names["extension"] in data # check static directory. entry = pjoin(self.app_dir, "static", "index.out.js") with open(entry) as fid: data = fid.read() assert self.pkg_names["extension"] in data @pytest.mark.slow def test_build_custom(self): assert install_extension(self.mock_extension) is True build(name="foo", version="1.0", static_url="bar") # check static directory. entry = pjoin(self.app_dir, "static", "index.out.js") with open(entry) as fid: data = fid.read() assert self.pkg_names["extension"] in data pkg = pjoin(self.app_dir, "static", "package.json") with open(pkg) as fid: data = json.load(fid) assert data["jupyterlab"]["name"] == "foo" assert data["jupyterlab"]["version"] == "1.0" assert data["jupyterlab"]["staticUrl"] == "bar" @pytest.mark.slow def test_build_custom_minimal_core_config(self): default_config = CoreConfig() core_config = CoreConfig() core_config.clear_packages() logger = logging.getLogger("jupyterlab_test_logger") logger.setLevel("DEBUG") app_dir = self.tempdir() options = AppOptions( app_dir=app_dir, core_config=core_config, logger=logger, use_sys_dir=False, ) extensions = ( "@jupyterlab/application-extension", "@jupyterlab/apputils-extension", ) singletons = ( "@jupyterlab/application", "@jupyterlab/apputils", "@jupyterlab/coreutils", "@jupyterlab/services", ) for name in extensions: semver = default_config.extensions[name] core_config.add(name, semver, extension=True) for name in singletons: semver = default_config.singletons[name] core_config.add(name, semver) assert install_extension(self.mock_extension, app_options=options) is True build(app_options=options) # check static directory. 
entry = pjoin(app_dir, "static", "index.out.js") with open(entry) as fid: data = fid.read() assert self.pkg_names["extension"] in data pkg = pjoin(app_dir, "static", "package.json") with open(pkg) as fid: data = json.load(fid) assert sorted(data["jupyterlab"]["extensions"].keys()) == [ "@jupyterlab/application-extension", "@jupyterlab/apputils-extension", "@jupyterlab/mock-extension", ] assert data["jupyterlab"]["mimeExtensions"] == {} for pkg in data["jupyterlab"]["singletonPackages"]: if pkg.startswith("@jupyterlab/"): assert pkg in singletons def test_disable_extension(self): options = AppOptions(app_dir=self.tempdir()) assert install_extension(self.mock_extension, app_options=options) is True assert disable_extension(self.pkg_names["extension"], app_options=options) is True info = get_app_info(app_options=options) name = self.pkg_names["extension"] assert info["disabled"].get(name) is True assert not check_extension(name, app_options=options) assert check_extension(name, installed=True, app_options=options) assert disable_extension("@jupyterlab/notebook-extension", app_options=options) is True info = get_app_info(app_options=options) assert info["disabled"].get("@jupyterlab/notebook-extension") is True assert not check_extension("@jupyterlab/notebook-extension", app_options=options) assert check_extension( "@jupyterlab/notebook-extension", installed=True, app_options=options ) assert info["disabled"].get(name) is True assert not check_extension(name, app_options=options) assert check_extension(name, installed=True, app_options=options) def test_enable_extension(self): options = AppOptions(app_dir=self.tempdir()) assert install_extension(self.mock_extension, app_options=options) is True assert disable_extension(self.pkg_names["extension"], app_options=options) is True assert enable_extension(self.pkg_names["extension"], app_options=options) is True info = get_app_info(app_options=options) assert "@jupyterlab/notebook-extension" not in info["disabled"] name = self.pkg_names["extension"] assert info["disabled"].get(name, False) is False assert check_extension(name, app_options=options) assert disable_extension("@jupyterlab/notebook-extension", app_options=options) is True assert check_extension(name, app_options=options) assert not check_extension("@jupyterlab/notebook-extension", app_options=options) def test_lock_unlock_extension(self): options = AppOptions(app_dir=self.tempdir()) assert install_extension(self.mock_extension, app_options=options) is True name = self.pkg_names["extension"] info = get_app_info(app_options=options) assert info["locked"].get(name, False) is False lock_extension(self.pkg_names["extension"], app_options=options) info = get_app_info(app_options=options) assert info["locked"].get(name, False) is True unlock_extension(self.pkg_names["extension"], app_options=options) info = get_app_info(app_options=options) assert info["locked"].get(name, False) is False @pytest.mark.slow def test_build_check(self): # Do the initial build. assert build_check() assert install_extension(self.mock_extension) is True assert link_package(self.mock_package) is True build() assert not build_check() # Check installed extensions. assert install_extension(self.mock_mimeextension) is True assert build_check() assert uninstall_extension(self.pkg_names["mimeextension"]) is True assert not build_check() # Check local extensions. 
pkg_path = pjoin(self.mock_extension, "package.json") with open(pkg_path) as fid: data = json.load(fid) with open(pkg_path, "rb") as fid: orig = fid.read() data["foo"] = "bar" with open(pkg_path, "w") as fid: json.dump(data, fid) assert build_check() assert build_check() with open(pkg_path, "wb") as fid: fid.write(orig) assert not build_check() # Check linked packages. pkg_path = pjoin(self.mock_package, "index.js") with open(pkg_path, "rb") as fid: orig = fid.read() with open(pkg_path, "wb") as fid: fid.write(orig + b'\nconsole.log("hello");') assert build_check() assert build_check() with open(pkg_path, "wb") as fid: fid.write(orig) assert not build_check() def test_compatibility(self): assert _test_overlap("^0.6.0", "^0.6.1") assert _test_overlap(">0.1", "0.6") assert _test_overlap("~0.5.0", "~0.5.2") assert _test_overlap("0.5.2", "^0.5.0") assert not _test_overlap("^0.5.0", "^0.6.0") assert not _test_overlap("~1.5.0", "^1.6.0") assert _test_overlap("*", "0.6") is None assert _test_overlap("<0.6", "0.1") is None assert _test_overlap("^1 || ^2", "^1") assert _test_overlap("^1 || ^2", "^2") assert _test_overlap("^1", "^1 || ^2") assert _test_overlap("^2", "^1 || ^2") assert _test_overlap("^1 || ^2", "^2 || ^3") assert not _test_overlap("^1 || ^2", "^3 || ^4") assert not _test_overlap("^2", "^1 || ^3") def test_compare_ranges(self): assert _compare_ranges("^1 || ^2", "^1") == 0 assert _compare_ranges("^1 || ^2", "^2 || ^3") == 0 assert _compare_ranges("^1 || ^2", "^3 || ^4") == 1 assert _compare_ranges("^3 || ^4", "^1 || ^2") == -1 assert _compare_ranges("^2 || ^3", "^1 || ^4") is None def test_install_compatible(self): core_data = _get_default_core_data() current_app_dep = core_data["dependencies"]["@jupyterlab/application"] def _gen_dep(ver): return {"dependencies": {"@jupyterlab/application": ver}} def _mock_metadata(registry, name, logger): assert name == "mockextension" return { "name": name, "versions": { "0.9.0": _gen_dep(current_app_dep), "1.0.0": _gen_dep(current_app_dep), "1.1.0": _gen_dep(current_app_dep), "2.0.0": _gen_dep("^2000.0.0"), "2.0.0-b0": _gen_dep(current_app_dep), "2.1.0-b0": _gen_dep("^2000.0.0"), "2.1.0": _gen_dep("^2000.0.0"), }, } def _mock_extract(self, source, tempdir, *args, **kwargs): data = { "name": source, "version": "2.1.0", "jupyterlab": {"extension": True}, "jupyterlab_extracted_files": ["index.js"], } data.update(_gen_dep("^2000.0.0")) info = { "source": source, "is_dir": False, "data": data, "name": source, "version": data["version"], "filename": "mockextension.tgz", "path": pjoin(tempdir, "mockextension.tgz"), } return info class Success(Exception): # noqa pass def _mock_install(self, name, *args, **kwargs): assert name in ("mockextension", "mockextension@1.1.0") if name == "mockextension@1.1.0": raise Success() return orig_install(self, name, *args, **kwargs) p1 = patch.object(commands, "_fetch_package_metadata", _mock_metadata) p2 = patch.object(commands._AppHandler, "_extract_package", _mock_extract) p3 = patch.object(commands._AppHandler, "_install_extension", _mock_install) with p1, p2: orig_install = commands._AppHandler._install_extension with p3, pytest.raises(Success): assert install_extension("mockextension") is True def test_update_single(self): installed = [] def _mock_install(self, name, *args, **kwargs): installed.append(name[0] + name[1:].split("@")[0]) return {"name": name, "is_dir": False, "path": "foo/bar/" + name} def _mock_latest(self, name): return "10000.0.0" p1 = patch.object(commands._AppHandler, "_install_extension", 
_mock_install) p2 = patch.object(commands._AppHandler, "_latest_compatible_package_version", _mock_latest) assert install_extension(self.mock_extension) is True assert install_extension(self.mock_mimeextension) is True with p1, p2: assert update_extension(self.pkg_names["extension"]) is True assert installed == [self.pkg_names["extension"]] def test_update_missing_extension(self): assert update_extension("foo") is False def test_update_multiple(self): installed = [] def _mock_install(self, name, *args, **kwargs): installed.append(name[0] + name[1:].split("@")[0]) return {"name": name, "is_dir": False, "path": "foo/bar/" + name} def _mock_latest(self, name): return "10000.0.0" p1 = patch.object(commands._AppHandler, "_install_extension", _mock_install) p2 = patch.object(commands._AppHandler, "_latest_compatible_package_version", _mock_latest) install_extension(self.mock_extension) install_extension(self.mock_mimeextension) with p1, p2: assert update_extension(self.pkg_names["extension"]) is True assert update_extension(self.pkg_names["mimeextension"]) is True assert installed == [self.pkg_names["extension"], self.pkg_names["mimeextension"]] def test_update_all(self): updated = [] def _mock_update(self, name, *args, **kwargs): updated.append(name[0] + name[1:].split("@")[0]) return True original_app_info = commands._AppHandler._get_app_info def _mock_app_info(self): info = original_app_info(self) info["local_extensions"] = [] return info assert install_extension(self.mock_extension) is True assert install_extension(self.mock_mimeextension) is True p1 = patch.object(commands._AppHandler, "_update_extension", _mock_update) # local packages are not updated, so mock them as non-local: p2 = patch.object(commands._AppHandler, "_get_app_info", _mock_app_info) with p1, p2: assert update_extension(None, all_=True) is True assert sorted(updated) == [self.pkg_names["extension"], self.pkg_names["mimeextension"]] def test_load_extension(jp_serverapp, make_lab_app): app = make_lab_app() stderr = sys.stderr # sys.stderr = self.devnull app._link_jupyter_server_extension(jp_serverapp) app.initialize() sys.stderr = stderr
TestExtension
python
Netflix__metaflow
metaflow/plugins/argo/exit_hooks.py
{ "start": 699, "end": 1406 }
class ____(JsonSerializable):
    # https://argoproj.github.io/argo-workflows/fields/#template
    def __init__(self, name):
        tree = lambda: defaultdict(tree)
        self.name = name
        self.payload = tree()
        self.payload["name"] = name

    def http(self, http):
        self.payload["http"] = http.to_json()
        return self

    def script(self, script):
        self.payload["script"] = script.to_json()
        return self

    def container(self, container):
        self.payload["container"] = container
        return self

    def service_account_name(self, service_account_name):
        self.payload["serviceAccountName"] = service_account_name
        return self
_Template
python
sqlalchemy__sqlalchemy
test/orm/test_transaction.py
{ "start": 72707, "end": 73975 }
class ____: """Test the "join into an external transaction" examples""" def setup_test(self): self.engine = engines.testing_engine( options={"use_reaper": False, "sqlite_savepoint": True} ) self.connection = self.engine.connect() self.metadata = MetaData() self.table = Table( "t1", self.metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), ) with self.connection.begin(): self.table.create(self.connection, checkfirst=True) self.setup_session() def teardown_test(self): self.teardown_session() with self.connection.begin(): self._assert_count(0) with self.connection.begin(): self.table.drop(self.connection) self.connection.close() def test_something(self, connection): A = self.A a1 = A() self.session.add(a1) self.session.commit() self._assert_count(1) def _assert_count(self, count): result = self.connection.scalar( select(func.count()).select_from(self.table) ) eq_(result, count)
JoinIntoAnExternalTransactionFixture
python
fluentpython__example-code-2e
24-class-metaprog/slots/slots_timing.py
{ "start": 24, "end": 117 }
class ____:
    def __init_subclass__(subclass):
        subclass.__slots__ = ('x', 'y')
Wrong
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_G.py
{ "start": 3960, "end": 5244 }
class ____(Benchmark): r""" Griewank objective function. This class defines the Griewank global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Griewank}}(x) = \frac{1}{4000}\sum_{i=1}^n x_i^2 - \prod_{i=1}^n\cos\left(\frac{x_i}{\sqrt{i}}\right) + 1 Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-600, 600]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1, ..., n` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ change_dimensionality = True def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.custom_bounds = [(-50, 50), (-50, 50)] self.global_optimum = [[0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 i = arange(1., np.size(x) + 1.) return sum(x ** 2 / 4000) - prod(cos(x / sqrt(i))) + 1
Griewank
python
pytorch__pytorch
test/test_cuda.py
{ "start": 195708, "end": 207988 }
class ____(TestCase): @property def expandable_segments(self): return EXPANDABLE_SEGMENTS def checkCheckpointedBlock(self, before_block, after_block): for field in ("size", "state"): self.assertEqual(before_block[field], after_block[field]) def checkCheckpointedState(self, before_segments, after_segments): # after may contain additional segments, but all of the segments in before # should be exactly equivalent to after after_ptr_to_segment = { segment["address"]: segment for segment in after_segments } for before_segment in before_segments: self.assertTrue(before_segment["address"] in after_ptr_to_segment) after_segment = after_ptr_to_segment[before_segment["address"]] for field in ( "device", "total_size", "allocated_size", "active_size", "segment_type", "segment_pool_id", ): self.assertEqual(before_segment[field], after_segment[field]) self.assertEqual( len(before_segment["blocks"]), len(after_segment["blocks"]) ) for before_block, after_block in zip( before_segment["blocks"], after_segment["blocks"] ): self.checkCheckpointedBlock(before_block, after_block) @staticmethod def setCheckpointPoolState( device, state, stale_storages_ptr, storages_deleters=None ): stale_storages_ptr = [t.untyped_storage()._cdata for t in stale_storages_ptr] storages_deleters = ( [] if not storages_deleters else [t.untyped_storage()._cdata for t in storages_deleters] ) torch._C._cuda_setCheckpointPoolState( device, state, stale_storages_ptr, storages_deleters ) def checkFunction(self, fn, inputs, pool=None): graph, outputs = cudagraphify(fn, inputs, pool=pool) pool_id = graph.pool() device = outputs[0].device.index segments_before_checkpoint = get_cudagraph_segments(pool_id) state = torch._C._cuda_getCheckpointState(device, pool_id) self.setCheckpointPoolState(device, state, [], []) self.checkCheckpointedState( segments_before_checkpoint, get_cudagraph_segments(pool_id) ) def setUp(self): super().setUp() self.segment_length = len(get_all_cudagraph_segments()) def tearDown(self): torch.cuda.synchronize() gc.collect() torch.cuda.empty_cache() self.assertEqual(len(get_all_cudagraph_segments()), self.segment_length) super().tearDown() def test_simple(self): def foo(): x = torch.zeros([SMALL_SIZE * 8], device="cuda", dtype=torch.uint8) x = x + x x1 = int8_cuda(SMALL_SIZE) + int8_cuda(SMALL_SIZE) + int8_cuda(SMALL_SIZE) y = int8_cuda(SMALL_SIZE) + x1 z = int8_cuda(SMALL_SIZE) return x, y, z self.checkFunction(foo, []) def test_allocated_in_middle_of_segment(self): def foo(): small_buffers = [int8_cuda(MIN_BLOCK_SIZE) for _ in range(11)] return small_buffers[5].add_(2) self.checkFunction(foo, []) def test_multiple_middle_allocations(self): def foo(): small_buffers = [int8_cuda(MIN_BLOCK_SIZE) for _ in range(11)] return small_buffers[5], small_buffers[8] self.checkFunction(foo, []) def test_middle_allocations_contiguous(self): def foo(): small_buffers = [int8_cuda(MIN_BLOCK_SIZE) for _ in range(11)] return small_buffers[5], small_buffers[6] self.checkFunction(foo, []) def test_additional_free_following_checkpoint(self): def foo(): return (int8_cuda(MIN_BLOCK_SIZE),) def foo2(): return (int8_cuda(MIN_BLOCK_SIZE),) graph, outputs = cudagraphify(foo, []) pool_id = graph.pool() segments_before_checkpoint = get_cudagraph_segments(pool_id) state = torch._C._cuda_getCheckpointState(outputs[0].device.index, pool_id) graph2, outputs2 = cudagraphify(foo2, [], pool=graph.pool()) self.setCheckpointPoolState(outputs[0].device.index, state, outputs2, []) del outputs2 self.checkCheckpointedState( segments_before_checkpoint, 
get_cudagraph_segments(pool_id) ) # TODO: re-enable # def test_additional_free_error(self): # def foo(): # return int8_cuda(MIN_BLOCK_SIZE), # def foo2(): # return int8_cuda(MIN_BLOCK_SIZE), # graph, outputs = cudagraphify(foo, []) # pool_id = graph.pool() # segments_before_checkpoint = get_cudagraph_segments(pool_id) # state = torch._C._cuda_getCheckpointState(outputs[0].device.index, pool_id) # graph2, outputs2 = cudagraphify(foo2, [], pool=graph.pool()) # with self.assertRaisesRegex(Exception, "being manually freed must be passed"): # self.setCheckpointPoolState(outputs[0].device.index, state, [], []) def test_tensor_dies_after_checkpoint(self): def foo(): return int8_cuda(MIN_BLOCK_SIZE), int8_cuda(MIN_BLOCK_SIZE) graph, outputs = cudagraphify(foo, []) pool_id = graph.pool() device = outputs[0].device.index segments_before_checkpoint = get_cudagraph_segments(pool_id) state = torch._C._cuda_getCheckpointState(outputs[0].device.index, pool_id) output_data_ptrs = [output.data_ptr() for output in outputs] del outputs self.setCheckpointPoolState(device, state, [], []) self.assertEqual(live_blocks(pool_id), 2) torch._C._cuda_cudaCachingAllocator_raw_delete(output_data_ptrs[0]) self.assertEqual(live_blocks(pool_id), 1) torch._C._cuda_cudaCachingAllocator_raw_delete(output_data_ptrs[1]) self.assertEqual(live_blocks(pool_id), 0) def test_assigning_back_deleter_fns_to_tensor(self): def foo(x): return ( int8_cuda(SMALL_BUFFER) + x, int8_cuda(SMALL_BUFFER) + x, int8_cuda(LARGE_BUFFER) + x, ) inp = torch.tensor([1], device="cuda") graph, outputs = cudagraphify(foo, [inp]) pool_id = graph.pool() graph.replay() device = outputs[0].device.index for i in range(len(outputs)): self.assertEqual(outputs[i].mean(dtype=torch.float), 2) state = torch._C._cuda_getCheckpointState(outputs[0].device.index, pool_id) output_ptrs = [output.untyped_storage().data_ptr() for output in outputs] ten_metadata = [tensor_metadata(t) for t in outputs] self.assertEqual(live_blocks(pool_id), 3) del outputs self.assertEqual(live_blocks(pool_id), 0) reconstructed_tensors = [ reconstruct_from_tensor_metadata(metadata) for metadata in ten_metadata ] for i in range(len(reconstructed_tensors)): self.assertEqual(reconstructed_tensors[i].mean(dtype=torch.float), 2) inp.add_(1) graph.replay() for i in range(len(reconstructed_tensors)): self.assertEqual(reconstructed_tensors[i].mean(dtype=torch.float), 3) self.setCheckpointPoolState( device, state, [], [reconstructed_tensors[0], reconstructed_tensors[1]] ) self.assertEqual(live_blocks(pool_id), 3) reconstructed_tensors[0] = None self.assertEqual(live_blocks(pool_id), 2) reconstructed_tensors[1] = None self.assertEqual(live_blocks(pool_id), 1) # should not change, we did not pass it in to swap data ptrs reconstructed_tensors[2] = None self.assertEqual(live_blocks(pool_id), 1) torch._C._cuda_cudaCachingAllocator_raw_delete(output_ptrs[2]) self.assertEqual(live_blocks(pool_id), 0) @skipIfNoTorchVision def test_resnet(self): import torchvision m = torchvision.models.resnet50() m.eval() m = m.cuda() inp = torch.rand([1, 3, 255, 255], device="cuda") self.checkFunction(m, [inp]) def test_check_pool_live_allocations(self): def foo(): return torch.ones([4], device="cuda") pool = torch.cuda.graph_pool_handle() graph, outputs = cudagraphify(foo, [], pool=pool) index = outputs[0].device.index def check(live_dps): return torch._C._cuda_checkPoolLiveAllocations(index, pool, live_dps) self.assertTrue(check({outputs[0].data_ptr()})) self.assertFalse(check({outputs[0].data_ptr(), 0})) 
self.assertFalse(check(set())) del outputs self.assertTrue(check(set())) def test_allocate_in_thread_to_pool(self): def foo(): return torch.rand([4], device="cuda") pool = torch.cuda.graph_pool_handle() graph, outputs = cudagraphify(foo, [], pool=pool) device = outputs[0].device.index del outputs @contextlib.contextmanager def _use_cuda_memory_pool_manager(device, mem_pool): """ Context manager to use cuda graph pool for new allocations. If you use this manager all cudagraph tensors in use should be reflected in the allocator or they will be overwritten. existing_graph should already have been used in a capture, and the mem_pool must already exist. """ torch.cuda.synchronize() stream = torch.cuda.Stream() stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(stream), torch.device(device): torch._C._cuda_beginAllocateCurrentThreadToPool(device, mem_pool) try: yield finally: torch._C._cuda_endAllocateToPool(device, mem_pool) torch._C._cuda_releasePool(device, mem_pool) torch.cuda.current_stream().wait_stream(stream) segments = get_cudagraph_segments(pool) self.assertEqual(len(get_cudagraph_segments(pool)), 1) def use_pool(): def alloc_three(): a = int8_cuda(LARGE_BUFFER) b = int8_cuda(LARGE_BUFFER) c = a + b with _use_cuda_memory_pool_manager(device, pool): # three allocations for _ in range(10): alloc_three() # three more allocations not in pool alloc_three() def no_pool(): # two allocations for _ in range(10): a = int8_cuda(LARGE_BUFFER) b = int8_cuda(LARGE_BUFFER) del a, b graph_thread = threading.Thread(target=use_pool) no_graph_thread = threading.Thread(target=no_pool) graph_thread.start() no_graph_thread.start() graph_thread.join() no_graph_thread.join() self.assertEqual( len(get_cudagraph_segments(pool)), 2 if self.expandable_segments else 4 ) del graph torch.cuda.synchronize() gc.collect() torch.cuda.empty_cache() self.assertEqual(len(get_cudagraph_segments(pool)), 0) def test_no_triton_on_import(self): """Test that Triton is not imported on first GPU use""" script = "import sys; import torch; torch.rand(2, device='cuda'); print('triton' in sys.modules)" rc = ( subprocess.check_output( [sys.executable, "-c", script], # On Windows, opening the subprocess with the default CWD makes `import torch` # fail, so just set CWD to this script's directory cwd=os.path.dirname(os.path.realpath(__file__)), ) .strip() .decode("ascii") ) self.assertEqual(rc, "False", "Triton was imported when importing torch!") @unittest.skipIf(not TEST_CUDA, "CUDA not available, skipping tests")
TestBlockStateAbsorption
python
HypothesisWorks__hypothesis
hypothesis-python/tests/cover/test_database_backend.py
{ "start": 24996, "end": 28238 }
class ____(ExampleDatabase): def __init__(self): super().__init__() self.starts = 0 self.ends = 0 def save(self, key: bytes, value: bytes) -> None: ... def fetch(self, key: bytes) -> Iterable[bytes]: ... def delete(self, key: bytes, value: bytes) -> None: ... def _start_listening(self): self.starts += 1 def _stop_listening(self): self.ends += 1 def test_start_end_listening(): db = TracksListens() def listener1(event): pass def listener2(event): pass assert db.starts == 0 db.add_listener(listener1) assert db.starts == 1 db.add_listener(listener2) assert db.starts == 1 assert db.ends == 0 db.remove_listener(listener2) assert db.ends == 0 db.remove_listener(listener1) assert db.ends == 1 db.clear_listeners() assert db.ends == 1 @checks_deprecated_behaviour def test_deprecated_example_database_path(tmp_path): ExampleDatabase(tmp_path) @checks_deprecated_behaviour def test_deprecated_example_database_memory(): ExampleDatabase(":memory:") @checks_deprecated_behaviour def test_deprecated_example_database_no_args(): ExampleDatabase() @pytest.mark.parametrize( "db1, db2", [ (DirectoryBasedExampleDatabase("a"), DirectoryBasedExampleDatabase("a")), ( MultiplexedDatabase( DirectoryBasedExampleDatabase("a"), DirectoryBasedExampleDatabase("b") ), MultiplexedDatabase( DirectoryBasedExampleDatabase("a"), DirectoryBasedExampleDatabase("b") ), ), ( ReadOnlyDatabase(DirectoryBasedExampleDatabase("a")), ReadOnlyDatabase(DirectoryBasedExampleDatabase("a")), ), ( GitHubArtifactDatabase("owner1", "repo1"), GitHubArtifactDatabase("owner1", "repo1"), ), ], ) def test_database_equal(db1, db2): assert db1 == db2 @pytest.mark.parametrize( "db1, db2", [ (InMemoryExampleDatabase(), InMemoryExampleDatabase()), (InMemoryExampleDatabase(), DirectoryBasedExampleDatabase("a")), (BackgroundWriteDatabase(InMemoryExampleDatabase()), InMemoryExampleDatabase()), (DirectoryBasedExampleDatabase("a"), DirectoryBasedExampleDatabase("b")), ( ReadOnlyDatabase(DirectoryBasedExampleDatabase("a")), ReadOnlyDatabase(DirectoryBasedExampleDatabase("b")), ), ( GitHubArtifactDatabase("owner1", "repo1"), GitHubArtifactDatabase("owner2", "repo2"), ), ], ) def test_database_not_equal(db1, db2): assert db1 != db2 @skipif_threading # race in tmp_path def test_directory_db_removes_empty_dirs(tmp_path): db = DirectoryBasedExampleDatabase(tmp_path) db.save(b"k1", b"v1") db.save(b"k1", b"v2") assert db._key_path(b"k1").exists() assert set(db.fetch(db._metakeys_name)) == {b"k1"} db.delete(b"k1", b"v1") assert db._key_path(b"k1").exists() assert set(db.fetch(db._metakeys_name)) == {b"k1"} db.delete(b"k1", b"v2") assert not db._key_path(b"k1").exists() assert set(db.fetch(db._metakeys_name)) == set()
TracksListens
python
Pylons__pyramid
src/pyramid/events.py
{ "start": 7302, "end": 8098 }
class ____: """An instance of this class is emitted as an :term:`event` when the :meth:`pyramid.config.Configurator.make_wsgi_app` is called. The instance has an attribute, ``app``, which is an instance of the :term:`router` that will handle WSGI requests. This class implements the :class:`pyramid.interfaces.IApplicationCreated` interface. .. note:: For backwards compatibility purposes, this class can also be imported as :class:`pyramid.events.WSGIApplicationCreatedEvent`. This was the name of the event class before :app:`Pyramid` 1.0. """ def __init__(self, app): self.app = app self.object = app WSGIApplicationCreatedEvent = ApplicationCreated # b/c (as of 1.0) @implementer(IBeforeRender)
ApplicationCreated
python
doocs__leetcode
solution/1500-1599/1561.Maximum Number of Coins You Can Get/Solution.py
{ "start": 0, "end": 136 }
class ____:
    def maxCoins(self, piles: List[int]) -> int:
        piles.sort()
        return sum(piles[len(piles) // 3 :][::2])
Solution
python
google__jax
tests/multiprocess/multihost_utils_test.py
{ "start": 929, "end": 18691 }
class ____(jt_multiprocess.MultiProcessTest): def test_process_allgather_stacked(self): elems_per_host = 4 num_processes = jax.process_count() x = jnp.ones((4,)).reshape((2, 2)) out = multihost_utils.process_allgather(x, tiled=False) self.assertEqual(out.shape, (num_processes, 2, 2)) np.testing.assert_array_equal(out, np.stack([x] * num_processes)) x = jnp.ones((64,)).reshape((8, 4, 2)) out = multihost_utils.process_allgather(x, tiled=False) self.assertEqual(out.shape, (num_processes, 8, 4, 2)) np.testing.assert_array_equal(out, np.stack([x] * num_processes)) x = np.arange(elems_per_host) + jax.process_index() * elems_per_host out = multihost_utils.process_allgather(x, tiled=False) self.assertEqual(out.shape, (num_processes, 4)) np.testing.assert_array_equal( out, np.arange(elems_per_host * jax.process_count()).reshape( num_processes, elems_per_host ), ) x = np.array(0) + jax.process_index() * elems_per_host out = multihost_utils.process_allgather(x, tiled=False) self.assertEqual(out.shape, (num_processes,)) np.testing.assert_array_equal( out, np.arange(num_processes) * elems_per_host ) def test_process_allgather_concatenated(self): elems_per_host = 4 num_processes = jax.process_count() x = jnp.ones((4,)).reshape((2, 2)) out = multihost_utils.process_allgather(x, tiled=True) self.assertEqual(out.shape, (2 * num_processes, 2)) np.testing.assert_array_equal(out, np.concatenate([x] * num_processes)) x = jnp.ones((64,)).reshape((8, 4, 2)) out = multihost_utils.process_allgather(x, tiled=True) self.assertEqual(out.shape, (8 * num_processes, 4, 2)) np.testing.assert_array_equal(out, np.concatenate([x] * num_processes)) x = np.arange(elems_per_host) + jax.process_index() * elems_per_host out = multihost_utils.process_allgather(x, tiled=True) self.assertEqual(out.shape, (elems_per_host * num_processes,)) np.testing.assert_array_equal( out, np.arange(elems_per_host * jax.process_count()) ) x = np.array(0) + jax.process_index() * elems_per_host out = multihost_utils.process_allgather(x, tiled=True) self.assertEqual(out.shape, (num_processes,)) np.testing.assert_array_equal( out, np.arange(num_processes) * elems_per_host ) def test_process_allgather_set_mesh(self): devices = jax.devices()[1:] + [jax.devices()[0]] user_mesh = jax.sharding.Mesh( np.array(devices).reshape(jax.device_count(), 1, 1), ('x', 'y', 'z'), ) x = jnp.ones((4,)).reshape((2, 2)) # process_allgather should not be impacted by any global mesh context. with jax.set_mesh(user_mesh): num_processes = jax.process_count() out = multihost_utils.process_allgather(x, tiled=True) self.assertEqual(out.shape, (2 * num_processes, 2)) np.testing.assert_array_equal(out, np.concatenate([x] * num_processes)) def test_broadcast_one_to_all(self): elems_per_host = 4 x = np.arange(elems_per_host) + jax.process_index() * elems_per_host out = multihost_utils.broadcast_one_to_all((x, x)) jax.tree.map( lambda x: np.testing.assert_array_equal( # pylint: disable=g-long-lambda x, np.arange(elems_per_host) ), out, ) x = np.array(0) + jax.process_index() * elems_per_host out = multihost_utils.broadcast_one_to_all(x) np.testing.assert_array_equal(out, np.array(0)) def test_broadcast_one_to_all_set_mesh(self): devices = jax.devices()[1:] + [jax.devices()[0]] user_mesh = jax.sharding.Mesh( np.array(devices).reshape(jax.device_count(), 1, 1), ('x', 'y', 'z'), ) # broadcast_one_to_all should not be impacted by any global mesh context. 
with jax.set_mesh(user_mesh): elems_per_host = 4 x = np.arange(elems_per_host) + jax.process_index() * elems_per_host out = multihost_utils.broadcast_one_to_all((x, x)) jax.tree.map( lambda x: np.testing.assert_array_equal( # pylint: disable=g-long-lambda x, np.arange(elems_per_host) ), out, ) x = np.array(0) + jax.process_index() * elems_per_host out = multihost_utils.broadcast_one_to_all(x) np.testing.assert_array_equal(out, np.array(0)) def test_broadcast_one_to_all_uint8(self): elems_per_host = 4 x = (np.arange(elems_per_host, dtype=jnp.uint8) + jax.process_index() * elems_per_host) out = multihost_utils.broadcast_one_to_all((x, x)) jax.tree.map( lambda x: np.testing.assert_array_equal( # pylint: disable=g-long-lambda x, np.arange(elems_per_host, dtype=jnp.uint8) ), out, ) jax.tree.map(lambda o: self.assertEqual(o.dtype, jnp.uint8), out) x = np.array(0, dtype=jnp.uint8) + jax.process_index() * elems_per_host out = multihost_utils.broadcast_one_to_all(x) self.assertEqual(out.dtype, jnp.uint8) np.testing.assert_array_equal(out, np.array(0, dtype=jnp.uint8)) def test_sync_global_devices(self): multihost_utils.sync_global_devices('test sync global devices') def test_sync_global_devices_error(self): # All processes should raise. with self.assertRaises(AssertionError): if jax.process_index() == 0: multihost_utils.sync_global_devices('test message') else: multihost_utils.sync_global_devices('test message2') def test_sync_global_devices_mesh_context_manager(self): global_mesh = jtu.create_mesh((2, 2), ('x', 'y'), iota_order=True) with global_mesh: multihost_utils.sync_global_devices('test sync global devices') def test_assert_equal_global(self): mesh = jtu.create_mesh((8,), 'x') shape = (8, 2) np_inp = np.arange(math.prod(shape)).reshape(shape) inp = jax.make_array_from_callback( shape, jax.NamedSharding(mesh, P()), lambda idx: np_inp[idx]) multihost_utils.assert_equal(inp) def test_process_allgather_cache_hit(self): x = jnp.ones((4,)).reshape(2, 2) y = jnp.arange(4.0).reshape(2, 2) num_processes = jax.process_count() with jtu.count_pjit_cpp_cache_miss() as count: out = multihost_utils.process_allgather(x, tiled=False) out2 = multihost_utils.process_allgather(y, tiled=False) # Cpp cache hit. 
self.assertEqual(count(), 1) self.assertEqual(out.shape, (num_processes, 2, 2)) np.testing.assert_array_equal(out, np.stack([x] * num_processes)) self.assertEqual(out2.shape, (num_processes, 2, 2)) np.testing.assert_array_equal(out2, np.stack([y] * num_processes)) def test_reshard(self): mesh1 = jtu.create_mesh((8,), 'x') mesh2 = jax.sharding.Mesh( np.asarray(jax.devices()[::-1]).reshape(4, 2), ('x', 'y') ) shape = (8, 2) np_inp = np.arange(math.prod(shape)).reshape(shape) inp = jax.make_array_from_callback( shape, jax.sharding.NamedSharding(mesh1, P('x')), lambda idx: np_inp[idx], ) out = jax.device_put(inp, jax.sharding.NamedSharding(mesh2, P('x', 'y'))) self.assertIsInstance(out.sharding, jax.sharding.NamedSharding) for s in out.addressable_shards: np.testing.assert_array_equal(s.data, np_inp[s.index]) @parameterized.named_parameters( ('inp_replicated', P(), P('x', 'y')), ('target_replicated', P('x'), P()), ('both_replicated', P(), P()), ) def test_reshard_replicated_sharding(self, inp_spec, target_spec): mesh1 = jtu.create_mesh((8,), 'x') mesh2 = jax.sharding.Mesh( np.asarray(jax.devices()[::-1]).reshape(4, 2), ('x', 'y') ) shape = (8, 2) np_inp = np.arange(math.prod(shape)).reshape(shape) inp = jax.make_array_from_callback( shape, jax.sharding.NamedSharding(mesh1, inp_spec), lambda idx: np_inp[idx], ) out = jax.device_put(inp, jax.sharding.NamedSharding(mesh2, target_spec)) self.assertIsInstance(out.sharding, jax.sharding.NamedSharding) for s in out.addressable_shards: np.testing.assert_array_equal(s.data, np_inp[s.index]) def test_reshard_same_device_assignment(self): mesh1 = jtu.create_mesh((4, 2), ('x', 'y')) mesh2 = jtu.create_mesh((2, 4), ('x', 'y')) shape = (8, 2) np_inp = np.arange(math.prod(shape)).reshape(shape) inp = jax.make_array_from_callback( shape, jax.sharding.NamedSharding(mesh1, P('x', 'y')), lambda idx: np_inp[idx], ) out = jax.device_put(inp, jax.sharding.NamedSharding(mesh2, P('y'))) self.assertIsInstance(out.sharding, jax.sharding.NamedSharding) for s in out.addressable_shards: np.testing.assert_array_equal(s.data, np_inp[s.index]) def test_reshard_pytree(self): mesh1 = jtu.create_mesh((8,), 'x') dev = jax.devices() if len(dev) < 8: raise unittest.SkipTest('Test requires 8 devices') dev_list = [dev[0], dev[7], dev[6], dev[2], dev[4], dev[3], dev[5], dev[1]] mesh2 = jax.sharding.Mesh( np.asarray(dev_list).reshape(2, 2, 2), ('x', 'y', 'z') ) shape = (8, 2) np_inp = np.arange(math.prod(shape)).reshape(shape) inp = jax.make_array_from_callback( shape, jax.sharding.NamedSharding(mesh1, P('x')), lambda idx: np_inp[idx], ) out1, out2 = jax.device_put( (inp, inp), jax.sharding.NamedSharding(mesh2, P('x', 'y')) ) for out in (out1, out2): self.assertIsInstance(out.sharding, jax.sharding.NamedSharding) for s in out.addressable_shards: np.testing.assert_array_equal(s.data, np_inp[s.index]) def test_reshard_different_devices(self): if jtu.is_device_tpu('5', 'e'): raise unittest.SkipTest('Test fails on v5e') dev = jax.devices() if len(dev) < 8: raise unittest.SkipTest('Test requires 8 devices') mesh1 = jax.sharding.Mesh([dev[0], dev[2], dev[4], dev[6]], 'x') mesh2 = jax.sharding.Mesh(jax.devices(), 'x') shape = (8, 2) np_inp = np.arange(math.prod(shape)).reshape(shape) inp = jax.make_array_from_callback( shape, jax.sharding.NamedSharding(mesh1, P('x')), lambda idx: np_inp[idx], ) with self.assertRaisesRegex( ValueError, 'input and target sharding should have the same set of devices', ): jax.device_put(inp, jax.sharding.NamedSharding(mesh2, P('x'))) def 
test_process_allgather_array_not_fully_addressable(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) global_input_shape = (8, 2) global_input_data = np.arange(math.prod(global_input_shape)).reshape( global_input_shape ) arr = jax.make_array_from_callback( global_input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y')), lambda idx: global_input_data[idx], ) out = multihost_utils.process_allgather(arr, tiled=True) np.testing.assert_array_equal(out, global_input_data) with self.assertRaisesRegex( ValueError, 'Gathering global non-fully-addressable arrays only supports' ' tiled=True'): multihost_utils.process_allgather(arr, tiled=False) def test_host_local_array_to_global_array_already_global(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) global_input_shape = (8, 2) global_input_data = np.arange(math.prod(global_input_shape)).reshape( global_input_shape ) arr = jax.make_array_from_callback( global_input_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y')), lambda idx: global_input_data[idx], ) out = multihost_utils.host_local_array_to_global_array( arr, global_mesh, P('x', 'y') ) self.assertEqual(id(arr), id(out)) def test_host_local_array_to_global_array_same_sharding_array(self): if jtu.is_device_tpu('5', 'e'): raise unittest.SkipTest('Test fails on v5e') global_mesh = jtu.create_mesh((4, 2), ('x', 'y'), iota_order=True) local_input_shape = (2, 2) elems_per_host = 4 local_input_data = ( jnp.arange(elems_per_host) + jax.process_index() * elems_per_host ).reshape(local_input_shape) arr = jax.make_array_from_callback( local_input_shape, jax.sharding.NamedSharding(global_mesh.local_mesh, P('x', 'y')), lambda idx: local_input_data[idx], ) out = multihost_utils.host_local_array_to_global_array( arr, global_mesh, P('x', 'y') ) expected_global_shape = (8, 2) self.assertEqual(out.shape, expected_global_shape) global_data = np.arange(math.prod(expected_global_shape)).reshape( expected_global_shape ) for a, o in zip(arr.addressable_shards, out.addressable_shards): self.assertEqual( a.data.unsafe_buffer_pointer(), o.data.unsafe_buffer_pointer() ) np.testing.assert_array_equal(o.data, global_data[o.index]) def test_host_local_to_global_reshard_committed_single_device_array(self): if jtu.is_device_tpu('5', 'e'): raise unittest.SkipTest('Test fails on v5e') global_mesh = jtu.create_mesh((4, 2), ('x', 'y'), iota_order=True) local_input_shape = (2, 2) elems_per_host = 4 local_input_data = ( jnp.arange(elems_per_host) + jax.process_index() * elems_per_host ).reshape(local_input_shape) arr = jax.make_array_from_callback( local_input_shape, jax.sharding.NamedSharding(global_mesh.local_mesh, P('x', 'y')), lambda idx: local_input_data[idx], ) out = multihost_utils.host_local_array_to_global_array( arr, global_mesh, P('x', 'y') ) expected_global_shape = (8, 2) self.assertEqual(out.shape, expected_global_shape) global_data = np.arange(math.prod(expected_global_shape)).reshape( expected_global_shape ) for a, o in zip(arr.addressable_shards, out.addressable_shards): self.assertEqual( a.data.unsafe_buffer_pointer(), o.data.unsafe_buffer_pointer() ) np.testing.assert_array_equal(o.data, global_data[o.index]) @jtu.ignore_warning(category=DeprecationWarning) def test_host_local_to_global_replicated(self): num_local_devices = jax.local_device_count() global_mesh = jax.sharding.Mesh(jax.devices(), axis_names=['x']) local_input_shape = (2, 2) local_input_data = jnp.arange(4).reshape(local_input_shape) out = multihost_utils.host_local_array_to_global_array( local_input_data, global_mesh, P() 
) expected_global_shape = (2, 2) self.assertEqual(out.shape, expected_global_shape) self.assertLen(out.addressable_shards, num_local_devices) # Array is accessible on every host. np.testing.assert_array_equal(out, local_input_data) @jtu.ignore_warning(category=DeprecationWarning) def test_host_local_to_global_locally_replicated(self): # Make an array which is locally replicated but sharded across hosts. num_processes = jax.process_count() num_local_devices = jax.local_device_count() global_mesh = jtu.create_mesh( (num_processes, num_local_devices), ('host', 'dev'), iota_order=True) local_input_shape = (2, 2) host_id = jax.process_index() local_input_data = jnp.arange(4).reshape(local_input_shape) * host_id out = multihost_utils.host_local_array_to_global_array( local_input_data, global_mesh, P('host', None)) global_data = np.concatenate([jnp.arange(4).reshape(local_input_shape) * i for i in range(num_processes)]) expected_global_shape = global_data.shape self.assertEqual(out.shape, expected_global_shape) self.assertLen(out.addressable_shards, num_local_devices) for o in out.addressable_shards: # Each shard has the same shape matching local_input_shape and same # global index. self.assertEqual(o.data.shape, local_input_shape) self.assertEqual(o.index, out.addressable_shards[0].index) np.testing.assert_array_equal(o.data, global_data[o.index]) def test_global_array_to_host_local_array(self): if jtu.is_device_tpu('5', 'e'): raise unittest.SkipTest('Test fails on v5e') global_mesh = jtu.create_mesh((4, 2), ('x', 'y'), iota_order=True) global_shape = (8, 2) global_data = np.arange(math.prod(global_shape)).reshape(global_shape) arr = jax.make_array_from_callback( global_shape, jax.sharding.NamedSharding(global_mesh, P('x', 'y')), lambda idx: global_data[idx], ) out = multihost_utils.global_array_to_host_local_array( arr, global_mesh, P('x') ) self.assertEqual(out.shape, (2, 2)) self.assertEqual( out.sharding, jax.sharding.NamedSharding(global_mesh.local_mesh, P('x')) ) local_input_data = (np.arange(4) + jax.process_index() * 4).reshape( out.shape ) for s in out.addressable_shards: np.testing.assert_array_equal(s.data, local_input_data) def test_host_local_array_to_global_array_none_error(self): global_mesh = jtu.create_mesh((4, 2), ('x', 'y')) global_shape = (8, 2) data = np.arange(math.prod(global_shape)).reshape(global_shape) with self.assertRaisesRegex( ValueError, '`None` is not a valid input to the pspecs argument' ): multihost_utils.host_local_array_to_global_array(data, global_mesh, None) with self.assertRaisesRegex( ValueError, '`None` is not a valid input to the pspecs argument' ): multihost_utils.global_array_to_host_local_array(data, global_mesh, None) def test_live_devices(self): with multihost_utils.live_devices(jax.devices()) as live: self.assertEqual(set(live), set(jax.devices())) if __name__ == '__main__': jt_multiprocess.main()
MultiHostUtilsTest
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/scoped_resources_builder.py
{ "start": 1392, "end": 5149 }
class ____( NamedTuple( "_ScopedResourcesBuilder", [("resource_instance_dict", Mapping[str, object]), ("contains_generator", bool)], ) ): """There are concepts in the codebase (e.g. ops, system storage) that receive only the resources that they have specified in required_resource_keys. ScopedResourcesBuilder is responsible for dynamically building a class with only those required resources and returning an instance of that class. """ def __new__( cls, resource_instance_dict: Optional[Mapping[str, object]] = None, contains_generator: bool = False, ): return super().__new__( cls, resource_instance_dict=check.opt_mapping_param( resource_instance_dict, "resource_instance_dict", key_type=str ), contains_generator=contains_generator, ) def build(self, required_resource_keys: Optional[AbstractSet[str]]) -> Resources: from dagster._config.pythonic_config import IAttachDifferentObjectToOpContext """We dynamically create a type that has the resource keys as properties, to enable dotting into the resources from a context. For example, given: resources = {'foo': <some resource>, 'bar': <some other resource>} then this will create the type Resource(namedtuple('foo bar')) and then binds the specified resources into an instance of this object, which can be consumed as, e.g., context.resources.foo. """ required_resource_keys = check.opt_set_param( required_resource_keys, "required_resource_keys", of_type=str ) # it is possible that the surrounding context does NOT have the required resource keys # because we are building a context for steps that we are not going to execute (e.g. in the # resume/retry case, in order to generate copy intermediates events) resource_instance_dict = { key: self.resource_instance_dict[key] for key in required_resource_keys if key in self.resource_instance_dict } resources_to_attach_to_context = { k: ( v.get_object_to_set_on_execution_context() if isinstance(v, IAttachDifferentObjectToOpContext) else v ) for k, v in resource_instance_dict.items() } # If any of the resources are generators, add the IContainsGenerator subclass to flag that # this is the case. if self.contains_generator: class _ScopedResourcesContainsGenerator( namedtuple( "_ScopedResourcesContainsGenerator", list(resources_to_attach_to_context.keys()), ), Resources, IContainsGenerator, ): @property def original_resource_dict(self) -> Mapping[str, object]: return resource_instance_dict return _ScopedResourcesContainsGenerator(**resources_to_attach_to_context) else: class _ScopedResources( namedtuple("_ScopedResources", list(resources_to_attach_to_context.keys())), Resources, ): @property def original_resource_dict(self) -> Mapping[str, object]: return resource_instance_dict return _ScopedResources(**resources_to_attach_to_context) @classmethod def build_empty(cls) -> Resources: """Returns an empty Resources object, equivalent to ScopedResourcesBuilder().build(None).""" return cls().build(None)
ScopedResourcesBuilder
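A quick illustration of the pattern described in the ScopedResourcesBuilder record above, where the requested resource keys become attributes of a dynamically built namedtuple. This is a standalone sketch of the idea only, not the Dagster implementation, and the resource values are made up:

from collections import namedtuple

def build_scoped_resources(resource_instance_dict, required_resource_keys):
    # Keep only the resources that were actually requested.
    scoped = {k: v for k, v in resource_instance_dict.items() if k in required_resource_keys}
    # Dynamically create a type whose fields are exactly the requested keys.
    Resources = namedtuple("Resources", list(scoped))
    return Resources(**scoped)

resources = build_scoped_resources({"foo": 1, "bar": 2, "baz": 3}, {"foo", "bar"})
assert resources.foo == 1 and resources.bar == 2   # dot access; only requested keys are attached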
python
coleifer__peewee
tests/sql.py
{ "start": 43074, "end": 48287 }
class ____(BaseTestCase): def test_insert_simple(self): query = User.insert({ User.c.username: 'charlie', User.c.superuser: False, User.c.admin: True}) self.assertSQL(query, ( 'INSERT INTO "users" ("admin", "superuser", "username") ' 'VALUES (?, ?, ?)'), [True, False, 'charlie']) @requires_sqlite def test_replace_sqlite(self): query = User.replace({ User.c.username: 'charlie', User.c.superuser: False}) self.assertSQL(query, ( 'INSERT OR REPLACE INTO "users" ("superuser", "username") ' 'VALUES (?, ?)'), [False, 'charlie']) @requires_mysql def test_replace_mysql(self): query = User.replace({ User.c.username: 'charlie', User.c.superuser: False}) self.assertSQL(query, ( 'REPLACE INTO "users" ("superuser", "username") ' 'VALUES (?, ?)'), [False, 'charlie']) def test_insert_list(self): data = [ {Person.name: 'charlie'}, {Person.name: 'huey'}, {Person.name: 'zaizee'}] query = Person.insert(data) self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?), (?), (?)'), ['charlie', 'huey', 'zaizee']) def test_insert_list_with_columns(self): data = [(i,) for i in ('charlie', 'huey', 'zaizee')] query = Person.insert(data, columns=[Person.name]) self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?), (?), (?)'), ['charlie', 'huey', 'zaizee']) # Use column name instead of column instance. query = Person.insert(data, columns=['name']) self.assertSQL(query, ( 'INSERT INTO "person" ("name") VALUES (?), (?), (?)'), ['charlie', 'huey', 'zaizee']) def test_insert_list_infer_columns(self): data = [('p1', '1980-01-01'), ('p2', '1980-02-02')] self.assertSQL(Person.insert(data), ( 'INSERT INTO "person" ("name", "dob") VALUES (?, ?), (?, ?)'), ['p1', '1980-01-01', 'p2', '1980-02-02']) # Cannot infer any columns for User. data = [('u1',), ('u2',)] self.assertRaises(ValueError, User.insert(data).sql) # Note declares columns, but no primary key. So we would have to # include it for this to work. data = [(1, 'p1-n'), (2, 'p2-n')] self.assertRaises(ValueError, Note.insert(data).sql) data = [(1, 1, 'p1-n'), (2, 2, 'p2-n')] self.assertSQL(Note.insert(data), ( 'INSERT INTO "note" ("id", "person_id", "content") ' 'VALUES (?, ?, ?), (?, ?, ?)'), [1, 1, 'p1-n', 2, 2, 'p2-n']) def test_insert_query(self): source = User.select(User.c.username).where(User.c.admin == False) query = Person.insert(source, columns=[Person.name]) self.assertSQL(query, ( 'INSERT INTO "person" ("name") ' 'SELECT "t1"."username" FROM "users" AS "t1" ' 'WHERE ("t1"."admin" = ?)'), [False]) def test_insert_query_cte(self): cte = User.select(User.c.username).cte('foo') source = cte.select(cte.c.username) query = Person.insert(source, columns=[Person.name]).with_cte(cte) self.assertSQL(query, ( 'WITH "foo" AS (SELECT "t1"."username" FROM "users" AS "t1") ' 'INSERT INTO "person" ("name") ' 'SELECT "foo"."username" FROM "foo"'), []) def test_insert_single_value_query(self): query = Person.select(Person.id).where(Person.name == 'huey') insert = Note.insert({ Note.person_id: query, Note.content: 'hello'}) self.assertSQL(insert, ( 'INSERT INTO "note" ("content", "person_id") VALUES (?, ' '(SELECT "t1"."id" FROM "person" AS "t1" ' 'WHERE ("t1"."name" = ?)))'), ['hello', 'huey']) def test_insert_returning(self): query = (Person .insert({ Person.name: 'zaizee', Person.dob: datetime.date(2000, 1, 2)}) .returning(Person.id, Person.name, Person.dob)) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") ' 'VALUES (?, ?) 
' 'RETURNING "person"."id", "person"."name", "person"."dob"'), [datetime.date(2000, 1, 2), 'zaizee']) query = query.returning(Person.id, Person.name.alias('new_name')) self.assertSQL(query, ( 'INSERT INTO "person" ("dob", "name") ' 'VALUES (?, ?) ' 'RETURNING "person"."id", "person"."name" AS "new_name"'), [datetime.date(2000, 1, 2), 'zaizee']) def test_empty(self): class Empty(TestModel): pass query = Empty.insert() if isinstance(db, MySQLDatabase): sql = 'INSERT INTO "empty" () VALUES ()' elif isinstance(db, PostgresqlDatabase): sql = 'INSERT INTO "empty" DEFAULT VALUES RETURNING "empty"."id"' else: sql = 'INSERT INTO "empty" DEFAULT VALUES' self.assertSQL(query, sql, [])
TestInsertQuery
python
huggingface__transformers
src/transformers/models/swiftformer/modeling_swiftformer.py
{ "start": 15073, "end": 16560 }
class ____(SwiftFormerPreTrainedModel): def __init__(self, config: SwiftFormerConfig): super().__init__(config) self.config = config self.patch_embed = SwiftFormerPatchEmbedding(config) self.encoder = SwiftFormerEncoder(config) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple, BaseModelOutputWithNoAttention]: output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") embedding_output = self.patch_embed(pixel_values) encoder_outputs = self.encoder( embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return tuple(v for v in encoder_outputs if v is not None) return BaseModelOutputWithNoAttention( last_hidden_state=encoder_outputs.last_hidden_state, hidden_states=encoder_outputs.hidden_states, ) @auto_docstring
SwiftFormerModel
python
numba__llvmlite
llvmlite/binding/object_file.py
{ "start": 141, "end": 832 }
class ____(ffi.ObjectRef): def name(self): return ffi.lib.LLVMPY_GetSectionName(self) def is_text(self): return ffi.lib.LLVMPY_IsSectionText(self) def size(self): return ffi.lib.LLVMPY_GetSectionSize(self) def address(self): return ffi.lib.LLVMPY_GetSectionAddress(self) def data(self): return string_at(ffi.lib.LLVMPY_GetSectionContents(self), self.size()) def is_end(self, object_file): return ffi.lib.LLVMPY_IsSectionIteratorAtEnd(object_file, self) def next(self): ffi.lib.LLVMPY_MoveToNextSection(self) def _dispose(self): ffi.lib.LLVMPY_DisposeSectionIterator(self)
SectionIteratorRef
python
scikit-learn__scikit-learn
sklearn/gaussian_process/kernels.py
{ "start": 30113, "end": 33369 }
class ____(KernelOperator): """The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2` and combines them via .. math:: k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y) Note that the `__mul__` magic method is overridden, so `Product(RBF(), RBF())` is equivalent to using the * operator with `RBF() * RBF()`. Read more in the :ref:`User Guide <gp_kernels>`. .. versionadded:: 0.18 Parameters ---------- k1 : Kernel The first base-kernel of the product-kernel k2 : Kernel The second base-kernel of the product-kernel Examples -------- >>> from sklearn.datasets import make_friedman2 >>> from sklearn.gaussian_process import GaussianProcessRegressor >>> from sklearn.gaussian_process.kernels import (RBF, Product, ... ConstantKernel) >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) >>> kernel = Product(ConstantKernel(2), RBF()) >>> gpr = GaussianProcessRegressor(kernel=kernel, ... random_state=0).fit(X, y) >>> gpr.score(X, y) 1.0 >>> kernel 1.41**2 * RBF(length_scale=1) """ def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_Y, n_features) or list of object,\ default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when `eval_gradient` is True. """ if eval_gradient: K1, K1_gradient = self.k1(X, Y, eval_gradient=True) K2, K2_gradient = self.k2(X, Y, eval_gradient=True) return K1 * K2, np.dstack( (K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis]) ) else: return self.k1(X, Y) * self.k2(X, Y) def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X) """ return self.k1.diag(X) * self.k2.diag(X) def __repr__(self): return "{0} * {1}".format(self.k1, self.k2)
Product
python
django__django
tests/staticfiles_tests/test_management.py
{ "start": 7478, "end": 7955 }
class ____(AdminScriptTestCase): @override_settings(STATIC_ROOT=None) def test_missing_settings_dont_prevent_help(self): """ Even if the STATIC_ROOT setting is not set, one can still call the `manage.py help collectstatic` command. """ self.write_settings("settings.py", apps=["django.contrib.staticfiles"]) out, err = self.run_manage(["help", "collectstatic"]) self.assertNoOutput(err)
TestCollectionHelpSubcommand
python
getsentry__sentry
src/sentry/sentry_metrics/querying/visitors/query_expression.py
{ "start": 2767, "end": 3094 }
class ____(QueryExpressionVisitor[QueryExpression]): """ Visitor that recursively validates the `QueryExpression`. """ def _visit_timeseries(self, timeseries: Timeseries) -> QueryExpression: # This visitor has been kept in case we need future validations. return timeseries
QueryValidationVisitor
python
pytorch__pytorch
torch/_dynamo/variables/higher_order_ops.py
{ "start": 140895, "end": 158569 }
class ____(VariableTracker): def __init__(self, fwd_graph, bwd_graph, parent_source, **kwargs) -> None: super().__init__(**kwargs) self.fwd_graph = fwd_graph self.bwd_graph = bwd_graph self.parent_source = parent_source def call_function( self, tx: "InstructionTranslator", args: "list[VariableTracker]", kwargs: "dict[str, VariableTracker]", ) -> "VariableTracker": from . import ( AutogradFunctionContextVariable, UserDefinedClassVariable, UserFunctionVariable, UserMethodVariable, ) from .builder import wrap_fx_proxy """ Consider the following: class MySin(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return x.sin() @staticmethod def backward(ctx, grad): x, = ctx.saved_tensors return grad * x.cos() We want the resulting graphs to look like: def fwd(ctx, x): # (output, saved tensors / attrs) return (x.sin(), [x]) # bwd(ctx, grad0, grad1, ..., gradn, *saved_tensors_or_attrs) def bwd(ctx, grad, x): return grad * x.cos() To accomplish this, we're going to: 1. Construct a ctx object 2. (fwd_out, _), fwd_graph, fwd_freevars = speculate_subgraph on MySin.forward (manually_set_inputs=True) 3. (bwd_out, _), bwd_graph, bwd_freevars = speculate_subgraph on MySin.backward, while manually setting the ctx and grad inputs. 4. Manually rewriting the fwd graph's output to be (output, stuff_that_gets_used in bwd_graph) Getting from 3 to 4 is pretty elegant: stuff_that_gets_used in bwd graph is just the bwd_freevars returned from speculate_subgraph, assuming MySin.backward doesn't capture any arguments. All these steps work if MySin.backward doesn't capture any values. This is a limitation in general that we should check for. """ prev_side_effects = tx.output.side_effects.clone() fwd_tracer = torch._dynamo.output_graph.SubgraphTracer( tx.output, parent=tx.output.current_tracer, source_target="autograd.Function", ) ctx = AutogradFunctionContextVariable.create(tx, args, kwargs) with discard_graph_changes(tx): # A little hacky, but we need a dummy ctx proxy for speculate_subgraph. # We should clean this up at some point. proxy = tx.output.create_proxy( "call_function", torch.autograd.function.FunctionCtx, (), {} ) set_example_value(proxy.node, ctx.value) ctx.proxy = proxy if isinstance(self.fwd_graph, types.FunctionType): fwd_fn = UserFunctionVariable(self.fwd_graph) fwd_args = [ctx, *args] elif isinstance(self.fwd_graph, types.MethodType): fwd_fn = UserMethodVariable( self.fwd_graph.__func__, UserDefinedClassVariable(self.fwd_graph.__class__), ) fwd_args = [fwd_fn.obj, ctx, *args] else: unimplemented( gb_type="autograd.Function.apply: non-function or method forward", context=str(self.fwd_graph), explanation="Expected forward function to be a function or method.", hints=[], ) # Speculate subgraph on the fwd (fwd_out, _), fwd_graph, fwd_freevars = speculate_subgraph( tx, fwd_fn, fwd_args, kwargs, "autograd.Function", enable_grad=False, set_subgraph_inputs="semi_automatic", restore_side_effects=False, tracer=fwd_tracer, ) if ctx in tx.output.side_effects.store_attr_mutations: if ( "_materialize_non_diff_grads" in tx.output.side_effects.store_attr_mutations[ctx] ): unimplemented( gb_type="autograd.Function.apply: _materialize_non_diff_grads mutation", context="", explanation="Mutations to autograd.Function.ctx._materialize_non_diff_grads are not supported.", hints=[ *graph_break_hints.SUPPORTABLE, ], ) bwd_tracer = torch._dynamo.output_graph.SubgraphTracer( tx.output, parent=fwd_tracer, source_target="autograd.Function", ) # Speculate subgraph on the backward. 
We make the # bwd tracer a child of the fwd tracer, because backward may rely on # tensors/attrs created in the fwd tracer. if isinstance(fwd_out, variables.BaseListVariable): bwd_args = [ctx, *fwd_out.items] else: bwd_args = [ctx, fwd_out] bwd_src = AttrSource(self.parent_source, member="backward") if isinstance(self.bwd_graph, types.FunctionType): bwd_fn = UserFunctionVariable(self.bwd_graph, source=bwd_src) elif isinstance(self.bwd_graph, types.MethodType): bwd_fn = UserMethodVariable( self.bwd_graph.__func__, UserDefinedClassVariable(self.bwd_graph.__class__), source=bwd_src, ) bwd_args = [bwd_fn.obj, *bwd_args] else: unimplemented( gb_type="autograd.Function.apply: non-function or method backward", context=str(self.bwd_graph), explanation="Expected backward function to be a function or method.", hints=[], ) def is_strict_for(v: VariableTracker): if isinstance(v, variables.TensorVariable): # we can be more lax for stuff from forward return v.proxy.tracer is not fwd_tracer return True with ( tx.output.subtracer(fwd_fn, fwd_tracer), tx.strict_translation_mode(is_strict_for), ): try: (bwd_out, _), bwd_graph, bwd_freevars = speculate_subgraph( tx, bwd_fn, bwd_args, kwargs, "autograd.Function", enable_grad=False, set_subgraph_inputs="manual", restore_side_effects=False, tracer=bwd_tracer, ) except torch._dynamo.exc.Unsupported as e: if isinstance( e, torch._dynamo.exc.UnknownPropertiesDuringBackwardTrace ): from unittest import mock bwd_tracer = torch._dynamo.output_graph.SubgraphTracer( tx.output, parent=fwd_tracer, source_target="autograd.Function", ) from .._trace_wrapped_higher_order_op import ( autograd_function_backward_rewritten, ) if isinstance(self.bwd_graph, types.FunctionType): bwd_fn = UserFunctionVariable( autograd_function_backward_rewritten(self.bwd_graph) ) elif isinstance(self.bwd_graph, types.MethodType): bwd_fn = UserMethodVariable( autograd_function_backward_rewritten( self.bwd_graph.__func__ ), UserDefinedClassVariable(self.bwd_graph.__class__), ) else: unimplemented( gb_type="autograd.Function.apply: non-function or method backward (2)", context=str(self.bwd_graph), explanation="Expected backward function to be a function or method.", hints=[], ) with mock.patch( "torch._dynamo.config._autograd_backward_strict_mode_conditional_banned_ops", [], ): (bwd_out, _), bwd_graph, bwd_freevars = speculate_subgraph( tx, bwd_fn, bwd_args, kwargs, "autograd.Function", enable_grad=False, set_subgraph_inputs="manual", restore_side_effects=False, tracer=bwd_tracer, ) else: raise e # TODO: assert that bwd_graph didn't capture values that were # not created inside fwd_graph. # TODO(oulgen): Ideally, we would not do a linear search for output # node but as things currently are there could be nodes after the # output node # This is bug prone as if there's code after the output node, then # graph.output will append the output at the very end # This might be a behavior difference # If users call ctx.mark_non_differentiable, we should capture these output tensors who # are marked as non-differentiable and pass them to ApplyTemplate # at torch._functorch.autograd_function.AutogradFunctionApply for reconstruction. 
non_differentiable_idx = [] if ctx.non_differentiable is not None: non_differentiable_set = set(ctx.non_differentiable) assert isinstance(fwd_out, variables.BaseListVariable) for i, x in enumerate(fwd_out.items): if ( isinstance(x, variables.TensorVariable) and x.as_proxy() in non_differentiable_set ): non_differentiable_idx.append(i) # Rewrite the output of fwd_graph to (output, stuff_necessary_for_bwd) for node in fwd_graph.find_nodes(op="output"): fwd_graph.erase_node(node) break # Because we lift the bwd_freevars as inputs of the bwd_graph, # we have to manually add the bwd_freevars as output of fwd_graph. # However, the bwd_freevars got from speculate_subgraph use the Proxies in the bwd_graph, # we need to convert them to Proxies in the fwd_graph and then generate new fwd_graph output. fwd_proxy_of_bwd_freevars = [] for k in bwd_freevars: if k in fwd_freevars: fwd_proxy_of_bwd_freevars.append(fwd_freevars[k]) else: fwd_proxy_of_bwd_freevars.append(k) def unwrap_proxy(x): if isinstance(x, torch.fx.Proxy): return x.node else: assert variables.ConstantVariable.is_literal(x), ( f"Only constant is allowed. Got {x}" ) return x new_fwd_graph_outputs = (fwd_out.as_proxy(), fwd_proxy_of_bwd_freevars) new_fwd_graph_outputs = pytree.tree_map(unwrap_proxy, new_fwd_graph_outputs) fwd_graph.output(new_fwd_graph_outputs) fwd_graph.lint() # Store fwd_body fwd_nn_modules = tx.output.tracing_context.module_context.copy_graphstate() fwd_name = tx.output.install_subgraph( "fwd_body", torch.fx.GraphModule(fwd_nn_modules.nn_modules, fwd_graph), ) fwd_node = make_attr(tx, fwd_name) # The type of original args can be arbitrary, but we only support basic type in FX graph. # So the speculated subgraph input includes original tensor args and the lifted freevars. # We need to filter out the original tensor args and concat them with the lifted freevars # to generate the proxy args for the FX call_function node. filtered_args = [] # A boolean list to mark if the type of corresponding argument is tensor. # This is used to determine if a FX node's argument should be an argument of # ApplyTemplate.forward and if we should skip the output from ApplyTemplate.backward # at torch._functorch.autograd_function.AutogradFunctionApply. args_tensor_mask = [False] * len(args) for i, arg in enumerate(args): if isinstance(arg, (variables.TensorVariable, variables.SymNodeVariable)): filtered_args.append(arg) args_tensor_mask[i] = True # Rewrite the output of bwd_graph to remove the grad output for the non-Tensor args. new_bwd_graph_outputs = None for node in bwd_graph.find_nodes(op="output"): bwd_graph.erase_node(node) break # The same as the above fwd proxies, we need to use the bwd proxies in the bwd_graph # if some of the output is from fwd_freevars. bwd_out_proxy = bwd_out.as_proxy() bwd_proxy_of_fwd_freevars = [] if isinstance(bwd_out_proxy, (tuple, list)): for k in bwd_out_proxy: if k in bwd_freevars: bwd_proxy_of_fwd_freevars.append(bwd_freevars[k]) else: bwd_proxy_of_fwd_freevars.append(k) else: if bwd_out_proxy in bwd_freevars: bwd_proxy_of_fwd_freevars = bwd_freevars[bwd_out_proxy] else: bwd_proxy_of_fwd_freevars = bwd_out_proxy # Remove bwd output for non-Tensor args. output_proxy = bwd_proxy_of_fwd_freevars if isinstance(output_proxy, (tuple, list)): new_bwd_graph_outputs = () for x, mask in zip(output_proxy, args_tensor_mask): if mask: new_bwd_graph_outputs = new_bwd_graph_outputs + (x,) else: assert x is None, f"Grad of non-Tensor arg {x} is not None." 
else: new_bwd_graph_outputs = output_proxy # Update the bwd graph output. new_bwd_graph_outputs = pytree.tree_map( lambda x: None if x is None else x.node, new_bwd_graph_outputs ) bwd_graph.output(new_bwd_graph_outputs) bwd_graph.lint() # Store bwd_body bwd_nn_modules = tx.output.tracing_context.module_context.copy_graphstate() bwd_name = tx.output.install_subgraph( "bwd_body", torch.fx.GraphModule(bwd_nn_modules.nn_modules, bwd_graph), ) bwd_node = make_attr(tx, bwd_name) tx.output.side_effects = prev_side_effects p_args = ( fwd_node, bwd_node, *([arg.as_proxy() for arg in filtered_args] + list(fwd_freevars.keys())), ) kwargs = { "args_tensor_mask": args_tensor_mask, "non_differentiable_idx": non_differentiable_idx, } # Store the invocation as a call from torch._functorch.autograd_function import autograd_function_apply # We use speculate_subgraph to get the fwd graph, but it's always under no grad mode like what eager mode does. # The fwd outputs (tensor's example_value) need to be inferred from fake tensor prop to get the correct attributes # (e.g, tensor.requires_grad), which would be used by downstream Dynamo tracing. # Since there can be other ops like Triton kernels, which depends on python dispatcher, we have to enable it. with enable_python_dispatcher(), tx.output.fake_mode: fake_args = ( tx.output.nn_modules[fwd_node.node.name], tx.output.nn_modules[bwd_node.node.name], *( [ _get_fake_value(arg) for arg in filtered_args + list(fwd_freevars.keys()) ] ), ) example_value = autograd_function_apply(*fake_args, **kwargs) return wrap_fx_proxy( tx=tx, proxy=tx.output.create_proxy( "call_function", autograd_function_apply, args=p_args, kwargs=kwargs, ), example_value=example_value, ) def _get_fake_value(x): if isinstance(x, variables.VariableTracker): return x.as_proxy().node.meta["example_value"] elif isinstance(x, torch.fx.Proxy): return x.node.meta["example_value"] else: return x def maybe_positional_arg_names(func): result = [] if not hasattr(func, "get_function"): return None try: fn = func.get_function() except (Unsupported, NotImplementedError): return None try: sig = inspect.signature(fn) except ValueError: return None for name, param in sig.parameters.items(): if param.kind is inspect.Parameter.VAR_POSITIONAL: return None if ( param.kind is inspect.Parameter.POSITIONAL_ONLY or param.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD ): if name == "self": # FX graphs can't have a placeholder named self result.append("self_") else: result.append(name) return result
AutogradFunctionApplyVariable
python
doocs__leetcode
solution/0800-0899/0825.Friends Of Appropriate Ages/Solution.py
{ "start": 0, "end": 391 }
class ____: def numFriendRequests(self, ages: List[int]) -> int: cnt = [0] * 121 for x in ages: cnt[x] += 1 ans = 0 for ax, x in enumerate(cnt): for ay, y in enumerate(cnt): if not (ay <= 0.5 * ax + 7 or ay > ax or (ay > 100 and ax < 100)): ans += x * (y - int(ax == ay)) return ans
Solution
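The counting loop in the record above only adds x * (y - [ax == ay]) pairs when age ay may receive a request from age ax, i.e. when none of ay <= 0.5 * ax + 7, ay > ax, or (ay > 100 and ax < 100) holds. A small sanity check of that predicate against the commonly quoted examples for this problem (assumed expected totals: [16, 16] -> 2 requests, [16, 17, 18] -> 2 requests):

def may_request(x, y):
    # x sends a friend request to y unless one of the blocking rules applies.
    return not (y <= 0.5 * x + 7 or y > x or (y > 100 and x < 100))

assert may_request(16, 16)                            # 16 and 16 request each other: 2 requests
assert may_request(17, 16) and may_request(18, 17)    # 17 -> 16 and 18 -> 17: 2 requests
assert not may_request(18, 16)                        # blocked because 16 <= 0.5 * 18 + 7
assert not may_request(16, 17)                        # never request someone older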
python
kamyu104__LeetCode-Solutions
Python/ugly-number.py
{ "start": 39, "end": 297 }
class ____(object): # @param {integer} num # @return {boolean} def isUgly(self, num): if num == 0: return False for i in [2, 3, 5]: while num % i == 0: num /= i return num == 1
Solution
python
cython__cython
Cython/Compiler/ParseTreeTransforms.py
{ "start": 123018, "end": 125553 }
class ____(EnvTransform): def visit_InPlaceAssignmentNode(self, node): lhs = node.lhs rhs = node.rhs if lhs.type.is_cpp_class: # No getting around this exact operator here. return node if isinstance(lhs, ExprNodes.BufferIndexNode): # There is code to handle this case in InPlaceAssignmentNode return node env = self.current_env() def side_effect_free_reference(node, setting=False): if node.is_name: return node, [] elif node.type.is_pyobject and not setting: node = LetRefNode(node) return node, [node] elif node.is_subscript: base, temps = side_effect_free_reference(node.base) index = LetRefNode(node.index) return ExprNodes.IndexNode(node.pos, base=base, index=index), temps + [index] elif node.is_attribute: obj, temps = side_effect_free_reference(node.obj, setting=setting) return ExprNodes.AttributeNode(node.pos, obj=obj, attribute=node.attribute), temps elif isinstance(node, ExprNodes.BufferIndexNode): raise ValueError("Don't allow things like attributes of buffer indexing operations") else: node = LetRefNode(node) return node, [node] try: lhs, let_ref_nodes = side_effect_free_reference(lhs, setting=True) except ValueError: return node dup = lhs.__class__(**lhs.__dict__) binop = ExprNodes.binop_node(node.pos, operator = node.operator, operand1 = dup, operand2 = rhs, inplace=True) # Manually analyse types for new node. lhs.is_target = True lhs = lhs.analyse_target_types(env) dup.analyse_types(env) # FIXME: no need to reanalyse the copy, right? binop.analyse_operation(env) node = Nodes.SingleAssignmentNode( node.pos, lhs = lhs, rhs=binop.coerce_to(lhs.type, env)) # Use LetRefNode to avoid side effects. let_ref_nodes.reverse() for t in let_ref_nodes: node = LetNode(t, node) return node def visit_ExprNode(self, node): # In-place assignments can't happen within an expression. return node
ExpandInplaceOperators
python
huggingface__transformers
src/transformers/models/wav2vec2_bert/processing_wav2vec2_bert.py
{ "start": 933, "end": 6252 }
class ____(ProcessorMixin): r""" Constructs a Wav2Vec2-BERT processor which wraps a Wav2Vec2-BERT feature extractor and a Wav2Vec2 CTC tokenizer into a single processor. [`Wav2Vec2Processor`] offers all the functionalities of [`SeamlessM4TFeatureExtractor`] and [`PreTrainedTokenizer`]. See the docstring of [`~Wav2Vec2Processor.__call__`] and [`~Wav2Vec2Processor.decode`] for more information. Args: feature_extractor (`SeamlessM4TFeatureExtractor`): An instance of [`SeamlessM4TFeatureExtractor`]. The feature extractor is a required input. tokenizer ([`PreTrainedTokenizer`]): An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input. """ def __init__(self, feature_extractor, tokenizer): super().__init__(feature_extractor, tokenizer) def __call__( self, audio: Optional[AudioInput] = None, text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]] = None, **kwargs: Unpack[Wav2Vec2BertProcessorKwargs], ): """ Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `audio` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.__call__`] if `audio` is not `None` to pre-process the audio. To prepare the target sequences(s), this method forwards the `text` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.__call__`] if `text` is not `None`. Please refer to the docstring of the above two methods for more information. Args: audio (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`): The audio or batch of audios to be prepared. Each audio can be NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of channels, and T the sample length of the audio. text (`str`, `list[str]`, `list[list[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_features** -- Audio input features to be fed to a model. Returned when `audio` is not `None`. - **attention_mask** -- List of indices specifying which timestamps should be attended to by the model when `audio` is not `None`. When only `text` is specified, returns the token attention mask. - **labels** -- List of token ids to be fed to a model. Returned when both `text` and `audio` are not `None`. - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None` and `audio` is `None`. """ if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process.") output_kwargs = self._merge_kwargs( Wav2Vec2BertProcessorKwargs, tokenizer_init_kwargs=self.tokenizer.init_kwargs, **kwargs, ) if audio is not None: inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"]) if text is not None: encodings = self.tokenizer(text, **output_kwargs["text_kwargs"]) if text is None: return inputs elif audio is None: return encodings else: inputs["labels"] = encodings["input_ids"] return inputs def pad(self, input_features=None, labels=None, **kwargs): """ If `input_features` is not `None`, this method forwards the `input_features` and `kwargs` arguments to SeamlessM4TFeatureExtractor's [`~SeamlessM4TFeatureExtractor.pad`] to pad the input features. 
If `labels` is not `None`, this method forwards the `labels` and `kwargs` arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.pad`] to pad the label(s). Please refer to the docstring of the above two methods for more information. """ if input_features is None and labels is None: raise ValueError("You need to specify either an `input_features` or `labels` input to pad.") if input_features is not None: input_features = self.feature_extractor.pad(input_features, **kwargs) if labels is not None: labels = self.tokenizer.pad(labels, **kwargs) if labels is None: return input_features elif input_features is None: return labels else: input_features["labels"] = labels["input_ids"] return input_features @property def model_input_names(self): # The processor doesn't return text ids and the model seems to not need them feature_extractor_input_names = self.feature_extractor.model_input_names return feature_extractor_input_names + ["labels"] __all__ = ["Wav2Vec2BertProcessor"]
Wav2Vec2BertProcessor
python
pydantic__pydantic
pydantic/types.py
{ "start": 40592, "end": 45265 }
class ____(BaseModel): f: DirectoryPath path = Path('directory/') path.mkdir() m = Model(f='directory/') print(m.model_dump()) #> {'f': PosixPath('directory')} path.rmdir() path = Path('file.txt') path.touch() try: Model(f='file.txt') # file except ValidationError as e: print(e) ''' 1 validation error for Model f Path does not point to a directory [type=path_not_directory, input_value='file.txt', input_type=str] ''' path.unlink() try: Model(f='not-exists-directory') except ValidationError as e: print(e) ''' 1 validation error for Model f Path does not point to a directory [type=path_not_directory, input_value='not-exists-directory', input_type=str] ''' ``` """ NewPath = Annotated[Path, PathType('new')] """A path for a new file or directory that must not already exist. The parent directory must already exist.""" SocketPath = Annotated[Path, PathType('socket')] """A path to an existing socket file""" # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ JSON TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if TYPE_CHECKING: # Json[list[str]] will be recognized by type checkers as list[str] Json = Annotated[AnyType, ...] else: class Json: """A special type wrapper which loads JSON before parsing. You can use the `Json` data type to make Pydantic first load a raw JSON string before validating the loaded data into the parametrized type: ```python from typing import Any from pydantic import BaseModel, Json, ValidationError class AnyJsonModel(BaseModel): json_obj: Json[Any] class ConstrainedJsonModel(BaseModel): json_obj: Json[list[int]] print(AnyJsonModel(json_obj='{"b": 1}')) #> json_obj={'b': 1} print(ConstrainedJsonModel(json_obj='[1, 2, 3]')) #> json_obj=[1, 2, 3] try: ConstrainedJsonModel(json_obj=12) except ValidationError as e: print(e) ''' 1 validation error for ConstrainedJsonModel json_obj JSON input should be string, bytes or bytearray [type=json_type, input_value=12, input_type=int] ''' try: ConstrainedJsonModel(json_obj='[a, b]') except ValidationError as e: print(e) ''' 1 validation error for ConstrainedJsonModel json_obj Invalid JSON: expected value at line 1 column 2 [type=json_invalid, input_value='[a, b]', input_type=str] ''' try: ConstrainedJsonModel(json_obj='["a", "b"]') except ValidationError as e: print(e) ''' 2 validation errors for ConstrainedJsonModel json_obj.0 Input should be a valid integer, unable to parse string as an integer [type=int_parsing, input_value='a', input_type=str] json_obj.1 Input should be a valid integer, unable to parse string as an integer [type=int_parsing, input_value='b', input_type=str] ''' ``` When you dump the model using `model_dump` or `model_dump_json`, the dumped value will be the result of validation, not the original JSON string. 
However, you can use the argument `round_trip=True` to get the original JSON string back: ```python from pydantic import BaseModel, Json class ConstrainedJsonModel(BaseModel): json_obj: Json[list[int]] print(ConstrainedJsonModel(json_obj='[1, 2, 3]').model_dump_json()) #> {"json_obj":[1,2,3]} print( ConstrainedJsonModel(json_obj='[1, 2, 3]').model_dump_json(round_trip=True) ) #> {"json_obj":"[1,2,3]"} ``` """ @classmethod def __class_getitem__(cls, item: AnyType) -> AnyType: return Annotated[item, cls()] @classmethod def __get_pydantic_core_schema__(cls, source: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema: if cls is source: return core_schema.json_schema(None) else: return core_schema.json_schema(handler(source)) def __repr__(self) -> str: return 'Json' def __hash__(self) -> int: return hash(type(self)) def __eq__(self, other: Any) -> bool: return type(other) is type(self) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SECRET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # The `Secret` class being conceptually immutable, make the type variable covariant: SecretType = TypeVar('SecretType', covariant=True)
Model
python
wandb__wandb
wandb/vendor/graphql-core-1.1/wandb_graphql/error/located_error.py
{ "start": 80, "end": 757 }
class ____(GraphQLError): def __init__(self, nodes, original_error=None): if original_error: try: message = str(original_error) except UnicodeEncodeError: message = original_error.message.encode('utf-8') else: message = 'An unknown error occurred.' if hasattr(original_error, 'stack'): stack = original_error.stack else: stack = sys.exc_info()[2] super(GraphQLLocatedError, self).__init__( message=message, nodes=nodes, stack=stack ) self.original_error = original_error
GraphQLLocatedError
python
django__django
tests/migrations/test_autodetector.py
{ "start": 1130, "end": 8372 }
class ____(TestCase): def repr_changes(self, changes, include_dependencies=False): output = "" for app_label, migrations_ in sorted(changes.items()): output += " %s:\n" % app_label for migration in migrations_: output += " %s\n" % migration.name for operation in migration.operations: output += " %s\n" % operation if include_dependencies: output += " Dependencies:\n" if migration.dependencies: for dep in migration.dependencies: output += " %s\n" % (dep,) else: output += " None\n" return output def assertNumberMigrations(self, changes, app_label, number): if len(changes.get(app_label, [])) != number: self.fail( "Incorrect number of migrations (%s) for %s (expected %s)\n%s" % ( len(changes.get(app_label, [])), app_label, number, self.repr_changes(changes), ) ) def assertMigrationDependencies(self, changes, app_label, position, dependencies): if not changes.get(app_label): self.fail( "No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)) ) if len(changes[app_label]) < position + 1: self.fail( "No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)) ) migration = changes[app_label][position] if set(migration.dependencies) != set(dependencies): self.fail( "Migration dependencies mismatch for %s.%s (expected %s):\n%s" % ( app_label, migration.name, dependencies, self.repr_changes(changes, include_dependencies=True), ) ) def assertOperationTypes(self, changes, app_label, position, types): if not changes.get(app_label): self.fail( "No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)) ) if len(changes[app_label]) < position + 1: self.fail( "No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)) ) migration = changes[app_label][position] real_types = [ operation.__class__.__name__ for operation in migration.operations ] if types != real_types: self.fail( "Operation type mismatch for %s.%s (expected %s):\n%s" % ( app_label, migration.name, types, self.repr_changes(changes), ) ) def assertOperationAttributes( self, changes, app_label, position, operation_position, **attrs ): if not changes.get(app_label): self.fail( "No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)) ) if len(changes[app_label]) < position + 1: self.fail( "No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)) ) migration = changes[app_label][position] if len(changes[app_label]) < position + 1: self.fail( "No operation at index %s for %s.%s\n%s" % ( operation_position, app_label, migration.name, self.repr_changes(changes), ) ) operation = migration.operations[operation_position] for attr, value in attrs.items(): if getattr(operation, attr, None) != value: self.fail( "Attribute mismatch for %s.%s op #%s, %s (expected %r, got %r):\n%s" % ( app_label, migration.name, operation_position, attr, value, getattr(operation, attr, None), self.repr_changes(changes), ) ) def assertOperationFieldAttributes( self, changes, app_label, position, operation_position, **attrs ): if not changes.get(app_label): self.fail( "No migrations found for %s\n%s" % (app_label, self.repr_changes(changes)) ) if len(changes[app_label]) < position + 1: self.fail( "No migration at index %s for %s\n%s" % (position, app_label, self.repr_changes(changes)) ) migration = changes[app_label][position] if len(changes[app_label]) < position + 1: self.fail( "No operation at index %s for %s.%s\n%s" % ( operation_position, app_label, migration.name, self.repr_changes(changes), ) ) operation = 
migration.operations[operation_position] if not hasattr(operation, "field"): self.fail( "No field attribute for %s.%s op #%s." % ( app_label, migration.name, operation_position, ) ) field = operation.field for attr, value in attrs.items(): if getattr(field, attr, None) != value: self.fail( "Field attribute mismatch for %s.%s op #%s, field.%s (expected %r, " "got %r):\n%s" % ( app_label, migration.name, operation_position, attr, value, getattr(field, attr, None), self.repr_changes(changes), ) ) def make_project_state(self, model_states): "Shortcut to make ProjectStates from lists of predefined models" project_state = ProjectState() for model_state in model_states: project_state.add_model(model_state.clone()) return project_state def get_changes(self, before_states, after_states, questioner=None): if not isinstance(before_states, ProjectState): before_states = self.make_project_state(before_states) if not isinstance(after_states, ProjectState): after_states = self.make_project_state(after_states) return MigrationAutodetector( before_states, after_states, questioner, )._detect_changes()
BaseAutodetectorTests
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 558629, "end": 559105 }
class ____(sgqlc.types.Type): """Autogenerated return type of DeleteProjectV2Item""" __schema__ = github_schema __field_names__ = ("client_mutation_id", "deleted_item_id") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation.""" deleted_item_id = sgqlc.types.Field(ID, graphql_name="deletedItemId") """The ID of the deleted item."""
DeleteProjectV2ItemPayload
python
dask__dask
dask/dataframe/dask_expr/io/_delayed.py
{ "start": 593, "end": 5230 }
class ____(PartitionsFiltered, BlockwiseIO): _parameters = [ "delayed_container", "meta", "user_divisions", "verify_meta", "_partitions", "prefix", ] _defaults = { "meta": None, "_partitions": None, "user_divisions": None, "verify_meta": True, "prefix": None, } @functools.cached_property def _name(self): if self.prefix is None: return super()._name return f"{self.prefix}-{self.deterministic_token}" @functools.cached_property def _meta(self): if self.operand("meta") is not None: return self.operand("meta") return delayed(make_meta)(self.delayed_container.operands[0]).compute() def _divisions(self): if self.operand("user_divisions") is not None: return self.operand("user_divisions") else: return self.delayed_container.divisions def _filtered_task(self, name: Key, index: int) -> Task: if self.verify_meta: return Task( name, functools.partial(check_meta, meta=self._meta, funcname="from_delayed"), TaskRef((self.delayed_container._name, index)), _data_producer=True, ) else: return Alias((self.delayed_container._name, index)) # type: ignore[return-value] def identity(x): return x def from_delayed( dfs: Delayed | distributed.Future | Collection[Delayed | distributed.Future], meta=None, divisions: tuple | None = None, prefix: str | None = None, verify_meta: bool = True, ): """Create Dask DataFrame from many Dask Delayed objects .. warning:: ``from_delayed`` should only be used if the objects that create the data are complex and cannot be easily represented as a single function in an embarrassingly parallel fashion. ``from_map`` is recommended if the query can be expressed as a single function like: def read_xml(path): return pd.read_xml(path) ddf = dd.from_map(read_xml, paths) ``from_delayed`` might be deprecated in the future. Parameters ---------- dfs : A ``dask.delayed.Delayed``, a ``distributed.Future``, or an iterable of either of these objects, e.g. returned by ``client.submit``. These comprise the individual partitions of the resulting dataframe. If a single object is provided (not an iterable), then the resulting dataframe will have only one partition. $META divisions : Partition boundaries along the index. For tuple, see https://docs.dask.org/en/latest/dataframe-design.html#partitions If None, then won't use index information prefix : Prefix to prepend to the keys. verify_meta : If True check that the partitions have consistent metadata, defaults to True. """ if isinstance(dfs, Delayed) or hasattr(dfs, "key"): dfs = [dfs] if len(dfs) == 0: raise TypeError("Must supply at least one delayed object") if meta is None: meta = delayed(make_meta)(dfs[0]).compute() # type: ignore[index] if divisions == "sorted": raise NotImplementedError( "divisions='sorted' not supported, please calculate the divisions " "yourself." ) elif divisions is not None: divs = list(divisions) if len(divs) != len(dfs) + 1: raise ValueError("divisions should be a tuple of len(dfs) + 1") futures = [v for v in dfs if isinstance(v, TaskRef)] if len(futures) == len(dfs): # All futures. Fast path dfs = futures else: # Every Delayed generates a Layer, i.e. this path is much more expensive # if there are many input values. 
dfs = [ delayed(v) if not isinstance(v, (Delayed,)) and hasattr(v, "key") else v for v in dfs ] for item in dfs: if not (isinstance(item, (Delayed, TaskRef))): raise TypeError(f"Expected Delayed object, got {type(item).__name__}") from dask.dataframe.dask_expr._collection import new_collection result = FromDelayed( DelayedsExpr(*dfs), make_meta(meta), divisions, verify_meta, None, prefix ) if pyarrow_strings_enabled() and any( pd.api.types.is_object_dtype(dtype) for dtype in (result.dtypes.values if result.ndim == 2 else [result.dtypes]) ): return new_collection(ArrowStringConversion(result)) return new_collection(result)
FromDelayed
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constrainedTypeVar2.py
{ "start": 168, "end": 835 }
class ____(Foo): pass T1 = TypeVar("T1", Foo, str) T2 = TypeVar("T2", bound=Foo) def test1(x: T1) -> T1: return x def test2(x: T2) -> T2: return x # This should generate an error because test1(Bar()) # should evaluate to type Foo, not Bar. aa1: Bar = test1(Bar()) aa2: Foo = test1(Bar()) bb1: Bar = test2(Bar()) bb2: Foo = test2(Bar()) # The call to rmtree should not generate any errors. data_dir = pathlib.Path("/tmp") archive_path = data_dir / "hello" shutil.rmtree(archive_path) def func1(a: AnyStr, b: AnyStr) -> None: ... def func2(a: Union[str, bytes], b: Union[str, bytes]): # This should generate two errors func1(a, b)
Bar
python
kamyu104__LeetCode-Solutions
Python/valid-permutations-for-di-sequence.py
{ "start": 31, "end": 512 }
class ____(object): def numPermsDISequence(self, S): """ :type S: str :rtype: int """ dp = [1]*(len(S)+1) for c in S: if c == "I": dp = dp[:-1] for i in xrange(1, len(dp)): dp[i] += dp[i-1] else: dp = dp[1:] for i in reversed(xrange(len(dp)-1)): dp[i] += dp[i+1] return dp[0] % (10**9+7)
Solution
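The solution above keeps a shrinking dp array: each 'I' drops the last entry and takes running prefix sums, each 'D' drops the first entry and takes running suffix sums. A hand trace for S = "DID" (expected answer assumed to be 5, the usual example for this problem):

# start                            dp = [1, 1, 1, 1]
# 'D': dp = dp[1:], suffix sums  -> dp = [3, 2, 1]
# 'I': dp = dp[:-1], prefix sums -> dp = [3, 5]
# 'D': dp = dp[1:], suffix sums  -> dp = [5]
# return dp[0] % (10**9 + 7)     -> 5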
python
milvus-io__pymilvus
pymilvus/client/types.py
{ "start": 7379, "end": 7627 }
class ____: def __init__(self, sources: list, target: int) -> None: self.sources = sources self.target = target def __repr__(self) -> str: return f""" Plan: - sources: {self.sources} - target: {self.target} """
Plan
python
redis__redis-py
redis/exceptions.py
{ "start": 5602, "end": 5791 }
class ____(RedisClusterException): """ Raised on unexpected response length on pipelines. This is most likely a handling error on the stack. """ pass
InvalidPipelineStack
python
dagster-io__dagster
examples/docs_projects/project_ask_ai_dagster/src/project_ask_ai_dagster/defs/scraper.py
{ "start": 144, "end": 2063 }
class ____(dg.ConfigurableResource): sitemap_url: str headers: dict = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"} # start_sitemap def parse_sitemap(self) -> list[str]: """Extract URLs from sitemap XML.""" response = requests.get(self.sitemap_url, headers=self.headers) soup = BeautifulSoup(response.content, "xml") urls = [] # Find all loc elements within url elements urls = list(set(loc.text.strip() for loc in soup.find_all("loc") if loc.text.strip())) return urls # end_sitemap # start_scrape def scrape_page(self, url: str) -> Optional[Document]: log = dg.get_dagster_logger() try: response = requests.get(url, headers=self.headers) response.raise_for_status() soup = BeautifulSoup(response.text, "html.parser") log.info(f"scraped page: {url}") for element in soup(["script", "style", "nav", "footer", "header"]): element.decompose() title = soup.title.string if soup.title else "" main_content = soup.find("main") or soup.find("article") or soup.body if main_content: content = [] for elem in main_content.stripped_strings: if elem.strip(): content.append(elem.strip()) text_content = "\n".join(content) else: text_content = "\n".join(s.strip() for s in soup.stripped_strings if s.strip()) return Document(page_content=text_content, metadata={"source": url, "title": title}) except Exception as e: log.info(f"Error scraping {url}: {e!s}") return None # end_scrape # start_resource_init scraper_resource = SitemapScraper(sitemap_url=dg.EnvVar("DOCS_SITEMAP")) # end_resource_init
SitemapScraper
python
Pylons__pyramid
tests/test_i18n.py
{ "start": 538, "end": 965 }
class ____(unittest.TestCase): def _makeOne(self, *arg, **kw): from pyramid.i18n import TranslationStringFactory return TranslationStringFactory(*arg, **kw) def test_it(self): # this is part of the API, we don't actually need to test much more # than that it's importable factory = self._makeOne('a') self.assertEqual(factory('').domain, 'a')
TestTranslationStringFactory
python
eriklindernoren__ML-From-Scratch
mlfromscratch/deep_learning/layers.py
{ "start": 1404, "end": 3307 }
class ____(Layer): """A fully-connected NN layer. Parameters: ----------- n_units: int The number of neurons in the layer. input_shape: tuple The expected input shape of the layer. For dense layers a single digit specifying the number of features of the input. Must be specified if it is the first layer in the network. """ def __init__(self, n_units, input_shape=None): self.layer_input = None self.input_shape = input_shape self.n_units = n_units self.trainable = True self.W = None self.w0 = None def initialize(self, optimizer): # Initialize the weights limit = 1 / math.sqrt(self.input_shape[0]) self.W = np.random.uniform(-limit, limit, (self.input_shape[0], self.n_units)) self.w0 = np.zeros((1, self.n_units)) # Weight optimizers self.W_opt = copy.copy(optimizer) self.w0_opt = copy.copy(optimizer) def parameters(self): return np.prod(self.W.shape) + np.prod(self.w0.shape) def forward_pass(self, X, training=True): self.layer_input = X return X.dot(self.W) + self.w0 def backward_pass(self, accum_grad): # Save weights used during forwards pass W = self.W if self.trainable: # Calculate gradient w.r.t layer weights grad_w = self.layer_input.T.dot(accum_grad) grad_w0 = np.sum(accum_grad, axis=0, keepdims=True) # Update the layer weights self.W = self.W_opt.update(self.W, grad_w) self.w0 = self.w0_opt.update(self.w0, grad_w0) # Return accumulated gradient for next layer # Calculated based on the weights used during the forward pass accum_grad = accum_grad.dot(W.T) return accum_grad def output_shape(self): return (self.n_units, )
Dense
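The Dense record above computes X.dot(W) + w0 on the forward pass and X.T.dot(accum_grad) for the weight gradient on the backward pass. A minimal shape check of those two products, using made-up sizes rather than anything from the original repo:

import numpy as np

X = np.random.randn(8, 3)        # batch of 8 samples, 3 input features
W = np.random.randn(3, 4)        # layer with 4 units
w0 = np.zeros((1, 4))
out = X.dot(W) + w0              # forward pass, broadcasts the bias row
grad_out = np.ones_like(out)     # stand-in for the upstream gradient
grad_w = X.T.dot(grad_out)       # gradient w.r.t. W, same shape as W
grad_w0 = grad_out.sum(axis=0, keepdims=True)   # gradient w.r.t. the bias
assert out.shape == (8, 4) and grad_w.shape == W.shape and grad_w0.shape == w0.shape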
python
google__pytype
pytype/pyc/pyc_test.py
{ "start": 168, "end": 664 }
class ____(unittest.TestCase): def test_error_matches_re(self): e = pyc.CompileError("some error (foo.py, line 123)") self.assertEqual("foo.py", e.filename) self.assertEqual(123, e.line) self.assertEqual("some error", e.error) def test_error_does_not_match_re(self): e = pyc.CompileError("some error in foo.py at line 123") self.assertIsNone(e.filename) self.assertEqual(1, e.line) self.assertEqual("some error in foo.py at line 123", e.error)
TestCompileError
python
fastai__fastai
dev_nbs/course/crappify.py
{ "start": 245, "end": 859 }
class ____(): def __init__(self, path_lr, path_hr): self.path_lr = path_lr self.path_hr = path_hr def __call__(self, fn): dest = self.path_lr/fn.relative_to(self.path_hr) dest.parent.mkdir(parents=True, exist_ok=True) img = Image.open(fn) targ_sz = resize_to(img, 96, use_min=True) img = img.resize(targ_sz, resample=BILINEAR).convert('RGB') w,h = img.size q = random.randint(10,70) ImageDraw.Draw(img).text((random.randint(0,w//2),random.randint(0,h//2)), str(q), fill=(255,255,255)) img.save(dest, quality=q)
crappifier
python
Lightning-AI__lightning
src/lightning/fabric/plugins/precision/deepspeed.py
{ "start": 1240, "end": 3587 }
class ____(Precision): """Precision plugin for DeepSpeed integration. Args: precision: Full precision (32-true), half precision (16-true, bf16-true) or mixed precision (16-mixed, bf16-mixed). Raises: ValueError: If unsupported ``precision`` is provided. """ def __init__(self, precision: _PRECISION_INPUT) -> None: supported_precision = get_args(_PRECISION_INPUT) if precision not in supported_precision: raise ValueError( f"`precision={precision!r})` is not supported in DeepSpeed." f" `precision` must be one of: {supported_precision}." ) self.precision = precision precision_to_type = { "bf16-mixed": torch.bfloat16, "16-mixed": torch.float16, "bf16-true": torch.bfloat16, "16-true": torch.float16, "32-true": torch.float32, } self._desired_dtype = precision_to_type[self.precision] @override def convert_module(self, module: Module) -> Module: if "true" in self.precision: return module.to(dtype=self._desired_dtype) return module @override def tensor_init_context(self) -> AbstractContextManager: if "true" not in self.precision: return nullcontext() return _DtypeContextManager(self._desired_dtype) @override def module_init_context(self) -> AbstractContextManager: return self.tensor_init_context() @override def convert_input(self, data: Any) -> Any: return apply_to_collection(data, function=_convert_fp_tensor, dtype=Tensor, dst_type=self._desired_dtype) @override def convert_output(self, data: Any) -> Any: return apply_to_collection(data, function=_convert_fp_tensor, dtype=Tensor, dst_type=torch.get_default_dtype()) @override def backward(self, tensor: Tensor, model: "DeepSpeedEngine", *args: Any, **kwargs: Any) -> None: """Performs back-propagation using DeepSpeed's engine.""" model.backward(tensor, *args, **kwargs) @override def optimizer_step( self, optimizer: Steppable, **kwargs: Any, ) -> Any: # DeepSpeed handles the optimizer step internally return optimizer.step(**kwargs)
DeepSpeedPrecision
python
ray-project__ray
python/ray/data/_internal/actor_autoscaler/base_actor_autoscaler.py
{ "start": 308, "end": 918 }
class ____(ABC):
    """Abstract interface for Ray Data actor autoscaler."""

    def __init__(
        self,
        topology: "Topology",
        resource_manager: "ResourceManager",
    ):
        self._topology = topology
        self._resource_manager = resource_manager

    @abstractmethod
    def try_trigger_scaling(self):
        """Try trigger autoscaling.

        This method will be called each time when StreamingExecutor makes a
        scheduling decision. A subclass should override this method to handle
        the autoscaling of `AutoscalingActorPool`s.
        """
        ...
ActorAutoscaler
python
ray-project__ray
python/ray/_private/worker.py
{ "start": 45210, "end": 48214 }
class ____(metaclass=ABCMeta): """ Base class for RayContext and ClientContext """ dashboard_url: Optional[str] python_version: str ray_version: str @abstractmethod def disconnect(self): """ If this context is for directly attaching to a cluster, disconnect will call ray.shutdown(). Otherwise, if the context is for a ray client connection, the client will be disconnected. """ pass @abstractmethod def __enter__(self): pass @abstractmethod def __exit__(self): pass def _context_table_template(self): if self.dashboard_url: dashboard_row = Template("context_dashrow.html.j2").render( dashboard_url="http://" + self.dashboard_url ) else: dashboard_row = None return Template("context_table.html.j2").render( python_version=self.python_version, ray_version=self.ray_version, dashboard_row=dashboard_row, ) def _repr_html_(self): return Template("context.html.j2").render( context_logo=Template("context_logo.html.j2").render(), context_table=self._context_table_template(), ) @repr_with_fallback(["ipywidgets", "8"]) def _get_widget_bundle(self, **kwargs) -> Dict[str, Any]: """Get the mimebundle for the widget representation of the context. Args: **kwargs: Passed to the _repr_mimebundle_() function for the widget Returns: Dictionary ("mimebundle") of the widget representation of the context. """ import ipywidgets disconnect_button = ipywidgets.Button( description="Disconnect", disabled=False, button_style="", tooltip="Disconnect from the Ray cluster", layout=ipywidgets.Layout(margin="auto 0px 0px 0px"), ) def disconnect_callback(button): button.disabled = True button.description = "Disconnecting..." self.disconnect() button.description = "Disconnected" disconnect_button.on_click(disconnect_callback) left_content = ipywidgets.VBox( [ ipywidgets.HTML(Template("context_logo.html.j2").render()), disconnect_button, ], layout=ipywidgets.Layout(), ) right_content = ipywidgets.HTML(self._context_table_template()) widget = ipywidgets.HBox( [left_content, right_content], layout=ipywidgets.Layout(width="100%") ) return widget._repr_mimebundle_(**kwargs) def _repr_mimebundle_(self, **kwargs): bundle = self._get_widget_bundle(**kwargs) # Overwrite the widget html repr and default repr with those of the BaseContext bundle.update({"text/html": self._repr_html_(), "text/plain": repr(self)}) return bundle @dataclass
BaseContext
python
facebook__pyre-check
client/language_server/code_navigation_request.py
{ "start": 712, "end": 1022 }
class ____:
    paths: List[str]
    client_id: str

    def to_json(self) -> List[object]:
        return [
            "GetTypeErrors",
            {
                "paths": self.paths,
                "client_id": self.client_id,
            },
        ]


@dataclasses.dataclass(frozen=True)
TypeErrorsRequest
python
kamyu104__LeetCode-Solutions
Python/count-non-decreasing-subarrays-after-k-operations.py
{ "start": 93, "end": 832 }
class ____(object):
    def countNonDecreasingSubarrays(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        result = cnt = 0
        dq = collections.deque()
        right = len(nums)-1
        for left in reversed(xrange(len(nums))):
            while dq and nums[dq[-1]] < nums[left]:
                l = dq.pop()
                r = dq[-1]-1 if dq else right
                cnt += (r-l+1)*(nums[left]-nums[l])
            dq.append(left)
            while cnt > k:
                cnt -= nums[dq[0]]-nums[right]
                if dq[0] == right:
                    dq.popleft()
                right -= 1
            result += right-left+1
        return result
Solution
python
numpy__numpy
numpy/linalg/tests/test_linalg.py
{ "start": 45293, "end": 52002 }
class ____(_TestNormBase): def test_empty(self): assert_equal(norm([]), 0.0) assert_equal(norm(array([], dtype=self.dt)), 0.0) assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) def test_vector_return_type(self): a = np.array([1, 0, 1]) exact_types = np.typecodes['AllInteger'] inexact_types = np.typecodes['AllFloat'] all_types = exact_types + inexact_types for each_type in all_types: at = a.astype(each_type) an = norm(at, -np.inf) self.check_dtype(at, an) assert_almost_equal(an, 0.0) with warnings.catch_warnings(): warnings.filterwarnings( 'ignore', "divide by zero encountered", RuntimeWarning) an = norm(at, -1) self.check_dtype(at, an) assert_almost_equal(an, 0.0) an = norm(at, 0) self.check_dtype(at, an) assert_almost_equal(an, 2) an = norm(at, 1) self.check_dtype(at, an) assert_almost_equal(an, 2.0) an = norm(at, 2) self.check_dtype(at, an) assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 2.0)) an = norm(at, 4) self.check_dtype(at, an) assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 4.0)) an = norm(at, np.inf) self.check_dtype(at, an) assert_almost_equal(an, 1.0) def test_vector(self): a = [1, 2, 3, 4] b = [-1, -2, -3, -4] c = [-1, 2, -3, 4] def _test(v): np.testing.assert_almost_equal(norm(v), 30 ** 0.5, decimal=self.dec) np.testing.assert_almost_equal(norm(v, inf), 4.0, decimal=self.dec) np.testing.assert_almost_equal(norm(v, -inf), 1.0, decimal=self.dec) np.testing.assert_almost_equal(norm(v, 1), 10.0, decimal=self.dec) np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, decimal=self.dec) np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5, decimal=self.dec) np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5), decimal=self.dec) np.testing.assert_almost_equal(norm(v, 0), 4, decimal=self.dec) for v in (a, b, c,): _test(v) for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), array(c, dtype=self.dt)): _test(v) def test_axis(self): # Vector norms. # Compare the use of `axis` with computing the norm of each row # or column separately. A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]: expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] assert_almost_equal(norm(A, ord=order, axis=0), expected0) expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])] assert_almost_equal(norm(A, ord=order, axis=1), expected1) # Matrix norms. B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) nd = B.ndim for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro']: for axis in itertools.combinations(range(-nd, nd), 2): row_axis, col_axis = axis if row_axis < 0: row_axis += nd if col_axis < 0: col_axis += nd if row_axis == col_axis: assert_raises(ValueError, norm, B, ord=order, axis=axis) else: n = norm(B, ord=order, axis=axis) # The logic using k_index only works for nd = 3. # This has to be changed if nd is increased. 
k_index = nd - (row_axis + col_axis) if row_axis < col_axis: expected = [norm(B[:].take(k, axis=k_index), ord=order) for k in range(B.shape[k_index])] else: expected = [norm(B[:].take(k, axis=k_index).T, ord=order) for k in range(B.shape[k_index])] assert_almost_equal(n, expected) def test_keepdims(self): A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) allclose_err = 'order {0}, axis = {1}' shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}' # check the order=None, axis=None case expected = norm(A, ord=None, axis=None) found = norm(A, ord=None, axis=None, keepdims=True) assert_allclose(np.squeeze(found), expected, err_msg=allclose_err.format(None, None)) expected_shape = (1, 1, 1) assert_(found.shape == expected_shape, shape_err.format(found.shape, expected_shape, None, None)) # Vector norms. for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]: for k in range(A.ndim): expected = norm(A, ord=order, axis=k) found = norm(A, ord=order, axis=k, keepdims=True) assert_allclose(np.squeeze(found), expected, err_msg=allclose_err.format(order, k)) expected_shape = list(A.shape) expected_shape[k] = 1 expected_shape = tuple(expected_shape) assert_(found.shape == expected_shape, shape_err.format(found.shape, expected_shape, order, k)) # Matrix norms. for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro', 'nuc']: for k in itertools.permutations(range(A.ndim), 2): expected = norm(A, ord=order, axis=k) found = norm(A, ord=order, axis=k, keepdims=True) assert_allclose(np.squeeze(found), expected, err_msg=allclose_err.format(order, k)) expected_shape = list(A.shape) expected_shape[k[0]] = 1 expected_shape[k[1]] = 1 expected_shape = tuple(expected_shape) assert_(found.shape == expected_shape, shape_err.format(found.shape, expected_shape, order, k))
_TestNormGeneral
python
google__jax
jax/experimental/jax2tf/tests/control_flow_ops_test.py
{ "start": 857, "end": 10073 }
class ____(tf_test_util.JaxToTfTestCase): @jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype .* requested in array is not available") def test_cond(self): def f_jax(pred, x): return lax.cond(pred, lambda t: t + 1., lambda f: f, x) self.ConvertAndCompare(f_jax, jnp.bool_(True), 1.) self.ConvertAndCompare(f_jax, jnp.bool_(False), 1.) @jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype .* requested in array is not available") def test_cond_multiple_results(self): def f_jax(pred, x): return lax.cond(pred, lambda t: (t + 1., 1.), lambda f: (f + 2., 2.), x) self.ConvertAndCompare(f_jax, jnp.bool_(True), 1.) self.ConvertAndCompare(f_jax, jnp.bool_(False), 1.) @jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype .* requested in array is not available") def test_cond_partial_eval(self): def f(x): res = lax.cond(True, lambda op: op * x, lambda op: op + x, x) return res self.ConvertAndCompare(jax.grad(f), 1.) @jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype .* requested in array is not available") def test_cond_units(self): def g(x): return lax.cond(True, lambda x: x, lambda y: y, x) self.ConvertAndCompare(g, 0.7) self.ConvertAndCompare(jax.grad(g), 0.7) @jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype .* requested in array is not available") def test_cond_custom_jvp(self): """Conversion of function with custom JVP, inside cond. This exercises the custom_jvp_call_jaxpr primitives.""" @jax.custom_jvp def f(x): return x * x @f.defjvp def f_jvp(primals, tangents): x, = primals x_dot, = tangents primal_out = f(x) tangent_out = 3. * x * x_dot return primal_out, tangent_out def g(x): return lax.cond(True, f, lambda y: y, x) arg = 0.7 self.TransformConvertAndCompare(g, arg, None) self.TransformConvertAndCompare(g, arg, "jvp") self.TransformConvertAndCompare(g, arg, "vmap") self.TransformConvertAndCompare(g, arg, "jvp_vmap") self.TransformConvertAndCompare(g, arg, "grad") self.TransformConvertAndCompare(g, arg, "grad_vmap") @jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype .* requested in array is not available") def test_cond_custom_vjp(self): """Conversion of function with custom VJP, inside cond. This exercises the custom_vjp_call_jaxpr primitives.""" @jax.custom_vjp def f(x): return x * x # f_fwd: a -> (b, residual) def f_fwd(x): return f(x), 3. * x # f_bwd: (residual, CT b) -> [CT a] def f_bwd(residual, ct_b): return residual * ct_b, f.defvjp(f_fwd, f_bwd) def g(x): return lax.cond(True, f, lambda y: y, x) arg = 0.7 self.TransformConvertAndCompare(g, arg, None) self.TransformConvertAndCompare(g, arg, "vmap") self.TransformConvertAndCompare(g, arg, "grad_vmap") @jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype .* requested in array is not available") def test_while_single_carry(self): """A while with a single carry""" def func(x): # Equivalent to: # for(i=x; i < 4; i++); return lax.while_loop(lambda c: c < 4, lambda c: c + 1, x) self.ConvertAndCompare(func, 0) def test_while(self): # Some constants to capture in the conditional branches cond_const = np.ones(3, dtype=np.float32) body_const1 = np.full_like(cond_const, 1.) body_const2 = np.full_like(cond_const, 2.) def func(x): # Equivalent to: # c = [1, 1, 1] # for(i=0; i < 3; i++) # c += [1, 1, 1] + [2, 2, 2] # # The function is set-up so that it captures constants in the # body of the functionals. 
This covers some cases in the representation # of the lax.while primitive. def cond(idx_carry): i, c = idx_carry return i < jnp.sum(cond_const) # Capture cond_const def body(idx_carry): i, c = idx_carry return (i + 1, c + body_const1 + body_const2) return lax.while_loop(cond, body, (0, x)) self.ConvertAndCompare(func, cond_const) def test_while_batched_cond(self): """A while with a single carry""" def product(x, y): # Equivalent to "x * y" implemented as: # res = 0. # for(i=0; i < y; i++) # res += x return lax.while_loop(lambda idx_carry: idx_carry[0] < y, lambda idx_carry: (idx_carry[0] + 1, idx_carry[1] + x), (0, 0.)) # We use vmap to compute result[i, j] = i * j xs = np.arange(4, dtype=np.int32) ys = np.arange(5, dtype=np.int32) def product_xs_y(xs, y): return jax.vmap(product, in_axes=(0, None))(xs, y) def product_xs_ys(xs, ys): return jax.vmap(product_xs_y, in_axes=(None, 0))(xs, ys) self.ConvertAndCompare(product_xs_ys, xs, ys) @jtu.ignore_warning(category=UserWarning, message="Explicitly requested dtype .* requested in array is not available") def test_while_custom_jvp(self): """Conversion of function with custom JVP, inside while. This exercises the custom_jvp_call_jaxpr primitives.""" @jax.custom_jvp def f(x): return x * x @f.defjvp def f_jvp(primals, tangents): x, = primals x_dot, = tangents primal_out = f(x) tangent_out = 3. * x * x_dot return primal_out, tangent_out def g(x): return lax.while_loop(lambda carry: carry[0] < 10, lambda carry: (carry[0] + 1., f(carry[1])), (0., x)) arg = 0.7 self.TransformConvertAndCompare(g, arg, None) self.TransformConvertAndCompare(g, arg, "jvp") self.TransformConvertAndCompare(g, arg, "vmap") self.TransformConvertAndCompare(g, arg, "jvp_vmap") def test_scan(self): def f_jax(xs, ys): body_const = np.ones((2, ), dtype=np.float32) # Test constant capture def body(res0, inputs): x, y = inputs return res0 + x * y, body_const return lax.scan(body, 0., (xs, ys)) arg = np.arange(10, dtype=np.float32) self.ConvertAndCompare(f_jax, arg, arg) def test_scan_partial_eval(self): def f_jax(xs, ys): body_const = np.ones((2, ), dtype=np.float32) # Test constant capture def body(res0, inputs): x, y = inputs return res0 + x * y, body_const c_out, _ = lax.scan(body, 0., (xs, ys)) return c_out arg = np.arange(10, dtype=np.float32) self.ConvertAndCompare(jax.grad(f_jax), arg, arg) def test_scan_custom_jvp(self): """Conversion of function with custom JVP, inside scan. This exercises the custom_jvp_call_jaxpr primitives.""" @jax.custom_jvp def f(x): return x * x @f.defjvp def f_jvp(primals, tangents): x, = primals x_dot, = tangents primal_out = f(x) tangent_out = 3. * x * x_dot return primal_out, tangent_out def g(x): return lax.scan(lambda carry, inp: (carry + f(inp), 0.), np.full(x.shape[1:], 0.), # Like x w/o leading dim x)[0] arg = np.full((5,), 0.7) self.TransformConvertAndCompare(g, arg, None) self.TransformConvertAndCompare(g, arg, "jvp") self.TransformConvertAndCompare(g, arg, "vmap") self.TransformConvertAndCompare(g, arg, "jvp_vmap") self.TransformConvertAndCompare(g, arg, "grad") self.TransformConvertAndCompare(g, arg, "grad_vmap") def test_scan_custom_vjp(self): """Conversion of function with custom VJP, inside scan. This exercises the custom_vjp_call_jaxpr primitives.""" @jax.custom_vjp def f(x): return x * x # f_fwd: a -> (b, residual) def f_fwd(x): return f(x), 3. 
* x # f_bwd: (residual, CT b) -> [CT a] def f_bwd(residual, ct_b): return residual * ct_b, f.defvjp(f_fwd, f_bwd) def g(x): return lax.scan(lambda carry, inp: (carry + f(inp), 0.), np.full(x.shape[1:], 0.), # Like x w/o leading dim x)[0] arg = np.full((5,), 0.7) self.TransformConvertAndCompare(g, arg, None) self.TransformConvertAndCompare(g, arg, "vmap") self.TransformConvertAndCompare(g, arg, "grad") self.TransformConvertAndCompare(g, arg, "grad_vmap") def test_scan_remat(self): def f_jax(xs): @jax.remat def body_fun(carry, x): return carry * x, xs # capture xs from the environment res1, res2 = lax.scan(body_fun, 0., xs + 1.) return jnp.sum(res1) + jnp.sum(res2) arg = np.arange(10, dtype=np.float32) + 1. self.TransformConvertAndCompare(f_jax, arg, None) self.TransformConvertAndCompare(f_jax, arg, "grad") if __name__ == "__main__": absltest.main(testLoader=jtu.JaxTestLoader())
ControlFlowOpsTest
python
getsentry__sentry
src/sentry/api/serializers/rest_framework/organizationmemberinvite.py
{ "start": 6382, "end": 7043 }
class ____(serializers.Serializer):
    approve = serializers.BooleanField(required=True, write_only=True)

    def validate_approve(self, approve):
        invited_member = self.context["invited_member"]
        allowed_roles = self.context["allowed_roles"]

        # you can't reject an invite request via a PUT request
        if approve is False:
            raise serializers.ValidationError(ERR_WRONG_METHOD)
        try:
            invited_member.validate_invitation(allowed_roles)
        except UnableToAcceptMemberInvitationException as err:
            raise serializers.ValidationError(str(err))
        return approve
ApproveInviteRequestValidator
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/resource_annotation.py
{ "start": 630, "end": 1619 }
class ____(ABC):
    """Marker class for types that can be used as a parameter on an annotated function
    like `@asset`. Any type marked with this class does not require a ResourceParam
    when used on an asset.

    Example:
        class YourClass(TreatAsResourceParam):
            ...

        @asset
        def an_asset(your_class: YourClass):
            ...
    """


def _is_resource_annotation(annotation: Optional[type[Any]]) -> bool:
    from dagster._config.pythonic_config import ConfigurableResourceFactory

    if isinstance(annotation, type) and (
        is_subclass(annotation, ResourceDefinition)
        or is_subclass(annotation, ConfigurableResourceFactory)
        or is_subclass(annotation, TreatAsResourceParam)
    ):
        return True
    return hasattr(annotation, "__metadata__") and getattr(annotation, "__metadata__") == (
        RESOURCE_PARAM_METADATA,
    )


T = TypeVar("T")

ResourceParam = Annotated[T, RESOURCE_PARAM_METADATA]
TreatAsResourceParam
python
readthedocs__readthedocs.org
readthedocs/oauth/migrations/0011_add_default_branch.py
{ "start": 149, "end": 636 }
class ____(migrations.Migration):
    safe = Safe.after_deploy()

    dependencies = [
        ("oauth", "0010_index_full_name"),
    ]

    operations = [
        migrations.AddField(
            model_name="remoterepository",
            name="default_branch",
            field=models.CharField(
                blank=True,
                max_length=150,
                null=True,
                verbose_name="Default branch of the repository",
            ),
        ),
    ]
Migration
python
doocs__leetcode
solution/0700-0799/0775.Global and Local Inversions/Solution.py
{ "start": 0, "end": 232 }
class ____:
    def isIdealPermutation(self, nums: List[int]) -> bool:
        mx = 0
        for i in range(2, len(nums)):
            if (mx := max(mx, nums[i - 2])) > nums[i]:
                return False
        return True
Solution
python
huggingface__transformers
src/transformers/models/efficientloftr/modeling_efficientloftr.py
{ "start": 22203, "end": 23622 }
class ____(GradientCheckpointingLayer): def __init__(self, config: EfficientLoFTRConfig, layer_idx: int): super().__init__() self.self_attention = EfficientLoFTRAggregatedAttention(config, layer_idx) self.cross_attention = EfficientLoFTRAggregatedAttention(config, layer_idx) def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, **kwargs: Unpack[TransformersKwargs], ) -> torch.Tensor: batch_size, _, embed_dim, height, width = hidden_states.shape hidden_states = hidden_states.reshape(-1, embed_dim, height, width) hidden_states = self.self_attention(hidden_states, position_embeddings=position_embeddings, **kwargs) ### # Implementation of a bug in the original implementation regarding the cross-attention # See : https://github.com/zju3dv/MatchAnything/issues/26 hidden_states = hidden_states.reshape(-1, 2, embed_dim, height, width) features_0 = hidden_states[:, 0] features_1 = hidden_states[:, 1] features_0 = self.cross_attention(features_0, features_1, **kwargs) features_1 = self.cross_attention(features_1, features_0, **kwargs) hidden_states = torch.stack((features_0, features_1), dim=1) ### return hidden_states
EfficientLoFTRLocalFeatureTransformerLayer
python
scipy__scipy
scipy/stats/_distribution_infrastructure.py
{ "start": 20310, "end": 21858 }
class ____(_Interval): r""" Represents a simply-connected subset of the real line; i.e., an interval Completes the implementation of the `_Interval` class for intervals on the real line. Methods ------- define_parameters(*parameters) (Inherited) Records any parameters used to define the endpoints of the domain. get_numerical_endpoints(parameter_values) (Inherited) Gets the numerical values of the domain endpoints, which may have been defined symbolically. contains(item, parameter_values) (Inherited) Determines whether the argument is contained within the domain __str__() Returns a string representation of the domain, e.g. "[a, b)". """ def __str__(self): a, b = self.endpoints a, b = self._get_endpoint_str(a, "f1"), self._get_endpoint_str(b, "f2") left_inclusive, right_inclusive = self.inclusive left = "[" if left_inclusive else "(" right = "]" if right_inclusive else ")" return f"{left}{a}, {b}{right}" def _get_endpoint_str(self, endpoint, funcname): if callable(endpoint): if endpoint.__doc__ is not None: return endpoint.__doc__ params = inspect.signature(endpoint).parameters.values() params = [ p.name for p in params if p.kind == inspect.Parameter.KEYWORD_ONLY ] return f"{funcname}({','.join(params)})" return self.symbols.get(endpoint, f"{endpoint}")
_RealInterval
python
google__pytype
pytype/tests/test_cmp2.py
{ "start": 1137, "end": 1870 }
class ____(test_base.BaseTest):
    """Tests handling of the NotImplemented builtin."""

    def test_return_annotation(self):
        self.Check("""
            class Foo:
                def __eq__(self, other) -> bool:
                    if isinstance(other, Foo):
                        return id(self) == id(other)
                    else:
                        return NotImplemented
        """)

    def test_infer_return_type(self):
        ty = self.Infer("""
            class Foo:
                def __eq__(self, other):
                    if isinstance(other, Foo):
                        return id(self) == id(other)
                    else:
                        return NotImplemented
        """)
        self.assertTypesMatchPytd(
            ty,
            """
            class Foo:
                def __eq__(self, other) -> bool: ...
            """,
        )
NotImplementedTest
python
apache__airflow
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/utils/pod_manager.py
{ "start": 37897, "end": 38762 }
class ____(str, enum.Enum):
    """Action to take when the pod finishes."""

    KEEP_POD = "keep_pod"
    DELETE_POD = "delete_pod"
    DELETE_SUCCEEDED_POD = "delete_succeeded_pod"


def is_log_group_marker(line: str) -> bool:
    """Check if the line is a log group marker like `::group::` or `::endgroup::`."""
    return line.startswith("::group::") or line.startswith("::endgroup::")


def parse_log_line(line: str) -> tuple[DateTime | None, str]:
    """
    Parse K8s log line and returns the final state.

    :param line: k8s log line
    :return: timestamp and log message
    """
    timestamp, sep, message = line.strip().partition(" ")
    if not sep:
        return None, line
    try:
        last_log_time = cast("DateTime", pendulum.parse(timestamp))
    except ParserError:
        return None, line
    return last_log_time, message
OnFinishAction
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_line02.py
{ "start": 315, "end": 1314 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("chart_line02.xlsx") self.ignore_elements = {"xl/workbook.xml": ["<fileVersion", "<calcPr"]} def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({"type": "line"}) chart.axis_ids = [63593856, 63612032] chart.axis2_ids = [63615360, 63613568] data = [[1, 2, 3, 4, 5], [6, 8, 6, 4, 2]] worksheet.write_column("A1", data[0]) worksheet.write_column("B1", data[1]) chart.add_series({"values": "=Sheet1!$A$1:$A$5"}) chart.add_series({"values": "=Sheet1!$B$1:$B$5", "y2_axis": 1}) worksheet.insert_chart("E9", chart) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
google__jax
jax/experimental/_private_mm/examples/example_overlap.py
{ "start": 1051, "end": 6494 }
class ____: fwd: Callable[[Any, Any], Any] # (params, acts) -> acts mesh: Mesh def transfer(arr, stage): sharding = NamedSharding(stage.mesh, P()) # just replicate return mm.device_put(arr, device=sharding) def stages_step_fn(stages, num_mubatches, params_by_stage, xs): # One task per mubatch and stage (e.g. forward stages only) tasks = [ (mubatch_idx, stage_idx) for stage_idx in range(len(stages)) for mubatch_idx in range(num_mubatches) ] # We want to be careful with the order in which we enqueue work, since # a single process is managing multiple devices. # Assuming a GPipe-like schedule we traverse tasks in the following order: # t=0 t=1 t=2 t=3 t=4 t=5 t=6 # stage=0 1 2 4 7 # stage=1 3 5 8 11 # stage=2 6 9 12 14 # stage=3 10 13 15 16 def task_key(task): mubatch_idx, stage_idx = task return (mubatch_idx + stage_idx, stage_idx) tasks.sort(key=task_key) input = { (mubatch_idx, 0): xs for mubatch_idx in range(num_mubatches) } for task_id in tasks: mubatch_idx, stage_idx = task_id stage = stages[stage_idx] params = params_by_stage[stage_idx] with profile_utils.annotate( f'mub{mubatch_idx}/F{stage_idx}', color='cyan', ): # Invoke the stage and immediately enqueue the transfer of the # result to the next stage. We want the transfer to be overlapped # with subsequent computation on the same stage. local_output = stage.fwd(params, input[task_id]) if stage_idx + 1 < len(stages): with profile_utils.annotate( f'Tx mub{mubatch_idx} to {stage_idx+1}', color='yellow', ): input[(mubatch_idx, stage_idx+1)] = transfer( local_output, stages[stage_idx+1], ) return local_output ### Example usage def example_overlap(num_processes, process_id): assert jax.device_count() == 8 NUM_STAGES = 4 NUM_MUBATCHES = 4 # FIXME: Support stages spread across multiple processes. assert NUM_STAGES % num_processes == 0 # Takes ~5ms/stage/microbatch on H100s: LAYER_SIZE = 8192 # # a) Several layers per stage, little communication (32MB activations) # NUM_LAYERS = NUM_STAGES * 16 # BATCH_SIZE = 1024 # b) One layer per stage, more communication (512MB activations) NUM_LAYERS = NUM_STAGES BATCH_SIZE = 1024 * 16 def mlp(params, xs): for W in params: xs = xs @ W return xs def init_params(key): params = [] for _ in range(NUM_LAYERS): key, key_W = jax.random.split(key) params.append(jax.random.normal(key_W, (LAYER_SIZE, LAYER_SIZE))) return params, key # Two devices per stage (running fully-replicated) num_devices_per_stage = 2 stages = [] for i in range(NUM_STAGES): devices = jax.devices()[ num_devices_per_stage*i : num_devices_per_stage*(i+1) ] assert all(d.process_index == devices[0].process_index for d in devices) mesh = Mesh(np.asarray(devices), ('repl',)) jitted_fun = mm.jit( mlp, in_shardings=(NamedSharding(mesh, P()), NamedSharding(mesh, P())), out_shardings=NamedSharding(mesh, P()), ) stages.append(Stage(jitted_fun, mesh)) def step_fn(params_by_stage, xs): return stages_step_fn(stages, NUM_MUBATCHES, params_by_stage, xs) def shard_params_by_stage(params): num_per_stage, rem = divmod(len(params), NUM_STAGES) assert num_per_stage > 0 assert rem == 0 params_by_stage = [ jax.tree.map( lambda arr: transfer(arr, stages[stage_idx]), params[num_per_stage*stage_idx:num_per_stage*(stage_idx+1)], ) for stage_idx in range(NUM_STAGES) ] return params_by_stage key = jax.random.PRNGKey(0) params, key = init_params(key) params_by_stage = shard_params_by_stage(params) key, key_xs = jax.random.split(key) xs_batch = jax.random.uniform(key_xs, (BATCH_SIZE, LAYER_SIZE)) NUM_STEPS = 50 NUM_STEPS_PROFILED = 3 for i in range(NUM_STEPS): 
print(f'===== STEP {i} {process_id=} =====') if i == 1: # The overhead from compilations during warm-up ends up # staggering executions on devices of the same stage. The sleep # below allows them to catch up. In a real model collectives # within each stage would likely have the same effect of keeping # devices in sync. time.sleep(0.2) if i == NUM_STEPS - NUM_STEPS_PROFILED: profile_utils.maybe_start_profile(f"overlap_trace/p{process_id}") xs_batch = transfer(xs_batch, stages[0]) with profile_utils.annotate(f'step{i}', color='white'): xs_batch = step_fn(params_by_stage, xs_batch) if __name__ == '__main__': import sys num_processes = 4 if len(sys.argv) >= 2: num_processes = int(sys.argv[1]) success = launch_utils.launch_example(num_processes, example_overlap) sys.exit(0 if success else 1)
Stage
python
django__django
tests/model_formsets/test_uuid.py
{ "start": 309, "end": 4727 }
class ____(TestCase): def test_inlineformset_factory_nulls_default_pks(self): """ #24377 - If we're adding a new object, a parent's auto-generated pk from the model field default should be ignored as it's regenerated on the save request. Tests the case where both the parent and child have a UUID primary key. """ FormSet = inlineformset_factory(UUIDPKParent, UUIDPKChild, fields="__all__") formset = FormSet() self.assertIsNone(formset.forms[0].fields["parent"].initial) def test_inlineformset_factory_ignores_default_pks_on_submit(self): """ #24377 - Inlines with a model field default should ignore that default value to avoid triggering validation on empty forms. """ FormSet = inlineformset_factory(UUIDPKParent, UUIDPKChild, fields="__all__") formset = FormSet( { "uuidpkchild_set-TOTAL_FORMS": 3, "uuidpkchild_set-INITIAL_FORMS": 0, "uuidpkchild_set-MAX_NUM_FORMS": "", "uuidpkchild_set-0-name": "Foo", "uuidpkchild_set-1-name": "", "uuidpkchild_set-2-name": "", } ) self.assertTrue(formset.is_valid()) self.assertIsNone(formset.instance.uuid) self.assertIsNone(formset.forms[0].instance.parent_id) def test_inlineformset_factory_nulls_default_pks_uuid_parent_auto_child(self): """ #24958 - Variant of test_inlineformset_factory_nulls_default_pks for the case of a parent object with a UUID primary key and a child object with an AutoField primary key. """ FormSet = inlineformset_factory( UUIDPKParent, AutoPKChildOfUUIDPKParent, fields="__all__" ) formset = FormSet() self.assertIsNone(formset.forms[0].fields["parent"].initial) def test_inlineformset_factory_nulls_default_pks_auto_parent_uuid_child(self): """ #24958 - Variant of test_inlineformset_factory_nulls_default_pks for the case of a parent object with an AutoField primary key and a child object with a UUID primary key. """ FormSet = inlineformset_factory( AutoPKParent, UUIDPKChildOfAutoPKParent, fields="__all__" ) formset = FormSet() self.assertIsNone(formset.forms[0].fields["parent"].initial) def test_inlineformset_factory_nulls_default_pks_child_editable_pk(self): """ #24958 - Variant of test_inlineformset_factory_nulls_default_pks for the case of a parent object with a UUID primary key and a child object with an editable natural key for a primary key. """ FormSet = inlineformset_factory( UUIDPKParent, ChildWithEditablePK, fields="__all__" ) formset = FormSet() self.assertIsNone(formset.forms[0].fields["parent"].initial) def test_inlineformset_factory_nulls_default_pks_alternate_key_relation(self): """ #24958 - Variant of test_inlineformset_factory_nulls_default_pks for the case of a parent object with a UUID alternate key and a child object that relates to that alternate key. """ FormSet = inlineformset_factory( ParentWithUUIDAlternateKey, ChildRelatedViaAK, fields="__all__" ) formset = FormSet() self.assertIsNone(formset.forms[0].fields["parent"].initial) def test_inlineformset_factory_nulls_default_pks_alternate_key_relation_data(self): """ If form data is provided, a parent's auto-generated alternate key is set. """ FormSet = inlineformset_factory( ParentWithUUIDAlternateKey, ChildRelatedViaAK, fields="__all__" ) formset = FormSet( { "childrelatedviaak_set-TOTAL_FORMS": 3, "childrelatedviaak_set-INITIAL_FORMS": 0, "childrelatedviaak_set-MAX_NUM_FORMS": "", "childrelatedviaak_set-0-name": "Test", "childrelatedviaak_set-1-name": "", "childrelatedviaak_set-2-name": "", } ) self.assertIs(formset.is_valid(), True) self.assertIsNotNone(formset.instance.uuid) self.assertEqual(formset.forms[0].instance.parent_id, formset.instance.uuid)
InlineFormsetTests
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 7000, "end": 9213 }
class ____(Operation): def __init__(self, axis=None, keepdims=False, *, name=None): super().__init__(name=name) if isinstance(axis, int): self.axis = [axis] else: self.axis = axis self.keepdims = keepdims def call(self, x): return backend.numpy.all( x, axis=self.axis, keepdims=self.keepdims, ) def compute_output_spec(self, x): return KerasTensor( reduce_shape( x.shape, axis=self.axis, keepdims=self.keepdims, ), dtype="bool", ) @keras_export(["keras.ops.all", "keras.ops.numpy.all"]) def all(x, axis=None, keepdims=False): """Test whether all array elements along a given axis evaluate to `True`. Args: x: Input tensor. axis: An integer or tuple of integers that represent the axis along which a logical AND reduction is performed. The default (`axis=None`) is to perform a logical AND over all the dimensions of the input array. `axis` may be negative, in which case it counts for the last to the first axis. keepdims: If `True`, axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. Defaults to `False`. Returns: The tensor containing the logical AND reduction over the `axis`. Examples: >>> x = keras.ops.convert_to_tensor([True, False]) >>> keras.ops.all(x) array(False, shape=(), dtype=bool) >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]]) >>> keras.ops.all(x, axis=0) array([ True False], shape=(2,), dtype=bool) `keepdims=True` outputs a tensor with dimensions reduced to one. >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]]) >>> keras.ops.all(x, keepdims=True) array([[False]], shape=(1, 1), dtype=bool) """ if any_symbolic_tensors((x,)): return All(axis=axis, keepdims=keepdims).symbolic_call(x) return backend.numpy.all(x, axis=axis, keepdims=keepdims)
All
python
pytorch__pytorch
torch/testing/_internal/common_quantization.py
{ "start": 2974, "end": 5149 }
class ____:
    """Used for checking GraphModule Node"""

    def __init__(self, op, target):
        """
        op: call_function | call_module
        target:
            for call_function, target would be a function
            for call_module, target would be the type of PyTorch module
        """
        self.op = op
        self.target = target

    @classmethod
    def call_function(cls, target):
        return NodeSpec("call_function", target)

    @classmethod
    def call_method(cls, target):
        return NodeSpec("call_method", target)

    @classmethod
    def call_module(cls, target):
        return NodeSpec("call_module", target)

    def __hash__(self):
        return hash((self.op, self.target))

    def __eq__(self, other):
        if not isinstance(other, NodeSpec):
            return NotImplemented
        return self.op == other.op and self.target == other.target

    def __repr__(self):
        return repr(self.op) + " " + repr(self.target)


def get_supported_device_types():
    return (
        ["cpu", "cuda"]
        if torch.cuda.is_available() and not TEST_WITH_ROCM
        else ["cpu"]
    )


def test_only_eval_fn(model, calib_data):
    r"""
    Default evaluation function takes a torch.utils.data.Dataset or a list of
    input Tensors and run the model on the dataset
    """
    for inp in calib_data:
        model(*inp)


_default_loss_fn = torch.nn.CrossEntropyLoss()


def test_only_train_fn(model, train_data, loss_fn=_default_loss_fn):
    r"""
    Default train function takes a torch.utils.data.Dataset and train the model
    on the dataset
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    train_loss, correct, total = 0, 0, 0
    for _ in range(10):
        model.train()
        for data, target in train_data:
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            _, predicted = torch.max(output, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    return train_loss, correct, total
NodeSpec
python
getsentry__sentry-python
sentry_sdk/integrations/loguru.py
{ "start": 1481, "end": 3233 }
class ____(Integration): identifier = "loguru" level = DEFAULT_LEVEL # type: Optional[int] event_level = DEFAULT_EVENT_LEVEL # type: Optional[int] breadcrumb_format = DEFAULT_FORMAT event_format = DEFAULT_FORMAT sentry_logs_level = DEFAULT_LEVEL # type: Optional[int] def __init__( self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL, breadcrumb_format=DEFAULT_FORMAT, event_format=DEFAULT_FORMAT, sentry_logs_level=DEFAULT_LEVEL, ): # type: (Optional[int], Optional[int], str | loguru.FormatFunction, str | loguru.FormatFunction, Optional[int]) -> None LoguruIntegration.level = level LoguruIntegration.event_level = event_level LoguruIntegration.breadcrumb_format = breadcrumb_format LoguruIntegration.event_format = event_format LoguruIntegration.sentry_logs_level = sentry_logs_level @staticmethod def setup_once(): # type: () -> None if LoguruIntegration.level is not None: logger.add( LoguruBreadcrumbHandler(level=LoguruIntegration.level), level=LoguruIntegration.level, format=LoguruIntegration.breadcrumb_format, ) if LoguruIntegration.event_level is not None: logger.add( LoguruEventHandler(level=LoguruIntegration.event_level), level=LoguruIntegration.event_level, format=LoguruIntegration.event_format, ) if LoguruIntegration.sentry_logs_level is not None: logger.add( loguru_sentry_logs_handler, level=LoguruIntegration.sentry_logs_level, )
LoguruIntegration
python
neetcode-gh__leetcode
python/0673-number-of-longest-increasing-subsequence.py
{ "start": 0, "end": 1894 }
class ____:
    def findNumberOfLIS(self, nums: List[int]) -> int:
        # 1. O(n^2) Recursive solution with Caching
        dp = {}  # key = index, value = [length of LIS, count]
        lenLIS, res = 0, 0  # length of LIS, count of LIS

        def dfs(i):
            if i in dp:
                return dp[i]
            maxLen, maxCnt = 1, 1  # length and count of LIS
            for j in range(i + 1, len(nums)):
                if nums[j] > nums[i]:  # make sure increasing order
                    length, count = dfs(j)
                    if length + 1 > maxLen:
                        maxLen, maxCnt = length + 1, count
                    elif length + 1 == maxLen:
                        maxCnt += count
            nonlocal lenLIS, res
            if maxLen > lenLIS:
                lenLIS, res = maxLen, maxCnt
            elif maxLen == lenLIS:
                res += maxCnt
            dp[i] = [maxLen, maxCnt]
            return dp[i]

        for i in range(len(nums)):
            dfs(i)
        return res

        # 2. O(n^2) Dynamic Programming
        dp = {}  # key = index, value = [length of LIS, count]
        lenLIS, res = 0, 0  # length of LIS, count of LIS

        # i = start of subseq
        for i in range(len(nums) - 1, -1, -1):
            maxLen, maxCnt = 1, 1  # len, cnt of LIS start from i

            for j in range(i + 1, len(nums)):
                if nums[j] > nums[i]:
                    length, count = dp[j]  # len, cnt of LIS start from j
                    if length + 1 > maxLen:
                        maxLen, maxCnt = length + 1, count
                    elif length + 1 == maxLen:
                        maxCnt += count
            if maxLen > lenLIS:
                lenLIS, res = maxLen, maxCnt
            elif maxLen == lenLIS:
                res += maxCnt
            dp[i] = [maxLen, maxCnt]
        return res
Solution
python
zarr-developers__zarr-python
src/zarr/abc/codec.py
{ "start": 5950, "end": 7286 }
class ____: """Mixin for array-to-bytes codecs that implement partial decoding.""" async def _decode_partial_single( self, byte_getter: ByteGetter, selection: SelectorTuple, chunk_spec: ArraySpec ) -> NDBuffer | None: raise NotImplementedError async def decode_partial( self, batch_info: Iterable[tuple[ByteGetter, SelectorTuple, ArraySpec]], ) -> Iterable[NDBuffer | None]: """Partially decodes a batch of chunks. This method determines parts of a chunk from the slice selection, fetches these parts from the store (via ByteGetter) and decodes them. Parameters ---------- batch_info : Iterable[tuple[ByteGetter, SelectorTuple, ArraySpec]] Ordered set of information about slices of encoded chunks. The slice selection determines which parts of the chunk will be fetched. The ByteGetter is used to fetch the necessary bytes. The chunk spec contains information about the construction of an array from the bytes. Returns ------- Iterable[NDBuffer | None] """ return await concurrent_map( list(batch_info), self._decode_partial_single, config.get("async.concurrency"), )
ArrayBytesCodecPartialDecodeMixin
python
doocs__leetcode
solution/3100-3199/3159.Find Occurrences of an Element in an Array/Solution.py
{ "start": 0, "end": 253 }
class ____:
    def occurrencesOfElement(
        self, nums: List[int], queries: List[int], x: int
    ) -> List[int]:
        ids = [i for i, v in enumerate(nums) if v == x]
        return [ids[i - 1] if i - 1 < len(ids) else -1 for i in queries]
Solution
python
patrick-kidger__equinox
equinox/nn/_shared.py
{ "start": 183, "end": 337 }
class ____:
    """Placeholder value for nodes that have been removed by `eqx.nn.Shared`."""

    def __repr__(self):
        return "SharedNode"
SharedNode
python
jazzband__django-pipeline
pipeline/storage.py
{ "start": 3077, "end": 3146 }
class ____(PipelineMixin, StaticFilesStorage):
    pass
PipelineStorage
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/sensors/comprehend.py
{ "start": 1457, "end": 2856 }
class ____(AwsBaseSensor[ComprehendHook]): """ General sensor behavior for Amazon Comprehend. Subclasses must implement following methods: - ``get_state()`` Subclasses must set the following fields: - ``INTERMEDIATE_STATES`` - ``FAILURE_STATES`` - ``SUCCESS_STATES`` - ``FAILURE_MESSAGE`` :param deferrable: If True, the sensor will operate in deferrable mode. This mode requires aiobotocore module to be installed. (default: False, but can be overridden in config file by setting default_deferrable to True) """ aws_hook_class = ComprehendHook INTERMEDIATE_STATES: tuple[str, ...] = () FAILURE_STATES: tuple[str, ...] = () SUCCESS_STATES: tuple[str, ...] = () FAILURE_MESSAGE = "" ui_color = "#66c3ff" def __init__( self, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), **kwargs: Any, ): super().__init__(**kwargs) self.deferrable = deferrable def poke(self, context: Context, **kwargs) -> bool: state = self.get_state() if state in self.FAILURE_STATES: raise AirflowException(self.FAILURE_MESSAGE) return state not in self.INTERMEDIATE_STATES @abc.abstractmethod def get_state(self) -> str: """Implement in subclasses."""
ComprehendBaseSensor
python
redis__redis-py
redis/cluster.py
{ "start": 60066, "end": 61441 }
class ____: """ Round-Robin Load Balancing """ def __init__(self, start_index: int = 0) -> None: self.primary_to_idx = {} self.start_index = start_index def get_server_index( self, primary: str, list_size: int, load_balancing_strategy: LoadBalancingStrategy = LoadBalancingStrategy.ROUND_ROBIN, ) -> int: if load_balancing_strategy == LoadBalancingStrategy.RANDOM_REPLICA: return self._get_random_replica_index(list_size) else: return self._get_round_robin_index( primary, list_size, load_balancing_strategy == LoadBalancingStrategy.ROUND_ROBIN_REPLICAS, ) def reset(self) -> None: self.primary_to_idx.clear() def _get_random_replica_index(self, list_size: int) -> int: return random.randint(1, list_size - 1) def _get_round_robin_index( self, primary: str, list_size: int, replicas_only: bool ) -> int: server_index = self.primary_to_idx.setdefault(primary, self.start_index) if replicas_only and server_index == 0: # skip the primary node index server_index = 1 # Update the index for the next round self.primary_to_idx[primary] = (server_index + 1) % list_size return server_index
LoadBalancer
python
pytest-dev__pytest
src/_pytest/python.py
{ "start": 32771, "end": 42158 }
class ____: """Make IDs for a parametrization.""" __slots__ = ( "argnames", "config", "idfn", "ids", "nodeid", "parametersets", ) # The argnames of the parametrization. argnames: Sequence[str] # The ParameterSets of the parametrization. parametersets: Sequence[ParameterSet] # Optionally, a user-provided callable to make IDs for parameters in a # ParameterSet. idfn: Callable[[Any], object | None] | None # Optionally, explicit IDs for ParameterSets by index. ids: Sequence[object | None] | None # Optionally, the pytest config. # Used for controlling ASCII escaping, determining parametrization ID # strictness, and for calling the :hook:`pytest_make_parametrize_id` hook. config: Config | None # Optionally, the ID of the node being parametrized. # Used only for clearer error messages. nodeid: str | None def make_unique_parameterset_ids(self) -> list[str | _HiddenParam]: """Make a unique identifier for each ParameterSet, that may be used to identify the parametrization in a node ID. If strict_parametrization_ids is enabled, and duplicates are detected, raises CollectError. Otherwise makes the IDs unique as follows: Format is <prm_1_token>-...-<prm_n_token>[counter], where prm_x_token is - user-provided id, if given - else an id derived from the value, applicable for certain types - else <argname><parameterset index> The counter suffix is appended only in case a string wouldn't be unique otherwise. """ resolved_ids = list(self._resolve_ids()) # All IDs must be unique! if len(resolved_ids) != len(set(resolved_ids)): # Record the number of occurrences of each ID. id_counts = Counter(resolved_ids) if self._strict_parametrization_ids_enabled(): parameters = ", ".join(self.argnames) parametersets = ", ".join( [saferepr(list(param.values)) for param in self.parametersets] ) ids = ", ".join( id if id is not HIDDEN_PARAM else "<hidden>" for id in resolved_ids ) duplicates = ", ".join( id if id is not HIDDEN_PARAM else "<hidden>" for id, count in id_counts.items() if count > 1 ) msg = textwrap.dedent(f""" Duplicate parametrization IDs detected, but strict_parametrization_ids is set. Test name: {self.nodeid} Parameters: {parameters} Parameter sets: {parametersets} IDs: {ids} Duplicates: {duplicates} You can fix this problem using `@pytest.mark.parametrize(..., ids=...)` or `pytest.param(..., id=...)`. """).strip() # noqa: E501 raise nodes.Collector.CollectError(msg) # Map the ID to its next suffix. id_suffixes: dict[str, int] = defaultdict(int) # Suffix non-unique IDs to make them unique. 
for index, id in enumerate(resolved_ids): if id_counts[id] > 1: if id is HIDDEN_PARAM: self._complain_multiple_hidden_parameter_sets() suffix = "" if id and id[-1].isdigit(): suffix = "_" new_id = f"{id}{suffix}{id_suffixes[id]}" while new_id in set(resolved_ids): id_suffixes[id] += 1 new_id = f"{id}{suffix}{id_suffixes[id]}" resolved_ids[index] = new_id id_suffixes[id] += 1 assert len(resolved_ids) == len(set(resolved_ids)), ( f"Internal error: {resolved_ids=}" ) return resolved_ids def _strict_parametrization_ids_enabled(self) -> bool: if self.config is None: return False strict_parametrization_ids = self.config.getini("strict_parametrization_ids") if strict_parametrization_ids is None: strict_parametrization_ids = self.config.getini("strict") return cast(bool, strict_parametrization_ids) def _resolve_ids(self) -> Iterable[str | _HiddenParam]: """Resolve IDs for all ParameterSets (may contain duplicates).""" for idx, parameterset in enumerate(self.parametersets): if parameterset.id is not None: # ID provided directly - pytest.param(..., id="...") if parameterset.id is HIDDEN_PARAM: yield HIDDEN_PARAM else: yield _ascii_escaped_by_config(parameterset.id, self.config) elif self.ids and idx < len(self.ids) and self.ids[idx] is not None: # ID provided in the IDs list - parametrize(..., ids=[...]). if self.ids[idx] is HIDDEN_PARAM: yield HIDDEN_PARAM else: yield self._idval_from_value_required(self.ids[idx], idx) else: # ID not provided - generate it. yield "-".join( self._idval(val, argname, idx) for val, argname in zip( parameterset.values, self.argnames, strict=True ) ) def _idval(self, val: object, argname: str, idx: int) -> str: """Make an ID for a parameter in a ParameterSet.""" idval = self._idval_from_function(val, argname, idx) if idval is not None: return idval idval = self._idval_from_hook(val, argname) if idval is not None: return idval idval = self._idval_from_value(val) if idval is not None: return idval return self._idval_from_argname(argname, idx) def _idval_from_function(self, val: object, argname: str, idx: int) -> str | None: """Try to make an ID for a parameter in a ParameterSet using the user-provided id callable, if given.""" if self.idfn is None: return None try: id = self.idfn(val) except Exception as e: prefix = f"{self.nodeid}: " if self.nodeid is not None else "" msg = "error raised while trying to determine id of parameter '{}' at position {}" msg = prefix + msg.format(argname, idx) raise ValueError(msg) from e if id is None: return None return self._idval_from_value(id) def _idval_from_hook(self, val: object, argname: str) -> str | None: """Try to make an ID for a parameter in a ParameterSet by calling the :hook:`pytest_make_parametrize_id` hook.""" if self.config: id: str | None = self.config.hook.pytest_make_parametrize_id( config=self.config, val=val, argname=argname ) return id return None def _idval_from_value(self, val: object) -> str | None: """Try to make an ID for a parameter in a ParameterSet from its value, if the value type is supported.""" if isinstance(val, str | bytes): return _ascii_escaped_by_config(val, self.config) elif val is None or isinstance(val, float | int | bool | complex): return str(val) elif isinstance(val, re.Pattern): return ascii_escaped(val.pattern) elif val is NOTSET: # Fallback to default. Note that NOTSET is an enum.Enum. pass elif isinstance(val, enum.Enum): return str(val) elif isinstance(getattr(val, "__name__", None), str): # Name of a class, function, module, etc. 
name: str = getattr(val, "__name__") return name return None def _idval_from_value_required(self, val: object, idx: int) -> str: """Like _idval_from_value(), but fails if the type is not supported.""" id = self._idval_from_value(val) if id is not None: return id # Fail. prefix = self._make_error_prefix() msg = ( f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. " "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__." ) fail(msg, pytrace=False) @staticmethod def _idval_from_argname(argname: str, idx: int) -> str: """Make an ID for a parameter in a ParameterSet from the argument name and the index of the ParameterSet.""" return str(argname) + str(idx) def _complain_multiple_hidden_parameter_sets(self) -> NoReturn: fail( f"{self._make_error_prefix()}multiple instances of HIDDEN_PARAM " "cannot be used in the same parametrize call, " "because the tests names need to be unique." ) def _make_error_prefix(self) -> str: if self.nodeid is not None: return f"In {self.nodeid}: " else: return "" @final @dataclasses.dataclass(frozen=True)
IdMaker
python
doocs__leetcode
lcof2/剑指 Offer II 077. 链表排序/Solution.py
{ "start": 151, "end": 835 }
class ____:
    def sortList(self, head: ListNode) -> ListNode:
        if head is None or head.next is None:
            return head
        slow, fast = head, head.next
        while fast and fast.next:
            slow, fast = slow.next, fast.next.next
        t = slow.next
        slow.next = None
        l1, l2 = self.sortList(head), self.sortList(t)
        dummy = ListNode()
        cur = dummy
        while l1 and l2:
            if l1.val <= l2.val:
                cur.next = l1
                l1 = l1.next
            else:
                cur.next = l2
                l2 = l2.next
            cur = cur.next
        cur.next = l1 or l2
        return dummy.next
Solution
python
sqlalchemy__sqlalchemy
test/typing/plain_files/orm/relationship.py
{ "start": 3233, "end": 3583 }
class ____(Base):
    __tablename__ = "employee"

    id: Mapped[int] = mapped_column(primary_key=True)
    team_id: Mapped[int] = mapped_column(ForeignKey("team.id"))
    team: Mapped["Team"] = relationship(back_populates="employees")

    __mapper_args__ = {
        "polymorphic_on": "type",
        "polymorphic_identity": "employee",
    }
Employee
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/methodOverride6.py
{ "start": 1603, "end": 1895 }
class ____(Parent1[bytes]):
    @overload
    def m1(self, x: Literal[True]) -> int: ...
    @overload
    def m1(self, x: Literal[False]) -> float: ...
    @overload
    def m1(self, x: bytes) -> bytes: ...
    def m1(self, x: bool | bytes) -> int | float | bytes:
        return x
Child1_6
python
fluentpython__example-code-2e
24-class-metaprog/slots/slots_timing.py
{ "start": 472, "end": 612 }
class ____(metaclass=Correct1):
    pass


o = Klass1()
try:
    o.z = 3
except AttributeError as e:
    print('Raised as expected:', e)
Klass1
python
getsentry__sentry
tests/symbolicator/test_minidump_full.py
{ "start": 1500, "end": 9167 }
class ____(RelayStoreHelper, TransactionTestCase): @pytest.fixture(autouse=True) def initialize(self, live_server, reset_snuba): self.project.update_option("sentry:builtin_symbol_sources", []) with ( patch("sentry.auth.system.is_internal_ip", return_value=True), self.options({"system.url-prefix": live_server.url}), ): # Run test case yield def upload_symbols(self): url = reverse( "sentry-api-0-dsym-files", kwargs={ "organization_id_or_slug": self.project.organization.slug, "project_id_or_slug": self.project.slug, }, ) self.login_as(user=self.user) out = BytesIO() f = zipfile.ZipFile(out, "w") f.write(get_fixture_path("native", "windows.sym"), "crash.sym") f.close() response = self.client.post( url, { "file": SimpleUploadedFile( "symbols.zip", out.getvalue(), content_type="application/zip" ) }, format="multipart", ) assert response.status_code == 201, response.content assert len(response.json()) == 1 _FEATURES = { "organizations:event-attachments": True, "organizations:symbol-sources": False, "organizations:custom-symbol-sources": False, } def test_full_minidump(self) -> None: self.project.update_option("sentry:store_crash_reports", STORE_CRASH_REPORTS_ALL) self.upload_symbols() with self.feature(self._FEATURES): with open(get_fixture_path("native", "windows.dmp"), "rb") as f: event = self.post_and_retrieve_minidump( { "upload_file_minidump": f, "some_file": ("hello.txt", BytesIO(b"Hello World!")), }, { "sentry[logger]": "test-logger", "sentry[level]": "error", }, ) candidates = event.data["debug_meta"]["images"][0]["candidates"] redact_location(candidates) event.data["debug_meta"]["images"][0]["candidates"] = candidates insta_snapshot_native_stacktrace_data(self, event.data) assert event.data.get("logger") == "test-logger" assert event.data.get("level") == "error" # assert event.data.get("extra") == {"foo": "bar"} attachments = sorted( EventAttachment.objects.filter(event_id=event.event_id), key=lambda x: x.name ) hello, minidump = attachments assert hello.name == "hello.txt" assert hello.sha1 == "2ef7bde608ce5404e97d5f042f95f89f1c232871" assert minidump.name == "windows.dmp" assert minidump.sha1 == "74bb01c850e8d65d3ffbc5bad5cabc4668fce247" def test_full_minidump_json_extra(self) -> None: self.project.update_option("sentry:store_crash_reports", STORE_CRASH_REPORTS_ALL) self.upload_symbols() with self.feature("organizations:event-attachments"): with open(get_fixture_path("native", "windows.dmp"), "rb") as f: event = self.post_and_retrieve_minidump( {"upload_file_minidump": f}, {"sentry": '{"logger":"test-logger"}', "foo": "bar"}, ) assert event.data.get("logger") == "test-logger" assert event.data.get("extra") == {"foo": "bar"} # Other assertions are performed by `test_full_minidump` def test_full_minidump_invalid_extra(self) -> None: self.project.update_option("sentry:store_crash_reports", STORE_CRASH_REPORTS_ALL) self.upload_symbols() with self.feature("organizations:event-attachments"): with open(get_fixture_path("native", "windows.dmp"), "rb") as f: event = self.post_and_retrieve_minidump( {"upload_file_minidump": f}, {"sentry": "{{{{", "foo": "bar"}, # invalid sentry JSON ) assert not event.data.get("logger") assert event.data.get("extra") == {"foo": "bar"} # Other assertions are performed by `test_full_minidump` def test_missing_dsym(self) -> None: with self.feature(self._FEATURES): with open(get_fixture_path("native", "windows.dmp"), "rb") as f: event = self.post_and_retrieve_minidump( {"upload_file_minidump": f}, {"sentry[logger]": "test-logger"} ) 
insta_snapshot_native_stacktrace_data(self, event.data) assert not EventAttachment.objects.filter(event_id=event.event_id) def test_reprocessing(self) -> None: # NOTE: # When running this test against a local symbolicator instance, # make sure that instance has its caches disabled. This test assumes # that a symbol upload has immediate effect, whereas in reality the # negative cache needs to expire first. self.project.update_option("sentry:store_crash_reports", STORE_CRASH_REPORTS_ALL) with self.feature(self._FEATURES): with open(get_fixture_path("native", "windows.dmp"), "rb") as f: event = self.post_and_retrieve_minidump( {"upload_file_minidump": f}, {"sentry[logger]": "test-logger"} ) insta_snapshot_native_stacktrace_data(self, event.data, subname="initial") self.upload_symbols() from sentry.tasks.reprocessing2 import reprocess_group with BurstTaskRunner() as burst: reprocess_group.delay(project_id=self.project.id, group_id=event.group_id) burst(max_jobs=100) new_event = eventstore.backend.get_event_by_id(self.project.id, event.event_id) assert new_event is not None assert new_event.event_id == event.event_id candidates = new_event.data["debug_meta"]["images"][0]["candidates"] redact_location(candidates) new_event.data["debug_meta"]["images"][0]["candidates"] = candidates insta_snapshot_native_stacktrace_data(self, new_event.data, subname="reprocessed") for event_id in (event.event_id, new_event.event_id): (minidump,) = sorted( EventAttachment.objects.filter(event_id=new_event.event_id), key=lambda x: x.name ) assert minidump.name == "windows.dmp" assert minidump.sha1 == "74bb01c850e8d65d3ffbc5bad5cabc4668fce247" @requires_objectstore def test_reprocessing_with_objectstore(self) -> None: with override_options( { "objectstore.force-stored-symbolication": 1, "objectstore.enable_for.attachments": 1, } ): self.test_reprocessing() def test_minidump_threadnames(self) -> None: self.project.update_option("sentry:store_crash_reports", STORE_CRASH_REPORTS_ALL) with self.feature(self._FEATURES): with open(get_fixture_path("native", "threadnames.dmp"), "rb") as f: event = self.post_and_retrieve_minidump({"upload_file_minidump": f}, {}) thread_name = get_path(event.data, "threads", "values", 1, "name") assert thread_name == "sentry-http" @requires_objectstore def test_force_stored_minidump(self) -> None: with override_options({"objectstore.force-stored-symbolication": 1}): self.test_minidump_threadnames()
SymbolicatorMinidumpIntegrationTest
python
davidhalter__jedi
test/completion/pep0484_typing.py
{ "start": 4822, "end": 5903 }
class ____(typing.DefaultDict[str, int]):
    def setdud(self):
        pass


def testdict(x: TestDefaultDict):
    #? ["setdud", "setdefault"]
    x.setd
    for key in x.keys():
        #? str()
        key
    for value in x.values():
        #? int()
        value

x = TestDefaultDict()
#? ["setdud", "setdefault"]
x.setd
for key in x.keys():
    #? str()
    key
for value in x.values():
    #? int()
    value

"""
docstrings have some auto-import, annotations can use all of Python's import logic
"""
import typing as t
def union2(x: t.Union[int, str]):
    #? int() str()
    x

from typing import Union
def union3(x: Union[int, str]):
    #? int() str()
    x

from typing import Union as U
def union4(x: U[int, str]):
    #? int() str()
    x

#? typing.Optional
typing.Optional[0]

# -------------------------
# Type Vars
# -------------------------

TYPE_VARX = typing.TypeVar('TYPE_VARX')
TYPE_VAR_CONSTRAINTSX = typing.TypeVar('TYPE_VAR_CONSTRAINTSX', str, int)
#? ['__class__']
TYPE_VARX.__clas
#! ["TYPE_VARX = typing.TypeVar('TYPE_VARX')"]
TYPE_VARX
TestDefaultDict
python
django__django
tests/db_functions/math/test_atan2.py
{ "start": 182, "end": 1730 }
class ____(TestCase):
    def test_null(self):
        IntegerModel.objects.create(big=100)
        obj = IntegerModel.objects.annotate(
            null_atan2_sn=ATan2("small", "normal"),
            null_atan2_nb=ATan2("normal", "big"),
            null_atan2_bn=ATan2("big", "normal"),
        ).first()
        self.assertIsNone(obj.null_atan2_sn)
        self.assertIsNone(obj.null_atan2_nb)
        self.assertIsNone(obj.null_atan2_bn)

    def test_decimal(self):
        DecimalModel.objects.create(n1=Decimal("-9.9"), n2=Decimal("4.6"))
        obj = DecimalModel.objects.annotate(n_atan2=ATan2("n1", "n2")).first()
        self.assertIsInstance(obj.n_atan2, Decimal)
        self.assertAlmostEqual(obj.n_atan2, Decimal(math.atan2(obj.n1, obj.n2)))

    def test_float(self):
        FloatModel.objects.create(f1=-25, f2=0.33)
        obj = FloatModel.objects.annotate(f_atan2=ATan2("f1", "f2")).first()
        self.assertIsInstance(obj.f_atan2, float)
        self.assertAlmostEqual(obj.f_atan2, math.atan2(obj.f1, obj.f2))

    def test_integer(self):
        IntegerModel.objects.create(small=0, normal=1, big=10)
        obj = IntegerModel.objects.annotate(
            atan2_sn=ATan2("small", "normal"),
            atan2_nb=ATan2("normal", "big"),
        ).first()
        self.assertIsInstance(obj.atan2_sn, float)
        self.assertIsInstance(obj.atan2_nb, float)
        self.assertAlmostEqual(obj.atan2_sn, math.atan2(obj.small, obj.normal))
        self.assertAlmostEqual(obj.atan2_nb, math.atan2(obj.normal, obj.big))
ATan2Tests
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 580801, "end": 581205 }
class ____(sgqlc.types.Type):
    """An edge in a connection."""

    __schema__ = github_schema
    __field_names__ = ("cursor", "node")
    cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
    """A cursor for use in pagination."""

    node = sgqlc.types.Field("DiscussionPollOption", graphql_name="node")
    """The item at the end of the edge."""
DiscussionPollOptionEdge
python
gevent__gevent
src/gevent/_interfaces.py
{ "start": 9396, "end": 9985 }
class ____(Interface):
    """
    Represents a function that will be run some time in the future.

    Callback functions run in the hub, and as such they cannot use
    gevent's blocking API; any exception they raise cannot be caught.
    """

    pending = schema.Bool(description=u"Has this callback run yet?", readonly=True)

    def stop():
        """
        If this object is still `pending`, cause it to no longer be
        `pending`; the function will not be run.
        """

    def close():
        """
        An alias of `stop`.
        """
ICallback
python
python__mypy
test-data/unit/plugins/class_attr_hook.py
{ "start": 156, "end": 585 }
class ____(Plugin):
    def get_class_attribute_hook(
        self, fullname: str
    ) -> Callable[[AttributeContext], MypyType] | None:
        if fullname == "__main__.Cls.attr":
            return my_hook
        return None


def my_hook(ctx: AttributeContext) -> MypyType:
    return ctx.api.named_generic_type("builtins.int", [])


def plugin(_version: str) -> type[ClassAttrPlugin]:
    return ClassAttrPlugin
ClassAttrPlugin
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/nn_ops/losses_test.py
{ "start": 22784, "end": 29628 }
class ____(test.TestCase):

  @test_util.run_deprecated_v1
  def testAllCorrectSigmoid(self):
    with self.cached_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
      loss = losses.sigmoid_cross_entropy(labels, logits)
      self.assertEqual(logits.dtype, loss.dtype)
      self.assertEqual('sigmoid_cross_entropy_loss/value', loss.op.name)
      self.assertAlmostEqual(0.0, self.evaluate(loss), 3)

  @test_util.run_deprecated_v1
  def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
    logits = array_ops.placeholder(dtypes.float32, shape=(None, 1))
    labels = array_ops.placeholder(dtypes.float32, shape=(None, 1))
    weights = array_ops.ones_like(logits, dtype=dtypes.float32)

    loss = losses.sigmoid_cross_entropy(labels, logits, weights)
    self.assertEqual(logits.dtype, loss.dtype)

    with self.cached_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          logits: np.ones((32, 1)),
                          labels: np.ones((32, 1)),
                      })
      self.assertAlmostEqual(0.313, loss, 3)

  @test_util.run_deprecated_v1
  def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
    logits = array_ops.placeholder(dtypes.float32, shape=(None, 2))
    labels = array_ops.placeholder(dtypes.float32, shape=(None, 2))
    weights = array_ops.ones_like(logits, dtype=dtypes.float32)

    loss = losses.sigmoid_cross_entropy(labels, logits, weights)
    self.assertEqual(logits.dtype, loss.dtype)

    with self.cached_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          logits: np.ones((32, 2)),
                          labels: np.ones((32, 2)),
                      })
      self.assertAlmostEqual(0.313, loss, 3)

  @test_util.run_deprecated_v1
  def testAllWrongSigmoid(self):
    with self.cached_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
      loss = losses.sigmoid_cross_entropy(labels, logits)
      self.assertEqual(logits.dtype, loss.dtype)
      self.assertEqual('sigmoid_cross_entropy_loss/value', loss.op.name)
      self.assertAlmostEqual(self.evaluate(loss), 600.0 / 9.0, 3)

  @test_util.run_deprecated_v1
  def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
    with self.cached_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
      weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
      loss = losses.sigmoid_cross_entropy(labels, logits, weights)
      self.assertEqual(logits.dtype, loss.dtype)
      self.assertEqual('sigmoid_cross_entropy_loss/value', loss.op.name)
      self.assertAlmostEqual(1700.0 / 7.0, self.evaluate(loss), 3)

  @test_util.run_deprecated_v1
  def testMultiCorrectSigmoid(self):
    logits = constant_op.constant([[100.0, -100.0, 100.0],
                                   [100.0, 100.0, -100.0],
                                   [-100.0, 100.0, 100.0]])
    labels = constant_op.constant([[1, 0, 1], [1, 1, 0], [0, 1, 1]])
    loss = losses.sigmoid_cross_entropy(labels, logits)
    self.assertEqual(logits.dtype, loss.dtype)
    self.assertEqual('sigmoid_cross_entropy_loss/value', loss.op.name)

    with self.cached_session():
      self.assertAlmostEqual(0.0, self.evaluate(loss), 3)

  def testSigmoidFloat64(self):
    logits = constant_op.constant((
        (100.0, -100.0, 100.0),
        (100.0, -100.0, 100.0),
        (100.0, 100.0, -100.0)
    ), dtype=dtypes.float64)
    labels = constant_op.constant((
        (1, 0, 1), (1, 1, 0), (0, 1, 1)
    ), dtype=dtypes.int64)
    loss = losses.sigmoid_cross_entropy(labels, logits)
    self.assertEqual(logits.dtype, loss.dtype)

    with self.cached_session():
      self.assertAlmostEqual(44.444, self.evaluate(loss), 3)

  def testSigmoidNoReduction(self):
    logits = constant_op.constant((
        (100.0, -100.0, 100.0),
        (100.0, -100.0, 100.0),
        (100.0, 100.0, -100.0)))
    labels = constant_op.constant(((1, 0, 1), (1, 1, 0), (0, 1, 1)))
    loss = losses.sigmoid_cross_entropy(
        labels, logits, reduction=losses.Reduction.NONE)
    self.assertEqual(logits.dtype, loss.dtype)

    with self.cached_session():
      self.assertAllClose(((0., 0., 0.), (0., 100., 100.), (100., 0., 100.)),
                          self.evaluate(loss), 3)

  @test_util.run_deprecated_v1
  def testSigmoidLabelSmoothingCorrect(self):
    with self.cached_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0]])
      labels = constant_op.constant([[1, 0, 1]])
      # Sigmoid cross entropy loss is:
      #   max(x,0) - x*z + log(1 + exp(-abs(x)))
      # The new labels are:
      #   z' = z * (1 - L) + 0.5 L
      #   1 -> 1 - 0.5 L
      #   0 -> 0.5 L
      # here we expect:
      # 1/3 * (100 - 100 * (1 - 0.5 L)  + 0
      #        + 0  + 100 * (0.5 L)     + 0
      #        + 0  + 100 * (1 - 0.5 L) + 0)
      # = 1/3 * (100 + 50 L)
      label_smoothing = 0.1
      loss = losses.sigmoid_cross_entropy(
          labels, logits, label_smoothing=label_smoothing)
      self.assertEqual(logits.dtype, loss.dtype)
      self.assertEqual('sigmoid_cross_entropy_loss/value', loss.op.name)
      expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
      self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)

  @test_util.run_deprecated_v1
  def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
    with self.cached_session():
      label_smoothing = 0.1
      sigmoid_logits = constant_op.constant([[100.0, -100.0, -100.0]])
      sigmoid_labels = constant_op.constant([[1, 0, 1]])
      sigmoid_loss = losses.sigmoid_cross_entropy(
          sigmoid_labels, sigmoid_logits, label_smoothing=label_smoothing)
      self.assertEqual(sigmoid_logits.dtype, sigmoid_loss.dtype)

      softmax_logits = constant_op.constant(
          [[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
      softmax_labels = constant_op.constant([[0, 1], [1, 0], [0, 1]])
      softmax_loss = losses.softmax_cross_entropy(
          softmax_labels, softmax_logits, label_smoothing=label_smoothing)
      self.assertAlmostEqual(
          self.evaluate(sigmoid_loss), self.evaluate(softmax_loss), 3)

  @test_util.run_deprecated_v1
SigmoidCrossEntropyLossTest
python
plotly__plotly.py
plotly/graph_objs/layout/selection/_line.py
{ "start": 235, "end": 4166 }
class ____(_BaseLayoutHierarchyType):
    _parent_path_str = "layout.selection"
    _path_str = "layout.selection.line"
    _valid_props = {"color", "dash", "width"}

    @property
    def color(self):
        """
        Sets the line color.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color: see https://plotly.com/python/css-colors/ for a list

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def dash(self):
        """
        Sets the dash style of lines. Set to a dash type string
        ("solid", "dot", "dash", "longdash", "dashdot", or
        "longdashdot") or a dash length list in px (eg
        "5px,10px,2px,2px").

        The 'dash' property is an enumeration that may be specified as:
          - One of the following dash styles:
                ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
          - A string containing a dash length list in pixels or percentages
                (e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)

        Returns
        -------
        str
        """
        return self["dash"]

    @dash.setter
    def dash(self, val):
        self["dash"] = val

    @property
    def width(self):
        """
        Sets the line width (in px).

        The 'width' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the line color.
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        width
            Sets the line width (in px).
        """

    def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.selection.Line`
        color
            Sets the line color.
        dash
            Sets the dash style of lines. Set to a dash type string
            ("solid", "dot", "dash", "longdash", "dashdot", or
            "longdashdot") or a dash length list in px (eg
            "5px,10px,2px,2px").
        width
            Sets the line width (in px).

        Returns
        -------
        Line
        """
        super().__init__("line")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.layout.selection.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.selection.Line`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        self._set_property("color", arg, color)
        self._set_property("dash", arg, dash)
        self._set_property("width", arg, width)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
Line