Dataset columns:
  language   — stringclasses (1 value)
  repo       — stringclasses (346 values)
  path       — stringlengths (6 to 201)
  class_span — dict
  source     — stringlengths (21 to 2.38M)
  target     — stringlengths (1 to 96)
python
ray-project__ray
python/ray/train/_internal/state/schema.py
{ "start": 1994, "end": 2156 }
class ProcessStats(BaseModel):
    """Per-process resource usage snapshot reported by the train state API."""

    # CPU utilization of the process, in percent.
    cpuPercent: float
    # total memory, free memory, memory used ratio
    mem: Optional[List[int]]
    # Detailed memory breakdown for the process (see MemoryInfo).
    memoryInfo: MemoryInfo
ProcessStats
python
numpy__numpy
numpy/distutils/system_info.py
{ "start": 99275, "end": 99364 }
class numarray_info(_numpy_info):
    """system_info configuration entry for the legacy ``numarray`` package."""

    # Config-file section and importable module name probed by _numpy_info.
    section = 'numarray'
    modulename = 'numarray'
numarray_info
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_embed_image09.py
{ "start": 315, "end": 957 }
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        # Target workbook that the comparison harness checks against the
        # reference file shipped with the test suite.
        self.set_filename("embed_image09.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with image(s)."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Embed an image with both alt text and the decorative flag set.
        worksheet.embed_image(
            0,
            0,
            self.image_dir + "red.png",
            {"description": "Some alt text", "decorative": 1},
        )

        workbook.close()
        self.assertExcelEqual()
TestCompareXLSXFiles
python
scrapy__scrapy
tests/test_utils_project.py
{ "start": 912, "end": 1950 }
class TestGetProjectSettings:
    """get_project_settings() must honour SCRAPY_*-prefixed env vars only."""

    def test_valid_envvar(self):
        module_path = "tests.test_cmdline.settings"
        env = {"SCRAPY_SETTINGS_MODULE": module_path}
        # Escalate warnings to errors: a valid env var must warn about nothing.
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            with set_environ(**env):
                settings = get_project_settings()
                assert settings.get("SETTINGS_MODULE") == module_path

    def test_invalid_envvar(self):
        env = {"SCRAPY_FOO": "bar"}
        with set_environ(**env):
            settings = get_project_settings()
            # Unknown SCRAPY_* names must not leak into the settings object.
            assert settings.get("SCRAPY_FOO") is None

    def test_valid_and_invalid_envvars(self):
        module_path = "tests.test_cmdline.settings"
        env = {
            "SCRAPY_FOO": "bar",
            "SCRAPY_SETTINGS_MODULE": module_path,
        }
        with set_environ(**env):
            settings = get_project_settings()
            assert settings.get("SETTINGS_MODULE") == module_path
            assert settings.get("SCRAPY_FOO") is None
TestGetProjectSettings
python
ipython__ipython
tests/test_interactiveshell.py
{ "start": 31693, "end": 33466 }
class ____(object): def __repr__(self): return "DummyRepr" def _repr_html_(self): return "<b>dummy</b>" def _repr_javascript_(self): return "console.log('hi');", {"key": "value"} def test_user_variables(): # enable all formatters ip.display_formatter.active_types = ip.display_formatter.format_types ip.user_ns["dummy"] = d = DummyRepr() keys = {"dummy", "doesnotexist"} r = ip.user_expressions({key: key for key in keys}) assert keys == set(r.keys()) dummy = r["dummy"] assert {"status", "data", "metadata"} == set(dummy.keys()) assert dummy["status"] == "ok" data = dummy["data"] metadata = dummy["metadata"] assert data.get("text/html") == d._repr_html_() js, jsmd = d._repr_javascript_() assert data.get("application/javascript") == js assert metadata.get("application/javascript") == jsmd dne = r["doesnotexist"] assert dne["status"] == "error" assert dne["ename"] == "NameError" # back to text only ip.display_formatter.active_types = ["text/plain"] def test_user_expression(): # enable all formatters ip.display_formatter.active_types = ip.display_formatter.format_types query = { "a": "1 + 2", "b": "1/0", } r = ip.user_expressions(query) import pprint pprint.pprint(r) assert set(r.keys()) == set(query.keys()) a = r["a"] assert {"status", "data", "metadata"} == set(a.keys()) assert a["status"] == "ok" data = a["data"] metadata = a["metadata"] assert data.get("text/plain") == "3" b = r["b"] assert b["status"] == "error" assert b["ename"] == "ZeroDivisionError" # back to text only ip.display_formatter.active_types = ["text/plain"]
DummyRepr
python
pytorch__pytorch
test/distributed/elastic/multiprocessing/test_api.py
{ "start": 527, "end": 13829 }
class SignalHandlingTest(TestCase):
    """Tests for signal registration and handling in ``PContext.start``."""

    def setUp(self):
        super().setUp()
        # Remember the caller's env var (if any) so tearDown can restore it.
        self.original_signals_env = os.environ.get(
            "TORCHELASTIC_SIGNALS_TO_HANDLE", None
        )

    def tearDown(self):
        # Restore the pre-test environment exactly.
        if self.original_signals_env is not None:
            os.environ["TORCHELASTIC_SIGNALS_TO_HANDLE"] = self.original_signals_env
        elif "TORCHELASTIC_SIGNALS_TO_HANDLE" in os.environ:
            del os.environ["TORCHELASTIC_SIGNALS_TO_HANDLE"]

    def test_terminate_process_handler(self):
        """Test that the terminate process handler raises SignalException with the correct signal."""
        signum = signal.SIGTERM
        with self.assertRaises(SignalException) as cm:
            _terminate_process_handler(signum, None)

        self.assertEqual(cm.exception.sigval, signal.SIGTERM)
        # The signal is represented as a number in the string representation.
        self.assertIn(f"Process {os.getpid()} got signal: {signum}", str(cm.exception))

    @patch("torch.distributed.elastic.multiprocessing.api.threading")
    @patch("torch.distributed.elastic.multiprocessing.api.signal")
    @patch("torch.distributed.elastic.multiprocessing.api.logger")
    def test_start_registers_default_signals(
        self, mock_logger, mock_signal, mock_threading
    ):
        """Test that the start method registers the default signals."""
        # Pretend we are on the main thread so handlers get registered.
        mock_threading.current_thread.return_value = (
            mock_threading.main_thread.return_value
        )
        mock_pcontext = MagicMock(spec=PContext)
        mock_stdout_tail = MagicMock()
        mock_stderr_tail = MagicMock()
        mock_pcontext._tail_logs = [mock_stdout_tail, mock_stderr_tail]

        # No env override: exercise the default signal set.
        if "TORCHELASTIC_SIGNALS_TO_HANDLE" in os.environ:
            del os.environ["TORCHELASTIC_SIGNALS_TO_HANDLE"]

        PContext.start(mock_pcontext)

        expected_signals = ["SIGTERM", "SIGINT", "SIGHUP", "SIGQUIT"]
        signal_calls = 0
        for recorded in mock_signal.signal.call_args_list:
            args, _ = recorded
            sig, handler = args
            signal_calls += 1
            # Every registration must use our termination handler.
            self.assertEqual(handler, _terminate_process_handler)

        self.assertEqual(signal_calls, len(expected_signals))
        mock_pcontext._start.assert_called_once()
        mock_stdout_tail.start.assert_called_once()
        mock_stderr_tail.start.assert_called_once()

    @patch("torch.distributed.elastic.multiprocessing.api.threading")
    @patch("torch.distributed.elastic.multiprocessing.api.signal")
    @patch("torch.distributed.elastic.multiprocessing.api.logger")
    def test_start_registers_custom_signals(
        self, mock_logger, mock_signal, mock_threading
    ):
        """Test that the start method registers custom signals from the environment variable."""
        mock_threading.current_thread.return_value = (
            mock_threading.main_thread.return_value
        )
        mock_pcontext = MagicMock(spec=PContext)
        mock_stdout_tail = MagicMock()
        mock_stderr_tail = MagicMock()
        mock_pcontext._tail_logs = [mock_stdout_tail, mock_stderr_tail]

        # Override the default set via the environment variable.
        os.environ["TORCHELASTIC_SIGNALS_TO_HANDLE"] = "SIGTERM,SIGUSR1,SIGUSR2"

        PContext.start(mock_pcontext)

        expected_signals = ["SIGTERM", "SIGUSR1", "SIGUSR2"]
        signal_calls = 0
        for recorded in mock_signal.signal.call_args_list:
            args, _ = recorded
            sig, handler = args
            signal_calls += 1
            self.assertEqual(handler, _terminate_process_handler)

        self.assertEqual(signal_calls, len(expected_signals))
        mock_pcontext._start.assert_called_once()

    @patch("torch.distributed.elastic.multiprocessing.api.threading")
    @patch("torch.distributed.elastic.multiprocessing.api.signal")
    @patch("torch.distributed.elastic.multiprocessing.api.logger")
    def test_start_handles_invalid_signals(
        self, mock_logger, mock_signal, mock_threading
    ):
        """Test that the start method handles invalid signals gracefully."""
        mock_threading.current_thread.return_value = (
            mock_threading.main_thread.return_value
        )
        mock_pcontext = MagicMock(spec=PContext)
        mock_stdout_tail = MagicMock()
        mock_stderr_tail = MagicMock()
        mock_pcontext._tail_logs = [mock_stdout_tail, mock_stderr_tail]

        # Mix one valid and one bogus name in the env var.
        os.environ["TORCHELASTIC_SIGNALS_TO_HANDLE"] = "SIGTERM,INVALID_SIGNAL"

        # Ensure the mocked signal module has SIGTERM but not INVALID_SIGNAL.
        mock_signal.SIGTERM = signal.SIGTERM
        if hasattr(mock_signal, "INVALID_SIGNAL"):
            delattr(mock_signal, "INVALID_SIGNAL")

        PContext.start(mock_pcontext)

        # The bogus name should be warned about, not crash startup.  The exact
        # message may vary, so just look for INVALID_SIGNAL in warning calls.
        warning_calls = [
            call
            for call in mock_logger.warning.call_args_list
            if "INVALID_SIGNAL" in str(call)
        ]
        self.assertTrue(len(warning_calls) > 0, "Expected warning about INVALID_SIGNAL")
        mock_pcontext._start.assert_called_once()

    @patch("torch.distributed.elastic.multiprocessing.api.threading")
    @patch("torch.distributed.elastic.multiprocessing.api.signal")
    @patch("torch.distributed.elastic.multiprocessing.api.logger")
    def test_start_handles_windows_signals(
        self, mock_logger, mock_signal, mock_threading
    ):
        """Test that the start method handles Windows-specific signal behavior."""
        mock_threading.current_thread.return_value = (
            mock_threading.main_thread.return_value
        )
        mock_pcontext = MagicMock(spec=PContext)
        mock_stdout_tail = MagicMock()
        mock_stderr_tail = MagicMock()
        mock_pcontext._tail_logs = [mock_stdout_tail, mock_stderr_tail]

        # Include signals that Windows cannot register.
        os.environ["TORCHELASTIC_SIGNALS_TO_HANDLE"] = "SIGTERM,SIGHUP,SIGUSR1"
        mock_signal.SIGTERM = signal.SIGTERM
        mock_signal.SIGHUP = signal.SIGHUP
        mock_signal.SIGUSR1 = signal.SIGUSR1

        with patch("torch.distributed.elastic.multiprocessing.api.IS_WINDOWS", True):
            # Simulate Windows rejecting SIGHUP/SIGUSR1 registration.
            def signal_side_effect(sig, handler):
                if sig in [signal.SIGHUP, signal.SIGUSR1]:
                    raise RuntimeError("Signal not supported on Windows")

            mock_signal.signal.side_effect = signal_side_effect

            PContext.start(mock_pcontext)

            # Unsupported signals should be logged at info level, per signal.
            info_calls = [str(call) for call in mock_logger.info.call_args_list]
            sighup_logged = any(
                "SIGHUP" in call and "Windows" in call for call in info_calls
            )
            sigusr1_logged = any(
                "SIGUSR1" in call and "Windows" in call for call in info_calls
            )
            self.assertTrue(
                sighup_logged,
                f"Expected SIGHUP Windows message in info calls: {info_calls}",
            )
            self.assertTrue(
                sigusr1_logged,
                f"Expected SIGUSR1 Windows message in info calls: {info_calls}",
            )
            mock_pcontext._start.assert_called_once()

    @patch("torch.distributed.elastic.multiprocessing.api.threading")
    @patch("torch.distributed.elastic.multiprocessing.api.logger")
    def test_start_not_main_thread(self, mock_logger, mock_threading):
        """Test that the start method warns when not called from the main thread."""
        # current_thread != main_thread -> handler registration must be skipped.
        mock_threading.current_thread.return_value = MagicMock()
        mock_threading.main_thread.return_value = MagicMock()
        mock_pcontext = MagicMock(spec=PContext)
        mock_stdout_tail = MagicMock()
        mock_stderr_tail = MagicMock()
        mock_pcontext._tail_logs = [mock_stdout_tail, mock_stderr_tail]

        PContext.start(mock_pcontext)

        mock_logger.warning.assert_called_with(
            "Failed to register signal handlers since torchelastic is running on a child thread. "
            "This could lead to orphaned worker processes if the torchrun is terminated."
        )
        mock_pcontext._start.assert_called_once()

    @patch("torch.distributed.elastic.multiprocessing.api.threading")
    @patch("torch.distributed.elastic.multiprocessing.api.signal")
    @patch("torch.distributed.elastic.multiprocessing.api.logger")
    def test_start_supports_sigusr1_and_sigusr2(
        self, mock_logger, mock_signal, mock_threading
    ):
        """Test that the start method properly supports SIGUSR1 and SIGUSR2 signals."""
        mock_threading.current_thread.return_value = (
            mock_threading.main_thread.return_value
        )
        mock_pcontext = MagicMock(spec=PContext)
        mock_stdout_tail = MagicMock()
        mock_stderr_tail = MagicMock()
        mock_pcontext._tail_logs = [mock_stdout_tail, mock_stderr_tail]

        os.environ["TORCHELASTIC_SIGNALS_TO_HANDLE"] = "SIGUSR1,SIGUSR2"
        mock_signal.SIGUSR1 = signal.SIGUSR1
        mock_signal.SIGUSR2 = signal.SIGUSR2

        PContext.start(mock_pcontext)

        # Extract the signal from each registration call.
        signal_calls = mock_signal.signal.call_args_list
        registered_signals = [call[0][0] for call in signal_calls]

        self.assertIn(
            signal.SIGUSR1, registered_signals, "SIGUSR1 should be registered"
        )
        self.assertIn(
            signal.SIGUSR2, registered_signals, "SIGUSR2 should be registered"
        )

        # Both must be wired to the termination handler.
        for recorded in signal_calls:
            sig, handler = recorded[0]
            if sig in [signal.SIGUSR1, signal.SIGUSR2]:
                self.assertEqual(
                    handler,
                    _terminate_process_handler,
                    f"Signal {sig} should use _terminate_process_handler",
                )

        # Successful registration is logged at info level, per signal.
        info_calls = [str(call) for call in mock_logger.info.call_args_list]
        sigusr1_logged = any(
            "SIGUSR1" in call and "Registered signal handler" in call
            for call in info_calls
        )
        sigusr2_logged = any(
            "SIGUSR2" in call and "Registered signal handler" in call
            for call in info_calls
        )
        self.assertTrue(
            sigusr1_logged,
            f"Expected SIGUSR1 registration message in info calls: {info_calls}",
        )
        self.assertTrue(
            sigusr2_logged,
            f"Expected SIGUSR2 registration message in info calls: {info_calls}",
        )

        mock_pcontext._start.assert_called_once()
        mock_stdout_tail.start.assert_called_once()
        mock_stderr_tail.start.assert_called_once()


if __name__ == "__main__":
    run_tests()
SignalHandlingTest
python
langchain-ai__langchain
libs/langchain/langchain_classic/agents/mrkl/base.py
{ "start": 1088, "end": 1490 }
class ____(NamedTuple): """Configuration for a chain to use in MRKL system. Args: action_name: Name of the action. action: Action function to call. action_description: Description of the action. """ action_name: str action: Callable action_description: str @deprecated( "0.1.0", message=AGENT_DEPRECATION_WARNING, removal="1.0", )
ChainConfig
python
numba__numba
numba/core/caching.py
{ "start": 4671, "end": 5416 }
class UserProvidedCacheLocator(_SourceFileBackedLocatorMixin, _CacheLocator):
    """
    A locator that always point to the user provided directory in
    `numba.config.CACHE_DIR`
    """

    def __init__(self, py_func, py_file):
        self._py_file = py_file
        self._lineno = py_func.__code__.co_firstlineno
        # Keep the per-source subdirectory layout, but root it at the
        # user-configured cache directory instead of the source location.
        subpath = self.get_suitable_cache_subpath(py_file)
        self._cache_path = os.path.join(config.CACHE_DIR, subpath)

    def get_cache_path(self):
        return self._cache_path

    @classmethod
    def from_function(cls, py_func, py_file):
        # Only applicable when the user explicitly configured a cache dir.
        if not config.CACHE_DIR:
            return None
        return super(UserProvidedCacheLocator, cls).from_function(py_func, py_file)
UserProvidedCacheLocator
python
numpy__numpy
numpy/lib/tests/test_stride_tricks.py
{ "start": 13489, "end": 17967 }
class TestSlidingWindowView:
    """Behavioral tests for ``numpy.lib.stride_tricks.sliding_window_view``."""

    def test_1d(self):
        arr = np.arange(5)
        windows = sliding_window_view(arr, 2)
        expected = np.array([[0, 1], [1, 2], [2, 3], [3, 4]])
        assert_array_equal(windows, expected)

    def test_2d(self):
        i, j = np.ogrid[:3, :4]
        arr = 10 * i + j
        windows = sliding_window_view(arr, (2, 2))
        expected = np.array([[[[0, 1], [10, 11]],
                              [[1, 2], [11, 12]],
                              [[2, 3], [12, 13]]],
                             [[[10, 11], [20, 21]],
                              [[11, 12], [21, 22]],
                              [[12, 13], [22, 23]]]])
        assert_array_equal(windows, expected)

    def test_2d_with_axis(self):
        i, j = np.ogrid[:3, :4]
        arr = 10 * i + j
        # Window only along axis 0.
        windows = sliding_window_view(arr, 3, 0)
        expected = np.array([[[0, 10, 20],
                              [1, 11, 21],
                              [2, 12, 22],
                              [3, 13, 23]]])
        assert_array_equal(windows, expected)

    def test_2d_repeated_axis(self):
        i, j = np.ogrid[:3, :4]
        arr = 10 * i + j
        # The same axis may be given twice to window it twice.
        windows = sliding_window_view(arr, (2, 3), (1, 1))
        expected = np.array([[[[0, 1, 2], [1, 2, 3]]],
                             [[[10, 11, 12], [11, 12, 13]]],
                             [[[20, 21, 22], [21, 22, 23]]]])
        assert_array_equal(windows, expected)

    def test_2d_without_axis(self):
        i, j = np.ogrid[:4, :4]
        arr = 10 * i + j
        windows = sliding_window_view(arr, (2, 3))
        expected = np.array([[[[0, 1, 2], [10, 11, 12]],
                              [[1, 2, 3], [11, 12, 13]]],
                             [[[10, 11, 12], [20, 21, 22]],
                              [[11, 12, 13], [21, 22, 23]]],
                             [[[20, 21, 22], [30, 31, 32]],
                              [[21, 22, 23], [31, 32, 33]]]])
        assert_array_equal(windows, expected)

    def test_errors(self):
        i, j = np.ogrid[:4, :4]
        arr = 10 * i + j
        with pytest.raises(ValueError, match='cannot contain negative values'):
            sliding_window_view(arr, (-1, 3))
        with pytest.raises(
                ValueError,
                match='must provide window_shape for all dimensions of `x`'):
            sliding_window_view(arr, (1,))
        with pytest.raises(
                ValueError,
                match='Must provide matching length window_shape and axis'):
            sliding_window_view(arr, (1, 3, 4), axis=(0, 1))
        with pytest.raises(
                ValueError,
                match='window shape cannot be larger than input array'):
            sliding_window_view(arr, (5, 5))

    def test_writeable(self):
        arr = np.arange(5)
        view = sliding_window_view(arr, 2, writeable=False)
        assert_(not view.flags.writeable)
        with pytest.raises(
                ValueError,
                match='assignment destination is read-only'):
            view[0, 0] = 3
        view = sliding_window_view(arr, 2, writeable=True)
        assert_(view.flags.writeable)
        # Overlapping windows: one write is visible through the base array.
        view[0, 1] = 3
        assert_array_equal(arr, np.array([0, 3, 2, 3, 4]))

    def test_subok(self):
        class MyArray(np.ndarray):
            pass

        arr = np.arange(5).view(MyArray)
        assert_(not isinstance(sliding_window_view(arr, 2, subok=False), MyArray))
        assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray))
        # Default behavior
        assert_(not isinstance(sliding_window_view(arr, 2), MyArray))


def as_strided_writeable():
    arr = np.ones(10)
    view = as_strided(arr, writeable=False)
    assert_(not view.flags.writeable)

    # Check that writeable also is fine:
    view = as_strided(arr, writeable=True)
    assert_(view.flags.writeable)
    view[...] = 3
    assert_array_equal(arr, np.full_like(arr, 3))

    # Test that things do not break down for readonly:
    arr.flags.writeable = False
    view = as_strided(arr, writeable=False)
    view = as_strided(arr, writeable=True)
    assert_(not view.flags.writeable)
TestSlidingWindowView
python
eth-brownie__brownie
brownie/utils/docopt.py
{ "start": 12727, "end": 13054 }
class _NotRequired(_BranchPattern):
    """Branch pattern whose children may each match zero or one time.

    Always reports success: each child consumes input if it can, but a
    failing child does not fail the overall match.
    """

    def match(self, left: list[_Pattern], collected: list[_Pattern] | None = None) -> Any:
        if collected is None:
            collected = []
        for child in self.children:
            # A child's own success flag is deliberately ignored.
            _, left, collected = child.match(left, collected)
        return True, left, collected
_NotRequired
python
huggingface__transformers
src/transformers/models/ministral/modular_ministral.py
{ "start": 8769, "end": 8834 }
class MinistralPreTrainedModel(Qwen2PreTrainedModel):
    """Ministral base model; inherits all behavior from Qwen2PreTrainedModel."""

    pass
MinistralPreTrainedModel
python
crytic__slither
slither/core/declarations/function_contract.py
{ "start": 650, "end": 5870 }
class FunctionContract(Function, ContractLevel):
    """A function declared inside a contract."""

    def __init__(self, compilation_unit: "SlitherCompilationUnit") -> None:
        super().__init__(compilation_unit)
        # Set lazily via set_contract_declarer after construction.
        self._contract_declarer: Optional["Contract"] = None

    def set_contract_declarer(self, contract: "Contract") -> None:
        self._contract_declarer = contract

    @property
    def contract_declarer(self) -> "Contract":
        """
        Return the contract where this function was declared. Only functions have both a contract, and contract_declarer
        This is because we need to have separate representation of the function depending of the contract's context
        For example a function calling super.f() will generate different IR depending on the current contract's inheritance

        Returns:
            The contract where this function was declared
        """
        assert self._contract_declarer
        return self._contract_declarer

    @property
    def canonical_name(self) -> str:
        """
        str: contract.func_name(type1,type2)
        Return the function signature without the return values
        """
        if self._canonical_name is None:
            # Cache: "Declarer.scope.name(param_types)".
            name, parameters, _ = self.signature
            scope_parts = [self.contract_declarer.name] + self._internal_scope + [name]
            self._canonical_name = (
                ".".join(scope_parts) + "(" + ",".join(parameters) + ")"
            )
        return self._canonical_name

    def is_declared_by(self, contract: "Contract") -> bool:
        """
        Check if the element is declared by the contract
        :param contract:
        :return:
        """
        return self.contract_declarer == contract

    @property
    def file_scope(self) -> "FileScope":
        # This is the contract declarer's file scope because inherited functions have access
        # to the file scope which their declared in. This scope may contain references not
        # available in the child contract's scope. See inherited_function_scope.sol for an example.
        return self.contract_declarer.file_scope

    # region Functions
    ###################################################################################

    @property
    def functions_shadowed(self) -> List["Function"]:
        """
        Return the list of functions shadowed
        Returns:
            list(core.Function)

        """
        declared = [c.functions_declared for c in self.contract.inheritance]
        flattened = [candidate for sublist in declared for candidate in sublist]
        return [f for f in flattened if f.full_name == self.full_name]

    # endregion
    # region Summary information
    ###################################################################################

    def get_summary(
        self,
    ) -> Tuple[str, str, str, List[str], List[str], List[str], List[str], List[str], int]:
        """
        Return the function summary
        Returns:
            (str, str, str, list(str), list(str), listr(str), list(str), list(str);
            contract_name, name, visibility, modifiers, vars read, vars written,
            internal_calls, external_calls_as_expressions
        """
        return (
            self.contract_declarer.name,
            self.full_name,
            self.visibility,
            [str(x) for x in self.modifiers],
            [str(x) for x in self.state_variables_read + self.solidity_variables_read],
            [str(x) for x in self.state_variables_written],
            [str(x) for x in self.internal_calls],
            [str(x) for x in self.external_calls_as_expressions],
            compute_cyclomatic_complexity(self),
        )

    # endregion
    # region SlithIr and SSA
    ###################################################################################

    def generate_slithir_ssa(
        self, all_ssa_state_variables_instances: Dict[str, "StateIRVariable"]
    ) -> None:
        # Local imports avoid a circular dependency with the slithir package.
        from slither.slithir.utils.ssa import add_ssa_ir, transform_slithir_vars_to_ssa
        from slither.core.dominators.utils import (
            compute_dominance_frontier,
            compute_dominators,
        )

        compute_dominators(self.nodes)
        compute_dominance_frontier(self.nodes)
        transform_slithir_vars_to_ssa(self)
        # SSA on a malformed contract would produce bogus IR; skip it.
        if not self.contract.is_incorrectly_constructed:
            add_ssa_ir(self, all_ssa_state_variables_instances)
FunctionContract
python
scrapy__scrapy
tests/test_spiderloader/test_spiders/spider3.py
{ "start": 36, "end": 235 }
class Spider3(Spider):
    name = "spider3"
    allowed_domains = ["spider3.com"]

    @classmethod
    def handles_request(cls, request):
        # This spider claims exactly one URL and declines everything else.
        return request.url == "http://spider3.com/onlythis"
Spider3
python
django__django
tests/admin_views/models.py
{ "start": 27371, "end": 27588 }
class ParentWithUUIDPK(models.Model):
    """Model keyed by a random UUID primary key instead of an auto id."""

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    title = models.CharField(max_length=100)

    def __str__(self):
        return str(self.id)
ParentWithUUIDPK
python
pytorch__pytorch
torch/distributed/flight_recorder/components/config_manager.py
{ "start": 478, "end": 3951 }
class JobConfig:
    """
    A helper class to manage the script configuration.

    Owns the argparse parser for the flight-recorder analyzer and enforces
    inter-argument constraints when parsing.
    """

    def __init__(self: "JobConfig"):
        self.parser = argparse.ArgumentParser(
            description="PyTorch Flight recorder analyzing script."
        )
        self.parser.add_argument(
            "trace_dir",
            nargs="?",
            help="Directory containing one trace file per rank, named with <prefix>_<rank>.",
        )
        self.parser.add_argument(
            "--selected-ranks",
            default=None,
            nargs="+",
            type=int,
            help="List of ranks we want to show traces for.",
        )
        self.parser.add_argument(
            "--allow-incomplete-ranks",
            action="store_true",
            help=(
                "FR trace require all ranks to have dumps for analysis. "
                "This flag allows best-effort partial analysis of results "
                "and printing of collected data."
            ),
        )
        self.parser.add_argument(
            "--pg-filters",
            default=None,
            nargs="+",
            type=str,
            help=(
                "List of filter strings, it could be pg name or pg desc. "
                "If specified, only show traces for the given pg."
            ),
        )
        self.parser.add_argument("-o", "--output", default=None)
        self.parser.add_argument(
            "-p",
            "--prefix",
            help=(
                "Common filename prefix to strip such that rank can be extracted. "
                "If not specified, will attempt to infer a common prefix."
            ),
            default=None,
        )
        self.parser.add_argument("-j", "--just_print_entries", action="store_true")
        self.parser.add_argument("-v", "--verbose", action="store_true")
        self.parser.add_argument("--print_stack_trace", action="store_true")
        self.parser.add_argument(
            "--mismatch_cap",
            type=int,
            default=10,
            help="Maximum number of mismatches we print (from earliest).",
        )
        self.parser.add_argument(
            "--transform-ft",
            action="store_true",
            help="Transform PG config to use global ranks to analyze traces produced by torchft",
        )
        self.parser.add_argument(
            "--group-world-size",
            type=int,
            default=None,
            help="The number of ranks in 1 torchft replica group. "
            "Must be specified if --transform-ft is True",
        )

    def parse_args(
        self: "JobConfig", args: Optional[Sequence[str]] = None
    ) -> argparse.Namespace:
        """Parse ``args`` (or ``sys.argv`` when None) and validate combinations.

        Fix: ``args`` now defaults to ``None`` so callers no longer have to
        pass it explicitly; argparse falls back to ``sys.argv`` as usual.
        Also parses into a separate name instead of rebinding ``args``, which
        removes the need for the pyrefly type-ignore comments.
        """
        parsed = self.parser.parse_args(args)
        # Rank/pg filtering only makes sense when printing entries.
        if parsed.selected_ranks is not None:
            assert parsed.just_print_entries, (
                "Not support selecting ranks without printing entries"
            )
        if parsed.pg_filters is not None:
            assert parsed.just_print_entries, (
                "Not support selecting pg filters without printing entries"
            )
        if parsed.verbose:
            # `logger` is the flight-recorder module-level logger.
            logger.set_log_level(logging.DEBUG)
        return parsed
JobConfig
python
pallets__werkzeug
src/werkzeug/user_agent.py
{ "start": 37, "end": 1416 }
class ____: """Represents a parsed user agent header value. The default implementation does no parsing, only the :attr:`string` attribute is set. A subclass may parse the string to set the common attributes or expose other information. Set :attr:`werkzeug.wrappers.Request.user_agent_class` to use a subclass. :param string: The header value to parse. .. versionadded:: 2.0 This replaces the previous ``useragents`` module, but does not provide a built-in parser. """ platform: str | None = None """The OS name, if it could be parsed from the string.""" browser: str | None = None """The browser name, if it could be parsed from the string.""" version: str | None = None """The browser version, if it could be parsed from the string.""" language: str | None = None """The browser language, if it could be parsed from the string.""" def __init__(self, string: str) -> None: self.string: str = string """The original header value.""" def __repr__(self) -> str: return f"<{type(self).__name__} {self.browser}/{self.version}>" def __str__(self) -> str: return self.string def __bool__(self) -> bool: return bool(self.browser) def to_header(self) -> str: """Convert to a header value.""" return self.string
UserAgent
python
Pylons__pyramid
tests/test_path.py
{ "start": 2900, "end": 3972 }
class TestPackagePath(unittest.TestCase):
    """Tests for pyramid.path.package_path and its __abspath__ memoization."""

    def _callFUT(self, package):
        from pyramid.path import package_path

        return package_path(package)

    def test_it_package(self):
        import tests

        package = DummyPackageOrModule(tests)
        result = self._callFUT(package)
        self.assertEqual(result, package.package_path)

    def test_it_module(self):
        from . import test_path

        module = DummyPackageOrModule(test_path)
        result = self._callFUT(module)
        self.assertEqual(result, module.package_path)

    def test_memoization_success(self):
        from . import test_path

        module = DummyPackageOrModule(test_path)
        # A successful call caches the result on the module object.
        self._callFUT(module)
        self.assertEqual(module.__abspath__, module.package_path)

    def test_memoization_fail(self):
        from . import test_path

        # When setting the cache attribute raises, the result is still
        # returned but nothing is memoized.
        module = DummyPackageOrModule(test_path, raise_exc=TypeError)
        result = self._callFUT(module)
        self.assertFalse(hasattr(module, '__abspath__'))
        self.assertEqual(result, module.package_path)
TestPackagePath
python
tensorflow__tensorflow
tensorflow/python/eager/monitoring_test.py
{ "start": 871, "end": 6299 }
class ____(test_util.TensorFlowTestCase): def test_counter(self): counter = monitoring.Counter('test/counter', 'test counter') counter.get_cell().increase_by(1) self.assertEqual(counter.get_cell().value(), 1) counter.get_cell().increase_by(5) self.assertEqual(counter.get_cell().value(), 6) def test_multiple_counters(self): counter1 = monitoring.Counter('test/counter1', 'test counter', 'label1') counter1.get_cell('foo').increase_by(1) self.assertEqual(counter1.get_cell('foo').value(), 1) counter2 = monitoring.Counter('test/counter2', 'test counter', 'label1', 'label2') counter2.get_cell('foo', 'bar').increase_by(5) self.assertEqual(counter2.get_cell('foo', 'bar').value(), 5) def test_same_counter(self): counter1 = monitoring.Counter('test/same_counter', 'test counter') # pylint: disable=unused-variable counter2 = monitoring.Counter('test/same_counter', 'test counter') # pylint: disable=unused-variable def test_int_gauge(self): gauge = monitoring.IntGauge('test/gauge', 'test gauge') gauge.get_cell().set(1) self.assertEqual(gauge.get_cell().value(), 1) gauge.get_cell().set(5) self.assertEqual(gauge.get_cell().value(), 5) gauge1 = monitoring.IntGauge('test/gauge1', 'test gauge1', 'label1') gauge1.get_cell('foo').set(2) self.assertEqual(gauge1.get_cell('foo').value(), 2) def test_string_gauge(self): gauge = monitoring.StringGauge('test/gauge', 'test gauge') gauge.get_cell().set('left') self.assertEqual(gauge.get_cell().value(), 'left') gauge.get_cell().set('right') self.assertEqual(gauge.get_cell().value(), 'right') gauge1 = monitoring.StringGauge('test/gauge1', 'test gauge1', 'label1') gauge1.get_cell('foo').set('start') self.assertEqual(gauge1.get_cell('foo').value(), 'start') def test_bool_gauge(self): gauge = monitoring.BoolGauge('test/gauge', 'test gauge') gauge.get_cell().set(True) self.assertTrue(gauge.get_cell().value()) gauge.get_cell().set(False) self.assertFalse(gauge.get_cell().value()) gauge1 = monitoring.BoolGauge('test/gauge1', 'test gauge1', 'label1') 
gauge1.get_cell('foo').set(True) self.assertTrue(gauge1.get_cell('foo').value()) def test_sampler(self): buckets = monitoring.ExponentialBuckets(1.0, 2.0, 2) sampler = monitoring.Sampler('test/sampler', buckets, 'test sampler') sampler.get_cell().add(1.0) sampler.get_cell().add(5.0) histogram_proto = sampler.get_cell().value() self.assertEqual(histogram_proto.min, 1.0) self.assertEqual(histogram_proto.num, 2.0) self.assertEqual(histogram_proto.sum, 6.0) sampler1 = monitoring.Sampler('test/sampler1', buckets, 'test sampler', 'label1') sampler1.get_cell('foo').add(2.0) sampler1.get_cell('foo').add(4.0) sampler1.get_cell('bar').add(8.0) histogram_proto1 = sampler1.get_cell('foo').value() self.assertEqual(histogram_proto1.max, 4.0) self.assertEqual(histogram_proto1.num, 2.0) self.assertEqual(histogram_proto1.sum, 6.0) def test_context_manager(self): counter = monitoring.Counter('test/ctxmgr', 'test context manager', 'slot') with monitoring.MonitoredTimer(counter.get_cell('long')): time.sleep(0.01) with monitoring.MonitoredTimer(counter.get_cell('short')): time.sleep(0.01) self.assertGreater( counter.get_cell('long').value(), counter.get_cell('short').value() ) def test_monitored_timer_tracker(self): counter = monitoring.Counter('test/ctxmgr', 'test context manager', 'slot') counter2 = monitoring.Counter('test/ctxmgr2', 'slot') with monitoring.MonitoredTimer(counter.get_cell('long'), 'counter'): time.sleep(0.01) self.assertIn('counter', monitoring.MonitoredTimerSections) with monitoring.MonitoredTimer(counter2.get_cell(), 'counter2'): time.sleep(0.01) self.assertIn('counter', monitoring.MonitoredTimerSections) self.assertIn('counter2', monitoring.MonitoredTimerSections) with monitoring.MonitoredTimer(counter.get_cell('long'), 'counter'): time.sleep(0.01) self.assertNotIn('counter2', monitoring.MonitoredTimerSections) self.assertGreater( counter.get_cell('long').value(), counter.get_cell('short').value() ) self.assertGreater(counter2.get_cell().value(), 0) def 
test_repetitive_monitored_timer(self): counter = monitoring.Counter('test/ctxmgr', 'test context manager') with monitoring.MonitoredTimer( counter.get_cell(), monitored_section_name='action1', avoid_repetitive_counting=True, ): time.sleep(1) with monitoring.MonitoredTimer( counter.get_cell(), monitored_section_name='action1', avoid_repetitive_counting=True, ): time.sleep(1) # The inner section is not timed. self.assertEqual(counter.get_cell().value(), 0) self.assertGreater(counter.get_cell().value(), 0) def test_function_decorator(self): counter = monitoring.Counter('test/funcdecorator', 'test func decorator') @monitoring.monitored_timer(counter.get_cell()) def timed_function(seconds): time.sleep(seconds) timed_function(0.001) self.assertGreater(counter.get_cell().value(), 1000) if __name__ == '__main__': test.main()
MonitoringTest
python
dagster-io__dagster
python_modules/libraries/dagster-databricks/dagster_databricks/components/databricks_asset_bundle/component.py
{ "start": 2338, "end": 10882 }
class ____(Component, Resolvable): databricks_config_path: Annotated[ Path, Resolver( resolve_databricks_config_path, model_field_type=str, description="The path to the databricks.yml config file.", examples=[ "{{ project_root }}/path/to/databricks_yml_config_file", ], ), ] workspace: Annotated[ DatabricksWorkspace, Resolver( resolve_databricks_workspace, model_field_type=DatabricksWorkspaceArgs.model(), description="The mapping defining a DatabricksWorkspace.", examples=[ { "host": "your_host", "token": "your_token", }, ], ), ] compute_config: Annotated[ Union[ ResolvedDatabricksNewClusterConfig, ResolvedDatabricksExistingClusterConfig, ResolvedDatabricksServerlessConfig, ], Resolver.default( model_field_type=Union[ ResolvedDatabricksNewClusterConfig, ResolvedDatabricksExistingClusterConfig, ResolvedDatabricksServerlessConfig, ], description=( "A mapping defining a Databricks compute config. " "Allowed types are databricks_asset_bundle.configs.ResolvedDatabricksNewClusterConfig, " "databricks_asset_bundle.configs.ResolvedDatabricksExistingClusterConfig and " "databricks_asset_bundle.configs.ResolvedDatabricksServerlessConfig." 
), examples=[ { "spark_version": "some_spark_version", "node_type_id": "some_node_type_id", "num_workers": 1, }, { "existing_cluster_id": "some_existing_cluster_id", }, { "is_serverless": True, }, ], ), ] = field(default_factory=ResolvedDatabricksServerlessConfig) op: Annotated[ Optional[OpSpec], Resolver.default( description="Op related arguments to set on the generated @multi_asset", examples=[ { "name": "some_op", "tags": {"some_tag": "some_value"}, "description": "some_description", "pool": "some_pool", "backfill_policy": {"type": "single_run"}, }, ], ), ] = None assets_by_task_key: Optional[dict[str, list[ResolvedAssetSpec]]] = None @cached_property def databricks_config(self) -> DatabricksConfig: return DatabricksConfig(databricks_config_path=self.databricks_config_path) @cached_property def asset_specs_by_task_key(self) -> dict[str, list[AssetSpec]]: tasks_by_task_key = self.databricks_config.tasks_by_task_key default_asset_specs_by_task_key = { task_key: self.get_asset_spec(task=task) for task_key, task in tasks_by_task_key.items() } provided_asset_specs_by_task_key = self.assets_by_task_key or {} provided_task_keys = provided_asset_specs_by_task_key.keys() missing_task_keys = tasks_by_task_key.keys() - provided_task_keys missing_asset_specs_by_task_key = { task_key: [asset_spec] for task_key, asset_spec in default_asset_specs_by_task_key.items() if task_key in missing_task_keys } updated_provided_asset_specs = defaultdict(list) for task_key, asset_specs in provided_asset_specs_by_task_key.items(): for asset_spec in asset_specs: curr_spec = asset_spec # replace with default attributes if not curr_spec.description: curr_spec = curr_spec.replace_attributes( description=default_asset_specs_by_task_key[task_key].description ) # merge default attributes curr_spec = curr_spec.merge_attributes( metadata=default_asset_specs_by_task_key[task_key].metadata, kinds=default_asset_specs_by_task_key[task_key].kinds, ) 
updated_provided_asset_specs[task_key].append(curr_spec) return {**missing_asset_specs_by_task_key, **updated_provided_asset_specs} @public def get_asset_spec(self, task: DatabricksBaseTask) -> AssetSpec: """Generates an AssetSpec for a given Databricks task. This method can be overridden in a subclass to customize how Databricks Asset Bundle tasks are converted to Dagster asset specs. By default, it creates an asset spec with metadata about the task type, configuration, and dependencies. Args: task: The DatabricksBaseTask containing information about the Databricks job task Returns: An AssetSpec that represents the Databricks task as a Dagster asset Example: Override this method to add custom tags or modify the asset key: .. code-block:: python from dagster_databricks import DatabricksAssetBundleComponent from dagster import AssetSpec class CustomDatabricksAssetBundleComponent(DatabricksAssetBundleComponent): def get_asset_spec(self, task): base_spec = super().get_asset_spec(task) return base_spec.replace_attributes( tags={ **base_spec.tags, "job_name": task.job_name, "environment": "production" } ) """ return AssetSpec( key=snake_case(task.task_key), description=f"{task.task_key} task from {task.job_name} job", kinds={ "databricks", *([task.task_type] if task.task_type is not DATABRICKS_UNKNOWN_TASK_TYPE else []), }, skippable=True, metadata={ "task_key": MetadataValue.text(task.task_key), "task_type": MetadataValue.text(task.task_type), **( {"task_config": MetadataValue.json(task.task_config_metadata)} if task.task_config_metadata else {} ), **({"libraries": MetadataValue.json(task.libraries)} if task.libraries else {}), }, deps=[ self.get_asset_spec( task=DatabricksUnknownTask.from_job_task_config( {"task_key": dep_config.task_key} ) ).key for dep_config in task.depends_on ], ) def build_defs(self, context: ComponentLoadContext) -> Definitions: component_defs_path_as_python_str = str( os.path.relpath(context.component_path.file_path, start=context.project_root) 
).replace("/", "_") databricks_assets = [] for task_key, asset_specs in self.asset_specs_by_task_key.items(): @multi_asset( name=self.op.name if self.op and self.op.name else f"databricks_{task_key}_multi_asset_{component_defs_path_as_python_str}", specs=asset_specs, can_subset=False, op_tags=self.op.tags if self.op else None, description=self.op.description if self.op else None, pool=self.op.pool if self.op else None, backfill_policy=self.op.backfill_policy if self.op else None, ) def _databricks_task_multi_asset( context: AssetExecutionContext, databricks: DatabricksWorkspace, ): """Multi-asset that runs multiple assets of a task as a single Databricks job.""" yield from databricks.submit_and_poll( component=self, context=context, ) databricks_assets.append(_databricks_task_multi_asset) return Definitions(assets=databricks_assets, resources={"databricks": self.workspace})
DatabricksAssetBundleComponent
python
django__django
tests/admin_views/models.py
{ "start": 14338, "end": 14549 }
class ____(models.Model): details = models.CharField(max_length=100) plot = models.OneToOneField(Plot, models.CASCADE, null=True, blank=True) def __str__(self): return self.details
PlotDetails
python
walkccc__LeetCode
solutions/1579. Remove Max Number of Edges to Keep Graph Fully Traversable/1579.py
{ "start": 579, "end": 1520 }
class ____: def maxNumEdgesToRemove(self, n: int, edges: list[list[int]]) -> int: alice = UnionFind(n) bob = UnionFind(n) requiredEdges = 0 # Greedily put type 3 edges in the front. for type_, u, v in sorted(edges, reverse=True): u -= 1 v -= 1 if type_ == 3: # Can be traversed by Alice and Bob. # Note that we should use | instead of or because if the first # expression is True, short-circuiting will skip the second # expression. if alice.unionByRank(u, v) | bob.unionByRank(u, v): requiredEdges += 1 elif type_ == 2: # Can be traversed by Bob. if bob.unionByRank(u, v): requiredEdges += 1 else: # type == 1 Can be traversed by Alice. if alice.unionByRank(u, v): requiredEdges += 1 return (len(edges) - requiredEdges if alice.count == 1 and bob.count == 1 else -1)
Solution
python
sphinx-doc__sphinx
tests/roots/test-ext-autodoc/target/enums.py
{ "start": 4899, "end": 5000 }
class ____(_NamePropertyInDataType, enum.Enum): """this is enum class"""
EnumNamePropertyInDataType
python
vyperlang__vyper
vyper/utils.py
{ "start": 5156, "end": 11436 }
class ____(decimal.Context): def __setattr__(self, name, value): if name == "prec": if value < 78: # definitely don't want this to happen raise DecimalOverrideException("Overriding decimal precision disabled") elif value > 78: # not sure it's incorrect, might not be end of the world warnings.warn( "Changing decimals precision could have unintended side effects!", stacklevel=2 ) # else: no-op, is ok super().__setattr__(name, value) decimal.setcontext(DecimalContextOverride(prec=78)) def keccak256(x): return keccak.new(digest_bits=256, data=x).digest() @functools.lru_cache(maxsize=512) def sha256sum(s: str) -> str: return hashlib.sha256(s.encode("utf-8")).digest().hex() def get_long_version(): from vyper import __long_version__ return __long_version__ # Converts four bytes to an integer def fourbytes_to_int(inp): return (inp[0] << 24) + (inp[1] << 16) + (inp[2] << 8) + inp[3] # Converts an integer to four bytes def int_to_fourbytes(n: int) -> bytes: assert n < 2**32 return n.to_bytes(4, byteorder="big") def wrap256(val: int, signed=False) -> int: ret = val % (2**256) if signed: ret = unsigned_to_signed(ret, 256, strict=True) return ret def signed_to_unsigned(int_, bits, strict=False): """ Reinterpret a signed integer with n bits as an unsigned integer. The implementation is unforgiving in that it assumes the input is in bounds for int<bits>, in order to fail more loudly (and not hide errors in modular reasoning in consumers of this function). """ if strict: lo, hi = int_bounds(signed=True, bits=bits) assert lo <= int_ <= hi, int_ if int_ < 0: return int_ + 2**bits return int_ def unsigned_to_signed(int_, bits, strict=False): """ Reinterpret an unsigned integer with n bits as a signed integer. The implementation is unforgiving in that it assumes the input is in bounds for uint<bits>, in order to fail more loudly (and not hide errors in modular reasoning in consumers of this function). 
""" if strict: lo, hi = int_bounds(signed=False, bits=bits) assert lo <= int_ <= hi, int_ if int_ > (2 ** (bits - 1)) - 1: return int_ - (2**bits) return int_ def is_power_of_two(n: int) -> bool: # busted for ints wider than 53 bits: # t = math.log(n, 2) # return math.ceil(t) == math.floor(t) return n != 0 and ((n & (n - 1)) == 0) # https://stackoverflow.com/a/71122440/ def int_log2(n: int) -> int: return n.bit_length() - 1 # utility function for debugging purposes def trace(n=5, out=sys.stderr): print("BEGIN TRACE", file=out) for x in list(traceback.format_stack())[-n:]: print(x.strip(), file=out) print("END TRACE", file=out) # converts a signature like Func(bool,uint256,address) to its 4 byte method ID # TODO replace manual calculations in codebase with this def method_id_int(method_sig: str) -> int: method_id_bytes = method_id(method_sig) return fourbytes_to_int(method_id_bytes) def method_id(method_str: str) -> bytes: return keccak256(bytes(method_str, "utf-8"))[:4] def round_towards_zero(d: decimal.Decimal) -> int: # TODO double check if this can just be int(d) # (but either way keep this util function bc it's easier at a glance # to understand what round_towards_zero() does instead of int()) return int(d.to_integral_exact(decimal.ROUND_DOWN)) # Converts a provided hex string to an integer def hex_to_int(inp): if inp[:2] == "0x": inp = inp[2:] return bytes_to_int(binascii.unhexlify(inp)) # Converts bytes to an integer def bytes_to_int(bytez): o = 0 for b in bytez: o = o * 256 + b return o def is_checksum_encoded(addr): return addr == checksum_encode(addr) # Encodes an address using ethereum's checksum scheme def checksum_encode(addr): # Expects an input of the form 0x<40 hex chars> assert addr[:2] == "0x" and len(addr) == 42, addr o = "" v = bytes_to_int(keccak256(addr[2:].lower().encode("utf-8"))) for i, c in enumerate(addr[2:]): if c in "0123456789": o += c else: o += c.upper() if (v & (2 ** (255 - 4 * i))) else c.lower() return "0x" + o # Returns lowest 
multiple of 32 >= the input def ceil32(x): return x if x % 32 == 0 else x + 32 - (x % 32) # Calculates amount of gas needed for memory expansion def calc_mem_gas(memsize): return (memsize // 32) * 3 + (memsize // 32) ** 2 // 512 # Specific gas usage GAS_IDENTITY = 15 GAS_IDENTITYWORD = 3 GAS_COPY_WORD = 3 # i.e., W_copy from YP # A decimal value can store multiples of 1/DECIMAL_DIVISOR MAX_DECIMAL_PLACES = 10 DECIMAL_DIVISOR = 10**MAX_DECIMAL_PLACES DECIMAL_EPSILON = decimal.Decimal(1) / DECIMAL_DIVISOR def int_bounds(signed, bits): """ calculate the bounds on an integer type ex. int_bounds(True, 8) -> (-128, 127) int_bounds(False, 8) -> (0, 255) """ if signed: return -(2 ** (bits - 1)), (2 ** (bits - 1)) - 1 return 0, (2**bits) - 1 # e.g. -1 -> -(2**256 - 1) def evm_twos_complement(x: int) -> int: # return ((o + 2 ** 255) % 2 ** 256) - 2 ** 255 return ((2**256 - 1) ^ x) + 1 def evm_not(val: int) -> int: assert 0 <= val <= SizeLimits.MAX_UINT256, "Value out of bounds" return SizeLimits.MAX_UINT256 ^ val # EVM div semantics as a python function def evm_div(x, y): if y == 0: return 0 # NOTE: should be same as: round_towards_zero(Decimal(x)/Decimal(y)) sign = -1 if (x * y) < 0 else 1 return sign * (abs(x) // abs(y)) # adapted from py-evm # EVM mod semantics as a python function def evm_mod(x, y): if y == 0: return 0 sign = -1 if x < 0 else 1 return sign * (abs(x) % abs(y)) # adapted from py-evm # EVM pow which wraps instead of hanging on "large" numbers # (which can generated, for ex. in the unevaluated branch of the Shift builtin) def evm_pow(x, y): assert x >= 0 and y >= 0 return pow(x, y, 2**256) # memory used for system purposes, not for variables
DecimalContextOverride
python
python__mypy
mypy/nodes.py
{ "start": 160743, "end": 169523 }
class ____: """Specifies how a dataclass-like transform should be applied. The fields here are based on the parameters accepted by `typing.dataclass_transform`.""" __slots__ = ( "eq_default", "order_default", "kw_only_default", "frozen_default", "field_specifiers", ) def __init__( self, *, eq_default: bool | None = None, order_default: bool | None = None, kw_only_default: bool | None = None, field_specifiers: tuple[str, ...] | None = None, # Specified outside of PEP 681: # frozen_default was added to CPythonin https://github.com/python/cpython/pull/99958 citing # positive discussion in typing-sig frozen_default: bool | None = None, ) -> None: self.eq_default = eq_default if eq_default is not None else True self.order_default = order_default if order_default is not None else False self.kw_only_default = kw_only_default if kw_only_default is not None else False self.frozen_default = frozen_default if frozen_default is not None else False self.field_specifiers = field_specifiers if field_specifiers is not None else () def serialize(self) -> JsonDict: return { "eq_default": self.eq_default, "order_default": self.order_default, "kw_only_default": self.kw_only_default, "frozen_default": self.frozen_default, "field_specifiers": list(self.field_specifiers), } @classmethod def deserialize(cls, data: JsonDict) -> DataclassTransformSpec: return DataclassTransformSpec( eq_default=data.get("eq_default"), order_default=data.get("order_default"), kw_only_default=data.get("kw_only_default"), frozen_default=data.get("frozen_default"), field_specifiers=tuple(data.get("field_specifiers", [])), ) def write(self, data: WriteBuffer) -> None: write_tag(data, DT_SPEC) write_bool(data, self.eq_default) write_bool(data, self.order_default) write_bool(data, self.kw_only_default) write_bool(data, self.frozen_default) write_str_list(data, self.field_specifiers) write_tag(data, END_TAG) @classmethod def read(cls, data: ReadBuffer) -> DataclassTransformSpec: ret = DataclassTransformSpec( 
eq_default=read_bool(data), order_default=read_bool(data), kw_only_default=read_bool(data), frozen_default=read_bool(data), field_specifiers=tuple(read_str_list(data)), ) assert read_tag(data) == END_TAG return ret def get_flags(node: Node, names: list[str]) -> list[str]: return [name for name in names if getattr(node, name)] def set_flags(node: Node, flags: list[str]) -> None: for name in flags: setattr(node, name, True) def write_flags(data: WriteBuffer, node: SymbolNode, flags: list[str]) -> None: for flag in flags: write_bool(data, getattr(node, flag)) def read_flags(data: ReadBuffer, node: SymbolNode, flags: list[str]) -> None: for flag in flags: if read_bool(data): setattr(node, flag, True) def get_member_expr_fullname(expr: MemberExpr) -> str | None: """Return the qualified name representation of a member expression. Return a string of form foo.bar, foo.bar.baz, or similar, or None if the argument cannot be represented in this form. """ initial: str | None = None if isinstance(expr.expr, NameExpr): initial = expr.expr.name elif isinstance(expr.expr, MemberExpr): initial = get_member_expr_fullname(expr.expr) if initial is None: return None return f"{initial}.{expr.name}" deserialize_map: Final = { key: obj.deserialize for key, obj in globals().items() if type(obj) is not FakeInfo and isinstance(obj, type) and issubclass(obj, SymbolNode) and obj is not SymbolNode } def check_arg_kinds( arg_kinds: list[ArgKind], nodes: list[T], fail: Callable[[str, T], None] ) -> None: is_var_arg = False is_kw_arg = False seen_named = False seen_opt = False for kind, node in zip(arg_kinds, nodes): if kind == ARG_POS: if is_var_arg or is_kw_arg or seen_named or seen_opt: fail( "Required positional args may not appear after default, named or var args", node, ) break elif kind == ARG_OPT: if is_var_arg or is_kw_arg or seen_named: fail("Positional default args may not appear after named or var args", node) break seen_opt = True elif kind == ARG_STAR: if is_var_arg or is_kw_arg or 
seen_named: fail("Var args may not appear after named or var args", node) break is_var_arg = True elif kind == ARG_NAMED or kind == ARG_NAMED_OPT: seen_named = True if is_kw_arg: fail("A **kwargs argument must be the last argument", node) break elif kind == ARG_STAR2: if is_kw_arg: fail("You may only have one **kwargs argument", node) break is_kw_arg = True def check_arg_names( names: Sequence[str | None], nodes: list[T], fail: Callable[[str, T], None], description: str = "function definition", ) -> None: seen_names: set[str | None] = set() for name, node in zip(names, nodes): if name is not None and name in seen_names: fail(f'Duplicate argument "{name}" in {description}', node) break seen_names.add(name) def is_class_var(expr: NameExpr) -> bool: """Return whether the expression is ClassVar[...]""" if isinstance(expr.node, Var): return expr.node.is_classvar return False def is_final_node(node: SymbolNode | None) -> bool: """Check whether `node` corresponds to a final attribute.""" return isinstance(node, (Var, FuncDef, OverloadedFuncDef, Decorator)) and node.is_final def get_func_def(typ: mypy.types.CallableType) -> SymbolNode | None: definition = typ.definition if isinstance(definition, Decorator): definition = definition.func return definition def local_definitions( names: SymbolTable, name_prefix: str, info: TypeInfo | None = None ) -> Iterator[Definition]: """Iterate over local definitions (not imported) in a symbol table. Recursively iterate over class members and nested classes. """ # TODO: What should the name be? Or maybe remove it? for name, symnode in names.items(): shortname = name if "-redef" in name: # Restore original name from mangled name of multiply defined function shortname = name.split("-redef")[0] fullname = name_prefix + "." 
+ shortname node = symnode.node if node and node.fullname == fullname: yield fullname, symnode, info if isinstance(node, TypeInfo): yield from local_definitions(node.names, fullname, node) # See docstring for mypy/cache.py for reserved tag ranges. MYPY_FILE: Final[Tag] = 50 OVERLOADED_FUNC_DEF: Final[Tag] = 51 FUNC_DEF: Final[Tag] = 52 DECORATOR: Final[Tag] = 53 VAR: Final[Tag] = 54 TYPE_VAR_EXPR: Final[Tag] = 55 PARAM_SPEC_EXPR: Final[Tag] = 56 TYPE_VAR_TUPLE_EXPR: Final[Tag] = 57 TYPE_INFO: Final[Tag] = 58 TYPE_ALIAS: Final[Tag] = 59 CLASS_DEF: Final[Tag] = 60 SYMBOL_TABLE_NODE: Final[Tag] = 61 def read_symbol(data: ReadBuffer) -> SymbolNode: tag = read_tag(data) # The branches here are ordered manually by type "popularity". if tag == VAR: return Var.read(data) if tag == FUNC_DEF: return FuncDef.read(data) if tag == DECORATOR: return Decorator.read(data) if tag == TYPE_INFO: return TypeInfo.read(data) if tag == OVERLOADED_FUNC_DEF: return OverloadedFuncDef.read(data) if tag == TYPE_VAR_EXPR: return TypeVarExpr.read(data) if tag == TYPE_ALIAS: return TypeAlias.read(data) if tag == PARAM_SPEC_EXPR: return ParamSpecExpr.read(data) if tag == TYPE_VAR_TUPLE_EXPR: return TypeVarTupleExpr.read(data) assert False, f"Unknown symbol tag {tag}" def read_overload_part(data: ReadBuffer, tag: Tag | None = None) -> OverloadPart: if tag is None: tag = read_tag(data) if tag == DECORATOR: return Decorator.read(data) if tag == FUNC_DEF: return FuncDef.read(data) assert False, f"Invalid tag for an OverloadPart {tag}"
DataclassTransformSpec
python
django__django
tests/test_utils/test_simpletestcase.py
{ "start": 596, "end": 6402 }
class ____(SimpleTestCase): def get_runner(self): return unittest.TextTestRunner(stream=StringIO()) def isolate_debug_test(self, test_suite, result): # Suite teardown needs to be manually called to isolate failures. test_suite._tearDownPreviousClass(None, result) test_suite._handleModuleTearDown(result) def test_run_cleanup(self, _pre_setup, _post_teardown): """Simple test run: catches errors and runs cleanup.""" test_suite = unittest.TestSuite() test_suite.addTest(ErrorTestCase("raising_test")) result = self.get_runner()._makeResult() self.assertEqual(result.errors, []) test_suite.run(result) self.assertEqual(len(result.errors), 1) _, traceback = result.errors[0] self.assertIn( "Exception: debug() bubbles up exceptions before cleanup.", traceback ) _pre_setup.assert_called_once_with() _post_teardown.assert_called_once_with() def test_run_pre_setup_error(self, _pre_setup, _post_teardown): _pre_setup.side_effect = Exception("Exception in _pre_setup.") test_suite = unittest.TestSuite() test_suite.addTest(ErrorTestCase("simple_test")) result = self.get_runner()._makeResult() self.assertEqual(result.errors, []) test_suite.run(result) self.assertEqual(len(result.errors), 1) _, traceback = result.errors[0] self.assertIn("Exception: Exception in _pre_setup.", traceback) # pre-setup is called but not post-teardown. _pre_setup.assert_called_once_with() self.assertFalse(_post_teardown.called) def test_run_post_teardown_error(self, _pre_setup, _post_teardown): _post_teardown.side_effect = Exception("Exception in _post_teardown.") test_suite = unittest.TestSuite() test_suite.addTest(ErrorTestCase("simple_test")) result = self.get_runner()._makeResult() self.assertEqual(result.errors, []) test_suite.run(result) self.assertEqual(len(result.errors), 1) _, traceback = result.errors[0] self.assertIn("Exception: Exception in _post_teardown.", traceback) # pre-setup and post-teardwn are called. 
_pre_setup.assert_called_once_with() _post_teardown.assert_called_once_with() def test_run_skipped_test_no_cleanup(self, _pre_setup, _post_teardown): test_suite = unittest.TestSuite() test_suite.addTest(ErrorTestCase("skipped_test")) try: test_suite.run(self.get_runner()._makeResult()) except unittest.SkipTest: self.fail("SkipTest should not be raised at this stage.") self.assertFalse(_post_teardown.called) self.assertFalse(_pre_setup.called) def test_debug_cleanup(self, _pre_setup, _post_teardown): """Simple debug run without errors.""" test_suite = unittest.TestSuite() test_suite.addTest(ErrorTestCase("simple_test")) test_suite.debug() _pre_setup.assert_called_once_with() _post_teardown.assert_called_once_with() def test_debug_bubbles_error(self, _pre_setup, _post_teardown): """debug() bubbles up exceptions before cleanup.""" test_suite = unittest.TestSuite() test_suite.addTest(ErrorTestCase("raising_test")) msg = "debug() bubbles up exceptions before cleanup." with self.assertRaisesMessage(Exception, msg): # This is the same as test_suite.debug(). result = _DebugResult() test_suite.run(result, debug=True) # pre-setup is called but not post-teardown. _pre_setup.assert_called_once_with() self.assertFalse(_post_teardown.called) self.isolate_debug_test(test_suite, result) def test_debug_bubbles_pre_setup_error(self, _pre_setup, _post_teardown): """debug() bubbles up exceptions during _pre_setup.""" msg = "Exception in _pre_setup." _pre_setup.side_effect = Exception(msg) test_suite = unittest.TestSuite() test_suite.addTest(ErrorTestCase("simple_test")) with self.assertRaisesMessage(Exception, msg): # This is the same as test_suite.debug(). result = _DebugResult() test_suite.run(result, debug=True) # pre-setup is called but not post-teardown. 
_pre_setup.assert_called_once_with() self.assertFalse(_post_teardown.called) self.isolate_debug_test(test_suite, result) def test_debug_bubbles_post_teardown_error(self, _pre_setup, _post_teardown): """debug() bubbles up exceptions during _post_teardown.""" msg = "Exception in _post_teardown." _post_teardown.side_effect = Exception(msg) test_suite = unittest.TestSuite() test_suite.addTest(ErrorTestCase("simple_test")) with self.assertRaisesMessage(Exception, msg): # This is the same as test_suite.debug(). result = _DebugResult() test_suite.run(result, debug=True) # pre-setup and post-teardwn are called. _pre_setup.assert_called_once_with() _post_teardown.assert_called_once_with() self.isolate_debug_test(test_suite, result) def test_debug_skipped_test_no_cleanup(self, _pre_setup, _post_teardown): test_suite = unittest.TestSuite() test_suite.addTest(ErrorTestCase("skipped_test")) with self.assertRaisesMessage(unittest.SkipTest, "Skip condition."): # This is the same as test_suite.debug(). result = _DebugResult() test_suite.run(result, debug=True) self.assertFalse(_post_teardown.called) self.assertFalse(_pre_setup.called) self.isolate_debug_test(test_suite, result)
DebugInvocationTests
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/depends_on_define_cmake_prefix_paths/package.py
{ "start": 217, "end": 538 }
class ____(Package): """Package that defines cmake_prefix_paths""" homepage = "http://www.example.com" url = "http://www.example.com/dependsonefinecmakeprefixpaths-1.0.tar.gz" version("1.0", md5="0123456789abcdef0123456789abcdef") depends_on("define-cmake-prefix-paths")
DependsOnDefineCmakePrefixPaths
python
realpython__materials
python-tic-tac-toe-game-tkinter/source_code_final/tic_tac_toe.py
{ "start": 221, "end": 407 }
class ____(NamedTuple): row: int col: int label: str = "" BOARD_SIZE = 3 DEFAULT_PLAYERS = ( Player(label="X", color="blue"), Player(label="O", color="green"), )
Move
python
eth-brownie__brownie
brownie/_gui/report.py
{ "start": 107, "end": 1612 }
class ____(SelectBox): def __init__(self, parent): super().__init__(parent, "", []) def show(self): self.grid() def hide(self): self.grid_remove() def set_values(self, contract): reports = self._root().reports values = sorted(report for report in reports if contract in reports[report][report]) if not values: self.set("(No Reports)") self.config(state="disabled") self.root.toolbar.highlight_select.hide() return self["values"] = ["None"] + values self.config(state="readonly") if self.root.report_key: self.set(self.root.report_key) self.root.toolbar.highlight_select.show() if self.root.highlight_key: self.root.toolbar.highlight_select.update_highlights(self.root.highlight_key) else: self.set("Select Report") def _select(self, event): value = super()._select() if value != "None" and self.root.report_key == value: return self.root.toolbar.highlight_select.toggle_off() if value == "None": self.root.toolbar.highlight_select.hide() self.root.report_key = None self.set("Select Report") return self.root.toolbar.highlight_select.show() self.root.report_key = value self.root.toolbar.highlight_select.set_values(list(self.root.reports[value]["highlights"]))
ReportSelect
python
huggingface__transformers
tests/models/sam3/test_modeling_sam3.py
{ "start": 16218, "end": 43135 }
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Tests for SAM3 full model. """ all_model_classes = (Sam3Model,) if is_torch_available() else () pipeline_model_mapping = {"mask-generation": Sam3Model} if is_torch_available() else {} test_resize_embeddings = False test_torch_exportable = False _is_composite = True def setUp(self): self.model_tester = Sam3ModelTester(self) common_properties = ["initializer_range"] self.config_tester = ConfigTester( self, config_class=Sam3Config, has_text_modality=False, common_properties=common_properties ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SAM3 does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # Vision encoder has input embeddings self.assertIsInstance(model.vision_encoder.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batching_equivalence(self, atol=5e-4, rtol=5e-4): super().test_batching_equivalence(atol=atol, rtol=rtol) # Override as SAM3Model has component-specific attention outputs def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # Check that we have the component-specific 
attention outputs # Note: Some may be empty tuples if attentions aren't collected for that component self.assertIsNotNone(outputs.vision_attentions) self.assertIsNotNone(outputs.detr_encoder_attentions) self.assertIsNotNone(outputs.detr_decoder_attentions) self.assertIsNotNone(outputs.mask_decoder_attentions) # Check vision attentions (from ViT backbone) - should be properly collected if outputs.vision_attentions: vision_attentions = outputs.vision_attentions self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers) # Check that at least vision attentions are present (others may require different collection mechanism) self.assertTrue( len(outputs.vision_attentions) > 0, "At least vision attentions should be collected when output_attentions=True", ) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True for k in config.sub_configs: if getattr(config, k) is not None: getattr(config, k).output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # Verify again with config-based setting self.assertIsNotNone(outputs.vision_attentions) self.assertIsNotNone(outputs.detr_encoder_attentions) self.assertIsNotNone(outputs.detr_decoder_attentions) self.assertIsNotNone(outputs.mask_decoder_attentions) # Override as SAM3Model has component-specific attention/hidden state outputs def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for k in config.sub_configs: if getattr(config, k) is not None: getattr(config, k).output_hidden_states = True getattr(config, k).output_attentions = True config.output_hidden_states = True config.output_attentions = True config._attn_implementation = "eager" # Use first model class model_class = self.all_model_classes[0] model = model_class._from_config(config, 
attn_implementation="eager") model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] # SAM3 has component-specific hidden states and attentions # Check vision hidden states and attentions if outputs.vision_hidden_states is not None and len(outputs.vision_hidden_states) > 0: vision_hidden_states = outputs.vision_hidden_states[0] vision_hidden_states.retain_grad() if outputs.vision_attentions is not None and len(outputs.vision_attentions) > 0: vision_attentions = outputs.vision_attentions[0] vision_attentions.retain_grad() # Check DETR encoder hidden states and attentions if outputs.encoder_hidden_states is not None and len(outputs.encoder_hidden_states) > 0: encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() if outputs.detr_encoder_attentions is not None and len(outputs.detr_encoder_attentions) > 0: detr_encoder_attentions = outputs.detr_encoder_attentions[0] detr_encoder_attentions.retain_grad() # Check DETR decoder hidden states and attentions if outputs.decoder_hidden_states is not None and len(outputs.decoder_hidden_states) > 0: decoder_hidden_states = outputs.decoder_hidden_states[0] decoder_hidden_states.retain_grad() if outputs.detr_decoder_attentions is not None and len(outputs.detr_decoder_attentions) > 0: detr_decoder_attentions = outputs.detr_decoder_attentions[0] detr_decoder_attentions.retain_grad() # Check mask decoder attentions if outputs.mask_decoder_attentions is not None and len(outputs.mask_decoder_attentions) > 0: mask_decoder_attentions = outputs.mask_decoder_attentions[0] mask_decoder_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) # Check gradients are not None if outputs.vision_hidden_states is not None and len(outputs.vision_hidden_states) > 0: self.assertIsNotNone(vision_hidden_states.grad) if outputs.vision_attentions is not None and len(outputs.vision_attentions) > 0: 
self.assertIsNotNone(vision_attentions.grad) if outputs.encoder_hidden_states is not None and len(outputs.encoder_hidden_states) > 0: self.assertIsNotNone(encoder_hidden_states.grad) if outputs.detr_encoder_attentions is not None and len(outputs.detr_encoder_attentions) > 0: self.assertIsNotNone(detr_encoder_attentions.grad) if outputs.decoder_hidden_states is not None and len(outputs.decoder_hidden_states) > 0: self.assertIsNotNone(decoder_hidden_states.grad) if outputs.detr_decoder_attentions is not None and len(outputs.detr_decoder_attentions) > 0: self.assertIsNotNone(detr_decoder_attentions.grad) if outputs.mask_decoder_attentions is not None and len(outputs.mask_decoder_attentions) > 0: self.assertIsNotNone(mask_decoder_attentions.grad) def test_hidden_states_output(self): """Test that SAM3 properly outputs component-specific hidden states.""" config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Enable hidden states output config.output_hidden_states = True for k in config.sub_configs: if getattr(config, k) is not None: getattr(config, k).output_hidden_states = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # SAM3 has component-specific hidden states # Check vision hidden states if outputs.vision_hidden_states is not None: vision_hidden_states = outputs.vision_hidden_states self.assertIsInstance(vision_hidden_states, (list, tuple)) # Vision encoder outputs hidden states from each layer expected_num_vision_layers = self.model_tester.num_hidden_layers + 1 # +1 for embeddings self.assertEqual(len(vision_hidden_states), expected_num_vision_layers) # Check DETR encoder hidden states (stored as encoder_hidden_states) if outputs.encoder_hidden_states is not None: encoder_hidden_states = outputs.encoder_hidden_states self.assertIsInstance(encoder_hidden_states, (list, tuple)) # Check 
DETR decoder hidden states (stored as decoder_hidden_states) if outputs.decoder_hidden_states is not None: decoder_hidden_states = outputs.decoder_hidden_states self.assertIsInstance(decoder_hidden_states, (list, tuple)) @unittest.skip(reason="SAM3VisionModel has FPN channel mismatch with flex attention") def test_flex_attention_with_grads(self): pass @unittest.skip( reason="Sam3Model creates attention masks from features (with gradients), " "which is incompatible with flash attention's expectation of binary masks" ) def test_flash_attn_2_inference_equivalence(self): pass @unittest.skip( reason="Sam3Model creates attention masks from features (with gradients), " "which is incompatible with flash attention's expectation of binary masks" ) def test_flash_attn_2_inference_equivalence_right_padding(self): pass @unittest.skip( reason="Sam3Model creates attention masks from features (with gradients), " "which is incompatible with flash attention's expectation of binary masks" ) def test_flash_attn_3_inference_equivalence(self): pass @unittest.skip( reason="Sam3Model creates attention masks from features (with gradients), " "which is incompatible with flash attention's expectation of binary masks" ) def test_flash_attn_3_inference_equivalence_right_padding(self): pass @unittest.skip( reason="Sam3Model creates attention masks from features (with gradients), " "which is incompatible with flash attention's expectation of binary masks" ) def test_flash_attn_kernels_inference_equivalence(self): pass @unittest.skip( reason="Sam3Model creates attention masks from features (with gradients), " "which is incompatible with flash attention's expectation of binary masks" ) def test_flash_attn_kernels_mps_inference_equivalence(self): pass def test_sdpa_can_dispatch_composite_models(self): """ Tests if composite models dispatch correctly on SDPA/eager when requested. SAM3 has multiple sub-models: vision_encoder, text_encoder, geometry_encoder, detr_encoder, detr_decoder, mask_decoder. 
""" if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname, attn_implementation="sdpa") model_sdpa = model_sdpa.eval().to(torch_device) # Get all sub-models that support attention vision_encoder_sdpa = getattr(model_sdpa, "vision_encoder") text_encoder_sdpa = getattr(model_sdpa, "text_encoder", None) detr_encoder_sdpa = getattr(model_sdpa, "detr_encoder", None) detr_decoder_sdpa = getattr(model_sdpa, "detr_decoder", None) mask_decoder_sdpa = getattr(model_sdpa, "mask_decoder", None) # Check that sub-models dispatch to SDPA if they support it self.assertTrue(vision_encoder_sdpa.config._attn_implementation == "sdpa") if text_encoder_sdpa is not None and hasattr(text_encoder_sdpa, "_supports_sdpa"): # Text encoder from CLIP should support SDPA self.assertTrue(text_encoder_sdpa.config._attn_implementation == "sdpa") if detr_encoder_sdpa is not None: self.assertTrue(detr_encoder_sdpa.config._attn_implementation == "sdpa") if detr_decoder_sdpa is not None: self.assertTrue(detr_decoder_sdpa.config._attn_implementation == "sdpa") if mask_decoder_sdpa is not None: self.assertTrue(mask_decoder_sdpa.config._attn_implementation == "sdpa") # Now test with eager model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(getattr(model_eager, "vision_encoder").config._attn_implementation == "eager") if hasattr(model_eager, "text_encoder"): self.assertTrue(model_eager.text_encoder.config._attn_implementation == "eager") if hasattr(model_eager, "detr_encoder"): 
self.assertTrue(model_eager.detr_encoder.config._attn_implementation == "eager") if hasattr(model_eager, "detr_decoder"): self.assertTrue(model_eager.detr_decoder.config._attn_implementation == "eager") if hasattr(model_eager, "mask_decoder"): self.assertTrue(model_eager.mask_decoder.config._attn_implementation == "eager") # Verify no SDPA layers in eager model for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if ( class_name.endswith("Attention") and getattr(submodule, "config", None) and submodule.config._attn_implementation == "sdpa" ): raise ValueError("The eager model should not have SDPA attention layers") def test_forward_with_text_embeds(self): """Test that text_embeds parameter works correctly.""" config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() # First get text embeddings with torch.no_grad(): text_embeds = model.get_text_features( input_ids=inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"] ) # Forward with text_embeds (remove input_ids) inputs_with_embeds = { "pixel_values": inputs_dict["pixel_values"], "text_embeds": text_embeds, } with torch.no_grad(): outputs_with_embeds = model(**inputs_with_embeds) # Forward with input_ids with torch.no_grad(): outputs_with_ids = model(**inputs_dict) # Outputs should be very close self.assertTrue(torch.allclose(outputs_with_embeds.pred_logits, outputs_with_ids.pred_logits, atol=1e-5)) self.assertTrue(torch.allclose(outputs_with_embeds.pred_boxes, outputs_with_ids.pred_boxes, atol=1e-5)) def test_forward_with_both_input_ids_and_text_embeds_raises_error(self): """Test that passing both input_ids and text_embeds raises an error.""" config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() # Get 
text embeddings with torch.no_grad(): text_embeds = model.get_text_features( input_ids=inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"] ) # Try to pass both (should raise error) inputs_with_both = { "pixel_values": inputs_dict["pixel_values"], "input_ids": inputs_dict["input_ids"], "text_embeds": text_embeds, } with self.assertRaises(ValueError): model(**inputs_with_both) def test_forward_with_vision_embeds(self): """Test that vision_embeds parameter works correctly.""" config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() # First get vision embeddings with torch.no_grad(): vision_embeds = model.get_vision_features(pixel_values=inputs_dict["pixel_values"]) # Forward with vision_embeds (remove pixel_values) inputs_with_embeds = { "vision_embeds": vision_embeds, "input_ids": inputs_dict["input_ids"], "attention_mask": inputs_dict["attention_mask"], } with torch.no_grad(): outputs_with_embeds = model(**inputs_with_embeds) # Forward with pixel_values with torch.no_grad(): outputs_with_pixels = model(**inputs_dict) # Outputs should be very close self.assertTrue( torch.allclose(outputs_with_embeds.pred_logits, outputs_with_pixels.pred_logits, atol=1e-5) ) self.assertTrue(torch.allclose(outputs_with_embeds.pred_boxes, outputs_with_pixels.pred_boxes, atol=1e-5)) def test_forward_with_both_pixel_values_and_vision_embeds_raises_error(self): """Test that passing both pixel_values and vision_embeds raises an error.""" config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() # Get vision embeddings with torch.no_grad(): vision_embeds = model.get_vision_features(pixel_values=inputs_dict["pixel_values"]) # Try to pass both (should raise error) inputs_with_both = { "pixel_values": 
inputs_dict["pixel_values"], "vision_embeds": vision_embeds, "input_ids": inputs_dict["input_ids"], "attention_mask": inputs_dict["attention_mask"], } with self.assertRaises(ValueError): model(**inputs_with_both) @unittest.skip(reason="SAM3 model can't be compiled dynamic yet") def test_sdpa_can_compile_dynamic(self): pass @unittest.skip( reason="SAM3 uses CLIP text encoder which has two attention masks: `causal_attention_mask` and `attention_mask`." ) def test_sdpa_can_dispatch_on_flash(self): pass def test_model_outputs_equivalence(self): """ Test that tuple and dict outputs are equivalent. SAM3 returns complex outputs with component-specific fields, so we need to ensure proper conversion. """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (list, tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return # model might return non-tensors objects (e.g. Cache class) elif isinstance(tuple_object, torch.Tensor): self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. 
Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) # Test with output_hidden_states tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) # Test with output_attentions if supported if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): """Override to ensure input_ids and attention_mask are always present for Sam3Model.""" inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) # Sam3Model always requires input_ids and attention_mask for text encoding if model_class == Sam3Model: if "input_ids" not in inputs_dict or inputs_dict.get("input_ids") is None: # Create dummy input_ids if not present # Get batch_size from pixel_values or vision_embeds if "pixel_values" in inputs_dict and inputs_dict.get("pixel_values") is not None: batch_size = inputs_dict["pixel_values"].shape[0] elif "vision_embeds" in inputs_dict and inputs_dict.get("vision_embeds") is not None: vision_embeds = inputs_dict["vision_embeds"] if 
vision_embeds.fpn_hidden_states is not None and len(vision_embeds.fpn_hidden_states) > 0: batch_size = vision_embeds.fpn_hidden_states[0].shape[0] elif vision_embeds.last_hidden_state is not None: batch_size = vision_embeds.last_hidden_state.shape[0] else: batch_size = 2 else: batch_size = 2 config = self.model_tester.get_config() # text_config might be a dict or a config object if isinstance(config.text_config, dict): vocab_size = config.text_config.get("vocab_size", 1000) else: vocab_size = getattr(config.text_config, "vocab_size", 1000) inputs_dict["input_ids"] = torch.randint(0, vocab_size, (batch_size, 16), device=torch_device) if "attention_mask" not in inputs_dict or inputs_dict.get("attention_mask") is None: inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["input_ids"]) return inputs_dict def prepare_coco_cat_image(): """Prepare COCO cat and laptop image (from batched inference notebook).""" img_url = "http://images.cocodataset.org/val2017/000000077595.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def prepare_coco_kitchen_image(): """Prepare COCO kitchen scene image (from batched inference notebook).""" img_url = "http://images.cocodataset.org/val2017/000000136466.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image @slow
Sam3ModelTest
python
celery__celery
t/unit/app/test_exceptions.py
{ "start": 510, "end": 829 }
class ____: def test_attrs(self): x = Reject('foo', requeue=True) assert x.reason == 'foo' assert x.requeue def test_repr(self): assert repr(Reject('foo', True)) def test_pickleable(self): x = Retry('foo', True) assert pickle.loads(pickle.dumps(x))
test_Reject
python
optuna__optuna
optuna/storages/_grpc/client.py
{ "start": 14246, "end": 16521 }
class ____: def __init__(self, grpc_client: api_pb2_grpc.StorageServiceStub) -> None: self.studies: dict[int, GrpcClientCacheEntry] = {} self.grpc_client = grpc_client self.lock = threading.Lock() def delete_study_cache(self, study_id: int) -> None: with self.lock: self.studies.pop(study_id, None) def get_all_trials( self, study_id: int, states: Container[TrialState] | None ) -> list[FrozenTrial]: with self.lock: self._read_trials_from_remote_storage(study_id) study = self.studies[study_id] trials: dict[int, FrozenTrial] | list[FrozenTrial] if states is not None: trials = {number: t for number, t in study.trials.items() if t.state in states} else: trials = study.trials trials = list(sorted(trials.values(), key=lambda t: t.number)) return trials def _read_trials_from_remote_storage(self, study_id: int) -> None: if study_id not in self.studies: self.studies[study_id] = GrpcClientCacheEntry() study = self.studies[study_id] req = api_pb2.GetTrialsRequest( study_id=study_id, included_trial_ids=study.unfinished_trial_ids, trial_id_greater_than=study.last_finished_trial_id, ) try: res = self.grpc_client.GetTrials(req) except grpc.RpcError as e: if e.code() == grpc.StatusCode.NOT_FOUND: self.studies.pop(study_id, None) raise KeyError from e raise if not res.trials: return for trial_proto in res.trials: trial = grpc_servicer._from_proto_trial(trial_proto) self._add_trial_to_cache(study_id, trial) def _add_trial_to_cache(self, study_id: int, trial: FrozenTrial) -> None: study = self.studies[study_id] study.trials[trial.number] = trial if not trial.state.is_finished(): study.unfinished_trial_ids.add(trial._trial_id) return study.last_finished_trial_id = max(study.last_finished_trial_id, trial._trial_id) study.unfinished_trial_ids.discard(trial._trial_id)
GrpcClientCache
python
ray-project__ray
python/ray/dashboard/modules/job/tests/test_utils.py
{ "start": 1977, "end": 2383 }
class ____: def __init__(self, **kwargs): self._json = kwargs async def json(self): return self._json @pytest.mark.asyncio async def test_mock_request(): request = MockRequest(a=1, b=2) assert await request.json() == {"a": 1, "b": 2} request = MockRequest(a=1, b=None) assert await request.json() == {"a": 1, "b": None} # async test @pytest.mark.asyncio
MockRequest
python
Lightning-AI__lightning
src/lightning/pytorch/callbacks/progress/rich_progress.py
{ "start": 8000, "end": 9213 }
class ____: """Styles to associate to different base components. Args: description: Style for the progress bar description. For eg., Epoch x, Testing, etc. progress_bar: Style for the bar in progress. progress_bar_finished: Style for the finished progress bar. progress_bar_pulse: Style for the progress bar when `IterableDataset` is being processed. batch_progress: Style for the progress tracker (i.e 10/50 batches completed). time: Style for the processed time and estimate time remaining. processing_speed: Style for the speed of the batches being processed. metrics: Style for the metrics https://rich.readthedocs.io/en/stable/style.html """ description: Union[str, "Style"] = "" progress_bar: Union[str, "Style"] = "#6206E0" progress_bar_finished: Union[str, "Style"] = "#6206E0" progress_bar_pulse: Union[str, "Style"] = "#6206E0" batch_progress: Union[str, "Style"] = "" time: Union[str, "Style"] = "dim" processing_speed: Union[str, "Style"] = "dim underline" metrics: Union[str, "Style"] = "italic" metrics_text_delimiter: str = " " metrics_format: str = ".3f"
RichProgressBarTheme
python
realpython__materials
python-selenium/src/bandcamp/web/base.py
{ "start": 696, "end": 859 }
class ____(WebPage): def __init__(self, parent: WebElement, driver: WebDriver) -> None: super().__init__(driver) self._parent = parent
WebComponent
python
walkccc__LeetCode
solutions/719. Find K-th Smallest Pair Distance/719.py
{ "start": 0, "end": 612 }
class ____: def smallestDistancePair(self, nums: list[int], k: int) -> int: nums.sort() def numPairDistancesNoGreaterThan(m: int) -> int: count = 0 j = 1 # For each index i, find the first index j s.t. nums[j] > nums[i] + m, # so numPairDistancesNoGreaterThan for the index i will be j - i - 1. for i, num in enumerate(nums): while j < len(nums) and nums[j] <= num + m: j += 1 count += j - i - 1 return count return bisect.bisect_left(range(nums[-1] - nums[0]), k, key=numPairDistancesNoGreaterThan)
Solution
python
astropy__astropy
astropy/units/core.py
{ "start": 78503, "end": 85166 }
class ____(UnitBase): """ Create a composite unit using expressions of previously defined units. Direct use of this class is not recommended. Instead use the factory function `Unit` and arithmetic operators to compose units. Parameters ---------- scale : number A scaling factor for the unit. bases : sequence of `UnitBase` A sequence of units this unit is composed of. powers : sequence of numbers A sequence of powers (in parallel with ``bases``) for each of the base units. Raises ------ UnitScaleError If the scale is zero. """ _decomposed_cache: Union["CompositeUnit", None] = None # _error_check can switch off runtime validation of scale, bases and powers. # These overloads enable type checkers to validate statically. @overload def __init__( self, scale: UnitScaleLike, bases: Sequence[UnitBase], powers: Sequence[UnitPowerLike], decompose: bool = False, decompose_bases: Collection[UnitBase] = (), _error_check: Literal[True] = True, ) -> None: ... @overload def __init__( self, scale: UnitScale, bases: Sequence[UnitBase], powers: Sequence[UnitPower], decompose: bool = False, decompose_bases: Collection[UnitBase] = (), _error_check: Literal[False] = False, ) -> None: ... def __init__( self, scale, bases, powers, decompose=False, decompose_bases=(), _error_check=True, ): # There are many cases internal to astropy.units where we # already know that all the bases are Unit objects, and the # powers have been validated. In those cases, we can skip the # error checking for performance reasons. When the private # kwarg `_error_check` is False, the error checking is turned # off. if _error_check: scale = sanitize_scale(scale) for base in bases: if not isinstance(base, UnitBase): raise TypeError("bases must be sequence of UnitBase instances") powers = [sanitize_power(p) for p in powers] if not decompose and len(bases) == 1 and powers[0] >= 0: # Short-cut; with one unit there's nothing to expand and gather, # as that has happened already when creating the unit. 
But do only # positive powers, since for negative powers we need to re-sort. unit = bases[0] power = powers[0] if power == 1: scale *= unit.scale self._bases = unit.bases self._powers = unit.powers elif power == 0: self._bases = [] self._powers = [] else: scale *= unit.scale**power self._bases = unit.bases self._powers = [ sanitize_power(operator.mul(*resolve_fractions(p, power))) for p in unit.powers ] self._scale = sanitize_scale(scale) else: # Regular case: use inputs as preliminary scale, bases, and powers, # then "expand and gather" identical bases, sanitize the scale, &c. self._scale = scale self._bases = bases self._powers = powers self._expand_and_gather(decompose=decompose, bases=decompose_bases) def __repr__(self) -> str: if len(self._bases): return super().__repr__() else: if self._scale != 1.0: return f"Unit(dimensionless with a scale of {self._scale})" else: return "Unit(dimensionless)" @property def scale(self) -> UnitScale: """The scale of the composite unit.""" return self._scale @property def bases(self) -> list[NamedUnit]: """The bases of the composite unit.""" return self._bases @property def powers(self) -> list[UnitPower]: """The powers of the bases of the composite unit.""" return self._powers def _expand_and_gather( self, decompose: bool = False, bases: Collection[UnitBase] = () ): def add_unit(unit, power, scale): if bases and unit not in bases: for base in bases: try: scale *= unit._to(base) ** power except UnitsError: pass else: unit = base break if unit in new_parts: a, b = resolve_fractions(new_parts[unit], power) new_parts[unit] = a + b else: new_parts[unit] = power return scale new_parts = {} scale = self._scale for b, p in zip(self._bases, self._powers): if decompose and b not in bases: b = b.decompose(bases=bases) if isinstance(b, CompositeUnit): scale *= b._scale**p for b_sub, p_sub in zip(b._bases, b._powers): a, b = resolve_fractions(p_sub, p) scale = add_unit(b_sub, a * b, scale) else: scale = add_unit(b, p, scale) new_parts = [x 
for x in new_parts.items() if x[1] != 0] new_parts.sort(key=lambda x: (-x[1], getattr(x[0], "name", ""))) self._bases = [x[0] for x in new_parts] self._powers = [sanitize_power(x[1]) for x in new_parts] self._scale = sanitize_scale(scale) def __copy__(self) -> "CompositeUnit": return CompositeUnit(self._scale, self._bases[:], self._powers[:]) def decompose(self, bases: Collection[UnitBase] = ()) -> "CompositeUnit": if len(bases) == 0 and self._decomposed_cache is not None: return self._decomposed_cache for base in self.bases: if not isinstance(base, IrreducibleUnit) or ( len(bases) and base not in bases ): break else: if len(bases) == 0: self._decomposed_cache = self return self x = CompositeUnit( self.scale, self.bases, self.powers, decompose=True, decompose_bases=bases ) if len(bases) == 0: self._decomposed_cache = x return x def is_unity(self) -> bool: unit = self.decompose() return len(unit.bases) == 0 and unit.scale == 1.0
CompositeUnit
python
spack__spack
lib/spack/spack/util/windows_registry.py
{ "start": 15197, "end": 15287 }
class ____(Exception): """RunTime Error concerning the Windows Registry"""
RegistryError
python
PyCQA__pylint
tests/pyreverse/functional/class_diagrams/relationships_filtering/all.py
{ "start": 71, "end": 148 }
class ____: def __init__(self): self.__x = P("private")
PrivateAttr
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI012.py
{ "start": 554, "end": 714 }
class ____: value: int pass # PYI012 Class body must not contain `pass` def __init__(): pass # Not violations (of PYI012)
NonEmptyWithInit
python
getsentry__sentry
src/sentry/discover/arithmetic.py
{ "start": 4684, "end": 15201 }
class ____(NodeVisitor): DEFAULT_MAX_OPERATORS = 10 # Don't wrap in VisitationErrors unwrapped_exceptions = (ArithmeticError,) field_allowlist = { "transaction.duration", "spans.http", "spans.db", "spans.resource", "spans.browser", "spans.total.time", "measurements.app_start_cold", "measurements.app_start_warm", "measurements.cls", "measurements.fcp", "measurements.fid", "measurements.fp", "measurements.frames_frozen", "measurements.frames_slow", "measurements.frames_total", "measurements.lcp", "measurements.stall_count", "measurements.stall_stall_longest_time", "measurements.stall_stall_total_time", "measurements.time_to_full_display", "measurements.time_to_initial_display", "measurements.ttfb", "measurements.ttfb.requesttime", TOTAL_COUNT_ALIAS, TOTAL_TRANSACTION_DURATION_ALIAS, } function_allowlist = { "count", "count_if", "count_unique", "failure_count", "failure_rate", "min", "max", "avg", "sum", "p50", "p75", "p90", "p95", "p99", "p100", "percentile", "apdex", "user_misery", "eps", "epm", "count_miserable", "count_web_vitals", "percentile_range", } def __init__(self, max_operators: int | None, custom_measurements: set[str] | None): super().__init__() self.operators: int = 0 self.terms: int = 0 self.max_operators = max_operators if max_operators else self.DEFAULT_MAX_OPERATORS self.fields: set[str] = set() self.functions: set[str] = set() self.custom_measurements: set[str] = custom_measurements or set() def visit_term(self, _, children): maybe_factor, remaining_adds = children maybe_factor = maybe_factor[0] # remaining_adds is either a list containing an Operation, or a Node if isinstance(remaining_adds, list): # Update the operation with lhs and continue remaining_adds[0].lhs = maybe_factor return flatten(remaining_adds) else: # if remaining is a node lhs contains a factor so just return that return maybe_factor def visit_factor(self, _, children): primary, remaining_muls = children remaining_muls[0].lhs = primary return flatten(remaining_muls) def 
visited_operator(self): """We visited an operator, increment the count and error if we exceed max""" self.operators += 1 if self.operators > self.max_operators: raise MaxOperatorError("Exceeded maximum number of operations") def visit_add_sub(self, _, children): add_sub_operator, maybe_factor = children self.visited_operator() return Operation(add_sub_operator, rhs=maybe_factor[0]) def visit_mul_div(self, _, children): mul_div_operator, primary = children self.visited_operator() return Operation(mul_div_operator, rhs=primary) @staticmethod def strip_spaces(children): """Visitor for a `spaces foo spaces` node""" _, value, _ = children return value def visit_maybe_factor(self, _, children): return self.strip_spaces(children) def visit_primary(self, _, children): # Return the 0th element since this is a (numeric/function/field) self.terms += 1 return self.strip_spaces(children)[0] def visit_parens(self, _, children): # Strip brackets _, term, _ = children return term @staticmethod def parse_operator(operator): # operator is a list since the pattern is (a/b) but we'll only ever want the first value return operator[0].expr_name def visit_add_sub_operator(self, _, children): return self.parse_operator(self.strip_spaces(children)) def visit_mul_div_operator(self, _, children): return self.parse_operator(self.strip_spaces(children)) def visit_numeric_value(self, node, _): return float(node.text) def visit_field_value(self, node, _): field = node.text if field not in self.field_allowlist and field not in self.custom_measurements: raise ArithmeticValidationError(f"{field} not allowed in arithmetic") self.fields.add(field) return field def visit_function_value(self, node, children): function_node, *_ = children function_name = function_node.text field = node.text if function_name not in self.function_allowlist: raise ArithmeticValidationError(f"{function_name} not allowed in arithmetic") self.functions.add(field) return field def generic_visit(self, node, children): return 
children or node @overload def parse_arithmetic( equation: str, max_operators: int | None = None, custom_measurements: set[str] | None = None, *, validate_single_operator: Literal[True], ) -> tuple[Operation, list[str], list[str]]: ... @overload def parse_arithmetic( equation: str, max_operators: int | None = None, custom_measurements: set[str] | None = None, ) -> tuple[Operation | float | str, list[str], list[str]]: ... def parse_arithmetic( equation: str, max_operators: int | None = None, custom_measurements: set[str] | None = None, validate_single_operator: bool = False, ) -> tuple[Operation | float | str, list[str], list[str]]: """Given a string equation try to parse it into a set of Operations""" try: tree = arithmetic_grammar.parse(equation) except ParseError: raise ArithmeticParseError( "Unable to parse your equation, make sure it is well formed arithmetic" ) visitor = ArithmeticVisitor(max_operators, custom_measurements) result = visitor.visit(tree) # total count is the exception to the no mixing rule if ( visitor.fields.intersection({TOTAL_COUNT_ALIAS, TOTAL_TRANSACTION_DURATION_ALIAS}) and len(visitor.functions) > 0 ): return result, list(visitor.fields), list(visitor.functions) if len(visitor.fields) > 0 and len(visitor.functions) > 0: raise ArithmeticValidationError("Cannot mix functions and fields in arithmetic") if validate_single_operator and visitor.operators == 0: raise ArithmeticValidationError("Arithmetic expression must contain at least 1 operator") return result, list(visitor.fields), list(visitor.functions) def resolve_equation_list( equations: list[str], selected_columns: list[str], aggregates_only: bool = False, auto_add: bool = False, plain_math: bool = False, custom_measurements: set[str] | None = None, ) -> tuple[list[str], list[ParsedEquation]]: """Given a list of equation strings, resolve them to their equivalent snuba json query formats :param equations: list of equations strings that haven't been parsed yet :param selected_columns: 
list of public aliases from the endpoint, can be a mix of fields and aggregates :param aggregates_only: Optional parameter whether we need to enforce equations don't include fields intended for use with event-stats where fields aren't compatible since they change grouping :param: auto_add: Optional parameter that will take any fields in the equation that's missing in the selected_columns and return a new list with them added :param plain_math: Allow equations that don't include any fields or functions, disabled by default """ parsed_equations: list[ParsedEquation] = [] resolved_columns: list[str] = selected_columns[:] for index, equation in enumerate(equations): parsed_equation, fields, functions = parse_arithmetic( equation, None, custom_measurements, validate_single_operator=True ) if (len(fields) == 0 and len(functions) == 0) and not plain_math: raise InvalidSearchQuery("Equations need to include a field or function") if aggregates_only and len(functions) == 0: raise InvalidSearchQuery("Only equations on aggregate functions are supported") for field in fields: if field not in selected_columns: if auto_add: resolved_columns.append(field) else: raise InvalidSearchQuery( f"{field} used in an equation but is not a selected field" ) for function in functions: if function not in selected_columns: if auto_add: resolved_columns.append(function) else: raise InvalidSearchQuery( f"{function} used in an equation but is not a selected function" ) # TODO: currently returning "resolved_equations" for the json syntax # once we're converted to SnQL this should only return parsed_equations parsed_equations.append(ParsedEquation(parsed_equation, len(functions) > 0)) return resolved_columns, parsed_equations def is_equation(field: str) -> bool: """check if a public alias is an equation, which start with the equation prefix eg. 
`equation|5 + 5` """ return field.startswith(EQUATION_PREFIX) def strip_equation(field: str) -> str: """remove the equation prefix from a public field alias""" assert is_equation(field), f"{field} does not start with {EQUATION_PREFIX}" return field[len(EQUATION_PREFIX) :] def categorize_columns(columns) -> tuple[list[str], list[str]]: """equations have a prefix so that they can be easily included alongside our existing fields""" equations = [] fields = [] for column in columns: if is_equation(column): equations.append(strip_equation(column)) else: fields.append(column) return equations, fields def is_equation_alias(alias: str) -> bool: return EQUATION_ALIAS_REGEX.match(alias) is not None def get_equation_alias_index(alias: str) -> int | None: """Extract the index from an equation alias like 'equation[5]' -> 5""" match = re.match(r"^equation\[(\d+)\]$", alias) if match: return int(match.group(1)) return None
ArithmeticVisitor
python
falconry__falcon
falcon/_typing.py
{ "start": 6729, "end": 6945 }
class ____(Protocol): """ASGI middleware with startup handler.""" async def process_startup( self, scope: Mapping[str, Any], event: Mapping[str, Any] ) -> None: ...
AsgiMiddlewareWithProcessStartup
python
more-itertools__more-itertools
tests/test_more.py
{ "start": 69371, "end": 74392 }
class ____(TestCase): """Tests for sort_together()""" def test_key_list(self): """tests `key_list` including default, iterables include duplicates""" iterables = [ ['GA', 'GA', 'GA', 'CT', 'CT', 'CT'], ['May', 'Aug.', 'May', 'June', 'July', 'July'], [97, 20, 100, 70, 100, 20], ] self.assertEqual( mi.sort_together(iterables), [ ('CT', 'CT', 'CT', 'GA', 'GA', 'GA'), ('June', 'July', 'July', 'May', 'Aug.', 'May'), (70, 100, 20, 97, 20, 100), ], ) self.assertEqual( mi.sort_together(iterables, key_list=(0, 1)), [ ('CT', 'CT', 'CT', 'GA', 'GA', 'GA'), ('July', 'July', 'June', 'Aug.', 'May', 'May'), (100, 20, 70, 20, 97, 100), ], ) self.assertEqual( mi.sort_together(iterables, key_list=(0, 1, 2)), [ ('CT', 'CT', 'CT', 'GA', 'GA', 'GA'), ('July', 'July', 'June', 'Aug.', 'May', 'May'), (20, 100, 70, 20, 97, 100), ], ) self.assertEqual( mi.sort_together(iterables, key_list=(2,)), [ ('GA', 'CT', 'CT', 'GA', 'GA', 'CT'), ('Aug.', 'July', 'June', 'May', 'May', 'July'), (20, 20, 70, 97, 100, 100), ], ) def test_invalid_key_list(self): """tests `key_list` for indexes not available in `iterables`""" iterables = [ ['GA', 'GA', 'GA', 'CT', 'CT', 'CT'], ['May', 'Aug.', 'May', 'June', 'July', 'July'], [97, 20, 100, 70, 100, 20], ] self.assertRaises( IndexError, lambda: mi.sort_together(iterables, key_list=(5,)) ) def test_key_function(self): """tests `key` function, including interaction with `key_list`""" iterables = [ ['GA', 'GA', 'GA', 'CT', 'CT', 'CT'], ['May', 'Aug.', 'May', 'June', 'July', 'July'], [97, 20, 100, 70, 100, 20], ] self.assertEqual( mi.sort_together(iterables, key=lambda x: x), [ ('CT', 'CT', 'CT', 'GA', 'GA', 'GA'), ('June', 'July', 'July', 'May', 'Aug.', 'May'), (70, 100, 20, 97, 20, 100), ], ) self.assertEqual( mi.sort_together(iterables, key=lambda x: x[::-1]), [ ('GA', 'GA', 'GA', 'CT', 'CT', 'CT'), ('May', 'Aug.', 'May', 'June', 'July', 'July'), (97, 20, 100, 70, 100, 20), ], ) self.assertEqual( mi.sort_together( iterables, key_list=(0, 2), key=lambda state, 
number: ( number if state == 'CT' else 2 * number ), ), [ ('CT', 'GA', 'CT', 'CT', 'GA', 'GA'), ('July', 'Aug.', 'June', 'July', 'May', 'May'), (20, 20, 70, 100, 97, 100), ], ) def test_reverse(self): """tests `reverse` to ensure a reverse sort for `key_list` iterables""" iterables = [ ['GA', 'GA', 'GA', 'CT', 'CT', 'CT'], ['May', 'Aug.', 'May', 'June', 'July', 'July'], [97, 20, 100, 70, 100, 20], ] self.assertEqual( mi.sort_together(iterables, key_list=(0, 1, 2), reverse=True), [ ('GA', 'GA', 'GA', 'CT', 'CT', 'CT'), ('May', 'May', 'Aug.', 'June', 'July', 'July'), (100, 97, 20, 70, 100, 20), ], ) def test_uneven_iterables(self): """tests trimming of iterables to the shortest length before sorting""" iterables = [ ['GA', 'GA', 'GA', 'CT', 'CT', 'CT', 'MA'], ['May', 'Aug.', 'May', 'June', 'July', 'July'], [97, 20, 100, 70, 100, 20, 0], ] self.assertEqual( mi.sort_together(iterables), [ ('CT', 'CT', 'CT', 'GA', 'GA', 'GA'), ('June', 'July', 'July', 'May', 'Aug.', 'May'), (70, 100, 20, 97, 20, 100), ], ) def test_strict(self): # Test for list of lists or tuples self.assertRaises( ValueError, lambda: mi.sort_together( [(4, 3, 2, 1), ('a', 'b', 'c')], strict=True ), ) # Test for list of iterables self.assertRaises( ValueError, lambda: mi.sort_together([range(4), range(5)], strict=True), ) # Test for iterable of iterables self.assertRaises( ValueError, lambda: mi.sort_together( (range(i) for i in range(4)), strict=True ), )
SortTogetherTest
python
scikit-learn__scikit-learn
sklearn/metrics/_scorer.py
{ "start": 12282, "end": 15655 }
class ____(_BaseScorer): def _score(self, method_caller, estimator, X, y_true, **kwargs): """Evaluate the response method of `estimator` on `X` and `y_true`. Parameters ---------- method_caller : callable Returns predictions given an estimator, method name, and other arguments, potentially caching results. estimator : object Trained estimator to use for scoring. X : {array-like, sparse matrix} Test data that will be fed to clf.decision_function or clf.predict_proba. y_true : array-like Gold standard target values for X. These must be class labels, not decision function values. **kwargs : dict Other parameters passed to the scorer. Refer to :func:`set_score_request` for more details. Returns ------- score : float Score function applied to prediction of estimator on X. """ self._warn_overlap( message=( "There is an overlap between set kwargs of this scorer instance and" " passed metadata. Please pass them either as kwargs to `make_scorer`" " or metadata, but not both." ), kwargs=kwargs, ) pos_label = None if is_regressor(estimator) else self._get_pos_label() response_method = _check_response_method(estimator, self._response_method) y_pred = method_caller( estimator, _get_response_method_name(response_method), X, pos_label=pos_label, ) scoring_kwargs = {**self._kwargs, **kwargs} return self._sign * self._score_func(y_true, y_pred, **scoring_kwargs) @validate_params( { "scoring": [str, callable, None], }, prefer_skip_nested_validation=True, ) def get_scorer(scoring): """Get a scorer from string. Read more in the :ref:`User Guide <scoring_parameter>`. :func:`~sklearn.metrics.get_scorer_names` can be used to retrieve the names of all available scorers. Parameters ---------- scoring : str, callable or None Scoring method as string. If callable it is returned as is. If None, returns None. Returns ------- scorer : callable The scorer. Notes ----- When passed a string, this function always returns a copy of the scorer object. 
Calling `get_scorer` twice for the same scorer results in two separate scorer objects. Examples -------- >>> import numpy as np >>> from sklearn.dummy import DummyClassifier >>> from sklearn.metrics import get_scorer >>> X = np.reshape([0, 1, -1, -0.5, 2], (-1, 1)) >>> y = np.array([0, 1, 1, 0, 1]) >>> classifier = DummyClassifier(strategy="constant", constant=0).fit(X, y) >>> accuracy = get_scorer("accuracy") >>> accuracy(classifier, X, y) 0.4 """ if isinstance(scoring, str): try: scorer = copy.deepcopy(_SCORERS[scoring]) except KeyError: raise ValueError( "%r is not a valid scoring value. " "Use sklearn.metrics.get_scorer_names() " "to get valid options." % scoring ) else: scorer = scoring return scorer
_Scorer
python
automl__auto-sklearn
autosklearn/pipeline/components/feature_preprocessing/polynomial.py
{ "start": 468, "end": 2590 }
class ____(AutoSklearnPreprocessingAlgorithm): def __init__(self, degree, interaction_only, include_bias, random_state=None): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias self.random_state = random_state self.preprocessor = None def fit(self, X, Y): import sklearn.preprocessing self.degree = int(self.degree) self.interaction_only = check_for_bool(self.interaction_only) self.include_bias = check_for_bool(self.include_bias) self.preprocessor = sklearn.preprocessing.PolynomialFeatures( degree=self.degree, interaction_only=self.interaction_only, include_bias=self.include_bias, ) self.preprocessor.fit(X, Y) return self def transform(self, X): if self.preprocessor is None: raise NotImplementedError() return self.preprocessor.transform(X) @staticmethod def get_properties(dataset_properties=None): return { "shortname": "PolynomialFeatures", "name": "PolynomialFeatures", "handles_regression": True, "handles_classification": True, "handles_multiclass": True, "handles_multilabel": True, "handles_multioutput": True, "is_deterministic": True, "input": (DENSE, SPARSE, UNSIGNED_DATA), "output": (INPUT,), } @staticmethod def get_hyperparameter_search_space( feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None ): # More than degree 3 is too expensive! degree = UniformIntegerHyperparameter("degree", 2, 3, 2) interaction_only = CategoricalHyperparameter( "interaction_only", ["False", "True"], "False" ) include_bias = CategoricalHyperparameter( "include_bias", ["True", "False"], "True" ) cs = ConfigurationSpace() cs.add_hyperparameters([degree, interaction_only, include_bias]) return cs
PolynomialFeatures
python
falconry__falcon
falcon/routing/compiled.py
{ "start": 42096, "end": 42404 }
class ____(_CxChild): def __init__(self, field_name: str) -> None: self._field_name = field_name def src(self, indentation: int) -> str: return "{0}fragment = groups.pop('{1}')".format( _TAB_STR * indentation, self._field_name, )
_CxSetFragmentFromField
python
cython__cython
Cython/Tests/xmlrunner.py
{ "start": 1475, "end": 1677 }
class ____(xml.dom.minidom.Document): def createCDATAOrText(self, data): if ']]>' in data: return self.createTextNode(data) return self.createCDATASection(data)
XMLDocument
python
scrapy__scrapy
tests/test_downloadermiddleware_httpcache.py
{ "start": 25725, "end": 25953 }
class ____( TestBase, StorageTestMixin, RFC2616PolicyTestMixin ): storage_class = "scrapy.extensions.httpcache.DbmCacheStorage" policy_class = "scrapy.extensions.httpcache.RFC2616Policy"
TestDbmStorageWithRFC2616Policy
python
pypa__warehouse
warehouse/packaging/models.py
{ "start": 31737, "end": 35655 }
class ____(HasEvents, db.Model): __tablename__ = "release_files" @declared_attr def __table_args__(cls): # noqa return ( CheckConstraint("sha256_digest ~* '^[A-F0-9]{64}$'"), CheckConstraint("blake2_256_digest ~* '^[A-F0-9]{64}$'"), Index( "release_files_single_sdist", "release_id", "packagetype", unique=True, postgresql_where=( (cls.packagetype == "sdist") & (cls.allow_multiple_sdist == False) # noqa ), ), Index("release_files_release_id_idx", "release_id"), Index("release_files_archived_idx", "archived"), Index("release_files_cached_idx", "cached"), ) __parent__ = dotted_navigator("release") __name__ = dotted_navigator("filename") release_id: Mapped[UUID] = mapped_column( ForeignKey("releases.id", onupdate="CASCADE", ondelete="CASCADE"), ) release: Mapped[Release] = orm.relationship(back_populates="files") python_version: Mapped[str] requires_python: Mapped[str | None] packagetype: Mapped[PackageType] = mapped_column() comment_text: Mapped[str | None] filename: Mapped[str] = mapped_column(unique=True) path: Mapped[str] = mapped_column(unique=True) size: Mapped[int] md5_digest: Mapped[str] = mapped_column(unique=True) sha256_digest: Mapped[str] = mapped_column(CITEXT, unique=True) blake2_256_digest: Mapped[str] = mapped_column(CITEXT, unique=True) upload_time: Mapped[datetime_now] uploaded_via: Mapped[str | None] # PEP 658 metadata_file_sha256_digest: Mapped[str | None] = mapped_column(CITEXT) metadata_file_blake2_256_digest: Mapped[str | None] = mapped_column(CITEXT) # We need this column to allow us to handle the currently existing "double" # sdists that exist in our database. Eventually we should try to get rid # of all of them and then remove this column. 
allow_multiple_sdist: Mapped[bool_false] = mapped_column() cached: Mapped[bool_false] = mapped_column( comment="If True, the object has been populated to our cache bucket.", ) archived: Mapped[bool_false] = mapped_column( comment="If True, the object has been archived to our archival bucket.", ) metadata_file_unbackfillable: Mapped[bool_false] = mapped_column( nullable=True, comment="If True, the metadata for the file cannot be backfilled.", ) # PEP 740 provenance: Mapped[Provenance] = orm.relationship( cascade="all, delete-orphan", lazy="joined", passive_deletes=True, ) @property def uploaded_via_trusted_publisher(self) -> bool: """Return True if the file was uploaded via a trusted publisher.""" return ( self.events.where( or_( self.Event.additional[ # type: ignore[attr-defined] "uploaded_via_trusted_publisher" ].as_boolean(), self.Event.additional["publisher_url"] # type: ignore[attr-defined] .as_string() .is_not(None), ) ).count() > 0 ) @hybrid_property def metadata_path(self): return self.path + ".metadata" @metadata_path.expression # type: ignore def metadata_path(self): return func.concat(self.path, ".metadata") @validates("requires_python") def validates_requires_python(self, *args, **kwargs): raise RuntimeError("Cannot set File.requires_python") @property def pretty_wheel_tags(self) -> list[str]: return wheel.filename_to_pretty_tags(self.filename) @property def wheel_filters(self): return wheel.filename_to_filters(self.filename)
File
python
huggingface__transformers
tests/models/phi3/test_modeling_phi3.py
{ "start": 2991, "end": 3114 }
class ____(CausalLMModelTest, unittest.TestCase): model_tester_class = Phi3ModelTester @slow @require_torch
Phi3ModelTest
python
astropy__astropy
astropy/units/tests/test_quantity_non_ufuncs.py
{ "start": 25135, "end": 26077 }
class ____: def setup_method(self): self.q = np.arange(54.0).reshape(3, 3, 6) * u.m def check(self, func, *args, **kwargs): out = func(self.q, *args, **kwargs) expected = func(self.q.value, *args, **kwargs) expected = [x * self.q.unit for x in expected] assert len(out) == len(expected) assert all(o.shape == x.shape for o, x in zip(out, expected)) assert all(np.all(o == x) for o, x in zip(out, expected)) def test_split(self): self.check(np.split, [1]) def test_array_split(self): self.check(np.array_split, 2) def test_hsplit(self): self.check(np.hsplit, [1, 4]) def test_vsplit(self): self.check(np.vsplit, [1]) def test_dsplit(self): self.check(np.dsplit, [1]) @pytest.mark.skipif(NUMPY_LT_2_1, reason="np.unstack is new in Numpy 2.1") def test_unstack(self): self.check(np.unstack)
TestSplit
python
huggingface__transformers
tests/quantization/higgs/test_higgs.py
{ "start": 1169, "end": 2219 }
class ____(unittest.TestCase): def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = HiggsConfig() config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = {"modules_to_not_convert": ["embed_tokens", "lm_head"], "quant_method": "higgs"} quantization_config = HiggsConfig.from_dict(dict) self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert) self.assertEqual(dict["quant_method"], quantization_config.quant_method) @slow @require_torch_gpu @require_flute_hadamard @require_accelerate # @require_read_token
HiggsConfigTest
python
PrefectHQ__prefect
tests/test_flows.py
{ "start": 100139, "end": 106188 }
class ____: def test_noniterable_hook_raises(self): def failure_hook(): pass with pytest.raises( TypeError, match=re.escape( "Expected iterable for 'on_failure'; got function instead. Please" " provide a list of hooks to 'on_failure':\n\n" "@flow(on_failure=[hook1, hook2])\ndef my_flow():\n\tpass" ), ): @flow(on_failure=failure_hook) def flow1(): pass def test_noncallable_hook_raises(self): with pytest.raises( TypeError, match=re.escape( "Expected callables in 'on_failure'; got str instead. Please provide a" " list of hooks to 'on_failure':\n\n" "@flow(on_failure=[hook1, hook2])\ndef my_flow():\n\tpass" ), ): @flow(on_failure=["test"]) def flow1(): pass def test_callable_noncallable_hook_raises(self): def failure_hook(): pass with pytest.raises( TypeError, match=re.escape( "Expected callables in 'on_failure'; got str instead. Please provide a" " list of hooks to 'on_failure':\n\n" "@flow(on_failure=[hook1, hook2])\ndef my_flow():\n\tpass" ), ): @flow(on_failure=[failure_hook, "test"]) def flow2(): pass def test_decorated_on_failure_hooks_run_on_failure(self): my_mock = MagicMock() @flow def my_flow(): raise Exception("oops") @my_flow.on_failure def failed1(flow, flow_run, state): my_mock("failed1") @my_flow.on_failure def failed2(flow, flow_run, state): my_mock("failed2") state = my_flow(return_state=True) assert state.type == StateType.FAILED assert my_mock.call_args_list == [call("failed1"), call("failed2")] def test_on_failure_hooks_run_on_failure(self): my_mock = MagicMock() def failed1(flow, flow_run, state): my_mock("failed1") def failed2(flow, flow_run, state): my_mock("failed2") @flow(on_failure=[failed1, failed2]) def my_flow(): raise Exception("oops") state = my_flow(return_state=True) assert state.type == StateType.FAILED assert my_mock.call_args_list == [call("failed1"), call("failed2")] def test_on_failure_hooks_dont_run_on_completed(self): my_mock = MagicMock() def failed1(flow, flow_run, state): my_mock("failed1") def failed2(flow, flow_run, state): 
my_mock("failed2") @flow(on_failure=[failed1, failed2]) def my_flow(): pass state = my_flow(return_state=True) assert state.type == StateType.COMPLETED my_mock.assert_not_called() def test_on_failure_hooks_dont_run_on_retries(self): my_mock = MagicMock() def failed1(flow, flow_run, state): my_mock("failed1") def failed2(flow, flow_run, state): my_mock("failed2") @flow(on_failure=[failed1, failed2], retries=3) def my_flow(): raise SyntaxError("oops") state = my_flow(return_state=True) assert state.type == StateType.FAILED assert my_mock.call_count == 2 assert [call.args[0] for call in my_mock.call_args_list] == [ "failed1", "failed2", ] async def test_on_async_failure_hooks_dont_run_on_retries(self): my_mock = MagicMock() async def failed1(flow, flow_run, state): my_mock("failed1") async def failed2(flow, flow_run, state): my_mock("failed2") @flow(on_failure=[failed1, failed2], retries=3) async def my_flow(): raise SyntaxError("oops") state = await my_flow(return_state=True) assert state.type == StateType.FAILED assert my_mock.call_count == 2 assert [call.args[0] for call in my_mock.call_args_list] == [ "failed1", "failed2", ] def test_other_failure_hooks_run_if_a_hook_fails(self): my_mock = MagicMock() def failed1(flow, flow_run, state): my_mock("failed1") def exception_hook(flow, flow_run, state): raise Exception("oops") def failed2(flow, flow_run, state): my_mock("failed2") @flow(on_failure=[failed1, exception_hook, failed2]) def my_flow(): raise Exception("oops") state = my_flow(return_state=True) assert state.type == StateType.FAILED assert my_mock.call_args_list == [call("failed1"), call("failed2")] @pytest.mark.parametrize( "hook1, hook2", [ (create_hook, create_hook), (create_hook, create_async_hook), (create_async_hook, create_hook), (create_async_hook, create_async_hook), ], ) def test_on_failure_hooks_work_with_sync_and_async(self, hook1, hook2): my_mock = MagicMock() hook1_with_mock = hook1(my_mock) hook2_with_mock = hook2(my_mock) 
@flow(on_failure=[hook1_with_mock, hook2_with_mock]) def my_flow(): raise Exception("oops") state = my_flow(return_state=True) assert state.type == StateType.FAILED assert my_mock.call_args_list == [call(), call()] def test_on_failure_hooks_run_on_bad_parameters(self): my_mock = MagicMock() def failure_hook(flow, flow_run, state): my_mock("failure_hook") @flow(on_failure=[failure_hook]) def my_flow(x: int): pass state = my_flow(x="x", return_state=True) assert state.type == StateType.FAILED assert my_mock.call_args_list == [call("failure_hook")]
TestFlowHooksOnFailure
python
pytorch__pytorch
torch/testing/_internal/common_quantization.py
{ "start": 92803, "end": 92995 }
class ____(nn.Sequential): def __init__(self) -> None: super().__init__( nn.Conv2d(3, 3, 1, 1, bias=False), nn.BatchNorm2d(3), nn.ReLU(inplace=False) )
ConvBNReLU
python
django-extensions__django-extensions
django_extensions/management/commands/sqlcreate.py
{ "start": 433, "end": 4757 }
class ____(BaseCommand): help = """Generates the SQL to create your database for you, as specified in settings.py The envisioned use case is something like this: ./manage.py sqlcreate [--database=<databasename>] | mysql -u <db_administrator> -p ./manage.py sqlcreate [--database=<databasename>] | psql -U <db_administrator> -W """ # noqa: E501 requires_system_checks: List[str] = [] can_import_settings = True def add_arguments(self, parser): super().add_arguments(parser) parser.add_argument( "-R", "--router", action="store", dest="router", default=DEFAULT_DB_ALIAS, help="Use this router-database other then defined in settings.py", ) parser.add_argument( "--database", default=DEFAULT_DB_ALIAS, help=( "Nominates a database to run command for. " 'Defaults to the "%s" database.' ) % DEFAULT_DB_ALIAS, ) parser.add_argument( "-D", "--drop", action="store_true", dest="drop", default=False, help="If given, includes commands to drop any existing user and database.", ) @signalcommand def handle(self, *args, **options): database = options["database"] if options["router"] != DEFAULT_DB_ALIAS: warnings.warn( "--router is deprecated. You should use --database.", RemovedInNextVersionWarning, stacklevel=2, ) database = options["router"] dbinfo = settings.DATABASES.get(database) if dbinfo is None: raise CommandError("Unknown database %s" % database) engine = dbinfo.get("ENGINE") dbuser = dbinfo.get("USER") dbpass = dbinfo.get("PASSWORD") dbname = dbinfo.get("NAME") dbhost = dbinfo.get("HOST") dbclient = socket.gethostname() # django settings file tells you that localhost should be specified by leaving # the DATABASE_HOST blank if not dbhost: dbhost = "localhost" if engine in SQLITE_ENGINES: sys.stderr.write( "-- manage.py migrate will automatically create a sqlite3 database file.\n" # noqa: E501 ) elif engine in MYSQL_ENGINES: sys.stderr.write( """-- WARNING!: https://docs.djangoproject.com/en/dev/ref/databases/#collation-settings -- Please read this carefully! 
Collation will be set to utf8_bin to have case-sensitive data. """ # noqa: E501 ) print("CREATE DATABASE %s CHARACTER SET utf8 COLLATE utf8_bin;" % dbname) print( "GRANT ALL PRIVILEGES ON %s.* to '%s'@'%s' identified by '%s';" % (dbname, dbuser, dbclient, dbpass) ) elif engine in POSTGRESQL_ENGINES: if options["drop"]: print("DROP DATABASE IF EXISTS %s;" % (dbname,)) if dbuser: print("DROP USER IF EXISTS %s;" % (dbuser,)) if dbuser and dbpass: print( "CREATE USER %s WITH ENCRYPTED PASSWORD '%s' CREATEDB;" % (dbuser, dbpass) ) print( "CREATE DATABASE %s WITH ENCODING 'UTF-8' OWNER \"%s\";" % (dbname, dbuser) ) print("GRANT ALL PRIVILEGES ON DATABASE %s TO %s;" % (dbname, dbuser)) else: print( "-- Assuming that unix domain socket connection mode is being used because\n" # noqa: E501 "-- USER or PASSWORD are blank in Django DATABASES configuration." ) print("CREATE DATABASE %s WITH ENCODING 'UTF-8';" % (dbname,)) else: # CREATE DATABASE is not SQL standard, but seems to be supported by most. sys.stderr.write( "-- Don't know how to handle '%s' falling back to SQL.\n" % engine ) print("CREATE DATABASE %s;" % dbname) print("GRANT ALL PRIVILEGES ON DATABASE %s to %s;" % (dbname, dbuser))
Command
python
sqlalchemy__sqlalchemy
test/orm/test_transaction.py
{ "start": 68675, "end": 72707 }
class ____(fixtures.MappedTest): __sparse_driver_backend__ = True @classmethod def define_tables(cls, metadata): Table("users", metadata, Column("name", String(50), primary_key=True)) @classmethod def setup_classes(cls): class User(cls.Comparable): pass def test_rollback_recover(self): users, User = self.tables.users, self.classes.User self.mapper_registry.map_imperatively(User, users) session = fixture_session() u1, u2, u3 = User(name="u1"), User(name="u2"), User(name="u3") session.add_all([u1, u2, u3]) session.commit() session.delete(u2) u4 = User(name="u2") session.add(u4) session.flush() u5 = User(name="u3") session.add(u5) with expect_warnings("New instance"): assert_raises(sa_exc.IntegrityError, session.flush) assert u5 not in session assert u2 not in session.deleted session.rollback() def test_reloaded_deleted_checked_for_expiry(self): """test issue #3677""" users, User = self.tables.users, self.classes.User self.mapper_registry.map_imperatively(User, users) u1 = User(name="u1") s = fixture_session() s.add(u1) s.flush() del u1 gc_collect() u1 = s.query(User).first() # noqa s.rollback() u2 = User(name="u1") s.add(u2) s.commit() assert inspect(u2).persistent def test_key_replaced_by_update(self): users, User = self.tables.users, self.classes.User self.mapper_registry.map_imperatively(User, users) u1 = User(name="u1") u2 = User(name="u2") s = fixture_session() s.add_all([u1, u2]) s.commit() s.delete(u1) s.flush() u2.name = "u1" s.flush() assert u1 not in s s.rollback() assert u1 in s assert u2 in s assert s.identity_map[identity_key(User, ("u1",))] is u1 assert s.identity_map[identity_key(User, ("u2",))] is u2 @testing.requires.savepoints def test_key_replaced_by_update_nested(self): users, User = self.tables.users, self.classes.User self.mapper_registry.map_imperatively(User, users) u1 = User(name="u1") s = fixture_session() s.add(u1) s.commit() with s.begin_nested(): u2 = User(name="u2") s.add(u2) s.flush() u2.name = "u3" s.rollback() assert u1 in s assert u2 
not in s u1.name = "u5" s.commit() def test_multiple_key_replaced_by_update(self): users, User = self.tables.users, self.classes.User self.mapper_registry.map_imperatively(User, users) u1 = User(name="u1") u2 = User(name="u2") u3 = User(name="u3") s = fixture_session() s.add_all([u1, u2, u3]) s.commit() s.delete(u1) s.delete(u2) s.flush() u3.name = "u1" s.flush() u3.name = "u2" s.flush() s.rollback() assert u1 in s assert u2 in s assert u3 in s assert s.identity_map[identity_key(User, ("u1",))] is u1 assert s.identity_map[identity_key(User, ("u2",))] is u2 assert s.identity_map[identity_key(User, ("u3",))] is u3 def test_key_replaced_by_oob_insert(self): users, User = self.tables.users, self.classes.User self.mapper_registry.map_imperatively(User, users) u1 = User(name="u1") s = fixture_session() s.add(u1) s.commit() s.delete(u1) s.flush() s.execute(users.insert().values(name="u1")) u2 = s.get(User, "u1") assert u1 not in s s.rollback() assert u1 in s assert u2 not in s assert s.identity_map[identity_key(User, ("u1",))] is u1
NaturalPKRollbackTest
python
sqlalchemy__sqlalchemy
test/orm/inheritance/test_basic.py
{ "start": 34101, "end": 36450 }
class ____(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table( "base", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("type", String(30)), ) Table( "sub", metadata, Column("id", Integer, ForeignKey("base.id"), primary_key=True), ) Table( "related", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("sub_id", Integer, ForeignKey("sub.id")), ) def test_use_get(self): base, sub, related = ( self.tables.base, self.tables.sub, self.tables.related, ) # test [ticket:1186] class Base(BasicEntity): pass class Sub(Base): pass class Related(Base): pass self.mapper_registry.map_imperatively( Base, base, polymorphic_on=base.c.type, polymorphic_identity="b" ) self.mapper_registry.map_imperatively( Sub, sub, inherits=Base, polymorphic_identity="s" ) self.mapper_registry.map_imperatively( Related, related, properties={ # previously, this was needed for the comparison to occur: # the 'primaryjoin' looks just like "Sub"'s "get" clause # (based on the Base id), and foreign_keys since that join # condition doesn't actually have any fks in it # 'sub':relationship(Sub, # primaryjoin=base.c.id==related.c.sub_id, # foreign_keys=related.c.sub_id) # now we can use this: "sub": relationship(Sub) }, ) assert class_mapper(Related).get_property("sub").strategy.use_get sess = fixture_session() s1 = Sub() r1 = Related(sub=s1) sess.add(r1) sess.flush() sess.expunge_all() r1 = sess.query(Related).first() s1 = sess.query(Sub).first() def go(): assert r1.sub self.assert_sql_count(testing.db, go, 0)
M2OUseGetTest
python
pytorch__pytorch
torch/_export/serde/schema.py
{ "start": 8254, "end": 8384 }
class ____: arg: Annotated[TensorArgument, 10] tensor_constant_name: Annotated[str, 20] @dataclass
InputToTensorConstantSpec
python
pytorch__pytorch
torch/distributed/pipelining/_IR.py
{ "start": 46641, "end": 49585 }
class ____: # Create a class alias for BC SplitPoint = SplitPoint def _split_before_forward(self, *args, **kwargs): pipe_split() return self._orig_forward(*args, **kwargs) def _split_after_forward(self, *args, **kwargs): try: return self._orig_forward(*args, **kwargs) finally: pipe_split() def annotate_split_points(mod: torch.nn.Module, spec: dict[str, SplitPoint]): # TODO: make this implementation out-of-place? for qualname, split_type in spec.items(): atoms = qualname.split(".") predecessor_module = mod for i, atom in enumerate(atoms[:-1]): try: predecessor_module = getattr(predecessor_module, atom) except AttributeError as e: raise AttributeError( f"Specified target {qualname} referenced " f"nonexistent module {'.'.join(atoms[: i + 1])}" ) from e mod_to_wrap = getattr(predecessor_module, atoms[-1]) mod_to_wrap._orig_forward = mod_to_wrap.forward if split_type == SplitPoint.BEGINNING: mod_to_wrap.forward = MethodType(_split_before_forward, mod_to_wrap) elif split_type == SplitPoint.END: mod_to_wrap.forward = MethodType(_split_after_forward, mod_to_wrap) else: raise ValueError("Unknown split point type.") def pipeline( module: torch.nn.Module, mb_args: tuple[Any, ...], mb_kwargs: Optional[dict[str, Any]] = None, split_spec: Optional[dict[str, SplitPoint]] = None, split_policy: Optional[Callable[[fx.GraphModule], fx.GraphModule]] = None, ) -> Pipe: """ Split a module based on a specification. See `Pipe` for more details. Arguments --------- module: The module to be split. mb_args: Example positional inputs, in micro-batch form. mb_kwargs: Example keyword inputs, in micro-batch form. (default: `None`) split_spec: A dictionary using submodule names as split marker. (default: `None`) split_policy: The policy to use for splitting the module. (default: `None`) Returns ------- A pipeline representation of class `Pipe`. """ if split_spec is not None and split_policy is not None: raise ValueError( "Cannot specify both `split_spec` and `split_policy`. 
Please use only one of them." ) if split_spec is not None: # Annotate split points in the module based on user spec annotate_split_points(module, split_spec) return Pipe.from_tracing( mod=module, example_args=mb_args, example_kwargs=mb_kwargs, ) else: # Use split policy return Pipe.from_tracing( mod=module, example_args=mb_args, example_kwargs=mb_kwargs, split_policy=split_policy, )
PipeSplitWrapper
python
huggingface__transformers
tests/deepspeed/test_deepspeed.py
{ "start": 16004, "end": 17831 }
class ____(TestCasePlus): def setUp(self): super().setUp() args = TrainingArguments(".") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size master_port = get_master_port(real_launcher=False) self.dist_env_1_gpu = { "MASTER_ADDR": "localhost", "MASTER_PORT": master_port, "RANK": "0", "LOCAL_RANK": "0", "WORLD_SIZE": "1", } self.ds_config_file = { "zero2": f"{self.test_file_dir_str}/ds_config_zero2.json", "zero3": f"{self.test_file_dir_str}/ds_config_zero3.json", } # use self.get_config_dict(stage) to use these to ensure the original is not modified with open(self.ds_config_file[ZERO2], encoding="utf-8") as f: config_zero2 = json.load(f) with open(self.ds_config_file[ZERO3], encoding="utf-8") as f: config_zero3 = json.load(f) # The following setting slows things down, so don't enable it by default unless needed by a test. # It's in the file as a demo for users since we want everything to work out of the box even if slower. config_zero3["zero_optimization"]["stage3_gather_16bit_weights_on_model_save"] = False self.ds_config_dict = { "zero2": config_zero2, "zero3": config_zero3, } def tearDown(self): super().tearDown() # reset the ds config global so that tests state doesn't leak unset_hf_deepspeed_config() def get_config_dict(self, stage): # As some tests modify the dict, always make a copy return deepcopy(self.ds_config_dict[stage]) @require_deepspeed @require_torch_fp16 @require_torch_accelerator
TrainerIntegrationDeepSpeedWithCustomConfig
python
Unity-Technologies__ml-agents
ml-agents/mlagents/trainers/settings.py
{ "start": 32058, "end": 32165 }
class ____: device: Optional[str] = parser.get_default("device") @attr.s(auto_attribs=True)
TorchSettings
python
numba__numba
numba/tests/test_dyn_array.py
{ "start": 15631, "end": 18236 }
class ____(NrtRefCtTest): def check_0d(self, pyfunc): cfunc = nrtjit(pyfunc) expected = pyfunc() ret = cfunc() self.assert_array_nrt_refct(ret, 1) self.assertEqual(ret.size, expected.size) self.assertEqual(ret.shape, expected.shape) self.assertEqual(ret.dtype, expected.dtype) self.assertEqual(ret.strides, expected.strides) self.check_result_value(ret, expected) # test writability expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8 expected.fill(123) ret.fill(123) np.testing.assert_equal(ret, expected) def check_1d(self, pyfunc): cfunc = nrtjit(pyfunc) n = 3 expected = pyfunc(n) ret = cfunc(n) self.assert_array_nrt_refct(ret, 1) self.assertEqual(ret.size, expected.size) self.assertEqual(ret.shape, expected.shape) self.assertEqual(ret.dtype, expected.dtype) self.assertEqual(ret.strides, expected.strides) self.check_result_value(ret, expected) # test writability expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8 expected.fill(123) ret.fill(123) np.testing.assert_equal(ret, expected) # errors with self.assertRaises(ValueError) as cm: cfunc(-1) self.assertEqual(str(cm.exception), "negative dimensions not allowed") def check_2d(self, pyfunc): cfunc = nrtjit(pyfunc) m, n = 2, 3 expected = pyfunc(m, n) ret = cfunc(m, n) self.assert_array_nrt_refct(ret, 1) self.assertEqual(ret.size, expected.size) self.assertEqual(ret.shape, expected.shape) self.assertEqual(ret.dtype, expected.dtype) self.assertEqual(ret.strides, expected.strides) self.check_result_value(ret, expected) # test writability expected = np.empty_like(ret) # np.full_like was not added until Numpy 1.8 expected.fill(123) ret.fill(123) np.testing.assert_equal(ret, expected) # errors with self.assertRaises(ValueError) as cm: cfunc(2, -1) self.assertEqual(str(cm.exception), "negative dimensions not allowed") def check_alloc_size(self, pyfunc): """Checks that pyfunc will error, not segfaulting due to array size.""" cfunc = nrtjit(pyfunc) with self.assertRaises(ValueError) 
as e: cfunc() self.assertIn( "array is too big", str(e.exception) )
ConstructorBaseTest
python
gevent__gevent
src/gevent/tests/test__monkey_select.py
{ "start": 144, "end": 741 }
class ____(greentest.TestCase): def _make_test(name, ns): # pylint:disable=no-self-argument def test(self): self.assertIs(getattr(select, name, self), self) self.assertFalse(hasattr(select, name)) test.__name__ = 'test_' + name + '_removed' ns[test.__name__] = test for name in ( 'epoll', 'kqueue', 'kevent', 'devpoll', ): _make_test(name, locals()) # pylint:disable=too-many-function-args del name del _make_test if __name__ == '__main__': greentest.main()
TestSelect
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py
{ "start": 22048, "end": 22129 }
class ____(SpaceToBatchGradientTest, CppOpImpl): pass
SpaceToBatchGradientCppTest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/call9.py
{ "start": 550, "end": 1025 }
class ____(Generic[T]): def __getitem__(self, __key: str) -> T: ... def keys(self) -> KeysView[T]: ... def func1(**kwargs: Any) -> None: ... m = MyMapping() r = StrRecord() def func2( m: MyMapping, r: StrRecord, g: GenericRecord[str], mrg: MyMapping | StrRecord | GenericRecord[str], bad: GenericRecord[bytes], ): func1(**m) func1(**r) func1(**g) func1(**mrg) # This should generate an error. func1(**bad)
GenericRecord
python
ApeWorX__ape
src/ape/utils/os.py
{ "start": 13889, "end": 15394 }
class ____: """ A context-manager for changing a directory. Initializing it will still change the directory, but you can optionally exit out of it to restore back to the original directory. Additionally, provides hooks to run when each of these events occur. """ def __init__( self, original_path: Path, new_path: Path, chdir: Optional[Callable[[Path], None]] = None, on_push: Optional[Callable[[Path], dict]] = None, on_pop: Optional[Callable[[dict], None]] = None, ): self.original_path = original_path self.new_path = new_path self._on_push = on_push self._on_pop = on_pop self._chdir = chdir or os.chdir # Initiate the change now so you can still use this class # on methods that are not intended to be used in a context. if self.original_path != self.new_path: self._chdir(new_path) self._cache: dict = {} if self._on_push is None else self._on_push(new_path) self._did_change = True else: self._cache = {} self._did_change = False def __enter__(self): return self.new_path def __exit__(self, *args): if not self._did_change: # Don't do anything. Nothing changed. return # Handle the return to the original path. self._chdir(self.original_path) if self._on_pop: self._on_pop(self._cache)
ChangeDirectory
python
huggingface__transformers
src/transformers/models/bros/modeling_bros.py
{ "start": 2363, "end": 3187 }
class ____(nn.Module): # Reference: https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py#L15 def __init__(self, config): super().__init__() self.dim_bbox_sinusoid_emb_1d = config.dim_bbox_sinusoid_emb_1d inv_freq = 1 / ( 10000 ** (torch.arange(0.0, self.dim_bbox_sinusoid_emb_1d, 2.0) / self.dim_bbox_sinusoid_emb_1d) ) self.register_buffer("inv_freq", inv_freq) def forward(self, pos_seq: torch.Tensor) -> torch.Tensor: seq_size = pos_seq.size() b1, b2, b3 = seq_size sinusoid_inp = pos_seq.view(b1, b2, b3, 1) * self.inv_freq.view(1, 1, 1, self.dim_bbox_sinusoid_emb_1d // 2) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) return pos_emb
BrosPositionalEmbedding1D
python
scrapy__scrapy
tests/test_exporters.py
{ "start": 5954, "end": 6250 }
class ____(TestBaseItemExporter): def _get_exporter(self, **kwargs): return PprintItemExporter(self.output, **kwargs) def _check_output(self): self._assert_expected_item( eval(self.output.getvalue()) # pylint: disable=eval-used )
TestPprintItemExporter
python
apache__airflow
providers/microsoft/winrm/tests/unit/microsoft/winrm/hooks/test_winrm.py
{ "start": 1139, "end": 8988 }
class ____: def test_get_conn_missing_remote_host(self): with pytest.raises(AirflowException): WinRMHook().get_conn() @patch("airflow.providers.microsoft.winrm.hooks.winrm.Protocol") def test_get_conn_error(self, mock_protocol): mock_protocol.side_effect = Exception("Error") with pytest.raises(AirflowException): WinRMHook(remote_host="host", password="pwd").get_conn() @patch("airflow.providers.microsoft.winrm.hooks.winrm.Protocol", autospec=True) @patch( "airflow.providers.microsoft.winrm.hooks.winrm.WinRMHook.get_connection", return_value=Connection( conn_id="", conn_type="", login="username", password="password", host="remote_host", extra="""{ "endpoint": "endpoint", "remote_port": 123, "transport": "plaintext", "service": "service", "keytab": "keytab", "ca_trust_path": "ca_trust_path", "cert_pem": "cert_pem", "cert_key_pem": "cert_key_pem", "server_cert_validation": "validate", "kerberos_delegation": "true", "read_timeout_sec": 124, "operation_timeout_sec": 123, "kerberos_hostname_override": "kerberos_hostname_override", "message_encryption": "auto", "credssp_disable_tlsv1_2": "true", "send_cbt": "false" }""", ), ) def test_get_conn_from_connection(self, mock_get_connection, mock_protocol): connection = mock_get_connection.return_value winrm_hook = WinRMHook(ssh_conn_id="conn_id") winrm_hook.get_conn() mock_get_connection.assert_called_once_with(winrm_hook.ssh_conn_id) mock_protocol.assert_called_once_with( endpoint=str(connection.extra_dejson["endpoint"]), transport=str(connection.extra_dejson["transport"]), username=connection.login, password=connection.password, service=str(connection.extra_dejson["service"]), keytab=str(connection.extra_dejson["keytab"]), ca_trust_path=str(connection.extra_dejson["ca_trust_path"]), cert_pem=str(connection.extra_dejson["cert_pem"]), cert_key_pem=str(connection.extra_dejson["cert_key_pem"]), server_cert_validation=str(connection.extra_dejson["server_cert_validation"]), 
kerberos_delegation=str(connection.extra_dejson["kerberos_delegation"]).lower() == "true", read_timeout_sec=int(connection.extra_dejson["read_timeout_sec"]), operation_timeout_sec=int(connection.extra_dejson["operation_timeout_sec"]), kerberos_hostname_override=str(connection.extra_dejson["kerberos_hostname_override"]), message_encryption=str(connection.extra_dejson["message_encryption"]), credssp_disable_tlsv1_2=str(connection.extra_dejson["credssp_disable_tlsv1_2"]).lower() == "true", send_cbt=str(connection.extra_dejson["send_cbt"]).lower() == "true", ) @patch("airflow.providers.microsoft.winrm.hooks.winrm.getuser", return_value="user") @patch("airflow.providers.microsoft.winrm.hooks.winrm.Protocol") def test_get_conn_no_username(self, mock_protocol, mock_getuser): winrm_hook = WinRMHook(remote_host="host", password="password") winrm_hook.get_conn() assert mock_getuser.return_value == winrm_hook.username @patch("airflow.providers.microsoft.winrm.hooks.winrm.Protocol") def test_get_conn_no_endpoint(self, mock_protocol): winrm_hook = WinRMHook(remote_host="host", password="password") winrm_hook.get_conn() assert f"http://{winrm_hook.remote_host}:{winrm_hook.remote_port}/wsman" == winrm_hook.endpoint @patch("airflow.providers.microsoft.winrm.hooks.winrm.Protocol", autospec=True) @patch( "airflow.providers.microsoft.winrm.hooks.winrm.WinRMHook.get_connection", return_value=Connection( conn_id="", conn_type="", login="username", password="password", host="remote_host", extra="""{ "endpoint": "endpoint", "remote_port": 123, "transport": "plaintext", "service": "service", "keytab": "keytab", "ca_trust_path": "ca_trust_path", "cert_pem": "cert_pem", "cert_key_pem": "cert_key_pem", "server_cert_validation": "validate", "kerberos_delegation": "true", "read_timeout_sec": 124, "operation_timeout_sec": 123, "kerberos_hostname_override": "kerberos_hostname_override", "message_encryption": "auto", "credssp_disable_tlsv1_2": "true", "send_cbt": "false" }""", ), ) def 
test_run_with_stdout(self, mock_get_connection, mock_protocol): winrm_hook = WinRMHook(ssh_conn_id="conn_id") mock_protocol.return_value.run_command = MagicMock(return_value="command_id") mock_protocol.return_value.get_command_output_raw = MagicMock( return_value=(b"stdout", b"stderr", 0, True) ) return_code, stdout_buffer, stderr_buffer = winrm_hook.run("dir") assert return_code == 0 assert stdout_buffer == [b"stdout"] assert stderr_buffer == [b"stderr"] @patch("airflow.providers.microsoft.winrm.hooks.winrm.Protocol", autospec=True) @patch( "airflow.providers.microsoft.winrm.hooks.winrm.WinRMHook.get_connection", return_value=Connection( conn_id="", conn_type="", login="username", password="password", host="remote_host", extra="""{ "endpoint": "endpoint", "remote_port": 123, "transport": "plaintext", "service": "service", "keytab": "keytab", "ca_trust_path": "ca_trust_path", "cert_pem": "cert_pem", "cert_key_pem": "cert_key_pem", "server_cert_validation": "validate", "kerberos_delegation": "true", "read_timeout_sec": 124, "operation_timeout_sec": 123, "kerberos_hostname_override": "kerberos_hostname_override", "message_encryption": "auto", "credssp_disable_tlsv1_2": "true", "send_cbt": "false" }""", ), ) def test_run_without_stdout_and_working_dir(self, mock_get_connection, mock_protocol): winrm_hook = WinRMHook(ssh_conn_id="conn_id") working_dir = "c:\\test" mock_protocol.return_value.run_command = MagicMock(return_value="command_id") mock_protocol.return_value.get_command_output_raw = MagicMock( return_value=(b"stdout", b"stderr", 0, True) ) mock_protocol.return_value.open_shell = MagicMock() return_code, stdout_buffer, stderr_buffer = winrm_hook.run( "dir", return_output=False, working_directory=working_dir ) mock_protocol.return_value.open_shell.assert_called_once_with(working_directory=working_dir) assert return_code == 0 assert not stdout_buffer assert stderr_buffer == [b"stderr"]
TestWinRMHook
python
airbytehq__airbyte
airbyte-ci/connectors/metadata_service/lib/tests/test_registry.py
{ "start": 5172, "end": 7285 }
class ____: """Tests for _apply_metrics_to_registry_entry function.""" @pytest.mark.parametrize( "connector_type,registry_entry,metrics_dict,expected_metrics,description", [ ( ConnectorTypes.SOURCE, {"sourceDefinitionId": "source-123", "name": "Test Source"}, {"source-123": {"cloud": {"usage": 100}}}, {"cloud": {"usage": 100}}, "source with matching metrics", ), ( ConnectorTypes.DESTINATION, {"destinationDefinitionId": "dest-456", "name": "Test Destination"}, {"dest-456": {"oss": {"usage": 50}}}, {"oss": {"usage": 50}}, "destination with matching metrics", ), ( ConnectorTypes.SOURCE, {"sourceDefinitionId": "source-999", "name": "No Metrics Source"}, {}, {}, "entry with no matching metrics", ), ], ) def test_apply_metrics_to_registry_entry_scenarios(self, connector_type, registry_entry, metrics_dict, expected_metrics, description): """Test metrics application to registry entries.""" result = _apply_metrics_to_registry_entry(registry_entry, connector_type, metrics_dict) assert result["generated"]["metrics"] == expected_metrics assert result["name"] == registry_entry["name"] def test_apply_metrics_to_registry_entry_preserves_existing_structure(self): """Test that existing registry entry structure is preserved.""" registry_entry = {"sourceDefinitionId": "source-123", "name": "Test Source", "existing_field": "value"} metrics_dict = {"source-123": {"cloud": {"usage": 100}}} result = _apply_metrics_to_registry_entry(registry_entry, ConnectorTypes.SOURCE, metrics_dict) assert result["existing_field"] == "value" assert result["name"] == "Test Source" assert result["generated"]["metrics"] == {"cloud": {"usage": 100}}
TestApplyMetricsToRegistryEntry
python
pandas-dev__pandas
pandas/tests/frame/methods/test_combine_first.py
{ "start": 300, "end": 21020 }
class ____: def test_combine_first_mixed(self): a = Series(["a", "b"], index=range(2)) b = Series(range(2), index=range(2)) f = DataFrame({"A": a, "B": b}) a = Series(["a", "b"], index=range(5, 7)) b = Series(range(2), index=range(5, 7)) g = DataFrame({"A": a, "B": b}) exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6]) combined = f.combine_first(g) tm.assert_frame_equal(combined, exp) def test_combine_first(self, float_frame): # disjoint head, tail = float_frame[:5], float_frame[5:] combined = head.combine_first(tail) reordered_frame = float_frame.reindex(combined.index) tm.assert_frame_equal(combined, reordered_frame) tm.assert_index_equal(combined.columns, float_frame.columns) tm.assert_series_equal(combined["A"], reordered_frame["A"]) # same index fcopy = float_frame.copy() fcopy["A"] = 1 del fcopy["C"] fcopy2 = float_frame.copy() fcopy2["B"] = 0 del fcopy2["D"] combined = fcopy.combine_first(fcopy2) assert (combined["A"] == 1).all() tm.assert_series_equal(combined["B"], fcopy["B"]) tm.assert_series_equal(combined["C"], fcopy2["C"]) tm.assert_series_equal(combined["D"], fcopy["D"]) # overlap head, tail = reordered_frame[:10].copy(), reordered_frame head["A"] = 1 combined = head.combine_first(tail) assert (combined["A"][:10] == 1).all() # reverse overlap tail.iloc[:10, tail.columns.get_loc("A")] = 0 combined = tail.combine_first(head) assert (combined["A"][:10] == 0).all() # no overlap f = float_frame[:10] g = float_frame[10:] combined = f.combine_first(g) tm.assert_series_equal(combined["A"].reindex(f.index), f["A"]) tm.assert_series_equal(combined["A"].reindex(g.index), g["A"]) # corner cases comb = float_frame.combine_first(DataFrame()) tm.assert_frame_equal(comb, float_frame) comb = DataFrame().combine_first(float_frame) tm.assert_frame_equal(comb, float_frame.sort_index()) comb = float_frame.combine_first(DataFrame(index=["faz", "boo"])) assert "faz" in comb.index # #2525 df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)]) df2 = 
DataFrame(columns=["b"]) result = df.combine_first(df2) assert "b" in result def test_combine_first_mixed_bug(self): idx = Index(["a", "b", "c", "e"]) ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx) ser2 = Series(["a", "b", "c", "e"], index=idx) ser3 = Series([12, 4, 5, 97], index=idx) frame1 = DataFrame({"col0": ser1, "col2": ser2, "col3": ser3}) idx = Index(["a", "b", "c", "f"]) ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx) ser2 = Series(["a", "b", "c", "f"], index=idx) ser3 = Series([12, 4, 5, 97], index=idx) frame2 = DataFrame({"col1": ser1, "col2": ser2, "col5": ser3}) combined = frame1.combine_first(frame2) assert len(combined.columns) == 5 def test_combine_first_same_as_in_update(self): # gh 3016 (same as in update) df = DataFrame( [[1.0, 2.0, False, True], [4.0, 5.0, True, False]], columns=["A", "B", "bool1", "bool2"], ) other = DataFrame([[45, 45]], index=[0], columns=["A", "B"]) result = df.combine_first(other) tm.assert_frame_equal(result, df) df.loc[0, "A"] = np.nan result = df.combine_first(other) df.loc[0, "A"] = 45 tm.assert_frame_equal(result, df) def test_combine_first_doc_example(self): # doc example df1 = DataFrame( {"A": [1.0, np.nan, 3.0, 5.0, np.nan], "B": [np.nan, 2.0, 3.0, np.nan, 6.0]} ) df2 = DataFrame( { "A": [5.0, 2.0, 4.0, np.nan, 3.0, 7.0], "B": [np.nan, np.nan, 3.0, 4.0, 6.0, 8.0], } ) result = df1.combine_first(df2) expected = DataFrame({"A": [1, 2, 3, 5, 3, 7.0], "B": [np.nan, 2, 3, 4, 6, 8]}) tm.assert_frame_equal(result, expected) def test_combine_first_return_obj_type_with_bools(self): # GH3552 df1 = DataFrame( [[np.nan, 3.0, True], [-4.6, np.nan, True], [np.nan, 7.0, False]] ) df2 = DataFrame([[-42.6, np.nan, True], [-5.0, 1.6, False]], index=[1, 2]) expected = Series([True, True, False], name=2, dtype=bool) result_12 = df1.combine_first(df2)[2] tm.assert_series_equal(result_12, expected) result_21 = df2.combine_first(df1)[2] tm.assert_series_equal(result_21, expected) @pytest.mark.parametrize( "data1, data2, 
data_expected", ( ( [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], [pd.NaT, pd.NaT, pd.NaT], [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], ), ( [pd.NaT, pd.NaT, pd.NaT], [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], ), ( [datetime(2000, 1, 2), pd.NaT, pd.NaT], [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], [datetime(2000, 1, 2), datetime(2000, 1, 2), datetime(2000, 1, 3)], ), ( [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], [datetime(2000, 1, 2), pd.NaT, pd.NaT], [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], ), ), ) def test_combine_first_convert_datatime_correctly( self, data1, data2, data_expected ): # GH 3593 df1, df2 = DataFrame({"a": data1}), DataFrame({"a": data2}) result = df1.combine_first(df2) expected = DataFrame({"a": data_expected}) tm.assert_frame_equal(result, expected) def test_combine_first_align_nan(self): # GH 7509 (not fixed) ts = pd.Timestamp("2011-01-01").as_unit("s") dfa = DataFrame([[ts, 2]], columns=["a", "b"]) dfb = DataFrame([[4], [5]], columns=["b"]) assert dfa["a"].dtype == "datetime64[s]" assert dfa["b"].dtype == "int64" res = dfa.combine_first(dfb) exp = DataFrame( {"a": [ts, pd.NaT], "b": [2, 5]}, columns=["a", "b"], ) tm.assert_frame_equal(res, exp) assert res["a"].dtype == "datetime64[s]" # TODO: this must be int64 assert res["b"].dtype == "int64" res = dfa.iloc[:0].combine_first(dfb) exp = DataFrame({"a": [np.nan, np.nan], "b": [4, 5]}, columns=["a", "b"]) tm.assert_frame_equal(res, exp) # TODO: this must be datetime64 assert res["a"].dtype == "float64" # TODO: this must be int64 assert res["b"].dtype == "int64" def test_combine_first_timezone(self, unit): # see gh-7630 data1 = pd.to_datetime("20100101 01:01").tz_localize("UTC").as_unit(unit) df1 = DataFrame( columns=["UTCdatetime", "abc"], data=data1, index=pd.date_range("20140627", 
periods=1, unit=unit), ) data2 = pd.to_datetime("20121212 12:12").tz_localize("UTC").as_unit(unit) df2 = DataFrame( columns=["UTCdatetime", "xyz"], data=data2, index=pd.date_range("20140628", periods=1, unit=unit), ) res = df2[["UTCdatetime"]].combine_first(df1) exp = DataFrame( { "UTCdatetime": [ pd.Timestamp("2010-01-01 01:01", tz="UTC"), pd.Timestamp("2012-12-12 12:12", tz="UTC"), ], "abc": [pd.Timestamp("2010-01-01 01:01:00", tz="UTC"), pd.NaT], }, columns=["UTCdatetime", "abc"], index=pd.date_range("20140627", periods=2, freq="D", unit=unit), dtype=f"datetime64[{unit}, UTC]", ) assert res["UTCdatetime"].dtype == f"datetime64[{unit}, UTC]" assert res["abc"].dtype == f"datetime64[{unit}, UTC]" tm.assert_frame_equal(res, exp) def test_combine_first_timezone2(self, unit): # see gh-10567 dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="UTC", unit=unit) df1 = DataFrame({"DATE": dts1}) dts2 = pd.date_range("2015-01-03", "2015-01-05", tz="UTC", unit=unit) df2 = DataFrame({"DATE": dts2}) res = df1.combine_first(df2) tm.assert_frame_equal(res, df1) assert res["DATE"].dtype == f"datetime64[{unit}, UTC]" def test_combine_first_timezone3(self, unit): dts1 = pd.DatetimeIndex( ["2011-01-01", "NaT", "2011-01-03", "2011-01-04"], tz="US/Eastern" ).as_unit(unit) df1 = DataFrame({"DATE": dts1}, index=[1, 3, 5, 7]) dts2 = pd.DatetimeIndex( ["2012-01-01", "2012-01-02", "2012-01-03"], tz="US/Eastern" ).as_unit(unit) df2 = DataFrame({"DATE": dts2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = pd.DatetimeIndex( [ "2011-01-01", "2012-01-01", "NaT", "2012-01-02", "2011-01-03", "2011-01-04", ], tz="US/Eastern", ).as_unit(unit) exp = DataFrame({"DATE": exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) def test_combine_first_timezone4(self, unit): # different tz dts1 = pd.date_range("2015-01-01", "2015-01-05", tz="US/Eastern", unit=unit) df1 = DataFrame({"DATE": dts1}) dts2 = pd.date_range("2015-01-03", "2015-01-05", unit=unit) df2 = DataFrame({"DATE": 
dts2}) # if df1 doesn't have NaN, keep its dtype res = df1.combine_first(df2) tm.assert_frame_equal(res, df1) assert res["DATE"].dtype == f"datetime64[{unit}, US/Eastern]" def test_combine_first_timezone5(self, unit): dts1 = pd.date_range("2015-01-01", "2015-01-02", tz="US/Eastern", unit=unit) df1 = DataFrame({"DATE": dts1}) dts2 = pd.date_range("2015-01-01", "2015-01-03", unit=unit) df2 = DataFrame({"DATE": dts2}) res = df1.combine_first(df2) exp_dts = [ pd.Timestamp("2015-01-01", tz="US/Eastern"), pd.Timestamp("2015-01-02", tz="US/Eastern"), pd.Timestamp("2015-01-03"), ] exp = DataFrame({"DATE": exp_dts}) tm.assert_frame_equal(res, exp) assert res["DATE"].dtype == "object" def test_combine_first_timedelta(self): data1 = pd.TimedeltaIndex(["1 day", "NaT", "3 day", "4day"]) df1 = DataFrame({"TD": data1}, index=[1, 3, 5, 7]) data2 = pd.TimedeltaIndex(["10 day", "11 day", "12 day"]) df2 = DataFrame({"TD": data2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = pd.TimedeltaIndex( ["1 day", "10 day", "NaT", "11 day", "3 day", "4 day"] ) exp = DataFrame({"TD": exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) assert res["TD"].dtype == "timedelta64[ns]" def test_combine_first_period(self): data1 = pd.PeriodIndex(["2011-01", "NaT", "2011-03", "2011-04"], freq="M") df1 = DataFrame({"P": data1}, index=[1, 3, 5, 7]) data2 = pd.PeriodIndex(["2012-01-01", "2012-02", "2012-03"], freq="M") df2 = DataFrame({"P": data2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = pd.PeriodIndex( ["2011-01", "2012-01", "NaT", "2012-02", "2011-03", "2011-04"], freq="M" ) exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) assert res["P"].dtype == data1.dtype # different freq dts2 = pd.PeriodIndex(["2012-01-01", "2012-01-02", "2012-01-03"], freq="D") df2 = DataFrame({"P": dts2}, index=[2, 4, 5]) res = df1.combine_first(df2) exp_dts = [ pd.Period("2011-01", freq="M"), pd.Period("2012-01-01", freq="D"), pd.NaT, 
pd.Period("2012-01-02", freq="D"), pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M"), ] exp = DataFrame({"P": exp_dts}, index=[1, 2, 3, 4, 5, 7]) tm.assert_frame_equal(res, exp) assert res["P"].dtype == "object" def test_combine_first_int(self): # GH14687 - integer series that do no align exactly df1 = DataFrame({"a": [0, 1, 3, 5]}, dtype="int64") df2 = DataFrame({"a": [1, 4]}, dtype="int64") result_12 = df1.combine_first(df2) expected_12 = DataFrame({"a": [0, 1, 3, 5]}) tm.assert_frame_equal(result_12, expected_12) result_21 = df2.combine_first(df1) expected_21 = DataFrame({"a": [1, 4, 3, 5]}) tm.assert_frame_equal(result_21, expected_21) @pytest.mark.parametrize("val", [1, 1.0]) def test_combine_first_with_asymmetric_other(self, val): # see gh-20699 df1 = DataFrame({"isNum": [val]}) df2 = DataFrame({"isBool": [True]}) res = df1.combine_first(df2) exp = DataFrame({"isNum": [val], "isBool": [True]}) tm.assert_frame_equal(res, exp) def test_combine_first_string_dtype_only_na(self, nullable_string_dtype): # GH: 37519 df = DataFrame( {"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype ) df2 = DataFrame({"a": ["85"], "b": [pd.NA]}, dtype=nullable_string_dtype) df.set_index(["a", "b"], inplace=True) df2.set_index(["a", "b"], inplace=True) result = df.combine_first(df2) expected = DataFrame( {"a": ["962", "85"], "b": [pd.NA] * 2}, dtype=nullable_string_dtype ).set_index(["a", "b"]) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "wide_val, dtype", ( (1666880195890293744, "UInt64"), (-1666880195890293744, "Int64"), ), ) def test_combine_first_preserve_EA_precision(self, wide_val, dtype): # GH#60128 df1 = DataFrame({"A": [wide_val, 5]}, dtype=dtype) df2 = DataFrame({"A": [6, 7, wide_val]}, dtype=dtype) result = df1.combine_first(df2) expected = DataFrame({"A": [wide_val, 5, wide_val]}, dtype=dtype) tm.assert_frame_equal(result, expected) def test_combine_first_non_unique_columns(self): # GH#29135 df1 = DataFrame([[1, np.nan], 
[3, 4]], columns=["P", "Q"], index=["A", "B"]) df2 = DataFrame( [[5, 6, 7], [8, 9, np.nan]], columns=["P", "Q", "Q"], index=["A", "B"] ) result = df1.combine_first(df2) expected = DataFrame( [[1, 6.0, 7.0], [3, 4.0, 4.0]], index=["A", "B"], columns=["P", "Q", "Q"] ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "scalar1, scalar2", [ (datetime(2020, 1, 1), datetime(2020, 1, 2)), (pd.Period("2020-01-01", "D"), pd.Period("2020-01-02", "D")), (pd.Timedelta("89 days"), pd.Timedelta("60 min")), (pd.Interval(left=0, right=1), pd.Interval(left=2, right=3, closed="left")), ], ) def test_combine_first_timestamp_bug(scalar1, scalar2, nulls_fixture): # GH28481 na_value = nulls_fixture frame = DataFrame([[na_value, na_value]], columns=["a", "b"]) other = DataFrame([[scalar1, scalar2]], columns=["b", "c"]) common_dtype = find_common_type([frame.dtypes["b"], other.dtypes["b"]]) if ( is_dtype_equal(common_dtype, "object") or frame.dtypes["b"] == other.dtypes["b"] or frame.dtypes["b"].kind == frame.dtypes["b"].kind == "M" ): val = scalar1 else: val = na_value result = frame.combine_first(other) expected = DataFrame([[na_value, val, scalar2]], columns=["a", "b", "c"]) expected["b"] = expected["b"].astype(common_dtype) tm.assert_frame_equal(result, expected) def test_combine_first_timestamp_bug_NaT(): # GH28481 frame = DataFrame([[pd.NaT, pd.NaT]], columns=["a", "b"]) other = DataFrame( [[datetime(2020, 1, 1), datetime(2020, 1, 2)]], columns=["b", "c"] ) result = frame.combine_first(other) expected = DataFrame( [[pd.NaT, datetime(2020, 1, 1), datetime(2020, 1, 2)]], columns=["a", "b", "c"] ) tm.assert_frame_equal(result, expected) def test_combine_first_with_nan_multiindex(): # gh-36562 mi1 = MultiIndex.from_arrays( [["b", "b", "c", "a", "b", np.nan], [1, 2, 3, 4, 5, 6]], names=["a", "b"] ) df = DataFrame({"c": [1, 1, 1, 1, 1, 1]}, index=mi1) mi2 = MultiIndex.from_arrays( [["a", "b", "c", "a", "b", "d"], [1, 1, 1, 1, 1, 1]], names=["a", "b"] ) s = Series([1, 2, 3, 
4, 5, 6], index=mi2) res = df.combine_first(DataFrame({"d": s})) mi_expected = MultiIndex.from_arrays( [ ["a", "a", "a", "b", "b", "b", "b", "c", "c", "d", np.nan], [1, 1, 4, 1, 1, 2, 5, 1, 3, 1, 6], ], names=["a", "b"], ) expected = DataFrame( { "c": [np.nan, np.nan, 1, 1, 1, 1, 1, np.nan, 1, np.nan, 1], "d": [1.0, 4.0, np.nan, 2.0, 5.0, np.nan, np.nan, 3.0, np.nan, 6.0, np.nan], }, index=mi_expected, ) tm.assert_frame_equal(res, expected) def test_combine_preserve_dtypes(): # GH7509 a_column = Series(["a", "b"], index=range(2)) b_column = Series(range(2), index=range(2)) df1 = DataFrame({"A": a_column, "B": b_column}) c_column = Series(["a", "b"], index=range(5, 7)) b_column = Series(range(-1, 1), index=range(5, 7)) df2 = DataFrame({"B": b_column, "C": c_column}) expected = DataFrame( { "A": ["a", "b", np.nan, np.nan], "B": [0, 1, -1, 0], "C": [np.nan, np.nan, "a", "b"], }, index=[0, 1, 5, 6], ) combined = df1.combine_first(df2) tm.assert_frame_equal(combined, expected) def test_combine_first_duplicates_rows_for_nan_index_values(): # GH39881 df1 = DataFrame( {"x": [9, 10, 11]}, index=MultiIndex.from_arrays([[1, 2, 3], [np.nan, 5, 6]], names=["a", "b"]), ) df2 = DataFrame( {"y": [12, 13, 14]}, index=MultiIndex.from_arrays([[1, 2, 4], [np.nan, 5, 7]], names=["a", "b"]), ) expected = DataFrame( { "x": [9.0, 10.0, 11.0, np.nan], "y": [12.0, 13.0, np.nan, 14.0], }, index=MultiIndex.from_arrays( [[1, 2, 3, 4], [np.nan, 5, 6, 7]], names=["a", "b"] ), ) combined = df1.combine_first(df2) tm.assert_frame_equal(combined, expected) def test_combine_first_int64_not_cast_to_float64(): # GH 28613 df_1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) df_2 = DataFrame({"A": [1, 20, 30], "B": [40, 50, 60], "C": [12, 34, 65]}) result = df_1.combine_first(df_2) expected = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [12, 34, 65]}) tm.assert_frame_equal(result, expected) def test_midx_losing_dtype(): # GH#49830 midx = MultiIndex.from_arrays([[0, 0], [np.nan, np.nan]]) midx2 = 
MultiIndex.from_arrays([[1, 1], [np.nan, np.nan]]) df1 = DataFrame({"a": [None, 4]}, index=midx) df2 = DataFrame({"a": [3, 3]}, index=midx2) result = df1.combine_first(df2) expected_midx = MultiIndex.from_arrays( [[0, 0, 1, 1], [np.nan, np.nan, np.nan, np.nan]] ) expected = DataFrame({"a": [np.nan, 4, 3, 3]}, index=expected_midx) tm.assert_frame_equal(result, expected) def test_combine_first_empty_columns(): left = DataFrame(columns=["a", "b"]) right = DataFrame(columns=["a", "c"]) result = left.combine_first(right) expected = DataFrame(columns=["a", "b", "c"]) tm.assert_frame_equal(result, expected) def test_combine_first_preserve_column_order(): # GH#60427 df1 = DataFrame({"B": [1, 2, 3], "A": [4, None, 6]}) df2 = DataFrame({"A": [5]}, index=[1]) result = df1.combine_first(df2) expected = DataFrame({"B": [1, 2, 3], "A": [4.0, 5.0, 6.0]}) tm.assert_frame_equal(result, expected)
TestDataFrameCombineFirst
python
run-llama__llama_index
llama-index-integrations/tools/llama-index-tools-exa/llama_index/tools/exa/base.py
{ "start": 183, "end": 7388 }
class ____(BaseToolSpec): """Exa tool spec.""" spec_functions = [ "search", "retrieve_documents", "search_and_retrieve_documents", "search_and_retrieve_highlights", "find_similar", "current_date", ] def __init__( self, api_key: str, verbose: bool = True, max_characters: int = 2000, ) -> None: """Initialize with parameters.""" from exa_py import Exa self.client = Exa(api_key=api_key, user_agent="llama-index") self._verbose = verbose # max characters for the text field in the search_and_contents function self._max_characters = max_characters def search( self, query: str, num_results: Optional[int] = 10, include_domains: Optional[List[str]] = None, exclude_domains: Optional[List[str]] = None, start_published_date: Optional[str] = None, end_published_date: Optional[str] = None, use_autoprompt: bool = True, type: str = "magic", ) -> List: """ Exa allows you to use a natural language query to search the internet. Args: query (str): A natural language query phrased as an answer for what the link provides, ie: "This is the latest news about space:" num_results (Optional[int]): Number of results to return. Defaults to 10. include_domains (Optional[List(str)]): A list of top level domains like ["wsj.com"] to limit the search to specific sites. exclude_domains (Optional[List(str)]): Top level domains to exclude. start_published_date (Optional[str]): A date string like "2020-06-15". 
Get the date from `current_date` end_published_date (Optional[str]): End date string """ response = self.client.search( query, num_results=num_results, include_domains=include_domains, exclude_domains=exclude_domains, start_published_date=start_published_date, end_published_date=end_published_date, use_autoprompt=use_autoprompt, type=type, ) if self._verbose: print(f"[Exa Tool] Autoprompt: {response.autoprompt_string}") return [ {"title": result.title, "url": result.url, "id": result.id} for result in response.results ] def retrieve_documents(self, ids: List[str]) -> List[Document]: """ Retrieve a list of document texts returned by `exa_search`, using the ID field. Args: ids (List(str)): the ids of the documents to retrieve """ response = self.client.get_contents(ids) return [Document(text=result.text) for result in response.results] def find_similar( self, url: str, num_results: Optional[int] = 3, start_published_date: Optional[str] = None, end_published_date: Optional[str] = None, ) -> List: """ Retrieve a list of similar documents to a given url. Args: url (str): The web page to find similar results of num_results (Optional[int]): Number of results to return. Default 3. start_published_date (Optional[str]): A date string like "2020-06-15" end_published_date (Optional[str]): End date string """ response = self.client.find_similar( url, num_results=num_results, start_published_date=start_published_date, end_published_date=end_published_date, ) return [ {"title": result.title, "url": result.url, "id": result.id} for result in response.results ] def search_and_retrieve_documents( self, query: str, num_results: Optional[int] = 10, include_domains: Optional[List[str]] = None, exclude_domains: Optional[List[str]] = None, start_published_date: Optional[str] = None, end_published_date: Optional[str] = None, use_autoprompt: bool = True, type: str = "magic", ) -> List[Document]: """ Combines the functionality of `search` and `retrieve_documents`. 
Args: query (str): the natural language query num_results (Optional[int]): Number of results. Defaults to 10. include_domains (Optional[List(str)]): A list of top level domains to search, like ["wsj.com"] exclude_domains (Optional[List(str)]): Top level domains to exclude. start_published_date (Optional[str]): A date string like "2020-06-15". end_published_date (Optional[str]): End date string """ response = self.client.search_and_contents( query, num_results=num_results, include_domains=include_domains, exclude_domains=exclude_domains, start_published_date=start_published_date, end_published_date=end_published_date, use_autoprompt=use_autoprompt, text={"max_characters": self._max_characters}, type=type, ) if self._verbose: print(f"[Exa Tool] Autoprompt: {response.autoprompt_string}") return [Document(text=document.text) for document in response.results] def search_and_retrieve_highlights( self, query: str, num_results: Optional[int] = 10, include_domains: Optional[List[str]] = None, exclude_domains: Optional[List[str]] = None, start_published_date: Optional[str] = None, end_published_date: Optional[str] = None, use_autoprompt: bool = True, type: str = "magic", ) -> List[Document]: """ Searches and retrieves highlights (intelligent snippets from the document). Args: query (str): the natural language query num_results (Optional[int]): Number of results. Defaults to 10. include_domains (Optional[List(str)]): A list of top level domains to search, like ["wsj.com"] exclude_domains (Optional[List(str)]): Top level domains to exclude. start_published_date (Optional[str]): A date string like "2020-06-15". 
end_published_date (Optional[str]): End date string """ response = self.client.search_and_contents( query, num_results=num_results, include_domains=include_domains, exclude_domains=exclude_domains, start_published_date=start_published_date, end_published_date=end_published_date, use_autoprompt=use_autoprompt, highlights=True, type=type, ) if self._verbose: print(f"[Exa Tool] Autoprompt: {response.autoprompt_string}") return [Document(text=document.highlights[0]) for document in response.results] def current_date(self): """ A function to return todays date. Call this before any other functions that take timestamps as an argument """ return datetime.date.today()
ExaToolSpec
python
tensorflow__tensorflow
tensorflow/python/ops/summary_ops_v2.py
{ "start": 11931, "end": 14102 }
class ____(SummaryWriter): """Implementation of SummaryWriter using a SummaryWriterInterface resource.""" def __init__(self, create_fn, init_op_fn, mesh=None): if mesh is not None: with dtensor_api.default_mesh(mesh.host_mesh()): self._resource = create_fn() self._init_op = init_op_fn(self._resource) else: self._resource = create_fn() self._init_op = init_op_fn(self._resource) self._closed = False if context.executing_eagerly(): self._set_up_resource_deleter() else: ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, self._init_op) self._mesh = mesh # Extension point to be overridden by subclasses to customize deletion. def _set_up_resource_deleter(self): self._resource_deleter = resource_variable_ops.EagerResourceDeleter( handle=self._resource, handle_device="cpu:0") def set_as_default(self, step=None): """See `SummaryWriter.set_as_default`.""" if context.executing_eagerly() and self._closed: raise RuntimeError(f"SummaryWriter {self!r} is already closed") super().set_as_default(step) def as_default(self, step=None): """See `SummaryWriter.as_default`.""" if context.executing_eagerly() and self._closed: raise RuntimeError(f"SummaryWriter {self!r} is already closed") return super().as_default(step) def init(self): """See `SummaryWriter.init`.""" if context.executing_eagerly() and self._closed: raise RuntimeError(f"SummaryWriter {self!r} is already closed") return self._init_op def flush(self): """See `SummaryWriter.flush`.""" if context.executing_eagerly() and self._closed: return with ops.device("cpu:0"): return gen_summary_ops.flush_summary_writer(self._resource) def close(self): """See `SummaryWriter.close`.""" if context.executing_eagerly() and self._closed: return try: with ops.control_dependencies([self.flush()]): with ops.device("cpu:0"): return gen_summary_ops.close_summary_writer(self._resource) finally: if context.executing_eagerly(): self._closed = True
_ResourceSummaryWriter
python
ansible__ansible
test/lib/ansible_test/_internal/host_profiles.py
{ "start": 68983, "end": 70146 }
class ____(SshTargetHostProfile[WindowsInventoryConfig]): """Host profile for a Windows inventory.""" @property def name(self) -> str: """The name of the host profile.""" return self.config.path def get_controller_target_connections(self) -> list[SshConnection]: """Return SSH connection(s) for accessing the host as a target from the controller.""" inventory = parse_inventory(self.args, self.config.path) hosts = get_hosts(inventory, 'windows') identity_file = SshKey(self.args).key settings = [SshConnectionDetail( name=name, host=config['ansible_host'], port=22, user=config['ansible_user'], identity_file=identity_file, shell_type='powershell', ) for name, config in hosts.items()] if settings: details = '\n'.join(f'{ssh.name} {ssh.user}@{ssh.host}:{ssh.port}' for ssh in settings) display.info(f'Generated SSH connection details from inventory:\n{details}', verbosity=1) return [SshConnection(self.args, setting) for setting in settings]
WindowsInventoryProfile
python
joke2k__faker
tests/providers/test_automotive.py
{ "start": 11097, "end": 11422 }
class ____(_SimpleAutomotiveTestMixin): """Test sk_SK automotive provider methods""" license_plate_pattern: Pattern = re.compile(r"(?P<prefix>[A-Z]{2})\d{3}[A-Z]{2}") def perform_extra_checks(self, license_plate, match): assert match.group("prefix") in SkSkAutomotiveProvider.license_plate_prefix
TestSkSk
python
spyder-ide__spyder
external-deps/spyder-kernels/spyder_kernels/console/tests/test_console_kernel.py
{ "start": 20189, "end": 49777 }
class ____(): def __init__(self, i): self.i = i + 10 def myFunc(i): return myClass(i) if __name__ == '__main__': with Pool(5) as p: result = p.map(myFunc, [1, 2, 3]) result = [r.i for r in result] """ p = tmpdir.join("mp-test.py") p.write(code) # Run code client.execute_interactive( "%runfile {}".format(repr(str(p))), timeout=TIMEOUT) # Verify that the `result` variable is defined client.inspect('result') msg = client.get_shell_msg(timeout=TIMEOUT) while "found" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] assert "[11, 12, 13]" in content['data']['text/plain'] @pytest.mark.flaky(max_runs=3) @pytest.mark.skipif( sys.platform == 'darwin' and sys.version_info[:2] == (3, 8), reason="Fails on Mac with Python 3.8") @pytest.mark.skipif( os.environ.get('USE_CONDA') != 'true', reason="Doesn't work with pip packages") def test_dask_multiprocessing(tmpdir): """ Test that dask multiprocessing works. """ # Command to start the kernel cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: # Remove all variables client.execute_interactive("%reset -f") # Write multiprocessing code to a file # Runs two times to verify that in the second case it doesn't break code = """ from dask.distributed import Client if __name__=='__main__': client = Client() client.close() x = 'hello' """ p = tmpdir.join("mp-test.py") p.write(code) # Run code two times client.execute_interactive( "%runfile {}".format(repr(str(p))), timeout=TIMEOUT) client.execute_interactive( "%runfile {}".format(repr(str(p))), timeout=TIMEOUT) # Verify that the `x` variable is defined client.inspect('x') msg = client.get_shell_msg(timeout=TIMEOUT) while "found" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] @pytest.mark.flaky(max_runs=3) def test_runfile(tmpdir): """ Test that runfile uses the proper name space for execution. 
""" # Command to start the kernel cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: # Remove all variables client.execute_interactive("%reset -f", timeout=TIMEOUT) # Write defined variable code to a file code = "result = 'hello world'; error # make an error" d = tmpdir.join("defined-test.py") d.write(code) # Write undefined variable code to a file code = dedent(""" try: result3 = result except NameError: result2 = 'hello world' """) u = tmpdir.join("undefined-test.py") u.write(code) # Run code file `d` to define `result` even after an error client.execute_interactive( "%runfile {}".format(repr(str(d))), timeout=TIMEOUT) # Verify that `result` is defined in the current namespace client.inspect('result') msg = client.get_shell_msg(timeout=TIMEOUT) while "found" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] # Run code file `u` without current namespace client.execute_interactive( "%runfile {}".format(repr(str(u))), timeout=TIMEOUT) # Verify that the variable `result2` is defined client.inspect('result2') msg = client.get_shell_msg(timeout=TIMEOUT) while "found" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] # Run code file `u` with current namespace msg = client.execute_interactive( "%runfile {} --current-namespace".format(repr(str(u))), timeout=TIMEOUT ) content = msg['content'] # Verify that the variable `result3` is defined client.inspect('result3') msg = client.get_shell_msg(timeout=TIMEOUT) while "found" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] # Verify that the variable `__file__` is undefined client.inspect('__file__') msg = client.get_shell_msg(timeout=TIMEOUT) while "found" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert not content['found'] 
@pytest.mark.flaky(max_runs=3) @pytest.mark.skipif( sys.platform == 'darwin' and sys.version_info[:2] == (3, 8), reason="Fails on Mac with Python 3.8") def test_np_threshold(kernel): """ Test that setting Numpy threshold doesn't make the Variable Explorer slow. """ cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: # Set Numpy threshold, suppress and formatter client.execute_interactive(""" import numpy as np; np.set_printoptions( threshold=np.inf, suppress=True, formatter={'float_kind':'{:0.2f}'.format}) """, timeout=TIMEOUT) # Create a big Numpy array and an array to check decimal format client.execute_interactive(""" x = np.random.rand(75000,5); a = np.array([123412341234.123412341234]) """, timeout=TIMEOUT) # Assert that NumPy threshold, suppress and formatter # are the same as the ones set by the user client.execute_interactive(""" t = np.get_printoptions()['threshold']; s = np.get_printoptions()['suppress']; f = np.get_printoptions()['formatter'] """, timeout=TIMEOUT) # Check correct decimal format client.inspect('a') msg = client.get_shell_msg(timeout=TIMEOUT) while "data" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content']['data']['text/plain'] assert "123412341234.12" in content # Check threshold value client.inspect('t') msg = client.get_shell_msg(timeout=TIMEOUT) while "data" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content']['data']['text/plain'] assert "inf" in content # Check suppress value client.inspect('s') msg = client.get_shell_msg(timeout=TIMEOUT) while "data" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content']['data']['text/plain'] assert "True" in content # Check formatter client.inspect('f') msg = client.get_shell_msg(timeout=TIMEOUT) while "data" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content']['data']['text/plain'] assert ( 
"{'float_kind': <built-in method format of str object" in content ) @pytest.mark.flaky(max_runs=3) @pytest.mark.skipif( not TURTLE_ACTIVE, reason="Doesn't work on non-interactive settings or Python without Tk") def test_turtle_launch(tmpdir): """Test turtle scripts running in the same kernel.""" # Command to start the kernel cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: # Remove all variables client.execute_interactive("%reset -f", timeout=TIMEOUT) # Write turtle code to a file code = """ import turtle wn=turtle.Screen() wn.bgcolor("lightgreen") tess = turtle.Turtle() # Create tess and set some attributes tess.color("hotpink") tess.pensize(5) tess.forward(80) # Make tess draw equilateral triangle tess.left(120) tess.forward(80) tess.left(120) tess.forward(80) tess.left(120) # Complete the triangle turtle.bye() """ p = tmpdir.join("turtle-test.py") p.write(code) # Run code client.execute_interactive( "%runfile {}".format(repr(str(p))), timeout=TIMEOUT) # Verify that the `tess` variable is defined client.inspect('tess') msg = client.get_shell_msg(timeout=TIMEOUT) while "found" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] # Write turtle code to a file code = code + "a = 10" p = tmpdir.join("turtle-test1.py") p.write(code) # Run code again client.execute_interactive( "%runfile {}".format(repr(str(p))), timeout=TIMEOUT) # Verify that the `a` variable is defined client.inspect('a') msg = client.get_shell_msg(timeout=TIMEOUT) while "found" not in msg['content']: msg = client.get_shell_msg(timeout=TIMEOUT) content = msg['content'] assert content['found'] @pytest.mark.flaky(max_runs=3) def test_matplotlib_inline(kernel): """Test that the default backend for our kernels is 'inline'.""" # Command to start the kernel cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: # Get current backend code = "import 
matplotlib; backend = matplotlib.get_backend()" reply = client.execute_interactive( code, user_expressions={'output': 'backend'}, timeout=TIMEOUT) # Transform value obtained through user_expressions user_expressions = reply['content']['user_expressions'] str_value = user_expressions['output']['data']['text/plain'] value = ast.literal_eval(str_value) # Assert backend is inline assert 'inline' in value @pytest.mark.anyio async def test_do_complete(kernel): """ Check do complete works in normal and debugging mode. """ await kernel.do_execute("abba = 1", True) assert kernel.get_value("abba") == 1 match = kernel.do_complete("ab", 2) if inspect.isawaitable(match): match = await match assert 'abba' in match['matches'] # test pdb pdb_obj = SpyderPdb() pdb_obj.curframe = inspect.currentframe() pdb_obj.completenames = lambda *ignore: ['baba'] kernel.shell._namespace_stack = [pdb_obj] match = kernel.do_complete('ba', 2) if inspect.isawaitable(match): match = await match assert 'baba' in match['matches'] pdb_obj.curframe = None @pytest.mark.parametrize("exclude_callables_and_modules", [True, False]) @pytest.mark.parametrize("exclude_unsupported", [True, False]) def test_callables_and_modules(kernel, exclude_callables_and_modules, exclude_unsupported): """ Tests that callables and modules are in the namespace view only when the right options are passed to the kernel. """ asyncio.run(kernel.do_execute('import numpy', True)) asyncio.run(kernel.do_execute('a = 10', True)) asyncio.run(kernel.do_execute('def f(x): return x', True)) settings = kernel.namespace_view_settings settings['exclude_callables_and_modules'] = exclude_callables_and_modules settings['exclude_unsupported'] = exclude_unsupported nsview = kernel.get_namespace_view() # Callables and modules should always be in nsview when the option # is active. 
if not exclude_callables_and_modules: assert 'numpy' in nsview.keys() assert 'f' in nsview.keys() else: assert 'numpy' not in nsview.keys() assert 'f' not in nsview.keys() # Other values should always be part of nsview assert 'a' in nsview.keys() # Restore settings for other tests settings['exclude_callables_and_modules'] = True settings['exclude_unsupported'] = False def test_comprehensions_with_locals_in_pdb(kernel): """ Test that evaluating comprehensions with locals works in Pdb. Also test that we use the right frame globals, in case the user wants to work with them. This is a regression test for spyder-ide/spyder#13909. """ pdb_obj = SpyderPdb() pdb_obj.curframe = inspect.currentframe() pdb_obj.curframe_locals = pdb_obj.curframe.f_locals kernel.shell._namespace_stack = [pdb_obj] # Create a local variable. kernel.shell.pdb_session.default('zz = 10') assert kernel.get_value('zz') == 10 # Run a list comprehension with this variable. kernel.shell.pdb_session.default("compr = [zz * i for i in [1, 2, 3]]") assert kernel.get_value('compr') == [10, 20, 30] # Check that the variable is not reported as being part of globals. kernel.shell.pdb_session.default("in_globals = 'zz' in globals()") assert kernel.get_value('in_globals') is False pdb_obj.curframe = None pdb_obj.curframe_locals = None def test_comprehensions_with_locals_in_pdb_2(kernel): """ Test that evaluating comprehensions with locals works in Pdb. This is a regression test for spyder-ide/spyder#16790. """ pdb_obj = SpyderPdb() pdb_obj.curframe = inspect.currentframe() pdb_obj.curframe_locals = pdb_obj.curframe.f_locals kernel.shell._namespace_stack = [pdb_obj] # Create a local variable. kernel.shell.pdb_session.default('aa = [1, 2]') kernel.shell.pdb_session.default('bb = [3, 4]') kernel.shell.pdb_session.default('res = []') # Run a list comprehension with this variable. 
kernel.shell.pdb_session.default( "for c0 in aa: res.append([(c0, c1) for c1 in bb])") assert kernel.get_value('res') == [[(1, 3), (1, 4)], [(2, 3), (2, 4)]] pdb_obj.curframe = None pdb_obj.curframe_locals = None def test_namespaces_in_pdb(kernel): """ Test namespaces in pdb """ # Define get_ipython for timeit get_ipython = lambda: kernel.shell # noqa kernel.shell.user_ns["test"] = 0 pdb_obj = SpyderPdb() pdb_obj.curframe = inspect.currentframe() pdb_obj.curframe_locals = pdb_obj.curframe.f_locals kernel.shell._namespace_stack = [pdb_obj] # Check adding something to globals works pdb_obj.default("globals()['test2'] = 0") assert pdb_obj.curframe.f_globals["test2"] == 0 # Create wrapper to check for errors old_error = pdb_obj.error pdb_obj._error_occured = False def error_wrapper(*args, **kwargs): print(args, kwargs) pdb_obj._error_occured = True return old_error(*args, **kwargs) pdb_obj.error = error_wrapper # Test globals are visible pdb_obj.curframe.f_globals["test3"] = 0 pdb_obj.default("%timeit test3") assert not pdb_obj._error_occured # Test locals are visible pdb_obj.curframe_locals["test4"] = 0 pdb_obj.default("%timeit test4") assert not pdb_obj._error_occured # Test user namespace is not visible pdb_obj.default("%timeit test") assert pdb_obj._error_occured pdb_obj.curframe = None pdb_obj.curframe_locals = None def test_functions_with_locals_in_pdb(kernel): """ Test that functions with locals work in Pdb. This is a regression test for spyder-ide/spyder-kernels#345 """ pdb_obj = SpyderPdb() Frame = namedtuple("Frame", ["f_globals"]) pdb_obj.curframe = Frame(f_globals=kernel.shell.user_ns) pdb_obj.curframe_locals = kernel.shell.user_ns kernel.shell._namespace_stack = [pdb_obj] # Create a local function. 
kernel.shell.pdb_session.default( 'def fun_a(): return [i for i in range(1)]') kernel.shell.pdb_session.default( 'zz = fun_a()') assert kernel.get_value('zz') == [0] kernel.shell.pdb_session.default( 'a = 1') kernel.shell.pdb_session.default( 'def fun_a(): return a') kernel.shell.pdb_session.default( 'zz = fun_a()') assert kernel.get_value('zz') == 1 pdb_obj.curframe = None pdb_obj.curframe_locals = None def test_functions_with_locals_in_pdb_2(kernel): """ Test that functions with locals work in Pdb. This is another regression test for spyder-ide/spyder-kernels#345 """ baba = 1 # noqa pdb_obj = SpyderPdb() pdb_obj.curframe = inspect.currentframe() pdb_obj.curframe_locals = pdb_obj.curframe.f_locals kernel.shell._namespace_stack = [pdb_obj] # Create a local function. kernel.shell.pdb_session.default( 'def fun_a(): return [i for i in range(1)]') kernel.shell.pdb_session.default( 'zz = fun_a()') assert kernel.get_value('zz') == [0] kernel.shell.pdb_session.default( 'a = 1') kernel.shell.pdb_session.default( 'def fun_a(): return a') kernel.shell.pdb_session.default( 'zz = fun_a()') assert kernel.get_value('zz') == 1 # Check baba is in locals and not globals kernel.shell.pdb_session.default( 'll = locals().keys()') assert "baba" in kernel.get_value('ll') kernel.shell.pdb_session.default( 'gg = globals().keys()') assert "baba" not in kernel.get_value('gg') pdb_obj.curframe = None pdb_obj.curframe_locals = None def test_locals_globals_in_pdb(kernel): """ Test thal locals and globals work properly in Pdb. 
""" a = 1 # noqa pdb_obj = SpyderPdb() pdb_obj.curframe = inspect.currentframe() pdb_obj.curframe_locals = pdb_obj.curframe.f_locals kernel.shell._namespace_stack = [pdb_obj] assert kernel.get_value('a') == 1 kernel.shell.pdb_session.default( 'test = "a" in globals()') assert kernel.get_value('test') is False kernel.shell.pdb_session.default( 'test = "a" in locals()') assert kernel.get_value('test') is True kernel.shell.pdb_session.default( 'def f(): return a') kernel.shell.pdb_session.default( 'test = f()') assert kernel.get_value('test') == 1 kernel.shell.pdb_session.default( 'a = 2') assert kernel.get_value('a') == 2 kernel.shell.pdb_session.default( 'test = "a" in globals()') assert kernel.get_value('test') is False kernel.shell.pdb_session.default( 'test = "a" in locals()') assert kernel.get_value('test') is True pdb_obj.curframe = None pdb_obj.curframe_locals = None @pytest.mark.flaky(max_runs=3) @pytest.mark.parametrize("backend", [None, 'inline', 'tk', 'qt']) @pytest.mark.skipif( os.environ.get('USE_CONDA') != 'true', reason="Doesn't work with pip packages" ) def test_get_interactive_backend(backend): """ Test that we correctly get the interactive backend set in the kernel. """ # This test passes locally but fails on CIs. Don't know why. 
if sys.platform == "darwin" and backend == "qt" and os.environ.get('CI'): return cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: # Set backend if backend is not None: client.execute_interactive( "%matplotlib {}".format(backend), timeout=TIMEOUT ) client.execute_interactive( "import time; time.sleep(.1)", timeout=TIMEOUT ) # Get backend code = "backend = get_ipython().kernel.get_mpl_interactive_backend()" reply = client.execute_interactive( code, user_expressions={'output': 'backend'}, timeout=TIMEOUT ) # Get value obtained through user_expressions user_expressions = reply['content']['user_expressions'] value = user_expressions['output']['data']['text/plain'] # remove quotes value = value[1:-1] # Assert we got the right interactive backend if backend is not None: assert value == backend else: assert value == 'inline' def test_global_message(tmpdir): """ Test that using `global` triggers a warning. """ # Command to start the kernel cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: # Remove all variables client.execute_interactive("%reset -f", timeout=TIMEOUT) # Write code with a global to a file code = ( "def foo1():\n" " global x\n" " x = 2\n" "x = 1\n" "print(x)\n" ) p = tmpdir.join("test.py") p.write(code) global found found = False def check_found(msg): if "text" in msg["content"]: if ("WARNING: This file contains a global statement" in msg["content"]["text"]): global found found = True # Run code in current namespace client.execute_interactive("%runfile {} --current-namespace".format( repr(str(p))), timeout=TIMEOUT, output_hook=check_found) assert not found # Run code in empty namespace client.execute_interactive( "%runfile {}".format(repr(str(p))), timeout=TIMEOUT, output_hook=check_found) assert found @pytest.mark.flaky(max_runs=3) def test_debug_namespace(tmpdir): """ Test that the kernel uses the proper namespace while debugging. 
""" # Command to start the kernel cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: # Write code to a file d = tmpdir.join("pdb-ns-test.py") d.write('def func():\n bb = "hello"\n breakpoint()\nfunc()') # Run code file `d` client.execute("%runfile {}".format(repr(str(d)))) # make sure that 'bb' returns 'hello' client.get_stdin_msg(timeout=TIMEOUT) client.input('bb') t0 = time.time() while True: assert time.time() - t0 < 5 msg = client.get_iopub_msg(timeout=TIMEOUT) if msg.get('msg_type') == 'stream': if 'hello' in msg["content"].get("text"): break # make sure that get_value('bb') returns 'hello' client.get_stdin_msg(timeout=TIMEOUT) client.input("get_ipython().kernel.get_value('bb')") t0 = time.time() while True: assert time.time() - t0 < 5 msg = client.get_iopub_msg(timeout=TIMEOUT) if msg.get('msg_type') == 'stream': if 'hello' in msg["content"].get("text"): break def test_interrupt_short_loop(): """ Test that the kernel can be interrupted by calling a comm handler. 
""" # Command to start the kernel cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: kernel_comm = CommBase() # Create new comm and send the highest protocol comm = Comm(kernel_comm._comm_name, client) comm.open(data={}) comm._send_channel = client.control_channel kernel_comm._register_comm(comm) client.execute_interactive("import time", timeout=TIMEOUT) # Try interrupting loop t0 = time.time() msg_id = client.execute("for i in range(100): time.sleep(.1)") time.sleep(.2) # Raise interrupt on control_channel kernel_comm.remote_call().raise_interrupt_signal() # Wait for shell message while True: delta = time.time() - t0 assert delta < 5 msg = client.get_shell_msg(timeout=TIMEOUT) if msg["parent_header"].get("msg_id") != msg_id: # not from my request continue break delta = time.time() - t0 assert delta < 5, ( "10 seconds long call should have been interrupted, so the " "interrupt signal was likely mishandled" ) @pytest.mark.skipif(os.name == "nt", reason="Windows doesn't do 'interrupting sleep'") def test_interrupt_long_sleep(): # Command to start the kernel cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: kernel_comm = CommBase() # Create new comm and send the highest protocol comm = Comm(kernel_comm._comm_name, client) comm.open(data={}) comm._send_channel = client.control_channel kernel_comm._register_comm(comm) client.execute_interactive("import time", timeout=TIMEOUT) # Try interrupting sleep t0 = time.time() msg_id = client.execute("time.sleep(10)") time.sleep(.2) # Raise interrupt on control_channel kernel_comm.remote_call().raise_interrupt_signal() # Wait for shell message while True: delta = time.time() - t0 assert delta < 5 msg = client.get_shell_msg(timeout=TIMEOUT) if msg["parent_header"].get("msg_id") != msg_id: # not from my request continue break delta = time.time() - t0 assert delta < 5, ( "10 seconds long call should have been interrupted, so the " "interrupt 
signal was likely mishandled" ) def test_enter_debug_after_interruption(): """ Test that we can enter the debugger after interrupting the current execution. """ # Command to start the kernel cmd = "from spyder_kernels.console import start; start.main()" with setup_kernel(cmd) as client: kernel_comm = CommBase() # Create new comm and send the highest protocol comm = Comm(kernel_comm._comm_name, client) comm.open(data={}) comm._send_channel = client.control_channel kernel_comm._register_comm(comm) client.execute_interactive("import time", timeout=TIMEOUT) # Try interrupting loop t0 = time.time() msg_id = client.execute("for i in range(100): time.sleep(.1)") time.sleep(.2) # Request to enter the debugger kernel_comm.remote_call().request_pdb_stop() # Wait for debug message while True: assert time.time() - t0 < 5 msg = client.get_iopub_msg(timeout=TIMEOUT) if msg.get('msg_type') == 'stream': print(msg["content"].get("text")) if msg["parent_header"].get("msg_id") != msg_id: # not from my request continue if msg.get('msg_type') == 'comm_msg': if msg["content"].get("data", {}).get("content", {}).get( 'call_name') == 'pdb_input': # pdb entered break comm.handle_msg(msg) assert time.time() - t0 < 5 def test_non_strings_in_locals(kernel): """ Test that we can hande non-string entries in `locals` when bulding the namespace view. This is a regression test for issue spyder-ide/spyder#19145 """ asyncio.run(kernel.do_execute('locals().update({1:2})', True)) nsview = repr(kernel.get_namespace_view()) assert "1:" in nsview def test_django_settings(kernel): """ Test that we don't generate errors when importing `django.conf.settings`. This is a regression test for issue spyder-ide/spyder#19516 """ import django asyncio.run(kernel.do_execute('from django.conf import settings', True)) nsview = repr(kernel.get_namespace_view()) assert "'settings':" in nsview def test_hard_link_pdb(tmpdir): """ Test that breakpoints on a file are recognised even when the path is different. 
""" # Create a file and a hard link d = tmpdir.join("file.py") d.write('def func():\n bb = "hello"\n') folder = tmpdir.join("folder") os.mkdir(folder) hard_link = folder.join("file.py") os.link(d, hard_link) # Make sure both paths point to the same file assert os.path.samefile(d, hard_link) # Make sure canonic returns the same path for a single file pdb_obj = SpyderPdb() assert pdb_obj.canonic(str(d)) == pdb_obj.canonic(str(hard_link)) @pytest.mark.skipif(not os.environ.get('CI'), reason="Only works on CIs") def test_get_pythonenv_info(kernel): """Test the output we get from this method.""" output = kernel.get_pythonenv_info() assert output["path"] == sys.executable if os.environ.get('USE_CONDA'): assert output["name"] == "test" assert output["env_type"] == PythonEnvType.Conda else: assert output["env_type"] in [ # This Custom here accounts for Linux packagers that run our tests # in their CIs PythonEnvType.Custom, PythonEnvType.Conda, ] # Check these keys are present. Otherwise we'll break Spyder. assert output["python_version"] == sys.version.split()[0] assert output["ipython_version"] == ipython_release.version assert output["sys_version"] == sys.version @pytest.mark.parametrize("prefix", ["%", "!"]) def test_disable_pkg_managers(kernel, capsys, prefix): """Test that we disable Python package manager magics and commands.""" pkg_manager = random.choice(kernel.shell._disabled_pkg_managers) asyncio.run(kernel.do_execute(f"{prefix}{pkg_manager}", True)) captured = capsys.readouterr() assert kernel.shell._disable_pkg_managers_msg[2:] == captured.out[1:-1] if __name__ == "__main__": pytest.main()
myClass
python
django__django
tests/fixtures_regress/tests.py
{ "start": 33539, "end": 34315 }
class ____(TestCase): """ #23612 -- fixtures path should be normalized to allow referencing relative paths on Windows. """ current_dir = os.path.abspath(os.path.dirname(__file__)) # relative_prefix is something like tests/fixtures_regress or # fixtures_regress depending on how runtests.py is invoked. # All path separators must be / in order to be a proper regression test on # Windows, so replace as appropriate. relative_prefix = os.path.relpath(current_dir, os.getcwd()).replace("\\", "/") fixtures = [relative_prefix + "/fixtures/absolute.json"] def test_fixtures_loaded(self): count = Absolute.objects.count() self.assertGreater(count, 0, "Fixtures not loaded properly.")
TestLoadFixtureFromOtherAppDirectory
python
geekcomputers__Python
BlackJack_game/blackjack_simulate.py
{ "start": 7678, "end": 8318 }
class ____(User): def __init__(self, name): super().__init__(name=name, role="Dealer", color="PURPLE") self.trigger = 0 def ask_insurance(self): buy_insurance = ( "(Insurance pay 2 to 1)\n" "\tMy Face card is an Ace.\n" "\tWould your like buy a insurance ?" ) self.speak(content=buy_insurance) def strategy_trigger(self, deck): if self.is_point("<", BASE_VALUE): self.obtain_card(deck) else: self.trigger += random.randint(0, 5) if self.trigger % 5 == 0: self.obtain_card(deck)
Dealer
python
walkccc__LeetCode
solutions/1133. Largest Unique Number/1133.py
{ "start": 0, "end": 187 }
class ____: def largestUniqueNumber(self, nums: list[int]) -> int: return max([num for num, freq in collections.Counter(nums).items() if freq == 1], default=-1)
Solution
python
spack__spack
lib/spack/spack/test/spec_list.py
{ "start": 1386, "end": 7477 }
class ____: @pytest.mark.regression("28749") @pytest.mark.parametrize( "specs,expected", [ # Constraints are ordered randomly ( [ { "matrix": [ ["^zmpi"], ["%gcc@4.5.0"], ["hypre", "libelf"], ["~shared"], ["cflags=-O3", 'cflags="-g -O0"'], ["^foo"], ] } ], [ "hypre cflags=-O3 ~shared %gcc@4.5.0 ^foo ^zmpi", 'hypre cflags="-g -O0" ~shared %gcc@4.5.0 ^foo ^zmpi', "libelf cflags=-O3 ~shared %gcc@4.5.0 ^foo ^zmpi", 'libelf cflags="-g -O0" ~shared %gcc@4.5.0 ^foo ^zmpi', ], ), # A constraint affects both the root and a dependency ( [{"matrix": [["version-test-root"], ["%gcc"], ["^version-test-pkg%gcc"]]}], ["version-test-root%gcc ^version-test-pkg%gcc"], ), ], ) def test_spec_list_constraint_ordering(self, specs, expected): result = SpecListParser().parse_user_specs(name="specs", yaml_list=specs) assert result.specs == [Spec(x) for x in expected] def test_mock_spec_list(self, parser_and_speclist): """Tests expected properties on the default mock spec list""" parser, mock_list = parser_and_speclist assert mock_list.specs_as_yaml_list == DEFAULT_EXPANSION assert mock_list.specs_as_constraints == DEFAULT_CONSTRAINTS assert mock_list.specs == DEFAULT_SPECS def test_spec_list_add(self, parser_and_speclist): parser, mock_list = parser_and_speclist mock_list.add("libdwarf") assert mock_list.specs_as_yaml_list == DEFAULT_EXPANSION + ["libdwarf"] assert mock_list.specs_as_constraints == DEFAULT_CONSTRAINTS + [[Spec("libdwarf")]] assert mock_list.specs == DEFAULT_SPECS + [Spec("libdwarf")] def test_spec_list_remove(self, parser_and_speclist): parser, mock_list = parser_and_speclist mock_list.remove("libelf") assert mock_list.specs_as_yaml_list + ["libelf"] == DEFAULT_EXPANSION assert mock_list.specs_as_constraints + [[Spec("libelf")]] == DEFAULT_CONSTRAINTS assert mock_list.specs + [Spec("libelf")] == DEFAULT_SPECS def test_spec_list_extension(self, parser_and_speclist): parser, mock_list = parser_and_speclist other_list = parser.parse_user_specs( name="specs", 
yaml_list=[{"matrix": [["callpath"], ["%intel@18"]]}] ) mock_list.extend(other_list) assert mock_list.specs_as_yaml_list == (DEFAULT_EXPANSION + other_list.specs_as_yaml_list) assert mock_list.specs == DEFAULT_SPECS + other_list.specs def test_spec_list_nested_matrices(self, parser_and_speclist): parser, _ = parser_and_speclist inner_matrix = [{"matrix": [["zlib", "libelf"], ["%gcc", "%intel"]]}] outer_addition = ["+shared", "~shared"] outer_matrix = [{"matrix": [inner_matrix, outer_addition]}] result = parser.parse_user_specs(name="specs", yaml_list=outer_matrix) expected_components = itertools.product( ["zlib", "libelf"], ["%gcc", "%intel"], ["+shared", "~shared"] ) def _reduce(*, combo): root = Spec(combo[0]) for x in combo[1:]: root.constrain(x) return root expected = [_reduce(combo=combo) for combo in expected_components] assert set(result.specs) == set(expected) @pytest.mark.regression("16897") def test_spec_list_recursion_specs_as_constraints(self): input = ["mpileaks", "$mpis", {"matrix": [["hypre"], ["$%gccs", "$%clangs"]]}, "libelf"] definitions = [ {"gccs": ["gcc@4.5.0"]}, {"clangs": ["clang@3.3"]}, {"mpis": ["zmpi@1.0", "mpich@3.0"]}, ] parser = SpecListParser() parser.parse_definitions(data=definitions) result = parser.parse_user_specs(name="specs", yaml_list=input) assert result.specs_as_yaml_list == DEFAULT_EXPANSION assert result.specs_as_constraints == DEFAULT_CONSTRAINTS assert result.specs == DEFAULT_SPECS @pytest.mark.regression("16841") def test_spec_list_matrix_exclude(self): parser = SpecListParser() result = parser.parse_user_specs( name="specs", yaml_list=[ { "matrix": [["multivalue-variant"], ["foo=bar", "foo=baz"]], "exclude": ["foo=bar"], } ], ) assert len(result.specs) == 1 def test_spec_list_exclude_with_abstract_hashes(self, install_mockery): # Put mpich in the database so it can be referred to by hash. 
mpich_1 = spack.concretize.concretize_one("mpich+debug") mpich_2 = spack.concretize.concretize_one("mpich~debug") PackageInstaller([mpich_1.package, mpich_2.package], explicit=True, fake=True).install() # Create matrix and exclude +debug, which excludes the first mpich after its abstract hash # is resolved. parser = SpecListParser() result = parser.parse_user_specs( name="specs", yaml_list=[ { "matrix": [ ["mpileaks"], ["^callpath"], [f"^mpich/{mpich_1.dag_hash(5)}", f"^mpich/{mpich_2.dag_hash(5)}"], ], "exclude": ["^mpich+debug"], } ], ) # Ensure that only mpich~debug is selected, and that the assembled spec remains abstract. assert len(result.specs) == 1 assert result.specs[0] == Spec(f"mpileaks ^callpath ^mpich/{mpich_2.dag_hash(5)}")
TestSpecList
python
PyCQA__pycodestyle
tests/test_blank_lines.py
{ "start": 5025, "end": 8098 }
class ____(object): @method_decorator def some_method(self): pass """) self.assertNoErrors(result) def test_top_level_fewer_follow_lines(self): """ It will trigger an error when less than 2 blank lines are found between a top level definitions and other top level code. """ result = errors_from_src(""" def a(): print('Something') a() """) self.assertEqual([ 'E305:5:1', # a call ], result) def test_top_level_fewer_follow_lines_comments(self): """ It will trigger an error when less than 2 blank lines are found between a top level definitions and other top level code, even if we have comments before """ result = errors_from_src(""" def a(): print('Something') # comment # another comment # With comment still needs 2 spaces before, # as comments are ignored. a() """) self.assertEqual([ 'E305:11:1', # a call ], result) def test_top_level_good_follow_lines(self): """ It not trigger an error when 2 blank lines are found between a top level definitions and other top level code. """ result = errors_from_src(""" def a(): print('Something') # Some comments in other parts. # More comments. # With the right spaces, # It will work, even when we have comments. a() """) self.assertNoErrors(result) def test_method_fewer_follow_lines(self): """ It will trigger an error when less than 1 blank line is found between a method and previous definitions. """ result = errors_from_src(""" def a(): x = 1 def b(): pass """) self.assertEqual([ 'E306:4:5', # b() call ], result) def test_method_nested_fewer_follow_lines(self): """ It will trigger an error when less than 1 blank line is found between a method and previous definitions, even when nested. """ result = errors_from_src(""" def a(): x = 2 def b(): x = 1 def c(): pass """) self.assertEqual([ 'E306:7:9', # c() call ], result) def test_method_nested_less_class(self): """ It will trigger an error when less than 1 blank line is found between a method and previous definitions, even when used to define a class. 
""" result = errors_from_src(""" def a(): x = 1 class C: pass """) self.assertEqual([ 'E306:4:5', # class C definition. ], result) def test_method_nested_ok(self): """ Will not trigger an error when 1 blank line is found found between a method and previous definitions, even when nested. """ result = errors_from_src(""" def a(): x = 2 def b(): x = 1 def c(): pass class C: pass """) self.assertNoErrors(result)
SomeClass
python
huggingface__transformers
src/transformers/models/superpoint/modeling_superpoint.py
{ "start": 4667, "end": 5637 }
class ____(nn.Module): def __init__( self, config: SuperPointConfig, in_channels: int, out_channels: int, add_pooling: bool = False ) -> None: super().__init__() self.conv_a = nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=1, padding=1, ) self.conv_b = nn.Conv2d( out_channels, out_channels, kernel_size=3, stride=1, padding=1, ) self.relu = nn.ReLU(inplace=True) self.pool = nn.MaxPool2d(kernel_size=2, stride=2) if add_pooling else None def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.relu(self.conv_a(hidden_states)) hidden_states = self.relu(self.conv_b(hidden_states)) if self.pool is not None: hidden_states = self.pool(hidden_states) return hidden_states
SuperPointConvBlock
python
pytorch__pytorch
torch/_export/passes/collect_tracepoints_pass.py
{ "start": 459, "end": 6522 }
class ____(PassBase): """ Performs constant folding and constant propagation. """ def __init__( self, specs: dict[str, ModuleCallSignature], sig: ExportGraphSignature ) -> None: super().__init__() self.specs = specs self.sig = sig def call(self, gm: torch.fx.GraphModule) -> PassResult | None: def get_arg_spec(arg) -> TensorArgument | ConstantArgument: if isinstance(arg, torch.fx.Node): if isinstance(arg.meta.get("val"), torch.Tensor): return TensorArgument(name=arg.name) else: raise AssertionError( "Symint input is not implemented yet for submodule call signature." ) else: return ConstantArgument(name="", value=arg) for module in gm.modules(): if not isinstance(module, torch.fx.GraphModule): continue nn_module_stack = None for node in module.graph.nodes: if node.op != "call_function": continue if node.target is torch.ops.higher_order._export_tracepoint: kind = node.kwargs["kind"] if kind == "module_call_outputs": nn_module_stack = node.meta["nn_module_stack"] elif kind == "module_call_inputs": nn_module_stack = None else: raise AssertionError(f"Unknown tracepoint kind: {kind}") elif node.meta["nn_module_stack"] == nn_module_stack: node.meta["nn_module_stack"].popitem() else: nn_module_stack = None nn_module_stack = None for node in reversed(module.graph.nodes): if node.op != "call_function": continue if node.target is torch.ops.higher_order._export_tracepoint: kind = node.kwargs["kind"] if kind == "module_call_inputs": nn_module_stack = node.meta["nn_module_stack"] elif kind == "module_call_outputs": nn_module_stack = None else: raise AssertionError(f"Unknown tracepoint kind: {kind}") elif node.meta["nn_module_stack"] == nn_module_stack: node.meta["nn_module_stack"].popitem() else: nn_module_stack = None def copy_sig(sig) -> ModuleCallSignature: from torch.export.exported_program import ModuleCallSignature return ModuleCallSignature( inputs=[], outputs=[], in_spec=sig.in_spec, out_spec=sig.out_spec, forward_arg_names=None, ) for module in gm.modules(): if not 
isinstance(module, torch.fx.GraphModule): continue for node in module.graph.nodes: if node.op != "call_function": continue if node.target is torch.ops.higher_order._export_tracepoint: # There's some subtlety worth noting. Here fqn corresponds to # the call name, whereas path corresponds to the module name. # They are not necessarily the same! When a submodule is shared # through different aliases, there are as many _export_tracepoint # markers as there are aliases, since the shared submodule is # wrapped once for each alias. path = node.kwargs["path"] fqn, _ = next(reversed(node.meta["nn_module_stack"].values())) module_key = next(reversed(node.meta["nn_module_stack"])) if "@" in module_key: suffix = module_key.split("@")[-1] path = f"{path}@{suffix}" call_fqn = f"{fqn}@{suffix}" if call_fqn not in self.specs: self.specs[call_fqn] = copy_sig(self.specs[fqn]) fqn = call_fqn kind = node.kwargs["kind"] for i, arg in enumerate(node.args): # We only update the signature of the alias used to call # the submodule. Otherwise the signatures of all aliases # would get conflated; the inputs/outputs of every call # would be recorded in every other call as well. if fqn == path: if kind == "module_call_inputs": self.specs[path].inputs.append(get_arg_spec(arg)) elif kind == "module_call_outputs": self.specs[path].outputs.append(get_arg_spec(arg)) else: raise AssertionError(f"Unknown tracepoint kind: {kind}") if isinstance(arg, torch.fx.Node): for user in node.users: assert user.op == "call_function" assert user.target is operator.getitem assert isinstance(user.args[1], int) if user.args[1] == i: user.replace_all_uses_with(arg) self.sig.replace_all_uses(user.name, arg.name) break users = list(node.users) for user in users: assert len(user.users) == 0 gm.graph.erase_node(user) gm.graph.erase_node(node) return PassResult(gm, True) return None
CollectTracepointsPass
python
weaviate__weaviate-python-client
weaviate/collections/classes/config.py
{ "start": 15521, "end": 15696 }
class ____(_ConfigCreateModel): reranker: Union[Rerankers, _EnumLikeStr] RerankerCohereModel = Literal["rerank-english-v2.0", "rerank-multilingual-v2.0"]
_RerankerProvider
python
joke2k__faker
tests/providers/test_geo.py
{ "start": 5368, "end": 5472 }
class ____(TestEnUS): def setUp(self): self.fake = Faker("en_IE") Faker.seed(0)
TestEnIe
python
keras-team__keras
keras/src/layers/reshaping/cropping1d_test.py
{ "start": 121, "end": 2921 }
class ____(testing.TestCase): @pytest.mark.requires_trainable_backend def test_cropping_1d(self): inputs = np.random.rand(3, 5, 7) # Cropping with different values on the left and the right. self.run_layer_test( layers.Cropping1D, init_kwargs={"cropping": (1, 2)}, input_data=inputs, expected_output=ops.convert_to_tensor(inputs[:, 1:3, :]), ) # Same cropping on the left and the right. self.run_layer_test( layers.Cropping1D, init_kwargs={"cropping": (1, 1)}, input_data=inputs, expected_output=ops.convert_to_tensor(inputs[:, 1:4, :]), ) # Same cropping on the left and the right provided as an int. self.run_layer_test( layers.Cropping1D, init_kwargs={"cropping": 1}, input_data=inputs, expected_output=ops.convert_to_tensor(inputs[:, 1:4, :]), ) # Cropping on the right only. self.run_layer_test( layers.Cropping1D, init_kwargs={"cropping": (0, 1)}, input_data=inputs, expected_output=ops.convert_to_tensor(inputs[:, 0:4, :]), ) # Cropping on the left only. self.run_layer_test( layers.Cropping1D, init_kwargs={"cropping": (1, 0)}, input_data=inputs, expected_output=ops.convert_to_tensor(inputs[:, 1:5, :]), ) @pytest.mark.requires_trainable_backend def test_cropping_1d_with_dynamic_spatial_dim(self): input_layer = layers.Input(batch_shape=(1, None, 7)) cropped = layers.Cropping1D((1, 2))(input_layer) self.assertEqual(cropped.shape, (1, None, 7)) def test_cropping_1d_errors_if_cropping_argument_invalid(self): with self.assertRaises(ValueError): layers.Cropping1D(cropping=(1,)) with self.assertRaises(ValueError): layers.Cropping1D(cropping=(1, 2, 3)) with self.assertRaises(ValueError): layers.Cropping1D(cropping="1") def test_cropping_1d_errors_if_cropping_more_than_available(self): with self.assertRaisesRegex( ValueError, "`cropping` parameter of `Cropping1D` layer must be smaller than", ): input_layer = layers.Input(batch_shape=(3, 5, 7)) layers.Cropping1D(cropping=(2, 3))(input_layer) def test_cropping_1d_error_on_excessive_cropping(self): inputs = np.random.rand(3, 5, 7) 
with self.assertRaisesRegex( ValueError, "`cropping` parameter of `Cropping1D` layer must be smaller than", ): layer = layers.Cropping1D(cropping=(3, 3)) _ = layer(inputs)
Cropping1DTest
python
jazzband__django-waffle
waffle/tests/test_testutils.py
{ "start": 280, "end": 3456 }
class ____: def test_switch_existed_and_was_active(self): waffle.get_waffle_switch_model().objects.create(name='foo', active=True) with override_switch('foo', active=True): assert waffle.switch_is_active('foo') with override_switch('foo', active=False): assert not waffle.switch_is_active('foo') # make sure it didn't change 'active' value assert waffle.get_waffle_switch_model().objects.get(name='foo').active def test_switch_existed_and_was_NOT_active(self): waffle.get_waffle_switch_model().objects.create(name='foo', active=False) with override_switch('foo', active=True): assert waffle.switch_is_active('foo') with override_switch('foo', active=False): assert not waffle.switch_is_active('foo') # make sure it didn't change 'active' value assert not waffle.get_waffle_switch_model().objects.get(name='foo').active def test_new_switch(self): assert not waffle.get_waffle_switch_model().objects.filter(name='foo').exists() with override_switch('foo', active=True): assert waffle.switch_is_active('foo') with override_switch('foo', active=False): assert not waffle.switch_is_active('foo') assert not waffle.get_waffle_switch_model().objects.filter(name='foo').exists() def test_as_decorator(self): assert not waffle.get_waffle_switch_model().objects.filter(name='foo').exists() @override_switch('foo', active=True) def test_enabled(): assert waffle.switch_is_active('foo') test_enabled() @override_switch('foo', active=False) def test_disabled(): assert not waffle.switch_is_active('foo') test_disabled() assert not waffle.get_waffle_switch_model().objects.filter(name='foo').exists() def test_restores_after_exception(self): waffle.get_waffle_switch_model().objects.create(name='foo', active=True) def inner(): with override_switch('foo', active=False): raise RuntimeError("Trying to break") with self.assertRaises(RuntimeError): inner() assert waffle.get_waffle_switch_model().objects.get(name='foo').active def test_restores_after_exception_in_decorator(self): 
waffle.get_waffle_switch_model().objects.create(name='foo', active=True) @override_switch('foo', active=False) def inner(): raise RuntimeError("Trying to break") with self.assertRaises(RuntimeError): inner() assert waffle.get_waffle_switch_model().objects.get(name='foo').active def test_cache_is_flushed_by_testutils_even_in_transaction(self): waffle.get_waffle_switch_model().objects.create(name='foo', active=True) with transaction.atomic(): with override_switch('foo', active=True): assert waffle.switch_is_active('foo') with override_switch('foo', active=False): assert not waffle.switch_is_active('foo') assert waffle.switch_is_active('foo')
OverrideSwitchMixin
python
altair-viz__altair
altair/vegalite/v6/schema/channels.py
{ "start": 302956, "end": 316739 }
class ____(FieldChannelMixin, core.LatLongFieldDef): r""" Latitude schema wrapper. Parameters ---------- shorthand : str, dict, Sequence[str], :class:`RepeatRef` shorthand for field, aggregate, and type aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb'] Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``, ``"min"``, ``"max"``, ``"count"``). **Default value:** ``undefined`` (None) **See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__ documentation. bandPosition : float Relative position on a band of a stacked, binned, time unit, or band scale. For example, the marks will be positioned at the beginning of the band if set to ``0``, and at the middle of the band if set to ``0.5``. bin : None A flag for binning a ``quantitative`` field, `an object defining binning parameters <https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating that the data for ``x`` or ``y`` channel are binned before they are imported into Vega-Lite (``"binned"``). * If ``true``, default `binning parameters <https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be applied. * If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are already binned. You can map the bin-start field to ``x`` (or ``y``) and the bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can also set the axis's `tickMinStep <https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property. **Default value:** ``false`` **See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__ documentation. 
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef` **Required.** A string defining the name of the field from which to pull a data value or an object defining iterated values from the `repeat <https://vega.github.io/vega-lite/docs/repeat.html>`__ operator. **See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__ documentation. **Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If field names contain dots or brackets but are not nested, you can use ``\\`` to escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details about escaping in the `field documentation <https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required if ``aggregate`` is ``count``. timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 
'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds'] Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal field. or `a temporal field that gets casted as ordinal <https://vega.github.io/vega-lite/docs/type.html#cast>`__. **Default value:** ``undefined`` (None) **See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__ documentation. title : str, :class:`Text`, Sequence[str], None A title for the field. If ``null``, the title will be removed. **Default value:** derived from the field's name and transformation function (``aggregate``, ``bin`` and ``timeUnit``). 
If the field has an aggregate function, the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the field is binned or has a time unit applied, the applied function is shown in parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``). Otherwise, the title is simply the field name. **Notes**: 1) You can customize the default field title format by providing the `fieldTitle <https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle function via the compile function's options <https://vega.github.io/vega-lite/usage/compile.html#field-title>`__. 2) If both field definition's ``title`` and axis, header, or legend ``title`` are defined, axis/header/legend title will be used. type : Literal['quantitative'] The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or ``"nominal"``) for the encoded field or constant value (``datum``). It can also be a ``"geojson"`` type for encoding `'geoshape' <https://vega.github.io/vega-lite/docs/geoshape.html>`__. Vega-Lite automatically infers data types in many cases as discussed below. However, type is required for a field if: (1) the field is not nominal and the field encoding has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal scale for a field with ``bin`` or ``timeUnit``. 
**Default value:** 1) For a data ``field``, ``"nominal"`` is the default data type unless the field encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or ``timeUnit`` that satisfies the following criteria: * ``"quantitative"`` is the default type if (1) the encoded field contains ``bin`` or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is ``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__. * ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit`` or (2) the specified scale type is a time or utc scale * ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort order <https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__, (2) the specified scale type is an ordinal/point/band scale, or (3) the encoding channel is ``order``. 2) For a constant value in data domain (``datum``): * ``"quantitative"`` if the datum is a number * ``"nominal"`` if the datum is a string * ``"temporal"`` if the datum is `a date time object <https://vega.github.io/vega-lite/docs/datetime.html>`__ **Note:** * Data ``type`` describes the semantics of the data rather than the primitive data types (number, string, etc.). The same primitive data type can have different types of measurement. For example, numeric data can represent quantitative, ordinal, or nominal data. * Data values for a temporal field can be either a date-time string (e.g., ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a timestamp number (e.g., ``1552199579097``). * When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the ``type`` property can be either ``"quantitative"`` (for using a linear bin scale) or `"ordinal" (for using an ordinal bin scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. 
* When using with `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal" (for using an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. * When using with `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property refers to the post-aggregation data type. For example, we can calculate count ``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct", "field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``. * Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have ``type`` as they must have exactly the same type as their primary channels (e.g., ``x``, ``y``). **See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__ documentation. """ _class_is_valid_at_instantiation = False _encoding_name = "latitude" @overload def aggregate(self, _: NonArgAggregateOp_T, /) -> Latitude: ... @overload def aggregate( self, *, argmax: Optional[str | SchemaBase] = Undefined ) -> Latitude: ... @overload def aggregate( self, *, argmin: Optional[str | SchemaBase] = Undefined ) -> Latitude: ... @overload def bandPosition(self, _: float, /) -> Latitude: ... @overload def bin(self, _: None, /) -> Latitude: ... @overload def field(self, _: str | RepeatRef, /) -> Latitude: ... @overload def field( self, *, repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined, ) -> Latitude: ... @overload def timeUnit( self, _: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T, /, ) -> Latitude: ... @overload def timeUnit( self, *, binned: Optional[bool] = Undefined, maxbins: Optional[float] = Undefined, step: Optional[float] = Undefined, unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined, utc: Optional[bool] = Undefined, ) -> Latitude: ... 
@overload def title(self, _: str | Sequence[str] | None, /) -> Latitude: ... @overload def type(self, _: Literal["quantitative"], /) -> Latitude: ... def __init__( self, shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined, aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined, bandPosition: Optional[float] = Undefined, bin: Optional[None] = Undefined, field: Optional[str | SchemaBase | Map] = Undefined, timeUnit: Optional[ SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T ] = Undefined, title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined, type: Optional[Literal["quantitative"]] = Undefined, **kwds, ): super().__init__( shorthand=shorthand, aggregate=aggregate, bandPosition=bandPosition, bin=bin, field=field, timeUnit=timeUnit, title=title, type=type, **kwds, ) @with_property_setters
Latitude
python
donnemartin__system-design-primer
solutions/object_oriented_design/online_chat/online_chat.py
{ "start": 1441, "end": 1590 }
class ____(metaclass=ABCMeta): def __init__(self, chat_id): self.chat_id = chat_id self.users = [] self.messages = []
Chat
python
apache__airflow
airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_task_instances.py
{ "start": 51110, "end": 52000 }
class ____: def setup_method(self): clear_db_runs() def teardown_method(self): clear_db_runs() @pytest.mark.parametrize("_json", (({"tasks": ["t1"]}), ({"tasks": [("t1", -1)]}))) def test_ti_skip_downstream(self, client, session, create_task_instance, dag_maker, _json): with dag_maker("skip_downstream_dag", session=session): t0 = EmptyOperator(task_id="t0") t1 = EmptyOperator(task_id="t1") t0 >> t1 dr = dag_maker.create_dagrun(run_id="run") ti0 = dr.get_task_instance("t0") ti0.set_state(State.SUCCESS) response = client.patch( f"/execution/task-instances/{ti0.id}/skip-downstream", json=_json, ) ti1 = dr.get_task_instance("t1") assert response.status_code == 204 assert ti1.state == State.SKIPPED
TestTISkipDownstream
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/freshness.py
{ "start": 838, "end": 1019 }
class ____(str, Enum): PASS = "PASS" WARN = "WARN" FAIL = "FAIL" UNKNOWN = "UNKNOWN" NOT_APPLICABLE = "NOT_APPLICABLE" @whitelist_for_serdes @record
FreshnessState
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/dataclass11.py
{ "start": 412, "end": 792 }
class ____(Generic[Key2, Value]): def add(self, key: Key2, value: Value): return MapTreeNode(key=key, value=value) def test1(self, a: Key2, b: Value): v1 = self.add(a, b) reveal_type(v1, expected_text="MapTreeNode[Key2@Foo, Value@Foo]") reveal_type(v1.key, expected_text="Key2@Foo") reveal_type(v1.value, expected_text="Value@Foo")
Foo
python
scrapy__scrapy
scrapy/pipelines/images.py
{ "start": 862, "end": 941 }
class ____(FileException): """General image error exception"""
ImageException
python
google__pytype
pytype/file_utils_test.py
{ "start": 3897, "end": 6594 }
class ____(unittest.TestCase): """Tests for file_utils.expand_source_files.""" FILES = [ "a.py", file_utils.replace_separator("foo/b.py"), file_utils.replace_separator("foo/c.txt"), file_utils.replace_separator("foo/bar/d.py"), file_utils.replace_separator("foo/bar/baz/e.py"), ] def _test_expand(self, string): with test_utils.Tempdir() as d: fs = [d.create_file(f) for f in self.FILES] pyfiles = file_utils.expand_paths(f for f in fs if f.endswith(".py")) self.assertCountEqual( pyfiles, file_utils.expand_source_files(string, d.path) ) def test_expand_source_files(self): self._test_expand(file_utils.replace_separator("a.py foo/c.txt foo")) def test_duplicates(self): self._test_expand(file_utils.replace_separator("a.py foo/b.py foo foo/bar")) def test_cwd(self): with test_utils.Tempdir() as d: fs = [d.create_file(f) for f in self.FILES] pyfiles = file_utils.expand_paths(f for f in fs if f.endswith(".py")) # cd to d.path and run with just "." as an argument with file_utils.cd(d.path): self.assertCountEqual(pyfiles, file_utils.expand_source_files(".")) def test_empty(self): self.assertEqual(file_utils.expand_source_files(""), set()) def test_magic(self): filenames = ["a.py", file_utils.replace_separator("b/c.py")] with test_utils.Tempdir() as d: for f in filenames: d.create_file(f) with file_utils.cd(d.path): self.assertEqual( file_utils.expand_source_files( file_utils.replace_separator("**/*.py") ), {path_utils.realpath(f) for f in filenames}, ) def test_magic_with_cwd(self): filenames = ["a.py", file_utils.replace_separator("b/c.py")] with test_utils.Tempdir() as d: for f in filenames: d.create_file(f) self.assertEqual( file_utils.expand_source_files( file_utils.replace_separator("**/*.py"), cwd=d.path ), set( file_utils.expand_paths( path_utils.join(d.path, f) for f in filenames ) ), ) def test_multiple_magic(self): filenames = ["a.py", file_utils.replace_separator("b/c.py")] with test_utils.Tempdir() as d: for f in filenames: d.create_file(f) self.assertEqual( 
file_utils.expand_source_files( file_utils.replace_separator("*.py b/*.py"), cwd=d.path ), set( file_utils.expand_paths( path_utils.join(d.path, f) for f in filenames ) ), )
TestExpandSourceFiles
python
PrefectHQ__prefect
tests/runtime/test_flow_run.py
{ "start": 13893, "end": 18842 }
class ____: async def test_parent_deployment_id_is_attribute(self): assert "parent_deployment_id" in dir(flow_run) async def test_parent_deployment_id_is_empty_when_not_set(self): assert flow_run.parent_deployment_id is None async def test_parent_deployment_id_returns_parent_deployment_id_when_present_dynamically( self, prefect_client: PrefectClient ): assert flow_run.parent_deployment_id is None @flow def parent(): return 1 @task def foo(): return 1 parent_flow_id = await prefect_client.create_flow(parent) # Parent flow run that does not have a deployment parent_flow_run_no_deployment = await prefect_client.create_flow_run( flow=parent, ) parent_task_run_no_deployment = await prefect_client.create_task_run( task=foo, dynamic_key="1", flow_run_id=parent_flow_run_no_deployment.id ) with FlowRunContext.model_construct( flow_run=FlowRun.model_construct( parent_task_run_id=parent_task_run_no_deployment.id ), flow=Flow(fn=lambda: None, name="child-flow-no-deployment"), ): assert flow_run.parent_deployment_id is None # Parent flow run that does have a deployment parent_flow_deployment_id = await prefect_client.create_deployment( flow_id=parent_flow_id, name="example", ) parent_flow_run_with_deployment = ( await prefect_client.create_flow_run_from_deployment( deployment_id=parent_flow_deployment_id, ) ) parent_task_run_with_deployment = await prefect_client.create_task_run( task=foo, dynamic_key="1", flow_run_id=parent_flow_run_with_deployment.id ) with FlowRunContext.model_construct( flow_run=FlowRun.model_construct( parent_task_run_id=parent_task_run_with_deployment.id ), flow=Flow(fn=lambda: None, name="child-flow-with-parent-deployment"), ): assert flow_run.parent_deployment_id == str(parent_flow_deployment_id) # No parent flow run with FlowRunContext.model_construct( flow_run=FlowRun.model_construct(parent_task_run_id=None), flow=Flow(fn=lambda: None, name="child-flow-no-parent-task-run"), ): assert flow_run.parent_deployment_id is None async def 
test_parent_deployment_id_pulls_from_api_when_needed( self, monkeypatch: pytest.MonkeyPatch, prefect_client: PrefectClient ): assert flow_run.parent_deployment_id is None @flow def parent(): return 1 @task def foo(): return 1 parent_flow_id = await prefect_client.create_flow(parent) # Parent flow run that does not have a deployment parent_flow_run_no_deployment = await prefect_client.create_flow_run( flow=parent, ) parent_task_run_no_deployment = await prefect_client.create_task_run( task=foo, dynamic_key="1", flow_run_id=parent_flow_run_no_deployment.id ) child_flow_run_no_deployment = await prefect_client.create_flow_run( flow=Flow(fn=lambda: None, name="child-no-deploy"), parameters={"x": "foo", "y": "bar"}, parent_task_run_id=parent_task_run_no_deployment.id, ) monkeypatch.setenv( name="PREFECT__FLOW_RUN_ID", value=str(child_flow_run_no_deployment.id) ) assert flow_run.parent_deployment_id is None # Parent flow run that does have a deployment parent_flow_deployment_id = await prefect_client.create_deployment( flow_id=parent_flow_id, name="example", ) parent_flow_run_with_deployment = ( await prefect_client.create_flow_run_from_deployment( deployment_id=parent_flow_deployment_id, ) ) parent_task_run_with_deployment = await prefect_client.create_task_run( task=foo, dynamic_key="1", flow_run_id=parent_flow_run_with_deployment.id ) child_flow_run_with_deployment = await prefect_client.create_flow_run( flow=Flow(fn=lambda: None, name="child-deploy"), parameters={"x": "foo", "y": "bar"}, parent_task_run_id=parent_task_run_with_deployment.id, ) monkeypatch.setenv( name="PREFECT__FLOW_RUN_ID", value=str(child_flow_run_with_deployment.id) ) assert flow_run.parent_deployment_id == str(parent_flow_deployment_id) # No parent flow run monkeypatch.setenv( name="PREFECT__FLOW_RUN_ID", value=str(parent_flow_run_no_deployment.id) ) assert flow_run.parent_deployment_id is None
TestParentDeploymentId
python
pydata__xarray
xarray/backends/common.py
{ "start": 22573, "end": 27899 }
class ____: """ ``BackendEntrypoint`` is a class container and it is the main interface for the backend plugins, see :ref:`RST backend_entrypoint`. It shall implement: - ``open_dataset`` method: it shall implement reading from file, variables decoding and it returns an instance of :py:class:`~xarray.Dataset`. It shall take in input at least ``filename_or_obj`` argument and ``drop_variables`` keyword argument. For more details see :ref:`RST open_dataset`. - ``guess_can_open`` method: it shall return ``True`` if the backend is able to open ``filename_or_obj``, ``False`` otherwise. The implementation of this method is not mandatory. - ``open_datatree`` method: it shall implement reading from file, variables decoding and it returns an instance of :py:class:`~datatree.DataTree`. It shall take in input at least ``filename_or_obj`` argument. The implementation of this method is not mandatory. For more details see <reference to open_datatree documentation>. Attributes ---------- open_dataset_parameters : tuple, default: None A list of ``open_dataset`` method parameters. The setting of this attribute is not mandatory. description : str, default: "" A short string describing the engine. The setting of this attribute is not mandatory. url : str, default: "" A string with the URL to the backend's documentation. The setting of this attribute is not mandatory. supports_groups : bool, default: False Whether the backend supports opening groups (via open_datatree and open_groups_as_dict) or not. 
""" open_dataset_parameters: ClassVar[tuple | None] = None description: ClassVar[str] = "" url: ClassVar[str] = "" supports_groups: ClassVar[bool] = False def __repr__(self) -> str: txt = f"<{type(self).__name__}>" if self.description: txt += f"\n {self.description}" if self.url: txt += f"\n Learn more at {self.url}" return txt def open_dataset( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, *, drop_variables: str | Iterable[str] | None = None, ) -> Dataset: """ Backend open_dataset method used by Xarray in :py:func:`~xarray.open_dataset`. """ raise NotImplementedError() def guess_can_open( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, ) -> bool: """ Backend open_dataset method used by Xarray in :py:func:`~xarray.open_dataset`. """ return False def open_datatree( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, *, drop_variables: str | Iterable[str] | None = None, ) -> DataTree: """ Backend open_datatree method used by Xarray in :py:func:`~xarray.open_datatree`. If implemented, set the class variable supports_groups to True. """ raise NotImplementedError() def open_groups_as_dict( self, filename_or_obj: str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore, *, drop_variables: str | Iterable[str] | None = None, ) -> dict[str, Dataset]: """ Opens a dictionary mapping from group names to Datasets. Called by :py:func:`~xarray.open_groups`. This function exists to provide a universal way to open all groups in a file, before applying any additional consistency checks or requirements necessary to create a `DataTree` object (typically done using :py:meth:`~xarray.DataTree.from_dict`). If implemented, set the class variable supports_groups to True. 
""" raise NotImplementedError() # mapping of engine name to (module name, BackendEntrypoint Class) BACKEND_ENTRYPOINTS: dict[str, tuple[str | None, type[BackendEntrypoint]]] = {} def _is_likely_dap_url(url: str) -> bool: """ Determines if a URL is likely an OPeNDAP (DAP) endpoint based on known protocols, server software path patterns, and file extensions. Parameters ---------- url : str Returns ------- True if the URL matches common DAP patterns, False otherwise. """ if not url: return False url_lower = url.lower() # For remote URIs, check for DAP server software path patterns if is_remote_uri(url_lower): dap_path_patterns = ( "/dodsc/", # THREDDS Data Server (TDS) DAP endpoint (case-insensitive) "/dods/", # GrADS Data Server (GDS) DAP endpoint "/opendap/", # Generic OPeNDAP/Hyrax server "/erddap/", # ERDDAP data server "/dap2/", # Explicit DAP2 version in path "/dap4/", # Explicit DAP4 version in path "/dap/", ) return any(pattern in url_lower for pattern in dap_path_patterns) return False
BackendEntrypoint
python
tensorflow__tensorflow
tensorflow/python/feature_column/sequence_feature_column_test.py
{ "start": 25760, "end": 34548 }
class ____(test.TestCase, parameterized.TestCase): def test_defaults(self): a = sfc.sequence_numeric_column('aaa') self.assertEqual('aaa', a.key) self.assertEqual('aaa', a.name) self.assertEqual((1,), a.shape) self.assertEqual(0., a.default_value) self.assertEqual(dtypes.float32, a.dtype) self.assertIsNone(a.normalizer_fn) def test_shape_saved_as_tuple(self): a = sfc.sequence_numeric_column('aaa', shape=[1, 2]) self.assertEqual((1, 2), a.shape) def test_shape_must_be_positive_integer(self): with self.assertRaisesRegex(TypeError, 'shape dimensions must be integer'): sfc.sequence_numeric_column('aaa', shape=[1.0]) with self.assertRaisesRegex(ValueError, 'shape dimensions must be greater than 0'): sfc.sequence_numeric_column('aaa', shape=[0]) def test_dtype_is_convertible_to_float(self): with self.assertRaisesRegex(ValueError, 'dtype must be convertible to float'): sfc.sequence_numeric_column('aaa', dtype=dtypes.string) def test_normalizer_fn_must_be_callable(self): with self.assertRaisesRegex(TypeError, 'must be a callable'): sfc.sequence_numeric_column('aaa', normalizer_fn='NotACallable') @parameterized.named_parameters( {'testcase_name': '2D', 'inputs_args': { # example 0, values [0., 1] # example 1, [10.] 
'indices': ((0, 0), (0, 1), (1, 0)), 'values': (0., 1., 10.), 'dense_shape': (2, 2)}, 'expected': [ [[0.], [1.]], [[10.], [0.]]]}, {'testcase_name': '3D', 'inputs_args': { # feature 0, ids [[20, 3], [5]] # feature 1, ids [[3], [8]] 'indices': ((0, 0, 0), (0, 0, 1), (0, 1, 0), (1, 0, 0), (1, 1, 0)), 'values': (20, 3, 5., 3., 8.), 'dense_shape': (2, 2, 2)}, 'expected': [ [[20.], [3.], [5.], [0.]], [[3.], [0.], [8.], [0.]]]}, ) def test_get_sequence_dense_tensor(self, inputs_args, expected): inputs = sparse_tensor.SparseTensorValue(**inputs_args) numeric_column = sfc.sequence_numeric_column('aaa') dense_tensor, _ = _get_sequence_dense_tensor( numeric_column, {'aaa': inputs}) self.assertAllEqual(expected, self.evaluate(dense_tensor)) def test_get_sequence_dense_tensor_with_normalizer_fn(self): def _increment_two(input_sparse_tensor): return sparse_ops.sparse_add( input_sparse_tensor, sparse_tensor.SparseTensor(((0, 0), (1, 1)), (2.0, 2.0), (2, 2)) ) sparse_input = sparse_tensor.SparseTensorValue( # example 0, values [[0.], [1]] # example 1, [[10.]] indices=((0, 0), (0, 1), (1, 0)), values=(0., 1., 10.), dense_shape=(2, 2)) # Before _increment_two: # [[0.], [1.]], # [[10.], [0.]], # After _increment_two: # [[2.], [1.]], # [[10.], [2.]], expected_dense_tensor = [ [[2.], [1.]], [[10.], [2.]], ] numeric_column = sfc.sequence_numeric_column( 'aaa', normalizer_fn=_increment_two) dense_tensor, _ = _get_sequence_dense_tensor( numeric_column, {'aaa': sparse_input}) self.assertAllEqual( expected_dense_tensor, self.evaluate(dense_tensor)) @parameterized.named_parameters( {'testcase_name': '2D', 'sparse_input_args': { # example 0, values [[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]] # example 1, [[[10., 11.], [12., 13.]]] 'indices': ((0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (1, 0), (1, 1), (1, 2), (1, 3)), 'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.), 'dense_shape': (2, 8)}, 'expected_dense_tensor': [ [[[0., 1.], [2., 3.]], [[4., 5.], [6., 
7.]]], [[[10., 11.], [12., 13.]], [[0., 0.], [0., 0.]]]]}, {'testcase_name': '3D', 'sparse_input_args': { 'indices': ((0, 0, 0), (0, 0, 2), (0, 0, 4), (0, 0, 6), (0, 1, 0), (0, 1, 2), (0, 1, 4), (0, 1, 6), (1, 0, 0), (1, 0, 2), (1, 0, 4), (1, 0, 6)), 'values': (0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12., 13.), 'dense_shape': (2, 2, 8)}, 'expected_dense_tensor': [ [[[0., 0.], [1., 0.]], [[2., 0.], [3., 0.]], [[4., 0.], [5., 0.]], [[6., 0.], [7., 0.]]], [[[10., 0.], [11., 0.]], [[12., 0.], [13., 0.]], [[0., 0.], [0., 0.]], [[0., 0.], [0., 0.]]]]}, ) def test_get_dense_tensor_multi_dim( self, sparse_input_args, expected_dense_tensor): """Tests get_sequence_dense_tensor for multi-dim numeric_column.""" sparse_input = sparse_tensor.SparseTensorValue(**sparse_input_args) numeric_column = sfc.sequence_numeric_column('aaa', shape=(2, 2)) dense_tensor, _ = _get_sequence_dense_tensor( numeric_column, {'aaa': sparse_input}) self.assertAllEqual( expected_dense_tensor, self.evaluate(dense_tensor)) @parameterized.named_parameters( {'testcase_name': '2D', 'inputs_args': { # example 0, ids [2] # example 1, ids [0, 1] 'indices': ((0, 0), (1, 0), (1, 1)), 'values': (2., 0., 1.), 'dense_shape': (2, 2)}, 'expected_sequence_length': [1, 2], 'shape': (1,)}, {'testcase_name': '3D', 'inputs_args': { # example 0, ids [[2]] # example 1, ids [[0, 1], [2]] 'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)), 'values': (2., 0., 1., 2.), 'dense_shape': (2, 2, 2)}, 'expected_sequence_length': [1, 2], 'shape': (1,)}, {'testcase_name': '2D_with_shape', 'inputs_args': { # example 0, ids [2] # example 1, ids [0, 1] 'indices': ((0, 0), (1, 0), (1, 1)), 'values': (2., 0., 1.), 'dense_shape': (2, 2)}, 'expected_sequence_length': [1, 1], 'shape': (2,)}, {'testcase_name': '3D_with_shape', 'inputs_args': { # example 0, ids [[2]] # example 1, ids [[0, 1], [2]] 'indices': ((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)), 'values': (2., 0., 1., 2.), 'dense_shape': (2, 2, 2)}, 'expected_sequence_length': 
[1, 2], 'shape': (2,)}, ) def test_sequence_length(self, inputs_args, expected_sequence_length, shape): inputs = sparse_tensor.SparseTensorValue(**inputs_args) numeric_column = sfc.sequence_numeric_column('aaa', shape=shape) _, sequence_length = _get_sequence_dense_tensor( numeric_column, {'aaa': inputs}) sequence_length = self.evaluate(sequence_length) self.assertAllEqual(expected_sequence_length, sequence_length) self.assertEqual(np.int64, sequence_length.dtype) def test_sequence_length_with_empty_rows(self): """Tests _sequence_length when some examples do not have ids.""" sparse_input = sparse_tensor.SparseTensorValue( # example 0, values [] # example 1, values [[0.], [1.]] # example 2, [[2.]] # example 3, values [] # example 4, [[3.]] # example 5, values [] indices=((1, 0), (1, 1), (2, 0), (4, 0)), values=(0., 1., 2., 3.), dense_shape=(6, 2)) expected_sequence_length = [0, 2, 1, 0, 1, 0] numeric_column = sfc.sequence_numeric_column('aaa') _, sequence_length = _get_sequence_dense_tensor( numeric_column, {'aaa': sparse_input}) self.assertAllEqual( expected_sequence_length, self.evaluate(sequence_length)) def test_serialization(self): """Tests that column can be serialized.""" def _custom_fn(input_tensor): return input_tensor + 42 column = sfc.sequence_numeric_column( key='my-key', shape=(2,), default_value=3, dtype=dtypes.int32, normalizer_fn=_custom_fn) configs = serialization.serialize_feature_column(column) column = serialization.deserialize_feature_column( configs, custom_objects={_custom_fn.__name__: _custom_fn}) self.assertEqual(column.key, 'my-key') self.assertEqual(column.shape, (2,)) self.assertEqual(column.default_value, 3) self.assertEqual(column.normalizer_fn(3), 45) with self.assertRaisesRegex(ValueError, 'Instance: 0 is not a FeatureColumn'): serialization.serialize_feature_column(int()) def test_parents(self): """Tests parents attribute of column.""" column = sfc.sequence_numeric_column(key='my-key') self.assertEqual(column.parents, ['my-key']) if 
__name__ == '__main__': test.main()
SequenceNumericColumnTest
python
sympy__sympy
sympy/tensor/tensor.py
{ "start": 58073, "end": 67687 }
class ____(Basic): """ Tensor head of the tensor. Parameters ========== name : name of the tensor index_types : list of TensorIndexType symmetry : TensorSymmetry of the tensor comm : commutation group number Attributes ========== ``name`` ``index_types`` ``rank`` : total number of indices ``symmetry`` ``comm`` : commutation group Notes ===== Similar to ``symbols`` multiple TensorHeads can be created using ``tensorhead(s, typ, sym=None, comm=0)`` function, where ``s`` is the string of names and ``sym`` is the monoterm tensor symmetry (see ``tensorsymmetry``). A ``TensorHead`` belongs to a commutation group, defined by a symbol on number ``comm`` (see ``_TensorManager.set_comm``); tensors in a commutation group have the same commutation properties; by default ``comm`` is ``0``, the group of the commuting tensors. Examples ======== Define a fully antisymmetric tensor of rank 2: >>> from sympy.tensor.tensor import TensorIndexType, TensorHead, TensorSymmetry >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> asym2 = TensorSymmetry.fully_symmetric(-2) >>> A = TensorHead('A', [Lorentz, Lorentz], asym2) Examples with ndarray values, the components data assigned to the ``TensorHead`` object are assumed to be in a fully-contravariant representation. In case it is necessary to assign components data which represents the values of a non-fully covariant tensor, see the other examples. >>> from sympy.tensor.tensor import tensor_indices >>> from sympy import diag >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> i0, i1 = tensor_indices('i0:2', Lorentz) Specify a replacement dictionary to keep track of the arrays to use for replacements in the tensorial expression. 
The ``TensorIndexType`` is associated to the metric used for contractions (in fully covariant form): >>> repl = {Lorentz: diag(1, -1, -1, -1)} Let's see some examples of working with components with the electromagnetic tensor: >>> from sympy import symbols >>> Ex, Ey, Ez, Bx, By, Bz = symbols('E_x E_y E_z B_x B_y B_z') >>> c = symbols('c', positive=True) Let's define `F`, an antisymmetric tensor: >>> F = TensorHead('F', [Lorentz, Lorentz], asym2) Let's update the dictionary to contain the matrix to use in the replacements: >>> repl.update({F(-i0, -i1): [ ... [0, Ex/c, Ey/c, Ez/c], ... [-Ex/c, 0, -Bz, By], ... [-Ey/c, Bz, 0, -Bx], ... [-Ez/c, -By, Bx, 0]]}) Now it is possible to retrieve the contravariant form of the Electromagnetic tensor: >>> F(i0, i1).replace_with_arrays(repl, [i0, i1]) [[0, -E_x/c, -E_y/c, -E_z/c], [E_x/c, 0, -B_z, B_y], [E_y/c, B_z, 0, -B_x], [E_z/c, -B_y, B_x, 0]] and the mixed contravariant-covariant form: >>> F(i0, -i1).replace_with_arrays(repl, [i0, -i1]) [[0, E_x/c, E_y/c, E_z/c], [E_x/c, 0, B_z, -B_y], [E_y/c, -B_z, 0, B_x], [E_z/c, B_y, -B_x, 0]] Energy-momentum of a particle may be represented as: >>> from sympy import symbols >>> P = TensorHead('P', [Lorentz], TensorSymmetry.no_symmetry(1)) >>> E, px, py, pz = symbols('E p_x p_y p_z', positive=True) >>> repl.update({P(i0): [E, px, py, pz]}) The contravariant and covariant components are, respectively: >>> P(i0).replace_with_arrays(repl, [i0]) [E, p_x, p_y, p_z] >>> P(-i0).replace_with_arrays(repl, [-i0]) [E, -p_x, -p_y, -p_z] The contraction of a 1-index tensor by itself: >>> expr = P(i0)*P(-i0) >>> expr.replace_with_arrays(repl, []) E**2 - p_x**2 - p_y**2 - p_z**2 """ is_commutative = False def __new__(cls, name, index_types, symmetry=None, comm=0): if isinstance(name, str): name_symbol = Symbol(name) elif isinstance(name, Symbol): name_symbol = name else: raise ValueError("invalid name") if symmetry is None: symmetry = TensorSymmetry.no_symmetry(len(index_types)) else: assert 
symmetry.rank == len(index_types) obj = Basic.__new__(cls, name_symbol, Tuple(*index_types), symmetry, sympify(comm)) return obj @property def name(self): return self.args[0].name @property def index_types(self): return list(self.args[1]) @property def symmetry(self): return self.args[2] @property def comm(self): return TensorManager.comm_symbols2i(self.args[3]) @property def rank(self): return len(self.index_types) def __lt__(self, other): return (self.name, self.index_types) < (other.name, other.index_types) def commutes_with(self, other): """ Returns ``0`` if ``self`` and ``other`` commute, ``1`` if they anticommute. Returns ``None`` if ``self`` and ``other`` neither commute nor anticommute. """ r = TensorManager.get_comm(self.comm, other.comm) return r def _print(self): return f"{self.name}({','.join([str(x) for x in self.index_types])})" def __call__(self, *indices, **kw_args): """ Returns a tensor with indices. Explanation =========== There is a special behavior in case of indices denoted by ``True``, they are considered auto-matrix indices, their slots are automatically filled, and confer to the tensor the behavior of a matrix or vector upon multiplication with another tensor containing auto-matrix indices of the same ``TensorIndexType``. This means indices get summed over the same way as in matrix multiplication. For matrix behavior, define two auto-matrix indices, for vector behavior define just one. Indices can also be strings, in which case the attribute ``index_types`` is used to convert them to proper ``TensorIndex``. 
Examples ======== >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorHead >>> Lorentz = TensorIndexType('Lorentz', dummy_name='L') >>> a, b = tensor_indices('a,b', Lorentz) >>> A = TensorHead('A', [Lorentz]*2, TensorSymmetry.no_symmetry(2)) >>> t = A(a, -b) >>> t A(a, -b) """ updated_indices = [] for idx, typ in zip(indices, self.index_types): if isinstance(idx, str): idx = idx.strip().replace(" ", "") if idx.startswith('-'): updated_indices.append(TensorIndex(idx[1:], typ, is_up=False)) else: updated_indices.append(TensorIndex(idx, typ)) else: updated_indices.append(idx) updated_indices += indices[len(updated_indices):] tensor = Tensor(self, updated_indices, **kw_args) return tensor.doit() # Everything below this line is deprecated def __pow__(self, other): deprecate_data() with ignore_warnings(SymPyDeprecationWarning): if self.data is None: raise ValueError("No power on abstract tensors.") from .array import tensorproduct, tensorcontraction metrics = [_.data for _ in self.index_types] marray = self.data marraydim = marray.rank() for metric in metrics: marray = tensorproduct(marray, metric, marray) marray = tensorcontraction(marray, (0, marraydim), (marraydim+1, marraydim+2)) return marray ** (other * S.Half) @property def data(self): deprecate_data() with ignore_warnings(SymPyDeprecationWarning): return _tensor_data_substitution_dict[self] @data.setter def data(self, data): deprecate_data() with ignore_warnings(SymPyDeprecationWarning): _tensor_data_substitution_dict[self] = data @data.deleter def data(self): deprecate_data() if self in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[self] def __iter__(self): deprecate_data() with ignore_warnings(SymPyDeprecationWarning): return self.data.__iter__() def _components_data_full_destroy(self): """ EXPERIMENTAL: do not rely on this API method. 
Destroy components data associated to the ``TensorHead`` object, this checks for attached components data, and destroys components data too. """ # do not garbage collect Kronecker tensor (it should be done by # ``TensorIndexType`` garbage collection) deprecate_data() if self.name == "KD": return # the data attached to a tensor must be deleted only by the TensorHead # destructor. If the TensorHead is deleted, it means that there are no # more instances of that tensor anywhere. if self in _tensor_data_substitution_dict: del _tensor_data_substitution_dict[self] def tensor_heads(s, index_types, symmetry=None, comm=0): """ Returns a sequence of TensorHeads from a string `s` """ if isinstance(s, str): names = [x.name for x in symbols(s, seq=True)] else: raise ValueError('expecting a string') thlist = [TensorHead(name, index_types, symmetry, comm) for name in names] if len(thlist) == 1: return thlist[0] return thlist
TensorHead
python
readthedocs__readthedocs.org
readthedocs/projects/migrations/0035_container_time_limit_as_integer.py
{ "start": 150, "end": 595 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("projects", "0034_remove_unused_project_model_fields"), ] operations = [ migrations.AlterField( model_name="project", name="container_time_limit", field=models.IntegerField( blank=True, null=True, verbose_name="Container time limit in seconds" ), ), ]
Migration
python
kamyu104__LeetCode-Solutions
Python/minimize-product-sum-of-two-arrays.py
{ "start": 179, "end": 555 }
class ____(object): def minProductSum(self, nums1, nums2): """ :type nums1: List[int] :type nums2: List[int] :rtype: int """ def inner_product(vec1, vec2): return sum(itertools.imap(operator.mul, vec1, vec2)) nums1.sort() nums2.sort(reverse=True) return inner_product(nums1, nums2)
Solution
python
Textualize__textual
docs/examples/how-to/center10.py
{ "start": 118, "end": 617 }
class ____(App): """How to center things.""" CSS = """ Screen { align: center middle; } .words { background: blue 50%; border: wide white; width: auto; } """ def compose(self) -> ComposeResult: with Center(): yield Static("How about a nice game", classes="words") with Center(): yield Static("of chess?", classes="words") if __name__ == "__main__": app = CenterApp() app.run()
CenterApp