language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
ansible__ansible
test/units/playbook/test_base.py
{ "start": 1123, "end": 6408 }
class ____(unittest.TestCase): ClassUnderTest = base.Base def setUp(self): self.assorted_vars = {'var_2_key': 'var_2_value', 'var_1_key': 'var_1_value', 'a_list': ['a_list_1', 'a_list_2'], 'a_dict': {'a_dict_key': 'a_dict_value'}, 'a_int': 42, 'a_float': 37.371, 'a_bool': True, 'a_none': None, } self.b = self.ClassUnderTest() def _base_validate(self, ds): bsc = self.ClassUnderTest() parent = ExampleParentBaseSubClass() bsc._parent = parent bsc._dep_chain = [parent] parent._dep_chain = None bsc.load_data(ds) fake_loader = DictDataLoader({}) templar = TemplateEngine(loader=fake_loader) bsc.post_validate(templar) return bsc def test(self): self.assertIsInstance(self.b, base.Base) self.assertIsInstance(self.b, self.ClassUnderTest) # dump me doesnt return anything or change anything so not much to assert def test_dump_me_empty(self): self.b.dump_me() def test_dump_me(self): ds = {'environment': [], 'vars': {'var_2_key': 'var_2_value', 'var_1_key': 'var_1_value'}} b = self._base_validate(ds) b.dump_me() def _assert_copy(self, orig, copy): self.assertIsInstance(copy, self.ClassUnderTest) self.assertIsInstance(copy, base.Base) self.assertEqual(len(orig.fattributes), len(copy.fattributes)) sentinel = 'Empty DS' self.assertEqual(getattr(orig, '_ds', sentinel), getattr(copy, '_ds', sentinel)) def test_copy_empty(self): copy = self.b.copy() self._assert_copy(self.b, copy) def test_copy_with_vars(self): ds = {'vars': self.assorted_vars} b = self._base_validate(ds) copy = b.copy() self._assert_copy(b, copy) def test_post_validate_empty(self): fake_loader = DictDataLoader({}) templar = TemplateEngine(loader=fake_loader) ret = self.b.post_validate(templar) self.assertIsNone(ret) def test_get_ds_none(self): ds = self.b.get_ds() self.assertIsNone(ds) def test_load_data_ds_is_none(self): self.assertRaises(AssertionError, self.b.load_data, None) def test_load_data_invalid_attr(self): ds = {'not_a_valid_attr': [], 'other': None} self.assertRaises(AnsibleParserError, self.b.load_data, 
ds) def test_load_data_invalid_attr_type(self): ds = {'environment': True} # environment is supposed to be a list. This # seems like it shouldn't work? ret = self.b.load_data(ds) self.assertEqual(True, ret._environment) def test_post_validate(self): ds = {'environment': [], 'port': 443} b = self._base_validate(ds) self.assertEqual(b.port, 443) self.assertEqual(b.environment, []) def test_post_validate_invalid_attr_types(self): ds = {'environment': [], 'port': 'some_port'} b = self._base_validate(ds) self.assertEqual(b.port, 'some_port') def test_vars(self): # vars as a dict. ds = {'environment': [], 'vars': {'var_2_key': 'var_2_value', 'var_1_key': 'var_1_value'}} b = self._base_validate(ds) self.assertEqual(b.vars['var_1_key'], 'var_1_value') def test_vars_list_of_dicts(self): ds = {'environment': [], 'vars': [{'var_2_key': 'var_2_value'}, {'var_1_key': 'var_1_value'}] } self.assertRaises(AnsibleParserError, self.b.load_data, ds) def test_vars_not_dict_or_list(self): ds = {'environment': [], 'vars': 'I am a string, not a dict or a list of dicts'} self.assertRaises(AnsibleParserError, self.b.load_data, ds) def test_vars_not_valid_identifier(self): ds = {'environment': [], 'vars': [{'var_2_key': 'var_2_value'}, {'1an-invalid identifier': 'var_1_value'}] } self.assertRaises(AnsibleParserError, self.b.load_data, ds) def test_vars_is_list_but_not_of_dicts(self): ds = {'environment': [], 'vars': ['foo', 'bar', 'this is a string not a dict'] } self.assertRaises(AnsibleParserError, self.b.load_data, ds) def test_vars_is_none(self): # If vars is None, we should get a empty dict back ds = {'environment': [], 'vars': None } b = self._base_validate(ds) self.assertEqual(b.vars, {}) def test_validate_empty(self): self.b.validate() self.assertTrue(self.b._validated) def test_getters(self): # not sure why these exist, but here are tests anyway loader = self.b.get_loader() variable_manager = self.b.get_variable_manager() self.assertEqual(loader, self.b._loader) 
self.assertEqual(variable_manager, self.b._variable_manager)
TestBase
python
sqlalchemy__sqlalchemy
test/ext/test_extendedattr.py
{ "start": 3111, "end": 4457 }
class ____(_ExtBase, fixtures.TestBase): def test_unregister(self, registry): class MyClassState(instrumentation.InstrumentationManager): def manage(self, class_, manager): setattr(class_, "xyz", manager) def unregister(self, class_, manager): delattr(class_, "xyz") def manager_getter(self, class_): def get(cls): return cls.xyz return get class MyClass: __sa_instrumentation_manager__ = MyClassState assert attributes.opt_manager_of_class(MyClass) is None with expect_raises_message( sa.orm.exc.UnmappedClassError, r"Can't locate an instrumentation manager for class .*MyClass", ): attributes.manager_of_class(MyClass) t = Table( "my_table", registry.metadata, Column("id", Integer, primary_key=True), ) registry.map_imperatively(MyClass, t) manager = attributes.opt_manager_of_class(MyClass) is_not(manager, None) is_(manager, MyClass.xyz) registry.configure() registry.dispose() manager = attributes.opt_manager_of_class(MyClass) is_(manager, None) assert not hasattr(MyClass, "xyz")
DisposeTest
python
doocs__leetcode
solution/3400-3499/3446.Sort Matrix by Diagonals/Solution.py
{ "start": 0, "end": 863 }
class ____: def sortMatrix(self, grid: List[List[int]]) -> List[List[int]]: n = len(grid) for k in range(n - 2, -1, -1): i, j = k, 0 t = [] while i < n and j < n: t.append(grid[i][j]) i += 1 j += 1 t.sort() i, j = k, 0 while i < n and j < n: grid[i][j] = t.pop() i += 1 j += 1 for k in range(n - 2, 0, -1): i, j = k, n - 1 t = [] while i >= 0 and j >= 0: t.append(grid[i][j]) i -= 1 j -= 1 t.sort() i, j = k, n - 1 while i >= 0 and j >= 0: grid[i][j] = t.pop() i -= 1 j -= 1 return grid
Solution
python
pypa__setuptools
setuptools/namespaces.py
{ "start": 3014, "end": 3171 }
class ____(Installer): def _get_root(self): return repr(str(self.egg_path)) def _get_target(self): return self.egg_link
DevelopInstaller
python
apache__airflow
task-sdk/tests/task_sdk/execution_time/test_supervisor.py
{ "start": 97126, "end": 101544 }
class ____: def test_no_retries(self): called = 0 def noop_handler(request: httpx.Request) -> httpx.Response: nonlocal called called += 1 return httpx.Response(500) transport = httpx.MockTransport(noop_handler) client = InProcessTestSupervisor._Client( base_url="http://local.invalid", token="", transport=transport ) with pytest.raises(httpx.HTTPStatusError): client.get("/goo") assert called == 1 @pytest.mark.parametrize( ("remote_logging", "remote_conn", "expected_env"), ( pytest.param(True, "", "AIRFLOW_CONN_AWS_DEFAULT", id="no-conn-id"), pytest.param(True, "aws_default", "AIRFLOW_CONN_AWS_DEFAULT", id="explicit-default"), pytest.param(True, "my_aws", "AIRFLOW_CONN_MY_AWS", id="other"), pytest.param(False, "", "", id="no-remote-logging"), ), ) def test_remote_logging_conn(remote_logging, remote_conn, expected_env, monkeypatch, mocker): # This doesn't strictly need the AWS provider, but it does need something that # airflow.config_templates.airflow_local_settings.DEFAULT_LOGGING_CONFIG knows about pytest.importorskip("airflow.providers.amazon", reason="'amazon' provider not installed") # This test is a little bit overly specific to how the logging is currently configured :/ monkeypatch.delitem(sys.modules, "airflow.logging_config") monkeypatch.delitem(sys.modules, "airflow.config_templates.airflow_local_settings", raising=False) def handle_request(request: httpx.Request) -> httpx.Response: return httpx.Response( status_code=200, json={ # Minimal enough to pass validation, we don't care what fields are in here for the tests "conn_id": remote_conn, "conn_type": "aws", }, ) # Patch configurations in both airflow-core and task-sdk due to shared library refactoring. 
# # conf_vars() patches airflow.configuration.conf (airflow-core): # - remote_logging: needed by airflow_local_settings.py to decide whether to set up REMOTE_TASK_LOG # - remote_base_log_folder: needed by airflow_local_settings.py to create the CloudWatch handler # # task_sdk_conf_vars() patches airflow.sdk.configuration.conf (task-sdk): # - remote_log_conn_id: needed by load_remote_conn_id() to return the correct connection id with conf_vars( { ("logging", "remote_logging"): str(remote_logging), ("logging", "remote_base_log_folder"): "cloudwatch://arn:aws:logs:::log-group:test", ("logging", "remote_log_conn_id"): remote_conn, } ): with conf_vars( { ("logging", "remote_log_conn_id"): remote_conn, } ): env = os.environ.copy() client = make_client(transport=httpx.MockTransport(handle_request)) with _remote_logging_conn(client): new_keys = os.environ.keys() - env.keys() if remote_logging: assert new_keys == {expected_env} else: assert not new_keys if remote_logging and expected_env: connection_available = {"available": False, "conn_uri": None} def mock_upload_to_remote(process_log, ti): connection_available["available"] = expected_env in os.environ connection_available["conn_uri"] = os.environ.get(expected_env) mocker.patch("airflow.sdk.log.upload_to_remote", side_effect=mock_upload_to_remote) activity_subprocess = ActivitySubprocess( process_log=mocker.MagicMock(), id=TI_ID, pid=12345, stdin=mocker.MagicMock(), client=client, process=mocker.MagicMock(), ) activity_subprocess.ti = mocker.MagicMock() activity_subprocess._upload_logs() assert connection_available["available"], ( f"Connection {expected_env} was not available during upload_to_remote call" ) assert connection_available["conn_uri"] is not None, "Connection URI was None during upload"
TestInProcessClient
python
great-expectations__great_expectations
tests/core/test_batch_definition.py
{ "start": 1079, "end": 10430 }
class ____(DataAsset): @override def get_batch_identifiers_list(self, batch_request: BatchRequest) -> List[dict]: raise NotImplementedError @override def get_batch(self, batch_request: BatchRequest) -> Batch: raise NotImplementedError @pytest.fixture def mock_data_asset(monkeypatch, mocker: pytest_mock.MockerFixture) -> DataAsset: monkeypatch.setattr(DataAsset, "build_batch_request", mocker.Mock()) data_asset: DataAsset = DataAssetForTests(name="my_data_asset", type="table") return data_asset @pytest.mark.parametrize( "batch_parameters", [ (None,), ({"foo": "bar"},), ], ) @pytest.mark.unit def test_build_batch_request( batch_parameters: Optional[BatchParameters], mock_data_asset: DataAsset, mocker: pytest_mock.MockerFixture, ): batching_regex = re.compile(r"data_(?P<year>\d{4})-(?P<month>\d{2}).csv") partitioner = FileNamePartitionerYearly(regex=batching_regex) batch_definition = BatchDefinition( name="test_batch_definition", partitioner=partitioner, ) batch_definition.set_data_asset(mock_data_asset) batch_definition.build_batch_request(batch_parameters=batch_parameters) mock_build_batch_request = batch_definition.data_asset.build_batch_request assert isinstance(mock_build_batch_request, mocker.Mock) mock_build_batch_request.assert_called_once_with( options=batch_parameters, partitioner=partitioner, ) @pytest.mark.unit def test_get_batch_retrieves_only_batch(mocker: pytest_mock.MockFixture): # Arrange batch_definition = BatchDefinition[None](name="test_batch_definition") mock_asset = mocker.Mock(spec=DataAsset) batch_definition.set_data_asset(mock_asset) mock_batch = mocker.Mock(spec=Batch) mock_asset.get_batch.return_value = mock_batch # Act batch = batch_definition.get_batch() # Assert assert batch == mock_batch mock_asset.get_batch.assert_called_once_with(batch_definition.build_batch_request()) @pytest.mark.unit def test_get_batch_identifiers_list(mocker: pytest_mock.MockFixture): # Arrange batch_definition = BatchDefinition[None](name="test_batch_definition") 
mock_asset = mocker.Mock(spec=DataAsset) batch_definition.set_data_asset(mock_asset) mock_batch_identifiers_list = [{"foo": "bar"}, {"baz": "qux"}] mock_asset.get_batch_identifiers_list.return_value = mock_batch_identifiers_list # Act batch_identifiers_list = batch_definition.get_batch_identifiers_list() # Assert assert batch_identifiers_list == mock_batch_identifiers_list mock_asset.get_batch_identifiers_list.assert_called_once_with( batch_definition.build_batch_request() ) @pytest.mark.unit def test_get_batch_identifiers_list_with_batch_parameters(mocker: pytest_mock.MockFixture): # Arrange batch_definition = BatchDefinition[None](name="test_batch_definition") mock_asset = mocker.Mock(spec=DataAsset) batch_definition.set_data_asset(mock_asset) mock_batch_identifiers_list = [{"foo": "bar"}, {"baz": "qux"}] mock_asset.get_batch_identifiers_list.return_value = mock_batch_identifiers_list # Act batch_parameters: BatchParameters = {"path": "my_path"} batch_identifiers_list = batch_definition.get_batch_identifiers_list(batch_parameters) # Assert assert batch_identifiers_list == mock_batch_identifiers_list mock_asset.get_batch_identifiers_list.assert_called_once_with( batch_definition.build_batch_request(batch_parameters) ) @pytest.mark.unit def test_identifier_bundle_success(in_memory_runtime_context): context = in_memory_runtime_context ds = context.data_sources.add_pandas("pandas_datasource") asset = ds.add_csv_asset("my_asset", "data.csv") batch_definition = asset.add_batch_definition("my_batch_definition") result = batch_definition.identifier_bundle() assert result.datasource.name == "pandas_datasource" and result.datasource.id is not None assert result.asset.name == "my_asset" and result.asset.id is not None assert ( result.batch_definition.name == "my_batch_definition" and result.batch_definition.id is not None ) @pytest.mark.unit def test_identifier_bundle_no_id_raises_error(in_memory_runtime_context): context = in_memory_runtime_context ds = 
context.data_sources.add_pandas("pandas_datasource") asset = ds.add_csv_asset("my_asset", "data.csv") batch_definition = asset.add_batch_definition("my_batch_definition") batch_definition.id = None with pytest.raises(ResourceFreshnessAggregateError) as e: batch_definition.identifier_bundle() assert len(e.value.errors) == 1 assert isinstance(e.value.errors[0], BatchDefinitionNotAddedError) @pytest.mark.parametrize( "id,is_fresh,num_errors", [ pytest.param(str(uuid.uuid4()), True, 0, id="added"), pytest.param(None, False, 1, id="not_added"), ], ) @pytest.mark.unit def test_is_fresh_is_added( in_memory_runtime_context, id: str | None, is_fresh: bool, num_errors: int ): context = in_memory_runtime_context batch_definition = ( context.data_sources.add_pandas(name="my_pandas_ds") .add_csv_asset(name="my_csv_asset", filepath_or_buffer="data.csv") .add_batch_definition(name="my_batch_def") ) batch_definition.id = id # Fluent API will add an ID but manually overriding for test diagnostics = batch_definition.is_fresh() assert diagnostics.success is is_fresh assert len(diagnostics.errors) == num_errors assert all(isinstance(err, BatchDefinitionNotAddedError) for err in diagnostics.errors) @pytest.mark.cloud def test_is_fresh_freshness( unset_gx_env_variables: None, empty_cloud_context_fluent, ): # Ephemeral/file use a cacheable datasource dict so freshness # with batch definitions is a Cloud-only concern context = empty_cloud_context_fluent batch_definition = ( context.data_sources.add_pandas(name="my_pandas_ds") .add_csv_asset(name="my_csv_asset", filepath_or_buffer="data.csv") .add_batch_definition(name="my_batch_def") ) batching_regex = re.compile(r"data_(?P<year>\d{4})-(?P<month>\d{2}).csv") partitioner = FileNamePartitionerYearly(regex=batching_regex) batch_definition.partitioner = partitioner diagnostics = batch_definition.is_fresh() assert diagnostics.success is False assert len(diagnostics.errors) == 1 assert isinstance(diagnostics.errors[0], 
BatchDefinitionNotFreshError) @pytest.mark.unit def test_is_fresh_fails_on_datasource_retrieval(in_memory_runtime_context): context = in_memory_runtime_context datasource = context.data_sources.add_pandas(name="my_pandas_ds") asset = datasource.add_csv_asset(name="my_csv_asset", filepath_or_buffer="data.csv") batch_definition = asset.add_batch_definition(name="my_batch_def") context.delete_datasource("my_pandas_ds") diagnostics = batch_definition.is_fresh() assert diagnostics.success is False assert len(diagnostics.errors) == 1 assert isinstance(diagnostics.errors[0], DatasourceNotFoundError) @pytest.mark.unit def test_is_fresh_fails_on_asset_retrieval(in_memory_runtime_context): context = in_memory_runtime_context datasource = context.data_sources.add_pandas(name="my_pandas_ds") asset = datasource.add_csv_asset(name="my_csv_asset", filepath_or_buffer="data.csv") batch_definition = asset.add_batch_definition(name="my_batch_def") datasource.delete_asset("my_csv_asset") diagnostics = batch_definition.is_fresh() assert diagnostics.success is False assert len(diagnostics.errors) == 1 assert isinstance(diagnostics.errors[0], DataAssetNotFoundError) @pytest.mark.unit def test_is_fresh_fails_on_batch_definition_retrieval(in_memory_runtime_context): context = in_memory_runtime_context datasource = context.data_sources.add_pandas(name="my_pandas_ds") asset = datasource.add_csv_asset(name="my_csv_asset", filepath_or_buffer="data.csv") batch_definition = asset.add_batch_definition(name="my_batch_def") asset.delete_batch_definition("my_batch_def") diagnostics = batch_definition.is_fresh() assert diagnostics.success is False assert len(diagnostics.errors) == 1 assert isinstance(diagnostics.errors[0], BatchDefinitionNotFoundError) @pytest.mark.unit def test_save(in_memory_runtime_context: EphemeralDataContext): context = in_memory_runtime_context ds_name = "my_pandas_ds" asset_name = "my_csv_asset" batch_def_name = "my_batch_def" datasource = 
context.data_sources.add_pandas(name=ds_name) asset = datasource.add_csv_asset(name=asset_name, filepath_or_buffer=pathlib.Path("data.csv")) batch_definition = asset.add_batch_definition(name=batch_def_name) assert batch_definition.partitioner is None batch_definition.partitioner = FileNamePartitionerYearly(regex=re.compile("my_regex")) batch_definition.save() retrieved_datasource = context.data_sources.get(name=ds_name) retrieved_asset = retrieved_datasource.get_asset(name=asset_name) retrieved_batch_definition = retrieved_asset.get_batch_definition(name=batch_def_name) assert retrieved_batch_definition.partitioner
DataAssetForTests
python
scrapy__scrapy
tests/test_downloader_handlers_http_base.py
{ "start": 26669, "end": 26831 }
class ____(TestSimpleHttpsBase): """Connect to HTTPS hosts with IP while certificate uses domain names IDs.""" host = "127.0.0.1"
TestHttpsInvalidDNSIdBase
python
realpython__materials
python-callable-instances/logger.py
{ "start": 0, "end": 232 }
class ____: def __init__(self, filename): self.filename = filename def __call__(self, message): with open(self.filename, mode="a", encoding="utf-8") as log_file: log_file.write(message + "\n")
Logger
python
gevent__gevent
src/gevent/tests/test__greenlet.py
{ "start": 7719, "end": 7804 }
class ____(TestRaise_link): link_method = 'link_exception'
TestRaise_link_exception
python
scipy__scipy
scipy/cluster/tests/test_hierarchy.py
{ "start": 14123, "end": 14645 }
class ____: def test_leaders_single(self, xp): # Tests leaders using a flat clustering generated by single linkage. X = hierarchy_test_data.Q_X Y = pdist(X) Z = linkage(Y) T = fcluster(Z, criterion='maxclust', t=3) Z = xp.asarray(Z) T = xp.asarray(T, dtype=xp.int32) L = leaders(Z, T) expect = xp.asarray([53, 55, 56, 2, 3, 1], dtype=xp.int32) xp_assert_close(xp.concat(L), expect, rtol=1e-15) @make_xp_test_case(is_isomorphic)
TestLeaders
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_format11.py
{ "start": 315, "end": 893 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("format11.xlsx") def test_create_file(self): """Test a vertical and horizontal centered format.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() centered = workbook.add_format({"align": "center", "valign": "vcenter"}) worksheet.write("B2", "Foo", centered) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
walkccc__LeetCode
solutions/112. Path Sum/112.py
{ "start": 0, "end": 308 }
class ____: def hasPathSum(self, root: TreeNode, summ: int) -> bool: if not root: return False if root.val == summ and not root.left and not root.right: return True return (self.hasPathSum(root.left, summ - root.val) or self.hasPathSum(root.right, summ - root.val))
Solution
python
getsentry__sentry
src/sentry/workflow_engine/migrations/0088_remove_monitor_slug_conditions.py
{ "start": 1298, "end": 2647 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = True dependencies = [ ("workflow_engine", "0087_relink_crons_to_compatible_issue_workflows"), ] operations = [ migrations.RunPython( remove_monitor_slug_conditions, migrations.RunPython.noop, hints={"tables": ["workflow_engine_datacondition"]}, ), ]
Migration
python
docker__docker-py
tests/unit/models_networks_test.py
{ "start": 1298, "end": 2176 }
class ____(unittest.TestCase): def test_connect(self): client = make_fake_client() network = client.networks.get(FAKE_NETWORK_ID) network.connect(FAKE_CONTAINER_ID) client.api.connect_container_to_network.assert_called_once_with( FAKE_CONTAINER_ID, FAKE_NETWORK_ID ) def test_disconnect(self): client = make_fake_client() network = client.networks.get(FAKE_NETWORK_ID) network.disconnect(FAKE_CONTAINER_ID) client.api.disconnect_container_from_network.assert_called_once_with( FAKE_CONTAINER_ID, FAKE_NETWORK_ID ) def test_remove(self): client = make_fake_client() network = client.networks.get(FAKE_NETWORK_ID) network.remove() client.api.remove_network.assert_called_once_with(FAKE_NETWORK_ID)
NetworkTest
python
pytest-dev__pytest
testing/test_assertrewrite.py
{ "start": 74664, "end": 75514 }
class ____: class Help: def bound_method(self): # pragma: no cover pass def test_saferepr_bound_method(self): """saferepr() of a bound method should show only the method name""" assert _saferepr(self.Help().bound_method) == "bound_method" def test_saferepr_unbounded(self): """saferepr() of an unbound method should still show the full information""" obj = self.Help() # using id() to fetch memory address fails on different platforms pattern = re.compile( rf"<{Path(__file__).stem}.{self.__class__.__name__}.Help object at 0x[0-9a-fA-F]*>", ) assert pattern.match(_saferepr(obj)) assert ( _saferepr(self.Help) == f"<class '{Path(__file__).stem}.{self.__class__.__name__}.Help'>" )
TestSafereprUnbounded
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/property11.py
{ "start": 510, "end": 616 }
class ____: @classmethod @property def prop1(cls: type[T]) -> type[T]: return cls
Class2
python
astropy__astropy
astropy/io/votable/exceptions.py
{ "start": 20806, "end": 21187 }
class ____(VOTableSpecWarning): """ The column fields as defined using ``FIELD`` elements do not match those in the headers of the embedded FITS file. If ``verify`` is not ``'exception'``, the embedded FITS file will take precedence. """ message_template = ( "The fields defined in the VOTable do not match those in the embedded FITS file" )
W19
python
mwaskom__seaborn
tests/_stats/test_counting.py
{ "start": 1237, "end": 8131 }
class ____: @pytest.fixture def single_args(self): groupby = GroupBy(["group"]) class Scale: scale_type = "continuous" return groupby, "x", {"x": Scale()} @pytest.fixture def triple_args(self): groupby = GroupBy(["group", "a", "s"]) class Scale: scale_type = "continuous" return groupby, "x", {"x": Scale()} def test_string_bins(self, long_df): h = Hist(bins="sqrt") bin_kws = h._define_bin_params(long_df, "x", "continuous") assert bin_kws["range"] == (long_df["x"].min(), long_df["x"].max()) assert bin_kws["bins"] == int(np.sqrt(len(long_df))) def test_int_bins(self, long_df): n = 24 h = Hist(bins=n) bin_kws = h._define_bin_params(long_df, "x", "continuous") assert bin_kws["range"] == (long_df["x"].min(), long_df["x"].max()) assert bin_kws["bins"] == n def test_array_bins(self, long_df): bins = [-3, -2, 1, 2, 3] h = Hist(bins=bins) bin_kws = h._define_bin_params(long_df, "x", "continuous") assert_array_equal(bin_kws["bins"], bins) def test_binwidth(self, long_df): binwidth = .5 h = Hist(binwidth=binwidth) bin_kws = h._define_bin_params(long_df, "x", "continuous") n_bins = bin_kws["bins"] left, right = bin_kws["range"] assert (right - left) / n_bins == pytest.approx(binwidth) def test_binrange(self, long_df): binrange = (-4, 4) h = Hist(binrange=binrange) bin_kws = h._define_bin_params(long_df, "x", "continuous") assert bin_kws["range"] == binrange def test_discrete_bins(self, long_df): h = Hist(discrete=True) x = long_df["x"].astype(int) bin_kws = h._define_bin_params(long_df.assign(x=x), "x", "continuous") assert bin_kws["range"] == (x.min() - .5, x.max() + .5) assert bin_kws["bins"] == (x.max() - x.min() + 1) def test_discrete_bins_from_nominal_scale(self, rng): h = Hist() x = rng.randint(0, 5, 10) df = pd.DataFrame({"x": x}) bin_kws = h._define_bin_params(df, "x", "nominal") assert bin_kws["range"] == (x.min() - .5, x.max() + .5) assert bin_kws["bins"] == (x.max() - x.min() + 1) def test_count_stat(self, long_df, single_args): h = Hist(stat="count") out = 
h(long_df, *single_args) assert out["y"].sum() == len(long_df) def test_probability_stat(self, long_df, single_args): h = Hist(stat="probability") out = h(long_df, *single_args) assert out["y"].sum() == 1 def test_proportion_stat(self, long_df, single_args): h = Hist(stat="proportion") out = h(long_df, *single_args) assert out["y"].sum() == 1 def test_percent_stat(self, long_df, single_args): h = Hist(stat="percent") out = h(long_df, *single_args) assert out["y"].sum() == 100 def test_density_stat(self, long_df, single_args): h = Hist(stat="density") out = h(long_df, *single_args) assert (out["y"] * out["space"]).sum() == 1 def test_frequency_stat(self, long_df, single_args): h = Hist(stat="frequency") out = h(long_df, *single_args) assert (out["y"] * out["space"]).sum() == len(long_df) def test_invalid_stat(self): with pytest.raises(ValueError, match="The `stat` parameter for `Hist`"): Hist(stat="invalid") def test_cumulative_count(self, long_df, single_args): h = Hist(stat="count", cumulative=True) out = h(long_df, *single_args) assert out["y"].max() == len(long_df) def test_cumulative_proportion(self, long_df, single_args): h = Hist(stat="proportion", cumulative=True) out = h(long_df, *single_args) assert out["y"].max() == 1 def test_cumulative_density(self, long_df, single_args): h = Hist(stat="density", cumulative=True) out = h(long_df, *single_args) assert out["y"].max() == 1 def test_common_norm_default(self, long_df, triple_args): h = Hist(stat="percent") out = h(long_df, *triple_args) assert out["y"].sum() == pytest.approx(100) def test_common_norm_false(self, long_df, triple_args): h = Hist(stat="percent", common_norm=False) out = h(long_df, *triple_args) for _, out_part in out.groupby(["a", "s"]): assert out_part["y"].sum() == pytest.approx(100) def test_common_norm_subset(self, long_df, triple_args): h = Hist(stat="percent", common_norm=["a"]) out = h(long_df, *triple_args) for _, out_part in out.groupby("a"): assert out_part["y"].sum() == 
pytest.approx(100) def test_common_norm_warning(self, long_df, triple_args): h = Hist(common_norm=["b"]) with pytest.warns(UserWarning, match=r"Undefined variable\(s\)"): h(long_df, *triple_args) def test_common_bins_default(self, long_df, triple_args): h = Hist() out = h(long_df, *triple_args) bins = [] for _, out_part in out.groupby(["a", "s"]): bins.append(tuple(out_part["x"])) assert len(set(bins)) == 1 def test_common_bins_false(self, long_df, triple_args): h = Hist(common_bins=False) out = h(long_df, *triple_args) bins = [] for _, out_part in out.groupby(["a", "s"]): bins.append(tuple(out_part["x"])) assert len(set(bins)) == len(out.groupby(["a", "s"])) def test_common_bins_subset(self, long_df, triple_args): h = Hist(common_bins=False) out = h(long_df, *triple_args) bins = [] for _, out_part in out.groupby("a"): bins.append(tuple(out_part["x"])) assert len(set(bins)) == out["a"].nunique() def test_common_bins_warning(self, long_df, triple_args): h = Hist(common_bins=["b"]) with pytest.warns(UserWarning, match=r"Undefined variable\(s\)"): h(long_df, *triple_args) def test_histogram_single(self, long_df, single_args): h = Hist() out = h(long_df, *single_args) hist, edges = np.histogram(long_df["x"], bins="auto") assert_array_equal(out["y"], hist) assert_array_equal(out["space"], np.diff(edges)) def test_histogram_multiple(self, long_df, triple_args): h = Hist() out = h(long_df, *triple_args) bins = np.histogram_bin_edges(long_df["x"], "auto") for (a, s), out_part in out.groupby(["a", "s"]): x = long_df.loc[(long_df["a"] == a) & (long_df["s"] == s), "x"] hist, edges = np.histogram(x, bins=bins) assert_array_equal(out_part["y"], hist) assert_array_equal(out_part["space"], np.diff(edges))
TestHist
python
doocs__leetcode
solution/1000-1099/1065.Index Pairs of a String/Solution.py
{ "start": 0, "end": 252 }
class ____: def indexPairs(self, text: str, words: List[str]) -> List[List[int]]: words = set(words) n = len(text) return [ [i, j] for i in range(n) for j in range(i, n) if text[i : j + 1] in words ]
Solution
python
kamyu104__LeetCode-Solutions
Python/strange-printer-ii.py
{ "start": 2019, "end": 3466 }
class ____(object): def isPrintable(self, targetGrid): """ :type targetGrid: List[List[int]] :rtype: bool """ VISITING, VISITED = range(2) def has_cycle(adj, color, lookup): lookup[color] = VISITING for new_color in adj[color]: if (new_color not in lookup and has_cycle(adj, new_color, lookup)) or \ lookup[new_color] == VISITING: return True lookup[color] = VISITED return False MAX_COLOR = 60 adj = collections.defaultdict(set) for color in xrange(1, MAX_COLOR+1): min_r = len(targetGrid) min_c = len(targetGrid[0]) max_r = -1 max_c = -1 for r in xrange(len(targetGrid)): for c in xrange(len(targetGrid[r])): if targetGrid[r][c] == color: min_r = min(min_r, r) min_c = min(min_c, c) max_r = max(max_r, r) max_c = max(max_c, c) for r in xrange(min_r, max_r+1): for c in xrange(min_c, max_c+1): if targetGrid[r][c] != color: adj[color].add(targetGrid[r][c]) lookup = {} return all(color in lookup or not has_cycle(adj, color, lookup) for color in xrange(1, MAX_COLOR+1))
Solution2
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pycodestyle/E731.py
{ "start": 1919, "end": 2250 }
class ____(Enum): CELSIUS = (lambda deg_c: deg_c) FAHRENHEIT = (lambda deg_c: deg_c * 9 / 5 + 32) # Regression test for: https://github.com/astral-sh/ruff/issues/7141 def scope(): # E731 f = lambda: ( i := 1, ) from dataclasses import dataclass from typing import Callable @dataclass
TemperatureScales
python
instagram__MonkeyType
tests/test_stubs.py
{ "start": 48964, "end": 50802 }
class ____: @pytest.mark.parametrize( 'anno', [ inspect.Parameter.empty, inspect.Signature.empty, 'not a type', int, ], ) def test_no_imports(self, anno): """We shouldn't import any builtins, non-types, or empty annos""" assert get_imports_for_annotation(anno) == {} @pytest.mark.parametrize( 'anno, expected', [ (Any, {'typing': {'Any'}}), (Union[int, str], {'typing': {'Union'}}), ], ) def test_special_case_types(self, anno, expected): """Any and Union do not have module/qualname and need to be treated specially""" assert get_imports_for_annotation(anno) == expected def test_callable(self): assert get_imports_for_annotation(Callable) == {'typing': {'Callable'}} def test_user_defined_class(self): assert get_imports_for_annotation(Dummy) == {'tests.util': {'Dummy'}} @pytest.mark.parametrize( 'anno, expected', [ (Dict[str, Dummy], {'tests.util': {'Dummy'}, 'typing': {'Dict'}}), (List[Dummy], {'tests.util': {'Dummy'}, 'typing': {'List'}}), (Set[Dummy], {'tests.util': {'Dummy'}, 'typing': {'Set'}}), (Tuple[str, Dummy], {'tests.util': {'Dummy'}, 'typing': {'Tuple'}}), (Type[Dummy], {'tests.util': {'Dummy'}, 'typing': {'Type'}}), (Union[str, Dummy], {'tests.util': {'Dummy'}, 'typing': {'Union'}}), ], ) def test_container_types(self, anno, expected): """We need to descend into container types""" assert get_imports_for_annotation(anno) == expected def test_nested_class(self): assert get_imports_for_annotation(Parent.Child) == {Parent.__module__: {'Parent'}}
TestGetImportsForAnnotation
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 505310, "end": 506217 }
class ____(sgqlc.types.relay.Connection): """The connection type for Commit.""" __schema__ = github_schema __field_names__ = ("author_count", "edges", "nodes", "page_info", "total_count") author_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="authorCount") """The total count of authors and co-authors across all commits.""" edges = sgqlc.types.Field(sgqlc.types.list_of(CommitEdge), graphql_name="edges") """A list of edges.""" nodes = sgqlc.types.Field(sgqlc.types.list_of("Commit"), graphql_name="nodes") """A list of nodes.""" page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo") """Information to aid in pagination.""" total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount") """Identifies the total count of items in the connection."""
ComparisonCommitConnection
python
readthedocs__readthedocs.org
readthedocs/rtd_tests/tests/test_privacy_urls.py
{ "start": 20835, "end": 21354 }
class ____(PrivateUserProfileMixin, TestCase): # Auth protected default_status_code = 302 def setUp(self): super().setUp() self.response_data.update( { "/accounts/tokens/create/": {"status_code": 302}, "/accounts/tokens/delete/": {"status_code": 302}, "/accounts/login/": {"status_code": 200}, } ) def login(self): pass def is_admin(self): return False
PrivateUserProfileUnauthAccessTest
python
huggingface__transformers
tests/models/detr/test_modeling_detr.py
{ "start": 21967, "end": 31106 }
class ____(unittest.TestCase): @cached_property def default_image_processor(self): return DetrImageProcessor.from_pretrained("facebook/detr-resnet-50") if is_vision_available() else None def test_inference_no_head(self): model = DetrModel.from_pretrained("facebook/detr-resnet-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape = torch.Size((1, 100, 256)) assert outputs.last_hidden_state.shape == expected_shape expected_slices = Expectations( { (None, None): [ [0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826], ], ("rocm", (9, 5)): [ [ 0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826], ], } ) # fmt: skip expected_slice = torch.tensor(expected_slices.get_expectation(), device=torch_device) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4) def test_inference_object_detection_head(self): model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) # verify outputs expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels + 1)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slices = Expectations( { (None, None): [ [-19.1194, -0.0893, -11.0154], [-17.3640, -1.8035, -14.0219], [-20.0461, -0.5837, -11.1060], ], ("rocm", (9, 5)): [ [-19.1194, -0.0893, -11.0154], [-17.3640, -1.8035, -14.0219], [-20.0461, -0.5837, -11.1060], ], } ) # fmt: skip expected_slice_logits = 
torch.tensor(expected_slices.get_expectation(), device=torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=2e-4, atol=2e-4) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slice_boxes = torch.tensor( [ [0.4433, 0.5302, 0.8852], [0.5494, 0.2517, 0.0529], [0.4998, 0.5360, 0.9955], ] ).to(torch_device) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=2e-4, atol=2e-4) # verify postprocessing results = image_processor.post_process_object_detection( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_scores = torch.tensor([0.9982, 0.9960, 0.9955, 0.9988, 0.9987]).to(torch_device) expected_labels = [75, 75, 63, 17, 17] expected_slice_boxes = torch.tensor([40.1615, 70.8090, 175.5476, 117.9810]).to(torch_device) self.assertEqual(len(results["scores"]), 5) torch.testing.assert_close(results["scores"], expected_scores, rtol=2e-4, atol=2e-4) self.assertSequenceEqual(results["labels"].tolist(), expected_labels) torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=2e-4, atol=2e-4) def test_inference_panoptic_segmentation_head(self): model = DetrForSegmentation.from_pretrained("facebook/detr-resnet-50-panoptic").to(torch_device) image_processor = self.default_image_processor image = prepare_img() encoding = image_processor(images=image, return_tensors="pt").to(torch_device) pixel_values = encoding["pixel_values"].to(torch_device) pixel_mask = encoding["pixel_mask"].to(torch_device) with torch.no_grad(): outputs = model(pixel_values, pixel_mask) # verify outputs expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels + 1)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expected_slices = Expectations( { (None, None): [ [-18.1565, -1.7568, -13.5029], [-16.8888, -1.4138, -14.1028], [-17.5709, -2.5080, -11.8654], ], ("rocm", 
(9, 5)): [ [-18.1565, -1.7568, -13.5029], [-16.8888, -1.4138, -14.1028], [-17.5709, -2.5080, -11.8654], ], } ) # fmt: skip expected_slice_logits = torch.tensor(expected_slices.get_expectation(), device=torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_slice_logits, rtol=2e-4, atol=2e-4) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) expected_slices = Expectations( { (None, None): [ [0.5344, 0.1789, 0.9285], [0.4420, 0.0572, 0.0875], [0.6630, 0.6887, 0.1017], ], ("rocm", (9, 5)): [ [0.5344, 0.1789, 0.9285], [0.4420, 0.0572, 0.0875], [0.6630, 0.6887, 0.1017], ], } ) # fmt: skip expected_slice_boxes = torch.tensor(expected_slices.get_expectation(), device=torch_device) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, rtol=2e-4, atol=2e-4) expected_shape_masks = torch.Size((1, model.config.num_queries, 200, 267)) self.assertEqual(outputs.pred_masks.shape, expected_shape_masks) expected_slices = Expectations( { (None, None): [ [-7.7557, -10.8788, -11.9797], [-11.8880, -16.4328, -17.7450], [-14.7315, -19.7382, -20.3003], ], ("rocm", (9, 5)): [ [ -7.7558, -10.8789, -11.9798], [-11.8882, -16.4330, -17.7452], [-14.7317, -19.7384, -20.3005], ], } ) # fmt: skip expected_slice_masks = torch.tensor(expected_slices.get_expectation(), device=torch_device) torch.testing.assert_close(outputs.pred_masks[0, 0, :3, :3], expected_slice_masks, rtol=2e-3, atol=2e-3) # verify postprocessing results = image_processor.post_process_panoptic_segmentation( outputs, threshold=0.3, target_sizes=[image.size[::-1]] )[0] expected_shape = torch.Size([480, 640]) expected_slice_segmentation = torch.tensor([[4, 4, 4], [4, 4, 4], [4, 4, 4]], dtype=torch.int32).to( torch_device ) expected_number_of_segments = 5 expected_first_segment = {"id": 1, "label_id": 17, "was_fused": False, "score": 0.9941} number_of_unique_segments = 
len(torch.unique(results["segmentation"])) self.assertTrue( number_of_unique_segments, expected_number_of_segments + 1 ) # we add 1 for the background class self.assertTrue(results["segmentation"].shape, expected_shape) torch.testing.assert_close(results["segmentation"][:3, :3], expected_slice_segmentation, rtol=1e-4, atol=1e-4) self.assertTrue(len(results["segments_info"]), expected_number_of_segments) predicted_first_segment = results["segments_info"][0] self.assertEqual(predicted_first_segment["id"], expected_first_segment["id"]) self.assertEqual(predicted_first_segment["label_id"], expected_first_segment["label_id"]) self.assertEqual(predicted_first_segment["was_fused"], expected_first_segment["was_fused"]) self.assertAlmostEqual(predicted_first_segment["score"], expected_first_segment["score"], places=3) @require_vision @require_torch @slow
DetrModelIntegrationTestsTimmBackbone
python
huggingface__transformers
src/transformers/models/d_fine/modeling_d_fine.py
{ "start": 40207, "end": 44079 }
class ____(ModelOutput): r""" last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, config.num_labels)`): Stacked intermediate logits (logits of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate predicted corners (predicted corners of each layer of the decoder). initial_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points used for the first decoder layer. init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. enc_topk_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the encoder stage. Output of bounding box binary classification (i.e. foreground and background). enc_topk_bboxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`): Logits of predicted bounding boxes coordinates in the encoder stage. 
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. denoising_meta_values (`dict`): Extra dictionary for the denoising related values. """ last_hidden_state: Optional[torch.FloatTensor] = None intermediate_hidden_states: Optional[torch.FloatTensor] = None intermediate_logits: Optional[torch.FloatTensor] = None intermediate_reference_points: Optional[torch.FloatTensor] = None intermediate_predicted_corners: Optional[torch.FloatTensor] = None initial_reference_points: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None decoder_attentions: Optional[tuple[torch.FloatTensor]] = None cross_attentions: Optional[tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None encoder_attentions: Optional[tuple[torch.FloatTensor]] = None init_reference_points: Optional[torch.FloatTensor] = None enc_topk_logits: Optional[torch.FloatTensor] = None enc_topk_bboxes: Optional[torch.FloatTensor] = None enc_outputs_class: Optional[torch.FloatTensor] = None enc_outputs_coord_logits: Optional[torch.FloatTensor] = None denoising_meta_values: Optional[dict] = None
DFineModelOutput
python
ansible__ansible
test/lib/ansible_test/_internal/util.py
{ "start": 2770, "end": 3317 }
class ____(enum.Enum): """The output stream to use when running a subprocess and redirecting/capturing stdout or stderr.""" ORIGINAL = enum.auto() AUTO = enum.auto() def get_buffer(self, original: t.BinaryIO) -> t.BinaryIO: """Return the correct output buffer to use, taking into account the given original buffer.""" if self == OutputStream.ORIGINAL: return original if self == OutputStream.AUTO: return display.fd.buffer raise NotImplementedError(str(self))
OutputStream
python
numba__numba
numba/core/types/containers.py
{ "start": 14474, "end": 14594 }
class ____(BaseContainerIterator): """ Type class for list iterators. """ container_class = List
ListIter
python
realpython__materials
python-protocol/animals_v2.py
{ "start": 263, "end": 524 }
class ____: def __init__(self, name): self.name = name def eat(self): print(f"{self.name} is eating.") def drink(self): print(f"{self.name} is drinking.") def make_sound(self): print(f"{self.name} is meowing.")
Cat
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/packages/dtrun3/package.py
{ "start": 217, "end": 492 }
class ____(Package): """Simple package which acts as a run dependency""" homepage = "http://www.example.com" url = "http://www.example.com/dtrun3-1.0.tar.gz" version("1.0", md5="0123456789abcdef0123456789abcdef") depends_on("dtbuild3", type="build")
Dtrun3
python
pypa__warehouse
tests/unit/rate_limiting/test_core.py
{ "start": 3355, "end": 3590 }
class ____: def test_basic(self): limiter = DummyRateLimiter() assert limiter.test() assert limiter.hit() assert limiter.clear() is None assert limiter.resets_in() is None
TestDummyRateLimiter
python
readthedocs__readthedocs.org
readthedocs/organizations/views/public.py
{ "start": 1202, "end": 1373 }
class ____(CheckOrganizationsEnabled, TemplateView): """Wrapper around `TemplateView` to check if organizations are enabled.""" # Organization
OrganizationTemplateView
python
wandb__wandb
wandb/sdk/artifacts/artifact_saver.py
{ "start": 1088, "end": 9644 }
class ____: _server_artifact: dict | None # TODO better define this dict def __init__( self, api: InternalApi, digest: str, manifest_json: dict, file_pusher: FilePusher, is_user_created: bool = False, ) -> None: self._api = api self._file_pusher = file_pusher self._digest = digest self._manifest = ArtifactManifest.from_manifest_json(manifest_json) self._manifest.storage_policy._api = self._api self._is_user_created = is_user_created self._server_artifact = None def save( self, entity: str, project: str, type: str, name: str, client_id: str, sequence_client_id: str, distributed_id: str | None = None, finalize: bool = True, metadata: dict | None = None, ttl_duration_seconds: int | None = None, description: str | None = None, aliases: Sequence[str] | None = None, tags: Sequence[str] | None = None, use_after_commit: bool = False, incremental: bool = False, history_step: int | None = None, base_id: str | None = None, ) -> dict | None: return self._save_internal( entity, project, type, name, client_id, sequence_client_id, distributed_id, finalize, metadata, ttl_duration_seconds, description, aliases, tags, use_after_commit, incremental, history_step, base_id, ) def _save_internal( self, entity: str, project: str, type: str, name: str, client_id: str, sequence_client_id: str, distributed_id: str | None = None, finalize: bool = True, metadata: dict | None = None, ttl_duration_seconds: int | None = None, description: str | None = None, aliases: Sequence[str] | None = None, tags: Sequence[str] | None = None, use_after_commit: bool = False, incremental: bool = False, history_step: int | None = None, base_id: str | None = None, ) -> dict | None: alias_specs = [] for alias in aliases or []: alias_specs.append({"artifactCollectionName": name, "alias": alias}) tag_specs = [{"tagName": tag} for tag in tags or []] """Returns the server artifact.""" self._server_artifact, latest = self._api.create_artifact( type, name, self._digest, metadata=metadata, 
ttl_duration_seconds=ttl_duration_seconds, aliases=alias_specs, tags=tag_specs, description=description, is_user_created=self._is_user_created, distributed_id=distributed_id, client_id=client_id, sequence_client_id=sequence_client_id, history_step=history_step, ) assert self._server_artifact is not None # mypy optionality unwrapper artifact_id = self._server_artifact["id"] if base_id is None and latest: base_id = latest["id"] if self._server_artifact["state"] == "COMMITTED": if use_after_commit: self._api.use_artifact( artifact_id, artifact_entity_name=entity, artifact_project_name=project, ) return self._server_artifact if ( self._server_artifact["state"] != "PENDING" # For old servers, see https://github.com/wandb/wandb/pull/6190 and self._server_artifact["state"] != "DELETED" ): raise Exception( 'Unknown artifact state "{}"'.format(self._server_artifact["state"]) ) manifest_type = "FULL" manifest_filename = "wandb_manifest.json" if incremental: manifest_type = "INCREMENTAL" manifest_filename = "wandb_manifest.incremental.json" elif distributed_id: manifest_type = "PATCH" manifest_filename = "wandb_manifest.patch.json" artifact_manifest_id, _ = self._api.create_artifact_manifest( manifest_filename, "", artifact_id, base_artifact_id=base_id, include_upload=False, type=manifest_type, ) step_prepare = wandb.filesync.step_prepare.StepPrepare( self._api, 0.1, 0.01, 1000 ) # TODO: params step_prepare.start() # Upload Artifact "L1" files, the actual artifact contents self._file_pusher.store_manifest_files( self._manifest, artifact_id, lambda entry, progress_callback: self._manifest.storage_policy.store_file( artifact_id, artifact_manifest_id, entry, step_prepare, progress_callback=progress_callback, ), ) def before_commit() -> None: self._resolve_client_id_manifest_references() with tempfile.NamedTemporaryFile("w+", suffix=".json", delete=False) as fp: path = os.path.abspath(fp.name) json.dump(self._manifest.to_manifest_json(), fp, indent=4) digest = md5_file_b64(path) 
if distributed_id or incremental: # If we're in the distributed flow, we want to update the # patch manifest we created with our finalized digest. _, resp = self._api.update_artifact_manifest( artifact_manifest_id, digest=digest, ) else: # In the regular flow, we can recreate the full manifest with the # updated digest. # # NOTE: We do this for backwards compatibility with older backends # that don't support the 'updateArtifactManifest' API. _, resp = self._api.create_artifact_manifest( manifest_filename, digest, artifact_id, base_artifact_id=base_id, ) # We're duplicating the file upload logic a little, which isn't great. upload_url = resp["uploadUrl"] upload_headers = resp["uploadHeaders"] extra_headers = {} for upload_header in upload_headers: key, val = upload_header.split(":", 1) extra_headers[key] = val with open(path, "rb") as fp2: self._api.upload_file_retry( upload_url, fp2, extra_headers=extra_headers, ) commit_result: concurrent.futures.Future[None] = concurrent.futures.Future() # Queue the commit. It will only happen after all file uploads finish. self._file_pusher.commit_artifact( artifact_id, finalize=finalize, before_commit=before_commit, result_future=commit_result, ) # Block until all artifact files are uploaded and the # artifact is committed. 
try: commit_result.result() finally: step_prepare.shutdown() if finalize and use_after_commit: self._api.use_artifact( artifact_id, artifact_entity_name=entity, artifact_project_name=project, ) return self._server_artifact def _resolve_client_id_manifest_references(self) -> None: for entry_path in self._manifest.entries: entry = self._manifest.entries[entry_path] if entry.ref is not None: if entry.ref.startswith("wandb-client-artifact:"): client_id = util.host_from_path(entry.ref) artifact_file_path = util.uri_from_path(entry.ref) artifact_id = self._api._resolve_client_id(client_id) if artifact_id is None: raise RuntimeError(f"Could not resolve client id {client_id}") entry.ref = URIStr( f"wandb-artifact://{b64_to_hex_id(B64MD5(artifact_id))}/{artifact_file_path}" )
ArtifactSaver
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol45.py
{ "start": 306, "end": 382 }
class ____: def __call__(self, item: T, /) -> T: return item
Impl1
python
joblib__joblib
joblib/test/test_parallel.py
{ "start": 2630, "end": 23450 }
class ____(Exception): """An exception class with non trivial __init__""" def __init__(self, a, b, c, d): pass def exception_raiser(x, custom_exception=False): if x == 7: raise ( MyExceptionWithFinickyInit("a", "b", "c", "d") if custom_exception else ValueError ) return x def interrupt_raiser(x): time.sleep(0.05) raise KeyboardInterrupt def f(x, y=0, z=0): """A module-level function so that it can be spawn with multiprocessing. """ return x**2 + y + z def _active_backend_type(): return type(parallel.get_active_backend()[0]) def parallel_func(inner_n_jobs, backend): return Parallel(n_jobs=inner_n_jobs, backend=backend)( delayed(square)(i) for i in range(3) ) ############################################################################### def test_cpu_count(): assert cpu_count() > 0 def test_effective_n_jobs(): assert effective_n_jobs() > 0 @parametrize("context", [parallel_config, parallel_backend]) @pytest.mark.parametrize( "backend_n_jobs, expected_n_jobs", [(3, 3), (-1, effective_n_jobs(n_jobs=-1)), (None, 1)], ids=["positive-int", "negative-int", "None"], ) @with_multiprocessing def test_effective_n_jobs_None(context, backend_n_jobs, expected_n_jobs): # check the number of effective jobs when `n_jobs=None` # non-regression test for https://github.com/joblib/joblib/issues/984 with context("threading", n_jobs=backend_n_jobs): # when using a backend, the default of number jobs will be the one set # in the backend assert effective_n_jobs(n_jobs=None) == expected_n_jobs # without any backend, None will default to a single job assert effective_n_jobs(n_jobs=None) == 1 ############################################################################### # Test parallel @parametrize("backend", ALL_VALID_BACKENDS) @parametrize("n_jobs", [1, 2, -1, -2]) @parametrize("verbose", [2, 11, 100]) def test_simple_parallel(backend, n_jobs, verbose): assert [square(x) for x in range(5)] == Parallel( n_jobs=n_jobs, backend=backend, verbose=verbose )(delayed(square)(x) for x in range(5)) 
@parametrize("backend", ALL_VALID_BACKENDS) @parametrize("n_jobs", [1, 2]) def test_parallel_pretty_print(backend, n_jobs): n_tasks = 100 pattern = re.compile(r"(Done\s+\d+ out of \d+ \|)") class ParallelLog(Parallel): messages = [] def _print(self, msg): self.messages.append(msg) executor = ParallelLog(n_jobs=n_jobs, backend=backend, verbose=10000) executor([delayed(f)(i) for i in range(n_tasks)]) lens = set() for message in executor.messages: if s := pattern.search(message): a, b = s.span() lens.add(b - a) assert len(lens) == 1 @parametrize("backend", ALL_VALID_BACKENDS) def test_main_thread_renamed_no_warning(backend, monkeypatch): # Check that no default backend relies on the name of the main thread: # https://github.com/joblib/joblib/issues/180#issuecomment-253266247 # Some programs use a different name for the main thread. This is the case # for uWSGI apps for instance. monkeypatch.setattr( target=threading.current_thread(), name="name", value="some_new_name_for_the_main_thread", ) with warnings.catch_warnings(record=True) as warninfo: results = Parallel(n_jobs=2, backend=backend)( delayed(square)(x) for x in range(3) ) assert results == [0, 1, 4] # Due to the default parameters of LokyBackend, there is a chance that # warninfo catches Warnings from worker timeouts. We remove it if it exists # We also remove DeprecationWarnings which could lead to false negatives. 
warninfo = [ w for w in warninfo if "worker timeout" not in str(w.message) and not isinstance(w.message, DeprecationWarning) ] # Under Python 3.13 if backend='multiprocessing', you will get a # warning saying that forking a multi-threaded process is not a good idea, # we ignore them in this test if backend in [None, "multiprocessing"] or isinstance( backend, MultiprocessingBackend ): message_part = "multi-threaded, use of fork() may lead to deadlocks" warninfo = [w for w in warninfo if message_part not in str(w.message)] # The multiprocessing backend will raise a warning when detecting that is # started from the non-main thread. Let's check that there is no false # positive because of the name change. assert len(warninfo) == 0 def _assert_warning_nested(backend, inner_n_jobs, expected): with warnings.catch_warnings(record=True) as warninfo: warnings.simplefilter("always") parallel_func(backend=backend, inner_n_jobs=inner_n_jobs) warninfo = [w.message for w in warninfo] if expected: if warninfo: warnings_are_correct = all( "backed parallel loops cannot" in each.args[0] for each in warninfo ) # With free-threaded Python, when the outer backend is threading, # we might see more that one warning warnings_have_the_right_length = ( len(warninfo) >= 1 if IS_GIL_DISABLED else len(warninfo) == 1 ) return warnings_are_correct and warnings_have_the_right_length return False else: assert not warninfo return True @with_multiprocessing @parametrize( "parent_backend,child_backend,expected", [ ("loky", "multiprocessing", True), ("loky", "loky", False), ("multiprocessing", "multiprocessing", True), ("multiprocessing", "loky", True), ("threading", "multiprocessing", True), ("threading", "loky", True), ], ) def test_nested_parallel_warnings(parent_backend, child_backend, expected): # no warnings if inner_n_jobs=1 Parallel(n_jobs=2, backend=parent_backend)( delayed(_assert_warning_nested)( backend=child_backend, inner_n_jobs=1, expected=False ) for _ in range(5) ) # warnings if 
inner_n_jobs != 1 and expected res = Parallel(n_jobs=2, backend=parent_backend)( delayed(_assert_warning_nested)( backend=child_backend, inner_n_jobs=2, expected=expected ) for _ in range(5) ) # warning handling is not thread safe. One thread might see multiple # warning or no warning at all. if parent_backend == "threading": assert any(res) else: assert all(res) @with_multiprocessing @parametrize("backend", ["loky", "multiprocessing", "threading"]) def test_background_thread_parallelism(backend): is_run_parallel = [False] def background_thread(is_run_parallel): with warnings.catch_warnings(record=True) as warninfo: Parallel(n_jobs=2)(delayed(sleep)(0.1) for _ in range(4)) print(len(warninfo)) is_run_parallel[0] = len(warninfo) == 0 t = threading.Thread(target=background_thread, args=(is_run_parallel,)) t.start() t.join() assert is_run_parallel[0] def nested_loop(backend): Parallel(n_jobs=2, backend=backend)(delayed(square)(0.01) for _ in range(2)) @parametrize("child_backend", BACKENDS) @parametrize("parent_backend", BACKENDS) def test_nested_loop(parent_backend, child_backend): Parallel(n_jobs=2, backend=parent_backend)( delayed(nested_loop)(child_backend) for _ in range(2) ) def raise_exception(backend): raise ValueError @with_multiprocessing def test_nested_loop_with_exception_with_loky(): with raises(ValueError): with Parallel(n_jobs=2, backend="loky") as parallel: parallel([delayed(nested_loop)("loky"), delayed(raise_exception)("loky")]) def test_mutate_input_with_threads(): """Input is mutable when using the threading backend""" q = Queue(maxsize=5) Parallel(n_jobs=2, backend="threading")(delayed(q.put)(1) for _ in range(5)) assert q.full() @parametrize("n_jobs", [1, 2, 3]) def test_parallel_kwargs(n_jobs): """Check the keyword argument processing of pmap.""" lst = range(10) assert [f(x, y=1) for x in lst] == Parallel(n_jobs=n_jobs)( delayed(f)(x, y=1) for x in lst ) @parametrize("backend", PARALLEL_BACKENDS) def test_parallel_as_context_manager(backend): 
lst = range(10) expected = [f(x, y=1) for x in lst] with Parallel(n_jobs=4, backend=backend) as p: # Internally a pool instance has been eagerly created and is managed # via the context manager protocol managed_backend = p._backend # We make call with the managed parallel object several times inside # the managed block: assert expected == p(delayed(f)(x, y=1) for x in lst) assert expected == p(delayed(f)(x, y=1) for x in lst) # Those calls have all used the same pool instance: if mp is not None: assert get_workers(managed_backend) is get_workers(p._backend) # As soon as we exit the context manager block, the pool is terminated and # no longer referenced from the parallel object: if mp is not None: assert get_workers(p._backend) is None # It's still possible to use the parallel instance in non-managed mode: assert expected == p(delayed(f)(x, y=1) for x in lst) if mp is not None: assert get_workers(p._backend) is None @with_multiprocessing def test_parallel_pickling(): """Check that pmap captures the errors when it is passed an object that cannot be pickled. 
""" class UnpicklableObject(object): def __reduce__(self): raise RuntimeError("123") with raises(PicklingError, match=r"the task to send"): Parallel(n_jobs=2, backend="loky")( delayed(id)(UnpicklableObject()) for _ in range(10) ) @with_numpy @with_multiprocessing @parametrize("byteorder", ["<", ">", "="]) @parametrize("max_nbytes", [1, "1M"]) def test_parallel_byteorder_corruption(byteorder, max_nbytes): def inspect_byteorder(x): return x, x.dtype.byteorder x = np.arange(6).reshape((2, 3)).view(f"{byteorder}i4") initial_np_byteorder = x.dtype.byteorder result = Parallel(n_jobs=2, backend="loky", max_nbytes=max_nbytes)( delayed(inspect_byteorder)(x) for _ in range(3) ) for x_returned, byteorder_in_worker in result: assert byteorder_in_worker == initial_np_byteorder assert byteorder_in_worker == x_returned.dtype.byteorder np.testing.assert_array_equal(x, x_returned) @parametrize("backend", PARALLEL_BACKENDS) def test_parallel_timeout_success(backend): # Check that timeout isn't thrown when function is fast enough assert ( len( Parallel(n_jobs=2, backend=backend, timeout=30)( delayed(sleep)(0.001) for x in range(10) ) ) == 10 ) @with_multiprocessing @parametrize("backend", PARALLEL_BACKENDS) def test_parallel_timeout_fail(backend): # Check that timeout properly fails when function is too slow with raises(TimeoutError): Parallel(n_jobs=2, backend=backend, timeout=0.01)( delayed(sleep)(10) for x in range(10) ) @with_multiprocessing @parametrize("backend", set(RETURN_GENERATOR_BACKENDS) - {"sequential"}) @parametrize("return_as", ["generator", "generator_unordered"]) def test_parallel_timeout_fail_with_generator(backend, return_as): # Check that timeout properly fails when function is too slow with # return_as=generator with raises(TimeoutError): list( Parallel(n_jobs=2, backend=backend, return_as=return_as, timeout=0.1)( delayed(sleep)(10) for x in range(10) ) ) # Fast tasks and high timeout should not raise list( Parallel(n_jobs=2, backend=backend, return_as=return_as, 
timeout=10)( delayed(sleep)(0.01) for x in range(10) ) ) @with_multiprocessing @parametrize("backend", PROCESS_BACKENDS) def test_error_capture(backend): # Check that error are captured, and that correct exceptions # are raised. if mp is not None: with raises(ZeroDivisionError): Parallel(n_jobs=2, backend=backend)( [delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))] ) with raises(KeyboardInterrupt): Parallel(n_jobs=2, backend=backend)( [delayed(interrupt_raiser)(x) for x in (1, 0)] ) # Try again with the context manager API with Parallel(n_jobs=2, backend=backend) as parallel: assert get_workers(parallel._backend) is not None original_workers = get_workers(parallel._backend) with raises(ZeroDivisionError): parallel([delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))]) # The managed pool should still be available and be in a working # state despite the previously raised (and caught) exception assert get_workers(parallel._backend) is not None # The pool should have been interrupted and restarted: assert get_workers(parallel._backend) is not original_workers assert [f(x, y=1) for x in range(10)] == parallel( delayed(f)(x, y=1) for x in range(10) ) original_workers = get_workers(parallel._backend) with raises(KeyboardInterrupt): parallel([delayed(interrupt_raiser)(x) for x in (1, 0)]) # The pool should still be available despite the exception assert get_workers(parallel._backend) is not None # The pool should have been interrupted and restarted: assert get_workers(parallel._backend) is not original_workers assert [f(x, y=1) for x in range(10)] == parallel( delayed(f)(x, y=1) for x in range(10) ), ( parallel._iterating, parallel.n_completed_tasks, parallel.n_dispatched_tasks, parallel._aborting, ) # Check that the inner pool has been terminated when exiting the # context manager assert get_workers(parallel._backend) is None else: with raises(KeyboardInterrupt): Parallel(n_jobs=2)([delayed(interrupt_raiser)(x) for x in (1, 0)]) # wrapped exceptions should 
inherit from the class of the original # exception to make it easy to catch them with raises(ZeroDivisionError): Parallel(n_jobs=2)([delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))]) with raises(MyExceptionWithFinickyInit): Parallel(n_jobs=2, verbose=0)( (delayed(exception_raiser)(i, custom_exception=True) for i in range(30)) ) @with_multiprocessing @parametrize("backend", BACKENDS) def test_error_in_task_iterator(backend): def my_generator(raise_at=0): for i in range(20): if i == raise_at: raise ValueError("Iterator Raising Error") yield i with Parallel(n_jobs=2, backend=backend) as p: # The error is raised in the pre-dispatch phase with raises(ValueError, match="Iterator Raising Error"): p(delayed(square)(i) for i in my_generator(raise_at=0)) # The error is raised when dispatching a new task after the # pre-dispatch (likely to happen in a different thread) with raises(ValueError, match="Iterator Raising Error"): p(delayed(square)(i) for i in my_generator(raise_at=5)) # Same, but raises long after the pre-dispatch phase with raises(ValueError, match="Iterator Raising Error"): p(delayed(square)(i) for i in my_generator(raise_at=19)) def consumer(queue, item): queue.append("Consumed %s" % item) @parametrize("backend", BACKENDS) @parametrize( "batch_size, expected_queue", [ ( 1, [ "Produced 0", "Consumed 0", "Produced 1", "Consumed 1", "Produced 2", "Consumed 2", "Produced 3", "Consumed 3", "Produced 4", "Consumed 4", "Produced 5", "Consumed 5", ], ), ( 4, [ # First Batch "Produced 0", "Produced 1", "Produced 2", "Produced 3", "Consumed 0", "Consumed 1", "Consumed 2", "Consumed 3", # Second batch "Produced 4", "Produced 5", "Consumed 4", "Consumed 5", ], ), ], ) def test_dispatch_one_job(backend, batch_size, expected_queue): """Test that with only one job, Parallel does act as a iterator.""" queue = list() def producer(): for i in range(6): queue.append("Produced %i" % i) yield i Parallel(n_jobs=1, batch_size=batch_size, backend=backend)( 
delayed(consumer)(queue, x) for x in producer() ) assert queue == expected_queue assert len(queue) == 12 @with_multiprocessing @parametrize("backend", PARALLEL_BACKENDS) def test_dispatch_multiprocessing(backend): """Check that using pre_dispatch Parallel does indeed dispatch items lazily. """ manager = mp.Manager() queue = manager.list() def producer(): for i in range(6): queue.append("Produced %i" % i) yield i Parallel(n_jobs=2, batch_size=1, pre_dispatch=3, backend=backend)( delayed(consumer)(queue, "any") for _ in producer() ) queue_contents = list(queue) assert queue_contents[0] == "Produced 0" # Only 3 tasks are pre-dispatched out of 6. The 4th task is dispatched only # after any of the first 3 jobs have completed. first_consumption_index = queue_contents[:4].index("Consumed any") assert first_consumption_index > -1 produced_3_index = queue_contents.index("Produced 3") # 4th task produced assert produced_3_index > first_consumption_index assert len(queue) == 12 def test_batching_auto_threading(): # batching='auto' with the threading backend leaves the effective batch # size to 1 (no batching) as it has been found to never be beneficial with # this low-overhead backend. with Parallel(n_jobs=2, batch_size="auto", backend="threading") as p: p(delayed(id)(i) for i in range(5000)) # many very fast tasks assert p._backend.compute_batch_size() == 1 @with_multiprocessing @parametrize("backend", PROCESS_BACKENDS) def test_batching_auto_subprocesses(backend): with Parallel(n_jobs=2, batch_size="auto", backend=backend) as p: p(delayed(id)(i) for i in range(5000)) # many very fast tasks # It should be strictly larger than 1 but as we don't want heisen # failures on clogged CI worker environment be safe and only check that # it's a strictly positive number. 
assert p._backend.compute_batch_size() > 0 def test_exception_dispatch(): """Make sure that exception raised during dispatch are indeed captured""" with raises(ValueError): Parallel(n_jobs=2, pre_dispatch=16, verbose=0)( delayed(exception_raiser)(i) for i in range(30) ) def nested_function_inner(i): Parallel(n_jobs=2)(delayed(exception_raiser)(j) for j in range(30)) def nested_function_outer(i): Parallel(n_jobs=2)(delayed(nested_function_inner)(j) for j in range(30)) @with_multiprocessing @parametrize("backend", PARALLEL_BACKENDS) @pytest.mark.xfail(reason="https://github.com/joblib/loky/pull/255") def test_nested_exception_dispatch(backend): """Ensure errors for nested joblib cases gets propagated We rely on the Python 3 built-in __cause__ system that already report this kind of information to the user. """ with raises(ValueError) as excinfo: Parallel(n_jobs=2, backend=backend)( delayed(nested_function_outer)(i) for i in range(30) ) # Check that important information such as function names are visible # in the final error message reported to the user report_lines = format_exception(excinfo.type, excinfo.value, excinfo.tb) report = "".join(report_lines) assert "nested_function_outer" in report assert "nested_function_inner" in report assert "exception_raiser" in report assert type(excinfo.value) is ValueError
MyExceptionWithFinickyInit
python
kamyu104__LeetCode-Solutions
Python/iterator-for-combination.py
{ "start": 63, "end": 719 }
class ____(object): def __init__(self, characters, combinationLength): """ :type characters: str :type combinationLength: int """ self.__it = itertools.combinations(characters, combinationLength) self.__curr = None self.__last = characters[-combinationLength:] def next(self): """ :rtype: str """ self.__curr = "".join(self.__it.next()) return self.__curr def hasNext(self): """ :rtype: bool """ return self.__curr != self.__last # Time: O(k), per operation # Space: O(k) import functools
CombinationIterator
python
pytorch__pytorch
test/torch_np/test_basic.py
{ "start": 1428, "end": 2381 }
class ____(TestCase): """Base for smoke tests of one-arg functions: (array_like) -> (array_like) Accepts array_likes, torch.Tensors, w.ndarays; returns an ndarray """ @parametrize("func", one_arg_funcs) def test_asarray_tensor(self, func): t = torch.Tensor([[1.0, 2, 3], [4, 5, 6]]) ta = func(t) assert isinstance(ta, w.ndarray) @parametrize("func", one_arg_funcs) def test_asarray_list(self, func): lst = [[1.0, 2, 3], [4, 5, 6]] la = func(lst) assert isinstance(la, w.ndarray) @parametrize("func", one_arg_funcs) def test_asarray_array(self, func): a = w.asarray([[1.0, 2, 3], [4, 5, 6]]) la = func(a) assert isinstance(la, w.ndarray) one_arg_axis_funcs = [ w.argmax, w.argmin, w.prod, w.sum, w.all, w.any, w.mean, w.argsort, w.std, w.var, w.flip, ] @instantiate_parametrized_tests
TestOneArr
python
getsentry__sentry
src/sentry/api/endpoints/project_rule_preview.py
{ "start": 2887, "end": 2989 }
class ____(BaseGroupSerializerResponse): inbox: InboxDetails lastTriggered: int
_PreviewResponse
python
pypa__setuptools
setuptools/tests/test_windows_wrappers.py
{ "start": 788, "end": 1894 }
class ____: @classmethod def prep_script(cls, template): python_exe = subprocess.list2cmdline([sys.executable]) return template % locals() @classmethod def create_script(cls, tmpdir): """ Create a simple script, foo-script.py Note that the script starts with a Unix-style '#!' line saying which Python executable to run. The wrapper will use this line to find the correct Python executable. """ script = cls.prep_script(cls.script_tmpl) with (tmpdir / cls.script_name).open('w') as f: f.write(script) # also copy cli.exe to the sample directory with (tmpdir / cls.wrapper_name).open('wb') as f: w = resources.files('setuptools').joinpath(cls.wrapper_source).read_bytes() f.write(w) def win_launcher_exe(prefix): """A simple routine to select launcher script based on platform.""" assert prefix in ('cli', 'gui') if platform.machine() == "ARM64": return f"{prefix}-arm64.exe" else: return f"{prefix}-32.exe"
WrapperTester
python
numba__numba
numba/core/typing/builtins.py
{ "start": 10751, "end": 10830 }
class ____(BitwiseLogicOperation): pass @infer_global(operator.xor)
BitwiseOr
python
apache__airflow
airflow-core/src/airflow/triggers/testing.py
{ "start": 907, "end": 1224 }
class ____(BaseTrigger): """ A trigger that always succeeds immediately. Should only be used for testing. """ def serialize(self) -> tuple[str, dict[str, Any]]: return ("airflow.triggers.testing.SuccessTrigger", {}) async def run(self): yield TriggerEvent(True)
SuccessTrigger
python
TheAlgorithms__Python
scheduling/multi_level_feedback_queue.py
{ "start": 32, "end": 592 }
class ____: def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None: self.process_name = process_name # process name self.arrival_time = arrival_time # arrival time of the process # completion time of finished process or last interrupted time self.stop_time = arrival_time self.burst_time = burst_time # remaining burst time self.waiting_time = 0 # total time of the process wait in ready queue self.turnaround_time = 0 # time from arrival time to completion time
Process
python
openai__openai-python
src/openai/types/realtime/realtime_connect_params.py
{ "start": 202, "end": 288 }
class ____(TypedDict, total=False): call_id: str model: str
RealtimeConnectParams
python
pymupdf__PyMuPDF
src/__init__.py
{ "start": 834593, "end": 835582 }
class ____(mupdf.FzDevice2): def __init__(self, result, layers): super().__init__() self.result = result self.layers = layers self.layer_name = "" self.use_virtual_fill_path() self.use_virtual_stroke_path() self.use_virtual_fill_text() self.use_virtual_stroke_text() self.use_virtual_ignore_text() self.use_virtual_fill_shade() self.use_virtual_fill_image() self.use_virtual_fill_image_mask() self.use_virtual_begin_layer() self.use_virtual_end_layer() begin_layer = jm_lineart_begin_layer end_layer = jm_lineart_end_layer fill_path = jm_bbox_fill_path stroke_path = jm_bbox_stroke_path fill_text = jm_bbox_fill_text stroke_text = jm_bbox_stroke_text ignore_text = jm_bbox_ignore_text fill_shade = jm_bbox_fill_shade fill_image = jm_bbox_fill_image fill_image_mask = jm_bbox_fill_image_mask
JM_new_bbox_device_Device
python
django__django
tests/model_inheritance/tests.py
{ "start": 23301, "end": 24316 }
class ____(TestCase): @classmethod def setUpTestData(cls): cls.grand_parent = GrandParent.objects.create( email="grand_parent@example.com", first_name="grand", last_name="parent", ) def test_unique(self): grand_child = GrandChild( email=self.grand_parent.email, first_name="grand", last_name="child", ) msg = "Grand parent with this Email already exists." with self.assertRaisesMessage(ValidationError, msg): grand_child.validate_unique() def test_unique_together(self): grand_child = GrandChild( email="grand_child@example.com", first_name=self.grand_parent.first_name, last_name=self.grand_parent.last_name, ) msg = "Grand parent with this First name and Last name already exists." with self.assertRaisesMessage(ValidationError, msg): grand_child.validate_unique()
InheritanceUniqueTests
python
geekcomputers__Python
venv/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py
{ "start": 864, "end": 1310 }
class ____(ResolverException): def __init__(self, candidate, criterion): super(InconsistentCandidate, self).__init__(candidate, criterion) self.candidate = candidate self.criterion = criterion def __str__(self): return "Provided candidate {!r} does not satisfy {}".format( self.candidate, ", ".join(repr(r) for r in self.criterion.iter_requirement()), )
InconsistentCandidate
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typePrinter1.py
{ "start": 445, "end": 586 }
class ____: ... def func3(v: typePrinter2.IntOrStr | IntOrStr | None): reveal_type(v, expected_text="int | str | IntOrStr | None")
IntOrStr
python
huggingface__transformers
src/transformers/models/hgnet_v2/modular_hgnet_v2.py
{ "start": 9107, "end": 10188 }
class ____(RTDetrResNetConvLayer): def __init__( self, in_channels: int, out_channels: int, kernel_size: int, stride: int = 1, groups: int = 1, activation: str = "relu", use_learnable_affine_block: bool = False, ): super().__init__(in_channels, out_channels, kernel_size, stride, activation) self.convolution = nn.Conv2d( in_channels, out_channels, kernel_size=kernel_size, stride=stride, groups=groups, padding=(kernel_size - 1) // 2, bias=False, ) if activation and use_learnable_affine_block: self.lab = HGNetV2LearnableAffineBlock() else: self.lab = nn.Identity() def forward(self, input: Tensor) -> Tensor: hidden_state = self.convolution(input) hidden_state = self.normalization(hidden_state) hidden_state = self.activation(hidden_state) hidden_state = self.lab(hidden_state) return hidden_state
HGNetV2ConvLayer
python
mozilla__bleach
bleach/_vendor/html5lib/_ihatexml.py
{ "start": 12711, "end": 16728 }
class ____(object): replacementRegexp = re.compile(r"U[\dA-F]{5,5}") def __init__(self, dropXmlnsLocalName=False, dropXmlnsAttrNs=False, preventDoubleDashComments=False, preventDashAtCommentEnd=False, replaceFormFeedCharacters=True, preventSingleQuotePubid=False): self.dropXmlnsLocalName = dropXmlnsLocalName self.dropXmlnsAttrNs = dropXmlnsAttrNs self.preventDoubleDashComments = preventDoubleDashComments self.preventDashAtCommentEnd = preventDashAtCommentEnd self.replaceFormFeedCharacters = replaceFormFeedCharacters self.preventSingleQuotePubid = preventSingleQuotePubid self.replaceCache = {} def coerceAttribute(self, name, namespace=None): if self.dropXmlnsLocalName and name.startswith("xmlns:"): warnings.warn("Attributes cannot begin with xmlns", DataLossWarning) return None elif (self.dropXmlnsAttrNs and namespace == "http://www.w3.org/2000/xmlns/"): warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning) return None else: return self.toXmlName(name) def coerceElement(self, name): return self.toXmlName(name) def coerceComment(self, data): if self.preventDoubleDashComments: while "--" in data: warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning) data = data.replace("--", "- -") if data.endswith("-"): warnings.warn("Comments cannot end in a dash", DataLossWarning) data += " " return data def coerceCharacters(self, data): if self.replaceFormFeedCharacters: for _ in range(data.count("\x0C")): warnings.warn("Text cannot contain U+000C", DataLossWarning) data = data.replace("\x0C", " ") # Other non-xml characters return data def coercePubid(self, data): dataOutput = data for char in nonPubidCharRegexp.findall(data): warnings.warn("Coercing non-XML pubid", DataLossWarning) replacement = self.getReplacementCharacter(char) dataOutput = dataOutput.replace(char, replacement) if self.preventSingleQuotePubid and dataOutput.find("'") >= 0: warnings.warn("Pubid cannot contain single quote", DataLossWarning) dataOutput = 
dataOutput.replace("'", self.getReplacementCharacter("'")) return dataOutput def toXmlName(self, name): nameFirst = name[0] nameRest = name[1:] m = nonXmlNameFirstBMPRegexp.match(nameFirst) if m: warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning) nameFirstOutput = self.getReplacementCharacter(nameFirst) else: nameFirstOutput = nameFirst nameRestOutput = nameRest replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest)) for char in replaceChars: warnings.warn("Coercing non-XML name: %s" % name, DataLossWarning) replacement = self.getReplacementCharacter(char) nameRestOutput = nameRestOutput.replace(char, replacement) return nameFirstOutput + nameRestOutput def getReplacementCharacter(self, char): if char in self.replaceCache: replacement = self.replaceCache[char] else: replacement = self.escapeChar(char) return replacement def fromXmlName(self, name): for item in set(self.replacementRegexp.findall(name)): name = name.replace(item, self.unescapeChar(item)) return name def escapeChar(self, char): replacement = "U%05X" % ord(char) self.replaceCache[char] = replacement return replacement def unescapeChar(self, charcode): return chr(int(charcode[1:], 16))
InfosetFilter
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 672030, "end": 672401 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("client_mutation_id", "project_column") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") project_column = sgqlc.types.Field("ProjectColumn", graphql_name="projectColumn")
UpdateProjectColumnPayload
python
pandas-dev__pandas
pandas/tests/indexes/timedeltas/test_indexing.py
{ "start": 11637, "end": 12501 }
class ____: def test_contains_nonunique(self): # GH#9512 for vals in ( [0, 1, 0], [0, 0, -1], [0, -1, -1], ["00:01:00", "00:01:00", "00:02:00"], ["00:01:00", "00:01:00", "00:00:01"], ): idx = TimedeltaIndex(vals) assert idx[0] in idx def test_contains(self): # Checking for any NaT-like objects # GH#13603, GH#59051 msg = "'d' is deprecated and will be removed in a future version." with tm.assert_produces_warning(Pandas4Warning, match=msg): td = to_timedelta(range(5), unit="d") + offsets.Hour(1) for v in [NaT, None, float("nan"), np.nan]: assert v not in td td = to_timedelta([NaT]) for v in [NaT, None, float("nan"), np.nan]: assert v in td
TestContains
python
huggingface__transformers
src/transformers/models/table_transformer/modeling_table_transformer.py
{ "start": 13606, "end": 15435 }
class ____(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. """ def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.int64, device=pixel_values.device).float() dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / self.embedding_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding with Detr->TableTransformer
TableTransformerSinePositionEmbedding
python
run-llama__llama_index
llama-index-core/llama_index/core/callbacks/schema.py
{ "start": 1417, "end": 2923 }
class ____(str, Enum): DOCUMENTS = "documents" # list of documents before parsing CHUNKS = "chunks" # list of text chunks NODES = "nodes" # list of nodes PROMPT = "formatted_prompt" # formatted prompt sent to LLM MESSAGES = "messages" # list of messages sent to LLM COMPLETION = "completion" # completion from LLM RESPONSE = "response" # message response from LLM QUERY_STR = "query_str" # query used for query engine SUB_QUESTION = "sub_question" # a sub question & answer + sources EMBEDDINGS = "embeddings" # list of embeddings TOP_K = "top_k" # top k nodes retrieved ADDITIONAL_KWARGS = "additional_kwargs" # additional kwargs for event call SERIALIZED = "serialized" # serialized object for event caller FUNCTION_CALL = "function_call" # function call for the LLM FUNCTION_OUTPUT = "function_call_response" # function call output TOOL = "tool" # tool used in LLM call MODEL_NAME = "model_name" # model name used in an event TEMPLATE = "template" # template used in LLM call TEMPLATE_VARS = "template_vars" # template variables used in LLM call SYSTEM_PROMPT = "system_prompt" # system prompt used in LLM call QUERY_WRAPPER_PROMPT = "query_wrapper_prompt" # query wrapper prompt used in LLM EXCEPTION = "exception" # exception raised in an event # events that will never have children events LEAF_EVENTS = (CBEventType.CHUNKING, CBEventType.LLM, CBEventType.EMBEDDING) @dataclass
EventPayload
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_hyperlink29.py
{ "start": 315, "end": 1538 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("hyperlink29.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with hyperlinks.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() format1 = workbook.add_format({"hyperlink": True}) format2 = workbook.add_format({"font_color": "red", "underline": 1}) worksheet.write_url("A1", "http://www.perl.org/", format1) worksheet.write_url("A2", "http://www.perl.com/", format2) workbook.close() self.assertExcelEqual() def test_create_file_with_default_format(self): """Test the creation of a simple XlsxWriter file with hyperlinks.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() format2 = workbook.add_format({"font_color": "red", "underline": 1}) worksheet.write_url("A1", "http://www.perl.org/") worksheet.write_url("A2", "http://www.perl.com/", format2) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
ray-project__ray
rllib/utils/exploration/soft_q.py
{ "start": 491, "end": 2115 }
class ____(StochasticSampling):
    """StochasticSampling specialization using a temperature-scaled Categorical.

    Samples from a Categorical parameterized by the model output divided by
    the temperature. Returns the argmax iff explore=False.
    """

    def __init__(
        self,
        action_space: Space,
        *,
        framework: Optional[str],
        temperature: float = 1.0,
        **kwargs
    ):
        """Initializes a SoftQ Exploration object.

        Args:
            action_space: The gym action space used by the environment.
            temperature: The temperature to divide model outputs by before
                creating the Categorical distribution to sample from.
            framework: One of None, "tf", "torch".
        """
        # Soft-Q only makes sense over (multi-)discrete action spaces.
        assert isinstance(action_space, (Discrete, MultiDiscrete))
        super().__init__(action_space, framework=framework, **kwargs)
        self.temperature = temperature

    @override(StochasticSampling)
    def get_exploration_action(
        self,
        action_distribution: ActionDistribution,
        timestep: Union[int, TensorType],
        explore: bool = True,
    ):
        dist_cls = type(action_distribution)
        assert issubclass(dist_cls, (Categorical, TorchCategorical))
        # Rebuild the distribution from the same inputs so the configured
        # temperature is actually applied before sampling.
        tempered = dist_cls(
            action_distribution.inputs, self.model, temperature=self.temperature
        )
        # The parent class handles the explore/exploit branching from here.
        return super().get_exploration_action(
            action_distribution=tempered, timestep=timestep, explore=explore
        )
SoftQ
python
astropy__astropy
astropy/io/tests/safeio.py
{ "start": 77, "end": 360 }
class ____(io.BufferedWriter):
    """Buffered writer that rejects writes consuming zero bytes.

    Used in tests to catch code paths that silently perform empty writes.
    """

    def write(self, buffer):
        # Delegate to the real writer, then veto a 0-byte outcome.
        written = super().write(buffer)
        if written == 0:
            raise ValueError("This writer does not allow empty writes")
        return written
CatchZeroByteWriter
python
Pylons__pyramid
tests/test_path.py
{ "start": 1151, "end": 2109 }
class ____(unittest.TestCase):
    """Tests for pyramid.path.caller_module, which inspects the call stack."""

    def _callFUT(self, *arg, **kw):
        # This indirection adds exactly one stack frame; the ``level``
        # arguments in the tests below are calibrated to that call depth.
        from pyramid.path import caller_module
        return caller_module(*arg, **kw)

    def test_it_level_1(self):
        from . import test_path
        result = self._callFUT(1)
        self.assertEqual(result, test_path)

    def test_it_level_2(self):
        from . import test_path
        result = self._callFUT(2)
        self.assertEqual(result, test_path)

    def test_it_level_3(self):
        # Three frames up lands outside this module (unittest machinery),
        # so the resolved module must differ from test_path.
        from . import test_path
        result = self._callFUT(3)
        self.assertNotEqual(result, test_path)

    def test_it_no___name__(self):
        # When the frame's globals lack __name__, caller_module should fall
        # back to the '__main__' entry of the injected sys.modules mapping.
        class DummyFrame:
            f_globals = {}

        class DummySys:
            def _getframe(self, level):
                return DummyFrame()

            modules = {'__main__': 'main'}

        dummy_sys = DummySys()
        result = self._callFUT(3, sys=dummy_sys)
        self.assertEqual(result, 'main')
TestCallerModule
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-service-now/llama_index/readers/service_now/event.py
{ "start": 2888, "end": 3495 }
class ____(BaseEvent):
    """Event fired when an attachment is skipped."""

    # Identifier of the page the attachment belongs to.
    page_id: str = Field(description="ID of the parent page")
    attachment_id: str = Field(description="ID of the attachment")
    attachment_name: str = Field(description="Name of the attachment")
    attachment_type: str = Field(description="MIME type of the attachment")
    # Size in bytes, as reported by the source system.
    attachment_size: int = Field(description="Size of the attachment in bytes")
    attachment_link: str = Field(description="Link to the attachment")
    # Human-readable explanation of the skip decision.
    reason: str = Field(description="Reason why the attachment was skipped")
SNOWKBAttachmentSkippedEvent
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/assignment3.py
{ "start": 1035, "end": 1229 }
class ____(Protocol):
    """Callable protocol with signature ``(x: int, y: dict[str, int]) -> int``."""

    def __call__(self, x: int, y: dict[str, int]) -> int: ...


# Type-checker test sample: the lambda's parameter types are inferred from
# the declared (expected) type of `v1`.
# NOTE(review): the annotation references `Adder` even though the class name
# above is masked — this is an artifact of the dataset masking, not a bug.
v1: Adder = lambda x, y: x + y["hi"]

# The checker is expected to report the fully inferred lambda signature here.
reveal_type(v1, expected_text="(x: int, y: dict[str, int]) -> int")
Adder
python
apache__airflow
providers/http/src/airflow/providers/http/exceptions.py
{ "start": 973, "end": 1084 }
class ____(AirflowException):
    """Exception raised for invalid HTTP methods in Http hook.

    A plain marker subclass of :class:`AirflowException`; it carries no
    additional state or behavior.
    """
HttpMethodException
python
dask__dask
dask/dataframe/dask_expr/_describe.py
{ "start": 2878, "end": 3362 }
class ____(DescribeNumeric):
    """Describe-expression for non-numeric columns.

    Lowers into three aggregates — number of distinct values, non-null
    count, and the most frequent value — combined by
    DescribeNonNumericAggregate.
    """

    _parameters = ["frame", "split_every"]

    def _lower(self):
        data = self.frame
        # Sorted value counts drive both the unique count and the top value.
        counts = ValueCounts(data, split_every=self.split_every, sort=True)
        nonzero = Filter(counts, counts > 0)
        aggregates = [
            Size(nonzero),
            data.count(split_every=self.split_every),
            Head(counts, n=1),
        ]
        return DescribeNonNumericAggregate(data._meta.name, *aggregates)
DescribeNonNumeric
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/enum14.py
{ "start": 288, "end": 351 }
class ____(Enum):
    # Type-checker test sample: an enum member annotation that refers back to
    # the member itself is invalid and must be flagged. Do not "fix" this —
    # the error is the point of the sample.
    # This should generate an error.
    x: B.x
B
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/engine/cursor.py
{ "start": 52567, "end": 53882 }
class ____(ResultMetaData):
    """ResultMetaData for statements that return no rows.

    Every key/metadata accessor funnels into :meth:`_we_dont_return_rows`,
    which raises ResourceClosedError — signalling to the caller that this
    result has no row data to offer.
    """

    __slots__ = ()

    # Statements using this metadata never produce rows.
    returns_rows = False

    def _we_dont_return_rows(
        self, err: Optional[BaseException] = None
    ) -> NoReturn:
        # Chain the triggering exception (if any) so the cause is preserved.
        raise exc.ResourceClosedError(
            "This result object does not return rows. "
            "It has been closed automatically."
        ) from err

    def _index_for_key(self, keys: _KeyIndexType, raiseerr: bool) -> NoReturn:
        self._we_dont_return_rows()

    def _metadata_for_keys(self, keys: Sequence[_KeyIndexType]) -> NoReturn:
        self._we_dont_return_rows()

    def _reduce(self, keys: Sequence[_KeyIndexType]) -> NoReturn:
        self._we_dont_return_rows()

    @property
    def _keymap(self) -> NoReturn:  # type: ignore[override]
        self._we_dont_return_rows()

    @property
    def _key_to_index(self) -> NoReturn:  # type: ignore[override]
        self._we_dont_return_rows()

    @property
    def _processors(self) -> NoReturn:  # type: ignore[override]
        self._we_dont_return_rows()

    @property
    def keys(self) -> NoReturn:
        self._we_dont_return_rows()


# Shared singleton; safe because the class is stateless (__slots__ = ()).
_NO_RESULT_METADATA = _NoResultMetaData()


def null_dml_result() -> IteratorResult[Any]:
    """Return an empty, already-soft-closed IteratorResult for DML statements."""
    it: IteratorResult[Any] = IteratorResult(_NoResultMetaData(), iter([]))
    it._soft_close()
    return it
_NoResultMetaData
python
huggingface__transformers
src/transformers/models/kosmos2_5/image_processing_kosmos2_5.py
{ "start": 3291, "end": 14966 }
class ____(BaseImageProcessor): r""" Constructs a Kosmos2_5 image processor. Args: do_convert_rgb (`bool`, *optional*, defaults to `True`): Whether to convert the image to RGB. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. According to Kosmos2_5 paper and code, the image is normalized with its own mean and standard deviation. patch_size (`Dict[str, int]`, *optional*, defaults to `{"height": 16, "width": 16}`): The patch size to use for the image. According to Kosmos2_5 paper and code, the patch size is 16x16. max_patches (`int`, *optional*, defaults to 4096): The maximum number of patches to extract from the image as per the [KOSMOS 2.5 paper](https://huggingface.co/papers/2309.11419). """ model_input_names = ["flattened_patches"] valid_kwargs = Kosmos2_5ImageProcessorKwargs def __init__( self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Optional[dict[str, int]] = None, max_patches: int = 4096, **kwargs, ) -> None: super().__init__(**kwargs) self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16} self.do_normalize = do_normalize self.do_convert_rgb = do_convert_rgb self.max_patches = max_patches def extract_flattened_patches( self, image: np.ndarray, max_patches: int, patch_size: dict, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Extract flattened patches from an image. Args: image (`np.ndarray`): Image to extract flattened patches from. max_patches (`int`): Maximum number of patches to extract. patch_size (`dict`): Dictionary containing the patch height and width. Returns: result (`np.ndarray`): A sequence of `max_patches` flattened patches. 
""" requires_backends(self.extract_flattened_patches, "torch") # convert to torch image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format) image = torch.from_numpy(image) patch_height, patch_width = patch_size["height"], patch_size["width"] image_height, image_width = get_image_size(image, ChannelDimension.FIRST) # maximize scale s.t. scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1) num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1) resized_height = max(num_feasible_rows * patch_height, 1) resized_width = max(num_feasible_cols * patch_width, 1) image = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] patches = torch_extract_patches(image, patch_height, patch_width) patches_shape = patches.shape rows = patches_shape[1] columns = patches_shape[2] depth = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] patches = patches.reshape([rows * columns, depth]) # [rows * columns, 1] row_ids = ( torch.arange(rows, device=patches.device) .reshape([rows, 1]) .repeat(1, columns) .reshape([rows * columns, 1]) ) col_ids = ( torch.arange(columns, device=patches.device) .reshape([1, columns]) .repeat(rows, 1) .reshape([rows * columns, 1]) ) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. 
# [rows * columns, 1] row_ids = row_ids.to(torch.float32) col_ids = col_ids.to(torch.float32) # [rows * columns, 2 + patch_height * patch_width * image_channels] result = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float() result = to_numpy_array(result) return result, resized_width, resized_height, rows, columns # Copied from transformers.models.pix2struct.image_processing_pix2struct.Pix2StructImageProcessor.normalize def normalize( self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. Args: image (`np.ndarray`): Image to normalize. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred. """ if image.dtype == np.uint8: image = image.astype(np.float32) # take mean across the whole `image` mean = np.mean(image) std = np.std(image) adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape))) return normalize( image, mean=mean, std=adjusted_stddev, data_format=data_format, input_data_format=input_data_format, **kwargs, ) def preprocess( self, images: ImageInput, do_convert_rgb: Optional[bool] = None, do_normalize: Optional[bool] = None, max_patches: Optional[int] = None, patch_size: Optional[dict[str, int]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> ImageInput: """ Preprocess an image or batch of images. 
The processor first computes the maximum possible number of aspect-ratio preserving patches of size `patch_size` that can be extracted from the image. It then pads the image with zeros to make the image respect the constraint of `max_patches`. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. max_patches (`int`, *optional*, defaults to `self.max_patches`): Maximum number of patches to extract. patch_size (`dict`, *optional*, defaults to `self.patch_size`): Dictionary containing the patch height and width. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: Use the channel dimension format of the input image. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. - `"none"` or `ChannelDimension.NONE`: image in (height, width) format. 
""" do_normalize = do_normalize if do_normalize is not None else self.do_normalize do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb patch_size = patch_size if patch_size is not None else self.patch_size max_patches = max_patches if max_patches is not None else self.max_patches if kwargs.get("data_format") is not None: raise ValueError("data_format is not an accepted input as the outputs are ") images = make_flat_list_of_images(images) if not valid_images(images): raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor") # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if input_data_format is None: # We assume that all images have the same channel dimension format. input_data_format = infer_channel_dimension_format(images[0]) flattened_patches, width, height, rows, cols, attention_masks = [], [], [], [], [], [] for image in images: if do_normalize: image = self.normalize(image=image, input_data_format=input_data_format) # convert to torch tensor and permute patches, resized_width, resized_height, n_rows, n_columns = self.extract_flattened_patches( image=image, max_patches=max_patches, patch_size=patch_size, input_data_format=input_data_format, ) flattened_patches.append(patches) width.append(resized_width) height.append(resized_height) rows.append(n_rows) cols.append(n_columns) # create attention mask in numpy attention_masks.append((patches.sum(axis=-1) != 0).astype(np.float32)) encoded_outputs = BatchFeature( data={ "flattened_patches": flattened_patches, "attention_mask": attention_masks, "width": width, "height": height, "rows": rows, "cols": cols, }, tensor_type=return_tensors, ) return encoded_outputs __all__ = ["Kosmos2_5ImageProcessor"]
Kosmos2_5ImageProcessor
python
huggingface__transformers
tests/models/audio_spectrogram_transformer/test_modeling_audio_spectrogram_transformer.py
{ "start": 8259, "end": 9459 }
class ____(unittest.TestCase):
    """Slow end-to-end integration check against pretrained AST weights."""

    @cached_property
    def default_feature_extractor(self):
        # Feature extraction needs torchaudio; return None so the test can
        # detect the missing backend instead of crashing at import time.
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        # FIX: the original assigned `feature_extractor` twice from the same
        # cached property; the redundant second assignment is removed.
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass (no gradients needed for inference)
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits: 527 AudioSet classes for a single example
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        # Reference values recorded from the pretrained checkpoint.
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4)
ASTModelIntegrationTest
python
huggingface__transformers
src/transformers/models/wav2vec2_bert/configuration_wav2vec2_bert.py
{ "start": 813, "end": 18142 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Wav2Vec2BertModel`]. It is used to instantiate an Wav2Vec2Bert model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2Bert [facebook/wav2vec2-bert-rel-pos-large](https://huggingface.co/facebook/wav2vec2-bert-rel-pos-large) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*): Vocabulary size of the Wav2Vec2Bert model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`Wav2Vec2BertModel`]. Vocabulary size of the model. Defines the different tokens that can be represented by the *inputs_ids* passed to the forward method of [`Wav2Vec2BertModel`]. hidden_size (`int`, *optional*, defaults to 1024): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 24): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. feature_projection_input_dim (`int`, *optional*, defaults to 160): Input dimension of this model, i.e the dimension after processing input audios with [`SeamlessM4TFeatureExtractor`] or [`Wav2Vec2BertProcessor`]. hidden_act (`str` or `function`, *optional*, defaults to `"swish"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported. 
hidden_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for the feature projection. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`Wav2Vec2BertForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://huggingface.co/papers/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates `mask_time_prob*len(time_axis)/mask_time_length ``independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. 
mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2): The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if `mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks`. mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procedure generates `mask_feature_prob*len(feature_axis)/mask_time_length` independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0): The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if `mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks`. ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`Wav2Vec2BertForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`Wav2Vec2BertForCTC`]. 
use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`Wav2Vec2BertForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 768): Dimensionality of the projection before token mean-pooling for classification. tdnn_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`): A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers. tdnn_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*. tdnn_dilation (`tuple[int]` or `list[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`): A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*. xvector_output_dim (`int`, *optional*, defaults to 512): Dimensionality of the *XVector* embedding vectors. pad_token_id (`int`, *optional*, defaults to 0): The id of the _padding_ token. bos_token_id (`int`, *optional*, defaults to 1): The id of the _beginning-of-stream_ token. eos_token_id (`int`, *optional*, defaults to 2): The id of the _end-of-stream_ token. add_adapter (`bool`, *optional*, defaults to `False`): Whether a convolutional attention network should be stacked on top of the Wav2Vec2Bert Encoder. Can be very useful for warm-starting Wav2Vec2Bert for SpeechEncoderDecoder models. adapter_kernel_size (`int`, *optional*, defaults to 3): Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
adapter_stride (`int`, *optional*, defaults to 2): Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`. num_adapter_layers (`int`, *optional*, defaults to 1): Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is True`. adapter_act (`str` or `function`, *optional*, defaults to `"relu"`): The non-linear activation function (function or string) in the adapter layers. If string, `"gelu"`, `"relu"`, `"selu"`, `"swish"` and `"gelu_new"` are supported. use_intermediate_ffn_before_adapter (`bool`, *optional*, defaults to `False`): Whether an intermediate feed-forward block should be stacked on top of the Wav2Vec2Bert Encoder and before the adapter network. Only relevant if `add_adapter is True`. output_hidden_size (`int`, *optional*): Dimensionality of the encoder output layer. If not defined, this defaults to *hidden-size*. Only relevant if `add_adapter is True`. position_embeddings_type (`str`, *optional*, defaults to `"relative_key"`): Can be specified to : - `rotary`, for rotary position embeddings. - `relative`, for relative position embeddings. - `relative_key`, for relative position embeddings as defined by Shaw in [Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155). If left to `None`, no relative position embeddings is applied. rotary_embedding_base (`int`, *optional*, defaults to 10000): If `"rotary"` position embeddings are used, defines the size of the embedding base. max_source_positions (`int`, *optional*, defaults to 5000): if `"relative"` position embeddings are used, defines the maximum source input positions. left_max_position_embeddings (`int`, *optional*, defaults to 64): If `"relative_key"` (aka Shaw) position embeddings are used, defines the left clipping value for relative positions. 
right_max_position_embeddings (`int`, *optional*, defaults to 8): If `"relative_key"` (aka Shaw) position embeddings are used, defines the right clipping value for relative positions. conv_depthwise_kernel_size (`int`, *optional*, defaults to 31): Kernel size of convolutional depthwise 1D layer in Conformer blocks. conformer_conv_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all convolutional layers in Conformer blocks. Example: ```python >>> from transformers import Wav2Vec2BertConfig, Wav2Vec2BertModel >>> # Initializing a Wav2Vec2Bert facebook/wav2vec2-bert-rel-pos-large style configuration >>> configuration = Wav2Vec2BertConfig() >>> # Initializing a model (with random weights) from the facebook/wav2vec2-bert-rel-pos-large style configuration >>> model = Wav2Vec2BertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "wav2vec2-bert" def __init__( self, vocab_size=None, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, feature_projection_input_dim=160, hidden_act="swish", hidden_dropout=0.0, activation_dropout=0.0, attention_dropout=0.0, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=768, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=1, adapter_act="relu", use_intermediate_ffn_before_adapter=False, output_hidden_size=None, position_embeddings_type="relative_key", rotary_embedding_base=10000, max_source_positions=5000, 
left_max_position_embeddings=64, right_max_position_embeddings=8, conv_depthwise_kernel_size=31, conformer_conv_dropout=0.1, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.feature_projection_input_dim = feature_projection_input_dim self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size self.use_weighted_layer_sum = use_weighted_layer_sum self.max_source_positions = max_source_positions if position_embeddings_type is not None and position_embeddings_type not in [ "rotary", "relative", "relative_key", ]: raise ValueError( """ `position_embeddings_type` is not valid. It must be one of the following values: `["rotary", "relative", "relative_key"]` or left as `None`. 
""" ) self.position_embeddings_type = position_embeddings_type self.rotary_embedding_base = rotary_embedding_base self.left_max_position_embeddings = left_max_position_embeddings self.right_max_position_embeddings = right_max_position_embeddings # Conformer-block related self.conv_depthwise_kernel_size = conv_depthwise_kernel_size self.conformer_conv_dropout = conformer_conv_dropout # fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # adapter self.add_adapter = add_adapter self.adapter_kernel_size = adapter_kernel_size self.adapter_stride = adapter_stride self.num_adapter_layers = num_adapter_layers self.adapter_act = adapter_act self.output_hidden_size = output_hidden_size if output_hidden_size is not None else hidden_size if use_intermediate_ffn_before_adapter and not add_adapter: raise ValueError("`use_intermediate_ffn_before_adapter` is `True` but `add_adapter` is `False`.") self.use_intermediate_ffn_before_adapter = use_intermediate_ffn_before_adapter # SequenceClassification-specific parameter. Feel free to ignore for other classes. self.classifier_proj_size = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. self.tdnn_dim = list(tdnn_dim) self.tdnn_kernel = list(tdnn_kernel) self.tdnn_dilation = list(tdnn_dilation) self.xvector_output_dim = xvector_output_dim @property def inputs_to_logits_ratio(self): ratio = self.feature_projection_input_dim * 2 if self.add_adapter: ratio = ratio * (self.adapter_stride**self.num_adapter_layers) return ratio __all__ = ["Wav2Vec2BertConfig"]
Wav2Vec2BertConfig
python
faif__python-patterns
patterns/other/hsm/hsm.py
{ "start": 2679, "end": 3142 }
class ____:
    """Base state for the hierarchical state machine.

    Every event handler rejects its transition by default; concrete states
    override only the transitions they actually support.
    """

    def __init__(self, HierachicalStateMachine):
        # NOTE(review): the parameter name carries a typo ("Hierachical") but
        # is part of the public interface — renaming would break keyword
        # callers, so it is documented rather than fixed here.
        self.hsm = HierachicalStateMachine

    def on_switchover(self):
        raise UnsupportedTransition

    def on_fault_trigger(self):
        raise UnsupportedTransition

    def on_diagnostics_failed(self):
        raise UnsupportedTransition

    def on_diagnostics_passed(self):
        raise UnsupportedTransition

    def on_operator_inservice(self):
        raise UnsupportedTransition
Unit
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 229661, "end": 230454 }
class ____(sgqlc.types.Input): """Autogenerated input type of EnqueuePullRequest""" __schema__ = github_schema __field_names__ = ("pull_request_id", "jump", "expected_head_oid", "client_mutation_id") pull_request_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="pullRequestId") """The ID of the pull request to enqueue.""" jump = sgqlc.types.Field(Boolean, graphql_name="jump") """Add the pull request to the front of the queue.""" expected_head_oid = sgqlc.types.Field(GitObjectID, graphql_name="expectedHeadOid") """The expected head OID of the pull request.""" client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") """A unique identifier for the client performing the mutation."""
EnqueuePullRequestInput
python
spack__spack
lib/spack/spack/test/cray_manifest.py
{ "start": 1035, "end": 1856 }
class ____: def __init__(self, name, hash, prefix, version, arch, compiler, dependencies, parameters): self.name = name self.hash = hash self.prefix = prefix self.version = version self.arch = arch self.compiler = compiler self.dependencies = dependencies self.parameters = parameters def to_dict(self): return { "name": self.name, "hash": self.hash, "prefix": self.prefix, "version": self.version, "arch": self.arch, "compiler": self.compiler, "dependencies": self.dependencies, "parameters": self.parameters, } def as_dependency(self, deptypes): return (self.name, {"hash": self.hash, "type": list(deptypes)})
JsonSpecEntry
python
scipy__scipy
scipy/stats/tests/test_hypotests.py
{ "start": 41078, "end": 55995 }
class ____(_TestPythranFunc): def setup_method(self): self.dtypes = self.ALL_INTEGER + self.ALL_FLOAT self.arguments = {0: (np.arange(10), self.ALL_INTEGER + self.ALL_FLOAT), 1: (np.arange(10), self.ALL_INTEGER + self.ALL_FLOAT)} input_array = [self.arguments[idx][0] for idx in self.arguments] # In this case, self.partialfunc can simply be stats.somersd, # since `alternative` is an optional argument. If it is required, # we can use functools.partial to freeze the value, because # we only mainly test various array inputs, not str, etc. self.partialfunc = functools.partial(stats.somersd, alternative='two-sided') self.expected = self.partialfunc(*input_array) def pythranfunc(self, *args): res = self.partialfunc(*args) assert_allclose(res.statistic, self.expected.statistic, atol=1e-15) assert_allclose(res.pvalue, self.expected.pvalue, atol=1e-15) def test_pythranfunc_keywords(self): # Not specifying the optional keyword args table = [[27, 25, 14, 7, 0], [7, 14, 18, 35, 12], [1, 3, 2, 7, 17]] res1 = stats.somersd(table) # Specifying the optional keyword args with default value optional_args = self.get_optional_args(stats.somersd) res2 = stats.somersd(table, **optional_args) # Check if the results are the same in two cases assert_allclose(res1.statistic, res2.statistic, atol=1e-15) assert_allclose(res1.pvalue, res2.pvalue, atol=1e-15) def test_like_kendalltau(self): # All tests correspond with one in test_stats.py `test_kendalltau` # case without ties, con-dis equal zero x = [5, 2, 1, 3, 6, 4, 7, 8] y = [5, 2, 6, 3, 1, 8, 7, 4] # Cross-check with result from SAS FREQ: expected = (0.000000000000000, 1.000000000000000) res = stats.somersd(x, y) assert_allclose(res.statistic, expected[0], atol=1e-15) assert_allclose(res.pvalue, expected[1], atol=1e-15) # case without ties, con-dis equal zero x = [0, 5, 2, 1, 3, 6, 4, 7, 8] y = [5, 2, 0, 6, 3, 1, 8, 7, 4] # Cross-check with result from SAS FREQ: expected = (0.000000000000000, 1.000000000000000) res = stats.somersd(x, y) 
assert_allclose(res.statistic, expected[0], atol=1e-15) assert_allclose(res.pvalue, expected[1], atol=1e-15) # case without ties, con-dis close to zero x = [5, 2, 1, 3, 6, 4, 7] y = [5, 2, 6, 3, 1, 7, 4] # Cross-check with result from SAS FREQ: expected = (-0.142857142857140, 0.630326953157670) res = stats.somersd(x, y) assert_allclose(res.statistic, expected[0], atol=1e-15) assert_allclose(res.pvalue, expected[1], atol=1e-15) # simple case without ties x = np.arange(10) y = np.arange(10) # Cross-check with result from SAS FREQ: # SAS p value is not provided. expected = (1.000000000000000, 0) res = stats.somersd(x, y) assert_allclose(res.statistic, expected[0], atol=1e-15) assert_allclose(res.pvalue, expected[1], atol=1e-15) # swap a couple values and a couple more x = np.arange(10) y = np.array([0, 2, 1, 3, 4, 6, 5, 7, 8, 9]) # Cross-check with result from SAS FREQ: expected = (0.911111111111110, 0.000000000000000) res = stats.somersd(x, y) assert_allclose(res.statistic, expected[0], atol=1e-15) assert_allclose(res.pvalue, expected[1], atol=1e-15) # same in opposite direction x = np.arange(10) y = np.arange(10)[::-1] # Cross-check with result from SAS FREQ: # SAS p value is not provided. 
expected = (-1.000000000000000, 0) res = stats.somersd(x, y) assert_allclose(res.statistic, expected[0], atol=1e-15) assert_allclose(res.pvalue, expected[1], atol=1e-15) # swap a couple values and a couple more x = np.arange(10) y = np.array([9, 7, 8, 6, 5, 3, 4, 2, 1, 0]) # Cross-check with result from SAS FREQ: expected = (-0.9111111111111111, 0.000000000000000) res = stats.somersd(x, y) assert_allclose(res.statistic, expected[0], atol=1e-15) assert_allclose(res.pvalue, expected[1], atol=1e-15) # with some ties x1 = [12, 2, 1, 12, 2] x2 = [1, 4, 7, 1, 0] # Cross-check with result from SAS FREQ: expected = (-0.500000000000000, 0.304901788178780) res = stats.somersd(x1, x2) assert_allclose(res.statistic, expected[0], atol=1e-15) assert_allclose(res.pvalue, expected[1], atol=1e-15) # with only ties in one or both inputs # SAS will not produce an output for these: # NOTE: No statistics are computed for x * y because x has fewer # than 2 nonmissing levels. # WARNING: No OUTPUT data set is produced for this table because a # row or column variable has fewer than 2 nonmissing levels and no # statistics are computed. res = stats.somersd([2, 2, 2], [2, 2, 2]) assert_allclose(res.statistic, np.nan) assert_allclose(res.pvalue, np.nan) res = stats.somersd([2, 0, 2], [2, 2, 2]) assert_allclose(res.statistic, np.nan) assert_allclose(res.pvalue, np.nan) res = stats.somersd([2, 2, 2], [2, 0, 2]) assert_allclose(res.statistic, np.nan) assert_allclose(res.pvalue, np.nan) res = stats.somersd([0], [0]) assert_allclose(res.statistic, np.nan) assert_allclose(res.pvalue, np.nan) # empty arrays provided as input res = stats.somersd([], []) assert_allclose(res.statistic, np.nan) assert_allclose(res.pvalue, np.nan) # test unequal length inputs x = np.arange(10.) y = np.arange(20.) assert_raises(ValueError, stats.somersd, x, y) def test_asymmetry(self): # test that somersd is asymmetric w.r.t. 
input order and that # convention is as described: first input is row variable & independent # data is from Wikipedia: # https://en.wikipedia.org/wiki/Somers%27_D # but currently that example contradicts itself - it says X is # independent yet take D_XY x = [1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3] y = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] # Cross-check with result from SAS FREQ: d_cr = 0.272727272727270 d_rc = 0.342857142857140 p = 0.092891940883700 # same p-value for either direction res = stats.somersd(x, y) assert_allclose(res.statistic, d_cr, atol=1e-15) assert_allclose(res.pvalue, p, atol=1e-4) assert_equal(res.table.shape, (3, 2)) res = stats.somersd(y, x) assert_allclose(res.statistic, d_rc, atol=1e-15) assert_allclose(res.pvalue, p, atol=1e-15) assert_equal(res.table.shape, (2, 3)) def test_somers_original(self): # test against Somers' original paper [1] # Table 5A # Somers' convention was column IV table = np.array([[8, 2], [6, 5], [3, 4], [1, 3], [2, 3]]) # Our convention (and that of SAS FREQ) is row IV table = table.T dyx = 129/340 assert_allclose(stats.somersd(table).statistic, dyx) # table 7A - d_yx = 1 table = np.array([[25, 0], [85, 0], [0, 30]]) dxy, dyx = 3300/5425, 3300/3300 assert_allclose(stats.somersd(table).statistic, dxy) assert_allclose(stats.somersd(table.T).statistic, dyx) # table 7B - d_yx < 0 table = np.array([[25, 0], [0, 30], [85, 0]]) dyx = -1800/3300 assert_allclose(stats.somersd(table.T).statistic, dyx) def test_contingency_table_with_zero_rows_cols(self): # test that zero rows/cols in contingency table don't affect result N = 100 shape = 4, 6 size = np.prod(shape) rng = np.random.RandomState(0) s = stats.multinomial.rvs(N, p=np.ones(size)/size, random_state=rng).reshape(shape) res = stats.somersd(s) s2 = np.insert(s, 2, np.zeros(shape[1]), axis=0) res2 = stats.somersd(s2) s3 = np.insert(s, 2, np.zeros(shape[0]), axis=1) res3 = stats.somersd(s3) s4 = np.insert(s2, 
2, np.zeros(shape[0]+1), axis=1) res4 = stats.somersd(s4) # Cross-check with result from SAS FREQ: assert_allclose(res.statistic, -0.116981132075470, atol=1e-15) assert_allclose(res.statistic, res2.statistic) assert_allclose(res.statistic, res3.statistic) assert_allclose(res.statistic, res4.statistic) assert_allclose(res.pvalue, 0.156376448188150, atol=1e-15) assert_allclose(res.pvalue, res2.pvalue) assert_allclose(res.pvalue, res3.pvalue) assert_allclose(res.pvalue, res4.pvalue) def test_invalid_contingency_tables(self): N = 100 shape = 4, 6 size = np.prod(shape) rng = np.random.default_rng(0) # start with a valid contingency table s = stats.multinomial.rvs(N, p=np.ones(size)/size, random_state=rng).reshape(shape) s5 = s - 2 message = "All elements of the contingency table must be non-negative" with assert_raises(ValueError, match=message): stats.somersd(s5) s6 = s + 0.01 message = "All elements of the contingency table must be integer" with assert_raises(ValueError, match=message): stats.somersd(s6) message = ("At least two elements of the contingency " "table must be nonzero.") with assert_raises(ValueError, match=message): stats.somersd([[]]) with assert_raises(ValueError, match=message): stats.somersd([[1]]) s7 = np.zeros((3, 3)) with assert_raises(ValueError, match=message): stats.somersd(s7) s7[0, 1] = 1 with assert_raises(ValueError, match=message): stats.somersd(s7) def test_only_ranks_matter(self): # only ranks of input data should matter x = [1, 2, 3] x2 = [-1, 2.1, np.inf] y = [3, 2, 1] y2 = [0, -0.5, -np.inf] res = stats.somersd(x, y) res2 = stats.somersd(x2, y2) assert_equal(res.statistic, res2.statistic) assert_equal(res.pvalue, res2.pvalue) def test_contingency_table_return(self): # check that contingency table is returned x = np.arange(10) y = np.arange(10) res = stats.somersd(x, y) assert_equal(res.table, np.eye(10)) def test_somersd_alternative(self): # Test alternative parameter, asymptotic method (due to tie) # Based on 
scipy.stats.test_stats.TestCorrSpearman2::test_alternative x1 = [1, 2, 3, 4, 5] x2 = [5, 6, 7, 8, 7] # strong positive correlation expected = stats.somersd(x1, x2, alternative="two-sided") assert expected.statistic > 0 # rank correlation > 0 -> large "less" p-value res = stats.somersd(x1, x2, alternative="less") assert_equal(res.statistic, expected.statistic) assert_allclose(res.pvalue, 1 - (expected.pvalue / 2)) # rank correlation > 0 -> small "greater" p-value res = stats.somersd(x1, x2, alternative="greater") assert_equal(res.statistic, expected.statistic) assert_allclose(res.pvalue, expected.pvalue / 2) # reverse the direction of rank correlation x2.reverse() # strong negative correlation expected = stats.somersd(x1, x2, alternative="two-sided") assert expected.statistic < 0 # rank correlation < 0 -> large "greater" p-value res = stats.somersd(x1, x2, alternative="greater") assert_equal(res.statistic, expected.statistic) assert_allclose(res.pvalue, 1 - (expected.pvalue / 2)) # rank correlation < 0 -> small "less" p-value res = stats.somersd(x1, x2, alternative="less") assert_equal(res.statistic, expected.statistic) assert_allclose(res.pvalue, expected.pvalue / 2) with pytest.raises(ValueError, match="`alternative` must be..."): stats.somersd(x1, x2, alternative="ekki-ekki") @pytest.mark.parametrize("positive_correlation", (False, True)) def test_somersd_perfect_correlation(self, positive_correlation): # Before the addition of `alternative`, perfect correlation was # treated as a special case. 
Now it is treated like any other case, but # make sure there are no divide by zero warnings or associated errors x1 = np.arange(10) x2 = x1 if positive_correlation else np.flip(x1) expected_statistic = 1 if positive_correlation else -1 # perfect correlation -> small "two-sided" p-value (0) res = stats.somersd(x1, x2, alternative="two-sided") assert res.statistic == expected_statistic assert res.pvalue == 0 # rank correlation > 0 -> large "less" p-value (1) res = stats.somersd(x1, x2, alternative="less") assert res.statistic == expected_statistic assert res.pvalue == (1 if positive_correlation else 0) # rank correlation > 0 -> small "greater" p-value (0) res = stats.somersd(x1, x2, alternative="greater") assert res.statistic == expected_statistic assert res.pvalue == (0 if positive_correlation else 1) def test_somersd_large_inputs_gh18132(self): # Test that large inputs where potential overflows could occur give # the expected output. This is tested in the case of binary inputs. # See gh-18126. # generate lists of random classes 1-2 (binary) classes = [1, 2] n_samples = 10 ** 6 rng = np.random.default_rng(6889320191) x = rng.choice(classes, n_samples) y = rng.choice(classes, n_samples) # get value to compare with: sklearn output # from sklearn import metrics # val_auc_sklearn = metrics.roc_auc_score(x, y) # # convert to the Gini coefficient (Gini = (AUC*2)-1) # val_sklearn = 2 * val_auc_sklearn - 1 val_sklearn = 0.000624401938730923 # calculate the Somers' D statistic, which should be equal to the # result of val_sklearn until approximately machine precision val_scipy = stats.somersd(x, y).statistic assert_allclose(val_sklearn, val_scipy, atol=1e-15)
TestSomersD
python
huggingface__transformers
src/transformers/models/lfm2_moe/modular_lfm2_moe.py
{ "start": 6706, "end": 9753 }
class ____(MixtralModel): def __init__(self, config: Lfm2MoeConfig): super().__init__(config) self.pos_emb = Lfm2MoeRotaryEmbedding(config) self.embedding_norm = Lfm2MoeRMSNorm(config.hidden_size, eps=config.norm_eps) del self.norm del self.rotary_emb def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Lfm2MoeHybridConvCache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if use_cache and past_key_values is None: batch_size = inputs_embeds.shape[0] past_key_values = Lfm2MoeHybridConvCache( config=self.config, max_batch_size=batch_size, dtype=self.dtype, device=self.device ) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = create_causal_mask( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) # Skip masking for decoding stage. 
We check shape here to be compile-friendly linear_attention = attention_mask if inputs_embeds.shape[1] != 1 else None hidden_states = inputs_embeds position_embeddings = self.pos_emb(hidden_states, position_ids=position_ids) # decoder layers for decoder_layer in self.layers[: self.config.num_hidden_layers]: layer_mask = causal_mask if decoder_layer.is_attention_layer else linear_attention hidden_states = decoder_layer( hidden_states, attention_mask=layer_mask, position_ids=position_ids, past_key_values=past_key_values, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.embedding_norm(hidden_states) return MoeModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=past_key_values, )
Lfm2MoeModel
python
pytorch__pytorch
torch/ao/quantization/pt2e/port_metadata_pass.py
{ "start": 5324, "end": 9179 }
class ____(PassBase): """ Port metadata for nodes added by quantization flow. For static quant these are: - quantizer_per_tensor.default, dequantize_per_tensor.default - quantizer_per_channel.default, dequantize_per_channel.default For dynamic quant these are: - choose_qparams.tensor - quantizer_per_tensor.tensor, dequantize_per_tensor.tensor - quantizer_per_channel.default, dequantize_per_channel.default Rules of porting metadata: - Metadata to be ported: - nn_module_stack - stack_trace - quantization_tag - Metadata to NOT be ported: - Everything else - Rules: - Statically quantized patterns: - Dequantize nodes on the inputs to be quantized inherit metadata of the consumer node. - Quantize nodes on the outputs inherit metadata of the producer node. - Example 1: - Original: [Conv -> AvgPool -> Linear] - Quantized [Q-> DQ -> Conv -> Q -> DQ -> AvgPool -> Q -> DQ -> Linear -> Q -> DQ] - Inner brackets specify which nodes Q/DQ inherit metadata from - [Q-> [DQ -> Conv -> Q] -> [DQ -> AvgPool -> Q] -> [DQ -> Linear -> Q] -> DQ] - Note first Q and last DQ do not inherit metadata from any nodes - Example 2: - Original: [Conv -> AvgPool -> Linear] - AvgPool is not quantized - Quantized [Q-> DQ -> Conv -> Q -> DQ -> AvgPool -> Q -> DQ -> Linear -> Q -> DQ] - Inner brackets specify which nodes Q/DQ inherit metadata from - [Q-> [DQ -> Conv -> Q] -> DQ -> [AvgPool] -> Q -> [DQ -> Linear -> Q] -> DQ] - Note DQ and Q nodes around AvgPool do not inherit metadata from AvgPool because AvgPool was not supposed to be quantized. Metadata porting relies on quantization_annotation on the nodes (in this case AvgPool node) to conclude if the node or pattern was supposed to be quantized. And subsequently decide if the preceding Q, if any, should inherit metadata from AvgPool. 
- Dynamically quantized patterns: - Input that are dynamically quantized have choose_qparams, quantize and dequantize nodes - For example, below linear is dynamically quantized while rest statically: - Original: [Conv -> AvgPool -> Linear] - Quantized [Q-> DQ -> Conv -> Q -> DQ -> AvgPool -> Q -> DQ -> choose_params -> Q -> DQ -> Linear] - Quantized [Q-> [DQ -> Conv -> Q] -> [DQ -> AvgPool -> Q] -> DQ -> [choose_params -> Q -> DQ -> Linear]] - Note first Q does not inherit metadata from any nodes NB: - The best place for porting metadata is during observer conversion to q/dq. This is because it precisely knows which quantization spec is converted to q/dq and thus from where the metadata should be ported. However, since FX and PT2E quant workflow are on a common code-base, this hurts readability quite a bit. Doing it via a separate pass, helps readability of the code. Once we are able to refactor PT2E quant code, this pass should like to be integrated in the refactored variant of "convert" step. """ def call(self, graph_module: torch.fx.GraphModule) -> PassResult: for node in graph_module.graph.nodes: annotation = node.meta.get("quantization_annotation", None) if _is_valid_annotation(annotation): input_qspec_map = node.meta["quantization_annotation"].input_qspec_map output_qspec = node.meta["quantization_annotation"].output_qspec for input_node, qspec in input_qspec_map.items(): _port_metadata_for_input_quant_nodes(input_node, node, qspec) _port_metadata_for_output_quant_nodes(node, output_qspec) return PassResult(graph_module, True)
PortNodeMetaForQDQ
python
walkccc__LeetCode
solutions/270. Closest Binary Search Tree Value/270.py
{ "start": 0, "end": 550 }
class ____: def closestValue(self, root: TreeNode | None, target: float) -> int: # If target < root.val, search the left subtree. if target < root.val and root.left: left = self.closestValue(root.left, target) if abs(left - target) <= abs(root.val - target): return left # If target > root.val, search the right subtree. if target > root.val and root.right: right = self.closestValue(root.right, target) if abs(right - target) < abs(root.val - target): return right return root.val
Solution
python
pennersr__django-allauth
tests/apps/socialaccount/providers/netiq/tests.py
{ "start": 238, "end": 969 }
class ____(OAuth2TestsMixin, TestCase): provider_id = NetIQProvider.id def get_mocked_response(self): return MockedResponse( HTTPStatus.OK, """ { "sub": "d4c094dd899ab0408fb9d4c094dd899a", "acr": "secure/name/password/uri", "preferred_username": "Mocktest", "email": "mocktest@your.netiq.server.example.com", "nickname": "Mocktest", "family_name": "test", "given_name": "Mock", "website": "https://www.exanple.com" } """, ) def get_expected_to_str(self): return "mocktest@your.netiq.server.example.com"
NetIQTests
python
pytorch__pytorch
test/higher_order_ops/test_invoke_subgraph.py
{ "start": 8265, "end": 11286 }
class ____(torch.nn.Module): def forward(self, L_x_: "f32[8]", L_y_: "f32[8]", L_mod_buffers_buf_: "f32[8]"): l_x_ = L_x_ l_y_ = L_y_ l_mod_buffers_buf_ = L_mod_buffers_buf_ subgraph_0 = self.subgraph_0 invoke_subgraph = torch.ops.higher_order.invoke_subgraph(subgraph_0, 'subgraph_0', l_mod_buffers_buf_, l_x_, l_y_); subgraph_0 = None getitem: "f32[8]" = invoke_subgraph[0]; invoke_subgraph = None subgraph_1 = self.subgraph_0 invoke_subgraph_1 = torch.ops.higher_order.invoke_subgraph(subgraph_1, 'subgraph_0', l_mod_buffers_buf_, l_x_, l_y_); subgraph_1 = l_mod_buffers_buf_ = l_x_ = l_y_ = None getitem_1: "f32[8]" = invoke_subgraph_1[0]; invoke_subgraph_1 = None add: "f32[8]" = getitem + getitem_1; getitem = getitem_1 = None return (add,) class subgraph_0(torch.nn.Module): def forward(self, l_mod_buffers_buf_: "f32[8]", l_x_: "f32[8]", l_y_: "f32[8]"): add_: "f32[8]" = l_mod_buffers_buf_.add_(1); add_ = None mul: "f32[8]" = torch.mul(l_x_, l_y_); l_x_ = l_y_ = None sin: "f32[8]" = mul.sin(); mul = None add: "f32[8]" = sin + 5; sin = None add_1: "f32[8]" = add + l_mod_buffers_buf_; add = l_mod_buffers_buf_ = None return (add_1,) """, ) self.assertExpectedInline( str(fw_schema[0]), """invoke_subgraph(Any subgraph, str identifier, Tensor(a2!) arg0, Tensor arg1, Tensor arg2) -> ((Tensor))""", ) self.assertExpectedInline( str(fw_schema[1]), """invoke_subgraph(Any subgraph, str identifier, Tensor(a2!) 
arg0, Tensor arg1, Tensor arg2) -> ((Tensor))""", ) self.assertEqual(res, ref) self.assertEqual(mod.buf, mod_ref.buf) def test_auto_functionalize(self): class Mod(torch.nn.Module): def __init__(self): super().__init__() self.c = 5 self.register_buffer("buf", torch.ones(8, requires_grad=False)) @nested_compile_region def forward(self, x, y): return torch.mul(x, y).sin() * self.c * self.buf mod_ref = Mod() mod = Mod() def fn(mod, x, y): return mod(x, y) + mod(x, y) x = torch.randn(8, requires_grad=True) y = torch.randn(8, requires_grad=True) ref = fn(mod_ref, x, y) x_clone = x.detach().clone().requires_grad_(True) y_clone = y.detach().clone().requires_grad_(True) backend = AotEagerAndRecordGraphs() res = torch.compile(fn, backend=backend, fullgraph=True)(mod, x_clone, y_clone) res.sum().backward() self.assertEqual(len(backend.fw_graphs), 1) self.assertEqual(len(backend.bw_graphs), 1) self.assertEqual(ref, res) self.assertExpectedInline( normalize_gm(backend.fw_graphs[0].print_readable(print_output=False)), """\
GraphModule
python
ray-project__ray
rllib/utils/framework.py
{ "start": 7249, "end": 7391 }
class ____: def __init__(self) -> None: self.Model = _FakeTfClassStub # Fake classes under keras (e.g for tf.keras.Model)
_KerasStub
python
PrefectHQ__prefect
src/prefect/client/schemas/objects.py
{ "start": 24545, "end": 24907 }
class ____(PrefectBaseModel): """ Base class for classes that represent inputs to task runs, which could include, constants, parameters, or other task runs. """ model_config: ClassVar[ConfigDict] = ConfigDict(frozen=True) if not TYPE_CHECKING: # subclasses provide the concrete type for this field input_type: str
RunInput
python
django__django
tests/serializers/models/data.py
{ "start": 3597, "end": 3693 }
class ____(models.Model): data = models.ManyToManyField("self", symmetrical=False)
M2MSelfData
python
dask__distributed
distributed/worker_state_machine.py
{ "start": 18963, "end": 19032 }
class ____(StateMachineEvent): __slots__ = () @dataclass
PauseEvent
python
django__django
tests/admin_checks/tests.py
{ "start": 1076, "end": 1168 }
class ____(admin.ModelAdmin): def check(self, **kwargs): return ["error!"]
MyAdmin
python
scrapy__scrapy
tests/mockserver/http.py
{ "start": 785, "end": 3149 }
class ____(resource.Resource): def __init__(self): super().__init__() self.putChild(b"status", Status()) self.putChild(b"follow", Follow()) self.putChild(b"delay", Delay()) self.putChild(b"partial", Partial()) self.putChild(b"drop", Drop()) self.putChild(b"raw", Raw()) self.putChild(b"echo", Echo()) self.putChild(b"payload", PayloadResource()) self.putChild(b"alpayload", ArbitraryLengthPayloadResource()) self.putChild(b"static", File(str(Path(tests_datadir, "test_site/")))) self.putChild(b"redirect-to", RedirectTo()) self.putChild(b"text", Data(b"Works", "text/plain")) self.putChild( b"html", Data( b"<body><p class='one'>Works</p><p class='two'>World</p></body>", "text/html", ), ) self.putChild( b"enc-gb18030", Data(b"<p>gb18030 encoding</p>", "text/html; charset=gb18030"), ) self.putChild(b"redirect", Redirect(b"/redirected")) self.putChild( b"redirect-no-meta-refresh", NoMetaRefreshRedirect(b"/redirected") ) self.putChild(b"redirected", Data(b"Redirected here", "text/plain")) numbers = [str(x).encode("utf8") for x in range(2**18)] self.putChild(b"numbers", Data(b"".join(numbers), "text/plain")) self.putChild(b"wait", ForeverTakingResource()) self.putChild(b"hang-after-headers", ForeverTakingResource(write=True)) self.putChild(b"host", HostHeaderResource()) self.putChild(b"broken", BrokenDownloadResource()) self.putChild(b"chunked", ChunkedResource()) self.putChild(b"broken-chunked", BrokenChunkedResource()) self.putChild(b"contentlength", ContentLengthHeaderResource()) self.putChild(b"nocontenttype", EmptyContentTypeHeaderResource()) self.putChild(b"largechunkedfile", LargeChunkedFileResource()) self.putChild(b"compress", Compress()) self.putChild(b"duplicate-header", DuplicateHeaderResource()) self.putChild(b"response-headers", ResponseHeadersResource()) self.putChild(b"set-cookie", SetCookie()) def getChild(self, name, request): return self def render(self, request): return b"Scrapy mock HTTP server\n"
Root
python
getsentry__sentry
src/sentry/workflow_engine/typings/notification_action.py
{ "start": 5336, "end": 9434 }
class ____(ABC): @property @abstractmethod def action_type(self) -> ActionType: pass # Represents the mapping of a target field to a source field {target_field: FieldMapping} field_mappings: ClassVar[dict[str, FieldMapping]] = {} def __init__(self, action: dict[str, Any]): self.action = action @property @abstractmethod def required_fields(self) -> list[str]: """Return the required fields for this action""" pass @property def missing_fields(self) -> list[str]: """Return the missing fields for this action""" return [field for field in self.required_fields if self.action.get(field) is None] @property @abstractmethod def target_type(self) -> int | None: """Return the target type for this action""" pass @property def integration_id(self) -> int | None: """Return the integration ID for this action, if any""" if mapping := ACTION_FIELD_MAPPINGS.get(self.action_type): if ActionFieldMappingKeys.INTEGRATION_ID_KEY.value in mapping: return self.action.get(mapping[ActionFieldMappingKeys.INTEGRATION_ID_KEY.value]) return None @property def target_identifier(self) -> str | None: """Return the target identifier for this action, if any""" if mapping := ACTION_FIELD_MAPPINGS.get(self.action_type): if ActionFieldMappingKeys.TARGET_IDENTIFIER_KEY.value in mapping: return self.action.get(mapping[ActionFieldMappingKeys.TARGET_IDENTIFIER_KEY.value]) return None @property def target_display(self) -> str | None: """Return the display name for the target, if any""" if mapping := ACTION_FIELD_MAPPINGS.get(self.action_type): if ActionFieldMappingKeys.TARGET_DISPLAY_KEY in mapping: return self.action.get(mapping[ActionFieldMappingKeys.TARGET_DISPLAY_KEY.value]) return None @property def action_config(self) -> dict[str, str | int | None]: base_config = { "target_identifier": self.target_identifier, "target_display": self.target_display, "target_type": self.target_type if self.target_type is not None else None, } if self.action_type == ActionType.SENTRY_APP: base_config["sentry_app_identifier"] 
= SentryAppIdentifier.SENTRY_APP_INSTALLATION_UUID return base_config @property def blob_type(self) -> type[DataBlob] | None: """Return the blob type for this action, if any""" return None def is_valid(self) -> bool: """ Validate that all required fields for this action are present. Should be overridden by subclasses to add specific validation. """ return len(self.missing_fields) == 0 def get_sanitized_data(self) -> dict[str, Any]: """ Return sanitized data for this action If a blob type is specified, convert the action data to a dataclass Otherwise, remove excluded keys """ if self.blob_type: mapped_data = {} for field_name in (field.name for field in dataclasses.fields(self.blob_type)): mapping = self.field_mappings.get(field_name) # If a mapping is specified, use the source field value or default value if mapping: source_field = mapping.source_field value = self.action.get(source_field, mapping.default_value) # Otherwise, use the field value else: value = self.action.get(field_name, "") mapped_data[field_name] = value blob_instance = self.blob_type(**mapped_data) return dataclasses.asdict(blob_instance) else: # Remove excluded keys and required fields excluded_keys = EXCLUDED_ACTION_DATA_KEYS + self.required_fields return {k: v for k, v in self.action.items() if k not in excluded_keys}
BaseActionTranslator
python
streamlit__streamlit
lib/tests/streamlit/runtime/caching/cache_data_api_test.py
{ "start": 29427, "end": 29866 }
class ____(CacheStorageManager): """A CacheStorageManager that always fails in check_context.""" def create(self, context: CacheStorageContext) -> CacheStorage: return DummyCacheStorage() def clear_all(self) -> None: pass def check_context(self, context: CacheStorageContext) -> None: raise InvalidCacheStorageContextError("This CacheStorageManager always fails")
AlwaysFailingTestCacheStorageManager
python
ray-project__ray
python/ray/llm/_internal/serve/core/configs/llm_config.py
{ "start": 4216, "end": 19350 }
class ____(BaseModelExtended): runtime_env: Optional[Dict[str, Any]] = Field( default=None, description=( "The runtime_env to use for the model deployment replica " "and the engine workers." ), ) model_loading_config: Union[Dict[str, Any], ModelLoadingConfig] = Field( description="The settings for how to download and expose the model. Validated against ModelLoadingConfig." ) llm_engine: str = Field( default=LLMEngine.vLLM.value, description=f"The LLMEngine that should be used to run the model. Only the following values are supported: {str([t.value for t in LLMEngine])}", ) engine_kwargs: Dict[str, Any] = Field( default={}, description=( "Additional keyword arguments for the engine. In case of vLLM, " "this will include all the configuration knobs they provide out " "of the box, except for tensor-parallelism which is set " "automatically from Ray Serve configs." ), ) accelerator_type: Optional[str] = Field( default=None, description=f"The type of accelerator runs the model on. Only the following values are supported: {str([t.value for t in GPUType])}", ) placement_group_config: Optional[Dict[str, Any]] = Field( default=None, description=( "Ray placement group configuration for scheduling vLLM engine workers. " "Defines resource bundles and placement strategy for multi-node deployments. " "Should contain 'bundles' (list of resource dicts) and optionally 'strategy' " "(defaults to 'PACK'). Example: {'bundles': [{'GPU': 1, 'CPU': 2}], 'strategy': 'PACK'}" ), ) lora_config: Optional[Union[Dict[str, Any], LoraConfig]] = Field( default=None, description="Settings for LoRA adapter. Validated against LoraConfig.", ) deployment_config: Dict[str, Any] = Field( default_factory=dict, description=""" The Ray @server.deployment options. 
Supported fields are: `name`, `num_replicas`, `ray_actor_options`, `max_ongoing_requests`, `autoscaling_config`, `max_queued_requests`, `user_config`, `health_check_period_s`, `health_check_timeout_s`, `graceful_shutdown_wait_loop_s`, `graceful_shutdown_timeout_s`, `logging_config`, `request_router_config`. For more details, see the `Ray Serve Documentation <https://docs.ray.io/en/latest/serve/configure-serve-deployment.html>`_. """, ) experimental_configs: Dict[str, Any] = Field( default_factory=dict, description="Experimental configurations for Ray Serve LLM. This is a " "dictionary of key-value pairs. Current supported keys are:\n" "- `stream_batching_interval_ms`: Ray Serve LLM batches streaming " "requests together. This config decides how long to wait for the " "batch before processing the requests. Defaults to " f"{MODEL_RESPONSE_BATCH_TIMEOUT_MS}.\n" "- `num_ingress_replicas`: The number of replicas for the router. Ray " "Serve will take the max amount all the replicas. Default would be 2 " "router replicas per model replica.\n", ) log_engine_metrics: Optional[bool] = Field( default=True, description="Enable additional engine metrics via Ray Prometheus port.", ) callback_config: CallbackConfig = Field( default_factory=CallbackConfig, description="Callback configuration to use for model initialization. Can be a string path to a class or a Callback subclass.", ) _supports_vision: bool = PrivateAttr(False) _model_architecture: str = PrivateAttr("UNSPECIFIED") _engine_config: EngineConfigType = PrivateAttr(None) _callback_instance: Optional[CallbackBase] = PrivateAttr(None) def _infer_supports_vision(self, model_id_or_path: str) -> None: """Called in llm node initializer together with other transformers calls. It loads the model config from huggingface and sets the supports_vision attribute based on whether the config has `vision_config`. All LVM models has `vision_config` setup. 
""" try: hf_config = transformers.PretrainedConfig.from_pretrained(model_id_or_path) self._supports_vision = hasattr(hf_config, "vision_config") except Exception as e: raise ValueError( f"Failed to load Hugging Face config for model_id='{model_id_or_path}'.\ Ensure `model_id` is a valid Hugging Face repo or a local path that \ contains a valid `config.json` file. " f"Original error: {repr(e)}" ) from e def _set_model_architecture( self, model_id_or_path: Optional[str] = None, model_architecture: Optional[str] = None, ) -> None: """Called in llm node initializer together with other transformers calls. It loads the model config from huggingface and sets the model_architecture attribute based on whether the config has `architectures`. """ if model_id_or_path: try: hf_config = transformers.PretrainedConfig.from_pretrained( model_id_or_path ) if ( hf_config and hasattr(hf_config, "architectures") and hf_config.architectures ): self._model_architecture = hf_config.architectures[0] except Exception as e: raise ValueError( f"Failed to load Hugging Face config for model_id='{model_id_or_path}'.\ Ensure `model_id` is a valid Hugging Face repo or a local path that \ contains a valid `config.json` file. " f"Original error: {repr(e)}" ) from e if model_architecture: self._model_architecture = model_architecture def apply_checkpoint_info( self, model_id_or_path: str, trust_remote_code: bool = False ) -> None: """Apply the checkpoint info to the model config.""" self._infer_supports_vision(model_id_or_path) self._set_model_architecture(model_id_or_path) def get_or_create_callback(self) -> Optional[CallbackBase]: """Get or create the callback instance for this process. This ensures one callback instance per process (singleton pattern). The instance is cached so the same object is used across all hooks. 
Returns: Instance of class that implements Callback """ # Return cached instance if exists if self._callback_instance is not None: return self._callback_instance engine_config = self.get_engine_config() assert engine_config is not None pg = engine_config.get_or_create_pg() runtime_env = engine_config.get_runtime_env_with_local_env_vars() if self.engine_kwargs.get("load_format", None) in STREAMING_LOAD_FORMATS: worker_node_download_model = NodeModelDownloadable.NONE else: worker_node_download_model = NodeModelDownloadable.MODEL_AND_TOKENIZER # Create new instance if isinstance(self.callback_config.callback_class, str): callback_class = load_class(self.callback_config.callback_class) else: callback_class = self.callback_config.callback_class self._callback_instance = callback_class( raise_error_on_callback=self.callback_config.raise_error_on_callback, llm_config=self, ctx_kwargs={ "worker_node_download_model": worker_node_download_model, "placement_group": pg, "runtime_env": runtime_env, }, **self.callback_config.callback_kwargs, ) return self._callback_instance @property def supports_vision(self) -> bool: return self._supports_vision @property def model_architecture(self) -> str: return self._model_architecture @property def input_modality(self) -> str: """Returns the input modality of the model. There could be more types in the future. Right now assumes if the model doesn't support version, it'll be text. """ if self.supports_vision: return InputModality.image.value return InputModality.text.value @property def model_id(self) -> str: return self.model_loading_config.model_id @property def max_request_context_length(self) -> Optional[int]: return self.engine_kwargs.get("max_model_len") @field_validator("accelerator_type") def validate_accelerator_type(cls, value: Optional[str]): if value is None: return value # Ensure A10 is converted to A10G. 
if value == "A10": value = "A10G" if value not in [t.value for t in GPUType]: raise ValueError(f"Unsupported accelerator type: {value}") return value @field_validator("llm_engine") def validate_llm_engine(cls, value: str) -> str: """Validates the llm_engine string value.""" try: # Validate the engine LLMEngine(value) except ValueError as e: raise ValueError(f"Unsupported engine: {value}") from e return value @field_validator("deployment_config") def validate_deployment_config(cls, value: Dict[str, Any]) -> Dict[str, Any]: """Validates the deployment config dictionary.""" try: DeploymentConfig(**value) except Exception as e: raise ValueError(f"Invalid deployment config: {value}") from e return value @field_validator("model_loading_config") def validate_model_loading_config( cls, value: Union[Dict[str, Any], ModelLoadingConfig] ) -> ModelLoadingConfig: """Validates the model loading config dictionary.""" if isinstance(value, ModelLoadingConfig): return value try: model_loading_config = ModelLoadingConfig(**value) except Exception as e: raise ValueError(f"Invalid model_loading_config: {value}") from e return model_loading_config @field_validator("lora_config") def validate_lora_config( cls, value: Optional[Union[Dict[str, Any], LoraConfig]] ) -> Optional[LoraConfig]: """Validates the lora config dictionary.""" if value is None or isinstance(value, LoraConfig): return value try: lora_config = LoraConfig(**value) except Exception as e: raise ValueError(f"Invalid lora_config: {value}") from e return lora_config @field_validator("experimental_configs") def validate_experimental_configs(cls, value: Dict[str, Any]) -> Dict[str, Any]: """Validates the experimental configs dictionary.""" # TODO(Kourosh): Remove this deprecation check after users have # migrated. if "num_router_replicas" in value: raise ValueError( "The 'num_router_replicas' key in experimental_configs has " "been renamed to 'num_ingress_replicas'. 
Please update " "your configuration to use 'num_ingress_replicas' instead." ) return value @model_validator(mode="after") def _check_log_stats_with_metrics(self): """Validate that disable_log_stats isn't enabled when log_engine_metrics is enabled.""" if self.log_engine_metrics and self.engine_kwargs.get("disable_log_stats"): raise ValueError( "disable_log_stats cannot be set to True when log_engine_metrics is enabled. " "Engine metrics require log stats to be enabled." ) return self def multiplex_config(self) -> ServeMultiplexConfig: multiplex_config = None if self.lora_config: multiplex_config = ServeMultiplexConfig( max_num_models_per_replica=self.lora_config.max_num_adapters_per_replica, download_timeout_s=self.lora_config.download_timeout_s, max_download_tries=self.lora_config.max_download_tries, ) return multiplex_config def get_engine_config(self) -> EngineConfigType: """Returns the engine config for the given LLM config. LLMConfig not only has engine config but also deployment config, etc. """ # Note (genesu): This is important that we cache the engine config as the # `hf_model_id` attribute on the engine config will be set based on whether # the model is downloaded from a remote storage and will be set to the # local path of the model. This is important for vLLM not going to Hugging # Face to download the model again after it's already downloaded during node # initialization step. if self._engine_config: return self._engine_config if self.llm_engine == LLMEngine.vLLM: from ray.llm._internal.serve.engines.vllm.vllm_models import ( VLLMEngineConfig, ) self._engine_config = VLLMEngineConfig.from_llm_config(self) else: # Note (genesu): This should never happen because we validate the engine # in the config. raise ValueError(f"Unsupported engine: {self.llm_engine}") return self._engine_config def update_engine_kwargs(self, **kwargs: Any) -> None: """Update the engine_kwargs and the engine_config engine_kwargs. 
This is typically called during engine starts, when certain engine_kwargs (e.g., data_parallel_rank) become available. """ self.engine_kwargs.update(kwargs) # engine_config may be created before engine starts, this makes sure # the engine_config is updated with the latest engine_kwargs. if self._engine_config: self._engine_config.engine_kwargs.update(kwargs) def setup_engine_backend(self): self._setup_kv_connector_backend() def _setup_kv_connector_backend(self): """Private method to setup kv connector depending on the local deployment state""" # 1. validate that the backend is one of the backends supported (Nixl or LMCache) kv_transfer_config = self.engine_kwargs.get("kv_transfer_config") if not kv_transfer_config: return kv_connector = kv_transfer_config.get("kv_connector") if not kv_connector: raise ValueError("Connector type is not specified.") # 2. Setup the backend using factory kv_connector_backend = KVConnectorBackendFactory.create_backend( kv_connector, self ) kv_connector_backend.setup()
LLMConfig
python
walkccc__LeetCode
solutions/3200. Maximum Height of a Triangle/3200.py
{ "start": 0, "end": 941 }
class ____: def maxHeightOfTriangle(self, red: int, blue: int) -> int: return max(self._maxHeight(red, blue), self._maxHeight(blue, red)) def _maxHeight(self, n1: int, n2: int) -> int: """ Returns the maximum height of a triangle with the odd levels having `n1` balls and the even levels having `n2` balls. """ # 1 + 3 + ... + h <= n1 # ((1 + h) * (n + 1) / 2) / 2 <= n1 # h <= sqrt(4 * n1) - 1 oddHeight = math.isqrt(4 * n1) - 1 # 2 + 4 + ... + h <= n2 # ((2 + h) * h / 2) / 2 <= n2 # h <= sqrt(4 * n2 + 1) - 1 evenHeight = math.isqrt(4 * n2 + 1) - 1 # If the difference between the odd and even heights is >= 1, we can add an # extra level to the minimum height. return min(oddHeight, evenHeight) + (1 if abs(oddHeight - evenHeight) >= 1 else 0)
Solution
python
readthedocs__readthedocs.org
readthedocs/projects/views/private.py
{ "start": 18780, "end": 19398 }
class ____(ProjectAdminMixin, PrivateViewMixin): model = ProjectRelationship form_class = ProjectRelationshipForm lookup_field = "child__slug" lookup_url_kwarg = "subproject_slug" def get_queryset(self): self.project = self.get_project() return self.model.objects.filter(parent=self.project) def get_form(self, data=None, files=None, **kwargs): kwargs["user"] = self.request.user return super().get_form(data, files, **kwargs) def get_success_url(self): return reverse("projects_subprojects", args=[self.get_project().slug])
ProjectRelationshipMixin
python
pytorch__pytorch
torch/export/dynamic_shapes.py
{ "start": 12348, "end": 12487 }
class ____: """ This represents input tensor dimensions. """ t_id: int dim: int @dataclasses.dataclass
_ConstraintTarget
python
scrapy__scrapy
tests/test_loader.py
{ "start": 798, "end": 871 }
class ____(ItemLoader): default_item_class = SummaryItem
NameItemLoader
python
PrefectHQ__prefect
tests/server/models/test_flow_run_states.py
{ "start": 5069, "end": 10562 }
class ____: async def test_create_flow_run_state_succeeds(self, flow_run, session): flow_run_state = ( await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=Running(), ) ).state assert flow_run_state.name == "Running" assert flow_run_state.type == StateType.RUNNING assert flow_run_state.state_details.flow_run_id == flow_run.id async def test_run_details_are_updated_entering_running(self, flow_run, session): await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=Scheduled(), ) await session.refresh(flow_run) assert flow_run.start_time is None assert flow_run.run_count == 0 assert flow_run.total_run_time == datetime.timedelta(0) dt = now("UTC") await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=Running(timestamp=dt), ) await session.refresh(flow_run) assert flow_run.start_time == dt assert flow_run.run_count == 1 assert flow_run.total_run_time == datetime.timedelta(0) assert flow_run.estimated_run_time > datetime.timedelta(0) dt2 = now("UTC") await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=Running(timestamp=dt2), # running / running isn't usually allowed force=True, ) await session.commit() await session.refresh(flow_run) assert flow_run.start_time == dt assert flow_run.run_count == 2 assert flow_run.total_run_time == (dt2 - dt) assert flow_run.estimated_run_time > (dt2 - dt) async def test_database_is_not_updated_when_no_transition_takes_place( self, flow_run, session ): # place the run in a scheduled state in the future frs = await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=Scheduled(scheduled_time=now("UTC") + datetime.timedelta(days=30)), flow_policy=await provide_flow_policy(), ) # attempt to put the run in a pending state, which will tell the transition to WAIT frs2 = await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, 
state=Running(), flow_policy=await provide_flow_policy(), ) assert frs2.status == schemas.responses.SetStateStatus.WAIT # the original state remains in place await session.refresh(flow_run) assert flow_run.state.id == frs.state.id async def test_no_orchestration_with_injected_empty_policy(self, flow_run, session): class EmptyPolicy(BaseOrchestrationPolicy): @staticmethod def priority(): return [] with temporary_flow_policy(EmptyPolicy): # place the run in a scheduled state in the future frs = await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=Scheduled( scheduled_time=now("UTC") + datetime.timedelta(days=30) ), flow_policy=await provide_flow_policy(), ) # put the run in a pending state, which succeeds due to injected orchestration frs2 = await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=Running(), flow_policy=await provide_flow_policy(), ) assert frs2.status == schemas.responses.SetStateStatus.ACCEPT # the original state remains in place await session.refresh(flow_run) assert flow_run.state.id != frs.state.id async def test_orchestration_with_injected_parameters(self, flow_run, session): class AbortingRule(BaseOrchestrationRule): FROM_STATES = ALL_ORCHESTRATION_STATES TO_STATES = ALL_ORCHESTRATION_STATES async def before_transition(self, initial_state, proposed_state, context): # this rule mutates the proposed state type, but won't fizzle itself upon exiting if context.parameters.get("special-signal") == "abort": await self.abort_transition("wow, aborting this transition") class AbortingPolicy(BaseOrchestrationPolicy): @staticmethod def priority(): return [AbortingRule] with temporary_flow_orchestration_parameters({"special-signal": "abort"}): with temporary_flow_policy(AbortingPolicy): frs = await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=Scheduled( scheduled_time=now("UTC") + datetime.timedelta(days=30) ), flow_policy=await 
provide_flow_policy(), orchestration_parameters=await provide_flow_orchestration_parameters(), ) assert frs.status == schemas.responses.SetStateStatus.ABORT
TestCreateFlowRunState
python
keras-team__keras
keras/src/ops/linalg_test.py
{ "start": 6624, "end": 11379 }
class ____(testing.TestCase): def test_cholesky(self): x = KerasTensor([4, 3, 3]) out = linalg.cholesky(x) self.assertEqual(out.shape, (4, 3, 3)) x = KerasTensor([10, 20, 15]) with self.assertRaises(ValueError): linalg.cholesky(x) def test_cholesky_inverse(self): x = KerasTensor([4, 3, 3]) out = linalg.cholesky_inverse(x) self.assertEqual(out.shape, (4, 3, 3)) x = KerasTensor([10, 20, 15]) with self.assertRaises(ValueError): linalg.cholesky_inverse(x) def test_det(self): x = KerasTensor([4, 3, 3]) out = linalg.det(x) self.assertEqual(out.shape, (4,)) x = KerasTensor([10, 20, 15]) with self.assertRaises(ValueError): linalg.det(x) def test_eig(self): x = KerasTensor([4, 3, 3]) w, v = linalg.eig(x) self.assertEqual(w.shape, (4, 3)) self.assertEqual(v.shape, (4, 3, 3)) x = KerasTensor([10, 20, 15]) with self.assertRaises(ValueError): linalg.eig(x) def test_eigh(self): x = KerasTensor([4, 3, 3]) w, v = linalg.eigh(x) self.assertEqual(w.shape, (4, 3)) self.assertEqual(v.shape, (4, 3, 3)) x = KerasTensor([10, 20, 15]) with self.assertRaises(ValueError): linalg.eigh(x) def test_inv(self): x = KerasTensor([4, 3, 3]) out = linalg.inv(x) self.assertEqual(out.shape, (4, 3, 3)) x = KerasTensor([10, 20, 15]) with self.assertRaises(ValueError): linalg.inv(x) def test_lu_factor(self): if testing.jax_uses_gpu(): self.skipTest("Skipping test with JAX + GPU due to temporary error") x = KerasTensor([10, 4, 3]) lu, p = linalg.lu_factor(x) self.assertEqual(lu.shape, (10, 4, 3)) self.assertEqual(p.shape, (10, 3)) x = KerasTensor([10, 2, 3]) lu, p = linalg.lu_factor(x) self.assertEqual(lu.shape, (10, 2, 3)) self.assertEqual(p.shape, (10, 2)) def test_norm(self): x = KerasTensor((10, 3)) self.assertEqual(linalg.norm(x).shape, ()) x = KerasTensor((10, 3, 3)) self.assertEqual(linalg.norm(x, axis=1).shape, (10, 3)) self.assertEqual( linalg.norm(x, axis=1, keepdims=True).shape, (10, 1, 3) ) def test_qr(self): x = KerasTensor((4, 3), dtype="float32") q, r = linalg.qr(x, mode="reduced") qref, 
rref = np.linalg.qr(np.ones((4, 3)), mode="reduced") self.assertEqual(q.shape, qref.shape) self.assertEqual(r.shape, rref.shape) q, r = linalg.qr(x, mode="complete") qref, rref = np.linalg.qr(np.ones((4, 3)), mode="complete") self.assertEqual(q.shape, qref.shape) self.assertEqual(r.shape, rref.shape) with self.assertRaises(ValueError): linalg.qr(x, mode="invalid") def test_solve(self): a = KerasTensor([4, 3, 3]) b = KerasTensor([4, 3, 5]) out = linalg.solve(a, b) self.assertEqual(out.shape, (4, 3, 5)) a = KerasTensor([4, 3, 3]) b = KerasTensor([4, 3]) out = linalg.solve(a, b) self.assertEqual(out.shape, (4, 3)) a = KerasTensor([10, 20, 15]) b = KerasTensor([10, 20, 5]) with self.assertRaises(ValueError): linalg.solve(a, b) a = KerasTensor([20, 20]) b = KerasTensor([]) with self.assertRaises(ValueError): linalg.solve(a, b) def test_solve_triangular(self): if testing.jax_uses_gpu(): self.skipTest("Skipping test with JAX + GPU due to temporary error") a = KerasTensor([4, 3, 3]) b = KerasTensor([4, 3, 5]) out = linalg.solve_triangular(a, b) self.assertEqual(out.shape, (4, 3, 5)) a = KerasTensor([4, 3, 3]) b = KerasTensor([4, 3]) out = linalg.solve_triangular(a, b) self.assertEqual(out.shape, (4, 3)) a = KerasTensor([10, 20, 15]) b = KerasTensor([10, 20, 5]) with self.assertRaises(ValueError): linalg.solve_triangular(a, b) def test_svd(self): x = KerasTensor((10, 3, 2)) u, s, v = linalg.svd(x) self.assertEqual(u.shape, (10, 3, 3)) self.assertEqual(s.shape, (10, 2)) self.assertEqual(v.shape, (10, 2, 2)) u, s, v = linalg.svd(x, full_matrices=False) self.assertEqual(u.shape, (10, 3, 2)) self.assertEqual(s.shape, (10, 2)) self.assertEqual(v.shape, (10, 2, 2)) s = linalg.svd(x, compute_uv=False) self.assertEqual(s.shape, (10, 2))
LinalgOpsStaticShapeTest
python
huggingface__transformers
src/transformers/models/glm4v/modular_glm4v.py
{ "start": 23031, "end": 27469 }
class ____(Glm4RotaryEmbedding): # Ignore copy def forward(self, x, position_ids): # In contrast to other models, GLM4V different position ids for the grids # So we expand the inv_freq to shape (3, ...) inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1) position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions) device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu" with torch.autocast(device_type=device_type, enabled=False): # Force float32 freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3) emb = torch.cat((freqs, freqs), dim=-1) cos = emb.cos() * self.attention_scaling sin = emb.sin() * self.attention_scaling return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype) def rotate_half_llm(x): """Rotates half the hidden dims of the input.""" x1 = x[..., 0::2] x2 = x[..., 1::2] return torch.stack((-x2, x1), dim=-1).flatten(-2) def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1): """Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/). Explanation: Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately. Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding. For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal, height and width) of text embedding is always the same, so the text embedding rotary position embedding has no difference with modern LLMs. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. 
cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. mrope_section(`List(int)`): Multimodal rope section is for channel dimension of temporal, height and width in rope calculation. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ mrope_section = mrope_section * 2 cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze( unsqueeze_dim ) sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze( unsqueeze_dim ) # Interleave them instead of usual shape cos = cos[..., : cos.shape[-1] // 2].repeat_interleave(2, dim=-1) sin = sin[..., : sin.shape[-1] // 2].repeat_interleave(2, dim=-1) # Keep half or full tensor for later concatenation rotary_dim = cos.shape[-1] q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:] k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:] # Apply rotary embeddings on the first half or full tensor q_embed = (q_rot * cos) + (rotate_half_llm(q_rot) * sin) k_embed = (k_rot * cos) + (rotate_half_llm(k_rot) * sin) # Concatenate back to full shape q_embed = torch.cat([q_embed, q_pass], dim=-1) k_embed = torch.cat([k_embed, k_pass], dim=-1) return q_embed, k_embed
Glm4vTextRotaryEmbedding
python
openai__openai-python
src/openai/resources/files.py
{ "start": 29573, "end": 30436 }
class ____: def __init__(self, files: AsyncFiles) -> None: self._files = files self.create = async_to_streamed_response_wrapper( files.create, ) self.retrieve = async_to_streamed_response_wrapper( files.retrieve, ) self.list = async_to_streamed_response_wrapper( files.list, ) self.delete = async_to_streamed_response_wrapper( files.delete, ) self.content = async_to_custom_streamed_response_wrapper( files.content, AsyncStreamedBinaryAPIResponse, ) self.retrieve_content = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( files.retrieve_content, # pyright: ignore[reportDeprecated], ) )
AsyncFilesWithStreamingResponse
python
aimacode__aima-python
reinforcement_learning4e.py
{ "start": 5692, "end": 7880 }
class ____: """ [Figure 21.4] The abstract class for a Passive (non-learning) agent that uses temporal differences to learn utility estimates. Override update_state method to convert percept to state and reward. The mdp being provided should be an instance of a subclass of the MDP Class. import sys from mdp import sequential_decision_environment north = (0, 1) south = (0,-1) west = (-1, 0) east = (1, 0) policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, (0, 1): north, (2, 1): north, (3, 1): None, (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,} agent = PassiveTDAgent(policy, sequential_decision_environment, alpha=lambda n: 60./(59+n)) for i in range(200): run_single_trial(agent,sequential_decision_environment) agent.U[(0, 0)] > 0.2 True agent.U[(0, 1)] > 0.2 True """ def __init__(self, pi, mdp, alpha=None): self.pi = pi self.U = {s: 0. for s in mdp.states} self.Ns = {s: 0 for s in mdp.states} self.s = None self.a = None self.r = None self.gamma = mdp.gamma self.terminals = mdp.terminals if alpha: self.alpha = alpha else: self.alpha = lambda n: 1 / (1 + n) # udacity video def __call__(self, percept): s1, r1 = self.update_state(percept) pi, U, Ns, s, r = self.pi, self.U, self.Ns, self.s, self.r alpha, gamma, terminals = self.alpha, self.gamma, self.terminals if not Ns[s1]: U[s1] = r1 if s is not None: Ns[s] += 1 U[s] += alpha(Ns[s]) * (r + gamma * U[s1] - U[s]) if s1 in terminals: self.s = self.a = self.r = None else: self.s, self.a, self.r = s1, pi[s1], r1 return self.a def update_state(self, percept): """To be overridden in most cases. The default case assumes the percept to be of type (state, reward).""" return percept # __________________________________________ # 21.3. Active Reinforcement Learning # 21.3.2 Learning an action-utility function
PassiveTDAgent
python
pytorch__pytorch
torch/_inductor/template_heuristics/triton.py
{ "start": 62582, "end": 62861 }
class ____(MMTemplateConfigMixin): """ Ensure that we feed in has_int8_tensor=True """ def __init__(self) -> None: super().__init__() self.has_int8_tensor = True # MMPlusMM specific mixin to avoid running _scale_mm_configs
INT8MMTemplateConfigMixin
python
tensorflow__tensorflow
tensorflow/python/tpu/tensor_tracer_report.py
{ "start": 7205, "end": 7883 }
class ____(object): """Context manager for writing report file.""" def __init__(self, tt_parameters): if not tt_parameters.report_file_path: self._report_file = None return try: self._report_file = gfile.Open(tt_parameters.report_file_path, 'w') except IOError as e: raise e def __enter__(self): return self._report_file def __exit__(self, unused_type, unused_value, unused_traceback): if self._report_file: self._report_file.close() def proto_fingerprint(message_proto): serialized_message = message_proto.SerializeToString() hasher = hashlib.sha256(serialized_message) return hasher.hexdigest()
OpenReportFile
python
python__mypy
mypy/traverser.py
{ "start": 26198, "end": 26733 }
class ____(TraverserVisitor): def __init__(self) -> None: self.found = False def visit_return_stmt(self, o: ReturnStmt) -> None: if o.expr is None or isinstance(o.expr, NameExpr) and o.expr.name == "None": return self.found = True def has_return_statement(fdef: FuncBase) -> bool: """Find if a function has a non-trivial return statement. Plain 'return' and 'return None' don't count. """ seeker = ReturnSeeker() fdef.accept(seeker) return seeker.found
ReturnSeeker
python
wandb__wandb
tests/system_tests/test_artifacts/test_model_workflows.py
{ "start": 67, "end": 2964 }
class ____: def wait(self): pass def is_draft(self): return False def test_offline_link_artifact(user): run = wandb.init(mode="offline") with pytest.raises(NotImplementedError): run.link_artifact(FakeArtifact(), "entity/project/portfolio", "latest") run.finish() def test_log_model( tmp_path: pathlib.Path, user, ): run = wandb.init() local_path = tmp_path / "boom.txt" local_path.write_text("testing") run.log_model(local_path, "test-model") run.finish() run = wandb.init() download_path = run.use_model("test-model:v0") file = download_path assert file == f"{env.get_artifact_dir()}/test-model:v0/boom.txt" run.finish() def test_use_model( tmp_path: pathlib.Path, user, ): run = wandb.init() local_path = tmp_path / "boom.txt" local_path.write_text("testing") logged_artifact = run.log_artifact(local_path, name="test-model", type="model") logged_artifact.wait() download_path = run.use_model("test-model:v0") file = download_path assert file == f"{env.get_artifact_dir()}/test-model:v0/boom.txt" run.finish() def test_use_model_error_artifact_type( user, tmp_path: pathlib.Path, ): run = wandb.init() local_path = tmp_path / "boom.txt" local_path.write_text("testing") logged_artifact = run.log_artifact(local_path, name="test-model", type="dataset") logged_artifact.wait() with pytest.raises(AssertionError): _ = run.use_model("test-model:v0") run.finish() def test_link_model( user, tmp_path: pathlib.Path, ): run = wandb.init() local_path = tmp_path / "boom.txt" local_path.write_text("testing") run.link_model(local_path, "test_portfolio", "test_model") run.finish() run = wandb.init() download_path = run.use_model("model-registry/test_portfolio:v0") file = download_path assert file == f"{env.get_artifact_dir()}/test_model:v0/boom.txt" run.finish() def test_link_model_error_artifact_type( user, tmp_path: pathlib.Path, ): run = wandb.init() local_path = tmp_path / "boom.txt" local_path.write_text("testing") logged_artifact = run.log_artifact(local_path, name="test_model", 
type="dataset") logged_artifact.wait() with pytest.raises(AssertionError): run.link_model(local_path, "test_portfolio", "test_model") run.finish() def test_link_model_log_new_artifact( user, tmp_path: pathlib.Path, ): run = wandb.init() local_path = tmp_path / "boom.txt" local_path.write_text("testing") run.link_model(local_path, "test_portfolio", "test_model") run.finish() run = wandb.init() download_path = run.use_model("model-registry/test_portfolio:v0") file = download_path assert file == f"{env.get_artifact_dir()}/test_model:v0/boom.txt" run.finish()
FakeArtifact
python
tensorflow__tensorflow
tensorflow/python/tools/print_selective_registration_header_test.py
{ "start": 2811, "end": 10286 }
class ____(test.TestCase): def setUp(self): _, self.script_name = os.path.split(sys.argv[0]) def WriteGraphFiles(self, graphs): fnames = [] for i, graph in enumerate(graphs): fname = os.path.join(self.get_temp_dir(), 'graph%s.pb' % i) with gfile.GFile(fname, 'wb') as f: f.write(graph.SerializeToString()) fnames.append(fname) return fnames def WriteTextFile(self, content): fname = os.path.join(self.get_temp_dir(), 'text.txt') with gfile.GFile(fname, 'w') as f: f.write(content) return [fname] def testGetOps(self): default_ops = 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp' graphs = [ text_format.Parse(d, graph_pb2.GraphDef()) for d in [GRAPH_DEF_TXT, GRAPH_DEF_TXT_2] ] ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'rawproto', self.WriteGraphFiles(graphs), default_ops) matmul_prefix = 'Batch' self.assertListEqual( [ ('AccumulateNV2', None), # ('BiasAdd', 'BiasOp<CPUDevice, float>'), # ('Const', 'ConstantOp'), # ('MatMul', matmul_prefix + 'MatMulOp<CPUDevice, double, double, double, true>'), # ('MatMul', matmul_prefix + 'MatMulOp<CPUDevice, float, float, float, true>'), # ('Maximum', 'BinaryOp<CPUDevice, functor::maximum<int64_t>>'), # ('NoOp', 'NoOp'), # ('Reshape', 'ReshapeOp'), # ('_Recv', 'RecvOp'), # ('_Send', 'SendOp'), # ], ops_and_kernels) graphs[0].node[0].ClearField('device') graphs[0].node[2].ClearField('device') ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'rawproto', self.WriteGraphFiles(graphs), default_ops) self.assertListEqual( [ ('AccumulateNV2', None), # ('BiasAdd', 'BiasOp<CPUDevice, float>'), # ('Const', 'ConstantOp'), # ('MatMul', matmul_prefix + 'MatMulOp<CPUDevice, double, double, double, true>'), # ('MatMul', matmul_prefix + 'MatMulOp<CPUDevice, float, float, float, true>'), # ('Maximum', 'BinaryOp<CPUDevice, functor::maximum<int64_t>>'), # ('NoOp', 'NoOp'), # ('Reshape', 'ReshapeOp'), # ('_Recv', 'RecvOp'), # ('_Send', 'SendOp'), # ], ops_and_kernels) def testGetOpsFromList(self): default_ops = '' 
# Test with 2 different ops. ops_list = """[["Add", "BinaryOp<CPUDevice, functor::add<float>>"], ["Softplus", "SoftplusOp<CPUDevice, float>"]]""" ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'ops_list', self.WriteTextFile(ops_list), default_ops) self.assertListEqual([ ('Add', 'BinaryOp<CPUDevice, functor::add<float>>'), ('Softplus', 'SoftplusOp<CPUDevice, float>'), ], ops_and_kernels) # Test with a single op. ops_list = '[["Softplus", "SoftplusOp<CPUDevice, float>"]]' ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'ops_list', self.WriteTextFile(ops_list), default_ops) self.assertListEqual([ ('Softplus', 'SoftplusOp<CPUDevice, float>'), ], ops_and_kernels) # Test with duplicated op. ops_list = """[["Add", "BinaryOp<CPUDevice, functor::add<float>>"], ["Add", "BinaryOp<CPUDevice, functor::add<float>>"]]""" ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'ops_list', self.WriteTextFile(ops_list), default_ops) self.assertListEqual([ ('Add', 'BinaryOp<CPUDevice, functor::add<float>>'), ], ops_and_kernels) # Test op with no kernel. ops_list = '[["Softplus", ""]]' ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'ops_list', self.WriteTextFile(ops_list), default_ops) self.assertListEqual([ ('Softplus', None), ], ops_and_kernels) # Test two ops_list files. ops_list = '[["Softplus", "SoftplusOp<CPUDevice, float>"]]' ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'ops_list', self.WriteTextFile(ops_list) + self.WriteTextFile(ops_list), default_ops) self.assertListEqual([ ('Softplus', 'SoftplusOp<CPUDevice, float>'), ], ops_and_kernels) # Test empty file. 
ops_list = '' with self.assertRaises(Exception): ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'ops_list', self.WriteTextFile(ops_list), default_ops) def testAll(self): default_ops = 'all' graphs = [ text_format.Parse(d, graph_pb2.GraphDef()) for d in [GRAPH_DEF_TXT, GRAPH_DEF_TXT_2] ] ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels( 'rawproto', self.WriteGraphFiles(graphs), default_ops) header = selective_registration_header_lib.get_header_from_ops_and_kernels( ops_and_kernels, include_all_ops_and_kernels=True) self.assertListEqual( [ '// This file was autogenerated by %s' % self.script_name, '#ifndef OPS_TO_REGISTER', # '#define OPS_TO_REGISTER', # '#define SHOULD_REGISTER_OP(op) true', # '#define SHOULD_REGISTER_OP_KERNEL(clz) true', # '#define SHOULD_REGISTER_OP_GRADIENT true', # '#endif' ], header.split('\n')) self.assertListEqual( header.split('\n'), selective_registration_header_lib.get_header( self.WriteGraphFiles(graphs), 'rawproto', default_ops).split('\n')) def testGetSelectiveHeader(self): default_ops = '' graphs = [text_format.Parse(GRAPH_DEF_TXT_2, graph_pb2.GraphDef())] expected = """// This file was autogenerated by %s #ifndef OPS_TO_REGISTER #define OPS_TO_REGISTER namespace { constexpr const char* skip(const char* x) { return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x; } constexpr bool isequal(const char* x, const char* y) { return (*skip(x) && *skip(y)) ? 
(*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1)) : (!*skip(x) && !*skip(y)); } template<int N> struct find_in { static constexpr bool f(const char* x, const char* const y[N]) { return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1); } }; template<> struct find_in<0> { static constexpr bool f(const char* x, const char* const y[]) { return false; } }; } // end namespace constexpr const char* kNecessaryOpKernelClasses[] = { "BiasOp<CPUDevice, float>", }; #define SHOULD_REGISTER_OP_KERNEL(clz) (find_in<sizeof(kNecessaryOpKernelClasses) / sizeof(*kNecessaryOpKernelClasses)>::f(clz, kNecessaryOpKernelClasses)) constexpr inline bool ShouldRegisterOp(const char op[]) { return false || isequal(op, "AccumulateNV2") || isequal(op, "BiasAdd") ; } #define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op) #define SHOULD_REGISTER_OP_GRADIENT false #endif""" % self.script_name header = selective_registration_header_lib.get_header( self.WriteGraphFiles(graphs), 'rawproto', default_ops) print(header) self.assertListEqual(expected.split('\n'), header.split('\n')) if __name__ == '__main__': test.main()
PrintOpFilegroupTest
python
Pylons__pyramid
docs/tutorials/wiki/src/authorization/tutorial/models/__init__.py
{ "start": 321, "end": 736 }
class ____(Persistent): def __init__(self, data): self.data = data def appmaker(zodb_root): if 'app_root' not in zodb_root: app_root = Wiki() frontpage = Page('This is the front page') app_root['FrontPage'] = frontpage frontpage.__name__ = 'FrontPage' frontpage.__parent__ = app_root zodb_root['app_root'] = app_root return zodb_root['app_root']
Page
python
scipy__scipy
benchmarks/benchmarks/go_benchmark_functions/go_funcs_B.py
{ "start": 18535, "end": 19715 }
class ____(Benchmark): r""" Bukin02 objective function. The Bukin02 [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\text{Bukin02}}(x) = 100 (x_2^2 - 0.01x_1^2 + 1) + 0.01(x_1 + 10)^2 with :math:`x_1 \in [-15, -5], x_2 \in [-3, 3]` *Global optimum*: :math:`f(x) = -124.75` for :math:`x = [-15, 0]` .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: I think that Gavana and Jamil are wrong on this function. In both sources the x[1] term is not squared. As such there will be a minimum at the smallest value of x[1]. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = [(-15.0, -5.0), (-3.0, 3.0)] self.global_optimum = [[-15.0, 0.0]] self.fglob = -124.75 def fun(self, x, *args): self.nfev += 1 return (100 * (x[1] ** 2 - 0.01 * x[0] ** 2 + 1.0) + 0.01 * (x[0] + 10.0) ** 2.0)
Bukin02