Columns:
language: stringclasses (1 value)
repo: stringclasses (346 values)
path: stringlengths (6 to 201)
class_span: dict
source: stringlengths (21 to 2.38M)
target: stringlengths (1 to 96)
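Each record pairs a Python class definition whose name has been masked as ____ (the source field) with the original class name (the target field); class_span appears to give the character offsets of that class within the file at path. As a minimal sketch of how the fields fit together, the record below is a hypothetical, abbreviated example (made-up repo slug, path, offsets, and class body, not an actual row); restoring the class is just a matter of substituting target back into source:

# Hypothetical, abbreviated record following the schema above (not a real row).
row = {
    "language": "python",
    "repo": "example__repo",                 # assumed "<owner>__<name>" slug format
    "path": "examples/solution.py",
    "class_span": {"start": 0, "end": 58},   # illustrative character offsets of the class in the file
    "source": "class ____:\n    def answer(self) -> int:\n        return 42",
    "target": "Answer",
}

# Restore the original class definition by filling the masked name back in.
restored = row["source"].replace("____", row["target"], 1)
assert restored.startswith("class Answer:")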
language: python
repo: getsentry__sentry
path: tests/sentry/preprod/api/endpoints/test_project_preprod_artifact_update.py
class_span: { "start": 342, "end": 17140 }
class ____(TestCase): def setUp(self) -> None: super().setUp() self.file = self.create_file(name="test_artifact.apk", type="application/octet-stream") self.preprod_artifact = PreprodArtifact.objects.create( project=self.project, file_id=self.file.id, state=PreprodArtifact.ArtifactState.UPLOADING, ) def _get_url(self, artifact_id=None): artifact_id = artifact_id or self.preprod_artifact.id return f"/api/0/internal/{self.organization.slug}/{self.project.slug}/files/preprodartifacts/{artifact_id}/update/" def _make_request(self, data, artifact_id=None, authenticated=True): url = self._get_url(artifact_id) json_data = orjson.dumps(data) if isinstance(data, dict) else data kwargs: dict[str, Any] = {"data": json_data, "content_type": "application/json"} if authenticated: signature = generate_service_request_signature( url, json_data, ["test-secret-key"], "Launchpad" ) kwargs["HTTP_AUTHORIZATION"] = f"rpcsignature {signature}" return self.client.put(url, **kwargs) @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_success(self) -> None: data = { "date_built": "2024-01-01T00:00:00Z", "artifact_type": 1, "build_version": "1.2.3", "build_number": 123, } response = self._make_request(data) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True assert resp_data["artifactId"] == str(self.preprod_artifact.id) assert set(resp_data["updatedFields"]) == { "date_built", "artifact_type", "build_version", "build_number", "state", } self.preprod_artifact.refresh_from_db() assert self.preprod_artifact.date_built is not None assert self.preprod_artifact.date_built.isoformat() == "2024-01-01T00:00:00+00:00" assert self.preprod_artifact.artifact_type == 1 assert self.preprod_artifact.build_version == "1.2.3" assert self.preprod_artifact.build_number == 123 @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_partial_update(self) -> None: data = {"artifact_type": 2, "error_message": "Build failed"} response = self._make_request(data) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True assert set(resp_data["updatedFields"]) == {"artifact_type", "error_message", "state"} self.preprod_artifact.refresh_from_db() assert self.preprod_artifact.artifact_type == 2 assert self.preprod_artifact.error_message == "Build failed" assert self.preprod_artifact.state == PreprodArtifact.ArtifactState.FAILED @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_sets_failed_state_on_error(self) -> None: # Test that setting error_code sets state to FAILED data = {"error_code": 1} response = self._make_request(data) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True assert set(resp_data["updatedFields"]) == {"error_code", "state"} self.preprod_artifact.refresh_from_db() assert self.preprod_artifact.error_code == 1 assert self.preprod_artifact.state == PreprodArtifact.ArtifactState.FAILED # Reset for next test self.preprod_artifact.state = PreprodArtifact.ArtifactState.UPLOADING self.preprod_artifact.error_code = None self.preprod_artifact.save() # Test that setting error_message sets state to FAILED data_two = {"error_message": "Processing failed"} response = self._make_request(data_two) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True assert set(resp_data["updatedFields"]) == {"error_message", "state"} 
self.preprod_artifact.refresh_from_db() assert self.preprod_artifact.error_message == "Processing failed" assert self.preprod_artifact.state == PreprodArtifact.ArtifactState.FAILED @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_not_found(self) -> None: response = self._make_request({"artifact_type": 1}, artifact_id=999999) assert response.status_code == 404 assert "The requested head preprod artifact does not exist" in response.json()["detail"] @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_invalid_json(self) -> None: response = self._make_request(b"invalid json") assert response.status_code == 400 assert "Invalid json body" in response.json()["error"] @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_invalid_schema(self) -> None: response = self._make_request({"artifact_type": 99}) # Invalid value assert response.status_code == 400 assert ( "The artifact_type field must be an integer between 0 and 2." in response.json()["error"] ) @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_extra_properties(self) -> None: response = self._make_request({"artifact_type": 1, "extra_field": "not allowed"}) assert response.status_code == 200 def test_update_preprod_artifact_unauthorized(self) -> None: response = self._make_request({"artifact_type": 1}, authenticated=False) assert response.status_code == 401 @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_empty_update(self) -> None: response = self._make_request({}) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True assert resp_data["updatedFields"] == [] @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_with_apple_app_info(self) -> None: apple_info = { "is_simulator": True, "codesigning_type": "development", "profile_name": "Test Profile", "is_code_signature_valid": False, "code_signature_errors": ["Certificate expired", "Missing entitlements"], "missing_dsym_binaries": ["TestLib.dylib", "TestFramework.framework"], } data = { "date_built": "2024-01-01T00:00:00Z", "artifact_type": 1, "build_version": "1.2.3", "build_number": 123, "apple_app_info": apple_info, } response = self._make_request(data) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True assert "extras" in resp_data["updatedFields"] self.preprod_artifact.refresh_from_db() stored_apple_info = self.preprod_artifact.extras or {} # Verify that missing_dsym_binaries array is converted to has_missing_dsym_binaries boolean expected_extras = { "is_simulator": True, "codesigning_type": "development", "profile_name": "Test Profile", "is_code_signature_valid": False, "code_signature_errors": ["Certificate expired", "Missing entitlements"], "has_missing_dsym_binaries": True, # Converted from non-empty array } assert stored_apple_info == expected_extras @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_with_android_app_info(self) -> None: android_info = { "has_proguard_mapping": True, } data = { "date_built": "2024-01-01T00:00:00Z", "artifact_type": 2, "build_version": "1.2.3", "build_number": 123, "android_app_info": android_info, } response = self._make_request(data) assert response.status_code == 200 resp_data = response.json() assert 
resp_data["success"] is True assert "extras" in resp_data["updatedFields"] self.preprod_artifact.refresh_from_db() stored_android_info = self.preprod_artifact.extras or {} assert stored_android_info == android_info assert stored_android_info["has_proguard_mapping"] is True @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_with_missing_dsym_binaries_empty_array(self) -> None: """Test that empty missing_dsym_binaries array converts to has_missing_dsym_binaries=False.""" apple_info: dict[str, Any] = {"missing_dsym_binaries": []} data = { "artifact_type": 1, "apple_app_info": apple_info, } response = self._make_request(data) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True self.preprod_artifact.refresh_from_db() stored_apple_info = self.preprod_artifact.extras or {} assert stored_apple_info.get("has_missing_dsym_binaries") is False @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_with_missing_dsym_binaries_non_empty_array(self) -> None: """Test that non-empty missing_dsym_binaries array converts to has_missing_dsym_binaries=True.""" # Even a large list should just convert to True large_list = [f"VeryLongLibraryName{i:04d}.dylib" for i in range(40)] apple_info: dict[str, Any] = {"missing_dsym_binaries": large_list} data = { "artifact_type": 1, "apple_app_info": apple_info, } response = self._make_request(data) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True self.preprod_artifact.refresh_from_db() stored_apple_info = self.preprod_artifact.extras or {} assert stored_apple_info.get("has_missing_dsym_binaries") is True @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_with_partial_apple_app_info(self) -> None: apple_info = { "is_simulator": False, "codesigning_type": "distribution", } data = { "artifact_type": 2, "apple_app_info": apple_info, } response = self._make_request(data) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True assert "extras" in resp_data["updatedFields"] self.preprod_artifact.refresh_from_db() stored_apple_info = self.preprod_artifact.extras or {} # Should only contain the fields that were provided assert stored_apple_info == apple_info assert "profile_name" not in stored_apple_info assert "is_code_signature_valid" not in stored_apple_info @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_preserves_existing_extras(self) -> None: """Test that updating with apple_app_info preserves existing extras data like release_notes""" # First, create an artifact with existing extras (release notes) self.preprod_artifact.extras = {"release_notes": "Important bug fixes in this release"} self.preprod_artifact.save() # Update with apple app info apple_info = { "is_simulator": False, "codesigning_type": "distribution", "profile_name": "Production Profile", } data = { "apple_app_info": apple_info, } response = self._make_request(data) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True assert "extras" in resp_data["updatedFields"] self.preprod_artifact.refresh_from_db() stored_extras = self.preprod_artifact.extras or {} # Should contain both the original release notes and the new apple app info assert stored_extras["release_notes"] == "Important bug fixes in this release" assert 
stored_extras["is_simulator"] is False assert stored_extras["codesigning_type"] == "distribution" assert stored_extras["profile_name"] == "Production Profile" @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_release_only_created_on_first_transition_to_processed(self) -> None: from sentry.models.release import Release data = { "app_id": "com.example.app", "build_version": "1.0.0", "build_number": 123, } response = self._make_request(data) assert response.status_code == 200 self.preprod_artifact.refresh_from_db() assert self.preprod_artifact.state == PreprodArtifact.ArtifactState.PROCESSED releases = Release.objects.filter( organization_id=self.project.organization_id, projects=self.project, version="com.example.app@1.0.0+123", ) assert releases.count() == 1 created_release = releases.first() assert created_release is not None data2 = { "date_built": "2024-01-01T00:00:00Z", } response2 = self._make_request(data2) assert response2.status_code == 200 self.preprod_artifact.refresh_from_db() assert self.preprod_artifact.state == PreprodArtifact.ArtifactState.PROCESSED releases_after = Release.objects.filter( organization_id=self.project.organization_id, projects=self.project, version="com.example.app@1.0.0+123", ) assert releases_after.count() == 1 first_release = releases_after.first() assert first_release is not None assert first_release.id == created_release.id @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_release_created_when_conditions_met_even_no_fields_updated(self) -> None: from sentry.models.release import Release self.preprod_artifact.state = PreprodArtifact.ArtifactState.PROCESSED self.preprod_artifact.app_id = "com.example.app" self.preprod_artifact.build_version = "1.0.0" self.preprod_artifact.build_number = 123 self.preprod_artifact.save() response = self._make_request({}) assert response.status_code == 200 resp_data = response.json() assert resp_data["updatedFields"] == [] releases = Release.objects.filter( organization_id=self.project.organization_id, projects=self.project, version="com.example.app@1.0.0+123", ) assert releases.count() == 1 @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_with_dequeued_at(self) -> None: data = { "dequeued_at": "2024-04-07T14:03:18+00:00", } response = self._make_request(data) assert response.status_code == 200 resp_data = response.json() assert resp_data["success"] is True assert "extras" in resp_data["updatedFields"] self.preprod_artifact.refresh_from_db() stored_extras = self.preprod_artifact.extras or {} assert stored_extras["dequeued_at"] == "2024-04-07T14:03:18+00:00" @override_settings(LAUNCHPAD_RPC_SHARED_SECRET=["test-secret-key"]) def test_update_preprod_artifact_preserves_existing_data(self) -> None: self.preprod_artifact.extras = {"is_simulator": False, "existing_field": "value"} self.preprod_artifact.save() data = { "dequeued_at": "2024-04-07T14:03:18+00:00", } response = self._make_request(data) assert response.status_code == 200 self.preprod_artifact.refresh_from_db() stored_extras = self.preprod_artifact.extras or {} assert stored_extras["dequeued_at"] == "2024-04-07T14:03:18+00:00" assert stored_extras["is_simulator"] is False assert stored_extras["existing_field"] == "value"
target: ProjectPreprodArtifactUpdateEndpointTest
language: python
repo: walkccc__LeetCode
path: solutions/770. Basic Calculator IV/770.py
class_span: { "start": 0, "end": 1848 }
source:
class ____:
  def __init__(self, term: str = None, coef: int = None):
    if term and coef:
      self.terms = collections.Counter({term: coef})
    else:
      self.terms = collections.Counter()

  def __add__(self, other):
    for term, coef in other.terms.items():
      self.terms[term] += coef
    return self

  def __sub__(self, other):
    for term, coef in other.terms.items():
      self.terms[term] -= coef
    return self

  def __mul__(self, other):
    res = Poly()
    for a, aCoef in self.terms.items():
      for b, bCoef in other.terms.items():
        res.terms[self._merge(a, b)] += aCoef * bCoef
    return res

  # def __str__(self):
  #   res = []
  #   for term, coef in self.terms.items():
  #     res.append(term + ': ' + str(coef))
  #   return '{' + ', '.join(res) + '}'

  def toList(self) -> list[str]:
    for term in list(self.terms.keys()):
      if not self.terms[term]:
        del self.terms[term]

    def cmp(term: str) -> tuple:
      # the minimum degree is the last
      if term == '1':
        return (0,)
      var = term.split('*')
      # the maximum degree is the first
      # Break ties by their lexicographic orders.
      return (-len(var), term)

    def concat(term: str) -> str:
      if term == '1':
        return str(self.terms[term])
      return str(self.terms[term]) + '*' + term

    terms = list(self.terms.keys())
    terms.sort(key=cmp)
    return [concat(term) for term in terms]

  def _merge(self, a: str, b: str) -> str:
    if a == '1':
      return b
    if b == '1':
      return a
    res = []
    A = a.split('*')
    B = b.split('*')
    i = 0  # A's index
    j = 0  # B's index
    while i < len(A) and j < len(B):
      if A[i] < B[j]:
        res.append(A[i])
        i += 1
      else:
        res.append(B[j])
        j += 1
    return '*'.join(res + A[i:] + B[j:])
target: Poly
language: python
repo: sqlalchemy__sqlalchemy
path: test/orm/test_versioning.py
class_span: { "start": 38671, "end": 52098 }
class ____(fixtures.MappedTest): run_define_tables = "each" __sparse_driver_backend__ = True @classmethod def define_tables(cls, metadata): from sqlalchemy.sql import ColumnElement from sqlalchemy.ext.compiler import compiles import itertools counter = itertools.count(1) class IncDefault(ColumnElement): pass @compiles(IncDefault) def compile_(element, compiler, **kw): # cache the counter value on the statement # itself so the assertsql system gets the same # value when it compiles the statement a second time stmt = compiler.statement if hasattr(stmt, "_counter"): return stmt._counter else: stmt._counter = str(next(counter)) return stmt._counter Table( "version_table", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column( "version_id", Integer, nullable=False, default=IncDefault(), onupdate=IncDefault(), ), Column("value", String(40), nullable=False), ) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass class Bar(cls.Basic): pass def _fixture(self, expire_on_commit=True, eager_defaults=False): Foo, version_table = self.classes.Foo, self.tables.version_table self.mapper_registry.map_imperatively( Foo, version_table, version_id_col=version_table.c.version_id, version_id_generator=False, eager_defaults=eager_defaults, ) s1 = fixture_session(expire_on_commit=expire_on_commit) return s1 def test_insert_col(self): self._test_insert_col() def test_insert_col_eager_defaults(self): self._test_insert_col(eager_defaults=True) def _test_insert_col(self, **kw): sess = self._fixture(**kw) f1 = self.classes.Foo(value="f1") sess.add(f1) statements = [ CompiledSQL( "INSERT INTO version_table (version_id, value) " "VALUES (1, :value) " "RETURNING version_table.id, version_table.version_id", lambda ctx: [{"value": "f1"}], ) ] if not testing.db.dialect.insert_returning: # DBs without implicit returning, we must immediately # SELECT for the new version id statements.append( CompiledSQL( "SELECT version_table.version_id " "FROM version_table WHERE version_table.id = :pk_1", lambda ctx: [{"pk_1": 1}], ) ) self.assert_sql_execution(testing.db, sess.flush, *statements) def test_update_col(self): self._test_update_col() def test_update_col_eager_defaults(self): self._test_update_col(eager_defaults=True) def _test_update_col(self, **kw): sess = self._fixture(**kw) f1 = self.classes.Foo(value="f1") sess.add(f1) sess.flush() f1.value = "f2" if testing.db.dialect.update_returning: statements = [ CompiledSQL( "UPDATE version_table SET version_id=2, value=:value " "WHERE version_table.id = :version_table_id AND " "version_table.version_id = :version_table_version_id " "RETURNING version_table.version_id", lambda ctx: [ { "version_table_id": 1, "version_table_version_id": 1, "value": "f2", } ], enable_returning=True, ) ] else: # DBs without implicit returning, we must immediately # SELECT for the new version id statements = [ CompiledSQL( "UPDATE version_table SET version_id=2, value=:value " "WHERE version_table.id = :version_table_id AND " "version_table.version_id = :version_table_version_id", lambda ctx: [ { "version_table_id": 1, "version_table_version_id": 1, "value": "f2", } ], enable_returning=False, ), CompiledSQL( "SELECT version_table.version_id " "FROM version_table WHERE version_table.id = :pk_1", lambda ctx: [{"pk_1": 1}], ), ] with conditional_sane_rowcount_warnings( update=True, only_returning=True ): self.assert_sql_execution(testing.db, sess.flush, *statements) @testing.requires.updateable_autoincrement_pks def test_sql_expr_bump(self): sess = 
self._fixture() f1 = self.classes.Foo(value="f1") sess.add(f1) sess.flush() eq_(f1.version_id, 1) f1.id = self.classes.Foo.id + 0 with conditional_sane_rowcount_warnings( update=True, only_returning=True ): sess.flush() eq_(f1.version_id, 2) @testing.requires.updateable_autoincrement_pks @testing.requires.update_returning def test_sql_expr_w_mods_bump(self): sess = self._fixture() f1 = self.classes.Foo(id=2, value="f1") sess.add(f1) sess.flush() eq_(f1.version_id, 1) f1.id = self.classes.Foo.id + 3 with conditional_sane_rowcount_warnings(update=True): sess.flush() eq_(f1.id, 5) eq_(f1.version_id, 2) def test_multi_update(self): sess = self._fixture() f1 = self.classes.Foo(value="f1") f2 = self.classes.Foo(value="f2") f3 = self.classes.Foo(value="f3") sess.add_all([f1, f2, f3]) sess.flush() f1.value = "f1a" f2.value = "f2a" f3.value = "f3a" if testing.db.dialect.update_returning: statements = [ CompiledSQL( "UPDATE version_table SET version_id=2, value=:value " "WHERE version_table.id = :version_table_id AND " "version_table.version_id = :version_table_version_id " "RETURNING version_table.version_id", lambda ctx: [ { "version_table_id": 1, "version_table_version_id": 1, "value": "f1a", } ], enable_returning=True, ), CompiledSQL( "UPDATE version_table SET version_id=2, value=:value " "WHERE version_table.id = :version_table_id AND " "version_table.version_id = :version_table_version_id " "RETURNING version_table.version_id", lambda ctx: [ { "version_table_id": 2, "version_table_version_id": 1, "value": "f2a", } ], enable_returning=True, ), CompiledSQL( "UPDATE version_table SET version_id=2, value=:value " "WHERE version_table.id = :version_table_id AND " "version_table.version_id = :version_table_version_id " "RETURNING version_table.version_id", lambda ctx: [ { "version_table_id": 3, "version_table_version_id": 1, "value": "f3a", } ], enable_returning=True, ), ] else: # DBs without update returning, we must immediately # SELECT for the new version id statements = [ CompiledSQL( "UPDATE version_table SET version_id=2, value=:value " "WHERE version_table.id = :version_table_id AND " "version_table.version_id = :version_table_version_id", lambda ctx: [ { "version_table_id": 1, "version_table_version_id": 1, "value": "f1a", } ], enable_returning=False, ), CompiledSQL( "UPDATE version_table SET version_id=2, value=:value " "WHERE version_table.id = :version_table_id AND " "version_table.version_id = :version_table_version_id", lambda ctx: [ { "version_table_id": 2, "version_table_version_id": 1, "value": "f2a", } ], enable_returning=False, ), CompiledSQL( "UPDATE version_table SET version_id=2, value=:value " "WHERE version_table.id = :version_table_id AND " "version_table.version_id = :version_table_version_id", lambda ctx: [ { "version_table_id": 3, "version_table_version_id": 1, "value": "f3a", } ], enable_returning=False, ), CompiledSQL( "SELECT version_table.version_id " "FROM version_table WHERE version_table.id = :pk_1", lambda ctx: [{"pk_1": 1}], ), CompiledSQL( "SELECT version_table.version_id " "FROM version_table WHERE version_table.id = :pk_1", lambda ctx: [{"pk_1": 2}], ), CompiledSQL( "SELECT version_table.version_id " "FROM version_table WHERE version_table.id = :pk_1", lambda ctx: [{"pk_1": 3}], ), ] with conditional_sane_rowcount_warnings( update=True, only_returning=True ): self.assert_sql_execution(testing.db, sess.flush, *statements) def test_delete_col(self): sess = self._fixture() f1 = self.classes.Foo(value="f1") sess.add(f1) sess.flush() sess.delete(f1) statements = [ # 
note that the assertsql tests the rule against # "default" - on a "returning" backend, the statement # includes "RETURNING" CompiledSQL( "DELETE FROM version_table " "WHERE version_table.id = :id AND " "version_table.version_id = :version_id", lambda ctx: [{"id": 1, "version_id": 1}], ) ] with conditional_sane_rowcount_warnings(delete=True): self.assert_sql_execution(testing.db, sess.flush, *statements) @testing.requires.independent_connections @provision.allow_stale_updates def test_concurrent_mod_err_expire_on_commit(self): sess = self._fixture() f1 = self.classes.Foo(value="f1") sess.add(f1) sess.commit() f1.value s2 = fixture_session() f2 = s2.query(self.classes.Foo).first() f2.value = "f2" s2.commit() f1.value = "f3" assert_raises_message( orm.exc.StaleDataError, r"UPDATE statement on table 'version_table' expected to " r"update 1 row\(s\); 0 were matched.", sess.commit, ) @testing.requires.independent_connections def test_concurrent_mod_err_noexpire_on_commit(self): sess = self._fixture(expire_on_commit=False) f1 = self.classes.Foo(value="f1") sess.add(f1) sess.commit() # here, we're not expired overall, so no load occurs and we # stay without a version id, unless we've emitted # a SELECT for it within the flush. f1.value s2 = fixture_session(expire_on_commit=False) f2 = s2.query(self.classes.Foo).first() f2.value = "f2" s2.commit() f1.value = "f3" assert_raises_message( orm.exc.StaleDataError, r"UPDATE statement on table 'version_table' expected to " r"update 1 row\(s\); 0 were matched.", sess.commit, )
target: ServerVersioningTest
language: python
repo: aio-libs__aiohttp
path: aiohttp/client_exceptions.py
class_span: { "start": 3511, "end": 3591 }
source:
class ____(ClientConnectionError, OSError):
    """OSError error."""
target: ClientOSError
language: python
repo: coleifer__peewee
path: playhouse/migrate.py
class_span: { "start": 3725, "end": 5156 }
source:
class ____(object):
    """Encapsulate a single schema altering operation."""
    def __init__(self, migrator, method, *args, **kwargs):
        self.migrator = migrator
        self.method = method
        self.args = args
        self.kwargs = kwargs

    def execute(self, node):
        self.migrator.database.execute(node)

    def _handle_result(self, result):
        if isinstance(result, (Node, Context)):
            self.execute(result)
        elif isinstance(result, Operation):
            result.run()
        elif isinstance(result, (list, tuple)):
            for item in result:
                self._handle_result(item)

    def run(self):
        kwargs = self.kwargs.copy()
        kwargs['with_context'] = True
        method = getattr(self.migrator, self.method)
        self._handle_result(method(*self.args, **kwargs))


def operation(fn):
    @functools.wraps(fn)
    def inner(self, *args, **kwargs):
        with_context = kwargs.pop('with_context', False)
        if with_context:
            return fn(self, *args, **kwargs)
        return Operation(self, fn.__name__, *args, **kwargs)
    return inner


def make_index_name(table_name, columns):
    index_name = '_'.join((table_name,) + tuple(columns))
    if len(index_name) > 64:
        index_hash = hashlib.md5(index_name.encode('utf-8')).hexdigest()
        index_name = '%s_%s' % (index_name[:56], index_hash[:7])
    return index_name
target: Operation
language: python
repo: doocs__leetcode
path: solution/3100-3199/3137.Minimum Number of Operations to Make Word K-Periodic/Solution.py
class_span: { "start": 0, "end": 199 }
source:
class ____:
    def minimumOperationsToMakeKPeriodic(self, word: str, k: int) -> int:
        n = len(word)
        return n // k - max(Counter(word[i : i + k] for i in range(0, n, k)).values())
target: Solution
language: python
repo: apache__airflow
path: task-sdk/src/airflow/sdk/io/stat.py
class_span: { "start": 866, "end": 2494 }
source:
class ____(dict):
    """
    stat_result: Result from stat, fstat, or lstat.

    This object provides a subset of os.stat_result attributes,
    for results returned from ObjectStoragePath.stat()

    It provides st_dev, st_ino, st_mode, st_nlink, st_uid, st_gid,
    st_size and st_mtime if they are available from the underlying
    storage. Extended attributes maybe accessed via dict access.

    See os.stat for more information.
    """

    st_dev = property(lambda self: 0)
    """device"""

    st_size = property(lambda self: self.get("size", 0))
    """total size, in bytes"""

    st_gid = property(lambda self: self.get("gid", 0))
    """group ID of owner"""

    st_uid = property(lambda self: self.get("uid", 0))
    """user ID of owner"""

    st_ino = property(lambda self: self.get("ino", 0))
    """inode"""

    st_nlink = property(lambda self: self.get("nlink", 0))
    """number of hard links"""

    @property
    def st_mtime(self):
        """Time of most recent content modification."""
        if "mtime" in self:
            return self.get("mtime")
        if "LastModified" in self:
            return self.get("LastModified").timestamp()
        # per posix.py
        return 0

    @property
    def st_mode(self):
        """Protection bits."""
        if "mode" in self:
            return self.get("mode")

        # per posix.py
        mode = 0o0
        if self.get("type", "") == "file":
            mode = S_IFREG
        if self.get("type", "") == "directory":
            mode = S_IFDIR
        if self.get("isLink", False):
            mode = S_IFLNK
        return mode
target: stat_result
language: python
repo: pallets__werkzeug
path: src/werkzeug/wrappers/request.py
class_span: { "start": 931, "end": 24730 }
class ____(_SansIORequest): """Represents an incoming WSGI HTTP request, with headers and body taken from the WSGI environment. Has properties and methods for using the functionality defined by various HTTP specs. The data in requests object is read-only. Text data is assumed to use UTF-8 encoding, which should be true for the vast majority of modern clients. Using an encoding set by the client is unsafe in Python due to extra encodings it provides, such as ``zip``. To change the assumed encoding, subclass and replace :attr:`charset`. :param environ: The WSGI environ is generated by the WSGI server and contains information about the server configuration and client request. :param populate_request: Add this request object to the WSGI environ as ``environ['werkzeug.request']``. Can be useful when debugging. :param shallow: Makes reading from :attr:`stream` (and any method that would read from it) raise a :exc:`RuntimeError`. Useful to prevent consuming the form data in middleware, which would make it unavailable to the final application. .. versionchanged:: 3.0 The ``charset``, ``url_charset``, and ``encoding_errors`` parameters were removed. .. versionchanged:: 2.1 Old ``BaseRequest`` and mixin classes were removed. .. versionchanged:: 2.1 Remove the ``disable_data_descriptor`` attribute. .. versionchanged:: 2.0 Combine ``BaseRequest`` and mixins into a single ``Request`` class. .. versionchanged:: 0.5 Read-only mode is enforced with immutable classes for all data. """ #: the maximum content length. This is forwarded to the form data #: parsing function (:func:`parse_form_data`). When set and the #: :attr:`form` or :attr:`files` attribute is accessed and the #: parsing fails because more than the specified value is transmitted #: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised. #: #: .. versionadded:: 0.5 max_content_length: int | None = None #: the maximum form field size. This is forwarded to the form data #: parsing function (:func:`parse_form_data`). When set and the #: :attr:`form` or :attr:`files` attribute is accessed and the #: data in memory for post data is longer than the specified value a #: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised. #: #: .. versionchanged:: 3.1 #: Defaults to 500kB instead of unlimited. #: #: .. versionadded:: 0.5 max_form_memory_size: int | None = 500_000 #: The maximum number of multipart parts to parse, passed to #: :attr:`form_data_parser_class`. Parsing form data with more than this #: many parts will raise :exc:`~.RequestEntityTooLarge`. #: #: .. versionadded:: 2.2.3 max_form_parts = 1000 #: The form data parser that should be used. Can be replaced to customize #: the form date parsing. form_data_parser_class: type[FormDataParser] = FormDataParser #: The WSGI environment containing HTTP headers and information from #: the WSGI server. environ: WSGIEnvironment #: Set when creating the request object. If ``True``, reading from #: the request body will cause a ``RuntimeException``. Useful to #: prevent modifying the stream from middleware. 
shallow: bool def __init__( self, environ: WSGIEnvironment, populate_request: bool = True, shallow: bool = False, ) -> None: super().__init__( method=environ.get("REQUEST_METHOD", "GET"), scheme=environ.get("wsgi.url_scheme", "http"), server=_get_server(environ), root_path=_wsgi_decoding_dance(environ.get("SCRIPT_NAME") or ""), path=_wsgi_decoding_dance(environ.get("PATH_INFO") or ""), query_string=environ.get("QUERY_STRING", "").encode("latin1"), headers=EnvironHeaders(environ), remote_addr=environ.get("REMOTE_ADDR"), ) self.environ = environ self.shallow = shallow if populate_request and not shallow: self.environ["werkzeug.request"] = self @classmethod def from_values(cls, *args: t.Any, **kwargs: t.Any) -> Request: """Create a new request object based on the values provided. If environ is given missing values are filled from there. This method is useful for small scripts when you need to simulate a request from an URL. Do not use this method for unittesting, there is a full featured client object (:class:`Client`) that allows to create multipart requests, support for cookies etc. This accepts the same options as the :class:`~werkzeug.test.EnvironBuilder`. .. versionchanged:: 0.5 This method now accepts the same arguments as :class:`~werkzeug.test.EnvironBuilder`. Because of this the `environ` parameter is now called `environ_overrides`. :return: request object """ from ..test import EnvironBuilder builder = EnvironBuilder(*args, **kwargs) try: return builder.get_request(cls) finally: builder.close() @classmethod def application(cls, f: t.Callable[[Request], WSGIApplication]) -> WSGIApplication: """Decorate a function as responder that accepts the request as the last argument. This works like the :func:`responder` decorator but the function is passed the request object as the last argument and the request object will be closed automatically:: @Request.application def my_wsgi_app(request): return Response('Hello World!') As of Werkzeug 0.14 HTTP exceptions are automatically caught and converted to responses instead of failing. :param f: the WSGI callable to decorate :return: a new WSGI callable """ #: return a callable that wraps the -2nd argument with the request #: and calls the function with all the arguments up to that one and #: the request. The return value is then called with the latest #: two arguments. This makes it possible to use this decorator for #: both standalone WSGI functions as well as bound methods and #: partially applied functions. from ..exceptions import HTTPException @functools.wraps(f) def application(*args: t.Any) -> cabc.Iterable[bytes]: request = cls(args[-2]) with request: try: resp = f(*args[:-2] + (request,)) except HTTPException as e: resp = t.cast("WSGIApplication", e.get_response(args[-2])) return resp(*args[-2:]) return t.cast("WSGIApplication", application) def _get_file_stream( self, total_content_length: int | None, content_type: str | None, filename: str | None = None, content_length: int | None = None, ) -> t.IO[bytes]: """Called to get a stream for the file upload. This must provide a file-like class with `read()`, `readline()` and `seek()` methods that is both writeable and readable. The default implementation returns a temporary file if the total content length is higher than 500KB. Because many browsers do not provide a content length for the files only the total content length matters. :param total_content_length: the total content length of all the data in the request combined. This value is guaranteed to be there. 
:param content_type: the mimetype of the uploaded file. :param filename: the filename of the uploaded file. May be `None`. :param content_length: the length of this file. This value is usually not provided because webbrowsers do not provide this value. """ return default_stream_factory( total_content_length=total_content_length, filename=filename, content_type=content_type, content_length=content_length, ) @property def want_form_data_parsed(self) -> bool: """``True`` if the request method carries content. By default this is true if a ``Content-Type`` is sent. .. versionadded:: 0.8 """ return bool(self.environ.get("CONTENT_TYPE")) def make_form_data_parser(self) -> FormDataParser: """Creates the form data parser. Instantiates the :attr:`form_data_parser_class` with some parameters. .. versionadded:: 0.8 """ return self.form_data_parser_class( stream_factory=self._get_file_stream, max_form_memory_size=self.max_form_memory_size, max_content_length=self.max_content_length, max_form_parts=self.max_form_parts, cls=self.parameter_storage_class, ) def _load_form_data(self) -> None: """Method used internally to retrieve submitted data. After calling this sets `form` and `files` on the request object to multi dicts filled with the incoming form data. As a matter of fact the input stream will be empty afterwards. You can also call this method to force the parsing of the form data. .. versionadded:: 0.8 """ # abort early if we have already consumed the stream if "form" in self.__dict__: return if self.want_form_data_parsed: parser = self.make_form_data_parser() data = parser.parse( self._get_stream_for_parsing(), self.mimetype, self.content_length, self.mimetype_params, ) else: data = ( self.stream, self.parameter_storage_class(), self.parameter_storage_class(), ) # inject the values into the instance dict so that we bypass # our cached_property non-data descriptor. d = self.__dict__ d["stream"], d["form"], d["files"] = data def _get_stream_for_parsing(self) -> t.IO[bytes]: """This is the same as accessing :attr:`stream` with the difference that if it finds cached data from calling :meth:`get_data` first it will create a new stream out of the cached data. .. versionadded:: 0.9.3 """ cached_data = getattr(self, "_cached_data", None) if cached_data is not None: return BytesIO(cached_data) return self.stream def close(self) -> None: """Closes associated resources of this request object. This closes all file handles explicitly. You can also use the request object in a with statement which will automatically close it. .. versionadded:: 0.9 """ files = self.__dict__.get("files") for _key, value in iter_multi_items(files or ()): value.close() def __enter__(self) -> Request: return self def __exit__(self, exc_type, exc_value, tb) -> None: # type: ignore self.close() @cached_property def stream(self) -> t.IO[bytes]: """The WSGI input stream, with safety checks. This stream can only be consumed once. Use :meth:`get_data` to get the full data as bytes or text. The :attr:`data` attribute will contain the full bytes only if they do not represent form data. The :attr:`form` attribute will contain the parsed form data in that case. Unlike :attr:`input_stream`, this stream guards against infinite streams or reading past :attr:`content_length` or :attr:`max_content_length`. If ``max_content_length`` is set, it can be enforced on streams if ``wsgi.input_terminated`` is set. Otherwise, an empty stream is returned. 
If the limit is reached before the underlying stream is exhausted (such as a file that is too large, or an infinite stream), the remaining contents of the stream cannot be read safely. Depending on how the server handles this, clients may show a "connection reset" failure instead of seeing the 413 response. .. versionchanged:: 2.3 Check ``max_content_length`` preemptively and while reading. .. versionchanged:: 0.9 The stream is always set (but may be consumed) even if form parsing was accessed first. """ if self.shallow: raise RuntimeError( "This request was created with 'shallow=True', reading" " from the input stream is disabled." ) return get_input_stream( self.environ, max_content_length=self.max_content_length ) input_stream = environ_property[t.IO[bytes]]( "wsgi.input", doc="""The raw WSGI input stream, without any safety checks. This is dangerous to use. It does not guard against infinite streams or reading past :attr:`content_length` or :attr:`max_content_length`. Use :attr:`stream` instead. """, ) @cached_property def data(self) -> bytes: """The raw data read from :attr:`stream`. Will be empty if the request represents form data. To get the raw data even if it represents form data, use :meth:`get_data`. """ return self.get_data(parse_form_data=True) @t.overload def get_data( self, cache: bool = True, as_text: t.Literal[False] = False, parse_form_data: bool = False, ) -> bytes: ... @t.overload def get_data( self, cache: bool = True, as_text: t.Literal[True] = ..., parse_form_data: bool = False, ) -> str: ... def get_data( self, cache: bool = True, as_text: bool = False, parse_form_data: bool = False ) -> bytes | str: """This reads the buffered incoming data from the client into one bytes object. By default this is cached but that behavior can be changed by setting `cache` to `False`. Usually it's a bad idea to call this method without checking the content length first as a client could send dozens of megabytes or more to cause memory problems on the server. Note that if the form data was already parsed this method will not return anything as form data parsing does not cache the data like this method does. To implicitly invoke form data parsing function set `parse_form_data` to `True`. When this is done the return value of this method will be an empty string if the form parser handles the data. This generally is not necessary as if the whole data is cached (which is the default) the form parser will used the cached data to parse the form data. Please be generally aware of checking the content length first in any case before calling this method to avoid exhausting server memory. If `as_text` is set to `True` the return value will be a decoded string. .. versionadded:: 0.9 """ rv = getattr(self, "_cached_data", None) if rv is None: if parse_form_data: self._load_form_data() rv = self.stream.read() if cache: self._cached_data = rv if as_text: rv = rv.decode(errors="replace") return rv @cached_property def form(self) -> ImmutableMultiDict[str, str]: """The form parameters. By default an :class:`~werkzeug.datastructures.ImmutableMultiDict` is returned from this function. This can be changed by setting :attr:`parameter_storage_class` to a different type. This might be necessary if the order of the form data is important. Please keep in mind that file uploads will not end up here, but instead in the :attr:`files` attribute. .. versionchanged:: 0.9 Previous to Werkzeug 0.9 this would only contain form data for POST and PUT requests. 
""" self._load_form_data() return self.form @cached_property def values(self) -> CombinedMultiDict[str, str]: """A :class:`werkzeug.datastructures.CombinedMultiDict` that combines :attr:`args` and :attr:`form`. For GET requests, only ``args`` are present, not ``form``. .. versionchanged:: 2.0 For GET requests, only ``args`` are present, not ``form``. """ sources = [self.args] if self.method != "GET": # GET requests can have a body, and some caching proxies # might not treat that differently than a normal GET # request, allowing form data to "invisibly" affect the # cache without indication in the query string / URL. sources.append(self.form) args = [] for d in sources: if not isinstance(d, MultiDict): d = MultiDict(d) args.append(d) return CombinedMultiDict(args) @cached_property def files(self) -> ImmutableMultiDict[str, FileStorage]: """:class:`~werkzeug.datastructures.MultiDict` object containing all uploaded files. Each key in :attr:`files` is the name from the ``<input type="file" name="">``. Each value in :attr:`files` is a Werkzeug :class:`~werkzeug.datastructures.FileStorage` object. It basically behaves like a standard file object you know from Python, with the difference that it also has a :meth:`~werkzeug.datastructures.FileStorage.save` function that can store the file on the filesystem. Note that :attr:`files` will only contain data if the request method was POST, PUT or PATCH and the ``<form>`` that posted to the request had ``enctype="multipart/form-data"``. It will be empty otherwise. See the :class:`~werkzeug.datastructures.MultiDict` / :class:`~werkzeug.datastructures.FileStorage` documentation for more details about the used data structure. """ self._load_form_data() return self.files @property def script_root(self) -> str: """Alias for :attr:`self.root_path`. ``environ["SCRIPT_NAME"]`` without a trailing slash. """ return self.root_path @cached_property def url_root(self) -> str: """Alias for :attr:`root_url`. The URL with scheme, host, and root path. For example, ``https://example.com/app/``. """ return self.root_url remote_user = environ_property[str]( "REMOTE_USER", doc="""If the server supports user authentication, and the script is protected, this attribute contains the username the user has authenticated as.""", ) is_multithread = environ_property[bool]( "wsgi.multithread", doc="""boolean that is `True` if the application is served by a multithreaded WSGI server.""", ) is_multiprocess = environ_property[bool]( "wsgi.multiprocess", doc="""boolean that is `True` if the application is served by a WSGI server that spawns multiple processes.""", ) is_run_once = environ_property[bool]( "wsgi.run_once", doc="""boolean that is `True` if the application will be executed only once in a process lifetime. This is the case for CGI for example, but it's not guaranteed that the execution only happens one time.""", ) # JSON #: A module or other object that has ``dumps`` and ``loads`` #: functions that match the API of the built-in :mod:`json` module. json_module = json @property def json(self) -> t.Any: """The parsed JSON data if :attr:`mimetype` indicates JSON (:mimetype:`application/json`, see :attr:`is_json`). Calls :meth:`get_json` with default arguments. If the request content type is not ``application/json``, this will raise a 415 Unsupported Media Type error. .. versionchanged:: 2.3 Raise a 415 error instead of 400. .. versionchanged:: 2.1 Raise a 400 error if the content type is incorrect. """ return self.get_json() # Cached values for ``(silent=False, silent=True)``. 
Initialized # with sentinel values. _cached_json: tuple[t.Any, t.Any] = (Ellipsis, Ellipsis) @t.overload def get_json( self, force: bool = ..., silent: t.Literal[False] = ..., cache: bool = ... ) -> t.Any: ... @t.overload def get_json( self, force: bool = ..., silent: bool = ..., cache: bool = ... ) -> t.Any | None: ... def get_json( self, force: bool = False, silent: bool = False, cache: bool = True ) -> t.Any | None: """Parse :attr:`data` as JSON. If the mimetype does not indicate JSON (:mimetype:`application/json`, see :attr:`is_json`), or parsing fails, :meth:`on_json_loading_failed` is called and its return value is used as the return value. By default this raises a 415 Unsupported Media Type resp. :param force: Ignore the mimetype and always try to parse JSON. :param silent: Silence mimetype and parsing errors, and return ``None`` instead. :param cache: Store the parsed JSON to return for subsequent calls. .. versionchanged:: 2.3 Raise a 415 error instead of 400. .. versionchanged:: 2.1 Raise a 400 error if the content type is incorrect. """ if cache and self._cached_json[silent] is not Ellipsis: return self._cached_json[silent] if not (force or self.is_json): if not silent: return self.on_json_loading_failed(None) else: return None data = self.get_data(cache=cache) try: rv = self.json_module.loads(data) except ValueError as e: if silent: rv = None if cache: normal_rv, _ = self._cached_json self._cached_json = (normal_rv, rv) else: rv = self.on_json_loading_failed(e) if cache: _, silent_rv = self._cached_json self._cached_json = (rv, silent_rv) else: if cache: self._cached_json = (rv, rv) return rv def on_json_loading_failed(self, e: ValueError | None) -> t.Any: """Called if :meth:`get_json` fails and isn't silenced. If this method returns a value, it is used as the return value for :meth:`get_json`. The default implementation raises :exc:`~werkzeug.exceptions.BadRequest`. :param e: If parsing failed, this is the exception. It will be ``None`` if the content type wasn't ``application/json``. .. versionchanged:: 2.3 Raise a 415 error instead of 400. """ if e is not None: raise BadRequest(f"Failed to decode JSON object: {e}") raise UnsupportedMediaType( "Did not attempt to load JSON data because the request" " Content-Type was not 'application/json'." )
target: Request
language: python
repo: getsentry__sentry
path: tests/sentry/issues/test_issue_search.py
class_span: { "start": 21247, "end": 21921 }
source:
class ____(TestCase):
    def test(self) -> None:
        assert convert_device_class_value(["high"], [self.project], self.user, None) == ["3"]
        assert convert_device_class_value(["medium"], [self.project], self.user, None) == ["2"]
        assert convert_device_class_value(["low"], [self.project], self.user, None) == ["1"]
        assert sorted(
            convert_device_class_value(["medium", "high"], [self.project], self.user, None)
        ) == [
            "2",
            "3",
        ]
        assert sorted(
            convert_device_class_value(["low", "medium", "high"], [self.project], self.user, None)
        ) == ["1", "2", "3"]
target: DeviceClassValueTest
language: python
repo: pytorch__pytorch
path: test/distributed/test_aten_comm_compute_reordering.py
class_span: { "start": 56147, "end": 65361 }
class ____(TestComputeCommReorderingMultiProc): """ Tests for manual overlap scheduling and subgraph utilities. """ @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch") def test_make_graph_view_and_get_subgraph_by_path(self): from torch._inductor.fx_passes.graph_view import ( get_subgraph_by_path, make_graph_view, ) model = get_toy_model(device_type) gm = torch.fx.symbolic_trace(model) graph_view = make_graph_view(gm.graph) # Fetch subgraph for first transformer layer sub_nodes = get_subgraph_by_path(graph_view, "layers.0.wq") self.assertEqual([n.name for n in sub_nodes], ["layers_0_wq"]) # Fetch multiple paths at once multi_nodes = get_subgraph_by_path(graph_view, ["layers.0.wq", "layers.0.proj"]) self.assertEqual( [n.name for n in multi_nodes], ["layers_0_wq", "layers_0_proj"] ) # Fetch non existing paths non_exist_nodes = get_subgraph_by_path(graph_view, "nonexistent.module.path") self.assertEqual(non_exist_nodes, []) # Fetch mixed of existing and non existing paths mixed_nodes = get_subgraph_by_path( graph_view, ["layers.0.wq", "nonexistent.module.path"] ) self.assertEqual([n.name for n in mixed_nodes], ["layers_0_wq"]) @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch") def test_manual_reordering_bucketing_pass_separate_buckets( self, ): def func(a, b, c, d, *, ranks): # All 4 all-gathers are independent - COULD be bucketed together ag1 = _functional_collectives.all_gather_tensor(a, 0, ranks) ag2 = _functional_collectives.all_gather_tensor(b, 0, ranks) ag3 = _functional_collectives.all_gather_tensor(c[:4], 0, ranks) ag4 = _functional_collectives.all_gather_tensor(d[:4], 0, ranks) # First compute - can hide ag1 and ag2 e = a * 5 # Use a to avoid fusion mm1 = torch.matmul(e, e.T) # Force ag1/ag2 to complete before mm2 (but ag3/ag4 can still be deferred) # Use first 8x8 elements to match mm1's shape intermediate = ag1[:8, :8] + ag2[:8, :8] # Second compute - depends on ag1/ag2 through intermediate, can hide ag3/ag4 mm2 = torch.matmul(mm1 + intermediate, c[:8]) # Use all results result = ( ag1.sum() * 1.1 + ag2.sum() * 1.2 + ag3.sum() * 1.3 + ag4.sum() * 1.4 + mm1.sum() + mm2.sum() ) return result with _dynamo_dist_per_rank_init( self.rank, self.world_size, self.backend(device_type), fake_pg=not at_least_x_gpu(2), ): a = torch.ones(8, 8, dtype=torch.float, device=device_type) b = torch.ones(8, 8, dtype=torch.float, device=device_type) * 2 c = torch.ones(8, 8, dtype=torch.float, device=device_type) * 3 d = torch.ones(8, 8, dtype=torch.float, device=device_type) * 4 ranks = list(range(self.world_size)) func_c = functools.partial(func, ranks=ranks) compiled = torch.compile(func_c) out, aten_graph = run_and_get_manual_aten_graph( compiled, ["module_1", "module_2"], a, b, c, d ) ( FileCheck() .check("_pre_bucket_all_gather") .check("all_gather_into_tensor_out") .check("_pre_bucket_all_gather_1") .check("all_gather_into_tensor_out_1") .check("wait_tensor_4") .check("wait_tensor_5") .run(str(aten_graph)) ) correct = func(a, b, c, d, ranks=ranks) self.assertTrue(same(out, correct)) @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch") def test_bucketing_reordering_pass_no_bucket( self, ): def func(a, b, c, d, *, ranks): # All 4 all-gathers are independent - COULD be bucketed together ag1 = _functional_collectives.all_gather_tensor(a, 0, ranks) ag2 = _functional_collectives.all_gather_tensor(b, 0, ranks) ag3 = _functional_collectives.all_gather_tensor(c[:4], 0, ranks) ag4 = 
_functional_collectives.all_gather_tensor(d[:4], 0, ranks) # First compute - can hide ag1 and ag2 e = a * 5 # Use a to avoid fusion mm1 = torch.matmul(e, e.T) # Force ag1/ag2 to complete before mm2 (but ag3/ag4 can still be deferred) # Use first 8x8 elements to match mm1's shape intermediate = ag1[:8, :8] + ag2[:8, :8] # Second compute - depends on ag1/ag2 through intermediate, can hide ag3/ag4 mm2 = torch.matmul(mm1 + intermediate, c[:8]) # Use all results result = ( ag1.sum() * 1.1 + ag2.sum() * 1.2 + ag3.sum() * 1.3 + ag4.sum() * 1.4 + mm1.sum() + mm2.sum() ) return result with _dynamo_dist_per_rank_init( self.rank, self.world_size, self.backend(device_type), fake_pg=not at_least_x_gpu(2), ): a = torch.ones(8, 8, dtype=torch.float, device=device_type) b = torch.ones(8, 8, dtype=torch.float, device=device_type) * 2 c = torch.ones(8, 8, dtype=torch.float, device=device_type) * 3 d = torch.ones(8, 8, dtype=torch.float, device=device_type) * 4 ranks = list(range(self.world_size)) func_c = functools.partial(func, ranks=ranks) compiled = torch.compile(func_c) out, aten_graph = run_and_get_manual_aten_graph(compiled, [], a, b, c, d) ( FileCheck() .check("all_gather_into_tensor") .check("all_gather_into_tensor_1") .check("all_gather_into_tensor_2") .check("all_gather_into_tensor_3") .check("wait_tensor") .check("wait_tensor_1") .check("wait_tensor_2") .check("wait_tensor_3") .run(str(aten_graph)) ) correct = func(a, b, c, d, ranks=ranks) self.assertTrue(same(out, correct)) @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch") def test_bucketing_reordering_pass_single_bucket( self, ): def func(a, b, c, d, *, ranks): # All 4 all-gathers are independent - COULD be bucketed together ag1 = _functional_collectives.all_gather_tensor(a, 0, ranks) ag2 = _functional_collectives.all_gather_tensor(b, 0, ranks) ag3 = _functional_collectives.all_gather_tensor(c[:4], 0, ranks) ag4 = _functional_collectives.all_gather_tensor(d[:4], 0, ranks) # First compute - can hide ag1 and ag2 e = a * 5 # Use a to avoid fusion mm1 = torch.matmul(e, e.T) # Force ag1/ag2 to complete before mm2 (but ag3/ag4 can still be deferred) # Use first 8x8 elements to match mm1's shape intermediate = ag1[:8, :8] + ag2[:8, :8] # Second compute - depends on ag1/ag2 through intermediate, can hide ag3/ag4 mm2 = torch.matmul(mm1 + intermediate, c[:8]) # Use all results result = ( ag1.sum() * 1.1 + ag2.sum() * 1.2 + ag3.sum() * 1.3 + ag4.sum() * 1.4 + mm1.sum() + mm2.sum() ) return result with _dynamo_dist_per_rank_init( self.rank, self.world_size, self.backend(device_type), fake_pg=not at_least_x_gpu(2), ): a = torch.ones(8, 8, dtype=torch.float, device=device_type) b = torch.ones(8, 8, dtype=torch.float, device=device_type) * 2 c = torch.ones(8, 8, dtype=torch.float, device=device_type) * 3 d = torch.ones(8, 8, dtype=torch.float, device=device_type) * 4 ranks = list(range(self.world_size)) func_c = functools.partial(func, ranks=ranks) compiled = torch.compile(func_c) out, aten_graph = run_and_get_manual_aten_graph( compiled, [["module_1", "module_2"]], a, b, c, d ) ( FileCheck() .check("_pre_bucket_all_gather") .check("all_gather_into_tensor_out") .check("wait_tensor_4") .run(str(aten_graph)) ) correct = func(a, b, c, d, ranks=ranks) self.assertTrue(same(out, correct)) if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
target: TestManualOverlapBucketing
language: python
repo: tensorflow__tensorflow
path: tensorflow/python/autograph/pyct/static_analysis/type_inference.py
class_span: { "start": 6154, "end": 16435 }
class ____(gast.NodeVisitor): """Runs type inference on a single AST statement. This visitor annotates most nodes with type information. It also sets types for the symbols modified by this statement in its types_out property. Note: this inferrer is able to capture side effects of functions, however, these side effects will not be applied to the current expression. Doing so would create too much of a dependence on the runtime's internal rules about execution order. Example: def f(): nonlocal a a = 1 return a a = 0.0 b = f() + a # a = float; side effect of f() ignored print(a) # a = int; side effect of f() accounted for """ def __init__(self, resolver: Resolver, scope: activity.Scope, namespace: Dict[qual_names.QN, Any], closure_types: Dict[qual_names.QN, Set[Any]], types_in: _TypeMap): self.resolver = resolver self.scope = scope self.namespace = namespace self.closure_types = closure_types self.types_in = types_in self.new_symbols = {} # rvalue type. This property is set when encountering an assign operation, # so that visiting nodes with Store ctx (typically found on left side of # assignments) can infer the type they should receive. self.rtype = None def visit(self, node): types = super().visit(node) if __debug__: self._check_set(types) if types is not None: # TODO(mdan): Normalize by removing subtypes. anno.setanno(node, anno.Static.TYPES, tuple(types)) return types def _check_set(self, value): if value is not None and not isinstance(value, set): raise ValueError('{} method expected to return set, got {}'.format( self.resolver, value)) def visit_Constant(self, node): types = self.resolver.res_value(self.namespace, node.value) if __debug__: self._check_set(types) return types def _apply_unpacking(self, node): assert isinstance(node.ctx, gast.Store) if self.rtype is not None: original_stype = self.rtype # TODO(mdan): Find a better way to express unpacking. i_type = self.resolver.res_value(self.namespace, 0) for i, elt in enumerate(node.elts): self.rtype = self.resolver.res_slice( self.namespace, self.types_in.types, i, original_stype, i_type) self.visit(elt) self.rtype = original_stype return original_stype return None def visit_Tuple(self, node): if isinstance(node.ctx, gast.Load): elt_types = () for elt in node.elts: types_ = self.visit(elt) if types_ is None: return None elt_types += (types_,) return set(itertools.product(*elt_types)) return self._apply_unpacking(node) def visit_List(self, node): if isinstance(node.ctx, gast.Load): elt_types = tuple(self.visit(elt) for elt in node.elts) return self.resolver.res_list_literal(self.namespace, elt_types) return self._apply_unpacking(node) def visit_Set(self, node): raise NotImplementedError() def visit_Name(self, node): name = anno.getanno(node, anno.Basic.QN) if isinstance(node.ctx, gast.Load): types = self.types_in.types.get(name, None) if types is None: if (name not in self.scope.bound) or (name in self.scope.nonlocals): # TODO(mdan): Test with global variables. if name in self.closure_types: types = self.closure_types[name] else: types, value = self.resolver.res_name( self.namespace, self.types_in.types, name) if value is not None: anno.setanno(node, anno.Static.VALUE, value) elif isinstance(node.ctx, gast.Param): # The direct parent it the whole function scope. See activity.py. 
f_is_local = self.scope.parent.parent is not None type_name = anno.getanno(node.annotation, anno.Basic.QN, None) types = self.resolver.res_arg(self.namespace, self.types_in.types, self.scope.function_name, name, type_name, f_is_local) if types is not None: self.new_symbols[name] = types elif isinstance(node.ctx, gast.Store): if self.rtype is not None: self.new_symbols[name] = self.rtype types = self.rtype else: assert False, 'unknown ctx' if __debug__: self._check_set(types) return types def visit_Attribute(self, node): parent_types = self.visit(node.value) # Attempt to use the static value if known. parent_value = anno.Static.VALUE.of(node.value, None) if parent_value is not None: static_value = getattr(parent_value, node.attr, NO_VALUE) if static_value is NO_VALUE: # Unexpected failure to resolve attribute. Ask the resolver about the # full name instead. types, static_value = self.resolver.res_name( self.namespace, self.types_in, anno.Basic.QN.of(node)) anno.setanno(node, anno.Static.VALUE, static_value) if __debug__: self._check_set(types) return types else: # Fall back to the type if that is known. if parent_types is None: return None inferred_values = [getattr(t, node.attr, None) for t in parent_types] if not inferred_values: return None static_value = inferred_values[0] if static_value is None: return None if any(v is not static_value for v in inferred_values[1:]): # Static value not stable, assume it's dynamic. return None types = self.resolver.res_value(self.namespace, static_value) anno.setanno(node, anno.Static.VALUE, static_value) if __debug__: self._check_set(types) return types def visit_FunctionDef(self, node): f_name = qual_names.QN(node.name) if node.decorator_list: raise NotImplementedError('decorators: {}'.format(node.decorator_list)) ret_types = None if node.returns: ret_types, _ = self.resolver.res_name( self.namespace, self.types_in.types, anno.Basic.QN.of(node.returns)) if __debug__: self._check_set(ret_types) if ret_types is None: ret_types = {Any} f_types = set() for rt in ret_types: f_types.add(Callable[[Any], rt]) self.new_symbols[f_name] = f_types # The definition of a function is an expression, hence has no return value. return None def _resolve_typed_callable(self, f_types, arg_types, keyword_types): ret_types = set() for t in f_types: if isinstance(t, Callable): # Note: these are undocumented - may be version-specific! # Callable[[x], y]: __args__ are (x, y) args = t.__args__ if args: ret_types.add(args[-1]) else: ret_types.add(Any) else: raise NotImplementedError('callable type {}'.format(type(t))) # Side effects can not be inferred based on type alone. side_effects = None return ret_types, side_effects def visit_Call(self, node): self.visit(node.func) f_name = anno.Basic.QN.of(node.func) arg_types = [self.visit(a) for a in node.args] keyword_types = [self.visit(kw.value) for kw in node.keywords] if f_name in self.scope.bound: # Local function, use local type definitions, if available. f_type = self.types_in.types.get(f_name, None) if f_type is None: # No static type info available, nothing more to do. ret_type, side_effects = None, None else: ret_type, side_effects = self._resolve_typed_callable( f_type, arg_types, keyword_types) else: # Nonlocal function, resolve externally. 
f_type = anno.Static.TYPES.of(node.func, None) ret_type, side_effects = self.resolver.res_call(self.namespace, self.types_in.types, node, f_type, arg_types, keyword_types) if __debug__: self._check_set(ret_type) if side_effects: if not isinstance(side_effects, dict): raise ValueError( 'side effects must be dict, got {}'.format(side_effects)) for k, v in side_effects.items(): if not isinstance(k, qual_names.QN): raise ValueError('side effect keys must be QNs, got {}'.format(k)) self._check_set(v) if side_effects: self.new_symbols.update(side_effects) return ret_type def visit_Expr(self, node): return self.visit(node.value) def visit_Assign(self, node): self.rtype = self.visit(node.value) for t in node.targets: self.visit(t) self.rtype = None def visit_Subscript(self, node): val_types = self.visit(node.value) slice_types = self.visit(node.slice) if val_types is None or slice_types is None: return None types = self.resolver.res_slice( self.namespace, self.types_in.types, node, val_types, slice_types) if __debug__: self._check_set(types) return types def visit_Compare(self, node): left_types = self.visit(node.left) right_types = [self.visit(c) for c in node.comparators] if left_types is None or any(t is None for t in right_types): return None types = self.resolver.res_compare( self.namespace, self.types_in.types, node, left_types, right_types) if __debug__: self._check_set(types) return types def visit_BinOp(self, node): left_types = self.visit(node.left) right_types = self.visit(node.right) if left_types is None or right_types is None: return None types = self.resolver.res_binop( self.namespace, self.types_in.types, node, left_types, right_types) if __debug__: self._check_set(types) return types def visit_UnaryOp(self, node): opnd_types = self.visit(node.operand) if opnd_types is None: return None types = self.resolver.res_unop( self.namespace, self.types_in.types, node, opnd_types) if __debug__: self._check_set(types) return types
StmtInferrer
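The `_resolve_typed_callable` helper in the `StmtInferrer` source above depends on the (admittedly undocumented, possibly version-specific) fact that `typing.Callable[[x], y]` exposes its parameter types and return type through `__args__`, with the return type last. A minimal standalone check of that behaviour, using only the standard library:

from typing import Callable

# Callable[[int, str], float].__args__ flattens the argument list and
# appends the return type, so the return type is always the last element.
sig = Callable[[int, str], float]
assert sig.__args__ == (int, str, float)
assert sig.__args__[-1] is float  # what _resolve_typed_callable reads as the return type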
python
pytorch__pytorch
test/quantization/core/experimental/test_quantizer.py
{ "start": 350, "end": 9213 }
class ____(unittest.TestCase): r""" Tests quantize_APoT result on random 1-dim tensor and hardcoded values for b, k by comparing to uniform quantization (non-uniform quantization reduces to uniform for k = 1) quantized tensor (https://pytorch.org/docs/stable/generated/torch.quantize_per_tensor.html) * tensor2quantize: Tensor * b: 8 * k: 1 """ def test_quantize_APoT_rand_k1(self): # generate random size of tensor2quantize between 1 -> 20 size = random.randint(1, 20) # generate tensor with random fp values between 0 -> 1000 tensor2quantize = 1000 * torch.rand(size, dtype=torch.float) apot_observer = APoTObserver(b=8, k=1) apot_observer(tensor2quantize) alpha, gamma, quantization_levels, level_indices = apot_observer.calculate_qparams(signed=False) # get apot quantized tensor result qtensor = quantize_APoT(tensor2quantize=tensor2quantize, alpha=alpha, gamma=gamma, quantization_levels=quantization_levels, level_indices=level_indices) # get uniform quantization quantized tensor result uniform_observer = MinMaxObserver() uniform_observer(tensor2quantize) scale, zero_point = uniform_observer.calculate_qparams() uniform_quantized = quantize_per_tensor(input=tensor2quantize, scale=scale, zero_point=zero_point, dtype=torch.quint8).int_repr() qtensor_data = qtensor.data.int() uniform_quantized_tensor = uniform_quantized.data.int() self.assertTrue(torch.equal(qtensor_data, uniform_quantized_tensor)) r""" Tests quantize_APoT for k != 1. Tests quantize_APoT result on random 1-dim tensor and hardcoded values for b=4, k=2 by comparing results to hand-calculated results from APoT paper https://arxiv.org/pdf/1909.13144.pdf * tensor2quantize: Tensor * b: 4 * k: 2 """ def test_quantize_APoT_k2(self): r""" given b = 4, k = 2, alpha = 1.0, we know: (from APoT paper example: https://arxiv.org/pdf/1909.13144.pdf) quantization_levels = tensor([0.0000, 0.0208, 0.0417, 0.0625, 0.0833, 0.1250, 0.1667, 0.1875, 0.2500, 0.3333, 0.3750, 0.5000, 0.6667, 0.6875, 0.7500, 1.0000]) level_indices = tensor([ 0, 3, 12, 15, 2, 14, 8, 11, 10, 1, 13, 9, 4, 7, 6, 5])) """ # generate tensor with random fp values tensor2quantize = torch.tensor([0, 0.0215, 0.1692, 0.385, 1, 0.0391]) observer = APoTObserver(b=4, k=2) observer.forward(tensor2quantize) alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False) # get apot quantized tensor result qtensor = quantize_APoT(tensor2quantize=tensor2quantize, alpha=alpha, gamma=gamma, quantization_levels=quantization_levels, level_indices=level_indices) qtensor_data = qtensor.data.int() # expected qtensor values calculated based on # corresponding level_indices to nearest quantization level # for each fp value in tensor2quantize # e.g. # 0.0215 in tensor2quantize nearest 0.0208 in quantization_levels -> 3 in level_indices expected_qtensor = torch.tensor([0, 3, 8, 13, 5, 12], dtype=torch.int32) self.assertTrue(torch.equal(qtensor_data, expected_qtensor)) r""" Tests dequantize_apot result on random 1-dim tensor and hardcoded values for b, k. 
Dequant -> quant an input tensor and verify that result is equivalent to input * tensor2quantize: Tensor * b: 4 * k: 2 """ def test_dequantize_quantize_rand_b4(self): # make observer observer = APoTObserver(4, 2) # generate random size of tensor2quantize between 1 -> 20 size = random.randint(1, 20) # make tensor2quantize: random fp values between 0 -> 1000 tensor2quantize = 1000 * torch.rand(size, dtype=torch.float) observer.forward(tensor2quantize) alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False) # make mock apot_tensor original_apot = quantize_APoT(tensor2quantize=tensor2quantize, alpha=alpha, gamma=gamma, quantization_levels=quantization_levels, level_indices=level_indices) original_input = torch.clone(original_apot.data).int() # dequantize apot_tensor dequantize_result = dequantize_APoT(apot_tensor=original_apot) # quantize apot_tensor final_apot = quantize_APoT(tensor2quantize=dequantize_result, alpha=alpha, gamma=gamma, quantization_levels=quantization_levels, level_indices=level_indices) result = final_apot.data.int() self.assertTrue(torch.equal(original_input, result)) r""" Tests dequantize_apot result on random 1-dim tensor and hardcoded values for b, k. Dequant -> quant an input tensor and verify that result is equivalent to input * tensor2quantize: Tensor * b: 12 * k: 4 """ def test_dequantize_quantize_rand_b6(self): # make observer observer = APoTObserver(12, 4) # generate random size of tensor2quantize between 1 -> 20 size = random.randint(1, 20) # make tensor2quantize: random fp values between 0 -> 1000 tensor2quantize = 1000 * torch.rand(size, dtype=torch.float) observer.forward(tensor2quantize) alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False) # make mock apot_tensor original_apot = quantize_APoT(tensor2quantize=tensor2quantize, alpha=alpha, gamma=gamma, quantization_levels=quantization_levels, level_indices=level_indices) original_input = torch.clone(original_apot.data).int() # dequantize apot_tensor dequantize_result = dequantize_APoT(apot_tensor=original_apot) # quantize apot_tensor final_apot = quantize_APoT(tensor2quantize=dequantize_result, alpha=alpha, gamma=gamma, quantization_levels=quantization_levels, level_indices=level_indices) result = final_apot.data.int() self.assertTrue(torch.equal(original_input, result)) r""" Tests for correct dimensions in dequantize_apot result on random 3-dim tensor with random dimension sizes and hardcoded values for b, k. Dequant an input tensor and verify that dimensions are same as input. 
* tensor2quantize: Tensor * b: 4 * k: 2 """ def test_dequantize_dim(self): # make observer observer = APoTObserver(4, 2) # generate random size of tensor2quantize between 1 -> 20 size1 = random.randint(1, 20) size2 = random.randint(1, 20) size3 = random.randint(1, 20) # make tensor2quantize: random fp values between 0 -> 1000 tensor2quantize = 1000 * torch.rand(size1, size2, size3, dtype=torch.float) observer.forward(tensor2quantize) alpha, gamma, quantization_levels, level_indices = observer.calculate_qparams(signed=False) # make mock apot_tensor original_apot = quantize_APoT(tensor2quantize=tensor2quantize, alpha=alpha, gamma=gamma, quantization_levels=quantization_levels, level_indices=level_indices) # dequantize apot_tensor dequantize_result = dequantize_APoT(apot_tensor=original_apot) self.assertEqual(original_apot.data.size(), dequantize_result.size()) def test_q_apot_alpha(self): with self.assertRaises(NotImplementedError): APoTQuantizer.q_apot_alpha(self) if __name__ == '__main__': unittest.main()
TestQuantizer
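The quantizer tests above compare APoT codes against a uniform baseline built from `MinMaxObserver` and `torch.quantize_per_tensor`. A hedged sketch of just that uniform baseline (assumes a recent PyTorch install; the tensor values are arbitrary and only illustrative):

import torch
from torch.ao.quantization.observer import MinMaxObserver

x = 1000 * torch.rand(10)          # arbitrary fp32 values in [0, 1000)
observer = MinMaxObserver()
observer(x)                        # record running min/max
scale, zero_point = observer.calculate_qparams()

# Affine-quantize to quint8 and inspect the raw integer codes, which is
# what the k=1 test compares the APoT level indices against.
q = torch.quantize_per_tensor(x, float(scale), int(zero_point), torch.quint8)
print(q.int_repr())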
python
django__django
tests/model_fields/test_floatfield.py
{ "start": 101, "end": 1776 }
class ____(TestCase): def test_float_validates_object(self): instance = FloatModel(size=2.5) # Try setting float field to unsaved object instance.size = instance with transaction.atomic(): with self.assertRaises(TypeError): instance.save() # Set value to valid and save instance.size = 2.5 instance.save() self.assertTrue(instance.id) # Set field to object on saved instance instance.size = instance msg = ( "Tried to update field model_fields.FloatModel.size with a model " "instance, %r. Use a value compatible with FloatField." ) % instance with transaction.atomic(): with self.assertRaisesMessage(TypeError, msg): instance.save() # Try setting field to object on retrieved object obj = FloatModel.objects.get(pk=instance.id) obj.size = obj with self.assertRaisesMessage(TypeError, msg): obj.save() def test_invalid_value(self): tests = [ (TypeError, ()), (TypeError, []), (TypeError, {}), (TypeError, set()), (TypeError, object()), (TypeError, complex()), (ValueError, "non-numeric string"), (ValueError, b"non-numeric byte-string"), ] for exception, value in tests: with self.subTest(value): msg = "Field 'size' expected a number but got %r." % (value,) with self.assertRaisesMessage(exception, msg): FloatModel.objects.create(size=value)
TestFloatField
python
tensorflow__tensorflow
tensorflow/dtensor/python/tests/rng_test.py
{ "start": 7167, "end": 21022 }
class ____(test_util.DTensorBaseTest): def setUp(self): super(DTensorRNGTest, self).setUp() global_ids = test_util.create_device_ids_array((2, 4)) local_ids = _LOCAL_IDS mesh_dict = { device: Mesh( [_MESH_DIM_X, _MESH_DIM_Y], global_ids, local_ids, test_util.create_device_list((2, 4), device), ) for device in ('CPU', 'GPU', 'TPU') } self.mesh = self.configTestMesh(mesh_dict) # Creates a bunch of common layouts used by tests later. self.replicated_layout_2d = Layout.replicated(self.mesh, rank=2) self.shardings = { 'batch': Layout.batch_sharded, 'inner': Layout.inner_sharded } # Creates a bunch of parameters for rng V2 ops self.key = constant_op.constant([123], dtype=dtypes.uint64) self.counter = constant_op.constant([1, 1], dtype=dtypes.uint64) self.alg = 1 self.minval = 1 self.maxval = 100 @parameterized.named_parameters(test_util_ops.RANDOM_OPS) def testStatelessRNGWithFullyReplicated(self, op, dtype, op_version): layout = self.replicated_layout_2d shape = [16, 16] seed = [123, 321] with ops.device_v2(api.device_name()): with api._dtensor_device()._default_layout(layout): b = _call_dtensor_op( op=op, seed=seed, shape=shape, dtype=dtype, key=self.key, counter=self.counter, alg=self.alg, minval=self.minval, maxval=self.maxval, op_version=op_version, mesh=self.mesh) api.check_layout(b, layout) self.assertListEqual(shape, list(b.shape)) b = [tensor.numpy() for tensor in api.unpack(b)] for i in range(self.mesh.num_local_devices() - 1): self.assertAllEqual(b[i], b[i + 1]) @parameterized.named_parameters(test_util_ops.RANDOM_OPS) def testStatelessRNGWithFullyReplicatedComparingWithNonDTensor( self, op, dtype, op_version): layout = self.replicated_layout_2d shape = [16, 16] seed = [123, 321] with ops.device_v2(api.device_name()): with api._dtensor_device()._default_layout(layout): b = _call_dtensor_op( op=op, seed=seed, shape=shape, dtype=dtype, key=self.key, counter=self.counter, alg=self.alg, minval=self.minval, maxval=self.maxval, op_version=op_version, mesh=self.mesh) api.check_layout(b, layout) self.assertListEqual(shape, list(b.shape)) b = [tensor.numpy() for tensor in api.unpack(b)] local_shape = shape for index, device_id in enumerate(_LOCAL_IDS): self.assertAllEqual( b[index], rng_op_spmd( op, device_id, seed, local_shape, dtype, key=self.key, counter=self.counter, alg=self.alg, minval=self.minval, maxval=self.maxval, op_version=op_version, device_index_fn=None, # not needed full_replicated=True, is_tpu=self.mesh.device_type().upper() == 'TPU')) @parameterized.named_parameters( test_util_ops.expand_test_config( test_util_ops.RANDOM_OPS, [ { 'dim': _MESH_DIM_X, 'shard_type': 'batch', }, { 'dim': _MESH_DIM_Y, 'shard_type': 'batch', }, { 'dim': _MESH_DIM_X, 'shard_type': 'inner', }, {'dim': _MESH_DIM_Y, 'shard_type': 'inner'}, ], ) ) def testStatelessRNGOpsWithSingleDimensionSharded(self, op, dtype, op_version, dim, shard_type): shape = [128, 128] seed = [123, 321] sharding = self.shardings[shard_type] layout = sharding(self.mesh, dim, rank=2) # Raw rng Ops do not have inputs, so we need to place the Op DTensor device # explicitly. 
with ops.device_v2(api.device_name()): with api._dtensor_device()._default_layout(layout): b = _call_dtensor_op( op=op, seed=seed, shape=shape, dtype=dtype, key=self.key, counter=self.counter, alg=self.alg, minval=self.minval, maxval=self.maxval, op_version=op_version, mesh=self.mesh) api.check_layout(b, layout) b = [tensor.numpy() for tensor in api.unpack(b)] if dim == _MESH_DIM_X: if shard_type == 'batch': self.assertAllEqual(b[0].shape, [64, 128]) else: assert shard_type == 'inner' self.assertAllEqual(b[0].shape, [128, 64]) # first check that each component is same as the row header. for i in range(self.mesh.num_local_devices()): self.assertAllEqual(b[i], b[_ROW_HEAD[i]]) # then check the row header are NOT identital. self.assertNotAllEqual(b[_ROW_0_HEAD], b[_ROW_1_HEAD]) elif dim == _MESH_DIM_Y: if shard_type == 'batch': self.assertAllEqual(b[0].shape, [32, 128]) else: assert shard_type == 'inner' self.assertAllEqual(b[0].shape, [128, 32]) # first check elements in same columns are identical for i in range(self.mesh.num_local_devices()): self.assertAllEqual(b[i], b[_COL_HEAD[i]]) col_heads = [_COL_0_HEAD, _COL_1_HEAD, _COL_2_HEAD, _COL_3_HEAD] # then check the column header are not identital (mutually) for i in range(self.mesh.num_local_devices() - 1): for j in range(self.mesh.num_local_devices()): if i == j: continue if i in col_heads and j in col_heads: self.assertNotAllEqual(b[i], b[j]) else: self.fail('should not reach here.') @parameterized.named_parameters( test_util_ops.expand_test_config( test_util_ops.RANDOM_OPS, [ { 'dim': _MESH_DIM_X, 'shard_type': 'batch', }, { 'dim': _MESH_DIM_Y, 'shard_type': 'batch', }, { 'dim': _MESH_DIM_X, 'shard_type': 'inner', }, {'dim': _MESH_DIM_Y, 'shard_type': 'inner'}, ], ) ) def testStatelessRNGOpsWithSingleDimensionShardedComparingWithNonDTensor( self, op, dtype, op_version, dim, shard_type): shape = [128, 128] seed = [123, 321] sharding = self.shardings[shard_type] layout = sharding(self.mesh, dim, rank=2) # Raw rng Ops do not have inputs, so we need to place the Op DTensor device # explicitly. with ops.device_v2(api.device_name()): with api._dtensor_device()._default_layout(layout): b = _call_dtensor_op( op=op, seed=seed, shape=shape, dtype=dtype, key=self.key, counter=self.counter, alg=self.alg, minval=self.minval, maxval=self.maxval, op_version=op_version, mesh=self.mesh) api.check_layout(b, layout) b = [tensor.numpy() for tensor in api.unpack(b)] if dim == _MESH_DIM_X: if shard_type == 'batch': local_shape = [64, 128] else: local_shape = [128, 64] def device_index_fn(x_cord, y_cord): # See todo of device_index_fn in 2d sharding case. del y_cord return x_cord for index, device_id in enumerate(_LOCAL_IDS): self.assertAllEqual( b[index], rng_op_spmd( op, device_id, seed, local_shape, dtype, key=self.key, counter=self.counter, alg=self.alg, minval=self.minval, maxval=self.maxval, op_version=op_version, device_index_fn=device_index_fn, is_tpu=self.mesh.device_type().upper() == 'TPU')) elif dim == _MESH_DIM_Y: if shard_type == 'batch': local_shape = [32, 128] else: local_shape = [128, 32] def device_index_fn(x_cord, y_cord): # See todo of device_index_fn in 2d sharding case. note this case is # particulary interesting as 2*y_cord is more natual. 
del x_cord return y_cord for index, device_id in enumerate(_LOCAL_IDS): self.assertAllEqual( b[index], rng_op_spmd( op, device_id, seed, local_shape, dtype, key=self.key, counter=self.counter, alg=self.alg, minval=self.minval, maxval=self.maxval, op_version=op_version, device_index_fn=device_index_fn, is_tpu=self.mesh.device_type().upper() == 'TPU')) else: self.fail('should not reach here.') @parameterized.named_parameters(test_util_ops.RANDOM_OPS) def testStatelessRNGOpsWith2DSharding(self, op, dtype, op_version): shape = [128, 128] seed = [123, 321] layout = Layout([_MESH_DIM_Y, _MESH_DIM_X], self.mesh) # Raw rng Ops do not have inputs, so we need to place the Op DTensor device # explicitly. with ops.device_v2(api.device_name()): with api._dtensor_device()._default_layout(layout): b = _call_dtensor_op( op=op, seed=seed, shape=shape, dtype=dtype, key=self.key, counter=self.counter, alg=self.alg, minval=self.minval, maxval=self.maxval, op_version=op_version, mesh=self.mesh) api.check_layout(b, layout) b = [tensor.numpy() for tensor in api.unpack(b)] # check all raw components are not identital (mutually) for i in range(self.mesh.num_local_devices() - 1): for j in range(self.mesh.num_local_devices()): if i == j: continue self.assertNotAllEqual(b[i], b[j]) @parameterized.named_parameters(test_util_ops.RANDOM_OPS) def testStatelessRNGOpsWith2DShardingComparingWithNonDTensor( self, op, dtype, op_version): shape = [128, 128] seed = [123, 321] layout = Layout([_MESH_DIM_Y, _MESH_DIM_X], self.mesh) local_shape = [128 // 4, 128 // 2] # Raw rng Ops do not have inputs, so we need to place the Op DTensor device # explicitly. with ops.device_v2(api.device_name()): with api._dtensor_device()._default_layout(layout): b = _call_dtensor_op( op=op, seed=seed, shape=shape, dtype=dtype, key=self.key, counter=self.counter, alg=self.alg, minval=self.minval, maxval=self.maxval, op_version=op_version, mesh=self.mesh) api.check_layout(b, layout) b = [tensor.numpy() for tensor in api.unpack(b)] def device_index_fn(x_cord, y_cord): # TODO(bfontain,xiejw): Currently, the device index is x+2y. But it is # more natual to use 4x+y for a mesh<x=2, y=4>. Consider to change this # once all correctness tests are done. return x_cord + 2 * y_cord for index, device_id in enumerate(_LOCAL_IDS): self.assertAllEqual( b[index], rng_op_spmd( op, device_id, seed, local_shape, dtype, key=self.key, counter=self.counter, alg=self.alg, minval=self.minval, maxval=self.maxval, op_version=op_version, device_index_fn=device_index_fn, is_tpu=self.mesh.device_type().upper() == 'TPU')) def testRNGReadAndSkip(self): replicated_layout = Layout.replicated(self.mesh, 1) a = constant_op.constant([1, 2, 3], dtype=dtypes.int64) v = variables.Variable(a) expected = gen_stateful_random_ops.rng_read_and_skip( resource=v.handle, alg=1, delta=constant_op.constant(1, dtype=dtypes.uint64), ) a = numpy_util.pack_numpy(a, replicated_layout) v = d_variable.DVariable(a) got = gen_stateful_random_ops.rng_read_and_skip( resource=v.handle, alg=1, delta=constant_op.constant(1, dtype=dtypes.uint64), ) self.assertDTensorEqual(expected, replicated_layout, got) def testStatelessRandomGetKeyCounter(self): seed = constant_op.constant([7, 17], dtypes.int32) # TPU computation result is different from CPU computation. # We force it to run on the TPU using tpu_strategy for TPU mesh # so that we compare equal values. 
@polymorphic_function.function def tpu_fn(): return gen_stateless_random_ops_v2.stateless_random_get_key_counter( seed=seed ) if self.mesh.device_type().upper() == 'TPU': expected = get_tpu_strategy().run(tpu_fn) else: expected = gen_stateless_random_ops_v2.stateless_random_get_key_counter( seed=seed ) replicated_1d_layout = Layout.replicated(self.mesh, 1) seed = numpy_util.pack_numpy(seed, replicated_1d_layout) got = gen_stateless_random_ops_v2.stateless_random_get_key_counter( seed=seed ) self.assertDTensorEqual(expected[0], replicated_1d_layout, got[0]) self.assertDTensorEqual(expected[1], replicated_1d_layout, got[1]) if __name__ == '__main__': test.main()
DTensorRNGTest
python
jazzband__prettytable
tests/test_prettytable.py
{ "start": 23545, "end": 24716 }
class ____: def test_slice_all(self, city_data: PrettyTable) -> None: table = city_data[:] assert city_data.get_string() == table.get_string() def test_slice_first_two_rows(self, city_data: PrettyTable) -> None: table = city_data[0:2] string = table.get_string() assert len(string.split("\n")) == 6 for row_index in (0, 1): city = CITY_DATA[row_index][0] assert isinstance(city, str) assert city in string for row_index in (2, 3, 4, 5, 6): city = CITY_DATA[row_index][0] assert isinstance(city, str) assert city not in string def test_slice_last_two_rows(self, city_data: PrettyTable) -> None: table = city_data[-2:] string = table.get_string() assert len(string.split("\n")) == 6 for row_index in (0, 1, 2, 3, 4): city = CITY_DATA[row_index][0] assert isinstance(city, str) assert city not in string for row_index in (5, 6): city = CITY_DATA[row_index][0] assert isinstance(city, str) assert city in string
TestSlicing
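The slicing tests above rely on `PrettyTable.__getitem__` returning a new table that contains only the selected rows. A short sketch of that behaviour (assumes the `prettytable` package is installed; the field names and rows below are made up for illustration, not taken from the test fixture):

from prettytable import PrettyTable

table = PrettyTable()
table.field_names = ["City", "Population"]
table.add_row(["Adelaide", 1158259])
table.add_row(["Brisbane", 1857594])
table.add_row(["Darwin", 120900])

first_two = table[0:2]         # slicing returns a new PrettyTable
print(first_two.get_string())  # renders only the Adelaide and Brisbane rows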
python
airbytehq__airbyte
airbyte-integrations/connectors/source-zenloop/source_zenloop/streams.py
{ "start": 4539, "end": 5118 }
class ____(ZenloopStream): # API Doc: https://docs.zenloop.com/reference#get-list-of-surveys primary_key = None has_date_param = False extra_params = {"page": "1"} use_cache = True def path( self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None ) -> str: return "surveys" def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]: response_json = response.json() yield from response_json.get("surveys", [])
Surveys
python
apache__airflow
providers/teradata/tests/unit/teradata/utils/test_bteq_util.py
{ "start": 1321, "end": 13211 }
class ____: def test_identify_os_linux(self): # Arrange ssh_client = MagicMock() stdout_mock = MagicMock() stdout_mock.read.return_value = b"Linux\n" ssh_client.exec_command.return_value = (MagicMock(), stdout_mock, MagicMock()) # Act os_info = identify_os(ssh_client) # Assert ssh_client.exec_command.assert_called_once_with("uname || ver") assert os_info == "linux\n" def test_identify_os_windows(self): # Arrange ssh_client = MagicMock() stdout_mock = MagicMock() stdout_mock.read.return_value = b"Microsoft Windows [Version 10.0.19045.3324]\n" ssh_client.exec_command.return_value = (MagicMock(), stdout_mock, MagicMock()) # Act os_info = identify_os(ssh_client) # Assert ssh_client.exec_command.assert_called_once_with("uname || ver") assert "windows" in os_info def test_identify_os_macos(self): # Arrange ssh_client = MagicMock() stdout_mock = MagicMock() stdout_mock.read.return_value = b"Darwin\n" ssh_client.exec_command.return_value = (MagicMock(), stdout_mock, MagicMock()) # Act os_info = identify_os(ssh_client) # Assert ssh_client.exec_command.assert_called_once_with("uname || ver") assert os_info == "darwin\n" def test_identify_os_empty_response(self): # Arrange ssh_client = MagicMock() stdout_mock = MagicMock() stdout_mock.read.return_value = b"" ssh_client.exec_command.return_value = (MagicMock(), stdout_mock, MagicMock()) # Act os_info = identify_os(ssh_client) # Assert ssh_client.exec_command.assert_called_once_with("uname || ver") assert os_info == "" @patch("shutil.which") def test_verify_bteq_installed_success(self, mock_which): mock_which.return_value = "/usr/bin/bteq" # Should not raise verify_bteq_installed() mock_which.assert_called_with("bteq") @patch("shutil.which") def test_verify_bteq_installed_fail(self, mock_which): mock_which.return_value = None with pytest.raises(AirflowException): verify_bteq_installed() def test_prepare_bteq_script_for_remote_execution(self): conn = {"host": "myhost", "login": "user", "password": "pass"} sql = "SELECT * FROM DUAL;" script = prepare_bteq_script_for_remote_execution(conn, sql) assert ".LOGON myhost/user,pass" in script assert "SELECT * FROM DUAL;" in script assert ".EXIT" in script def test_prepare_bteq_script_for_local_execution(self): sql = "SELECT 1;" script = prepare_bteq_script_for_local_execution(sql) assert "SELECT 1;" in script assert ".EXIT" in script @patch("airflow.providers.teradata.utils.bteq_util.identify_os", return_value="linux") def test_verify_bteq_installed_remote_linux(self, mock_os): ssh_client = MagicMock() stdout_mock = MagicMock() stdout_mock.read.return_value = b"/usr/bin/bteq" stdout_mock.channel.recv_exit_status.return_value = 0 ssh_client.exec_command.return_value = (MagicMock(), stdout_mock, MagicMock()) verify_bteq_installed_remote(ssh_client) ssh_client.exec_command.assert_called_once_with("which bteq") @patch("airflow.providers.teradata.utils.bteq_util.identify_os", return_value="windows") def test_verify_bteq_installed_remote_windows(self, mock_os): ssh_client = MagicMock() stdout_mock = MagicMock() stdout_mock.read.return_value = b"C:\\Program Files\\bteq.exe" stdout_mock.channel.recv_exit_status.return_value = 0 ssh_client.exec_command.return_value = (MagicMock(), stdout_mock, MagicMock()) verify_bteq_installed_remote(ssh_client) ssh_client.exec_command.assert_called_once_with("where bteq") @patch("airflow.providers.teradata.utils.bteq_util.identify_os", return_value="darwin") def test_verify_bteq_installed_remote_macos(self, mock_os): ssh_client = MagicMock() stdout_mock = MagicMock() 
stdout_mock.read.return_value = b"/usr/local/bin/bteq" stdout_mock.channel.recv_exit_status.return_value = 0 ssh_client.exec_command.return_value = (MagicMock(), stdout_mock, MagicMock()) verify_bteq_installed_remote(ssh_client) ssh_client.exec_command.assert_has_calls( [ call("command -v zsh"), call('zsh -l -c "which bteq"'), ] ) @patch("airflow.providers.teradata.utils.bteq_util.identify_os", return_value="darwin") def test_verify_bteq_installed_remote_macos_which_called_when_no_zsh(self, mock_os): ssh_client = MagicMock() # Mock for "command -v zsh" returning empty (no zsh) stdin_mock_1 = MagicMock() stdout_mock_1 = MagicMock() stderr_mock_1 = MagicMock() stdout_mock_1.read.return_value = b"" # No zsh path found stderr_mock_1.read.return_value = b"" # Return empty bytes here! ssh_client.exec_command.side_effect = [ (stdin_mock_1, stdout_mock_1, stderr_mock_1), # command -v zsh (MagicMock(), MagicMock(), MagicMock()), # which bteq ] # Mock for "which bteq" command response stdin_mock_2 = MagicMock() stdout_mock_2 = MagicMock() stderr_mock_2 = MagicMock() stdout_mock_2.channel.recv_exit_status.return_value = 0 stdout_mock_2.read.return_value = b"/usr/local/bin/bteq" stderr_mock_2.read.return_value = b"" # Also return bytes here # Since side_effect was already assigned, override second call manually ssh_client.exec_command.side_effect = [ (stdin_mock_1, stdout_mock_1, stderr_mock_1), # command -v zsh (stdin_mock_2, stdout_mock_2, stderr_mock_2), # which bteq ] verify_bteq_installed_remote(ssh_client) ssh_client.exec_command.assert_has_calls( [ call("command -v zsh"), call("which bteq"), ] ) @patch("airflow.providers.teradata.utils.bteq_util.identify_os", return_value="darwin") def test_verify_bteq_installed_remote_macos_which_fails_no_zsh(self, mock_os): ssh_client = MagicMock() # Mock for "command -v zsh" returning empty (no zsh) stdin_mock_1 = MagicMock() stdout_mock_1 = MagicMock() stderr_mock_1 = MagicMock() stdout_mock_1.read.return_value = b"" # No zsh path found ssh_client.exec_command.side_effect = [ (stdin_mock_1, stdout_mock_1, stderr_mock_1), # command -v zsh (MagicMock(), MagicMock(), MagicMock()), # which bteq ] # For which bteq failure ssh_client.exec_command.return_value[1].channel.recv_exit_status.return_value = 1 ssh_client.exec_command.return_value[1].read.return_value = b"" ssh_client.exec_command.return_value[2].read.return_value = b"command not found" with pytest.raises(AirflowException) as exc_info: verify_bteq_installed_remote(ssh_client) assert "BTEQ is not installed or not available in PATH" in str(exc_info.value) ssh_client.exec_command.assert_has_calls( [ call("command -v zsh"), call("which bteq"), ] ) @patch("airflow.providers.teradata.utils.bteq_util.identify_os", return_value="linux") def test_verify_bteq_installed_remote_fail(self, mock_os): ssh_client = MagicMock() stdout_mock = MagicMock() stderr_mock = MagicMock() stdout_mock.read.return_value = b"" stderr_mock.read.return_value = b"command not found" stdout_mock.channel.recv_exit_status.return_value = 1 ssh_client.exec_command.return_value = (MagicMock(), stdout_mock, stderr_mock) with pytest.raises(AirflowException, match="BTEQ is not installed or not available in PATH"): verify_bteq_installed_remote(ssh_client) ssh_client.exec_command.assert_called_once_with("which bteq") @patch("paramiko.SSHClient.exec_command") def test_verify_bteq_installed_remote_success(self, mock_exec): mock_stdin = MagicMock() mock_stdout = MagicMock() mock_stderr = MagicMock() mock_stdout.channel.recv_exit_status.return_value = 
0 mock_stdout.read.return_value = b"/usr/bin/bteq" mock_stderr.read.return_value = b"" mock_exec.return_value = (mock_stdin, mock_stdout, mock_stderr) ssh_client = MagicMock() ssh_client.exec_command = mock_exec # Should not raise verify_bteq_installed_remote(ssh_client) @patch("paramiko.SSHClient.open_sftp") def test_transfer_file_sftp(self, mock_open_sftp): mock_sftp = MagicMock() mock_open_sftp.return_value = mock_sftp ssh_client = MagicMock() ssh_client.open_sftp = mock_open_sftp transfer_file_sftp(ssh_client, "local_file.txt", "remote_file.txt") mock_open_sftp.assert_called_once() mock_sftp.put.assert_called_once_with("local_file.txt", "remote_file.txt") mock_sftp.close.assert_called_once() def test_is_valid_file(self): # create temp file with open("temp_test_file.txt", "w") as f: f.write("hello") assert is_valid_file("temp_test_file.txt") is True assert is_valid_file("non_existent_file.txt") is False os.remove("temp_test_file.txt") def test_is_valid_encoding(self): # Write a file with UTF-8 encoding with open("temp_utf8_file.txt", "w", encoding="utf-8") as f: f.write("hello world") # Should return True assert is_valid_encoding("temp_utf8_file.txt", encoding="utf-8") is True # Cleanup os.remove("temp_utf8_file.txt") def test_read_file_success(self): content = "Sample content" with open("temp_read_file.txt", "w") as f: f.write(content) read_content = read_file("temp_read_file.txt") assert read_content == content os.remove("temp_read_file.txt") def test_read_file_file_not_found(self): with pytest.raises(FileNotFoundError): read_file("non_existent_file.txt") @patch("paramiko.SSHClient.open_sftp") def test_is_valid_remote_bteq_script_file_exists(self, mock_open_sftp): mock_sftp = MagicMock() mock_open_sftp.return_value = mock_sftp # Mock stat to return a regular file mode mock_stat = MagicMock() mock_stat.st_mode = stat.S_IFREG mock_sftp.stat.return_value = mock_stat ssh_client = MagicMock() ssh_client.open_sftp = mock_open_sftp result = is_valid_remote_bteq_script_file(ssh_client, "/remote/path/to/file") assert result is True mock_sftp.close.assert_called_once() @patch("paramiko.SSHClient.open_sftp") def test_is_valid_remote_bteq_script_file_not_exists(self, mock_open_sftp): mock_sftp = MagicMock() mock_open_sftp.return_value = mock_sftp # Raise FileNotFoundError for stat mock_sftp.stat.side_effect = FileNotFoundError ssh_client = MagicMock() ssh_client.open_sftp = mock_open_sftp result = is_valid_remote_bteq_script_file(ssh_client, "/remote/path/to/file") assert result is False mock_sftp.close.assert_called_once() def test_is_valid_remote_bteq_script_file_none_path(self): ssh_client = MagicMock() result = is_valid_remote_bteq_script_file(ssh_client, None) assert result is False if __name__ == "__main__": unittest.main()
TestBteqUtils
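Most of the tests above use the same mocking pattern for paramiko: `ssh_client.exec_command(...)` must return an `(stdin, stdout, stderr)` triple whose `stdout.read()` yields bytes and whose `stdout.channel.recv_exit_status()` yields the exit code. A stripped-down sketch of that pattern with only the standard library (the helper name is illustrative, not part of the module under test):

from unittest.mock import MagicMock

def make_fake_ssh(output: bytes, exit_status: int = 0):
    """Build a MagicMock that mimics paramiko's SSHClient.exec_command()."""
    stdout = MagicMock()
    stdout.read.return_value = output
    stdout.channel.recv_exit_status.return_value = exit_status

    ssh_client = MagicMock()
    ssh_client.exec_command.return_value = (MagicMock(), stdout, MagicMock())
    return ssh_client

ssh = make_fake_ssh(b"/usr/bin/bteq\n")
_, out, _ = ssh.exec_command("which bteq")
assert out.read() == b"/usr/bin/bteq\n"
assert out.channel.recv_exit_status() == 0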
python
pytorch__pytorch
test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
{ "start": 1527, "end": 7749 }
class ____(MultiProcessTestCase): def setUp(self): super().setUp() self._spawn_processes() def tearDown(self): try: os.remove(self.file_name) except OSError: pass def _get_process_group_nccl(self): store = dist.FileStore(self.file_name, self.world_size) dist.init_process_group( backend="nccl", world_size=self.world_size, rank=self.rank, store=store, ) return dist.distributed_c10d._get_default_group() @property def world_size(self): return 2 def _local_model(self): local_model = TestDdpCommHook().cpu() return local_model def _get_grads(self, process_group, hook_type=None): device_id = gpus_for_rank(self.world_size)[self.rank][0] gpu_model = DistributedDataParallel( TestDdpCommHook().to(device_id), device_ids=[device_id], process_group=process_group, ) # Register DDP Communication Hook if defined if hook_type is not None: register_ddp_comm_hook( comm_hook_type=hook_type, model=gpu_model, state=process_group ) return self._run_and_get_grads(gpu_model) def _run_and_get_grads(self, model): torch.manual_seed(2020) input = torch.randn(40, 20) # Run forward output = model(input, self.rank) # Run backward output.mean().backward() # The only layer param = next(model.parameters()) return param.grad @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_allreduce_hook(self): """ This unit test verifies the ``allreduce`` hook registered case gives same result with no hook registered case. """ process_group = self._get_process_group_nccl() # No hook registered case, get the reference grads. reference_grads = self._get_grads(process_group, None) # Register hook case, get the hook grads. hook_grads = self._get_grads(process_group, DDPCommHookType.ALLREDUCE) torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=0) @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_fp16compress_hook(self): """ This unit test verifies the ``fp16 compress`` hook registered case gives close result with no hook registered case. """ process_group = self._get_process_group_nccl() # No hook registered case, get the reference grads. reference_grads = self._get_grads(process_group, None) # Register hook case, get the hook grads. hook_grads = self._get_grads(process_group, DDPCommHookType.FP16_COMPRESS) torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4) @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_quantize_per_tensor_hook(self): """ This unit test verifies the ``quantize per tensor`` hook registered case gives close result with no hook registered case. """ process_group = self._get_process_group_nccl() # No hook registered case, get the reference grads. reference_grads = self._get_grads(process_group, None) # Register hook case, get the hook grads. hook_grads = self._get_grads(process_group, DDPCommHookType.QUANTIZE_PER_TENSOR) torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4) @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_quantize_per_channel_hook(self): """ This unit test verifies the ``quantize per channel`` hook registered case gives close result with no hook registered case. """ process_group = self._get_process_group_nccl() # No hook registered case, get the reference grads. reference_grads = self._get_grads(process_group, None) # Register hook case, get the hook grads. 
hook_grads = self._get_grads( process_group, DDPCommHookType.QUANTIZE_PER_CHANNEL ) torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=1e-4) @requires_nccl() @skip_if_lt_x_gpu(2) def test_ddp_comm_hook_noop_hook(self): """ This unit test verifies the ``noop`` hook registered case and a subsequent allreduce gives same result with no hook registered case. """ process_group = self._get_process_group_nccl() # No hook registered case, get the reference grads. reference_grads = self._get_grads(process_group, None) # Register hook case, get the hook grads. hook_grads = self._get_grads(process_group, DDPCommHookType.NOOP) # Apply a subsequent allreduce to average grads. hook_grads.div_(self.world_size) dist.all_reduce(hook_grads, group=process_group) torch.testing.assert_close(hook_grads, reference_grads, rtol=1e-5, atol=0) @requires_nccl() @skip_if_lt_x_gpu(2) def test_is_last_hook(self): process_group = self._get_process_group_nccl() def hook(flags, bucket): flags.append(bucket.is_last()) fut = torch.futures.Future() fut.set_result(bucket.buffer()) return fut flags = [] device_id = gpus_for_rank(self.world_size)[self.rank][0] model = nn.Sequential( nn.Linear(2, 4000, bias=False), *[nn.Linear(4000, 4000, bias=False) for _ in range(10)], ) gpu_model = DistributedDataParallel( model.to(device_id), device_ids=[device_id], process_group=process_group, ) gpu_model.register_comm_hook(state=flags, hook=hook) input = torch.randn(10, 2) gpu_model(input).sum().backward() self.assertTrue(flags[-1]) self.assertFalse(any(flags[:-1])) if __name__ == "__main__": assert not torch.cuda._initialized, ( "test_distributed must not have initialized CUDA context on main process" ) run_tests()
DistributedDataParallelCommHookTest
python
wandb__wandb
wandb/vendor/pygments/lexers/robotframework.py
{ "start": 8766, "end": 8927 }
class ____(TestCaseSetting): _keyword_settings = ('teardown',) _other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags')
KeywordSetting
python
coleifer__peewee
tests/queries.py
{ "start": 296, "end": 5574 }
class ____(DatabaseTestCase): database = get_in_memory_db() def setUp(self): super(TestQueryExecution, self).setUp() User.bind(self.database) Tweet.bind(self.database) Register.bind(self.database) self.execute('CREATE TABLE "users" (id INTEGER NOT NULL PRIMARY KEY, ' 'username TEXT)') self.execute('CREATE TABLE "tweet" (id INTEGER NOT NULL PRIMARY KEY, ' 'user_id INTEGER NOT NULL, content TEXT, FOREIGN KEY ' '(user_id) REFERENCES users (id))') self.execute('CREATE TABLE "register" (' 'id INTEGER NOT NULL PRIMARY KEY, ' 'value REAL)') def tearDown(self): self.execute('DROP TABLE "tweet";') self.execute('DROP TABLE "users";') self.execute('DROP TABLE "register";') super(TestQueryExecution, self).tearDown() def create_user_tweets(self, username, *tweets): user_id = User.insert({User.username: username}).execute() for tweet in tweets: Tweet.insert({ Tweet.user_id: user_id, Tweet.content: tweet}).execute() return user_id def test_selection(self): huey_id = self.create_user_tweets('huey', 'meow', 'purr') query = User.select() self.assertEqual(query[:], [{'id': huey_id, 'username': 'huey'}]) query = (Tweet .select(Tweet.content, User.username) .join(User, on=(Tweet.user_id == User.id)) .order_by(Tweet.id)) self.assertEqual(query[:], [ {'content': 'meow', 'username': 'huey'}, {'content': 'purr', 'username': 'huey'}]) def test_select_peek_first(self): huey_id = self.create_user_tweets('huey', 'meow', 'purr', 'hiss') query = Tweet.select(Tweet.content).order_by(Tweet.id) self.assertEqual(query.peek(n=2), [ {'content': 'meow'}, {'content': 'purr'}]) self.assertEqual(query.first(), {'content': 'meow'}) query = Tweet.select().where(Tweet.id == 0) self.assertIsNone(query.peek(n=2)) self.assertIsNone(query.first()) def test_select_get(self): huey_id = self.create_user_tweets('huey') self.assertEqual(User.select().where(User.username == 'huey').get(), { 'id': huey_id, 'username': 'huey'}) self.assertIsNone(User.select().where(User.username == 'x').get()) def test_select_count(self): huey_id = self.create_user_tweets('huey', 'meow', 'purr') mickey_id = self.create_user_tweets('mickey', 'woof', 'pant', 'whine') self.assertEqual(User.select().count(), 2) self.assertEqual(Tweet.select().count(), 5) query = Tweet.select().where(Tweet.user_id == mickey_id) self.assertEqual(query.count(), 3) query = (Tweet .select() .join(User, on=(Tweet.user_id == User.id)) .where(User.username == 'foo')) self.assertEqual(query.count(), 0) def test_select_exists(self): self.create_user_tweets('huey') self.assertTrue(User.select().where(User.username == 'huey').exists()) self.assertFalse(User.select().where(User.username == 'foo').exists()) def test_scalar(self): values = [1.0, 1.5, 2.0, 5.0, 8.0] (Register .insert([{Register.value: value} for value in values]) .execute()) query = Register.select(fn.AVG(Register.value)) self.assertEqual(query.scalar(), 3.5) query = query.where(Register.value < 5) self.assertEqual(query.scalar(), 1.5) query = (Register .select( fn.SUM(Register.value), fn.COUNT(Register.value), fn.SUM(Register.value) / fn.COUNT(Register.value))) self.assertEqual(query.scalar(as_tuple=True), (17.5, 5, 3.5)) query = query.where(Register.value >= 2) self.assertEqual(query.scalar(as_tuple=True), (15, 3, 5)) def test_scalars(self): values = [1.0, 1.5, 2.0, 5.0, 8.0] (Register .insert([{Register.value: value} for value in values]) .execute()) query = Register.select(Register.value).order_by(Register.value) self.assertEqual(list(query.scalars()), values) query = query.where(Register.value < 5) 
self.assertEqual(list(query.scalars()), [1.0, 1.5, 2.0]) def test_slicing_select(self): values = [1., 1., 2., 3., 5., 8.] (Register .insert([(v,) for v in values], columns=(Register.value,)) .execute()) query = (Register .select(Register.value) .order_by(Register.value) .tuples()) with self.assertQueryCount(1): self.assertEqual(query[0], (1.,)) self.assertEqual(query[:2], [(1.,), (1.,)]) self.assertEqual(query[1:4], [(1.,), (2.,), (3.,)]) self.assertEqual(query[-1], (8.,)) self.assertEqual(query[-2], (5.,)) self.assertEqual(query[-2:], [(5.,), (8.,)]) self.assertEqual(query[2:-2], [(2.,), (3.,)])
TestQueryExecution
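The `scalar()` / `scalars()` assertions above can be reproduced outside the test harness with a throwaway in-memory SQLite database. A hedged sketch (assumes a recent peewee 3.x where `Select.scalars()` is available; the model below is a stand-in for the test's `Register` table, not the test's own binding machinery):

from peewee import Model, FloatField, SqliteDatabase, fn

db = SqliteDatabase(":memory:")

class Register(Model):
    value = FloatField()
    class Meta:
        database = db

db.create_tables([Register])
Register.insert_many([(v,) for v in (1.0, 1.5, 2.0, 5.0, 8.0)],
                     fields=[Register.value]).execute()

print(Register.select(fn.AVG(Register.value)).scalar())   # 3.5
print(list(Register.select(Register.value)
                   .order_by(Register.value).scalars()))   # [1.0, 1.5, 2.0, 5.0, 8.0]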
python
aimacode__aima-python
agents.py
{ "start": 24162, "end": 24465 }
class ____(Environment): """Model for Continuous World""" def __init__(self, width=10, height=10): super().__init__() self.width = width self.height = height def add_obstacle(self, coordinates): self.things.append(PolygonObstacle(coordinates))
ContinuousWorld
python
encode__httpx
httpx/_config.py
{ "start": 2098, "end": 5406 }
class ____: """ Timeout configuration. **Usage**: Timeout(None) # No timeouts. Timeout(5.0) # 5s timeout on all operations. Timeout(None, connect=5.0) # 5s timeout on connect, no other timeouts. Timeout(5.0, connect=10.0) # 10s timeout on connect. 5s timeout elsewhere. Timeout(5.0, pool=None) # No timeout on acquiring connection from pool. # 5s timeout elsewhere. """ def __init__( self, timeout: TimeoutTypes | UnsetType = UNSET, *, connect: None | float | UnsetType = UNSET, read: None | float | UnsetType = UNSET, write: None | float | UnsetType = UNSET, pool: None | float | UnsetType = UNSET, ) -> None: if isinstance(timeout, Timeout): # Passed as a single explicit Timeout. assert connect is UNSET assert read is UNSET assert write is UNSET assert pool is UNSET self.connect = timeout.connect # type: typing.Optional[float] self.read = timeout.read # type: typing.Optional[float] self.write = timeout.write # type: typing.Optional[float] self.pool = timeout.pool # type: typing.Optional[float] elif isinstance(timeout, tuple): # Passed as a tuple. self.connect = timeout[0] self.read = timeout[1] self.write = None if len(timeout) < 3 else timeout[2] self.pool = None if len(timeout) < 4 else timeout[3] elif not ( isinstance(connect, UnsetType) or isinstance(read, UnsetType) or isinstance(write, UnsetType) or isinstance(pool, UnsetType) ): self.connect = connect self.read = read self.write = write self.pool = pool else: if isinstance(timeout, UnsetType): raise ValueError( "httpx.Timeout must either include a default, or set all " "four parameters explicitly." ) self.connect = timeout if isinstance(connect, UnsetType) else connect self.read = timeout if isinstance(read, UnsetType) else read self.write = timeout if isinstance(write, UnsetType) else write self.pool = timeout if isinstance(pool, UnsetType) else pool def as_dict(self) -> dict[str, float | None]: return { "connect": self.connect, "read": self.read, "write": self.write, "pool": self.pool, } def __eq__(self, other: typing.Any) -> bool: return ( isinstance(other, self.__class__) and self.connect == other.connect and self.read == other.read and self.write == other.write and self.pool == other.pool ) def __repr__(self) -> str: class_name = self.__class__.__name__ if len({self.connect, self.read, self.write, self.pool}) == 1: return f"{class_name}(timeout={self.connect})" return ( f"{class_name}(connect={self.connect}, " f"read={self.read}, write={self.write}, pool={self.pool})" )
Timeout
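Per the constructor shown above, a timeout can be given as a single default, a `(connect, read, write, pool)` tuple with trailing entries optional, another `Timeout`, or all four values explicitly, and `as_dict()` exposes the result. A short usage sketch (assumes `httpx` is installed; the numbers are arbitrary):

import httpx

t1 = httpx.Timeout(5.0)                 # one default for everything
t2 = httpx.Timeout(5.0, connect=10.0)   # 5s default, longer connect phase
t3 = httpx.Timeout((10.0, 5.0))         # tuple form: (connect, read[, write[, pool]])

print(t1.as_dict())  # {'connect': 5.0, 'read': 5.0, 'write': 5.0, 'pool': 5.0}
print(t2.as_dict())  # {'connect': 10.0, 'read': 5.0, 'write': 5.0, 'pool': 5.0}
print(t3.as_dict())  # {'connect': 10.0, 'read': 5.0, 'write': None, 'pool': None}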
python
doocs__leetcode
solution/0800-0899/0884.Uncommon Words from Two Sentences/Solution.py
{ "start": 0, "end": 193 }
class ____: def uncommonFromSentences(self, s1: str, s2: str) -> List[str]: cnt = Counter(s1.split()) + Counter(s2.split()) return [s for s, v in cnt.items() if v == 1]
Solution
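The one-liner above works because adding two `Counter` objects sums the word counts across both sentences, so a word is "uncommon" exactly when its combined count is 1. A worked example with the classic inputs, using only the standard library:

from collections import Counter

s1, s2 = "this apple is sweet", "this apple is sour"
cnt = Counter(s1.split()) + Counter(s2.split())
# Counter({'this': 2, 'apple': 2, 'is': 2, 'sweet': 1, 'sour': 1})
print([w for w, c in cnt.items() if c == 1])  # ['sweet', 'sour']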
python
dagster-io__dagster
python_modules/dagster/dagster/_core/storage/event_log/base.py
{ "start": 6806, "end": 6885 }
class ____: name: str limit: int from_default: bool @public
PoolLimit
python
tensorflow__tensorflow
tensorflow/python/ops/control_flow_ops_test.py
{ "start": 49184, "end": 52671 }
class ____(test_util.TensorFlowTestCase): # The same test can run with and without XLA compilation. # In non-XLA gpu case, it exercises gpu branch. # In XLA gpu cases, it exercises the default case. # This test is to test the non-XLA case so that we disable XLA. @test_util.disable_xla("xla has different execution branch") def testCommonCases(self): def cpu_fn(x): return x + x def gpu_fn(x): return x * x def flexible_fn(a): branches = {"CPU": lambda: cpu_fn(a), "GPU": lambda: gpu_fn(a)} return control_flow_switch_case.execute_fn_for_device( branches, lambda: cpu_fn(a)) @def_function.function def flexible_defun(a): return flexible_fn(a) def run_defun_and_tape(a): with backprop.GradientTape() as tape: tape.watch(a) result = flexible_defun(a) grad = tape.gradient(result, a) r = flexible_fn(a) return r, result, grad a = array_ops.constant(3.) with ops.device("cpu:0"): r, result, grad = run_defun_and_tape(a) self.assertEqual(6., self.evaluate(r)) self.assertEqual(6., self.evaluate(result)) self.assertEqual([2.], self.evaluate(grad)) if test_util.is_gpu_available(): with ops.device("gpu:0"): r, result, grad = run_defun_and_tape(a) self.assertEqual(9., self.evaluate(r)) self.assertEqual(9., self.evaluate(result)) self.assertEqual([6.], self.evaluate(grad)) # no device annotation r, result, grad = run_defun_and_tape(a) if test_util.is_gpu_available(): self.assertEqual(9., self.evaluate(r)) self.assertEqual(9., self.evaluate(result)) self.assertEqual([6.], self.evaluate(grad)) else: self.assertEqual(6., self.evaluate(r)) self.assertEqual(6., self.evaluate(result)) self.assertEqual([2.], self.evaluate(grad)) def testCompile(self): if not test_util.is_gpu_available(): return def cpu_fn(x): return x + x def gpu_fn(x): return x * x @def_function.function(jit_compile=True) def flexible_defun(a): branches = {"CPU": lambda: cpu_fn(a), "GPU": lambda: gpu_fn(a)} return control_flow_switch_case.execute_fn_for_device( branches, lambda: cpu_fn(a)) # Always execute the default branch in xla compilation case. a = array_ops.constant(3.) r = flexible_defun(a) self.assertEqual(6., self.evaluate(r)) def testFallBack(self): def default_fn(x): return x def tpu_fn(x): return x * x * x def flexible_fn(a): branches = {"TPU": lambda: tpu_fn(a)} return control_flow_switch_case.execute_fn_for_device( branches, default_fn=lambda: default_fn(a)) @def_function.function def flexible_defun(a): return flexible_fn(a) a = array_ops.constant(3.) with ops.device("cpu:0"): result_defun = flexible_defun(a) result_defun = flexible_fn(a) self.assertEqual(3., self.evaluate(result_defun)) # execute_fn_for_device is not inside defun_function. result = flexible_fn(a) self.assertEqual(3., self.evaluate(result)) if test_util.is_gpu_available(): with ops.device("gpu:0"): result_defun = flexible_defun(a) self.assertEqual(3., self.evaluate(result_defun)) # execute_fn_for_device is not inside defun_function. result = flexible_fn(a) self.assertEqual(3., self.evaluate(result))
ExecuteFnForDeviceTest
python
pyqtgraph__pyqtgraph
pyqtgraph/graphicsItems/ROI.py
{ "start": 83108, "end": 89654 }
class ____(ROI): r""" Container class for multiple connected LineSegmentROIs. This class allows the user to draw paths of multiple line segments. ============== ============================================================= **Arguments** positions (list of length-2 sequences) The list of points in the path. Note that, unlike the handle positions specified in other ROIs, these positions must be expressed in the normal coordinate system of the ROI, rather than (0 to 1) relative to the size of the ROI. closed (bool) if True, an extra LineSegmentROI is added connecting the beginning and end points. \**args All extra keyword arguments are passed to ROI() ============== ============================================================= """ def __init__(self, positions, closed=False, pos=None, **args): if pos is None: pos = [0,0] self.closed = closed self.segments = [] ROI.__init__(self, pos, size=[1,1], **args) self.setPoints(positions) def setPoints(self, points, closed=None): """ Set the complete sequence of points displayed by this ROI. ============= ========================================================= **Arguments** points List of (x,y) tuples specifying handle locations to set. closed If bool, then this will set whether the ROI is closed (the last point is connected to the first point). If None, then the closed mode is left unchanged. ============= ========================================================= """ if closed is not None: self.closed = closed self.clearPoints() for p in points: self.addFreeHandle(p) start = -1 if self.closed else 0 for i in range(start, len(self.handles)-1): self.addSegment(self.handles[i]['item'], self.handles[i+1]['item']) def clearPoints(self): """ Remove all handles and segments. """ while len(self.handles) > 0: self.removeHandle(self.handles[0]['item']) def getState(self): state = ROI.getState(self) state['closed'] = self.closed state['points'] = [Point(h.pos()) for h in self.getHandles()] return state def saveState(self): state = ROI.saveState(self) state['closed'] = self.closed state['points'] = [tuple(h.pos()) for h in self.getHandles()] return state def setState(self, state): ROI.setState(self, state) self.setPoints(state['points'], closed=state['closed']) def addSegment(self, h1, h2, index=None): seg = _PolyLineSegment(handles=(h1, h2), pen=self.pen, hoverPen=self.hoverPen, parent=self, movable=False, antialias=self._antialias) if index is None: self.segments.append(seg) else: self.segments.insert(index, seg) seg.sigClicked.connect(self.segmentClicked) seg.setAcceptedMouseButtons(QtCore.Qt.MouseButton.LeftButton) seg.setZValue(self.zValue()+1) for h in seg.handles: h['item'].setDeletable(True) h['item'].setAcceptedMouseButtons(h['item'].acceptedMouseButtons() | QtCore.Qt.MouseButton.LeftButton) ## have these handles take left clicks too, so that handles cannot be added on top of other handles def setMouseHover(self, hover): ## Inform all the ROI's segments that the mouse is(not) hovering over it ROI.setMouseHover(self, hover) for s in self.segments: s.setParentHover(hover) def addHandle(self, info, index=None): h = ROI.addHandle(self, info, index=index) h.sigRemoveRequested.connect(self.removeHandle) self.stateChanged(finish=True) return h @QtCore.Slot(object, object) def segmentClicked(self, segment, ev=None, pos=None): ## pos should be in this item's coordinate system if ev is not None: pos = segment.mapToParent(ev.pos()) elif pos is None: raise Exception("Either an event or a position must be given.") h2 = segment.handles[1]['item'] i = 
self.segments.index(segment) h3 = self.addFreeHandle(pos, index=self.indexOfHandle(h2)) self.addSegment(h3, h2, index=i+1) segment.replaceHandle(h2, h3) @QtCore.Slot(object) def removeHandle(self, handle, updateSegments=True): ROI.removeHandle(self, handle) handle.sigRemoveRequested.disconnect(self.removeHandle) if not updateSegments: return segments = handle.rois[:] if len(segments) == 1: self.removeSegment(segments[0]) elif len(segments) > 1: handles = [h['item'] for h in segments[1].handles] handles.remove(handle) segments[0].replaceHandle(handle, handles[0]) self.removeSegment(segments[1]) self.stateChanged(finish=True) def removeSegment(self, seg): for handle in seg.handles[:]: seg.removeHandle(handle['item']) self.segments.remove(seg) seg.sigClicked.disconnect(self.segmentClicked) self.scene().removeItem(seg) def checkRemoveHandle(self, h): ## called when a handle is about to display its context menu if self.closed: return len(self.handles) > 3 else: return len(self.handles) > 2 def paint(self, p, *args): pass def boundingRect(self): return self.shape().boundingRect() def shape(self): p = QtGui.QPainterPath() if len(self.handles) == 0: return p p.moveTo(self.handles[0]['item'].pos()) for i in range(len(self.handles)): p.lineTo(self.handles[i]['item'].pos()) p.lineTo(self.handles[0]['item'].pos()) return p def getArrayRegion(self, *args, **kwds): return self._getArrayRegionForArbitraryShape(*args, **kwds) def setPen(self, *args, **kwds): ROI.setPen(self, *args, **kwds) for seg in self.segments: seg.setPen(*args, **kwds)
PolyLineROI
python
pytorch__pytorch
test/jit/test_union.py
{ "start": 537, "end": 33966 }
class ____(JitTestCase): """ This class tests the functionality of `Union`. Note: It's important to be able to refine the type of a `Union` to one of its internal types. Currently, there are differences in the way Python expects `isinstance` checks and the way TorchScript expects `isinstance` checks. This means that we can't use `checkScript` in our test cases because either the eager mode or the script mode wouldn't run! So, some test cases have separate but equivalent functions to emulate `checkScript`. """ def test_check_union_annotation(self): def test_func(a: Union[int, float], b: Optional[int]): return 0 scripted_func = torch.jit.script(test_func) graph_rep = str(scripted_func.graph) code_rep = str(scripted_func.code) # TS graph IR for Union should be annotated as Union() FileCheck().check("Union(").check("int?").run(graph_rep) # Serialized code for Union should be annotated as Union[] FileCheck().check("Union[").check("Optional[int]").run(code_rep) self.checkScript(test_func, (5, 6)) # this shouldn't error out torch._C.parse_ir(str(scripted_func.graph)) def test_union_with_scalar_values(self): def fn(x: Union[int, float]) -> str: return "foo" self.checkScript(fn, (1,)) self.checkScript(fn, (1.0,)) scripted = torch.jit.script(fn) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[float, int\] but " "instead found type str", ): scripted("1") def test_union_with_collections(self): def fn(x: Union[Dict[str, int], List[int]]) -> str: return "foo" self.checkScript(fn, ({"foo": 1, "bar": 2, "baz": 3},)) self.checkScript(fn, ([1, 2, 3],)) scripted = torch.jit.script(fn) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[List\[int\], Dict\[str, " r"int\]\] but instead found type " r"Dict\[str, str\]", ): scripted({"foo": "bar", "baz": "qux"}) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[List\[int\], Dict\[str, " r"int\]\] but instead found type " r"List\[str\]", ): scripted(["foo", "bar", "baz"]) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[List\[int\], Dict\[str, " r"int\]\] but instead found type " "str", ): scripted("1") def test_union_with_enum(self): class Color(Enum): RED = 1 GREEN = 2 make_global(Color) def fn(x: Union[str, Color]) -> str: return "foo" self.checkScript(fn, (Color.RED,)) self.checkScript(fn, ("red",)) scripted = torch.jit.script(fn) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[__torch__.jit.test_union." 
r"Color, str\] but instead found " "type int", ): scripted(1) def test_union_in_class_constructor(self): @torch.jit.script # noqa: B903 class A: # noqa: B903 def __init__(self, x: Union[int, str]) -> None: self.x = x def fn(x: Union[str, int]) -> A: return A(x) self.assertEqual(fn("foo").x, "foo") self.assertEqual(fn(1).x, 1) scripted = torch.jit.script(fn) with self.assertRaisesRegex( RuntimeError, "Expected a member of" r" Union\[int, str\] but instead " r"found type List\[str\]", ): scripted(["foo", "bar", "baz"]) def test_union_return_type(self): def fn(x: int) -> Union[int, str]: return "foo" self.checkScript(fn, (1,)) def test_union_as_annotation(self): def fn() -> Union[int, str]: x: Union[int, str] = "foo" return x self.checkScript(fn, ()) def test_union_as_annotation_in_typed_container(self): def fn() -> None: l: List[Union[int, str]] = [] u1: Union[int, str] = "foo" u2: Union[int, str] = 1 l.append(u1) l.append(u2) self.checkScript(fn, ()) def test_union_as_annotation_py2(self): def fn(): # type: () -> Union[int, str] x: Union[int, str] = "foo" return x self.checkScript(fn, ()) def test_union_as_internal_tuple_type(self): def fn(): t: Tuple[Union[int, str], Union[int, str]] = (1, "foo") return t self.checkScript(fn, ()) def test_union_variable_can_be_reassigned(self): @torch.jit.script def aux1(i: int): return int(i**2) @torch.jit.script def aux2(s: str): return s + s def fn() -> Union[int, str]: x: Union[int, str] = "foo" i: int = 1 x = i y: int = aux1(x) z: str = aux2(str(y)) x = z return x self.checkScript(fn, ()) def test_union_does_not_replace_existing_annotated_type(self): def fn(): x: List[int] = [1, 2, 3] x.append("foo") return x with self.assertRaisesRegex(RuntimeError, "Could not match type str"): scripted = torch.jit.script(fn) scripted() def test_union_does_not_replace_existing_annotated_type_union(self): def fn(): x: List[Union[int, str]] = [1, "foo", 3] x.append(2.0) return x with self.assertRaisesRegex(RuntimeError, "Could not match type float"): scripted = torch.jit.script(fn) scripted() def test_union_does_not_replace_existing_annotated_type_empty_container(self): def fn(): x: List[int] = [] x.append("foo") return x with self.assertRaisesRegex(RuntimeError, "Could not match type str"): scripted = torch.jit.script(fn) scripted() def test_unions_of_unions_are_flattened(self): @torch.jit.script def fn(x: Union[Union[int, str], float]) -> str: return "foo" s = fn.graph FileCheck().check("x : Union(float, int, str)").run(s) def test_unions_of_a_single_argument_vanish(self): @torch.jit.script def fn(x: Union[int]) -> str: return "foo" s = fn.graph FileCheck().check("x : int").run(s) def test_union_redundant_arguments_are_skipped(self): @torch.jit.script def fn(x: Union[int, str, int]) -> str: return "foo" s = fn.graph FileCheck().check("x : Union(int, str)").run(s) def test_union_redundant_arguments_are_skipped_optional(self): @torch.jit.script def fn(x: Union[int, Optional[float], Optional[int]]) -> str: return "foo" s = fn.graph FileCheck().check("x : Union(float, int, NoneType)").run(s) def test_union_redundant_arguments_are_skipped_subtyping(self): @torch.jit.script def fn(x: Union[str, Tuple[Optional[int], int], Tuple[int, int]]) -> str: return "foo" s = fn.graph FileCheck().check("x : Union((int?, int), str)").run(s) def test_union_redundant_arguments_are_skipped_container(self): @torch.jit.script def fn(x: Union[List[str], List[float], List[str]]) -> str: return "foo" s = fn.graph FileCheck().check("x : Union(float[], str[])").run(s) def 
test_union_argument_order_is_ignored(self): @torch.jit.script def fn1(x: Union[int, str]) -> str: return "foo" @torch.jit.script def fn2(x: Union[str, int]) -> str: return "foo" for s in (fn1.graph, fn2.graph): FileCheck().check("x : Union(int, str)").run(s) def test_union_argument_order_is_ignored_container(self): @torch.jit.script def fn1(x: Union[List[str], List[int]]) -> str: return "foo" @torch.jit.script def fn2(x: Union[List[int], List[str]]) -> str: return "foo" for s in (fn1.graph, fn2.graph): FileCheck().check("x : Union(int[], str[])").run(s) def test_union_T_None_is_equivalent_to_optional_T(self): @torch.jit.script def inner(x: Union[int, None]) -> int: if x is not None: return x else: return 5 @torch.jit.script def fn1() -> int: a: Optional[int] = 5 b: Optional[int] = None a_ = inner(a) b_ = inner(b) return a_ + b_ self.assertEqual(fn1(), 10) @torch.jit.script def inner2(x: Optional[int]) -> int: if x is not None: return x else: return 5 @torch.jit.script def fn2() -> int: a: Union[int, None] = 5 b: Union[int, None] = None a_ = inner(a) b_ = inner(b) return a_ + b_ self.assertEqual(fn2(), 10) def test_union_optional_of_union_is_flattened(self): @torch.jit.script def fn(flag: int) -> Union[str, int, None]: y: Union[int, str, None] = "foo" if flag == 0: x: Optional[Union[int, str]] = y elif flag == 1: x: Optional[Union[int, str]] = 1 else: x: Optional[Union[int, str]] = None return x # Can't use `checkScript` because it will flag the fact that # the original code has `Optional[Union[int, str]]` but the # saved/loaded code has `Union[int, NoneType, str]` (even # though this is exactly what we want) self.assertEqual(fn(0), "foo") self.assertEqual(fn(1), 1) self.assertEqual(fn(2), None) buffer = io.BytesIO() torch.jit.save(fn, buffer) buffer = io.BytesIO(buffer.getvalue()) l = torch.jit.load(buffer) s = l.code FileCheck().check("Union[int, NoneType, str]").check( "Union[int, NoneType, str]" ).run(s) def test_union_subclasses_larger_union(self): def fn() -> Union[int, str, torch.Tensor]: x: Union[int, str] = "foo" return x self.checkScript(fn, ()) # TODO: We would like to eventually support this. 
The issue is being # tracked at https://github.com/pytorch/pytorch/issues/58167 def test_union_as_dict_key(self): def fn(): x: Dict[Union[int, str], str] = {} x["foo"] = "bar" x[1] = 2 return x[1] with self.assertRaisesRegex( RuntimeError, "only int, float, complex, Tensor, device and string keys are supported", ): torch.jit.script(fn) def test_union_as_dict_value(self): def fn(): x: Dict[str, Union[int, str]] = {} x["foo"] = "bar" x["baz"] = 2 return x["baz"] self.checkScript(fn, ()) def test_union_module_with_union_instance_variable(self): class M(torch.nn.Module): x: Union[int, str] def __init__(self, x: Union[int, str]): super().__init__() self.x: Union[int, str] = x def forward(self, y: Union[int, str]): self.x = y return self.x self.checkModule( M( 2, ), (1,), ) self.checkModule(M("bar"), ("foo",)) def test_union_module_with_union_class_variable(self): class M(torch.nn.Module): x: Union[int, str] = "foo" def __init__(self, y: int): super().__init__() x = y def forward(self, z: str): x = z return x self.checkModule(M(1), ("foo",)) def test_union_type_refinement(self): def fn(x: Union[int, str]) -> str: if isinstance(x, str): z = x + "bar" return x else: return "baz" self.checkScript(fn, ("foo",)) self.checkScript(fn, (1,)) def test_union_type_refinement_union_rhs(self): def fn(x: int) -> str: if torch.jit.isinstance(x, Union[int, str]): return "bar" else: return "baz" self.checkScript(fn, (1,)) def test_union_type_refinement_tuple_rhs(self): def fn(x: Union[int, float, List[str]]) -> str: if isinstance(x, (int, float)): if isinstance(x, int): return str(x) else: return "foo" else: if len(x): return x[0] else: return "bar" self.checkScript(fn, (1,)) self.checkScript(fn, (1.0,)) self.checkScript(fn, (["a", "b", "c"],)) def test_union_type_refinement_tuple_rhs_noncontained_type(self): def fn(x: Union[int, List[str]]) -> str: if isinstance(x, (int, float)): y = x + x return str(y) else: if len(x): return x[0] else: return "bar" self.checkScript(fn, (1,)) self.checkScript(fn, (["a", "b", "c"],)) def test_union_type_refinement_tuple_rhs_union(self): @torch.jit.script def fn(x: int) -> str: if torch.jit.isinstance(x, (Union[int, str], float)): y = x + x return str(y) else: return "foo" # TODO: There's currently an unrelated bug in # `torch.jit.isinstance` that makes it fail for tuple literals. 
# Posted here: https://github.com/pytorch/pytorch/issues/60095 # Change `assertEqual` to `checkScript` when the bug is fixed self.assertEqual(fn(1), "2") def test_union_type_refinement_statically_false(self): @torch.jit.script def fn(x: int) -> str: if torch.jit.isinstance(x, (Union[str, float], List[str], str)): z = x + "foo" return z else: return "bar" s = fn.graph # Check that we don't have any branching statements FileCheck().check_not("block0()").check_not("block1()").run(s) def test_union_type_refinement_statically_true(self): @torch.jit.script def fn(x: Union[List[int], int]) -> Union[List[int], int]: if not torch.jit.isinstance(x, (int, List[int])): return x else: l = [1, 2, 3] y: Union[List[int], int] = l return y s = fn.graph # Check that we don't have any branching statements FileCheck().check_not("block0()").check_not("block1()").run(s) def test_union_type_refinement_partial_static_refinement_tuple_rhs(self): def fn(x: Union[List[int], int]) -> int: if torch.jit.isinstance(x, (int, float, str)): # We should know that `x` is an `int` here z = x + 1 return z else: return 100 self.checkScript(fn, ([1, 2, 3],)) self.checkScript(fn, (1,)) def test_union_type_refinement_partial_static_refinement_union_rhs(self): def fn(x: Union[List[int], int]) -> int: if torch.jit.isinstance(x, Union[int, float, str]): # We should know that `x` is an `int` here z = x + 1 return z else: return 100 self.checkScript(fn, ([1, 2, 3],)) self.checkScript(fn, (1,)) def test_union_type_refinement_internal_declaration(self): def fn(flag: bool) -> str: x: Union[int, str, None] = None if flag: y = "foo" else: y = 1 if isinstance(x, str): return x else: return "bar" self.checkScript(fn, (True,)) self.checkScript(fn, (False,)) def test_union_branching_with_union_return_and_homogenous_types(self): def fn(x: int) -> Union[int, str]: if x % 2: return "foo" else: return "bar" self.checkScript(fn, (1,)) self.checkScript(fn, (8,)) def test_union_branching_does_not_autoinfer_undeclared_union(self): def fn(x: int) -> str: if x % 2: y = "foo" else: y = x if isinstance(y, str): return y else: return "bar" with self.assertRaisesRegex( RuntimeError, "y is set to type str in the true branch and type int in the false branch", ): torch.jit.script(fn) def test_union_branching_does_not_widen_existing_inferred_type(self): def fn(x: int) -> str: y = "foo" if x % 2: y = "bar" else: y = x if isinstance(y, str): return y else: return "baz" with self.assertRaisesRegex( RuntimeError, "previously had type str but is now being assigned to a value of type int", ): torch.jit.script(fn) def test_union_schema_matching_on_internal_type(self): def fn(x: Union[List[int], Dict[str, int]]) -> int: if torch.jit.isinstance(x, List[int]): return x[0] else: return list(x.values())[0] self.checkScript(fn, ([1, 2, 3],)) self.checkScript(fn, ({"foo": 1, "bar": 2, "baz": 3},)) def test_union_subtractive_refinement(self): def fn(x: Union[List[int], int]) -> int: if not isinstance(x, int): x.append(1) return x[0] else: return x self.checkScript(fn, (1,)) self.checkScript(fn, ([1, 2, 3],)) def test_union_subtractive_refinement_with_container(self): def fn(x: Union[List[int], int]) -> int: if not torch.jit.isinstance(x, List[int]): return x else: x.append(1) return x[0] self.checkScript(fn, (1,)) self.checkScript(fn, ([1, 2, 3],)) def test_union_memory_aliasing(self): def fn(): x: List[torch.Tensor] = [] z: List[Optional[List[torch.Tensor]]] = [] z.append(x) x_alias = z[0] if torch.jit.isinstance(x_alias, List[torch.Tensor]): x_alias.append(torch.tensor(3)) 
return x self.checkScript(fn, ()) def test_union_serialization_preserves_type_annotations(self): # This function will fail after being torch.jit.save'd and # torch.jit.load'd if the type annotations aren't preserved # for Union during serialization. We need the `Union[str, int]` # annotation to make sure that `y` is typed as a Union instead # of as a str in one branch and an int in the other def fn(x: int) -> str: if x % 2: y: Union[str, int] = "bar" else: y: Union[str, int] = x if isinstance(y, str): return y else: return "baz" self.checkScript(fn, (1,)) self.checkScript(fn, (8,)) def _assert_passes(self, template: str, ann: str, lhs: str): code = template.format(ann=ann, lhs=lhs) self.checkScript(code, (), name="fn") def _assert_raises(self, template: str, ann: str, lhs: str, msg: str): code = template.format(ann=ann, lhs=lhs) with self.assertRaisesRegex(RuntimeError, msg): cu = torch.jit.CompilationUnit(code, _frames_up=1) string_frontend = getattr(cu, "fn") # noqa: B009 def test_union_with_list_assignment(self): template = dedent( """ def fn(): x: {ann} = {lhs} if torch.jit.isinstance(x, List[torch.Tensor]): x.append(torch.tensor(3)) return x """ ) lhs = { "list_literal_empty": "[]", "list_literal_of_tensor": "[torch.arange(3), torch.arange(5)]", "list_literal_of_str": '["foo", "bar", "baz"]', "list_literal_of_mixed": "[torch.arange(5), 1]", "list_comprehension_of_tensor": "[torch.add(x, 1) for x in [torch.arange(3), torch.arange(5)]]", "list_comprehension_of_str": '[x + "!" for x in ["foo", "bar", "baz"]]', "list_comprehension_of_mixed": "[torch.add(1, x) for x in [torch.arange(5), 1]]", } """ Union[List[str], List[torch.Tensor]] """ self._assert_raises( template, "Union[List[str], List[torch.Tensor]]", lhs["list_literal_empty"], "there are multiple possible List type candidates in the Union annotation", ) self._assert_passes( template, "Union[List[str], List[torch.Tensor]]", lhs["list_literal_of_tensor"], ) self._assert_passes( template, "Union[List[str], List[torch.Tensor]]", lhs["list_literal_of_str"] ) self._assert_raises( template, "Union[List[str], List[torch.Tensor]]", lhs["list_literal_of_mixed"], "none of those types match the types of the given list elements", ) self._assert_passes( template, "Union[List[str], List[torch.Tensor]]", lhs["list_comprehension_of_tensor"], ) self._assert_passes( template, "Union[List[str], List[torch.Tensor]]", lhs["list_comprehension_of_str"], ) # TODO: Support mixed list comprehensions self._assert_raises( template, "Union[List[str], List[torch.Tensor]]", lhs["list_comprehension_of_mixed"], "Arguments for call are not valid", ) """ Union[int, torch.Tensor] """ self._assert_raises( template, "Union[int, torch.Tensor]", lhs["list_literal_empty"], "Expected an Union type annotation with an inner List type", ) self._assert_raises( template, "Union[int, torch.Tensor]", lhs["list_literal_of_tensor"], "Expected an Union type annotation with an inner List type", ) self._assert_raises( template, "Union[int, torch.Tensor]", lhs["list_comprehension_of_tensor"], "Expected an Union type annotation with an inner List type", ) """ Union[List[torch.Tensor], int] """ self._assert_passes( template, "Union[List[torch.Tensor], int]", lhs["list_literal_empty"] ) self._assert_passes( template, "Union[List[torch.Tensor], int]", lhs["list_literal_of_tensor"] ) self._assert_raises( template, "Union[List[torch.Tensor], int]", lhs["list_literal_of_str"], r"List type annotation `List\[Tensor\]` did " "not match the types of the given list " "elements", ) 
self._assert_raises( template, "Union[List[torch.Tensor], int]", lhs["list_literal_of_mixed"], r"List type annotation `List\[Tensor\]` did " "not match the types of the given list " "elements", ) self._assert_passes( template, "Union[List[torch.Tensor], int]", lhs["list_comprehension_of_tensor"], ) self._assert_raises( template, "Union[List[torch.Tensor], int]", lhs["list_comprehension_of_str"], r"List type annotation `List\[Tensor\]` did " "not match the types of the given list " "elements", ) # TODO(@ansley): Support mixed list comprehensions self._assert_raises( template, "Union[List[torch.Tensor], int]", lhs["list_comprehension_of_mixed"], "Arguments for call are not valid", ) def test_union_with_dict_assignment(self): template = dedent( """ def fn(): x: {ann} = {lhs} if torch.jit.isinstance(x, Dict[str, torch.Tensor]): x["foo"] = torch.tensor(3) return x """ ) lhs = { "dict_literal_empty": "{}", "dict_literal_of_str_tensor": '{"foo" : torch.arange(3), "bar" : torch.arange(5)}', "dict_literal_of_str_int": '{"foo" : 1, "bar" : 2}', "dict_literal_of_mixed": '{"foo" : torch.arange(3), "bar" : 2}', "dict_comprehension_of_str_tensor": '{x : torch.add(y, 1) for x, y in \ zip(["foo", "bar"], [torch.arange(3), torch.arange(5)])}', "dict_comprehension_of_str_int": '{x : torch.add(y, 1) for x, y in \ zip(["foo", "bar"], [1, 2]}', "dict_comprehension_of_mixed": '{x : torch.add(y, 1) for x, y in \ zip(["foo", "bar"], [torch.arange(3), 2])}', "dict_keyword": "dict(foo=torch.arange(3), baz=torch.arange(5))", "dict_keyword_with_iterable": 'dict([("foo", torch.arange(3)), ("bar", torch.arange(5))])', "dict_keyword_with_empty_iterable": "dict([])", "dict_keyword_with_internal_aggregate_function": 'dict(zip(["foo", "bar"], [torch.arange(3), torch.arange(5)])', "dict_keyword_with_mapping": 'dict({"foo" : torch.arange(3), "bar" : torch.arange(5)})', "dict_keyword_with_mapping_and_kwargs": 'dict({"foo" : torch.arange(3), "bar" : torch.arange(5)}, baz=torch.arange(7))', } """ Union[Dict[str, torch.Tensor], Dict[str, int]] """ self._assert_raises( template, "Union[List[str], List[torch.Tensor]]", lhs["dict_literal_empty"], "Expected an Union type annotation with an inner Dict type", ) self._assert_passes( template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", lhs["dict_literal_of_str_tensor"], ) self._assert_passes( template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", lhs["dict_literal_of_str_int"], ) self._assert_raises( template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", lhs["dict_literal_of_mixed"], "none of those dict types can hold the types of the given keys and values", ) # TODO: String frontend does not support tuple unpacking # https://github.com/pytorch/pytorch/issues/64096 # self._assert_passes(template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", # lhs["dict_comprehension_of_str_tensor"]) # self._assert_passes(template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", # lhs["dict_comprehension_of_str_int"]) # self._assert_raises(template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", # lhs["dict_comprehension_of_mixed"], # "foobar") # self._assert_passes(template, # "Union[Dict[str, torch.Tensor], Dict[str, int]]", # lhs["dict_keyword_with_internal_aggregate_function"]) # TODO(@ansley): Follow-up project needed for full type # inference with dict keyword (supported for dict comprehension # and dict literal already; should not be a blocker for anyone) self._assert_raises( template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", lhs["dict_keyword"], "full type 
inference is not yet supported", ) self._assert_raises( template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", lhs["dict_keyword_with_iterable"], "full type inference is not yet supported", ) self._assert_raises( template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", lhs["dict_keyword_with_empty_iterable"], "full type inference is not yet supported", ) self._assert_raises( template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", lhs["dict_keyword_with_mapping"], "full type inference is not yet supported", ) self._assert_raises( template, "Union[Dict[str, torch.Tensor], Dict[str, int]]", lhs["dict_keyword_with_mapping_and_kwargs"], "full type inference is not yet supported", ) """ Union[int, torch.Tensor] """ self._assert_raises( template, "Union[int, torch.Tensor]", lhs["dict_literal_empty"], "Expected an Union type annotation with an inner Dict type", ) self._assert_raises( template, "Union[int, torch.Tensor]", lhs["dict_literal_of_str_tensor"], "Expected an Union type annotation with an inner Dict type", ) # See above--string frontend does not support tuple unpacking # self._assert_raises(template, "Union[int, torch.Tensor]", # lhs["dict_comprehension_of_tensor"], # "foobar") """ Union[Dict[str, torch.Tensor], int] """ self._assert_passes( template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_literal_empty"] ) self._assert_passes( template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_literal_of_str_tensor"], ) self._assert_raises( template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_literal_of_str_int"], "Type annotation was inferred to be " r"`Dict\[str, Tensor\]`, but the type of " "values given by the dict literal is", ) self._assert_raises( template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_literal_of_mixed"], "Type annotation was inferred to be " r"`Dict\[str, Tensor\]`, but the type of " "values given by the dict literal is", ) self._assert_passes( template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_keyword"] ) self._assert_passes( template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_keyword_with_iterable"], ) self._assert_passes( template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_keyword_with_empty_iterable"], ) self._assert_passes( template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_keyword_with_mapping"], ) self._assert_passes( template, "Union[Dict[str, torch.Tensor], int]", lhs["dict_keyword_with_mapping_and_kwargs"], ) # See above--string frontend does not support tuple unpacking # self._assert_passes(template, # "Union[Dict[str, torch.Tensor], int]", # lhs["dict_keyword_with_internal_aggregate_function"]) # # self._assert_passes(template, # "Union[Dict[str, torch.Tensor], int]", # lhs["dict_comprehension_of_str_tensor"]) # self._assert_raises(template, # "Union[Dict[str, torch.Tensor], int]", # lhs["dict_comprehension_of_str_int"], # "foobar") # self._assert_raises(template, # "Union[Dict[str, torch.Tensor], int]", # lhs["dict_comprehension_of_mixed"], # "foobar") if __name__ == "__main__": raise_on_run_directly("test/test_jit.py")
TestUnion
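For readers less familiar with TorchScript, here is a minimal scripted example of the Union handling these tests cover (a sketch assuming a recent PyTorch install; the function body is illustrative).

from typing import Union

import torch


@torch.jit.script
def describe(x: Union[int, str]) -> str:
    # isinstance() refines the Union member, as the refinement tests above verify.
    if isinstance(x, str):
        return "str: " + x
    return "int: " + str(x)


print(describe(3))      # int: 3
print(describe("foo"))  # str: foo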
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_crossing05.py
{ "start": 315, "end": 1396 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("chart_crossing05.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({"type": "column"})

        chart.axis_ids = [55948032, 55950336]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column("A1", data[0])
        worksheet.write_column("B1", data[1])
        worksheet.write_column("C1", data[2])

        chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
        chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
        chart.add_series({"values": "=Sheet1!$C$1:$C$5"})

        chart.set_x_axis({"crossing": "min"})

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
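Outside the comparison harness, the same chart can be produced with a few lines of plain XlsxWriter; the output filename below is an arbitrary assumption.

from xlsxwriter import Workbook

workbook = Workbook("chart_crossing_demo.xlsx")
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})

worksheet.write_column("A1", [1, 2, 3, 4, 5])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})

# Cross the value axis at the minimum category, as exercised by the test above.
chart.set_x_axis({"crossing": "min"})

worksheet.insert_chart("E9", chart)
workbook.close()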
python
coleifer__peewee
tests/shortcuts.py
{ "start": 862, "end": 911 }
class ____(TestModel):
    label = TextField()
Label
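The record's TestModel base is a fixture helper from the peewee test-suite; a standalone sketch with a plain peewee.Model and an in-memory SQLite database (both assumptions) looks like this.

from peewee import Model, SqliteDatabase, TextField

db = SqliteDatabase(":memory:")


class Label(Model):
    label = TextField()

    class Meta:
        database = db


db.connect()
db.create_tables([Label])
Label.create(label="urgent")
print(Label.select().count())  # 1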
python
django__django
tests/admin_views/models.py
{ "start": 12244, "end": 12410 }
class ____(models.Model):
    title = models.CharField(max_length=100)
    published = models.BooleanField(default=False)
    slug = models.SlugField()
PrePopulatedPost
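This model backs Django's admin tests for prepopulated fields; a hypothetical ModelAdmin registration (the admin class name and import path are assumptions) typically looks like the following.

from django.contrib import admin

from .models import PrePopulatedPost


@admin.register(PrePopulatedPost)
class PrePopulatedPostAdmin(admin.ModelAdmin):
    # The admin's JavaScript fills "slug" from "title" as the user types.
    prepopulated_fields = {"slug": ("title",)}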
python
PyCQA__pyflakes
pyflakes/messages.py
{ "start": 56, "end": 424 }
class ____:
    message = ''
    message_args = ()

    def __init__(self, filename, loc):
        self.filename = filename
        self.lineno = loc.lineno
        self.col = loc.col_offset

    def __str__(self):
        return '{}:{}:{}: {}'.format(self.filename, self.lineno, self.col+1,
                                     self.message % self.message_args)
Message
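Concrete pyflakes warnings subclass this base by overriding message and filling message_args; the subclass below is an illustrative sketch in that style, not one of pyflakes' real warning classes.

class UnusedName(Message):
    message = '%r is defined but never used'

    def __init__(self, filename, loc, name):
        Message.__init__(self, filename, loc)
        self.message_args = (name,)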
python
HypothesisWorks__hypothesis
hypothesis-python/tests/pytest/test_mark.py
{ "start": 1010, "end": 1433 }
class ____(TestCase):
    @given(integers())
    def test_foo(self, x):
        pass

    def test_bar(self):
        pass
"""


def test_can_select_mark_on_unittest(testdir):
    script = testdir.makepyfile(UNITTEST_TESTSUITE)
    result = testdir.runpytest(
        script, "--verbose", "--strict-markers", "-m", "hypothesis"
    )
    out = "\n".join(result.stdout.lines)
    assert "1 passed, 1 deselected" in out
TestStuff
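The suite above is embedded as a string and executed through pytest's testdir fixture; written as an ordinary test module (file name assumed), the "1 passed, 1 deselected" split it asserts comes from running `pytest -m hypothesis`.

# test_marks_demo.py -- one property-based test selected, one plain test deselected.
from unittest import TestCase

from hypothesis import given
from hypothesis.strategies import integers


class TestStuff(TestCase):
    @given(integers())
    def test_property(self, x):
        # Carries the "hypothesis" mark, so `pytest -m hypothesis` selects it.
        assert int(str(x)) == x

    def test_plain(self):
        # No @given, so it is deselected by the same command.
        assert True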
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/strings_ops/unicode_script_op_test.py
{ "start": 1068, "end": 2016 }
class ____(test.TestCase):

  @test_util.run_deprecated_v1
  def testValidScripts(self):
    inputs = [
        ord("a"),
        0x0411,  # CYRILLIC CAPITAL LETTER BE
        0x82b8,  # CJK UNIFIED IDEOGRAPH-82B8
        ord(",")
    ]
    with self.cached_session():
      input_vector = constant_op.constant(inputs, dtypes.int32)
      outputs = string_ops.unicode_script(input_vector).eval()
      self.assertAllEqual(
          outputs,
          [
              25,  # USCRIPT_LATIN (LATN)
              8,   # USCRIPT_CYRILLIC (CYRL)
              17,  # USCRIPT_HAN (HANI)
              0    # USCRIPT_COMMON (ZYYY)
          ])

  @test_util.run_deprecated_v1
  def testInvalidScript(self):
    inputs = [-100, 0xffffff]
    with self.cached_session():
      input_vector = constant_op.constant(inputs, dtypes.int32)
      outputs = string_ops.unicode_script(input_vector).eval()
      self.assertAllEqual(outputs, [-1, -1])
UnicodeScriptOpTest
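The same op is available eagerly as tf.strings.unicode_script; a TF2 sketch of the first test case above (assuming TensorFlow 2.x is installed):

import tensorflow as tf

codepoints = tf.constant([ord("a"), 0x0411, 0x82B8, ord(",")], dtype=tf.int32)
scripts = tf.strings.unicode_script(codepoints)
print(scripts.numpy())  # [25  8 17  0] -> Latin, Cyrillic, Han, Common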
python
huggingface__transformers
tests/models/zamba/test_modeling_zamba.py
{ "start": 10111, "end": 18819 }
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( ZambaModel, ZambaForCausalLM, ZambaForSequenceClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": ZambaModel, "text-classification": ZambaForSequenceClassification, "text-generation": ZambaForCausalLM, "zero-shot": ZambaForSequenceClassification, } if is_torch_available() else {} ) def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config): self.assertIsInstance(past_key_values, ZambaHybridDynamicCache) # (batch, kv heads, seq_length, head_dim) num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads) head_dim = getattr(config, "attention_head_dim") attention_shape = (batch_size, num_heads, seq_length, head_dim) intermediate_size = config.mamba_expand * config.hidden_size conv_shape = (batch_size, intermediate_size, config.mamba_d_conv) ssm_shape = (batch_size, config.n_mamba_heads, intermediate_size // config.n_mamba_heads, config.mamba_d_state) self.assertTrue(config.num_hidden_layers, len(past_key_values)) for idx in range(len(past_key_values)): if config.layers_block_type[idx] == "mamba": self.assertEqual(past_key_values.conv_states[idx].shape, conv_shape) self.assertEqual(past_key_values.ssm_states[idx].shape, ssm_shape) else: self.assertEqual(past_key_values.key_cache[idx].shape, attention_shape) self.assertEqual(past_key_values.value_cache[idx].shape, attention_shape) def _check_caches_are_equal(self, cache1: ZambaHybridDynamicCache, cache2: ZambaHybridDynamicCache): if not isinstance(cache1, ZambaHybridDynamicCache) or not isinstance(cache2, ZambaHybridDynamicCache): raise ValueError("The wrong cache is being used!") if not len(cache1) == len(cache2): raise ValueError("Both caches do not have the same number of layers.") num_layers = len(cache1) for idx in range(num_layers): torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx]) torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx]) torch.testing.assert_close(cache1.conv_states[idx], cache2.conv_states[idx]) torch.testing.assert_close(cache1.ssm_states[idx], cache2.ssm_states[idx]) def setUp(self): self.model_tester = ZambaModelTester(self) self.config_tester = ConfigTester(self, config_class=ZambaConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_attention_outputs(self): r""" Overriding the test_attention_outputs test as the Zamba model outputs attention only for its attention layers """ config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", 
seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) expected_num_attentions = ( math.ceil( (self.model_tester.num_hidden_layers - self.model_tester.attn_layer_offset) / self.model_tester.attn_layer_period ) + 1 ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def _get_input_ids_and_config(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs return config, input_ids, input_mask @require_flash_attn @require_torch_accelerator @require_bitsandbytes @pytest.mark.flash_attn_test @slow def test_flash_attn_2_fp32_ln(self): r""" Overriding the test_flash_attn_2_fp32_ln test as the Zamba model, like Mixtral, doesn't support right padding + use cache with FA2 """ for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_input = inputs_dict[model.main_input_name] dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # NOTE: Zamba does not support right padding + use_cache with FA2. dummy_attention_mask[:, -1] = 1 model = model_class.from_pretrained( tmpdirname, dtype=torch.float16, attn_implementation="flash_attention_2", quantization_config=BitsAndBytesConfig(load_in_4bit=True), ) for _, param in model.named_parameters(): # upcast only layer norms if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) _ = model(dummy_input) # with attention mask _ = model(dummy_input, attention_mask=dummy_attention_mask) @require_torch
ZambaModelTest
python
pyqtgraph__pyqtgraph
pyqtgraph/widgets/GroupBox.py
{ "start": 102, "end": 3185 }
class ____(QtWidgets.QGroupBox):
    """Subclass of QGroupBox that implements collapse handle.
    """
    sigCollapseChanged = QtCore.Signal(object)

    def __init__(self, *args):
        QtWidgets.QGroupBox.__init__(self, *args)

        self._collapsed = False
        # We modify the size policy when the group box is collapsed, so
        # keep track of the last requested policy:
        self._lastSizePolicy = self.sizePolicy()

        self.closePath = QtGui.QPainterPath()
        self.closePath.moveTo(0, -1)
        self.closePath.lineTo(0, 1)
        self.closePath.lineTo(1, 0)
        self.closePath.lineTo(0, -1)

        self.openPath = QtGui.QPainterPath()
        self.openPath.moveTo(-1, 0)
        self.openPath.lineTo(1, 0)
        self.openPath.lineTo(0, 1)
        self.openPath.lineTo(-1, 0)

        self.collapseBtn = PathButton(path=self.openPath, size=(12, 12), margin=0)
        self.collapseBtn.setStyleSheet("""
            border: none;
        """)
        self.collapseBtn.setPen('k')
        self.collapseBtn.setBrush('w')
        self.collapseBtn.setParent(self)
        self.collapseBtn.move(3, 3)
        self.collapseBtn.setFlat(True)

        self.collapseBtn.clicked.connect(self.toggleCollapsed)

        if len(args) > 0 and isinstance(args[0], str):
            self.setTitle(args[0])

    def toggleCollapsed(self):
        self.setCollapsed(not self._collapsed)

    def collapsed(self):
        return self._collapsed

    def setCollapsed(self, c):
        if c == self._collapsed:
            return

        if c is True:
            self.collapseBtn.setPath(self.closePath)
            self.setSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Preferred, closing=True)
        elif c is False:
            self.collapseBtn.setPath(self.openPath)
            self.setSizePolicy(self._lastSizePolicy)
        else:
            raise TypeError("Invalid argument %r; must be bool." % c)

        for ch in self.children():
            if isinstance(ch, QtWidgets.QWidget) and ch is not self.collapseBtn:
                ch.setVisible(not c)

        self._collapsed = c
        self.sigCollapseChanged.emit(c)

    def setSizePolicy(self, *args, **kwds):
        QtWidgets.QGroupBox.setSizePolicy(self, *args)
        if kwds.pop('closing', False) is True:
            self._lastSizePolicy = self.sizePolicy()

    def setHorizontalPolicy(self, *args):
        QtWidgets.QGroupBox.setHorizontalPolicy(self, *args)
        self._lastSizePolicy = self.sizePolicy()

    def setVerticalPolicy(self, *args):
        QtWidgets.QGroupBox.setVerticalPolicy(self, *args)
        self._lastSizePolicy = self.sizePolicy()

    def setTitle(self, title):
        # Leave room for button
        QtWidgets.QGroupBox.setTitle(self, " " + title)

    def widgetGroupInterface(self):
        return (self.sigCollapseChanged, GroupBox.collapsed, GroupBox.setCollapsed, True)
GroupBox
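A small interactive sketch of the collapsible group box (the hosted widgets and their labels are illustrative assumptions):

import pyqtgraph as pg
from pyqtgraph.Qt import QtWidgets
from pyqtgraph.widgets.GroupBox import GroupBox

app = pg.mkQApp()

box = GroupBox("Options")
layout = QtWidgets.QVBoxLayout()
layout.addWidget(QtWidgets.QCheckBox("Enable feature"))
layout.addWidget(QtWidgets.QSpinBox())
box.setLayout(layout)

# Fires with True/False whenever the collapse handle is toggled.
box.sigCollapseChanged.connect(lambda collapsed: print("collapsed:", collapsed))

box.show()
# app.exec() would start the event loop in an interactive script.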
python
xlwings__xlwings
xlwings/constants.py
{ "start": 111003, "end": 111148 }
class ____:
    xlAllAtOnce = 2  # from enum XlRoutingSlipDelivery
    xlOneAfterAnother = 1  # from enum XlRoutingSlipDelivery
RoutingSlipDelivery
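These values mirror Excel's COM XlRoutingSlipDelivery enumeration; they are plain class attributes and can be read directly (illustrative access only):

from xlwings.constants import RoutingSlipDelivery

print(RoutingSlipDelivery.xlOneAfterAnother)  # 1
print(RoutingSlipDelivery.xlAllAtOnce)        # 2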
python
allegroai__clearml
clearml/binding/frameworks/tensorflow_bind.py
{ "start": 59013, "end": 77384 }
class ____(object): _current_task = None __original_fn_scalar = None __original_fn_hist = None __original_fn_image = None __original_fn_write_summary = None __trains_event_writer = {} __tf_tb_writer_id_to_logdir = {} __patched = False defaults_dict = dict( report_freq=1, image_report_freq=1, histogram_update_freq_multiplier=5, histogram_granularity=50, ) @staticmethod def update_current_task(task: Any, **kwargs: Any) -> None: if task != PatchTensorFlowEager._current_task: PatchTensorFlowEager.__trains_event_writer = {} PatchTensorFlowEager.defaults_dict.update(kwargs) PatchTensorFlowEager._current_task = task if not task: return if not PatchTensorFlowEager.__patched: PatchTensorFlowEager.__patched = True # make sure we patched the SummaryToEventTransformer PatchTensorFlowEager._patch_summary_ops() PostImportHookPatching.add_on_import("tensorflow", PatchTensorFlowEager._patch_summary_ops) @staticmethod def _patch_summary_ops() -> None: if PatchTensorFlowEager.__original_fn_scalar is not None: return if "tensorflow" in sys.modules: try: # hack: make sure tensorflow.__init__ is called import tensorflow # noqa from tensorflow.python.ops import gen_summary_ops # noqa PatchTensorFlowEager.__original_fn_scalar = gen_summary_ops.write_scalar_summary gen_summary_ops.write_scalar_summary = PatchTensorFlowEager._write_scalar_summary PatchTensorFlowEager.__original_fn_image = gen_summary_ops.write_image_summary gen_summary_ops.write_image_summary = PatchTensorFlowEager._write_image_summary PatchTensorFlowEager.__original_fn_hist = gen_summary_ops.write_histogram_summary gen_summary_ops.write_histogram_summary = PatchTensorFlowEager._write_hist_summary PatchTensorFlowEager.__original_fn_write_summary = gen_summary_ops.write_summary gen_summary_ops.write_summary = PatchTensorFlowEager._write_summary gen_summary_ops.create_summary_file_writer = partial( IsTensorboardInit._patched_tb__init__, gen_summary_ops.create_summary_file_writer, ) gen_summary_ops.create_summary_db_writer = partial( IsTensorboardInit._patched_tb__init__, gen_summary_ops.create_summary_db_writer, ) except ImportError: pass except Exception as ex: LoggerRoot.get_base_logger(TensorflowBinding).debug(str(ex)) # tensorflow 2.7 support (getting logdir) try: import tensorflow # noqa import tensorflow.python # noqa from tensorflow.python.ops import gen_summary_ops gen_summary_ops.create_summary_file_writer = _patched_call( gen_summary_ops.create_summary_file_writer, PatchTensorFlowEager._create_summary_file_writer, ) except Exception: pass @staticmethod def _create_summary_file_writer(original_fn: Callable, *args: Any, **kwargs: Any) -> Any: if not PatchTensorFlowEager._current_task: return original_fn(*args, **kwargs) # noinspection PyBroadException try: a_logdir = None a_writer = None if kwargs and "logdir" in kwargs: a_logdir = kwargs.get("logdir") elif args and len(args) >= 2: a_logdir = args[1] if kwargs and "writer" in kwargs: a_writer = kwargs.get("writer") elif args and len(args) >= 1: a_writer = args[0] if a_writer is not None and a_logdir is not None: a_logdir = a_logdir.numpy().decode() PatchTensorFlowEager.__tf_tb_writer_id_to_logdir[id(a_writer)] = a_logdir except Exception: pass return original_fn(*args, **kwargs) @staticmethod def _get_event_writer(writer: Any) -> EventTrainsWriter: if not PatchTensorFlowEager._current_task: return None if not PatchTensorFlowEager.__trains_event_writer.get(id(writer)): # noinspection PyBroadException try: logdir = writer.get_logdir() except Exception: # check if we are in eager mode, let's 
get the global context lopdir # noinspection PyBroadException try: from tensorflow.python.eager import context # noqa logdir = context.context().summary_writer._init_op_fn.keywords.get("logdir") except Exception: # noinspection PyBroadException try: from tensorflow.python.ops.summary_ops_v2 import ( _summary_state, ) # noqa logdir = _summary_state.writer._init_op_fn.keywords.get("logdir") except Exception: try: logdir = PatchTensorFlowEager.__tf_tb_writer_id_to_logdir[id(writer)] except Exception: logdir = None # noinspection PyBroadException try: if logdir is not None: logdir = logdir.numpy().decode() if not isinstance(logdir, str) else logdir except Exception: logdir = None PatchTensorFlowEager.__trains_event_writer[id(writer)] = EventTrainsWriter( logger=PatchTensorFlowEager._current_task.get_logger(), logdir=logdir, **PatchTensorFlowEager.defaults_dict ) return PatchTensorFlowEager.__trains_event_writer[id(writer)] @staticmethod def trains_object(self) -> Union[EventTrainsWriter, None]: if not PatchTensorFlowEager.__trains_event_writer: return None return PatchTensorFlowEager.__trains_event_writer.get( id(self), list(PatchTensorFlowEager.__trains_event_writer.values())[0] ) @staticmethod def _write_summary( writer: Any, step: Union[int, Any], tensor: Any, tag: Any, summary_metadata: Any, name: Optional[str] = None, **kwargs: Any ) -> Any: if not PatchTensorFlowEager._current_task: return PatchTensorFlowEager.__original_fn_write_summary( writer, step, tensor, tag, summary_metadata, name, **kwargs ) event_writer = PatchTensorFlowEager._get_event_writer(writer) # make sure we can get the tensors values if event_writer and isinstance(step, int) or hasattr(step, "numpy"): # noinspection PyBroadException try: plugin_type = summary_metadata.decode() # remove any none alpha numeric value plugin_type = plugin_type[next(i for i, c in enumerate(plugin_type) if c >= "A") :] if plugin_type.startswith("scalars"): event_writer._add_scalar(tag=str(tag), step=tweak_step(step), scalar_data=tensor.numpy()) elif plugin_type.startswith("images"): img_data_np = tensor.numpy() PatchTensorFlowEager._add_image_event_helper( event_writer, img_data_np=img_data_np, tag=tag, step=step, **kwargs ) elif plugin_type.startswith("histograms"): event_writer._add_histogram(tag=str(tag), step=tweak_step(step), hist_data=tensor.numpy()) elif plugin_type.startswith("text"): event_writer._add_text(tag=str(tag), step=tweak_step(step), tensor_bytes=tensor.numpy()) elif "audio" in plugin_type: audio_bytes_list = [a for a in tensor.numpy().flatten() if a] for i, audio_bytes in enumerate(audio_bytes_list): event_writer._add_audio( tag=str(tag) + ("/{}".format(i) if len(audio_bytes_list) > 1 else ""), step=tweak_step(step), values=None, audio_data=audio_bytes, ) else: pass except Exception: pass return PatchTensorFlowEager.__original_fn_write_summary( writer, step, tensor, tag, summary_metadata, name, **kwargs ) @staticmethod def _write_scalar_summary( writer: Any, step: Union[int, Any], tag: Any, value: Any, name: Optional[str] = None, **kwargs: Any ) -> Any: if not PatchTensorFlowEager._current_task: return PatchTensorFlowEager.__original_fn_scalar(writer, step, tag, value, name, **kwargs) event_writer = PatchTensorFlowEager._get_event_writer(writer) if event_writer and isinstance(step, int) or hasattr(step, "numpy"): try: event_writer._add_scalar(tag=str(tag), step=tweak_step(step), scalar_data=value.numpy()) except Exception as ex: LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex)) elif event_writer: def 
_report_summary_op( a_writer: Any, a_step: Union[int, Any], a_tag: Any, a_value: Any, a_name: Optional[str] = None, **_: Any ) -> None: if isinstance(a_step, int) or hasattr(a_step, "numpy"): try: str_tag = a_tag.numpy() str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag) event_writer._add_scalar( tag=str_tag, step=tweak_step(step), scalar_data=a_value.numpy(), ) except Exception as a_ex: LoggerRoot.get_base_logger(TensorflowBinding).warning( "_report_summary_op: {}".format(str(a_ex)) ) # this is a mix of eager and graph execution try: from tensorflow.python.eager import context as _context if not _context.executing_eagerly(): from tensorflow import py_function # just creating the operator is enough (for some reason) # to make sure it is added into the execution tree. # the operator itself, will do the reporting to the backend py_function( _report_summary_op, inp=[writer, step, tag, value, name], Tout=[], ) except Exception as ex: LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex)) return PatchTensorFlowEager.__original_fn_scalar(writer, step, tag, value, name, **kwargs) @staticmethod def _write_hist_summary(writer: Any, step: Union[int, Any], tag: Any, values: Any, name: Any, **kwargs: Any) -> Any: if not PatchTensorFlowEager._current_task: return PatchTensorFlowEager.__original_fn_hist(writer, step, tag, values, name, **kwargs) event_writer = PatchTensorFlowEager._get_event_writer(writer) if event_writer and isinstance(step, int) or hasattr(step, "numpy"): try: event_writer._add_histogram(tag=str(tag), step=tweak_step(step), hist_data=values.numpy()) except Exception as ex: LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex)) elif event_writer: def _report_summary_op( a_writer: Any, a_step: Union[int, Any], a_tag: Any, a_value: Any, a_name: Optional[str] = None, **_: Any ) -> None: if isinstance(a_step, int) or hasattr(a_step, "numpy"): try: str_tag = a_tag.numpy() str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag) event_writer._add_histogram( tag=str_tag, step=tweak_step(a_step), hist_data=a_value.numpy(), ) except Exception as a_ex: LoggerRoot.get_base_logger(TensorflowBinding).warning( "_report_summary_op: {}".format(str(a_ex)) ) # this is a mix of eager and graph execution try: from tensorflow.python.eager import context as _context if not _context.executing_eagerly(): from tensorflow import py_function # just creating the operator is enough (for some reason) # to make sure it is added into the execution tree. 
# the operator itself, will do the reporting to the backend py_function( _report_summary_op, inp=[writer, step, tag, values, name], Tout=[], ) except Exception as ex: LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex)) return PatchTensorFlowEager.__original_fn_hist(writer, step, tag, values, name, **kwargs) @staticmethod def _write_image_summary( writer: Any, step: Union[int, Any], tag: Any, tensor: Any, bad_color: Any, max_images: Any, name: Any, **kwargs: Any ) -> Any: if not PatchTensorFlowEager._current_task: return PatchTensorFlowEager.__original_fn_image( writer, step, tag, tensor, bad_color, max_images, name, **kwargs ) event_writer = PatchTensorFlowEager._get_event_writer(writer) if event_writer and isinstance(step, int) or hasattr(step, "numpy"): try: PatchTensorFlowEager._add_image_event_helper( event_writer, img_data_np=tensor.numpy(), tag=tag, step=step, **kwargs ) except Exception as ex: LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex)) elif event_writer: def _report_summary_op( a_writer: Any, a_step: Union[int, Any], a_tag: Any, a_tensor: Any, a_bad_color: Any, a_max_images: Any, a_name: Optional[str] = None, **_: Any ) -> None: if isinstance(a_step, int) or hasattr(a_step, "numpy"): try: str_tag = a_tag.numpy() str_tag = str_tag.decode() if isinstance(str_tag, bytes) else str(str_tag) PatchTensorFlowEager._add_image_event_helper( event_writer, img_data_np=a_tensor.numpy(), tag=str_tag, step=a_step, **kwargs ) except Exception as a_ex: LoggerRoot.get_base_logger(TensorflowBinding).warning( "_report_summary_op: {}".format(str(a_ex)) ) # this is a mix of eager and graph execution try: from tensorflow.python.eager import context as _context if not _context.executing_eagerly(): from tensorflow import py_function # just creating the operator is enough (for some reason) # to make sure it is added into the execution tree. # the operator itself, will do the reporting to the backend py_function( _report_summary_op, inp=[writer, step, tag, tensor, bad_color, max_images, name], Tout=[], ) except Exception as ex: LoggerRoot.get_base_logger(TensorflowBinding).warning(str(ex)) return PatchTensorFlowEager.__original_fn_image( writer, step, tag, tensor, bad_color, max_images, name, **kwargs ) @staticmethod def _add_image_event_helper( event_writer: EventTrainsWriter, img_data_np: np.ndarray, tag: str, step: Union[int, np.ndarray], **kwargs: Any ) -> None: if img_data_np.ndim == 1 and img_data_np.size >= 3 and (len(img_data_np[0]) < 10 and len(img_data_np[1]) < 10): # this is just for making sure these are actually valid numbers width = int(img_data_np[0].decode()) # noqa: F841 height = int(img_data_np[1].decode()) # noqa: F841 for i in range(2, img_data_np.size): img_data = { "width": None, "height": None, "colorspace": "RGB", "encodedImageString": img_data_np[i], } image_tag = str(tag) + "/sample_{}".format(i - 2) if img_data_np.size > 3 else str(tag) event_writer._add_image(tag=image_tag, step=tweak_step(step), img_data=img_data) else: event_writer._add_image_numpy( tag=str(tag), step=tweak_step(step), img_data_np=img_data_np, max_keep_images=kwargs.get("max_images"), ) @staticmethod def _nothing_op(*_: Any, **__: Any) -> Any: """Convenient else branch for when summaries do not record.""" from tensorflow.python.framework import constant_op return constant_op.constant(False) # noinspection PyPep8Naming,SpellCheckingInspection
PatchTensorFlowEager
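In practice this patching is transparent: once a ClearML task is initialised, eager tf.summary writes like the ones intercepted above appear in the task's scalars. A hedged sketch follows; the project/task names and logdir are assumptions, and a configured ClearML server is required.

import tensorflow as tf
from clearml import Task

task = Task.init(project_name="examples", task_name="tf-eager-logging")

writer = tf.summary.create_file_writer("./tb_logs")
with writer.as_default():
    for step in range(10):
        # Forwarded to the ClearML logger by the patched summary ops.
        tf.summary.scalar("loss", 1.0 / (step + 1), step=step)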
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 41649, "end": 42002 }
class ____(sgqlc.types.Enum):
    """The possible values for the notification restriction setting.

    Enumeration Choices:

    * `DISABLED`: The setting is disabled for the owner.
    * `ENABLED`: The setting is enabled for the owner.
    """

    __schema__ = github_schema
    __choices__ = ("DISABLED", "ENABLED")
NotificationRestrictionSettingValue
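The class follows sgqlc's generated-schema pattern; below is a standalone sketch of the same shape against a throwaway schema (the schema and enum names here are hypothetical, not part of the GitHub schema).

import sgqlc.types

demo_schema = sgqlc.types.Schema()


class FeatureToggle(sgqlc.types.Enum):
    __schema__ = demo_schema
    __choices__ = ("DISABLED", "ENABLED")


print(FeatureToggle.__choices__)  # ('DISABLED', 'ENABLED')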
python
getsentry__sentry
src/sentry/quotas/base.py
{ "start": 10153, "end": 27055 }
class ____(Service): """ Quotas handle tracking a project's usage and respond whether or not a project has been configured to throttle incoming data if they go beyond the specified quota. Quotas can specify a window to be tracked in, such as per minute or per hour. Additionally, quotas allow to specify the data categories they apply to, for example error events or attachments. For more information on quota parameters, see ``QuotaConfig``. To retrieve a list of active quotas, use ``quotas.get_quotas``. Also, to check the current status of quota usage, call ``quotas.get_usage``. """ __all__ = ( "get_abuse_quotas", "is_rate_limited", "validate", "refund", "get_event_retention", "get_quotas", "get_blended_sample_rate", "get_transaction_sampling_tier_for_volume", "check_accept_monitor_checkin", "update_monitor_slug", ) def __init__(self, **options): pass def get_quotas( self, project: Project, key: ProjectKey | None = None, keys: Iterable[ProjectKey] | None = None, ) -> list[QuotaConfig]: """ Returns a quotas for the given project and its organization. The return values are instances of ``QuotaConfig``. See its documentation for more information about the values. :param project: The project instance that is used to determine quotas. :param key: A project project key to obtain quotas for. If omitted, only project and organization quotas are used. :param keys: Similar to ``key``, except for multiple keys. """ return [] def is_rate_limited(self, project, key=None): """ Checks whether any of the quotas in effect for the given project and project key has been exceeded and records consumption of the quota. By invoking this method, the caller signals that data is being ingested and needs to be counted against the quota. This increment happens atomically if none of the quotas have been exceeded. Otherwise, a rate limit is returned and data is not counted against the quotas. When an event or any other data is dropped after ``is_rate_limited`` has been called, use ``quotas.refund``. If no key is specified, then only organization-wide and project-wide quotas are checked. If a key is specified, then key-quotas are also checked. The return value is a subclass of ``RateLimit``: - ``RateLimited``, if at least one quota has been exceeded. The event should not be ingested by the caller, and none of the quotas have been counted. - ``NotRateLimited``, if consumption is within all quotas. Data must be ingested by the caller, and the counters for all counters have been incremented. :param project: The project instance that is used to determine quotas. :param key: A project key to obtain quotas for. If omitted, only project and organization quotas are used. """ return NotRateLimited() def refund(self, project, key=None, timestamp=None, category=None, quantity=None): """ Signals event rejection after ``quotas.is_rate_limited`` has been called successfully, and refunds the previously consumed quota. :param project: The project that the dropped data belonged to. :param key: The project key that was used to ingest the data. If omitted, then only project and organization quotas are refunded. :param timestamp: The timestamp at which data was ingested. This is used to determine the correct quota window to refund the previously consumed data to. :param category: The data category of the item to refund. This is used to determine the quotas that should be refunded. Defaults to ``DataCategory.ERROR``. :param quantity: The quantity to refund. Defaults to ``1``, which is the only value that should be used for events. 
For attachments, this should be set to the size of the attachment in bytes. """ def get_event_retention(self, organization, category: DataCategory | None = None, **kwargs): """ Returns the retention for events in the given organization in days. Returns ``None`` if events are to be stored indefinitely. :param organization: The organization model. :param category: Return the retention policy for this data category. If this is not given, return the org-level policy. """ return _limit_from_settings(options.get("system.event-retention-days")) def get_downsampled_event_retention( self, organization, category: DataCategory | None = None, **kwargs ): """ Returns the retention for downsampled events in the given organization in days. Returning ``0`` means downsampled event retention will default to the value of ``get_event_retention``. """ return 0 def get_retentions( self, organization: Organization, **kwargs ) -> Mapping[DataCategory, RetentionSettings]: return {} def validate(self): """ Validates that the quota service is operational. """ def get_key_quota(self, key): from sentry import features # XXX(epurkhiser): Avoid excessive feature manager checks (which can be # expensive depending on feature handlers) for project rate limits. # This happens on /store. cache_key = f"project:{key.project.id}:features:rate-limits" has_rate_limits = cache.get(cache_key) if has_rate_limits is None: has_rate_limits = features.has("projects:rate-limits", key.project) cache.set(cache_key, has_rate_limits, 600) if not has_rate_limits: return (None, None) limit, window = key.rate_limit return _limit_from_settings(limit), window def get_abuse_quotas(self, org): # Per-project abuse quotas for errors, transactions, attachments, sessions. global_abuse_window = options.get("project-abuse-quota.window") abuse_quotas = [ AbuseQuota( id="pae", option="project-abuse-quota.error-limit", compat_option_org="sentry:project-error-limit", compat_option_sentry="getsentry.rate-limit.project-errors", categories=DataCategory.error_categories(), scope=QuotaScope.PROJECT, ), AbuseQuota( id="paa", option="project-abuse-quota.attachment-limit", categories=[DataCategory.ATTACHMENT], scope=QuotaScope.PROJECT, ), AbuseQuota( id="paai", option="project-abuse-quota.attachment-item-limit", categories=[DataCategory.ATTACHMENT_ITEM], scope=QuotaScope.PROJECT, ), AbuseQuota( id="pas", option="project-abuse-quota.session-limit", categories=[DataCategory.SESSION], scope=QuotaScope.PROJECT, ), ] abuse_quotas.extend(build_metric_abuse_quotas()) # XXX: These reason codes are hardcoded in getsentry: # as `RateLimitReasonLabel.PROJECT_ABUSE_LIMIT` and `RateLimitReasonLabel.ORG_ABUSE_LIMIT`. # Don't change it here. If it's changed in getsentry, it needs to be synced here. reason_codes = { QuotaScope.ORGANIZATION: "org_abuse_limit", QuotaScope.PROJECT: "project_abuse_limit", QuotaScope.GLOBAL: "global_abuse_limit", } for quota in abuse_quotas: limit: int | None = 0 abuse_window = global_abuse_window # compat options were previously present in getsentry # for errors and transactions. The first one is the org # option for overriding the second one (global option). # For now, these deprecated ones take precedence over the new # to preserve existing behavior. 
if quota.compat_option_org: limit = org.get_option(quota.compat_option_org) if not limit and quota.compat_option_sentry: limit = options.get(quota.compat_option_sentry) if not limit: limit = org.get_option(quota.option) if not limit: limit = options.get(quota.option) limit = _limit_from_settings(limit) if limit is None: # Unlimited. continue # Negative limits in config mean a reject-all quota. if limit < 0: yield QuotaConfig( limit=0, scope=quota.scope, categories=quota.categories, reason_code="disabled", namespace=quota.namespace, ) else: yield QuotaConfig( id=quota.id, limit=limit * abuse_window, scope=quota.scope, categories=quota.categories, window=abuse_window, reason_code=reason_codes[quota.scope], namespace=quota.namespace, ) def get_monitor_quota(self, project): from sentry.monitors.rate_limit import get_project_monitor_quota return get_project_monitor_quota(project) def get_blended_sample_rate( self, project: Project | None = None, organization_id: int | None = None ) -> float | None: """ Returns the blended sample rate for an org based on the package that they are currently on. Returns ``None`` if the organization doesn't have dynamic sampling. The reasoning for having two params as `Optional` is because this method was first designed to work with `Project` but due to requirements change the `Organization` was needed and since we can get the `Organization` from the `Project` we allow one or the other to be passed. :param project: The project model. :param organization_id: The organization id. """ def get_transaction_sampling_tier_for_volume( self, organization_id: int, volume: int ) -> tuple[int, float] | None: """ Returns the transaction sampling tier closest to a specific volume. The organization_id is required because the tier is based on the organization's plan, and we have to check whether the organization has dynamic sampling. :param organization_id: The organization id. :param volume: The volume of transaction of the given project. """ def check_assign_monitor_seat(self, monitor: Monitor) -> SeatAssignmentResult: """ Determines if a monitor can be assigned a seat. If it is not possible to assign a monitor a seat, a reason will be included in the response """ return SeatAssignmentResult(assignable=True) def check_assign_seat( self, data_category: DataCategory, seat_object: SeatObject ) -> SeatAssignmentResult: """ Determines if an assignable seat object can be assigned a seat. If it is not possible to assign a monitor a seat, a reason will be included in the response. """ return SeatAssignmentResult(assignable=True) def check_assign_monitor_seats(self, monitor: list[Monitor]) -> SeatAssignmentResult: """ Determines if a list of monitor can be assigned seat. If it is not possible to assign a seat to all given monitors, a reason will be included in the response """ return SeatAssignmentResult(assignable=True) def check_assign_seats( self, data_category: DataCategory, seat_objects: Sequence[SeatObject] ) -> SeatAssignmentResult: """ Determines if a list of assignable seat objects can be assigned seat. If it is not possible to assign a seat to all given objects, a reason will be included in the response. """ return SeatAssignmentResult(assignable=True) def assign_monitor_seat(self, monitor: Monitor) -> int: """ Assigns a monitor a seat if possible, resulting in a Outcome.ACCEPTED. If the monitor cannot be assigned a seat it will be Outcome.RATE_LIMITED. 
""" from sentry.utils.outcomes import Outcome return Outcome.ACCEPTED def assign_seat(self, data_category: DataCategory, seat_object: SeatObject) -> int: """ Assigns a seat to an object if possible, resulting in Outcome.ACCEPTED. If the object cannot be assigned a seat it will be Outcome.RATE_LIMITED. """ from sentry.utils.outcomes import Outcome return Outcome.ACCEPTED def disable_monitor_seat(self, monitor: Monitor) -> None: """ Removes a monitor from it's assigned seat. """ def disable_seat(self, data_category: DataCategory, seat_object: SeatObject) -> None: """ Disables an assigned seat. """ def remove_seat(self, data_category: DataCategory, seat_object: SeatObject) -> None: """ Removes an assigned seat. """ def check_accept_monitor_checkin(self, project_id: int, monitor_slug: str): """ Will return a `PermitCheckInStatus`. """ from sentry.monitors.constants import PermitCheckInStatus return PermitCheckInStatus.ACCEPT def update_monitor_slug(self, previous_slug: str, new_slug: str, project_id: int): """ Updates a monitor seat assignment's slug. """ def should_emit_profile_duration_outcome( self, organization: Organization, profile: Profile ) -> bool: """ Determines if the profile duration outcome should be emitted. """ return True def on_role_change( self, organization: Organization, organization_member: OrganizationMember, previous_role: str, new_role: str, ) -> None: """ Called when an organization member's role is changed. This is used to run any Subscription logic that needs to happen when a role is changed. Args: organization: The organization the member belongs to organization_member: The member whose role is being changed previous_role: The member's role before the change new_role: The member's new role after the change """ pass def has_available_reserved_budget(self, org_id: int, data_category: DataCategory) -> bool: """ Determines if the organization has enough reserved budget for the given data category operation. """ return True def record_seer_run( self, org_id: int, project_id: int, data_category: DataCategory, seat_object: SeatObject | None = None, ) -> None: """ Records a seer run for an organization. """ return def has_profile_duration_quota(self, org_id: int, data_category: DataCategory) -> bool: """ Determines if the organization has quota available for the given data category. Args: org_id: The ID of the organization to check data_category: The data category to check quota for. Returns: bool: True if the organization has quota available, False otherwise. Always False if data category is not a profile duration category. """ return True def get_dashboard_limit(self, org_id: int) -> int: """ Returns the maximum number of dashboards allowed for the organization's plan type. """ return -1 def get_metric_detector_limit(self, org_id: int) -> int: """ Returns the maximum number of detectors allowed for the organization's plan type. """ return -1 def check_seer_quota( self, org_id: int, data_category: DataCategory, seat_object: SeatObject | None = None ) -> bool: """ Checks if the organization has access to Seer for the given data category and seat object. """ return True
Quota
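The Quota service sample above is an interface whose methods default to no-ops; a real backend tracks consumption per time window, hands back rate-limit decisions, and refunds quota for data that is later dropped. The sketch below is not Sentry's implementation — the InMemoryQuotaBackend name, the fixed-window counting, and the RateLimitDecision result type are illustrative assumptions that only mirror the is_rate_limited/refund contract described in the docstrings.

import time
from collections import defaultdict
from dataclasses import dataclass


@dataclass
class RateLimitDecision:
    limited: bool
    retry_after: float | None = None


class InMemoryQuotaBackend:
    """Toy fixed-window quota tracker (illustrative only, not Sentry's backend)."""

    def __init__(self, limit: int, window_seconds: int) -> None:
        self.limit = limit
        self.window = window_seconds
        self._counts: dict[tuple[str, int], int] = defaultdict(int)

    def _bucket(self, now: float) -> int:
        return int(now // self.window)

    def is_rate_limited(self, project_id: str, now: float | None = None) -> RateLimitDecision:
        now = time.time() if now is None else now
        key = (project_id, self._bucket(now))
        if self._counts[key] >= self.limit:
            retry_after = (self._bucket(now) + 1) * self.window - now
            return RateLimitDecision(limited=True, retry_after=retry_after)
        self._counts[key] += 1  # count the item only when it is accepted
        return RateLimitDecision(limited=False)

    def refund(self, project_id: str, timestamp: float, quantity: int = 1) -> None:
        """Give back quota for data that was dropped after being counted."""
        key = (project_id, self._bucket(timestamp))
        self._counts[key] = max(0, self._counts[key] - quantity)


backend = InMemoryQuotaBackend(limit=2, window_seconds=60)
assert not backend.is_rate_limited("proj", now=0.0).limited
assert not backend.is_rate_limited("proj", now=1.0).limited
assert backend.is_rate_limited("proj", now=2.0).limited
backend.refund("proj", timestamp=1.0)
assert not backend.is_rate_limited("proj", now=3.0).limited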
python
getsentry__sentry
src/sentry/integrations/jira_server/handlers/jira_server_handler.py
{ "start": 409, "end": 568 }
class ____(TicketingActionHandler): group = ActionHandler.Group.TICKET_CREATION provider_slug = IntegrationProviderSlug.JIRA_SERVER
JiraServerActionHandler
python
ray-project__ray
python/ray/llm/_internal/serve/observability/usage_telemetry/usage.py
{ "start": 9464, "end": 12664 }
class ____: """Hardware usage class to report telemetry.""" def __init__(self, get_hardware_fn: Callable = get_hardware_usages_to_report): self._get_hardware_fn = get_hardware_fn def infer_gpu_from_hardware(self) -> str: """Infer the GPU type from the hardware when the accelerator type on llm config is not specified. Iterate through all the hardware recorded on the cluster and return the first ray-compatible accelerator as the GPU type used for the deployment. If not, return `UNSPECIFIED` as the default GPU type. """ from ray.llm._internal.serve.core.configs.llm_config import GPUType all_accelerator_types = [t.value for t in GPUType] gcs_client = ray.experimental.internal_kv.internal_kv_get_gcs_client() hardwares = self._get_hardware_fn(gcs_client) for hardware in hardwares: if hardware in all_accelerator_types: return hardware return DEFAULT_GPU_TYPE def push_telemetry_report_for_all_models( all_models: Optional[Sequence["LLMConfig"]] = None, get_lora_model_func: Callable = get_lora_model_ids, get_hardware_fn: Callable = get_hardware_usages_to_report, ): """Push telemetry report for all models.""" if not all_models: return for model in all_models: use_lora = ( model.lora_config is not None and model.lora_config.dynamic_lora_loading_path is not None ) initial_num_lora_adapters = 0 if use_lora: lora_model_ids = get_lora_model_func( dynamic_lora_loading_path=model.lora_config.dynamic_lora_loading_path, base_model_id=model.model_id, ) initial_num_lora_adapters = len(lora_model_ids) use_autoscaling = model.deployment_config.get("autoscaling_config") is not None num_replicas, min_replicas, max_replicas = 1, 1, 1 if use_autoscaling: from ray.serve.config import AutoscalingConfig autoscaling_config = AutoscalingConfig( **model.deployment_config["autoscaling_config"] ) num_replicas = ( autoscaling_config.initial_replicas or autoscaling_config.min_replicas ) min_replicas = autoscaling_config.min_replicas max_replicas = autoscaling_config.max_replicas engine_config = model.get_engine_config() hardware_usage = HardwareUsage(get_hardware_fn) telemetry_model = TelemetryModel( model_architecture=model.model_architecture, num_replicas=num_replicas, use_json_mode=True, use_lora=use_lora, initial_num_lora_adapters=initial_num_lora_adapters, use_autoscaling=use_autoscaling, min_replicas=min_replicas, max_replicas=max_replicas, tensor_parallel_degree=engine_config.tensor_parallel_degree, gpu_type=model.accelerator_type or hardware_usage.infer_gpu_from_hardware(), num_gpus=engine_config.num_devices, ) _push_telemetry_report(telemetry_model)
HardwareUsage
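The HardwareUsage sample above takes its hardware-lookup function as a constructor argument, which keeps the GPU-inference logic testable without a live Ray cluster. A minimal standalone sketch of that injection pattern follows; the HardwareReport class, the detect_accelerators stub, and the KNOWN_GPUS set are made-up stand-ins, not Ray's actual names.

from typing import Callable, Sequence


def detect_accelerators() -> Sequence[str]:
    # Hypothetical default lookup; the real module queries the cluster instead.
    return []


KNOWN_GPUS = {"A10G", "L4", "A100", "H100"}
DEFAULT_GPU_TYPE = "UNSPECIFIED"


class HardwareReport:
    """Resolve a GPU type for telemetry, with the lookup function injected for testability."""

    def __init__(self, get_hardware_fn: Callable[[], Sequence[str]] = detect_accelerators) -> None:
        self._get_hardware_fn = get_hardware_fn

    def infer_gpu_type(self) -> str:
        # Return the first accelerator we recognise, otherwise a sentinel value.
        for hardware in self._get_hardware_fn():
            if hardware in KNOWN_GPUS:
                return hardware
        return DEFAULT_GPU_TYPE


# Production-style usage falls back to the default lookup...
assert HardwareReport().infer_gpu_type() == "UNSPECIFIED"
# ...while a test can inject a stub instead of touching a real cluster.
assert HardwareReport(lambda: ["CPU", "L4"]).infer_gpu_type() == "L4"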
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 1303371, "end": 1312880 }
class ____(TextDef): """ FieldOrDatumDefWithConditionStringDatumDefText schema wrapper. Parameters ---------- bandPosition : float Relative position on a band of a stacked, binned, time unit, or band scale. For example, the marks will be positioned at the beginning of the band if set to ``0``, and at the middle of the band if set to ``0.5``. condition : dict, :class:`ConditionalValueDefTextExprRef`, :class:`ConditionalParameterValueDefTextExprRef`, :class:`ConditionalPredicateValueDefTextExprRef`, Sequence[dict, :class:`ConditionalValueDefTextExprRef`, :class:`ConditionalParameterValueDefTextExprRef`, :class:`ConditionalPredicateValueDefTextExprRef`] One or more value definition(s) with `a parameter or a test predicate <https://vega.github.io/vega-lite/docs/condition.html>`__. **Note:** A field definition's ``condition`` property can only contain `conditional value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__ since Vega-Lite only allows at most one encoded field per encoding channel. datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None A constant value in data domain. format : str, dict, :class:`Dict`, :class:`Format`, :class:`TimeFormatSpecifier` The text format specifier for formatting number and date/time in labels of guides (axes, legends, headers) and text marks. If the format type is ``"number"`` (e.g., for quantitative fields), this is a D3's `number format pattern string <https://github.com/d3/d3-format#locale_format>`__. If the format type is ``"time"`` (e.g., for temporal fields), this is either: a) D3's `time format pattern <https://d3js.org/d3-time-format#locale_format>`__ if you desire to set a static time format. b) `dynamic time format specifier object <https://vega.github.io/vega-lite/docs/format.html#dynamic-time-format>`__ if you desire to set a dynamic time format that uses different formats depending on the granularity of the input date (e.g., if the date lies on a year, month, date, hour, etc. boundary). When used with a `custom formatType <https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__, this value will be passed as ``format`` alongside ``datum.value`` to the registered function. **Default value:** Derived from `numberFormat <https://vega.github.io/vega-lite/docs/config.html#format>`__ config for number format and from `timeFormat <https://vega.github.io/vega-lite/docs/config.html#format>`__ config for time format. formatType : str The format type for labels. One of ``"number"``, ``"time"``, or a `registered custom format type <https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__. **Default value:** * ``"time"`` for temporal fields and ordinal and nominal fields with ``timeUnit``. * ``"number"`` for quantitative fields as well as ordinal and nominal fields without ``timeUnit``. title : str, :class:`Text`, Sequence[str], None A title for the field. If ``null``, the title will be removed. **Default value:** derived from the field's name and transformation function (``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function, the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the field is binned or has a time unit applied, the applied function is shown in parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``). Otherwise, the title is simply the field name. 
**Notes**: 1) You can customize the default field title format by providing the `fieldTitle <https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle function via the compile function's options <https://vega.github.io/vega-lite/usage/compile.html#field-title>`__. 2) If both field definition's ``title`` and axis, header, or legend ``title`` are defined, axis/header/legend title will be used. type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson'] The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or ``"nominal"``) for the encoded field or constant value (``datum``). It can also be a ``"geojson"`` type for encoding `'geoshape' <https://vega.github.io/vega-lite/docs/geoshape.html>`__. Vega-Lite automatically infers data types in many cases as discussed below. However, type is required for a field if: (1) the field is not nominal and the field encoding has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal scale for a field with ``bin`` or ``timeUnit``. **Default value:** 1) For a data ``field``, ``"nominal"`` is the default data type unless the field encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or ``timeUnit`` that satisfies the following criteria: * ``"quantitative"`` is the default type if (1) the encoded field contains ``bin`` or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is ``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__. * ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit`` or (2) the specified scale type is a time or utc scale * ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort order <https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__, (2) the specified scale type is an ordinal/point/band scale, or (3) the encoding channel is ``order``. 2) For a constant value in data domain (``datum``): * ``"quantitative"`` if the datum is a number * ``"nominal"`` if the datum is a string * ``"temporal"`` if the datum is `a date time object <https://vega.github.io/vega-lite/docs/datetime.html>`__ **Note:** * Data ``type`` describes the semantics of the data rather than the primitive data types (number, string, etc.). The same primitive data type can have different types of measurement. For example, numeric data can represent quantitative, ordinal, or nominal data. * Data values for a temporal field can be either a date-time string (e.g., ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a timestamp number (e.g., ``1552199579097``). * When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the ``type`` property can be either ``"quantitative"`` (for using a linear bin scale) or `"ordinal" (for using an ordinal bin scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. * When using with `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal" (for using an ordinal scale) <https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__. 
* When using with `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property refers to the post-aggregation data type. For example, we can calculate count ``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct", "field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``. * Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have ``type`` as they must have exactly the same type as their primary channels (e.g., ``x``, ``y``). **See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__ documentation. """ _schema = { "$ref": "#/definitions/FieldOrDatumDefWithCondition<StringDatumDef,Text>" } def __init__( self, bandPosition: Optional[float] = Undefined, condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined, datum: Optional[ Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T ] = Undefined, format: Optional[str | SchemaBase | Map] = Undefined, formatType: Optional[str] = Undefined, title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined, type: Optional[SchemaBase | Type_T] = Undefined, **kwds, ): super().__init__( bandPosition=bandPosition, condition=condition, datum=datum, format=format, formatType=formatType, title=title, type=type, **kwds, )
FieldOrDatumDefWithConditionStringDatumDefText
python
run-llama__llama_index
llama-index-core/llama_index/core/llama_pack/base.py
{ "start": 87, "end": 293 }
class ____: @abstractmethod def get_modules(self) -> Dict[str, Any]: """Get modules.""" @abstractmethod def run(self, *args: Any, **kwargs: Any) -> Any: """Run."""
BaseLlamaPack
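The BaseLlamaPack sample above defines a two-method contract (get_modules and run) via @abstractmethod. The sketch below shows how a concrete subclass fills in such a contract; TemplatePack and EchoPack are hypothetical names, and the sketch additionally inherits from abc.ABC so instantiating an incomplete subclass fails, which the original does not explicitly enforce.

from abc import ABC, abstractmethod
from typing import Any, Dict


class TemplatePack(ABC):
    """Same shape as the pack interface above: expose modules and a single entry point."""

    @abstractmethod
    def get_modules(self) -> Dict[str, Any]:
        """Return the components the pack is built from."""

    @abstractmethod
    def run(self, *args: Any, **kwargs: Any) -> Any:
        """Execute the pack end to end."""


class EchoPack(TemplatePack):
    """Trivial concrete pack: its only 'module' is a prefix string."""

    def __init__(self, prefix: str = ">> ") -> None:
        self.prefix = prefix

    def get_modules(self) -> Dict[str, Any]:
        return {"prefix": self.prefix}

    def run(self, text: str) -> str:
        return self.prefix + text


pack = EchoPack()
assert pack.get_modules() == {"prefix": ">> "}
assert pack.run("hello") == ">> hello"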
python
apache__airflow
providers/google/tests/unit/google/cloud/links/test_managed_kafka.py
{ "start": 2271, "end": 2615 }
class ____: def test_class_attributes(self): assert ApacheKafkaClusterLink.key == EXPECTED_MANAGED_KAFKA_CLUSTER_LINK_KEY assert ApacheKafkaClusterLink.name == EXPECTED_MANAGED_KAFKA_CLUSTER_LINK_NAME assert ApacheKafkaClusterLink.format_str == EXPECTED_MANAGED_KAFKA_CLUSTER_LINK_FORMAT_STR
TestApacheKafkaClusterLink
python
pytorch__pytorch
torch/_higher_order_ops/while_loop.py
{ "start": 23837, "end": 27804 }
class ____(HigherOrderOperator):
    """
    while_loop_stack_output is a variant of while_loop that returns a stack of outputs.
    Its semantics can be illustrated using Python code as:

        def while_loop_stack_output(cond_fn, body_fn, carried_inputs, additional_inputs):
            outs = []
            while cond_fn(*carried_inputs, *additional_inputs):
                out = body_fn(*carried_inputs, *additional_inputs)
                outs.append(out)
                carried_inputs = out
            return torch.stack(outs)

    It's useful for supporting autograd of while_loop.
    """

    def __init__(self) -> None:
        super().__init__("while_loop_stack_output")

    def __call__(
        self,
        cond_fn: Callable,
        body_fn: Callable,
        carried_inputs: tuple[Union[torch.Tensor, int, float, bool]],
        additional_inputs: tuple[Union[torch.Tensor, torch.SymInt, int], ...],
        /,
    ):
        if not isinstance(carried_inputs, (tuple, list)):
            raise RuntimeError(
                f"carried_inputs must be a tuple or list, got {type(carried_inputs)}"
            )
        if not isinstance(additional_inputs, (tuple, list)):
            raise RuntimeError(
                f"additional_inputs must be a tuple or list, got {type(additional_inputs)}"
            )
        validate_subgraph_args_types(carried_inputs)
        validate_subgraph_args_types(additional_inputs)
        return super().__call__(cond_fn, body_fn, carried_inputs, additional_inputs)


# Note [while_loop autograd]
# Consider the following while_loop that can be visualized as:
#  additional_inputs
#  ┌─────┬─────┼─────┬─────┐
#  |     |     |     |     |
#  ↓     ↓     ↓     ↓     ↓
# x ──→ y0 ─→ y1 ─→ y2 ─→ y3 ─→ y4
#
# The backward can be visualized as follows:
#
#        g_additional_inputs
#    ┌──────┬──────┼──────┬──────┐
#    |      |      |      |      |
#    |      |      |      |      |
# gx <── gy0 <─ gy1 <─ gy2 <─ gy3 <─ gy4
#
# We can compute gx using chain rule:
#
#   gx = gy0 * bw(y0, x),
#
# where gy0 denotes the gradient of loss with respect to y0, and bw(y0, x) denotes the gradient of y0 with
# respect to x. Note that bw can be computed from forward body_fn easily using torch.autograd.grad.
# We could substitute the unknowns gy0, gy1, ..., with the chain rule until gy4:
#
#   gx = gy1 * bw(y1, y0) * bw(y0, x)
#      = gy2 * bw(y2, y1) * bw(y1, y0) * bw(y0, x)
#      = ...
#      = gy4 * bw(y4, y3) * bw(y3, y2) * bw(y2, y1) * bw(y1, y0) * bw(y0, x)
#
# Since gy4 is the gradient of the final output, which is given as the backward input, we've got a formula
# to compute gx. An abbreviation for the formula is: gy4 * bw43210x
#
# In a similar way, we can compute g_additional_inputs using the chain rule:
#
#   g_additional_inputs = gy0 * bw(y0, addi) + gy1 * bw(y1, addi) + gy2 * bw(y2, addi) + ... + gy4 * bw(y4, addi)
#
# Notice that gy0 = gy4 * bw43210, gy1 = gy4 * bw4321, etc., so we now also get a formula for g_additional_inputs.
#
# Implementation:
# The idea of the implementation is to construct a while_loop to calculate both gx and g_additional_inputs.
# Specifically, we can implement the backward of while_loop as follows:
#
# def cond_fn(idx, grad_carries, grad_additional_inputs, fw_additional_inputs, fw_inps):
#     return idx < fw_inps.size(0)
#
# def body_fn(idx, grad_carries, grad_additional_inputs, fw_additional_inputs, fw_inps):
#     reversed_idx = fw_inps.size(0) - 1 - idx
#     next_grad_carry, next_grad_additional_inputs = bw(fw_inps[reversed_idx], fw_additional_inputs, grad_carries)
#     return idx + 1, next_grad_carry, next_grad_additional_inputs + grad_additional_inputs
#
# idx = 0
# init_grad_carries = grads
# init_grad_additional_inputs = torch.zeros_like(g_additional_inputs)
# fw_inps = torch.cat([ctx.fw_carried_inputs, fw_outputs[:-1]])
# while_loop(cond_fn, body_fn, (idx, init_grad_carries, init_grad_additional_inputs,), (fw_additional_inputs, fw_inps))
WhileLoopStackOutputOp
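The docstring of the sample above describes while_loop_stack_output in terms of plain Python. Assuming PyTorch is installed, the reference function below replays those semantics eagerly — loop while the condition holds, record each carried state, and stack per output — without any of the HigherOrderOperator dispatch machinery; the function name and the toy doubling loop are illustrative.

import torch


def while_loop_stack_output_ref(cond_fn, body_fn, carried_inputs, additional_inputs):
    """Eager reference: run the loop, record every carried state, stack per output."""
    outs = []
    carried = tuple(carried_inputs)
    while bool(cond_fn(*carried, *additional_inputs)):
        carried = body_fn(*carried, *additional_inputs)
        outs.append(carried)
    # Stack each carried output across iterations: result[i] has shape (num_iters, *outs[0][i].shape).
    return tuple(torch.stack(col) for col in zip(*outs))


# Double a counter tensor until it reaches 8: iterates for x = 1, 2, 4 -> outputs 2, 4, 8.
cond = lambda x: x < 8
body = lambda x: (x * 2,)
(stacked,) = while_loop_stack_output_ref(cond, body, (torch.tensor(1),), ())
assert stacked.tolist() == [2, 4, 8]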
python
getsentry__sentry
tests/sentry/api/endpoints/test_rule_snooze.py
{ "start": 14190, "end": 17978 }
class ____(BaseRuleSnoozeTest): endpoint = "sentry-api-0-rule-snooze" method = "delete" def test_delete_issue_alert_rule_mute_myself(self) -> None: """Test that a user can unsnooze a rule they've snoozed for just themselves""" self.snooze_rule(user_id=self.user.id, owner_id=self.user.id, rule=self.issue_alert_rule) self.get_success_response( self.organization.slug, self.project.slug, self.issue_alert_rule.id, target="me", status_code=204, ) assert not RuleSnooze.objects.filter( rule=self.issue_alert_rule.id, user_id=self.user.id ).exists() def test_delete_issue_alert_rule_mute_everyone(self) -> None: """Test that a user can unsnooze a rule they've snoozed for everyone""" self.snooze_rule(owner_id=self.user.id, rule=self.issue_alert_rule) self.get_success_response( self.organization.slug, self.project.slug, self.issue_alert_rule.id, target="everyone", status_code=204, ) assert not RuleSnooze.objects.filter( rule=self.issue_alert_rule.id, user_id=self.user.id ).exists() def test_delete_issue_alert_rule_without_alert_write(self) -> None: """Test that a user without alerts:write access cannot unmute an issue alert rule""" self.snooze_rule(user_id=self.user.id, owner_id=self.user.id, rule=self.issue_alert_rule) member_user = self.create_user() self.create_member( user=member_user, organization=self.organization, role="member", teams=[self.team] ) self.organization.update_option("sentry:alerts_member_write", False) self.login_as(member_user) self.get_error_response( self.organization.slug, self.project.slug, self.issue_alert_rule.id, target="me", status_code=403, ) assert RuleSnooze.objects.filter( rule=self.issue_alert_rule.id, user_id=self.user.id ).exists() def test_delete_snooze_enables_workflow(self) -> None: """Test that deleting a rule snooze for everyone will re-enable the workflow""" # Create a snooze for everyone self.snooze_rule(owner_id=self.user.id, rule=self.issue_alert_rule) workflow = self.create_workflow(enabled=False) self.create_alert_rule_workflow(rule_id=self.issue_alert_rule.id, workflow=workflow) with outbox_runner(): self.get_success_response( self.organization.slug, self.project.slug, self.issue_alert_rule.id, status_code=204, ) # Verify workflow is re-enabled workflow.refresh_from_db() assert workflow.enabled is True def test_delete_user_snooze_does_not_enable_workflow(self) -> None: """Test that deleting a user-specific rule snooze does not re-enable the workflow""" # Also create a user-specific snooze self.snooze_rule(user_id=self.user.id, owner_id=self.user.id, rule=self.issue_alert_rule) workflow = self.create_workflow(enabled=False) self.create_alert_rule_workflow(rule_id=self.issue_alert_rule.id, workflow=workflow) with outbox_runner(): self.get_success_response( self.organization.slug, self.project.slug, self.issue_alert_rule.id, target="me", status_code=204, ) # Verify workflow is still disabled (user-specific snooze should not affect it) workflow.refresh_from_db() assert workflow.enabled is False
DeleteRuleSnoozeTest
python
sphinx-doc__sphinx
sphinx/search/__init__.py
{ "start": 6662, "end": 8929 }
class ____(nodes.NodeVisitor): """A special visitor that collects words for the `IndexBuilder`.""" def __init__(self, document: nodes.document, lang: SearchLanguage) -> None: super().__init__(document) self.found_words: list[str] = [] self.found_titles: list[tuple[str, str | None]] = [] self.found_title_words: list[str] = [] self.lang = lang def dispatch_visit(self, node: Node) -> None: if isinstance(node, nodes.comment): raise nodes.SkipNode elif isinstance(node, nodes.Element) and 'no-search' in node['classes']: # skip nodes marked with a 'no-search' class raise nodes.SkipNode elif isinstance(node, nodes.raw): if 'html' in node.get('format', '').split(): # Some people might put content in raw HTML that should be searched, # so we just amateurishly strip HTML tags and index the remaining # content nodetext = re.sub( r'<style.*?</style>', '', node.astext(), flags=re.IGNORECASE | re.DOTALL, ) nodetext = re.sub( r'<script.*?</script>', '', nodetext, flags=re.IGNORECASE | re.DOTALL, ) nodetext = re.sub(r'<[^<]+?>', '', nodetext) self.found_words.extend(self.lang.split(nodetext)) raise nodes.SkipNode elif isinstance(node, nodes.Text): self.found_words.extend(self.lang.split(node.astext())) elif isinstance(node, nodes.title): title = node.astext() if ids := node.parent['ids']: self.found_titles.append((title, ids[0])) else: self.found_titles.append((title, None)) self.found_title_words.extend(self.lang.split(title)) elif isinstance(node, Element) and _is_meta_keywords(node, self.lang.lang): # type: ignore[arg-type] keywords = node['content'] keywords = [keyword.strip() for keyword in keywords.split(',')] self.found_words.extend(keywords)
WordCollector
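The WordCollector sample above indexes raw HTML by stripping <style> and <script> bodies and then all remaining tags before splitting into words. The standalone helper below reproduces just that regex approach, with a plain str.split standing in for the SearchLanguage splitter; strip_html_for_index is a made-up name.

import re


def strip_html_for_index(raw: str) -> list[str]:
    """Drop <style>/<script> bodies, then all remaining tags, and split into words."""
    text = re.sub(r"<style.*?</style>", "", raw, flags=re.IGNORECASE | re.DOTALL)
    text = re.sub(r"<script.*?</script>", "", text, flags=re.IGNORECASE | re.DOTALL)
    text = re.sub(r"<[^<]+?>", "", text)
    return text.split()


html = "<p>search <b>this</b></p><script>ignore();</script><style>p{}</style>"
assert strip_html_for_index(html) == ["search", "this"]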
python
langchain-ai__langchain
libs/core/langchain_core/language_models/fake_chat_models.py
{ "start": 618, "end": 1630 }
class ____(BaseChatModel): """Fake chat model for testing purposes.""" responses: list[BaseMessage] """List of responses to **cycle** through in order.""" sleep: float | None = None """Sleep time in seconds between responses.""" i: int = 0 """Internally incremented after every model invocation.""" @override def _generate( self, messages: list[BaseMessage], stop: list[str] | None = None, run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> ChatResult: if self.sleep is not None: time.sleep(self.sleep) response = self.responses[self.i] if self.i < len(self.responses) - 1: self.i += 1 else: self.i = 0 generation = ChatGeneration(message=response) return ChatResult(generations=[generation]) @property @override def _llm_type(self) -> str: return "fake-messages-list-chat-model"
FakeMessagesListChatModel
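The fake chat model above hands back canned responses in order and wraps around once the list is exhausted. The tiny class below shows the same cycling pattern without any LangChain types; CannedResponder and its invoke method are hypothetical stand-ins.

class CannedResponder:
    """Return queued responses in order and wrap around, like the fake chat model above."""

    def __init__(self, responses: list[str]) -> None:
        self.responses = responses
        self.i = 0  # index of the next response to hand out

    def invoke(self, prompt: str) -> str:
        response = self.responses[self.i]
        # Advance, wrapping back to the first response after the last one.
        self.i = (self.i + 1) % len(self.responses)
        return response


fake = CannedResponder(["first", "second"])
assert [fake.invoke("q") for _ in range(3)] == ["first", "second", "first"]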
python
pytorch__pytorch
scripts/release_notes/classifier.py
{ "start": 1052, "end": 1159 }
class ____: title: List[str] files: List[str] author: List[str] @dataclass
CommitClassifierInputs
python
scipy__scipy
scipy/integrate/tests/test_cubature.py
{ "start": 10671, "end": 14814 }
class ____: """ Tests related to the interface of `cubature`. """ @pytest.mark.parametrize("rule_str", [ "gauss-kronrod", "genz-malik", "gk21", "gk15", ]) def test_pass_str(self, rule_str, xp): n = xp.arange(5, dtype=xp.float64) a = xp.asarray([0, 0], dtype=xp.float64) b = xp.asarray([2, 2], dtype=xp.float64) res = cubature(basic_nd_integrand, a, b, rule=rule_str, args=(n, xp)) xp_assert_close( res.estimate, basic_nd_integrand_exact(n, xp), rtol=1e-8, atol=0, ) def test_pass_array_like_not_array(self): n = np_compat.arange(5, dtype=np_compat.float64) a = [0] b = [2] res = cubature( basic_1d_integrand, a, b, args=(n, np_compat) ) xp_assert_close( res.estimate, basic_1d_integrand_exact(n, np_compat), rtol=1e-8, atol=0, ) def test_stops_after_max_subdivisions(self, xp): a = xp.asarray([0]) b = xp.asarray([1]) rule = BadErrorRule() res = cubature( basic_1d_integrand, # Any function would suffice a, b, rule=rule, max_subdivisions=10, args=(xp.arange(5, dtype=xp.float64), xp), ) assert res.subdivisions == 10 assert res.status == "not_converged" def test_a_and_b_must_be_1d(self, xp): a = xp.asarray([[0]], dtype=xp.float64) b = xp.asarray([[1]], dtype=xp.float64) with pytest.raises(Exception, match="`a` and `b` must be 1D arrays"): cubature(basic_1d_integrand, a, b, args=(xp,)) def test_a_and_b_must_be_nonempty(self, xp): a = xp.asarray([]) b = xp.asarray([]) with pytest.raises(Exception, match="`a` and `b` must be nonempty"): cubature(basic_1d_integrand, a, b, args=(xp,)) def test_zero_width_limits(self, xp): n = xp.arange(5, dtype=xp.float64) a = xp.asarray([0], dtype=xp.float64) b = xp.asarray([0], dtype=xp.float64) res = cubature( basic_1d_integrand, a, b, args=(n, xp), ) xp_assert_close( res.estimate, xp.asarray([[0], [0], [0], [0], [0]], dtype=xp.float64), rtol=1e-8, atol=0, ) def test_limits_other_way_around(self, xp): n = xp.arange(5, dtype=xp.float64) a = xp.asarray([2], dtype=xp.float64) b = xp.asarray([0], dtype=xp.float64) res = cubature( basic_1d_integrand, a, b, args=(n, xp), ) xp_assert_close( res.estimate, -basic_1d_integrand_exact(n, xp), rtol=1e-8, atol=0, ) def test_result_dtype_promoted_correctly(self, xp): result_dtype = cubature( basic_1d_integrand, xp.asarray([0], dtype=xp.float64), xp.asarray([1], dtype=xp.float64), points=[], args=(xp.asarray([1], dtype=xp.float64), xp), ).estimate.dtype assert result_dtype == xp.float64 result_dtype = cubature( basic_1d_integrand, xp.asarray([0], dtype=xp.float32), xp.asarray([1], dtype=xp.float32), points=[], args=(xp.asarray([1], dtype=xp.float32), xp), ).estimate.dtype assert result_dtype == xp.float32 result_dtype = cubature( basic_1d_integrand, xp.asarray([0], dtype=xp.float32), xp.asarray([1], dtype=xp.float64), points=[], args=(xp.asarray([1], dtype=xp.float32), xp), ).estimate.dtype assert result_dtype == xp.float64 @make_xp_test_case(cubature) @pytest.mark.parametrize("rtol", [1e-4]) @pytest.mark.parametrize("atol", [1e-5]) @pytest.mark.parametrize("rule", [ "gk15", "gk21", "genz-malik", ])
TestCubature
python
pytorch__pytorch
torch/_dynamo/source.py
{ "start": 16561, "end": 17012 }
class ____(enum.Enum): SIZE = 0 STRIDE = 1 STORAGE_OFFSET = 2 def method_name(self) -> str: if self is TensorProperty.SIZE: return "size" elif self is TensorProperty.STRIDE: return "stride" elif self is TensorProperty.STORAGE_OFFSET: return "storage_offset" else: raise AssertionError(f"unhandled {self}") @dataclasses.dataclass(frozen=True)
TensorProperty
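The TensorProperty sample above maps each enum member to the name of the tensor accessor it represents, so callers can build the method call dynamically. The sketch below shows the same member-to-method-name dispatch with made-up Axis and Box classes instead of tensors.

import enum


class Axis(enum.Enum):
    """Illustrative enum whose members map onto method names, like TensorProperty above."""

    WIDTH = 0
    HEIGHT = 1

    def method_name(self) -> str:
        if self is Axis.WIDTH:
            return "width"
        elif self is Axis.HEIGHT:
            return "height"
        raise AssertionError(f"unhandled {self}")


class Box:
    def width(self) -> int:
        return 3

    def height(self) -> int:
        return 4


# The enum member picks which accessor to call, the way guard sources look up size()/stride().
assert getattr(Box(), Axis.WIDTH.method_name())() == 3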
python
pypa__pip
src/pip/_vendor/rich/errors.py
{ "start": 212, "end": 271 }
class ____(StyleError): """No such style."""
MissingStyle
python
pypa__pip
src/pip/_vendor/urllib3/response.py
{ "start": 710, "end": 1570 }
class ____(object): def __init__(self): self._first_try = True self._data = b"" self._obj = zlib.decompressobj() def __getattr__(self, name): return getattr(self._obj, name) def decompress(self, data): if not data: return data if not self._first_try: return self._obj.decompress(data) self._data += data try: decompressed = self._obj.decompress(data) if decompressed: self._first_try = False self._data = None return decompressed except zlib.error: self._first_try = False self._obj = zlib.decompressobj(-zlib.MAX_WBITS) try: return self.decompress(self._data) finally: self._data = None
DeflateDecoder
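The DeflateDecoder above first tries to decode a zlib-wrapped stream and, on zlib.error, falls back to raw deflate by recreating the decompressor with -MAX_WBITS. The snippet below demonstrates why that fallback is needed, using only the standard zlib module.

import zlib

payload = b"x" * 100

# A deflate stream with a zlib header decodes on the first try...
wrapped = zlib.compress(payload)
assert zlib.decompressobj().decompress(wrapped) == payload

# ...but a raw deflate stream (no header) fails with zlib.error, which is what the
# decoder's first-try except branch catches before retrying with -MAX_WBITS.
raw = zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS)
raw_stream = raw.compress(payload) + raw.flush()
try:
    zlib.decompressobj().decompress(raw_stream)
except zlib.error:
    pass
assert zlib.decompressobj(-zlib.MAX_WBITS).decompress(raw_stream) == payload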
python
django__django
tests/admin_views/admin.py
{ "start": 27119, "end": 27278 }
class ____(forms.ModelForm): first = forms.CharField(widget=forms.HiddenInput) second = forms.CharField(widget=forms.HiddenInput)
FormWithoutVisibleField
python
pytorch__pytorch
torch/testing/_internal/distributed/rpc/rpc_test.py
{ "start": 14292, "end": 14840 }
class ____: __slots__ = ("tensor", "lock", "event", "thread") def __init__(self, t): self.tensor = t # Add one non-picklable field, to ensure it's ignored/skipped. self.lock = Lock() self.event = torch.cuda.Event(enable_timing=True) self.thread = threading.Thread() self.thread.start() def increase(self, v): with self.lock: self.tensor += v def sum(self): with self.lock: self.event.record() return self.tensor.sum()
TensorWrapper
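The TensorWrapper sample above combines __slots__ with a non-picklable lock that guards in-place updates. The standalone Counter below shows the same pattern with plain integers and threads instead of tensors and CUDA events; the class name and the ten-thread demo are illustrative.

import threading


class Counter:
    """Slots-based holder mixing picklable state with a lock, similar to the wrapper above."""

    __slots__ = ("value", "lock")

    def __init__(self, start: int = 0) -> None:
        self.value = start
        self.lock = threading.Lock()  # not picklable; RPC-style serializers must skip it

    def increase(self, v: int) -> None:
        with self.lock:  # guard the read-modify-write against concurrent callers
            self.value += v


c = Counter()
threads = [threading.Thread(target=c.increase, args=(1,)) for _ in range(10)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert c.value == 10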
python
cython__cython
Cython/Compiler/ParseTreeTransforms.py
{ "start": 28655, "end": 30651 }
class ____(CythonTransform, SkipDeclarations): """ Basic interpretation/validity checking that should only be done on pxd trees. A lot of this checking currently happens in the parser; but what is listed below happens here. - "def" functions are let through only if they fill the getbuffer/releasebuffer slots - cdef functions are let through only if they are on the top level and are declared "inline" """ ERR_INLINE_ONLY = "function definition in pxd file must be declared 'cdef inline'" ERR_NOGO_WITH_INLINE = "inline function definition in pxd file cannot be '%s'" def __call__(self, node): self.scope_type = 'pxd' return super().__call__(node) def visit_CClassDefNode(self, node): old = self.scope_type self.scope_type = 'cclass' self.visitchildren(node) self.scope_type = old return node def visit_FuncDefNode(self, node): # FuncDefNode always come with an implementation (without # an imp they are CVarDefNodes..) err = self.ERR_INLINE_ONLY if (isinstance(node, Nodes.DefNode) and self.scope_type == 'cclass' and node.name in ('__getbuffer__', '__releasebuffer__')): err = None # allow these slots if isinstance(node, Nodes.CFuncDefNode): if ('inline' in node.modifiers and self.scope_type in ('pxd', 'cclass')): node.inline_in_pxd = True if node.visibility != 'private': err = self.ERR_NOGO_WITH_INLINE % node.visibility elif node.api: err = self.ERR_NOGO_WITH_INLINE % 'api' else: err = None # allow inline function else: err = self.ERR_INLINE_ONLY if err: self.context.nonfatal_error(PostParseError(node.pos, err)) return None else: return node
PxdPostParse
python
allegroai__clearml
clearml/backend_api/services/v2_23/dataviews.py
{ "start": 82210, "end": 98713 }
class ____(Request): """ Get all the company's dataviews and all public dataviews :param id: List of IDs to filter by :type id: Sequence[str] :param name: Get only dataviews whose name matches this pattern (python regular expression syntax) :type name: str :param user: List of user IDs used to filter results by the dataview's creating user :type user: Sequence[str] :param project: List of projects to filter by :type project: Sequence[str] :param output_rois: List of output ROIS types to filter by :type output_rois: Sequence[OutputRoisEnum] :param only_fields: List of dataview field names (nesting is supported using '.', e.g. execution.model_labels). If provided, this list defines the query's projection (only these fields will be returned for each result entry) :type only_fields: Sequence[str] :param tags: User-defined tags list used to filter results. Prepend '-' to tag name to indicate exclusion. :type tags: Sequence[str] :param system_tags: System tags list used to filter results. Prepend '-' to system tag name to indicate exclusion. :type system_tags: Sequence[str] :param status: Dataview status to filter by :type status: str :param page: Page number, returns a specific page out of the result list of datasets. :type page: int :param page_size: Page size, specifies the number of results returned in each page (last page may contain fewer results) :type page_size: int :param order_by: List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page :type order_by: Sequence[str] :param search_text: Free text search query :type search_text: str :param _all_: Multi-field pattern condition (all fields match pattern) :type _all_: MultiFieldPatternData :param _any_: Multi-field pattern condition (any field matches pattern) :type _any_: MultiFieldPatternData :param scroll_id: Scroll ID returned from the previos calls to get_all :type scroll_id: str :param refresh_scroll: If set then all the data received with this scroll will be requeried :type refresh_scroll: bool :param size: The number of datavievs to retrieve :type size: int """ _service = "dataviews" _action = "get_all" _version = "2.23" _schema = { "definitions": { "multi_field_pattern_data": { "properties": { "fields": { "description": "List of field names", "items": {"type": "string"}, "type": ["array", "null"], }, "pattern": { "description": "Pattern string (regex)", "type": ["string", "null"], }, }, "type": "object", }, "output_rois_enum": { "enum": ["all_in_frame", "only_filtered", "frame_per_roi"], "type": "string", }, }, "properties": { "_all_": { "description": "Multi-field pattern condition (all fields match pattern)", "oneOf": [ {"$ref": "#/definitions/multi_field_pattern_data"}, {"type": "null"}, ], }, "_any_": { "description": "Multi-field pattern condition (any field matches pattern)", "oneOf": [ {"$ref": "#/definitions/multi_field_pattern_data"}, {"type": "null"}, ], }, "id": { "description": "List of IDs to filter by", "items": {"type": "string"}, "type": ["array", "null"], }, "name": { "description": "Get only dataviews whose name matches this pattern (python regular expression syntax)", "type": ["string", "null"], }, "only_fields": { "description": ( "List of dataview field names (nesting is supported using '.', e.g. execution.model_labels). 
If" " provided, this list defines the query's projection (only these fields will be returned for each" " result entry)" ), "items": {"type": "string"}, "type": ["array", "null"], }, "order_by": { "description": ( "List of field names to order by. When search_text is used, '@text_score' can be used as a field" " representing the text score of returned documents. Use '-' prefix to specify descending order." " Optional, recommended when using page" ), "items": {"type": "string"}, "type": ["array", "null"], }, "output_rois": { "description": "List of output ROIS types to filter by", "items": {"$ref": "#/definitions/output_rois_enum"}, "type": ["array", "null"], }, "page": { "description": "Page number, returns a specific page out of the result list of datasets.", "minimum": 0, "type": ["integer", "null"], }, "page_size": { "description": ( "Page size, specifies the number of results returned in each page (last page may contain fewer " "results)" ), "minimum": 1, "type": ["integer", "null"], }, "project": { "description": "List of projects to filter by", "items": {"type": "string"}, "type": ["array", "null"], }, "refresh_scroll": { "description": "If set then all the data received with this scroll will be requeried", "type": ["boolean", "null"], }, "scroll_id": { "description": "Scroll ID returned from the previos calls to get_all", "type": ["string", "null"], }, "search_text": { "description": "Free text search query", "type": ["string", "null"], }, "size": { "description": "The number of datavievs to retrieve", "minimum": 1, "type": ["integer", "null"], }, "status": { "description": "Dataview status to filter by", "enum": ["draft", "published"], "type": ["string", "null"], }, "system_tags": { "description": ( "System tags list used to filter results. Prepend '-' to system tag name to indicate exclusion." ), "items": {"type": "string"}, "type": ["array", "null"], }, "tags": { "description": ( "User-defined tags list used to filter results. Prepend '-' to tag name to indicate exclusion." 
), "items": {"type": "string"}, "type": ["array", "null"], }, "user": { "description": "List of user IDs used to filter results by the dataview's creating user", "items": {"type": "string"}, "type": ["array", "null"], }, }, "type": "object", } def __init__( self, id=None, name=None, user=None, project=None, output_rois=None, only_fields=None, tags=None, system_tags=None, status=None, page=None, page_size=None, order_by=None, search_text=None, _all_=None, _any_=None, scroll_id=None, refresh_scroll=None, size=None, **kwargs ): super(GetAllRequest, self).__init__(**kwargs) self.id = id self.name = name self.user = user self.project = project self.output_rois = output_rois self.only_fields = only_fields self.tags = tags self.system_tags = system_tags self.status = status self.page = page self.page_size = page_size self.order_by = order_by self.search_text = search_text self._all_ = _all_ self._any_ = _any_ self.scroll_id = scroll_id self.refresh_scroll = refresh_scroll self.size = size @schema_property("id") def id(self): return self._property_id @id.setter def id(self, value): if value is None: self._property_id = None return self.assert_isinstance(value, "id", (list, tuple)) self.assert_isinstance(value, "id", six.string_types, is_array=True) self._property_id = value @schema_property("name") def name(self): return self._property_name @name.setter def name(self, value): if value is None: self._property_name = None return self.assert_isinstance(value, "name", six.string_types) self._property_name = value @schema_property("user") def user(self): return self._property_user @user.setter def user(self, value): if value is None: self._property_user = None return self.assert_isinstance(value, "user", (list, tuple)) self.assert_isinstance(value, "user", six.string_types, is_array=True) self._property_user = value @schema_property("project") def project(self): return self._property_project @project.setter def project(self, value): if value is None: self._property_project = None return self.assert_isinstance(value, "project", (list, tuple)) self.assert_isinstance(value, "project", six.string_types, is_array=True) self._property_project = value @schema_property("output_rois") def output_rois(self): return self._property_output_rois @output_rois.setter def output_rois(self, value): if value is None: self._property_output_rois = None return self.assert_isinstance(value, "output_rois", (list, tuple)) if any(isinstance(v, six.string_types) for v in value): value = [ OutputRoisEnum(v) if isinstance(v, six.string_types) else v for v in value ] else: self.assert_isinstance(value, "output_rois", OutputRoisEnum, is_array=True) self._property_output_rois = value @schema_property("only_fields") def only_fields(self): return self._property_only_fields @only_fields.setter def only_fields(self, value): if value is None: self._property_only_fields = None return self.assert_isinstance(value, "only_fields", (list, tuple)) self.assert_isinstance(value, "only_fields", six.string_types, is_array=True) self._property_only_fields = value @schema_property("tags") def tags(self): return self._property_tags @tags.setter def tags(self, value): if value is None: self._property_tags = None return self.assert_isinstance(value, "tags", (list, tuple)) self.assert_isinstance(value, "tags", six.string_types, is_array=True) self._property_tags = value @schema_property("system_tags") def system_tags(self): return self._property_system_tags @system_tags.setter def system_tags(self, value): if value is None: self._property_system_tags = 
None return self.assert_isinstance(value, "system_tags", (list, tuple)) self.assert_isinstance(value, "system_tags", six.string_types, is_array=True) self._property_system_tags = value @schema_property("status") def status(self): return self._property_status @status.setter def status(self, value): if value is None: self._property_status = None return self.assert_isinstance(value, "status", six.string_types) self._property_status = value @schema_property("page") def page(self): return self._property_page @page.setter def page(self, value): if value is None: self._property_page = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "page", six.integer_types) self._property_page = value @schema_property("page_size") def page_size(self): return self._property_page_size @page_size.setter def page_size(self, value): if value is None: self._property_page_size = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "page_size", six.integer_types) self._property_page_size = value @schema_property("order_by") def order_by(self): return self._property_order_by @order_by.setter def order_by(self, value): if value is None: self._property_order_by = None return self.assert_isinstance(value, "order_by", (list, tuple)) self.assert_isinstance(value, "order_by", six.string_types, is_array=True) self._property_order_by = value @schema_property("search_text") def search_text(self): return self._property_search_text @search_text.setter def search_text(self, value): if value is None: self._property_search_text = None return self.assert_isinstance(value, "search_text", six.string_types) self._property_search_text = value @schema_property("_all_") def _all_(self): return self._property__all_ @_all_.setter def _all_(self, value): if value is None: self._property__all_ = None return if isinstance(value, dict): value = MultiFieldPatternData.from_dict(value) else: self.assert_isinstance(value, "_all_", MultiFieldPatternData) self._property__all_ = value @schema_property("_any_") def _any_(self): return self._property__any_ @_any_.setter def _any_(self, value): if value is None: self._property__any_ = None return if isinstance(value, dict): value = MultiFieldPatternData.from_dict(value) else: self.assert_isinstance(value, "_any_", MultiFieldPatternData) self._property__any_ = value @schema_property("scroll_id") def scroll_id(self): return self._property_scroll_id @scroll_id.setter def scroll_id(self, value): if value is None: self._property_scroll_id = None return self.assert_isinstance(value, "scroll_id", six.string_types) self._property_scroll_id = value @schema_property("refresh_scroll") def refresh_scroll(self): return self._property_refresh_scroll @refresh_scroll.setter def refresh_scroll(self, value): if value is None: self._property_refresh_scroll = None return self.assert_isinstance(value, "refresh_scroll", (bool,)) self._property_refresh_scroll = value @schema_property("size") def size(self): return self._property_size @size.setter def size(self, value): if value is None: self._property_size = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "size", six.integer_types) self._property_size = value
GetAllRequest
python
scrapy__scrapy
scrapy/exporters.py
{ "start": 11338, "end": 11945 }
class ____(BaseItemExporter): """Exports items in a Python-specific binary format (see :mod:`marshal`). :param file: The file-like object to use for exporting the data. Its ``write`` method should accept :class:`bytes` (a disk file opened in binary mode, a :class:`~io.BytesIO` object, etc) """ def __init__(self, file: BytesIO, **kwargs: Any): super().__init__(**kwargs) self.file: BytesIO = file def export_item(self, item: Any) -> None: marshal.dump(dict(self._get_serialized_fields(item)), self.file)
MarshalItemExporter
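MarshalItemExporter above simply calls marshal.dump once per item on a binary file-like object. The round trip below shows what that produces and how it can be read back with marshal.load; note that the marshal format is CPython-version specific, so it only suits short-lived, same-interpreter storage. The two sample dicts are made up.

import io
import marshal

buffer = io.BytesIO()
for item in ({"name": "a", "price": 1}, {"name": "b", "price": 2}):
    marshal.dump(item, buffer)  # one marshalled dict appended per exported item

buffer.seek(0)
restored = [marshal.load(buffer) for _ in range(2)]
assert restored == [{"name": "a", "price": 1}, {"name": "b", "price": 2}]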
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/freshness.py
{ "start": 1019, "end": 1137 }
class ____: key: AssetKey freshness_state: FreshnessState @whitelist_for_serdes @record
FreshnessStateEvaluation
python
apache__airflow
airflow-core/src/airflow/models/backfill.py
{ "start": 5371, "end": 18686 }
class ____(Base): """Mapping table between backfill run and dag run.""" __tablename__ = "backfill_dag_run" id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) backfill_id: Mapped[int] = mapped_column(Integer, nullable=False) dag_run_id: Mapped[int | None] = mapped_column(Integer, nullable=True) exception_reason: Mapped[str | None] = mapped_column(StringID(), nullable=True) logical_date: Mapped[datetime] = mapped_column(UtcDateTime, nullable=False) sort_ordinal: Mapped[int] = mapped_column(Integer, nullable=False) backfill = relationship("Backfill", back_populates="backfill_dag_run_associations") dag_run = relationship("DagRun") def __repr__(self): return f"BackfillDagRun(id={self.id}, backfill_id={self.backfill_id}, logical_date='{self.logical_date}')" __table_args__ = ( UniqueConstraint("backfill_id", "dag_run_id", name="ix_bdr_backfill_id_dag_run_id"), ForeignKeyConstraint( [backfill_id], ["backfill.id"], name="bdr_backfill_fkey", ondelete="cascade", ), ForeignKeyConstraint( [dag_run_id], ["dag_run.id"], name="bdr_dag_run_fkey", ondelete="set null", ), ) @validates("sort_ordinal") def validate_sort_ordinal(self, key: str, val: int) -> int: if val < 1: raise ValueError("sort_ordinal must be >= 1") return val def _get_latest_dag_run_row_query(*, dag_id: str, info: DagRunInfo, session: Session): from airflow.models import DagRun return ( select(DagRun) .where( DagRun.logical_date == info.logical_date, DagRun.dag_id == dag_id, ) .order_by(nulls_first(desc(DagRun.start_date), session=session)) .limit(1) ) def _get_dag_run_no_create_reason(dr, reprocess_behavior: ReprocessBehavior) -> str | None: non_create_reason = None if dr.state not in (DagRunState.SUCCESS, DagRunState.FAILED): non_create_reason = BackfillDagRunExceptionReason.IN_FLIGHT elif reprocess_behavior is ReprocessBehavior.NONE: non_create_reason = BackfillDagRunExceptionReason.ALREADY_EXISTS elif reprocess_behavior is ReprocessBehavior.FAILED: if dr.state != DagRunState.FAILED: non_create_reason = BackfillDagRunExceptionReason.ALREADY_EXISTS return non_create_reason def _validate_backfill_params( dag: SerializedDAG, reverse: bool, from_date: datetime, to_date: datetime, reprocess_behavior: ReprocessBehavior | None, ) -> None: depends_on_past = any(x.depends_on_past for x in dag.tasks) if depends_on_past: if reverse is True: raise InvalidBackfillDirection( "Backfill cannot be run in reverse when the DAG has tasks where depends_on_past=True." ) if reprocess_behavior in (None, ReprocessBehavior.NONE): raise InvalidReprocessBehavior( "DAG has tasks for which depends_on_past=True. " "You must set reprocess behavior to reprocess completed or reprocess failed." 
) current_time = timezone.utcnow() if from_date >= current_time and to_date >= current_time: raise InvalidBackfillDate("Backfill cannot be executed for future dates.") def _do_dry_run( *, dag_id: str, from_date: datetime, to_date: datetime, reverse: bool, reprocess_behavior: ReprocessBehavior, session: Session, ) -> Sequence[datetime]: from airflow.models import DagModel from airflow.models.serialized_dag import SerializedDagModel serdag = session.scalar(SerializedDagModel.latest_item_select_object(dag_id)) if not serdag: raise DagNotFound(f"Could not find dag {dag_id}") dag = serdag.dag _validate_backfill_params(dag, reverse, from_date, to_date, reprocess_behavior) no_schedule = session.scalar( select(func.count()).where(DagModel.timetable_summary == "None", DagModel.dag_id == dag_id) ) if no_schedule: raise DagNoScheduleException(f"{dag_id} has no schedule") dagrun_info_list = _get_info_list( dag=dag, from_date=from_date, to_date=to_date, reverse=reverse, ) logical_dates: list[datetime] = [] for info in dagrun_info_list: dr = session.scalar( statement=_get_latest_dag_run_row_query(dag_id=dag_id, info=info, session=session), ) if dr: non_create_reason = _get_dag_run_no_create_reason(dr, reprocess_behavior) if not non_create_reason: logical_dates.append(info.logical_date) else: logical_dates.append(info.logical_date) return logical_dates def _create_backfill_dag_run( *, dag: SerializedDAG, info: DagRunInfo, reprocess_behavior: ReprocessBehavior, backfill_id: int, dag_run_conf: dict | None, backfill_sort_ordinal: int, triggering_user_name: str | None, run_on_latest_version: bool, session: Session, ) -> None: from airflow.models.dagrun import DagRun with session.begin_nested() as nested: dr = session.scalar(_get_latest_dag_run_row_query(dag_id=dag.dag_id, info=info, session=session)) if dr: non_create_reason = _get_dag_run_no_create_reason(dr, reprocess_behavior) if non_create_reason: session.add( BackfillDagRun( backfill_id=backfill_id, dag_run_id=None, logical_date=info.logical_date, exception_reason=non_create_reason, sort_ordinal=backfill_sort_ordinal, ) ) return lock = session.execute( with_row_locks( query=select(DagRun).where( DagRun.logical_date == info.logical_date, DagRun.dag_id == dag.dag_id, ), session=session, skip_locked=True, ) ) if lock: _handle_clear_run( session=session, dag=dag, dr=dr, info=info, backfill_id=backfill_id, sort_ordinal=backfill_sort_ordinal, run_on_latest=run_on_latest_version, ) else: session.add( BackfillDagRun( backfill_id=backfill_id, dag_run_id=None, logical_date=info.logical_date, exception_reason=BackfillDagRunExceptionReason.IN_FLIGHT, sort_ordinal=backfill_sort_ordinal, ) ) return try: dr = dag.create_dagrun( run_id=DagRun.generate_run_id( run_type=DagRunType.BACKFILL_JOB, logical_date=info.logical_date, run_after=info.run_after ), logical_date=info.logical_date, data_interval=info.data_interval if info.logical_date else None, run_after=info.run_after, conf=dag_run_conf, run_type=DagRunType.BACKFILL_JOB, triggered_by=DagRunTriggeredByType.BACKFILL, triggering_user_name=triggering_user_name, state=DagRunState.QUEUED, start_date=timezone.utcnow(), backfill_id=backfill_id, session=session, ) session.add( BackfillDagRun( backfill_id=backfill_id, dag_run_id=dr.id, sort_ordinal=backfill_sort_ordinal, logical_date=info.logical_date, ) ) except IntegrityError: log.info( "Skipped creating backfill dag run for dag_id=%s backfill_id=%s, logical_date=%s (already exists)", dag.dag_id, backfill_id, info.logical_date, ) nested.rollback() session.add( 
BackfillDagRun( backfill_id=backfill_id, dag_run_id=None, logical_date=info.logical_date, exception_reason=BackfillDagRunExceptionReason.IN_FLIGHT, sort_ordinal=backfill_sort_ordinal, ) ) def _get_info_list( *, from_date: datetime, to_date: datetime, reverse: bool, dag: SerializedDAG, ) -> list[DagRunInfo]: infos = dag.iter_dagrun_infos_between(from_date, to_date) now = timezone.utcnow() dagrun_info_list = [x for x in infos if x.data_interval.end < now] if reverse: dagrun_info_list = list(reversed(dagrun_info_list)) return dagrun_info_list def _handle_clear_run( session: Session, dag: SerializedDAG, dr: DagRun, info: DagRunInfo, backfill_id: int, sort_ordinal: int, run_on_latest: bool = False, ) -> None: """Clear the existing DAG run and update backfill metadata.""" from sqlalchemy.sql import update from airflow.models import DagRun from airflow.utils.state import DagRunState from airflow.utils.types import DagRunType dag.clear( run_id=dr.run_id, dag_run_state=DagRunState.QUEUED, session=session, dry_run=False, run_on_latest_version=run_on_latest, ) # Update backfill_id and run_type in DagRun table session.execute( update(DagRun) .where(DagRun.logical_date == info.logical_date, DagRun.dag_id == dag.dag_id) .values( backfill_id=backfill_id, run_type=DagRunType.BACKFILL_JOB, triggered_by=DagRunTriggeredByType.BACKFILL, ) ) session.add( BackfillDagRun( backfill_id=backfill_id, dag_run_id=dr.id, logical_date=info.logical_date, sort_ordinal=sort_ordinal, ) ) def _create_backfill( *, dag_id: str, from_date: datetime, to_date: datetime, max_active_runs: int, reverse: bool, dag_run_conf: dict | None, triggering_user_name: str | None, reprocess_behavior: ReprocessBehavior | None = None, run_on_latest_version: bool = False, ) -> Backfill: from airflow.models import DagModel from airflow.models.serialized_dag import SerializedDagModel with create_session() as session: serdag = session.scalar(SerializedDagModel.latest_item_select_object(dag_id)) if not serdag: raise DagNotFound(f"Could not find dag {dag_id}") no_schedule = session.scalar( select(func.count()).where(DagModel.timetable_summary == "None", DagModel.dag_id == dag_id) ) if no_schedule: raise DagNoScheduleException(f"{dag_id} has no schedule") num_active = session.scalar( select(func.count()).where( Backfill.dag_id == dag_id, Backfill.completed_at.is_(None), ) ) if num_active is None: raise UnknownActiveBackfills(dag_id) if num_active > 0: raise AlreadyRunningBackfill( f"Another backfill is running for dag {dag_id}. " f"There can be only one running backfill per dag." 
) dag = serdag.dag _validate_backfill_params(dag, reverse, from_date, to_date, reprocess_behavior) br = Backfill( dag_id=dag_id, from_date=from_date, to_date=to_date, max_active_runs=max_active_runs, dag_run_conf=dag_run_conf, reprocess_behavior=reprocess_behavior, dag_model=dag, triggering_user_name=triggering_user_name, ) session.add(br) session.commit() dagrun_info_list = _get_info_list( from_date=from_date, to_date=to_date, reverse=reverse, dag=dag, ) dag_model = session.scalar(select(DagModel).where(DagModel.dag_id == dag_id)) if not dag_model: raise RuntimeError(f"Dag {dag_id} not found") for backfill_sort_ordinal, info in enumerate(dagrun_info_list, start=1): _create_backfill_dag_run( dag=dag, info=info, backfill_id=br.id, dag_run_conf=br.dag_run_conf, reprocess_behavior=ReprocessBehavior(br.reprocess_behavior), backfill_sort_ordinal=backfill_sort_ordinal, triggering_user_name=br.triggering_user_name, run_on_latest_version=run_on_latest_version, session=session, ) log.info( "created backfill dag run dag_id=%s backfill_id=%s, info=%s", dag.dag_id, br.id, info, ) return br
BackfillDagRun
python
Netflix__metaflow
metaflow/plugins/azure/includefile_support.py
{ "start": 4298, "end": 4726 }
class ____(object):
    def __init__(self, url, path, exists, size):
        self._path = path
        self._url = url
        self._exists = exists
        self._size = size

    @property
    def path(self):
        return self._path

    @property
    def url(self):
        return self._url

    @property
    def exists(self):
        return self._exists

    @property
    def size(self):
        return self._size
AzureObject
python
pandas-dev__pandas
pandas/io/formats/xml.py
{ "start": 12100, "end": 15835 }
class ____(_BaseXMLFormatter):
    """
    Class for formatting data in xml using Python standard library
    modules: `xml.etree.ElementTree` and `xml.dom.minidom`.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self._convert_empty_str_key()

    def _build_tree(self) -> bytes:
        """
        Build tree from data.

        This method initializes the root and builds attributes and elements
        with optional namespaces.
        """
        from lxml.etree import (
            Element,
            SubElement,
            tostring,
        )

        self.root = Element(f"{self.prefix_uri}{self.root_name}", nsmap=self.namespaces)

        for d in self.frame_dicts.values():
            elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}")

            if not self.attr_cols and not self.elem_cols:
                self.elem_cols = list(d.keys())
                self._build_elems(d, elem_row)
            else:
                elem_row = self._build_attribs(d, elem_row)
                self._build_elems(d, elem_row)

        self.out_xml = tostring(
            self.root,
            pretty_print=self.pretty_print,
            method="xml",
            encoding=self.encoding,
            xml_declaration=self.xml_declaration,
        )

        if self.stylesheet is not None:
            self.out_xml = self._transform_doc()

        return self.out_xml

    def _convert_empty_str_key(self) -> None:
        """
        Replace zero-length string in `namespaces`.

        This method will replace '' with None to align to `lxml`
        requirement that empty string prefixes are not allowed.
        """
        if self.namespaces and "" in self.namespaces.keys():
            self.namespaces[None] = self.namespaces.pop("", "default")

    def _get_prefix_uri(self) -> str:
        uri = ""
        if self.namespaces:
            if self.prefix:
                try:
                    uri = f"{{{self.namespaces[self.prefix]}}}"
                except KeyError as err:
                    raise KeyError(
                        f"{self.prefix} is not included in namespaces"
                    ) from err
            elif "" in self.namespaces:
                uri = f"{{{self.namespaces['']}}}"
            else:
                uri = ""

        return uri

    @cache_readonly
    def _sub_element_cls(self):
        from lxml.etree import SubElement

        return SubElement

    def _transform_doc(self) -> bytes:
        """
        Parse stylesheet from file or buffer and run it.

        This method will parse stylesheet object into tree for parsing
        conditionally by its specific object type, then transforms
        original tree with XSLT script.
        """
        from lxml.etree import (
            XSLT,
            XMLParser,
            fromstring,
            parse,
        )

        style_doc = self.stylesheet
        assert style_doc is not None  # is ensured by caller

        handle_data = get_data_from_filepath(
            filepath_or_buffer=style_doc,
            encoding=self.encoding,
            compression=self.compression,
            storage_options=self.storage_options,
        )

        with handle_data as xml_data:
            curr_parser = XMLParser(encoding=self.encoding)

            if isinstance(xml_data, io.StringIO):
                xsl_doc = fromstring(
                    xml_data.getvalue().encode(self.encoding), parser=curr_parser
                )
            else:
                xsl_doc = parse(xml_data, parser=curr_parser)

        transformer = XSLT(xsl_doc)
        new_doc = transformer(self.root)

        return bytes(new_doc)
LxmlXMLFormatter
python
astropy__astropy
astropy/utils/console.py
{ "start": 31820, "end": 32287 }
class ____:
    def __init__(self):
        import sys  # noqa: F401
        import tty  # noqa: F401

    def __call__(self):
        import sys
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch
_GetchUnix
python
huggingface__transformers
tests/models/swin/test_modeling_swin.py
{ "start": 8148, "end": 17322 }
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SwinModel, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-feature-extraction": SwinModel, "image-classification": SwinForImageClassification} if is_torch_available() else {} ) test_resize_embeddings = False test_torch_exportable = True def setUp(self): self.model_tester = SwinModelTester(self) self.config_tester = ConfigTester( self, config_class=SwinConfig, embed_dim=37, has_text_modality=False, common_properties=["image_size", "patch_size", "num_channels"], ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # TODO: check if this works again for PyTorch 2.x.y @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass def test_training_gradient_checkpointing(self): super().test_training_gradient_checkpointing() def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="Swin does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Swin Transformer does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True 
inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # also another +1 for reshaped_hidden_states added_hidden_states = 1 if model_class.__name__ == "SwinBackbone" else 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Swin has a different seq_length patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) if model_class.__name__ != "SwinBackbone": reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = ( reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_hidden_states_output_with_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, 
config, model_class, (padded_height, padded_width)) @slow def test_model_from_pretrained(self): model_name = "microsoft/swin-tiny-patch4-window7-224" model = SwinModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_vision @require_torch
SwinModelTest
python
kamyu104__LeetCode-Solutions
Python/guess-number-higher-or-lower-ii.py
{ "start": 455, "end": 848 }
class ____(object):
    def getMoneyAmount(self, n):
        """
        :type n: int
        :rtype: int
        """
        dp = [[0]*(n+1) for _ in xrange(n+1)]  # dp[i][j]: min pay in [i+1, j+1)
        for i in reversed(xrange(n)):
            for j in xrange(i+2, n+1):
                dp[i][j] = min((k+1) + max(dp[i][k], dp[k+1][j]) for k in xrange(i, j))
        return dp[0][n]
Solution2
python
walkccc__LeetCode
solutions/31. Next Permutation/31.py
{ "start": 0, "end": 695 }
class ____:
    def nextPermutation(self, nums: list[int]) -> None:
        n = len(nums)

        # From back to front, find the first number < nums[i + 1].
        i = n - 2
        while i >= 0:
            if nums[i] < nums[i + 1]:
                break
            i -= 1

        # From back to front, find the first number > nums[i], swap it with nums[i].
        if i >= 0:
            for j in range(n - 1, i, -1):
                if nums[j] > nums[i]:
                    nums[i], nums[j] = nums[j], nums[i]
                    break

        def reverse(nums: list[int], l: int, r: int) -> None:
            while l < r:
                nums[l], nums[r] = nums[r], nums[l]
                l += 1
                r -= 1

        # Reverse nums[i + 1..n - 1].
        reverse(nums, i + 1, len(nums) - 1)
Solution
python
pytorch__pytorch
test/test_tensorexpr.py
{ "start": 1089, "end": 58839 }
class ____(BaseTestClass): def test_easy(self): def easy(x, y): aaa = torch.add(x, y) return aaa traced = torch.jit.trace(easy, (torch.rand(1024), torch.rand(1024))) a = torch.rand(1024) b = torch.rand(1024) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose(a.numpy() + b.numpy(), x.numpy()) def test_three_arg(self): def easy(x, y, z): aaa = torch.add(x, y) bbb = torch.add(aaa, z) return bbb traced = torch.jit.trace( easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024)) ) a = torch.rand(1024) b = torch.rand(1024) c = torch.rand(1024) x = warmup_and_run_forward(traced, a, b, c) self.assertLastGraphAllFused() npr = a.numpy() + b.numpy() + c.numpy() np.testing.assert_allclose(npr, x.numpy()) def test_four_arg(self): def run_addcmul(x, y, z, w): c = torch.addcmul(torch.add(x, y), z, w) return c for dev in self.devices: rand_a = torch.rand(1024, dtype=torch.float, device=dev) rand_b = torch.rand(1024, dtype=torch.float, device=dev) rand_c = torch.rand(1024, dtype=torch.float, device=dev) rand_d = torch.rand(1024, dtype=torch.float, device=dev) traced = torch.jit.trace( run_addcmul, ( torch.zeros(1024, dtype=torch.float, device=dev), torch.zeros(1024, dtype=torch.float, device=dev), torch.zeros(1024, dtype=torch.float, device=dev), torch.zeros(1024, dtype=torch.float, device=dev), ), ) x = warmup_and_run_forward(traced, rand_a, rand_b, rand_c, rand_d) self.assertLastGraphAllFused() y = run_addcmul(rand_a, rand_b, rand_c, rand_d) np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy(), atol=1e-6) def test_three_arg2(self): for device in self.devices: def test(x, y, z): aaa = torch.add(x, y) bbb = torch.add(aaa, z) return bbb M = 32 N = 32 traced = torch.jit.trace( test, ( torch.rand(M, N, device=device), torch.rand(M, N, device=device), torch.rand(M, N, device=device), ), ) a = torch.rand(M, N, device=device) b = torch.rand(M, N, device=device) c = torch.rand(M, N, device=device) x = traced(a, b, c) x = warmup_and_run_forward(traced, a, b, c) self.assertLastGraphAllFused() npr = a.cpu().numpy() + b.cpu().numpy() + c.cpu().numpy() np.testing.assert_allclose(npr, x.cpu().numpy()) def test_broadcast3(self): for device in self.devices: def test_body(M, N, L, K): def test(x, y, z): v1 = torch.add(x, y) v2 = torch.add(v1, z) return v2 a_shape = [M, N] b_shape = [L, M, 1] c_shape = [K, L, 1, 1] traced = torch.jit.trace( test, ( torch.rand(*a_shape, device=device), torch.rand(*b_shape, device=device), torch.rand(*c_shape, device=device), ), ) a = torch.rand(*a_shape, device=device) b = torch.rand(*b_shape, device=device) c = torch.rand(*c_shape, device=device) x = warmup_and_run_forward(traced, a, b, c) self.assertLastGraphAllFused() npr = a.cpu().numpy() + b.cpu().numpy() + c.cpu().numpy() np.testing.assert_allclose(npr, x.cpu().numpy()) test_configs = [[5, 2, 7, 3], [8, 8, 8, 8]] for test_config in test_configs: test_body(*test_config) def test_all_combos(self): def easy(x, y, z): a = torch.add(x, y) b = torch.add(a, z) c = torch.add(x, b) d = torch.add(c, a) return d def np_easy(x, y, z): a = x + y b = a + z c = x + b d = c + a return d traced = torch.jit.trace( easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024)) ) a = torch.rand(1024) b = torch.rand(1024) c = torch.rand(1024) x = warmup_and_run_forward(traced, a, b, c) self.assertLastGraphAllFused() npr = np_easy(a.numpy(), b.numpy(), c.numpy()) np.testing.assert_allclose(npr, x.numpy()) def test_rank_two(self): def easy(x, y, z): a = torch.add(x, y) b = torch.add(a, z) c = 
torch.add(x, b) d = torch.add(c, a) return d def np_easy(x, y, z): a = x + y b = a + z c = x + b d = c + a return d shape = 32, 32 traced = torch.jit.trace( easy, (torch.rand(shape), torch.rand(shape), torch.rand(shape)) ) a = torch.rand(shape) b = torch.rand(shape) c = torch.rand(shape) x = warmup_and_run_forward(traced, a, b, c) self.assertLastGraphAllFused() npr = np_easy(a.numpy(), b.numpy(), c.numpy()) np.testing.assert_allclose(npr, x.numpy()) def test_broadcast(self): def easy(x, y, z): a = torch.add(x, y) b = torch.add(a, z) return b def np_easy(x, y, z): a = x + y b = a + z return b N = 32 traced = torch.jit.trace(easy, (torch.rand(N, N), torch.rand(N), torch.rand(N, N))) a = torch.rand(N, N) b = torch.rand(N) c = torch.rand(N, N) x = warmup_and_run_forward(traced, a, b, c) self.assertLastGraphAllFused() npr = np_easy(a.numpy(), b.numpy(), c.numpy()) np.testing.assert_allclose(npr, x.numpy()) def test_broadcast_2(self): zero = torch.tensor([0.0], dtype=torch.float) def foo(x, y, z): aaa = torch.add(x, y) bbb = torch.add(zero, aaa) return torch.add(bbb, z) def foo_np(x, y, z): a = x + y b = zero.numpy() + a return b + z x = torch.rand(3, 4) y = torch.ones(3, 1) z = torch.rand(4) traced = torch.jit.trace(foo, (x, y, z)) r = warmup_and_run_forward(traced, x, y, z) self.assertLastGraphAllFused() rnp = foo_np(x.numpy(), y.numpy(), z.numpy()) np.testing.assert_allclose(r, rnp) def test_broadcast_big2(self): zero = torch.tensor([0.0], dtype=torch.float) def foo(x, y, z): aaa = torch.add(x, y) bbb = torch.add(zero, aaa) return torch.add(bbb, z) def foo_np(x, y, z): a = x + y b = zero.numpy() + a return b + z x = torch.rand(32, 1024) y = torch.ones(32, 1) z = torch.rand(1024) traced = torch.jit.trace(foo, (x, y, z)) r = warmup_and_run_forward(traced, x, y, z) self.assertLastGraphAllFused() rnp = foo_np(x.numpy(), y.numpy(), z.numpy()) np.testing.assert_allclose(r, rnp) def test_alpha(self): def alpha(x): aaa = torch.add(x, x, alpha=2.0) return aaa traced = torch.jit.trace(alpha, (torch.tensor([1.0]))) a = torch.tensor([1.0]) x = traced(a) np.testing.assert_allclose(a.numpy() + 2.0 * a.numpy(), x.numpy()) @suppress_warnings def test_constant(self): def constant(x): bbb = torch.tensor([1.0]) aaa = torch.add(x, bbb) return aaa traced = torch.jit.trace(constant, (torch.tensor([1.0]))) a = torch.tensor([1.0]) x = warmup_and_run_forward(traced, a) self.assertLastGraphAllFused() np.testing.assert_allclose(a.numpy() + 1.0, x.numpy()) def test_add_sub(self): def easy(x, y, z): aaa = torch.add(x, y) bbb = torch.sub(aaa, z) return bbb traced = torch.jit.trace( easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024)) ) a = torch.rand(1024) b = torch.rand(1024) c = torch.rand(1024) x = warmup_and_run_forward(traced, a, b, c) self.assertLastGraphAllFused() np.testing.assert_allclose(a.numpy() + b.numpy() - c.numpy(), x.numpy()) def test_promotion(self): def easy(x, y): aaa = torch.add(x, y) return aaa traced = torch.jit.trace( easy, (torch.zeros(1024, dtype=torch.int32), torch.rand(1024, dtype=torch.float32)), ) a = torch.zeros(1024, dtype=torch.int32) b = torch.rand(1024, dtype=torch.float32) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose(a.numpy() + b.numpy(), x.numpy()) def test_double(self): TENSOR_LEN = 8 def easy(x, y): aaa = torch.add(x, y) bbb = torch.mul(aaa, y) return bbb traced = torch.jit.trace( easy, (torch.rand(TENSOR_LEN, dtype=torch.float64), torch.full((TENSOR_LEN,), 0.5, dtype=torch.float64)), ) a = torch.rand(TENSOR_LEN, 
dtype=torch.double) b = torch.full((TENSOR_LEN,), 0.5, dtype=torch.double) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy()) def test_short(self): TENSOR_LEN = 8 def easy(x, y): aaa = torch.add(x, y) bbb = torch.mul(aaa, y) return bbb traced = torch.jit.trace( easy, (torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16), torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16)), ) a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16) b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy()) def test_char(self): TENSOR_LEN = 8 def easy(x, y): aaa = torch.add(x, y) bbb = torch.mul(aaa, y) return bbb traced = torch.jit.trace( easy, (torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8), torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)), ) a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8) b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy()) def test_int64_promotion(self): TENSOR_LEN = 8 def easy(x, y): aaa = torch.add(x, y) bbb = torch.mul(aaa, y) return bbb traced = torch.jit.trace( easy, (torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8), torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int64)), ) a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8) b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int64) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy()) def test_eq(self): def easy(x, y): c = torch.eq(x, y) return c traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024))) a = torch.zeros(1024, dtype=torch.int32) b = torch.zeros(1024, dtype=torch.int32) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose(np.ones(1024), x.numpy()) def test_ne(self): def easy(x, y): c = torch.ne(x, y) return c traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024))) a = torch.zeros(1024, dtype=torch.int32) b = torch.ones(1024, dtype=torch.int32) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose(np.ones(1024), x.numpy()) def test_ge(self): def easy(x, y): c = torch.ge(x, y) return c traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024))) aa = np.empty([1024], dtype=np.int32) aa.fill(5) a = torch.from_numpy(aa) b = torch.zeros(1024, dtype=torch.int32) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose(np.ones(1024), x.numpy()) def test_gt(self): def easy(x, y): c = torch.gt(x, y) return c traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024))) a = torch.ones(1024, dtype=torch.int32) b = torch.zeros(1024, dtype=torch.int32) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose(np.ones(1024), x.numpy()) def test_le(self): def easy(x, y): c = torch.le(x, y) return c traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024))) aa = np.empty([1024], dtype=np.int32) aa.fill(5) a = torch.from_numpy(aa) b = torch.zeros(1024, dtype=torch.int32) x = warmup_and_run_forward(traced, a, b) 
self.assertLastGraphAllFused() np.testing.assert_allclose(np.zeros(1024), x.numpy()) def test_lt(self): def easy(x, y): c = torch.lt(x, y) return c for dev in self.devices: traced = torch.jit.trace(easy, (torch.zeros(1024, device=dev), torch.zeros(1024, device=dev))) a = torch.ones(1024, dtype=torch.int32, device=dev) b = torch.zeros(1024, dtype=torch.int32, device=dev) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() np.testing.assert_allclose(np.zeros(1024), x.cpu().numpy()) @suppress_warnings def test_min_max(self): def test(x, y): return torch.max(torch.min(x, y), torch.tensor([4.0])) traced = torch.jit.trace(test, (torch.zeros(1024), torch.zeros(1024))) a = 8.0 * torch.rand(1024) b = 8.0 * torch.rand(1024) np.testing.assert_allclose( warmup_and_run_forward(traced, a, b), np.maximum(np.minimum(a.numpy(), b.numpy()), [4.0]) ) self.assertLastGraphAllFused() def test_min_max_reduction(self): def test(x): return torch.min(x) + torch.max(x) traced = torch.jit.trace(test, (torch.zeros(1024))) a = 8.0 * torch.rand(1024) np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin(a.numpy()) + np.amax(a.numpy())) self.assertLastGraphAllFused() def test_min_max_reduction2(self): def test(x): return x.min() + x.max() traced = torch.jit.trace(test, (torch.zeros(1024))) a = 8.0 * torch.rand(1024) np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin(a.numpy()) + np.amax(a.numpy())) self.assertLastGraphAllFused() def test_min_max_reduction_dim1(self): def test(x): return torch.min(x, 1)[0] + torch.max(x, 1)[0] traced = torch.jit.trace(test, (torch.zeros(16, 16))) a = 8.0 * torch.rand(16, 16) np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin( a.numpy(), axis=1) + np.amax(a.numpy(), axis=1)) self.assertLastGraphAllFused() def test_min_max_reduction_dim1_2(self): def test(x): return torch.min(x * x, 1) traced = torch.jit.trace(test, (torch.zeros(16, 16))) a = 8.0 * torch.rand(16, 16) np.testing.assert_allclose(warmup_and_run_forward(traced, a)[0], np.amin((a * a).numpy(), axis=1)) self.assertLastGraphAllFused() def test_clamp(self): def test(x): return torch.clamp(x + 3.0, 0.0, 6.0) for dev in self.devices: traced = torch.jit.trace(test, (torch.zeros(1024, device=dev))) a = 20.0 * torch.rand(1024, device=dev) - 10.0 an = a.cpu().numpy() np.testing.assert_allclose(warmup_and_run_forward(traced, a).cpu(), np.clip(an + 3.0, 0.0, 6.0)) self.assertLastGraphAllFused() def test_relu(self): def test(x): return torch.clamp(F.relu(x), 0, 0.5) for dev in self.devices: traced = torch.jit.trace(test, (torch.zeros(1024, device=dev))) a = 20.0 * torch.rand(1024, device=dev) - 10.0 an = a.cpu().numpy() np.testing.assert_allclose(warmup_and_run_forward(traced, a).cpu(), np.clip((np.maximum(0, an)), 0, 0.5)) self.assertLastGraphAllFused() def test_reps(self): def easy(x, y): c = torch.add(x, y) return c traced = torch.jit.trace(easy, (torch.rand(1024), torch.rand(1024))) for _ in range(32): a = torch.ones(1024) b = torch.zeros(1024) x = warmup_and_run_forward(traced, a, b) np.testing.assert_allclose(np.ones(1024), x.numpy()) def test_add_const_rhs(self): def test(x): return x + 3.0 traced = torch.jit.trace(test, torch.rand(4)) x = torch.rand(4) y = warmup_and_run_forward(traced, x) self.assertLastGraphAllFused() np.testing.assert_allclose(x.numpy() + 3.0, y.numpy()) def test_int_output(self): def test(x, y, z): return x * y * z xs = [(torch.rand(4) * 3 + 1).to(torch.int32) for i in range(3)] x, y, z = xs xn, yn, zn = (t.numpy() for t in xs) traced = 
torch.jit.trace(test, (x, y, z)) res = warmup_and_run_forward(traced, x, y, z) self.assertLastGraphAllFused() np.testing.assert_allclose(xn * yn * zn, res.numpy()) def test_binary_ops(self): def test_atan2(x, y): c = torch.atan2(torch.add(x, y), y) return c def test_gt(x, y): c = torch.gt(torch.add(x, y), y) return c def test_ge(x, y): c = torch.ge(torch.add(x, y), y) return c def test_lt(x, y): c = torch.lt(torch.add(x, y), y) return c def test_le(x, y): c = torch.le(torch.add(x, y), y) return c def test_lerp(x, y): c = torch.lerp(torch.add(x, 1), x, 2.0) return c def test_mul(x, y): c = torch.mul(torch.add(x, y), y) return c def test_ne(x, y): c = torch.ne(torch.add(x, y), y) return c def test_div(x, y): c = torch.div(torch.add(x, y), 2) return c def test_eq(x, y): c = torch.eq(torch.add(x, y), y) return c def test_fmod(x, y): c = torch.fmod(torch.add(x, y), 2) return c def test_sub(x, y): c = torch.sub(torch.add(x, y), x) return c def test_remainder(x, y): c = torch.remainder(torch.add(x, y), 3.0) return c def test_pow(x, y): c = torch.pow(torch.add(x, y), 2.0) return c def test_type_as(x, y): return x.type_as(torch.add(x, y)) cmp_fns = { test_gt, test_ge, test_lt, test_le, test_ne, test_eq } non_cmp_fns = { test_atan2, test_lerp, test_mul, test_div, test_fmod, test_sub, test_remainder, test_pow, test_type_as, } all_test_fns = cmp_fns.union(non_cmp_fns) fn_dev_dtype = itertools.product(all_test_fns, self.devices, self.dtypes) for torch_fn, dev, data_type in fn_dev_dtype: if torch_fn is test_lerp and data_type is torch.bfloat16: continue rand_a = torch.rand(1024, dtype=data_type, device=dev) rand_b = torch.rand(1024, dtype=data_type, device=dev) in1 = 20 * torch.rand(1024, dtype=data_type, device=dev) in2 = 20 * torch.rand(1024, dtype=data_type, device=dev) traced = torch.jit.trace(torch_fn, (in1, in2)) x = warmup_and_run_forward(traced, rand_a, rand_b) self.assertLastGraphAllFused() _atol = 2e-3 _rtol = 1e-5 if data_type is torch.bfloat16: # Compared to aten logic, NNC could save additional BF16/Fp32 conversion. # Take d = a + b - c as an example, the aten logic is as follows at # operator level: # tmp = to_bf16(to_fp32(a) + to_fp32(b)) # d = to_bf16(to_fp32(tmp) + to_fp32(c)) # But NNC could fuse the compression and remove the redundant conversions. # The final statement is as follows # d = to_bf16(to_fp32(a) + to_fp32(b) + to_fp32(c)) # Hence, we simulate NNC computation by feeding fp32 tensors and converting # the result tensor back to bf16. 
The simulation could avoid the numeric # deviation to simplify the result comparison y = warmup_and_run_forward(traced, rand_a.float(), rand_b.float()) if torch_fn not in cmp_fns: y = y.bfloat16() _atol = 2e-2 else: y = torch_fn(rand_a, rand_b) self.assertEqual(x.cpu(), y.cpu(), atol=_atol, rtol=_rtol) def test_unary_ops(self): def test_cast_float(x, y): c = torch.ops.aten._cast_Float(torch.add(x, y)) return c def test_round(x, y): c = torch.round(torch.add(x, y)) return c def test_sin(x, y): c = torch.sin(torch.add(x, y)) return c def test_asin(x, y): c = torch.asin(torch.add(x, y)) return c def test_sinh(x, y): c = torch.sinh(torch.add(x, y)) return c def test_cos(x, y): c = torch.cos(torch.add(x, y)) return c def test_acos(x, y): c = torch.acos(torch.add(x, y)) return c def test_cosh(x, y): c = torch.cosh(torch.add(x, y)) return c def test_tan(x, y): c = torch.tan(torch.add(x, y)) return c def test_atan(x, y): c = torch.atan(torch.add(x, y)) return c def test_tanh(x, y): c = torch.tanh(torch.add(x, y)) return c def test_sqrt(x, y): c = torch.sqrt(torch.add(x, y)) return c def test_rsqrt(x, y): c = torch.rsqrt(torch.add(x, y)) return c def test_floor(x, y): c = torch.floor(torch.add(x, y)) return c def test_ceil(x, y): c = torch.ceil(torch.add(x, y)) return c def test_trunc(x, y): c = torch.trunc(torch.add(x, y)) return c def test_abs(x, y): c = torch.abs(torch.add(x, y)) return c def test_log(x, y): c = torch.log(torch.add(x, y)) return c def test_log2(x, y): c = torch.log2(torch.add(x, y)) return c def test_log10(x, y): c = torch.log10(torch.add(x, y)) return c def test_log1p(x, y): c = torch.log1p(torch.add(x, y)) return c def test_rqrt(x, y): c = torch.rsqrt(torch.add(x, y)) return c def test_erf(x, y): c = torch.erf(torch.add(x, y)) return c def test_exp(x, y): c = torch.exp(torch.add(x, y)) return c def test_expm1(x, y): c = torch.expm1(torch.add(x, y)) return c def test_erfc(x, y): c = torch.erfc(torch.add(x, y)) return c def test_frac(x, y): c = torch.frac(torch.add(x, y)) return c def test_lgamma(x, y): c = torch.lgamma(torch.add(x, y)) return c def test_sigmoid(x, y): c = torch.sigmoid(torch.add(x, y)) return c def test_reciprocal(x, y): c = torch.reciprocal(torch.add(x, y)) return c def test_neg(x, y): c = torch.neg(torch.add(x, y)) return c def test_relu(x, y): c = torch.relu(torch.add(x, y)) return c def test_hardtanh(x, y): c = F.hardtanh(torch.add(x, y), -1.0, 1.0) return c def test_threshold(x, y): c = F.threshold(torch.add(x, y), 0.5, 10) return c gpu_only_fns = { test_erf, test_erfc } fns = { test_round, test_sin, test_asin, test_sinh, test_cos, test_acos, test_cosh, test_tan, test_atan, test_sqrt, test_floor, test_ceil, test_trunc, test_abs, test_log, test_log2, test_log10, test_log1p, test_rsqrt, test_exp, test_expm1, test_frac, test_lgamma, test_reciprocal, test_neg, test_threshold, test_relu, test_tanh, test_hardtanh, test_sigmoid, } fn_dev_dtype = itertools.product(gpu_only_fns.union(fns), self.devices, self.dtypes) torch.manual_seed(0) for torch_fn, dev, data_type in fn_dev_dtype: if torch_fn is test_lgamma and dev == "cuda": # lgamma_cuda does not support BF16 continue rand_a = torch.rand(1024, dtype=data_type, device=dev) rand_b = torch.rand(1024, dtype=data_type, device=dev) ins = 20 * torch.rand(1024, dtype=data_type, device=dev) cc = np.empty([1024], dtype=np.float32) cc.fill(np.nan) nans = torch.from_numpy(cc).to(dev) traced = torch.jit.trace(torch_fn, (ins, ins)) x = warmup_and_run_forward(traced, rand_a, rand_b) self.assertLastGraphAllFused() _atol = 5e-3 
if data_type is torch.bfloat16 else 2e-3 _rtol = 1e-5 if data_type is torch.bfloat16 and torch_fn not in gpu_only_fns: y = warmup_and_run_forward(traced, rand_a.float(), rand_b.float()) y = y.bfloat16() else: y = torch_fn(rand_a, rand_b) self.assertEqual(x.cpu(), y.cpu(), atol=_atol, rtol=_rtol) # nans # TODO: reenable. Currently all of the tests fail # traced = torch.jit.trace(torch_fn, (ins, ins)) # x = warmup_and_run_forward(traced, rand_a, rand_b) # y = torch_fn(nans, rand_b) # try: # np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy()) # print("Succeeded on dev=", dev, "function=", torch_fn) # except AssertionError: # # Print extra info before exiting: # print("Failed on dev=", dev, "function=", torch_fn) # # np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy()) def test_round_2(self): def round(x): return torch.round(x) for data_type in [torch.float32, torch.double]: a = torch.tensor([0.2, 1.6, 2.5, 3.5]).to(data_type) traced = torch.jit.trace(round, (a)) x = warmup_and_run_forward(traced, a) self.assertLastGraphAllFused() y = round(x) self.assertEqual(x, y) def test_rand_like(self): N = 1 << 16 def run_rand_like(x, y): return torch.rand_like(torch.add(x, y)) for device in self.devices: x = torch.rand(N, device=device) traced = torch.jit.trace(run_rand_like, (x, x), check_trace=False) for data_type in self.dtypes: _x = x.to(dtype=data_type) x_v = warmup_and_run_forward(traced, _x, _x) self.assertLastGraphAllFused() x_np = x.cpu().numpy() x1_mean = np.mean(x_np) x2_mean = np.mean(x_np ** 2) x3_mean = np.mean(x_np ** 3) np.testing.assert_allclose(x1_mean, 1. / 2, rtol=2e-2) np.testing.assert_allclose(x2_mean, 1. / 3, rtol=2e-2) np.testing.assert_allclose(x3_mean, 1. / 4, rtol=2e-2) def test_nans(self): def test_max(x, y): return torch.max(2 * x, 2 * y) def test_min(x, y): return torch.min(2 * x, 2 * y) tmax = torch.jit.trace(test_max, (torch.rand(1), torch.rand(1))) tmin = torch.jit.trace(test_min, (torch.rand(1), torch.rand(1))) for data_type in self.dtypes: x = torch.tensor([np.nan]).to(dtype=data_type) y = torch.tensor([1.0]).to(dtype=data_type) assert np.isnan(warmup_and_run_forward(tmin, x, y).float().item()) assert np.isnan(warmup_and_run_forward(tmin, y, x).float().item()) self.assertLastGraphAllFused() assert np.isnan(warmup_and_run_forward(tmax, x, y).float().item()) assert np.isnan(warmup_and_run_forward(tmax, y, x).float().item()) self.assertLastGraphAllFused() def test_double_intrinsics(self): def do_pow(x): return torch.pow(x, 7) for device in self.devices: x = torch.rand(10, dtype=torch.double, device=device) traced = torch.jit.trace(do_pow, (x)) x = warmup_and_run_forward(traced, x) self.assertLastGraphAllFused() def test_remainder(self): def run_remainder(x, y): c = torch.remainder(torch.add(x, y), x) return c for data_type in self.dtypes: a = torch.rand(1024, dtype=data_type) b = torch.rand(1024, dtype=data_type) zeros = torch.zeros(1024, dtype=data_type) cc = np.array(1024, dtype=float) cc.fill(np.nan) nans = torch.from_numpy(cc).to(dtype=data_type) # random floats zeros1 = torch.zeros(1024, dtype=data_type) zeros2 = torch.zeros(1024, dtype=data_type) traced = torch.jit.trace(run_remainder, (zeros1, zeros2)) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() y = run_remainder(a, b) if data_type is torch.bfloat16: self.assertEqual(x, y, atol=4e-3, rtol=2e-3) else: self.assertEqual(x, y) # div by 0 traced = torch.jit.trace(run_remainder, (zeros1, zeros2)) x = warmup_and_run_forward(traced, zeros, a) self.assertLastGraphAllFused() y = 
run_remainder(zeros, a) self.assertEqual(x, y) # numerators and denominatos are nan traced = torch.jit.trace(run_remainder, (zeros1, zeros2)) x = warmup_and_run_forward(traced, nans, a) self.assertLastGraphAllFused() y = run_remainder(nans, a) self.assertEqual(x, y) def test_multioutput(self): def easy(x): b = x + 1 c = b + b return (b, c) traced = torch.jit.trace(easy, (torch.zeros(1024))) a = torch.zeros(1024) b, c = warmup_and_run_forward(traced, a) self.assertLastGraphAllFused() bp = a.numpy() + 1 cp = bp + bp np.testing.assert_allclose(b.numpy(), bp) np.testing.assert_allclose(c.numpy(), cp) def test_chunk(self): def easy(x): y = x + 1 aaa, bbb = torch.chunk(y, 2) return aaa + bbb for data_type in self.dtypes: trace_input = torch.zeros(1024, 1024, dtype=data_type) traced = torch.jit.trace(easy, (trace_input)) a = torch.zeros(32, 32, dtype=data_type) x = warmup_and_run_forward(traced, a) self.assertLastGraphAllFused() npr = a.float().numpy() npr2 = npr + 1 npr_a, npr_b = np.array_split(npr2, 2) np.testing.assert_allclose(npr_a + npr_b, x.float().numpy()) def test_cat(self): for device in self.devices: _dim = 1 def foo(*args): args_2 = [v + i for i, v in enumerate(args)] v = torch.cat(args_2, dim=_dim) return v * v for data_type in self.dtypes: M = 16 Ns = [128, 16, 1] values = [torch.zeros(M, N, dtype=data_type, device=device) for N in Ns] traced = torch.jit.trace(foo, values) x = warmup_and_run_forward(traced, *values) self.assertLastGraphAllFused() ref = foo(*values) np.testing.assert_allclose(ref.cpu().float().numpy(), x.cpu().float().numpy()) # Test channels-last for _cur_dim in range(4): _dim = _cur_dim values = [torch.randn((2, 3, 4, 5), device=device).to(memory_format=torch.channels_last) for _ in range(10)] traced = torch.jit.trace(foo, values) x = warmup_and_run_forward(traced, *values) self.assertLastGraphAllFused() ref = foo(*values) self.assertEqual(ref, x) # This test checks that we correctly handle fusion group with just aten::cat in it. # Note that the test only makes sense with min_fusion_group=1, otherwise no # fusion groups would be formed at all. # TODO: Fix and re-enable the test. 
@unittest.skip("cat is broken with fusion group inlining disabled") def test_cat_only(self): for device in self.devices: def foo(*args): args_2 = [v + i for i, v in enumerate(args)] v = torch.cat(args_2, dim=1) return v M = 16 Ns = [128, 16, 1] values = [torch.zeros(M, N, device=device) for N in Ns] traced = torch.jit.trace(foo, values) x = warmup_and_run_forward(traced, *values) self.assertLastGraphAllFused() ref = foo(*values) np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy()) def test_cat_negative_dim(self): for device in self.devices: def foo(*args): v = torch.cat(args, dim=-1) return v * v M = 16 Ns = [128, 16, 1] values = [torch.randn(M, N, device=device) for N in Ns] traced = torch.jit.trace(foo, values) x = warmup_and_run_forward(traced, *values) self.assertLastGraphAllFused() ref = foo(*values) np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy()) def test_cat_promote_inputs(self): for device in self.devices: def foo(*args): v = torch.cat(args, dim=1) return v * v M = 16 Ns = [128, 16, 1] dtypes = [torch.half, torch.float32, torch.double] values = [torch.randn(M, N, device=device, dtype=dt) for N, dt in zip(Ns, dtypes)] traced = torch.jit.trace(foo, values) x = warmup_and_run_forward(traced, *values) self.assertLastGraphAllFused() ref = foo(*values) np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy()) def test_cat_empty_tensors(self): for device in self.devices: def foo(*args): v = torch.cat(args, dim=1) return v * v M = 16 Ns = [128, 16, 1] empty = torch.tensor([], device=device, dtype=torch.double) values = [empty] + [torch.randn(M, N, device=device) for N in Ns] traced = torch.jit.trace(foo, values) x = warmup_and_run_forward(traced, *values) self.assertLastGraphAllFused() ref = foo(*values) np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy()) # now test with only empty tensors values = [empty for i in range(3)] traced = torch.jit.trace(foo, values) x = warmup_and_run_forward(traced, *values) self.assertLastGraphAllFused() ref = foo(*values) np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy()) def test_cat_with_constant_dim(self): for device in self.devices: def foo(*args): v1 = torch.cat(args, dim=1) v2 = torch.cat([v1], dim=1) return v2 * v2 empty = torch.tensor([], device=device, dtype=torch.float32) inputs = [empty] + [torch.randn(1, 64, device=device), torch.randn(1, 64, device=device)] traced = torch.jit.trace(foo, inputs) x = warmup_and_run_forward(traced, *inputs) self.assertLastGraphAllFused() ref = foo(*inputs) np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy()) def test_scalar(self): @torch.jit.script def test_float(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: float, b: float) -> torch.Tensor: return torch.add(torch.add(x, y, alpha=a), z, alpha=b) @torch.jit.script def test_int(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: int, b: int) -> torch.Tensor: return torch.add(torch.add(x, y, alpha=a), z, alpha=b) for test in (test_float, test_int): for data_type in self.dtypes: x, y, z = (torch.rand(4, dtype=data_type) for i in range(3)) a, b = 1, 2 test(x, y, z, a, b) r = test(x, y, z, a, b) self.assertEqual(r, x + y * a + z * b) def test_loop(self): @torch.jit.script def test(x: torch.Tensor, y: torch.Tensor, z: int) -> torch.Tensor: b = y for _ in range(z): a = x + y b = b + y return b x, y, z = (torch.zeros(32, 32), torch.ones(32, 32), 4) test(x, y, z) r = test(x, y, z) def test_slice(self): def easy(x, y): a = x[0:512:2] b = y[0:512:2] return a + b traced = torch.jit.trace(easy, 
(torch.ones(1024, 1024), torch.zeros(1024, 1024))) a = torch.ones(1024, 1024) x = traced(a, a) npr = a[0:512:2] npr = npr + npr np.testing.assert_allclose(npr.numpy(), x.numpy()) def test_unsqueeze(self, N=256): def easy(x, y): a = torch.unsqueeze(x, 0) b = torch.unsqueeze(y, 0) return a + b traced = torch.jit.trace(easy, (torch.ones(N, N), torch.zeros(N, N))) a = torch.rand(N, N) x = traced(a, a) npr = np.expand_dims(a, 0) npr = npr + npr np.testing.assert_allclose(npr, x.numpy()) def _test_softmax(self, device): def test_softmax(x, y): a = F.softmax(x, dim=0, dtype=torch.float32) b = F.softmax(y, dim=0, dtype=torch.float32) c = F.softmax(x, dim=1, dtype=torch.float32) d = F.softmax(y, dim=1, dtype=torch.float32) return a + b + c + d def test_softmax_neg_index(x, y): a = F.softmax(x, dim=-2, dtype=torch.float32) b = F.softmax(y, dim=-2, dtype=torch.float32) c = F.softmax(x, dim=-1, dtype=torch.float32) d = F.softmax(y, dim=-1, dtype=torch.float32) return a + b + c + d def test_log_softmax(x, y): a = F.log_softmax(x, dim=0, dtype=torch.float32) b = F.log_softmax(y, dim=0, dtype=torch.float32) c = F.log_softmax(x, dim=1, dtype=torch.float32) d = F.log_softmax(y, dim=1, dtype=torch.float32) return a + b + c + d for test in (test_softmax, test_log_softmax, test_softmax_neg_index): for data_type in self.dtypes: old = torch._C._jit_set_texpr_reductions_enabled(True) traced_input = torch.randn(2, 3, dtype=data_type, device=device) traced = torch.jit.trace(test, (traced_input, traced_input)) inp = torch.randn(2, 3, dtype=data_type, device=device) res = traced(inp, inp) # Use eager mode as reference. ref = test(inp, inp) np.testing.assert_allclose(ref, res.cpu().numpy(), rtol=1e-06, atol=1e-06) torch._C._jit_set_texpr_reductions_enabled(old) def test_softmax_cpu(self): self._test_softmax('cpu') @unittest.skipIf(not torch.cuda.is_available(), "requires CUDA") @unittest.skip("global allocs are not supported yet.") def test_softmax_cuda(self): self._test_softmax('cuda') def test_half_gelu(self): devices = ["cuda"] if torch.cuda.is_available() else [] @torch.jit.script def bias_gelu(bias, y): x = bias + y return x * 0.5 * (1.0 + torch.erf(x / 1.41421)) for device in devices: a = torch.rand(1024, dtype=torch.half, device=device) b = torch.rand(1024, dtype=torch.half, device=device) traced = torch.jit.trace(bias_gelu, (a, b)) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() def test_half_bn_relu(self): devices = ["cuda"] if torch.cuda.is_available() else [] def foo(a, b, c): y = torch.nn.functional.batch_norm(a, b, c) z = y.relu() return z for device in devices: a = torch.rand(16, 16, dtype=torch.half, device=device) b = torch.rand(16, dtype=torch.half, device=device) c = torch.rand(16, dtype=torch.half, device=device) traced = torch.jit.trace(foo, (a, b, c)) print(traced.graph) x = warmup_and_run_forward(traced, a, b, c) self.assertLastGraphAllFused() def test_exp_pow(self): @torch.jit.script def do_exp(x, y, z): return ((x * y) * 2) * torch.pow(z, 2) for device in self.devices: x = torch.rand(10, dtype=torch.double, device=device) y = torch.rand(10, dtype=torch.double, device=device) z = torch.rand(10, dtype=torch.double, device=device) traced = torch.jit.trace(do_exp, (x, y, z)) x = warmup_and_run_forward(traced, x, y, z) self.assertLastGraphAllFused() def test_sin_pow(self): def test(x): return torch.sin(torch.pow(x, 0)) for data_type, shape in itertools.product(self.dtypes, [[3], [5], [10]]): x = torch.rand(shape, dtype=data_type) scripted = torch.jit.script(test) out = 
warmup_and_run_forward(scripted, x) self.assertLastGraphAllFused() self.assertEqual(out, test(x)) def test_transpose(self): @torch.jit.script def test(x, y, z): return x.transpose(0, 1) + y + z x = torch.rand(4, 5, 2, 3) y = torch.rand(5, 4, 2, 3) z = torch.rand(5, 4, 2, 3) ref = test(x, y, z) res = test(x, y, z) np.testing.assert_allclose(ref.numpy(), res.numpy()) def test_sliced_stride(self): @torch.jit.script def test(x, y, z): return x + y + z x = torch.rand(16, 4, 2, 3)[::2] y = torch.rand(8, 4, 2, 3) z = torch.rand(8, 4, 2, 3) ref = test(x, y, z) res = test(x, y, z) np.testing.assert_allclose(ref.numpy(), res.numpy()) @unittest.skip("dynamic shapes are not quite there yet") @unittest.skipIf(not torch.cuda.is_available(), "requires CUDA") def test_dynamic_shape(self): with num_profiled_runs(2): @torch.jit.script def test(x, y, z): return x * y * z x, y, z = (torch.rand(4, 8).cuda() for _ in range(3)) ref = test(x, y, z) _ = test(*[torch.rand(6, 8).cuda() for _ in range(3)]) res = test(x, y, z) np.testing.assert_allclose(ref.cpu().numpy(), res.cpu().numpy()) # A wild broadcast appears. x = torch.rand(4, 8).cuda() y = torch.rand(1, 8).cuda() z = torch.rand(4, 1).cuda() res = test(x, y, z) xn, yn, zn = (t.cpu().numpy() for t in (x, y, z)) np.testing.assert_allclose(res.cpu().numpy(), xn * yn * zn) # Mismatched shapes shouldn't reach codegen. x = torch.rand(4, 8).cuda() y = torch.rand(4, 8).cuda() z = torch.rand(5, 8).cuda() try: res = test(x, y, z) except RuntimeError as e: assert "The size of tensor a (4) must match" in e.args[0] # Changing a static dimension fails guards. # x, y, z = [torch.rand(4, 7).cuda() for _ in range(3)] # xn, yn, zn = [t.cpu().numpy() for t in (x, y, z)] # res = test(x, y, z) # print(test.graph_for(x, y, z)) # np.testing.assert_allclose(res.cpu().numpy(), xn * yn * zn) @unittest.skipIf(not torch.cuda.is_available(), "requires CUDA") def test_guard_fails(self): @torch.jit.script def test(x, y, z): return x * y * z r1 = test(*[torch.rand(4).cuda() for _ in range(3)]) r2 = test(*[torch.rand(4).cuda() for _ in range(3)]) r3 = test(*[torch.rand(4).cuda() for _ in range(3)]) r4 = test(*[torch.rand(7).cuda() for _ in range(3)]) def test_bitwise_ops(self): def run_and(x, y): return x & (x & y) def run_or(x, y): return x & (x | y) def run_xor(x, y): return x ^ (x ^ y) def run_lshift(x, y): return x & (x << y) def run_rshift(x, y): return x & (x >> y) fns = {run_and, run_or, run_xor, run_lshift, run_rshift} for device in self.devices: for fn in fns: a = torch.ones(128, dtype=torch.int32, device=device) b = torch.zeros(128, dtype=torch.int32, device=device) inp = torch.ones(128, dtype=torch.int32, device=device) traced = torch.jit.trace(fn, (inp, inp)) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() y = fn(a, b) np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy()) def test_where(self): def run_where(x, y): return torch.where(torch.gt(x, y), x, y) for data_type in self.dtypes: a = torch.rand(1024, dtype=data_type) b = torch.rand(1024, dtype=data_type) zeros = torch.zeros(1024, dtype=data_type) traced = torch.jit.trace(run_where, (zeros, zeros)) x = warmup_and_run_forward(traced, a, b) self.assertLastGraphAllFused() y = run_where(a, b) np.testing.assert_allclose(x.float().numpy(), y.float().numpy()) def test_multi_rand(self): for device in self.devices: def test(x): y = torch.rand_like(x) return (x + y) - (y - x) _atol = 2e-3 _rtol = 1e-5 for data_type in self.dtypes: if data_type is torch.bfloat16: _atol = 2e-2 a = torch.rand(4, 
dtype=data_type, device=device) scripted = torch.jit.script(test) out = warmup_and_run_forward(scripted, a) self.assertLastGraphAllFused() assert torch.allclose(out, 2 * a, atol=_atol, rtol=_rtol) def test_mask(self): def test(x): return x.unsqueeze(1) == 0 for d in self.devices: for data_type in self.dtypes: x = torch.rand(4, dtype=data_type, device=d) > 0.5 scripted = torch.jit.script(test) out = warmup_and_run_forward(scripted, x) self.assertLastGraphAllFused() assert torch.equal(out, test(x)) def test_simple_add(self): val = torch._C._jit_get_te_generate_block_code() torch._C._jit_set_te_generate_block_code(True) fall_bk = torch._C._jit_texpr_fallback_allowed() torch._C._jit_texpr_set_fallback_allowed(True) def simple(a, b): return torch.add(a, b) a = torch.ones(256, 256) b = torch.ones(256, 256) traced = torch.jit.trace(simple, (torch.ones(256, 256), torch.ones(256, 256))) f = traced(a, b) f_test = np.full((256, 256), 2, dtype=float) np.testing.assert_allclose(f.numpy(), f_test) torch._C._jit_set_te_generate_block_code(val) torch._C._jit_texpr_set_fallback_allowed(fall_bk) def test_strided_output_preserved(self): def foo(a, b): return a + b - a # smaller, easier to debug example x = torch.arange(6) x = torch.as_strided(x, (2, 3), (1, 2)) total = 0 for i in range(2): for j in range(3): x[i, j] = total total += 1 foo_script = torch.jit.script(foo) foo_script(x, x) foo_script(x, x) out_s = foo_script(x, x) out_eager = foo(x, x) self.assertEqual(out_s, out_eager) self.assertEqual(out_s.stride(), out_eager.stride()) self.assertLastGraphAllFused() # more dims N, C, H, W, = 2, 3, 4, 5 x = torch.rand(N, C, H, W).to(memory_format=torch.channels_last) foo_script = torch.jit.script(foo) foo_script(x, x) foo_script(x, x) out_s = foo_script(x, x) out_eager = foo(x, x) self.assertEqual(out_s, out_eager) self.assertEqual(out_s.stride(), out_eager.stride()) self.assertLastGraphAllFused() def test_alias_analysis_module(self): class AliasModule(nn.Module): def __init__(self) -> None: super().__init__() torch.manual_seed(1337) self.a = torch.randn(128, 128) self.b = torch.randn(128, 128) self.c = torch.randn(128, 128) def forward(self, x, y, z): z = z + self.a self.b.add_(y) w = z + self.a z = w + x return z x = torch.randn(128, 128) def getModule(script): am = AliasModule() if script: return torch.jit.script(am) return am am = getModule(False) am_s = getModule(True) ref = am(x, x, x) test = am_s(x, x, x) torch.testing.assert_close(ref, test) # Now do the aliasing am.a = am.b ref = am(x, x, x) am_s.a = am_s.b test = am_s(x, x, x) torch.testing.assert_close(ref, test) def test_alias_analysis_inputs(self): class AliasModule(nn.Module): def __init__(self) -> None: super().__init__() torch.manual_seed(1337) self.a = torch.randn(128, 128) self.b = torch.randn(128, 128) self.c = torch.randn(128, 128) def forward(self, x, y, z): x.add_(y) w = z + self.a z = w + x return z def getModule(script): am = AliasModule() if script: return torch.jit.script(am) return am am = getModule(False) am_s = getModule(True) torch.manual_seed(1337) x = torch.randn(128, 128) ref = am(x, x, x) torch.manual_seed(1337) x = torch.randn(128, 128) test = am_s(x, x, x) torch.testing.assert_close(ref, test) def test_alias_analysis_input_and_module(self): class AliasModule(nn.Module): def __init__(self) -> None: super().__init__() torch.manual_seed(1337) self.a = torch.randn(128, 128) self.b = torch.randn(128, 128) self.c = torch.randn(128, 128) def forward(self, x, y, z): x.add_(y) w = z + self.b z = w + x return z def getModule(script): 
am = AliasModule() if script: return torch.jit.script(am) return am am = getModule(False) am_s = getModule(True) torch.manual_seed(1337) x = torch.randn(128, 128) am.b = x ref = am(x, x, x) torch.manual_seed(1337) x = torch.randn(128, 128) am_s.b = x test = am_s(x, x, x) torch.testing.assert_close(ref, test) def test_multiple_outputs(self): for device in self.devices: # A bug reported internally similar to the one reported in #48533 def foo(a, b, c): t_next = c + 1 t5 = t_next * b t6 = torch.unsqueeze(t_next, 1) t7 = a * t6 return (t7, t5, t_next) for data_type in self.dtypes: a = torch.rand(20, 20, dtype=data_type, device=device) b = torch.rand(20 * 29, dtype=data_type, device=device).as_strided([20], [29]) c = torch.ones(20, dtype=torch.int64, device=device) traced = torch.jit.trace(foo, (a, b, c)) ref = foo(a, b, c) exp = traced(a, b, c) exp = traced(a, b, c) self.assertEqual(ref, exp) def test_propagated_mem_layout(self): def foo(a, b, c): t_next = c + 1 t5 = t_next * b t7 = a * t5 return t7 def foo_multi_outputs(a, b, c): t_next = c + 1 t5 = b * t_next t7 = a * t5 return (t7, t5, t_next) def foo_multi_outputs_i_nhwc_o_nchw(a, b, c): t_next = c + 1 t5 = b * t_next t7 = a * t5 t8 = t7.to(memory_format=torch.contiguous_format) return (t8, t7, t5, t_next) def run_foo_case(foo, a, b, c): traced_contiguous = torch.jit.trace(foo, (a, b, c)) ref = foo(a, b, c) exp = traced_contiguous(a, b, c) exp = traced_contiguous(a, b, c) self.assertEqual(ref, exp) mem_layouts = list(itertools.product([torch.contiguous_format, torch.channels_last], repeat=3)) shapes = [(2, 3, 4, 5), (2, 1, 1, 5), (1, 1, 1, 1)] permutes = [(0, 3, 2, 1), (0, 3, 1, 2)] funcs = [foo, foo_multi_outputs, foo_multi_outputs_i_nhwc_o_nchw] configs = itertools.product(funcs, shapes, mem_layouts, permutes) for strategy in ["STATIC", "DYNAMIC"]: old_strategy = torch.jit.set_fusion_strategy([(strategy, 10)]) for _func, _shape, _mem_layouts, _permute in configs: a = torch.rand(_shape, dtype=torch.float32).to(memory_format=_mem_layouts[0]) b = torch.rand(_shape, dtype=torch.float32).to(memory_format=_mem_layouts[1]) c = torch.rand(_shape, dtype=torch.float32).to(memory_format=_mem_layouts[2]) run_foo_case(_func, a, b, c) a = a.permute(dims=_permute) b = b.permute(dims=_permute) c = c.permute(dims=_permute) run_foo_case(_func, a, b, c) torch.jit.set_fusion_strategy(old_strategy) if __name__ == '__main__': run_tests()
TestTensorExprFuser
python
nedbat__coveragepy
coverage/cmdline.py
{ "start": 10323, "end": 12325 }
class ____(optparse.OptionParser):
    """Base OptionParser for coverage.py.

    Problems don't exit the program.
    Defaults are initialized for all options.

    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        kwargs["add_help_option"] = False
        super().__init__(*args, **kwargs)
        self.set_defaults(
            # Keep these arguments alphabetized by their names.
            action=None,
            append=None,
            branch=None,
            concurrency=None,
            context=None,
            contexts=None,
            data_file=None,
            debug=None,
            directory=None,
            fail_under=None,
            format=None,
            help=None,
            ignore_errors=None,
            include=None,
            keep=None,
            module=None,
            omit=None,
            parallel_mode=None,
            precision=None,
            pylib=None,
            quiet=None,
            rcfile=True,
            save_signal=None,
            show_contexts=None,
            show_missing=None,
            skip_covered=None,
            skip_empty=None,
            sort=None,
            source=None,
            timid=None,
            title=None,
            version=None,
        )

        self.disable_interspersed_args()

    class OptionParserError(Exception):
        """Used to stop the optparse error handler ending the process."""
        pass

    def parse_args_ok(self, args: list[str]) -> tuple[bool, optparse.Values | None, list[str]]:
        """Call optparse.parse_args, but return a triple:

        (ok, options, args)

        """
        try:
            options, args = super().parse_args(args)
        except self.OptionParserError:
            return False, None, []
        return True, options, args

    def error(self, msg: str) -> NoReturn:
        """Override optparse.error so sys.exit doesn't get called."""
        show_help(msg)
        raise self.OptionParserError
CoverageOptionParser
python
django-compressor__django-compressor
compressor/tests/test_offline.py
{ "start": 14671, "end": 15211 }
class ____( SuperMixin, OfflineTestCaseMixin, TestCase ): templates_dir = "test_block_super_multiple_cached" expected_hash = "055f88f4751f" additional_test_settings = { "TEMPLATE_LOADERS": ( ( "django.template.loaders.cached.Loader", ( "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", ), ), ) }
OfflineCompressBlockSuperMultipleCachedLoaderTestCase
python
doocs__leetcode
solution/0500-0599/0553.Optimal Division/Solution.py
{ "start": 0, "end": 266 }
class ____: def optimalDivision(self, nums: List[int]) -> str: n = len(nums) if n == 1: return str(nums[0]) if n == 2: return f'{nums[0]}/{nums[1]}' return f'{nums[0]}/({"/".join(map(str, nums[1:]))})'
Solution
python
neetcode-gh__leetcode
python/0665-non-decreasing-array.py
{ "start": 0, "end": 497 }
class ____: def checkPossibility(self, nums): if len(nums) <= 2: return True changed = False for i, num in enumerate(nums): if i == len(nums) - 1 or num <= nums[i + 1]: continue if changed: return False if i == 0 or nums[i + 1] >= nums[i - 1]: nums[i] = nums[i + 1] else: nums[i + 1] = nums[i] changed = True return True
Solution
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/methodOverride1.py
{ "start": 13213, "end": 13296 }
class ____(Base7[int]): def method1(self, x: U) -> U: return x
Derived7_2
python
django__django
tests/proxy_models/models.py
{ "start": 1581, "end": 1782 }
class ____(Person, ManagerMixin): """ A class with the default manager from Person, plus a secondary manager. """ class Meta: proxy = True ordering = ["name"]
OtherPerson
python
django__django
tests/admin_views/admin.py
{ "start": 13656, "end": 13864 }
class ____(admin.ModelAdmin): inlines = [ WidgetInline, DooHickeyInline, GrommetInline, WhatsitInline, FancyDoodadInline, CategoryInline, ]
CollectorAdmin
python
mkdocs__mkdocs
mkdocs/tests/localization_tests.py
{ "start": 221, "end": 2645 }
class ____(unittest.TestCase): def setUp(self): self.env = mock.Mock() def test_jinja_extension_installed(self): install_translations(self.env, parse_locale('en'), []) self.env.add_extension.assert_called_once_with('jinja2.ext.i18n') def test_valid_language(self): locale = parse_locale('en') self.assertEqual(locale.language, 'en') def test_valid_language_territory(self): locale = parse_locale('en_US') self.assertEqual(locale.language, 'en') self.assertEqual(locale.territory, 'US') self.assertEqual(str(locale), 'en_US') def test_unknown_locale(self): self.assertRaises(ValidationError, parse_locale, 'foo') def test_invalid_locale(self): self.assertRaises(ValidationError, parse_locale, '42') @tempdir() def test_no_translations_found(self, dir_without_translations): with self.assertLogs('mkdocs') as cm: install_translations(self.env, parse_locale('fr_CA'), [dir_without_translations]) self.assertEqual( '\n'.join(cm.output), "WARNING:mkdocs.localization:No translations could be found for the locale 'fr_CA'. " "Defaulting to English.", ) self.env.install_null_translations.assert_called_once() @tempdir() def test_translations_found(self, tdir): translations = mock.Mock() with mock.patch('mkdocs.localization.Translations.load', return_value=translations): install_translations(self.env, parse_locale('en'), [tdir]) self.env.install_gettext_translations.assert_called_once_with(translations) @tempdir() @tempdir() def test_merge_translations(self, custom_dir, theme_dir): custom_dir_translations = mock.Mock() theme_dir_translations = mock.Mock() def side_effet(*args, **kwargs): dirname = args[0] if dirname.startswith(custom_dir): return custom_dir_translations elif dirname.startswith(theme_dir): return theme_dir_translations else: self.fail() with mock.patch('mkdocs.localization.Translations.load', side_effect=side_effet): install_translations(self.env, parse_locale('en'), [custom_dir, theme_dir]) theme_dir_translations.merge.assert_called_once_with(custom_dir_translations)
LocalizationTests
python
astropy__astropy
astropy/io/fits/column.py
{ "start": 10639, "end": 13192 }
class ____(_BaseColumnFormat): """Similar to _ColumnFormat but specifically for columns in ASCII tables. The formats of ASCII table columns and binary table columns are inherently incompatible in FITS. They don't support the same ranges and types of values, and even reuse format codes in subtly different ways. For example the format code 'Iw' in ASCII columns refers to any integer whose string representation is at most w characters wide, so 'I' can represent effectively any integer that will fit in a FITS columns. Whereas for binary tables 'I' very explicitly refers to a 16-bit signed integer. Conversions between the two column formats can be performed using the ``to/from_binary`` methods on this class, or the ``to/from_ascii`` methods on the `_ColumnFormat` class. But again, not all conversions are possible and may result in a `ValueError`. """ def __new__(cls, format, strict=False): self = super().__new__(cls, format) self.format, self.width, self.precision = _parse_ascii_tformat(format, strict) # If no width has been specified, set the dtype here to default as well if format == self.format: self.recformat = ASCII2NUMPY[format] # This is to support handling logical (boolean) data from binary tables # in an ASCII table self._pseudo_logical = False return self @classmethod def from_column_format(cls, format): inst = cls.from_recformat(format.recformat) # Hack if format.format == "L": inst._pseudo_logical = True return inst @classmethod def from_recformat(cls, recformat): """Creates a column format from a Numpy record dtype format.""" return cls(_convert_ascii_format(recformat, reverse=True)) @lazyproperty def recformat(self): """Returns the equivalent Numpy record format string.""" return _convert_ascii_format(self) @lazyproperty def canonical(self): """ Returns a 'canonical' string representation of this format. This is in the proper form of Tw.d where T is the single character data type code, w is the width in characters for this field, and d is the number of digits after the decimal place (for format codes 'E', 'F', and 'D' only). """ if self.format in ("E", "F", "D"): return f"{self.format}{self.width}.{self.precision}" return f"{self.format}{self.width}"
_AsciiColumnFormat
python
getsentry__sentry
src/sentry/integrations/slack/utils/users.py
{ "start": 875, "end": 3344 }
class ____: email: str team_id: str slack_id: str def format_slack_info_by_email(users: list[dict[str, Any]]) -> dict[str, SlackUserData]: return { member["profile"]["email"]: SlackUserData( email=member["profile"]["email"], team_id=member["team_id"], slack_id=member["id"] ) for member in users if not member["deleted"] and member["profile"].get("email") } def format_slack_data_by_user( emails_by_user: Mapping[User, Iterable[str]], users: list[dict[str, Any]] ) -> Mapping[User, SlackUserData]: slack_info_by_email = format_slack_info_by_email(users) slack_data_by_user: MutableMapping[User, SlackUserData] = {} for user, emails in emails_by_user.items(): # get overlap between user emails and emails in slack user_slack_emails = set(emails) & set(slack_info_by_email.keys()) if user_slack_emails: slack_data_by_user[user] = slack_info_by_email[list(user_slack_emails)[0]] return slack_data_by_user def get_slack_user_list( integration: Integration | RpcIntegration, organization: Organization | RpcOrganization | None = None, kwargs: dict[str, Any] | None = None, ) -> Generator[list[dict[str, Any]]]: sdk_client = SlackSdkClient(integration_id=integration.id) try: users_list = ( sdk_client.users_list(limit=SLACK_GET_USERS_PAGE_SIZE, **kwargs) if kwargs else sdk_client.users_list(limit=SLACK_GET_USERS_PAGE_SIZE) ) metrics.incr(SLACK_UTILS_GET_USER_LIST_SUCCESS_DATADOG_METRIC, sample_rate=1.0) for page in users_list: yield page["members"] except SlackApiError as e: metrics.incr(SLACK_UTILS_GET_USER_LIST_FAILURE_DATADOG_METRIC, sample_rate=1.0) _logger.info( "slack.post_install.get_users.error", extra={ "error": str(e), "organization": organization.slug if organization else None, "integration_id": integration.id, }, ) raise def get_slack_data_by_user( integration: Integration | RpcIntegration, organization: Organization | RpcOrganization, emails_by_user: Mapping[User, Iterable[str]], ) -> Iterable[Mapping[User, SlackUserData]]: all_users = get_slack_user_list(integration, organization) yield from (format_slack_data_by_user(emails_by_user, users) for users in all_users)
SlackUserData
python
huggingface__transformers
src/transformers/models/patchtsmixer/modeling_patchtsmixer.py
{ "start": 77445, "end": 85201 }
class ____(PatchTSMixerPreTrainedModel): def __init__(self, config: PatchTSMixerConfig): super().__init__(config) self.model = PatchTSMixerModel(config) self.loss = config.loss self.distribution_output = config.distribution_output self.use_return_dict = config.use_return_dict self.num_parallel_samples = config.num_parallel_samples if config.loss == "mse": self.distribution_output = None else: distribution_output_map = { "student_t": StudentTOutput, "normal": NormalOutput, "negative_binomial": NegativeBinomialOutput, } output_class = distribution_output_map.get(config.distribution_output) if output_class is not None: self.distribution_output = output_class(dim=config.num_targets) else: raise ValueError(f"Unknown distribution output {config.distribution_output}") if config.scaling in ["std", "mean", True]: self.inject_scale = InjectScalerStatistics4D(d_model=config.d_model, num_patches=config.num_patches) else: self.inject_scale = None self.head = PatchTSMixerLinearHead( config=config, distribution_output=self.distribution_output, ) # Initialize weights and apply final processing if config.post_init: self.post_init() @auto_docstring def forward( self, past_values: torch.Tensor, target_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = False, return_loss: bool = True, return_dict: Optional[bool] = None, ) -> PatchTSMixerForRegressionOutput: r""" past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`): Context values of the time series. For a pretraining task, this denotes the input time series to predict the masked portion. For a forecasting task, this denotes the history/past time series values. Similarly, for classification or regression tasks, it denotes the appropriate context values of the time series. For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series, it is greater than 1. target_values (`torch.FloatTensor` of shape `(batch_size, target_len, num_input_channels)` for forecasting, `(batch_size, num_targets)` for regression, or `(batch_size,)` for classification, *optional*): Target values of the time series, that serve as labels for the model. The `target_values` is what the Transformer needs during training to learn to output, given the `past_values`. Note that, this is NOT required for a pretraining task. For a forecasting task, the shape is be `(batch_size, target_len, num_input_channels)`. Even if we want to forecast only specific channels by setting the indices in `prediction_channel_indices` parameter, pass the target data with all channels, as channel Filtering for both prediction and target will be manually applied before the loss computation. For a classification task, it has a shape of `(batch_size,)`. For a regression task, it has a shape of `(batch_size, num_targets)`. return_loss (`bool`, *optional*): Whether to return the loss in the `forward` call. 
""" if self.loss == "mse": loss = nn.MSELoss(reduction="mean") elif self.loss == "nll": loss = nll else: raise ValueError("Invalid loss function: Allowed values: mse and nll") return_dict = return_dict if return_dict is not None else self.use_return_dict model_output = self.model( past_values, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # model_output: [batch_size x nvars x num_patch x d_model] if isinstance(model_output, tuple): model_output = PatchTSMixerModelOutput(*model_output) if self.inject_scale is not None: model_output.last_hidden_state = self.inject_scale( model_output.last_hidden_state, loc=model_output.loc, scale=model_output.scale, ) # x: [batch_size x nvars x num_patch x d_model] y_hat = self.head(model_output.last_hidden_state) # [batch_size x num_targets] if target_values is not None and return_loss is True: if self.distribution_output: if self.distribution_output == "negative_binomial" and torch.any(target_values < 0): raise Exception("target_values cannot be negative for negative_binomial distribution.") distribution = self.distribution_output.distribution(y_hat) # y_hat should be a 2-tuple, each with dimension [bs, num_targets] y_hat = tuple(item.view(-1, self.config.num_targets) for item in y_hat) loss_val = loss(distribution, target_values) # take average of the loss loss_val = weighted_average(loss_val) else: loss_val = loss(y_hat, target_values) else: loss_val = None if not return_dict: return tuple( v for v in [ loss_val, y_hat, model_output.last_hidden_state, model_output.hidden_states, ] ) return PatchTSMixerForRegressionOutput( loss=loss_val, regression_outputs=y_hat, # tensor [batch_size x num_targets] last_hidden_state=model_output.last_hidden_state, # [batch_size x nvars x num_patch x d_model] hidden_states=model_output.hidden_states, ) @torch.no_grad() def generate( self, past_values: torch.Tensor, ) -> SamplePatchTSMixerRegressionOutput: """ Generate sequences of sample predictions from a model with a probability distribution head. Args: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Past values of the time series that serves as context in order to predict the target values. Return: [`SamplePatchTSMixerRegressionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, num_targets)`. """ # get number of samples num_parallel_samples = self.num_parallel_samples # get model output outputs = self( past_values=past_values, target_values=None, output_hidden_states=False, ) # get distribution distribution = self.distribution_output.distribution(outputs.regression_outputs) # get samples samples = [ distribution.sample() for _ in range(num_parallel_samples) ] # samples: list of [batch_size x num_targets] # stack tensors # [batch_size x num_samples x num_targets] samples = torch.stack(samples, dim=1).view(-1, num_parallel_samples, self.config.num_targets) return SamplePatchTSMixerRegressionOutput(sequences=samples) __all__ = [ "PatchTSMixerPreTrainedModel", "PatchTSMixerModel", "PatchTSMixerForPretraining", "PatchTSMixerForPrediction", "PatchTSMixerForTimeSeriesClassification", "PatchTSMixerForRegression", ]
PatchTSMixerForRegression
python
django__django
django/db/backends/sqlite3/operations.py
{ "start": 616, "end": 16315 }
class ____(BaseDatabaseOperations): cast_char_field_without_max_length = "text" cast_data_types = { "DateField": "TEXT", "DateTimeField": "TEXT", } explain_prefix = "EXPLAIN QUERY PLAN" # List of datatypes to that cannot be extracted with JSON_EXTRACT() on # SQLite. Use JSON_TYPE() instead. jsonfield_datatype_values = frozenset(["null", "false", "true"]) def bulk_batch_size(self, fields, objs): """ SQLite has a variable limit defined by SQLITE_LIMIT_VARIABLE_NUMBER (reflected in max_query_params). """ fields = list( chain.from_iterable( ( field.fields if isinstance(field, models.CompositePrimaryKey) else [field] ) for field in fields ) ) if fields: return self.connection.features.max_query_params // len(fields) else: return len(objs) def check_expression_support(self, expression): bad_fields = (models.DateField, models.DateTimeField, models.TimeField) bad_aggregates = (models.Sum, models.Avg, models.Variance, models.StdDev) if isinstance(expression, bad_aggregates): for expr in expression.get_source_expressions(): try: output_field = expr.output_field except (AttributeError, FieldError): # Not every subexpression has an output_field which is fine # to ignore. pass else: if isinstance(output_field, bad_fields): raise NotSupportedError( "You cannot use Sum, Avg, StdDev, and Variance " "aggregations on date/time fields in sqlite3 " "since date/time is saved as text." ) if ( isinstance(expression, models.Aggregate) and expression.distinct and len(expression.source_expressions) > 1 ): raise NotSupportedError( "SQLite doesn't support DISTINCT on aggregate functions " "accepting multiple arguments." ) def date_extract_sql(self, lookup_type, sql, params): """ Support EXTRACT with a user-defined function django_date_extract() that's registered in connect(). Use single quotes because this is a string and could otherwise cause a collision with a field name. """ return f"django_date_extract(%s, {sql})", (lookup_type.lower(), *params) def format_for_duration_arithmetic(self, sql): """Do nothing since formatting is handled in the custom function.""" return sql def date_trunc_sql(self, lookup_type, sql, params, tzname=None): return f"django_date_trunc(%s, {sql}, %s, %s)", ( lookup_type.lower(), *params, *self._convert_tznames_to_sql(tzname), ) def time_trunc_sql(self, lookup_type, sql, params, tzname=None): return f"django_time_trunc(%s, {sql}, %s, %s)", ( lookup_type.lower(), *params, *self._convert_tznames_to_sql(tzname), ) def _convert_tznames_to_sql(self, tzname): if tzname and settings.USE_TZ: return tzname, self.connection.timezone_name return None, None def datetime_cast_date_sql(self, sql, params, tzname): return f"django_datetime_cast_date({sql}, %s, %s)", ( *params, *self._convert_tznames_to_sql(tzname), ) def datetime_cast_time_sql(self, sql, params, tzname): return f"django_datetime_cast_time({sql}, %s, %s)", ( *params, *self._convert_tznames_to_sql(tzname), ) def datetime_extract_sql(self, lookup_type, sql, params, tzname): return f"django_datetime_extract(%s, {sql}, %s, %s)", ( lookup_type.lower(), *params, *self._convert_tznames_to_sql(tzname), ) def datetime_trunc_sql(self, lookup_type, sql, params, tzname): return f"django_datetime_trunc(%s, {sql}, %s, %s)", ( lookup_type.lower(), *params, *self._convert_tznames_to_sql(tzname), ) def time_extract_sql(self, lookup_type, sql, params): return f"django_time_extract(%s, {sql})", (lookup_type.lower(), *params) def pk_default_value(self): return "NULL" def _quote_params_for_last_executed_query(self, params): """ Only for last_executed_query! 
Don't use this to execute SQL queries! """ connection = self.connection.connection variable_limit = self.connection.features.max_query_params column_limit = connection.getlimit(sqlite3.SQLITE_LIMIT_COLUMN) batch_size = min(variable_limit, column_limit) if len(params) > batch_size: results = () for index in range(0, len(params), batch_size): chunk = params[index : index + batch_size] results += self._quote_params_for_last_executed_query(chunk) return results sql = "SELECT " + ", ".join(["QUOTE(?)"] * len(params)) # Bypass Django's wrappers and use the underlying sqlite3 connection # to avoid logging this query - it would trigger infinite recursion. cursor = self.connection.connection.cursor() # Native sqlite3 cursors cannot be used as context managers. try: return cursor.execute(sql, params).fetchone() finally: cursor.close() def last_executed_query(self, cursor, sql, params): # Python substitutes parameters in Modules/_sqlite/cursor.c with: # bind_parameters(state, self->statement, parameters); # Unfortunately there is no way to reach self->statement from Python, # so we quote and substitute parameters manually. if params: if isinstance(params, (list, tuple)): params = self._quote_params_for_last_executed_query(params) else: values = tuple(params.values()) values = self._quote_params_for_last_executed_query(values) params = dict(zip(params, values)) return sql % params # For consistency with SQLiteCursorWrapper.execute(), just return sql # when there are no parameters. See #13648 and #17158. else: return sql def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def no_limit_value(self): return -1 def __references_graph(self, table_name): query = """ WITH tables AS ( SELECT %s name UNION SELECT sqlite_master.name FROM sqlite_master JOIN tables ON (sql REGEXP %s || tables.name || %s) ) SELECT name FROM tables; """ params = ( table_name, r'(?i)\s+references\s+("|\')?', r'("|\')?\s*\(', ) with self.connection.cursor() as cursor: results = cursor.execute(query, params) return [row[0] for row in results.fetchall()] @cached_property def _references_graph(self): # 512 is large enough to fit the ~330 tables (as of this writing) in # Django's test suite. return lru_cache(maxsize=512)(self.__references_graph) def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): if tables and allow_cascade: # Simulate TRUNCATE CASCADE by recursively collecting the tables # referencing the tables to be flushed. 
tables = set( chain.from_iterable(self._references_graph(table) for table in tables) ) sql = [ "%s %s %s;" % ( style.SQL_KEYWORD("DELETE"), style.SQL_KEYWORD("FROM"), style.SQL_FIELD(self.quote_name(table)), ) for table in tables ] if reset_sequences: sequences = [{"table": table} for table in tables] sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql def sequence_reset_by_name_sql(self, style, sequences): if not sequences: return [] return [ "%s %s %s %s = 0 %s %s %s (%s);" % ( style.SQL_KEYWORD("UPDATE"), style.SQL_TABLE(self.quote_name("sqlite_sequence")), style.SQL_KEYWORD("SET"), style.SQL_FIELD(self.quote_name("seq")), style.SQL_KEYWORD("WHERE"), style.SQL_FIELD(self.quote_name("name")), style.SQL_KEYWORD("IN"), ", ".join( ["'%s'" % sequence_info["table"] for sequence_info in sequences] ), ), ] def adapt_datetimefield_value(self, value): if value is None: return None # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError( "SQLite backend does not support timezone-aware datetimes when " "USE_TZ is False." ) return str(value) def adapt_timefield_value(self, value): if value is None: return None # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): raise ValueError("SQLite backend does not support timezone-aware times.") return str(value) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == "DateTimeField": converters.append(self.convert_datetimefield_value) elif internal_type == "DateField": converters.append(self.convert_datefield_value) elif internal_type == "TimeField": converters.append(self.convert_timefield_value) elif internal_type == "DecimalField": converters.append(self.get_decimalfield_converter(expression)) elif internal_type == "UUIDField": converters.append(self.convert_uuidfield_value) elif internal_type == "BooleanField": converters.append(self.convert_booleanfield_value) return converters def convert_datetimefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.datetime): value = parse_datetime(value) if settings.USE_TZ and not timezone.is_aware(value): value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.date): value = parse_date(value) return value def convert_timefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.time): value = parse_time(value) return value def get_decimalfield_converter(self, expression): # SQLite stores only 15 significant digits. Digits coming from # float inaccuracy must be removed. 
create_decimal = decimal.Context(prec=15).create_decimal_from_float if isinstance(expression, Col): quantize_value = decimal.Decimal(1).scaleb( -expression.output_field.decimal_places ) def converter(value, expression, connection): if value is not None: return create_decimal(value).quantize( quantize_value, context=expression.output_field.context ) else: def converter(value, expression, connection): if value is not None: return create_decimal(value) return converter def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def convert_booleanfield_value(self, value, expression, connection): return bool(value) if value in (1, 0) else value def combine_expression(self, connector, sub_expressions): # SQLite doesn't have a ^ operator, so use the user-defined POWER # function that's registered in connect(). if connector == "^": return "POWER(%s)" % ",".join(sub_expressions) elif connector == "#": return "BITXOR(%s)" % ",".join(sub_expressions) return super().combine_expression(connector, sub_expressions) def combine_duration_expression(self, connector, sub_expressions): if connector not in ["+", "-", "*", "/"]: raise DatabaseError("Invalid connector for timedelta: %s." % connector) fn_params = ["'%s'" % connector, *sub_expressions] if len(fn_params) > 3: raise ValueError("Too many params for timedelta operations.") return "django_format_dtdelta(%s)" % ", ".join(fn_params) def integer_field_range(self, internal_type): # SQLite doesn't enforce any integer constraints, but sqlite3 supports # integers up to 64 bits. if internal_type in [ "PositiveBigIntegerField", "PositiveIntegerField", "PositiveSmallIntegerField", ]: return (0, 9223372036854775807) return (-9223372036854775808, 9223372036854775807) def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) if internal_type == "TimeField": return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), params return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), params def insert_statement(self, on_conflict=None): if on_conflict == OnConflict.IGNORE: return "INSERT OR IGNORE INTO" return super().insert_statement(on_conflict=on_conflict) def on_conflict_suffix_sql(self, fields, on_conflict, update_fields, unique_fields): if ( on_conflict == OnConflict.UPDATE and self.connection.features.supports_update_conflicts_with_target ): return "ON CONFLICT(%s) DO UPDATE SET %s" % ( ", ".join(map(self.quote_name, unique_fields)), ", ".join( [ f"{field} = EXCLUDED.{field}" for field in map(self.quote_name, update_fields) ] ), ) return super().on_conflict_suffix_sql( fields, on_conflict, update_fields, unique_fields, ) def force_group_by(self): return ["GROUP BY TRUE"] if Database.sqlite_version_info < (3, 39) else [] def format_json_path_numeric_index(self, num): return "[#%s]" % num if num < 0 else super().format_json_path_numeric_index(num)
DatabaseOperations
python
joke2k__faker
tests/providers/test_date_time.py
{ "start": 3196, "end": 25018 }
class ____(unittest.TestCase): def setUp(self): self.fake = Faker() Faker.seed(0) def assertBetween(self, date, start_date, end_date): assert date <= end_date assert date >= start_date def test_date(self): date_format = "%Y-%m-%d" date_string = self.fake.date(pattern=date_format) assert isinstance(date_string, str) assert isinstance(datetime.strptime(date_string, date_format), datetime) def test_day(self): day = self.fake.day_of_week() assert isinstance(day, str) def test_month(self): month = self.fake.month() assert isinstance(month, str) def test_past_datetime(self): past_datetime = self.fake.past_datetime() assert past_datetime < datetime.now() def test_past_date(self): past_date = self.fake.past_date() assert past_date < date.today() def test_future_datetime(self): future_datetime, now = self.fake.future_datetime(), datetime.now() assert future_datetime > now def test_future_date(self): future_date = self.fake.future_date() assert future_date > date.today() def test_parse_date_time(self): timestamp = DatetimeProvider._parse_date_time("+30d") now = DatetimeProvider._parse_date_time("now") assert timestamp > now delta = timedelta(days=30) from_delta = DatetimeProvider._parse_date_time(delta) from_int = DatetimeProvider._parse_date_time(timestamp) assert datetime.fromtimestamp(from_delta).date() == (datetime.fromtimestamp(timestamp).date()) assert datetime.fromtimestamp(from_int).date() == (datetime.fromtimestamp(timestamp).date()) def test_parse_date(self): parsed = DatetimeProvider._parse_date("+30d") now = DatetimeProvider._parse_date("now") today = DatetimeProvider._parse_date("today") assert isinstance(parsed, date) assert isinstance(now, date) assert isinstance(today, date) assert today == date.today() assert now == today assert parsed == today + timedelta(days=30) assert DatetimeProvider._parse_date(datetime.now()) == today assert DatetimeProvider._parse_date(parsed) == parsed assert DatetimeProvider._parse_date(30) == parsed assert DatetimeProvider._parse_date(timedelta(days=30)) == parsed def test_timezone_conversion(self): from faker.providers.date_time import datetime_to_timestamp now = datetime.now(utc).replace(microsecond=0) timestamp = datetime_to_timestamp(now) now_back = datetime.fromtimestamp(timestamp, utc) assert now == now_back today = date.today() timestamp = datetime_to_timestamp(today) today_back = datetime.fromtimestamp(timestamp, utc).date() assert today == today_back def test_pytimezone(self): pytz = self.fake.pytimezone() assert isinstance(pytz, zoneinfo.ZoneInfo) def test_pytimezone_usable(self): pytz = self.fake.pytimezone() date = datetime(2000, 1, 1, tzinfo=pytz) assert date.tzinfo == pytz def test_datetimes_with_and_without_tzinfo(self): assert self.fake.date_time().tzinfo is None assert self.fake.date_time(utc).tzinfo == utc assert self.fake.date_time_ad().tzinfo is None assert self.fake.date_time_ad(utc).tzinfo == utc assert not self.fake.iso8601().endswith("+00:00") assert self.fake.iso8601(utc).endswith("+00:00") assert self.fake.iso8601()[10] == "T" assert len(self.fake.iso8601(timespec="hours")) == 13 assert len(self.fake.iso8601(timespec="minutes")) == 16 assert len(self.fake.iso8601(timespec="seconds")) == 19 assert len(self.fake.iso8601(timespec="milliseconds")) == 23 assert len(self.fake.iso8601(timespec="microseconds")) == 26 # frequently used RFC 3339 separators assert self.fake.iso8601(tzinfo=utc, sep="t")[10] == "t" assert self.fake.iso8601(tzinfo=utc, sep=" ")[10] == " " assert self.fake.iso8601(tzinfo=utc, sep="_")[10] == "_" 
@pytest.mark.skipif( not sys.platform.startswith("win"), reason="windows does not support sub second precision", ) def test_iso8601_fractional_seconds_win(self): assert len(self.fake.iso8601()) == 19 @pytest.mark.skipif( sys.platform.startswith("win"), reason="non windows does support sub second precision", ) def test_iso8601_fractional_seconds_non_win(self): assert len(self.fake.iso8601()) == 26 def test_date_object(self): assert isinstance(self.fake.date_object(), date) def test_time_object(self): assert isinstance(self.fake.time_object(), datetime_time) def test_timedelta(self): delta = self.fake.time_delta(end_datetime=timedelta(seconds=60)) assert delta.seconds <= 60 delta = self.fake.time_delta(end_datetime=timedelta(seconds=-60)) assert delta.seconds >= -60 delta = self.fake.time_delta(end_datetime="+60s") assert delta.seconds <= 60 delta = self.fake.time_delta(end_datetime="-60s") assert delta.seconds >= 60 delta = self.fake.time_delta(end_datetime="now") assert delta.seconds <= 0 delta = self.fake.time_delta() assert delta.seconds <= 0 def test_date_time_between_dates(self): timestamp_start = random.randint(0, 2000000000) timestamp_end = timestamp_start + 1 datetime_start = datetime.fromtimestamp(timestamp_start) datetime_end = datetime.fromtimestamp(timestamp_end) random_date = self.fake.date_time_between_dates(datetime_start, datetime_end) assert datetime_start <= random_date assert datetime_end >= random_date def test_date_time_between_dates_with_no_date_overlap(self): with pytest.raises(ValueError): self.fake.date_time_between_dates("-1y", "-2y") def test_date_time_between_dates_with_tzinfo(self): timestamp_start = random.randint(0, 2000000000) timestamp_end = timestamp_start + 1 datetime_start = datetime.fromtimestamp(timestamp_start, utc) datetime_end = datetime.fromtimestamp(timestamp_end, utc) random_date_naive = self.fake.date_time_between_dates(datetime_start, datetime_end) with pytest.raises(TypeError): datetime_start <= random_date_naive random_date = self.fake.date_time_between_dates(datetime_start, datetime_end, utc) assert datetime_start <= random_date assert datetime_end >= random_date def test_past_datetime_within_second(self): # Should not raise a ``ValueError`` self.fake.past_datetime(start_date="+1s") def test_date_between_dates(self): date_end = date.today() date_start = date_end - timedelta(days=10) random_date = self.fake.date_between_dates(date_start, date_end) assert date_start <= random_date assert date_end >= random_date def test_date_time_between_long_past_dates(self): random_date = self.fake.date_between("-100y", "-50y") assert random_date def _datetime_to_time(self, value): return int(time.mktime(value.timetuple())) @unittest.skipUnless(is64bit(), "requires 64bit") def test_date_time_this_period(self): # test century this_century_start = self._datetime_to_time( datetime(datetime.now().year - (datetime.now().year % 100), 1, 1), ) assert self._datetime_to_time(self.fake.date_time_this_century(after_now=False)) <= self._datetime_to_time( datetime.now() ) assert self._datetime_to_time( self.fake.date_time_this_century(before_now=False, after_now=True) ) >= self._datetime_to_time(datetime.now()) assert ( self._datetime_to_time(self.fake.date_time_this_century(before_now=True, after_now=True)) >= this_century_start ) # test decade this_decade_start = self._datetime_to_time( datetime(datetime.now().year - (datetime.now().year % 10), 1, 1), ) assert self._datetime_to_time(self.fake.date_time_this_decade(after_now=False)) <= self._datetime_to_time( 
datetime.now() ) assert self._datetime_to_time( self.fake.date_time_this_decade(before_now=False, after_now=True) ) >= self._datetime_to_time(datetime.now()) assert self._datetime_to_time( self.fake.date_time_this_decade(before_now=False, after_now=False) ) == self._datetime_to_time(datetime.now()) assert ( self._datetime_to_time(self.fake.date_time_this_decade(before_now=True, after_now=True)) >= this_decade_start ) # test year assert self._datetime_to_time(self.fake.date_time_this_year(after_now=False)) <= self._datetime_to_time( datetime.now() ) assert self._datetime_to_time( self.fake.date_time_this_year(before_now=False, after_now=True) ) >= self._datetime_to_time(datetime.now()) assert self._datetime_to_time( self.fake.date_time_this_year(before_now=False, after_now=False) ) == self._datetime_to_time(datetime.now()) # test month assert self._datetime_to_time(self.fake.date_time_this_month(after_now=False)) <= self._datetime_to_time( datetime.now() ) assert self._datetime_to_time( self.fake.date_time_this_month(before_now=False, after_now=True) ) >= self._datetime_to_time(datetime.now()) assert self._datetime_to_time( self.fake.date_time_this_month(before_now=False, after_now=False) ) == self._datetime_to_time(datetime.now()) @unittest.skipUnless(is64bit(), "requires 64bit") def test_date_time_this_period_with_tzinfo(self): # ensure all methods provide timezone aware datetimes with pytest.raises(TypeError): self.fake.date_time_this_century(before_now=False, after_now=True, tzinfo=utc) >= datetime.now() with pytest.raises(TypeError): self.fake.date_time_this_decade(after_now=False, tzinfo=utc) <= datetime.now() with pytest.raises(TypeError): self.fake.date_time_this_year(after_now=False, tzinfo=utc) <= datetime.now() with pytest.raises(TypeError): self.fake.date_time_this_month(after_now=False, tzinfo=utc) <= datetime.now() # test century assert self.fake.date_time_this_century(after_now=False, tzinfo=utc) <= datetime.now(utc) assert self.fake.date_time_this_century(before_now=False, after_now=True, tzinfo=utc) >= datetime.now(utc) assert self.fake.date_time_this_century(before_now=False, after_now=False, tzinfo=utc).replace( second=0, microsecond=0 ) == datetime.now(utc).replace(second=0, microsecond=0) # test decade assert self.fake.date_time_this_decade(after_now=False, tzinfo=utc) <= datetime.now(utc) assert self.fake.date_time_this_decade(before_now=False, after_now=True, tzinfo=utc) >= datetime.now(utc) assert self.fake.date_time_this_decade(before_now=False, after_now=False, tzinfo=utc).replace( second=0, microsecond=0 ) == datetime.now(utc).replace(second=0, microsecond=0) # test year assert self.fake.date_time_this_year(after_now=False, tzinfo=utc) <= datetime.now(utc) assert self.fake.date_time_this_year(before_now=False, after_now=True, tzinfo=utc) >= datetime.now(utc) assert self.fake.date_time_this_year(before_now=False, after_now=False, tzinfo=utc).replace( second=0, microsecond=0 ) == datetime.now(utc).replace(second=0, microsecond=0) assert self.fake.date_time_this_year(before_now=True, after_now=True, tzinfo=utc).year == datetime.now(utc).year # test month assert self.fake.date_time_this_month(after_now=False, tzinfo=utc) <= datetime.now(utc) assert self.fake.date_time_this_month(before_now=False, after_now=True, tzinfo=utc) >= datetime.now(utc) assert self.fake.date_time_this_month(before_now=False, after_now=False, tzinfo=utc).replace( second=0, microsecond=0 ) == datetime.now(utc).replace(second=0, microsecond=0) assert ( 
self.fake.date_time_this_month(before_now=True, after_now=True, tzinfo=utc).month == datetime.now(utc).month ) and ( self.fake.date_time_this_month(before_now=True, after_now=True, tzinfo=utc).year == datetime.now(utc).year ) @unittest.skipUnless(is64bit(), "requires 64bit") def test_date_this_period(self): # test century assert ( self.fake.date_this_century(before_today=True, after_today=True).strftime("%G")[:2] == datetime.now().strftime("%G")[:2] ) assert self.fake.date_this_century(after_today=False) <= date.today() assert self.fake.date_this_century(before_today=False, after_today=True) >= date.today() assert self.fake.date_this_century(before_today=False, after_today=False) == date.today() # test decade assert ( self.fake.date_this_decade(before_today=True, after_today=True).strftime("%G")[:3] == datetime.now().strftime("%G")[:3] ) assert self.fake.date_this_decade(after_today=False) <= date.today() assert self.fake.date_this_decade(before_today=False, after_today=True) >= date.today() assert self.fake.date_this_decade(before_today=False, after_today=False) == date.today() # test year assert self.fake.date_this_year(before_today=True, after_today=True).year == datetime.now().year assert self.fake.date_this_year(after_today=False) <= date.today() assert self.fake.date_this_year(before_today=False, after_today=True) >= date.today() assert self.fake.date_this_year(before_today=False, after_today=False) == date.today() # test month assert (self.fake.date_this_month(before_today=True, after_today=True).month == datetime.now().month) and ( self.fake.date_this_month(before_today=True, after_today=True).year == datetime.now().year ) assert self.fake.date_this_month(after_today=False) <= date.today() assert self.fake.date_this_month(before_today=False, after_today=True) >= date.today() assert self.fake.date_this_month(before_today=False, after_today=False) == date.today() def test_date_time_between(self): now = datetime.now() _30_years_ago = change_year(now, -30) _20_years_ago = change_year(now, -20) random_datetime = self.fake.date_time_between(start_date="-30y", end_date="-20y") assert isinstance(random_datetime, datetime) self.assertBetween(random_datetime, _30_years_ago, _20_years_ago) now = datetime.now(tz=utc) _30_years_ago = change_year(now, -30) _20_years_ago = change_year(now, -20) random_datetime = self.fake.date_time_between(start_date="-30y", end_date="-20y", tzinfo=utc) assert isinstance(random_datetime, datetime) self.assertBetween(random_datetime, _30_years_ago, _20_years_ago) def test_date_between(self): today = date.today() _30_years_ago = change_year(today, -30) _20_years_ago = change_year(today, -20) random_date = self.fake.date_between(start_date="-30y", end_date="-20y") assert isinstance(random_date, date) self.assertBetween(random_date, _30_years_ago, _20_years_ago) def test_date_between_months(self): today = date.today() _2_months_ago = today - timedelta(days=2 * (365.24 / 12)) _9_months_ago = today - timedelta(days=9 * (365.24 / 12)) random_date = self.fake.date_between(start_date="-9M", end_date="-2M") assert isinstance(random_date, date) self.assertBetween(random_date, _9_months_ago, _2_months_ago) def test_parse_timedelta(self): from faker.providers.date_time import Provider td = timedelta(days=7) seconds = Provider._parse_timedelta(td) assert seconds == 604800 seconds = Provider._parse_timedelta("+1w") assert seconds == 604800 seconds = Provider._parse_timedelta("+1y") assert seconds == 31556736 with pytest.raises(ValueError): Provider._parse_timedelta("foobar") 
def test_time_series(self): series = list(self.fake.time_series()) assert len(series), 30 assert series[1][0] - series[0][0], timedelta(days=1) uniform = lambda dt: random.uniform(0, 5) # noqa series = list(self.fake.time_series("now", "+1w", "+1d", uniform)) assert len(series), 7 assert series[1][0] - series[0][0], timedelta(days=1) end = datetime.now() + timedelta(days=7) series = list(self.fake.time_series("now", end, "+1d", uniform)) assert len(series), 7 assert series[1][0] - series[0][0], timedelta(days=1) assert series[-1][0] <= end with pytest.raises(ValueError): list(self.fake.time_series("+1w", "now", "+1d", uniform)) with pytest.raises(ValueError): list(self.fake.time_series("now", "+1w", "+1d", "uniform")) series = list(self.fake.time_series("now", end, "+1d", uniform, tzinfo=utc)) assert len(series), 7 assert series[1][0] - series[0][0], timedelta(days=1) # avoid microseconds as provider's internal parsing uses POSIX timestamps which only have second granularity end = datetime.now(utc).replace(microsecond=0) start = end - timedelta(days=15) series = list(self.fake.time_series(start_date=start, end_date=end, tzinfo=start.tzinfo)) assert series[0][0] == start def test_unix_time(self): from faker.providers.date_time import datetime_to_timestamp for _ in range(100): now = datetime.now().replace(microsecond=0) epoch_start = datetime(1970, 1, 1, tzinfo=utc) # Ensure doubly-constrained unix_times are generated correctly start_datetime = datetime(2001, 1, 1, tzinfo=utc) end_datetime = datetime(2001, 1, 2, tzinfo=utc) constrained_unix_time = self.fake.unix_time(end_datetime=end_datetime, start_datetime=start_datetime) self.assertIsInstance(constrained_unix_time, (int, float)) self.assertBetween( constrained_unix_time, datetime_to_timestamp(start_datetime), datetime_to_timestamp(end_datetime), ) # Ensure relative unix_times partially-constrained by a start time are generated correctly one_day_ago = datetime.today() - timedelta(days=1) recent_unix_time = self.fake.unix_time(start_datetime=one_day_ago) self.assertIsInstance(recent_unix_time, (int, float)) self.assertBetween( recent_unix_time, datetime_to_timestamp(one_day_ago), datetime_to_timestamp(now), ) # Ensure relative unix_times partially-constrained by an end time are generated correctly one_day_after_epoch_start = datetime(1970, 1, 2, tzinfo=utc) distant_unix_time = self.fake.unix_time(end_datetime=one_day_after_epoch_start) self.assertIsInstance(distant_unix_time, (int, float)) self.assertBetween( distant_unix_time, datetime_to_timestamp(epoch_start), datetime_to_timestamp(one_day_after_epoch_start), ) # Ensure wide-open unix_times are generated correctly self.fake.unix_time() self.assertIsInstance(constrained_unix_time, (int, float)) self.assertBetween(constrained_unix_time, 0, datetime_to_timestamp(now)) # Ensure it does not throw error with startdate='now' for machines with negative offset if platform.system() != "Windows": os.environ["TZ"] = "Europe/Paris" time.tzset() self.fake.unix_time(start_datetime="now") if platform.system() != "Windows": del os.environ["TZ"] @pytest.mark.skipif( not sys.platform.startswith("win"), reason="windows does not support sub second precision", ) def test_unix_time_win(self): unix_time = self.fake.unix_time() assert isinstance(unix_time, float) assert unix_time % 1 == 0.0 @pytest.mark.skipif( sys.platform.startswith("win"), reason="non windows does support sub second precision", ) def test_unix_time_non_win(self): unix_time = self.fake.unix_time() assert isinstance(unix_time, float) def 
test_change_year(self): _2020_06_01 = datetime.strptime("2020-06-01", "%Y-%m-%d") _20_years_ago = change_year(_2020_06_01, -20) self.assertEqual(_20_years_ago.strftime("%Y-%m-%d"), "2000-06-01") # Verify a leap day today = datetime.strptime("2020-02-29", "%Y-%m-%d") with self.assertRaises(ValueError): _11_years_ago = today.replace(year=11) _11_years_ago = change_year(today, -11) self.assertEqual(_11_years_ago.strftime("%Y-%m-%d"), "2009-03-01") # 0 is an invalid year, so it should still raise a ValueError with self.assertRaises(ValueError): change_year(today, -today.year)
TestDateTime
python
ray-project__ray
python/ray/data/_internal/datasource/uc_datasource.py
{ "start": 212, "end": 7082 }
class ____: """ Load a Unity Catalog table or files into a Ray Dataset, handling cloud credentials automatically. Currently only supports Databricks-managed Unity Catalog Supported formats: delta, parquet. Supports AWS, Azure, and GCP with automatic credential handoff. """ def __init__( self, *, base_url: str, token: str, table_full_name: str, region: Optional[str] = None, data_format: Optional[str] = "delta", operation: str = "READ", ray_init_kwargs: Optional[Dict] = None, reader_kwargs: Optional[Dict] = None, ): self.base_url = base_url.rstrip("/") self.token = token self.table_full_name = table_full_name self.data_format = data_format.lower() if data_format else None self.region = region self.operation = operation self.ray_init_kwargs = ray_init_kwargs or {} self.reader_kwargs = reader_kwargs or {} self._gcp_temp_file = None def _get_table_info(self) -> dict: url = f"{self.base_url}/api/2.1/unity-catalog/tables/{self.table_full_name}" headers = {"Authorization": f"Bearer {self.token}"} resp = requests.get(url, headers=headers) resp.raise_for_status() data = resp.json() self._table_info = data self._table_id = data["table_id"] return data def _get_creds(self): url = f"{self.base_url}/api/2.1/unity-catalog/temporary-table-credentials" headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.token}", } payload = {"table_id": self._table_id, "operation": self.operation} resp = requests.post(url, json=payload, headers=headers) resp.raise_for_status() self._creds_response = resp.json() self._table_url = self._creds_response["url"] def _set_env(self): env_vars = {} creds = self._creds_response if "aws_temp_credentials" in creds: aws = creds["aws_temp_credentials"] env_vars["AWS_ACCESS_KEY_ID"] = aws["access_key_id"] env_vars["AWS_SECRET_ACCESS_KEY"] = aws["secret_access_key"] env_vars["AWS_SESSION_TOKEN"] = aws["session_token"] if self.region: env_vars["AWS_REGION"] = self.region env_vars["AWS_DEFAULT_REGION"] = self.region elif "azuresasuri" in creds: env_vars["AZURE_STORAGE_SAS_TOKEN"] = creds["azuresasuri"] elif "gcp_service_account" in creds: gcp_json = creds["gcp_service_account"] temp_file = tempfile.NamedTemporaryFile( mode="w", prefix="gcp_sa_", suffix=".json", delete=False, ) temp_file.write(gcp_json) temp_file.close() env_vars["GOOGLE_APPLICATION_CREDENTIALS"] = temp_file.name self._gcp_temp_file = temp_file.name atexit.register(self._cleanup_gcp_temp_file, temp_file.name) else: raise ValueError( "No known credential type found in Databricks UC response." 
) for k, v in env_vars.items(): os.environ[k] = v self._runtime_env = {"env_vars": env_vars} @staticmethod def _cleanup_gcp_temp_file(temp_file_path: str): """Clean up temporary GCP service account file.""" if temp_file_path and os.path.exists(temp_file_path): try: os.unlink(temp_file_path) except OSError: pass def _infer_data_format(self) -> str: if self.data_format: return self.data_format info = self._table_info or self._get_table_info() if "data_source_format" in info and info["data_source_format"]: fmt = info["data_source_format"].lower() return fmt storage_loc = info.get("storage_location") or getattr(self, "_table_url", None) if storage_loc: ext = os.path.splitext(storage_loc)[-1].replace(".", "").lower() if ext in _FILE_FORMAT_TO_RAY_READER: return ext raise ValueError("Could not infer data format from table metadata.") def _get_ray_reader(self, data_format: str) -> Callable[..., Any]: fmt = data_format.lower() if fmt in _FILE_FORMAT_TO_RAY_READER: reader_func = getattr(ray.data, _FILE_FORMAT_TO_RAY_READER[fmt], None) if reader_func: return reader_func raise ValueError(f"Unsupported data format: {fmt}") def _read_delta_with_credentials(self): """Read Delta table with proper PyArrow filesystem for session tokens.""" import pyarrow.fs as pafs creds = self._creds_response reader_kwargs = self.reader_kwargs.copy() # For AWS, create PyArrow S3FileSystem with session tokens if "aws_temp_credentials" in creds: if not self.region: raise ValueError( "The 'region' parameter is required for AWS S3 access. " "Please specify the AWS region (e.g., region='us-west-2')." ) aws = creds["aws_temp_credentials"] filesystem = pafs.S3FileSystem( access_key=aws["access_key_id"], secret_key=aws["secret_access_key"], session_token=aws["session_token"], region=self.region, ) reader_kwargs["filesystem"] = filesystem # Call ray.data.read_delta with proper error handling try: return ray.data.read_delta(self._table_url, **reader_kwargs) except Exception as e: error_msg = str(e) if ( "DeletionVectors" in error_msg or "Unsupported reader features" in error_msg ): raise RuntimeError( f"Delta table uses Deletion Vectors, which requires deltalake>=0.10.0. " f"Error: {error_msg}\n" f"Solution: pip install --upgrade 'deltalake>=0.10.0'" ) from e raise def read(self): self._get_table_info() self._get_creds() self._set_env() data_format = self._infer_data_format() if not ray.is_initialized(): ray.init(runtime_env=self._runtime_env, **self.ray_init_kwargs) # Use special Delta reader for proper filesystem handling if data_format == "delta": return self._read_delta_with_credentials() # Use standard reader for other formats reader = self._get_ray_reader(data_format) return reader(self._table_url, **self.reader_kwargs)
UnityCatalogConnector
python
FactoryBoy__factory_boy
tests/test_using.py
{ "start": 87441, "end": 88071 }
class ____(unittest.TestCase): def test_example(self): sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) from .cyclic import foo f = foo.FooFactory.build(bar__foo=None) self.assertEqual(42, f.x) self.assertEqual(13, f.bar.y) self.assertIsNone(f.bar.foo) from .cyclic import bar b = bar.BarFactory.build(foo__bar__foo__bar=None) self.assertEqual(13, b.y) self.assertEqual(42, b.foo.x) self.assertEqual(13, b.foo.bar.y) self.assertEqual(42, b.foo.bar.foo.x) self.assertIsNone(b.foo.bar.foo.bar)
CircularTestCase
python
pytorch__pytorch
test/test_dataloader.py
{ "start": 32570, "end": 35206 }
class ____(SynchronizedDataset): def __getitem__(self, idx): self.sync_once() return torch.tensor(self.value) # Should be used as worker_init_fn with TestWorkerInfoDataset. # See _test_get_worker_info below for usage. def _test_worker_info_init_fn(worker_id): worker_info = torch.utils.data.get_worker_info() assert worker_id == worker_info.id, ( "worker_init_fn and worker_info should have consistent id" ) assert worker_id < worker_info.num_workers, ( "worker_init_fn and worker_info should have valid id" ) assert worker_info.seed == torch.initial_seed(), ( "worker_init_fn and worker_info should have consistent seed" ) dataset = worker_info.dataset assert isinstance(dataset, TestWorkerInfoDataset), ( "worker_info should have correct dataset copy" ) assert not hasattr(dataset, "value"), "worker_info should have correct dataset copy" # test that WorkerInfo attributes are read-only try: worker_info.id = 3999 except RuntimeError as e: assert str(e) == "Cannot assign attributes to WorkerInfo objects" try: worker_info.a = 3 except RuntimeError as e: assert str(e) == "Cannot assign attributes to WorkerInfo objects" for k in ["id", "num_workers", "seed", "dataset"]: assert f"{k}=" in repr(worker_info) dataset.value = [worker_id, os.getpid()] def _test_get_worker_info(): # get_worker_info returns None in main proc assert torch.utils.data.get_worker_info() is None num_workers = 2 batch_size = 2 dataset = TestWorkerInfoDataset(6, batch_size, num_workers) dataloader = DataLoader( dataset, batch_size=batch_size, num_workers=num_workers, worker_init_fn=_test_worker_info_init_fn, ) it = iter(dataloader) data = [] for d in it: data.append(d) # noqa: PERF402 worker_pids = [w.pid for w in it._workers] data = torch.cat(data, 0) for d in data: # each `d` is a [worker_id, worker_pid] pair, which is set in # _test_worker_info_init_fn assert d[1] == worker_pids[d[0]] # get_worker_info returns None in main proc after data loading assert torch.utils.data.get_worker_info() is None # main proc dataset was never assigned this attribute assert not hasattr(dataset, "value") try: _ = dataset[0] except AttributeError: return raise RuntimeError("Expected AttributeError") # test custom init function def init_fn(worker_id): torch.manual_seed(12345) # used with test_error_in_init
TestWorkerInfoDataset
python
google__pytype
pytype/pytd/pytd_visitors.py
{ "start": 3727, "end": 6423 }
class ____(base_visitor.Visitor): """Renames a TypeDeclUnit.""" def __init__(self, old_module_name, new_module_name): """Constructor. Args: old_module_name: The old name of the module as a string, e.g. "foo.bar.module1" new_module_name: The new name of the module as a string, e.g. "barfoo.module2" Raises: ValueError: If the old_module name is an empty string. """ super().__init__() if not old_module_name: raise ValueError("old_module_name must be a non empty string.") assert not old_module_name.endswith(".") assert not new_module_name.endswith(".") self._module_name = new_module_name self._old = old_module_name + "." if old_module_name else "" self._new = new_module_name + "." if new_module_name else "" def _MaybeNewName(self, name): """Decides if a name should be replaced. Args: name: A name for which a prefix should be changed. Returns: If name is local to the module described by old_module_name the old_module_part will be replaced by new_module_name and returned, otherwise node.name will be returned. """ if not name: return name if name == self._old[:-1]: return self._module_name before, match, after = name.partition(self._old) if match and not before: return self._new + after else: return name def _ReplaceModuleName(self, node): new_name = self._MaybeNewName(node.name) if new_name != node.name: return node.Replace(name=new_name) else: return node def VisitClassType(self, node): new_name = self._MaybeNewName(node.name) if new_name != node.name: return pytd.ClassType(new_name, node.cls) else: return node def VisitTypeDeclUnit(self, node): return node.Replace(name=self._module_name) def VisitTypeParameter(self, node): new_scope = self._MaybeNewName(node.scope) if new_scope != node.scope: return node.Replace(scope=new_scope) return node def VisitParamSpec(self, node): new_scope = self._MaybeNewName(node.scope) if new_scope != node.scope: return node.Replace(scope=new_scope) return node VisitConstant = _ReplaceModuleName # pylint: disable=invalid-name VisitAlias = _ReplaceModuleName # pylint: disable=invalid-name VisitClass = _ReplaceModuleName # pylint: disable=invalid-name VisitFunction = _ReplaceModuleName # pylint: disable=invalid-name VisitStrictType = _ReplaceModuleName # pylint: disable=invalid-name VisitModule = _ReplaceModuleName # pylint: disable=invalid-name VisitNamedType = _ReplaceModuleName # pylint: disable=invalid-name
RenameModuleVisitor
python
gevent__gevent
src/gevent/testing/testrunner.py
{ "start": 9284, "end": 36740 }
class ____(object): package_dir = None package = None def __init__( self, tests=None, ignore_files=None, ignored=(), coverage=False, package=None, config=None, allow_combine=True, ): self.config = config or {} self.ignore = set(ignored or ()) self.tests = tests self.configured_test_options = config.get('TEST_FILE_OPTIONS', set()) self.allow_combine = allow_combine if ignore_files: ignore_files = ignore_files.split(',') for f in ignore_files: self.ignore.update(set(load_list_from_file(f, package))) if coverage: self.ignore.update(config.get('IGNORE_COVERAGE', set())) if package: self.package = package self.package_dir = _dir_from_package_name(package) class Discovered(object): def __init__(self, package, configured_test_options, ignore, config, allow_combine): self.orig_dir = os.getcwd() self.configured_run_alone = config['RUN_ALONE'] self.configured_failing_tests = config['FAILING_TESTS'] self.package = package self.configured_test_options = configured_test_options self.allow_combine = allow_combine self.ignore = ignore self.to_import = [] self.std_monkey_patch_files = [] self.no_monkey_patch_files = [] self.commands = [] @staticmethod def __makes_simple_monkey_patch( contents, _patch_present=re.compile(br'[^#].*patch_all\(\)'), _patch_indented=re.compile(br' .*patch_all\(\)') ): return ( # A non-commented patch_all() call is present bool(_patch_present.search(contents)) # that is not indented (because that implies its not at the top-level, # so some preconditions are being set) and not _patch_indented.search(contents) ) @staticmethod def __file_allows_monkey_combine(contents): return b'testrunner-no-monkey-combine' not in contents @staticmethod def __file_allows_combine(contents): return b'testrunner-no-combine' not in contents @staticmethod def __calls_unittest_main_toplevel( contents, _greentest_main=re.compile(br' greentest.main\(\)'), _unittest_main=re.compile(br' unittest.main\(\)'), _import_main=re.compile(br'from gevent.testing import.*main'), _main=re.compile(br' main\(\)'), ): # TODO: Add a check that this comes in a line directly after # if __name__ == __main__. return ( _greentest_main.search(contents) or _unittest_main.search(contents) or (_import_main.search(contents) and _main.search(contents)) ) def __has_config(self, filename): return ( RUN_LEAKCHECKS or filename in self.configured_test_options or filename in self.configured_run_alone or matches(self.configured_failing_tests, filename) ) def __can_monkey_combine(self, filename, contents): return ( self.allow_combine and not self.__has_config(filename) and self.__makes_simple_monkey_patch(contents) and self.__file_allows_monkey_combine(contents) and self.__file_allows_combine(contents) and self.__calls_unittest_main_toplevel(contents) ) @staticmethod def __makes_no_monkey_patch(contents, _patch_present=re.compile(br'[^#].*patch_\w*\(')): return not _patch_present.search(contents) def __can_nonmonkey_combine(self, filename, contents): return ( self.allow_combine and not self.__has_config(filename) and self.__makes_no_monkey_patch(contents) and self.__file_allows_combine(contents) and self.__calls_unittest_main_toplevel(contents) ) def __begin_command(self): cmd = [sys.executable, '-u'] # XXX: -X track-resources is broken. This happened when I updated to # PyPy 7.3.2. 
It started failing to even start inside the virtual environment # with # # debug: OperationError: # debug: operror-type: ImportError # debug: operror-value: No module named traceback # # I don't know if this is PyPy's problem or a problem in virtualenv: # # virtualenv==20.0.35 # virtualenv-clone==0.5.4 # virtualenvwrapper==4.8.4 # # Deferring investigation until I need this... # if PYPY and PY2: # # Doesn't seem to be an env var for this. # # XXX: track-resources is broken in virtual environments # # on 7.3.2. # cmd.extend(('-X', 'track-resources')) return cmd def __add_test(self, qualified_name, filename, contents): if b'TESTRUNNER' in contents: # test__monkey_patching.py # XXX: Rework this to avoid importing. # XXX: Rework this to allow test combining (it could write the files out and return # them directly; we would use 'python -m gevent.monkey --module unittest ...) self.to_import.append(qualified_name) elif self.__can_monkey_combine(filename, contents): self.std_monkey_patch_files.append(qualified_name if self.package else filename) elif self.__can_nonmonkey_combine(filename, contents): self.no_monkey_patch_files.append(qualified_name if self.package else filename) else: # XXX: For simple python module tests, try this with # `runpy.run_module`, very similar to the way we run # things for monkey patching. The idea here is that we # can perform setup ahead of time (e.g., # setup_resources()) in each test without having to do # it manually or force calls or modifications to those # tests. cmd = self.__begin_command() if self.package: # Using a package is the best way to work with coverage 5 # when we specify 'source = <package>' cmd.append('-m' + qualified_name) else: cmd.append(filename) options = DEFAULT_RUN_OPTIONS.copy() options.update(self.configured_test_options.get(filename, {})) self.commands.append((cmd, options)) @staticmethod def __remove_options(lst): return [x for x in lst if x and not x.startswith('-')] def __expand_imports(self): for qualified_name in self.to_import: module = importlib.import_module(qualified_name) for cmd, options in module.TESTRUNNER(): if self.__remove_options(cmd)[-1] in self.ignore: continue self.commands.append((cmd, options)) del self.to_import[:] def __combine_commands(self, files, group_size=5): if not files: return from itertools import groupby cnt = [0, 0] def make_group(_): if cnt[0] > group_size: cnt[0] = 0 cnt[1] += 1 cnt[0] += 1 return cnt[1] for _, group in groupby(files, make_group): cmd = self.__begin_command() cmd.append('-m') cmd.append('unittest') # cmd.append('-v') for name in group: cmd.append(name) self.commands.insert(0, (cmd, DEFAULT_RUN_OPTIONS.copy())) del files[:] def visit_file(self, filename): # Support either 'gevent.tests.foo' or 'gevent/tests/foo.py' if filename.startswith('gevent.tests'): # XXX: How does this interact with 'package'? Probably not well qualified_name = module_name = filename filename = filename[len('gevent.tests') + 1:] filename = filename.replace('.', os.sep) + '.py' else: module_name = os.path.splitext(filename)[0] qualified_name = self.package + '.' + module_name if self.package else module_name # Also allow just 'foo' as a shortcut for 'gevent.tests.foo' abs_filename = os.path.abspath(filename) if ( not os.path.exists(abs_filename) and not filename.endswith('.py') and os.path.exists(abs_filename + '.py') ): abs_filename += '.py' with open(abs_filename, 'rb') as f: # Some of the test files (e.g., test__socket_dns) are # UTF8 encoded. 
Depending on the environment, Python 3 may # try to decode those as ASCII, which fails with UnicodeDecodeError. # Thus, be sure to open and compare in binary mode. # Open the absolute path to make errors more clear, # but we can't store the absolute path, our configuration is based on # relative file names. contents = f.read() self.__add_test(qualified_name, filename, contents) def visit_files(self, filenames): for filename in filenames: self.visit_file(filename) with Discovery._in_dir(self.orig_dir): self.__expand_imports() self.__combine_commands(self.std_monkey_patch_files) self.__combine_commands(self.no_monkey_patch_files) @staticmethod @contextmanager def _in_dir(package_dir): olddir = os.getcwd() if package_dir: os.chdir(package_dir) try: yield finally: os.chdir(olddir) @Lazy def discovered(self): tests = self.tests discovered = self.Discovered(self.package, self.configured_test_options, self.ignore, self.config, self.allow_combine) # We need to glob relative names, our config is based on filenames still with self._in_dir(self.package_dir): if not tests: tests = set(glob.glob('test_*.py')) - set(['test_support.py']) else: tests = set(tests) if self.ignore: # Always ignore the designated list, even if tests # were specified on the command line. This fixes a # nasty interaction with # test__threading_vs_settrace.py being run under # coverage when 'grep -l subprocess test*py' is used # to list the tests to run. tests -= self.ignore tests = sorted(tests) discovered.visit_files(tests) return discovered def __iter__(self): return iter(self.discovered.commands) # pylint:disable=no-member def __len__(self): return len(self.discovered.commands) # pylint:disable=no-member def load_list_from_file(filename, package): result = [] if filename: # pylint:disable=unspecified-encoding with open(_package_relative_filename(filename, package)) as f: for x in f: x = x.split('#', 1)[0].strip() if x: result.append(x) return result def matches(possibilities, command, include_flaky=True): if isinstance(command, list): command = ' '.join(command) for line in possibilities: if not include_flaky and line.startswith('FLAKY '): continue line = line.replace('FLAKY ', '') # Our configs are still mostly written in terms of file names, # but the non-monkey tests are now using package names. # Strip off '.py' from filenames to see if we match a module. # XXX: This could be much better. Our command needs better structure. if command.endswith(' ' + line) or command.endswith(line.replace(".py", '')): return True if ' ' not in command and command == line: return True return False def format_seconds(seconds): if seconds < 20: return '%.1fs' % seconds seconds = str(timedelta(seconds=round(seconds))) if seconds.startswith('0:'): seconds = seconds[2:] return seconds def _show_longest_running(result_collector, how_many=5): longest_running_tests = result_collector.longest_running_tests if not longest_running_tests: return # The only tricky part is handling repeats. we want to show them, # but not count them as a distinct entry. 
util.log('\nLongest-running tests:') length_of_longest_formatted_decimal = len('%.1f' % longest_running_tests[0].run_duration) frmt = '%' + str(length_of_longest_formatted_decimal) + '.1f seconds: %s' seen_names = set() for result in longest_running_tests: util.log(frmt, result.run_duration, result.name) seen_names.add(result.name) if len(seen_names) >= how_many: break def report(result_collector, # type: ResultCollector exit=True, took=None, configured_failing_tests=()): # pylint:disable=redefined-builtin,too-many-branches,too-many-locals total = result_collector.total failed = result_collector.failed passed = result_collector.passed total_cases = result_collector.total_cases total_skipped = result_collector.total_skipped _show_longest_running(result_collector) if took: took = ' in %s' % format_seconds(took) else: took = '' failed_expected = [] failed_unexpected = [] passed_unexpected = [] for name in passed: if matches(configured_failing_tests, name, include_flaky=False): passed_unexpected.append(name) if passed_unexpected: util.log('\n%s/%s unexpected passes', len(passed_unexpected), total, color='error') print_list(passed_unexpected) if result_collector.reran: util.log('\n%s/%s tests rerun', len(result_collector.reran), total, color='warning') print_list(result_collector.reran) if failed: util.log('\n%s/%s tests failed%s', len(failed), total, took, color='warning') for name in failed: if matches(configured_failing_tests, name, include_flaky=True): failed_expected.append(name) else: failed_unexpected.append(name) if failed_expected: util.log('\n%s/%s expected failures', len(failed_expected), total, color='warning') print_list(failed_expected) if failed_unexpected: util.log('\n%s/%s unexpected failures', len(failed_unexpected), total, color='error') print_list(failed_unexpected) util.log( '\nRan %s tests%s in %s files%s', total_cases, util._colorize('skipped', " (skipped=%d)" % total_skipped) if total_skipped else '', total, took, ) if exit: if failed_unexpected: sys.exit(min(100, len(failed_unexpected))) if passed_unexpected: sys.exit(101) if total <= 0: sys.exit('No tests found.') def print_list(lst): for name in lst: util.log(' - %s', name) def _setup_environ(debug=False): def not_set(key): return not bool(os.environ.get(key)) if (not_set('PYTHONWARNINGS') and (not sys.warnoptions # Python 3.7 goes from [] to ['default'] for nothing or sys.warnoptions == ['default'])): # action:message:category:module:line # - when a warning matches # more than one option, the action for the last matching # option is performed. # - action is one of : ignore, default, all, module, once, error # Enable default warnings such as ResourceWarning. # ResourceWarning doesn't exist on Py2, so don't put it # in there to avoid a warnnig. defaults = [ 'default', 'default::DeprecationWarning', ] if not PY2: defaults.append('default::ResourceWarning') os.environ['PYTHONWARNINGS'] = ','.join(defaults + [ # action:message:category:module:line # On Python 3[.6], the system site.py module has # "open(fullname, 'rU')" which produces the warning that # 'U' is deprecated, so ignore warnings from site.py 'ignore:::site:', # pkgutil on Python 2 complains about missing __init__.py 'ignore:::pkgutil:', # importlib/_bootstrap.py likes to spit out "ImportWarning: # can't resolve package from __spec__ or __package__, falling # back on __name__ and __path__". I have no idea what that means, but it seems harmless # and is annoying. 
'ignore:::importlib._bootstrap:', 'ignore:::importlib._bootstrap_external:', # importing ABCs from collections, not collections.abc 'ignore:::pkg_resources._vendor.pyparsing:', 'ignore:::dns.namedict:', # dns.hash itself is being deprecated, importing it raises the warning; # we don't import it, but dnspython still does 'ignore:::dns.hash:', # dns.zone uses some raw regular expressions # without the r'' syntax, leading to DeprecationWarning: invalid # escape sequence. This is fixed in 2.0 (Python 3 only). 'ignore:::dns.zone:', # Coverage has started issuing warnings when it can't import # CTracer in subinterpreters. This breaks those tests. 'ignore:::coverage.core:', ]) if not_set('PYTHONFAULTHANDLER'): os.environ['PYTHONFAULTHANDLER'] = 'true' if not_set('GEVENT_DEBUG') and debug: os.environ['GEVENT_DEBUG'] = 'debug' if not_set('PYTHONTRACEMALLOC') and debug: # This slows the tests down quite a bit. Reserve # for debugging. os.environ['PYTHONTRACEMALLOC'] = '10' if not_set('PYTHONDEVMODE'): # Python 3.7 and above. os.environ['PYTHONDEVMODE'] = '1' if not_set('PYTHONMALLOC') and debug: # Python 3.6 and above. # This slows the tests down some, but # can detect memory corruption. Unfortunately # it can also be flaky, especially in pre-release # versions of Python (e.g., lots of crashes on Python 3.8b4). os.environ['PYTHONMALLOC'] = 'debug' if sys.version_info.releaselevel != 'final' and not debug: os.environ['PYTHONMALLOC'] = 'default' os.environ['PYTHONDEVMODE'] = '' # PYTHONSAFEPATH breaks the assumptions of some tests, notably test_interpreters.py os.environ.pop('PYTHONSAFEPATH', None) interesting_envs = { k: os.environ[k] for k in os.environ if k.startswith(('PYTHON', 'GEVENT')) } widest_k = max(len(k) for k in interesting_envs) for k, v in sorted(interesting_envs.items()): util.log('%*s\t=\t%s', widest_k, k, v, color="debug") def main(): # pylint:disable=too-many-locals,too-many-statements,too-many-branches import argparse parser = argparse.ArgumentParser() parser.add_argument('--ignore') parser.add_argument( '--discover', action='store_true', help="Only print the tests found." ) parser.add_argument( '--config', default='known_failures.py', help="The path to the config file containing " "FAILING_TESTS, IGNORED_TESTS and RUN_ALONE. " "Defaults to %(default)s." ) parser.add_argument( "--coverage", action="store_true", help="Enable coverage recording with coverage.py." ) # TODO: Quiet and verbose should be mutually exclusive parser.add_argument( "--quiet", action="store_true", default=True, help="Be quiet. Defaults to %(default)s. Also the " "GEVENTTEST_QUIET environment variable." ) parser.add_argument("--verbose", action="store_false", dest='quiet') parser.add_argument( "--debug", action="store_true", default=False, help="Enable debug settings. If the GEVENT_DEBUG environment variable is not set, " "this sets it to 'debug'. This can also enable PYTHONTRACEMALLOC and the debug PYTHONMALLOC " "allocators, if not already set. Defaults to %(default)s." ) parser.add_argument( "--package", default="gevent.tests", help="Load tests from the given package. Defaults to %(default)s." ) parser.add_argument( "--processes", "-j", default=DEFAULT_NWORKERS, type=int, help="Use up to the given number of parallel processes to execute tests. " "Defaults to %(default)s." ) parser.add_argument( '--no-combine', default=True, action='store_false', help="Do not combine tests into process groups." 
) parser.add_argument('-u', '--use', metavar='RES1,RES2,...', action='store', type=parse_resources, help='specify which special resource intensive tests ' 'to run. "all" is the default; "none" may also be used. ' 'Disable individual resources with a leading -.' 'For example, "-u-network". GEVENTTEST_USE_RESOURCES is used ' 'if no argument is given. To only use one resources, specify ' '"-unone,resource".') parser.add_argument("--travis-fold", metavar="MSG", help="Emit Travis CI log fold markers around the output.") fail_parser = parser.add_mutually_exclusive_group() fail_parser.add_argument( "--second-chance", action="store_true", default=False, help="Give failed tests a second chance.") fail_parser.add_argument( '--failfast', '-x', action='store_true', default=False, help="Stop running after the first failure.") parser.add_argument('tests', nargs='*') options = parser.parse_args() # options.use will be either None for not given, or a list # of the last specified -u argument. # If not given, use the default, which we'll take from the environment, if set. options.use = list(set(parse_resources() if options.use is None else options.use)) # Whether or not it came from the environment, put it in the # environment now. os.environ['GEVENTTEST_USE_RESOURCES'] = unparse_resources(options.use) setup_resources(options.use) # Set this before any test imports in case of 'from .util import QUIET'; # not that this matters much because we spawn tests in subprocesses, # it's the environment setting that matters util.QUIET = options.quiet if 'GEVENTTEST_QUIET' not in os.environ: os.environ['GEVENTTEST_QUIET'] = str(options.quiet) FAILING_TESTS = [] IGNORED_TESTS = [] RUN_ALONE = [] coverage = False if options.coverage or os.environ.get("GEVENTTEST_COVERAGE"): if PYPY and RUNNING_ON_CI: print("Ignoring coverage option on PyPy on CI; slow") else: coverage = True cov_config = os.environ['COVERAGE_PROCESS_START'] = os.path.abspath(".coveragerc") if PYPY: cov_config = os.environ['COVERAGE_PROCESS_START'] = os.path.abspath(".coveragerc-pypy") this_dir = os.path.dirname(__file__) site_dir = os.path.join(this_dir, 'coveragesite') site_dir = os.path.abspath(site_dir) os.environ['PYTHONPATH'] = site_dir + os.pathsep + os.environ.get("PYTHONPATH", "") # We change directory often, use an absolute path to keep all the # coverage files (which will have distinct suffixes because of parallel=true in .coveragerc # in this directory; makes them easier to combine and use with coverage report) os.environ['COVERAGE_FILE'] = os.path.abspath(".") + os.sep + ".coverage" # XXX: Log this with color. Right now, it interferes (buffering) with other early # output. 
print("Enabling coverage to", os.environ['COVERAGE_FILE'], "with site", site_dir, "and configuration file", cov_config) assert os.path.exists(cov_config) assert os.path.exists(os.path.join(site_dir, 'sitecustomize.py')) _setup_environ(debug=options.debug) if options.config: config = {} options.config = _package_relative_filename(options.config, options.package) with open(options.config) as f: # pylint:disable=unspecified-encoding config_data = f.read() six.exec_(config_data, config) FAILING_TESTS = config['FAILING_TESTS'] IGNORED_TESTS = config['IGNORED_TESTS'] RUN_ALONE = config['RUN_ALONE'] tests = Discovery( options.tests, ignore_files=options.ignore, ignored=IGNORED_TESTS, coverage=coverage, package=options.package, config=config, allow_combine=options.no_combine, ) if options.discover: for cmd, options in tests: print(util.getname(cmd, env=options.get('env'), setenv=options.get('setenv'))) print('%s tests found.' % len(tests)) else: if PYPY and RESOLVER_ARES: # XXX: Add a way to force these. print("Not running tests on pypy with c-ares; not a supported configuration") return if options.package: # Put this directory on the path so relative imports work. package_dir = _dir_from_package_name(options.package) os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', "") + os.pathsep + package_dir allowed_return_codes = () if sys.version_info[:3] >= (3, 12, 1): # unittest suddenly started failing with this return code # if all tests in a module are skipped in 3.12.1. allowed_return_codes += (5,) runner = Runner( tests, allowed_return_codes=allowed_return_codes, configured_failing_tests=FAILING_TESTS, failfast=options.failfast, quiet=options.quiet, configured_run_alone_tests=RUN_ALONE, worker_count=options.processes, second_chance=options.second_chance, ) if options.travis_fold: runner = TravisFoldingRunner(runner, options.travis_fold) runner() if __name__ == '__main__': main()
Discovery
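To make the combining heuristics above easier to follow, here is a minimal standalone sketch of the content checks Discovery applies to a test file's bytes. The regexes mirror the ones in the class, but the helper name and the sample file are assumptions for illustration.

import re

_PATCH_PRESENT = re.compile(br'[^#].*patch_all\(\)')
_PATCH_INDENTED = re.compile(br' .*patch_all\(\)')
_UNITTEST_MAIN = re.compile(br' unittest.main\(\)')

def can_monkey_combine(contents: bytes) -> bool:
    """True if the file monkey-patches at top level and runs unittest.main()."""
    simple_patch = bool(_PATCH_PRESENT.search(contents)) and not _PATCH_INDENTED.search(contents)
    allows_combine = (b'testrunner-no-combine' not in contents
                      and b'testrunner-no-monkey-combine' not in contents)
    calls_main = bool(_UNITTEST_MAIN.search(contents))
    return simple_patch and allows_combine and calls_main

sample = b"""from gevent import monkey
monkey.patch_all()
import unittest

if __name__ == '__main__':
    unittest.main()
"""
print(can_monkey_combine(sample))  # True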
python
ray-project__ray
python/ray/train/tensorflow/tensorflow_trainer.py
{ "start": 352, "end": 7436 }
class ____(DataParallelTrainer): """A Trainer for data parallel Tensorflow training. This Trainer runs the function ``train_loop_per_worker`` on multiple Ray Actors. These actors already have the necessary TensorFlow process group already configured for distributed TensorFlow training. The ``train_loop_per_worker`` function is expected to take in either 0 or 1 arguments: .. testcode:: def train_loop_per_worker(): ... .. testcode:: def train_loop_per_worker(config: Dict): ... If ``train_loop_per_worker`` accepts an argument, then ``train_loop_config`` will be passed in as the argument. This is useful if you want to tune the values in ``train_loop_config`` as hyperparameters. If the ``datasets`` dict contains a training dataset (denoted by the "train" key), then it will be split into multiple dataset shards that can then be accessed by ``ray.train.get_dataset_shard("train")`` inside ``train_loop_per_worker``. All the other datasets will not be split and ``ray.train.get_dataset_shard(...)`` will return the entire Dataset. Inside the ``train_loop_per_worker`` function, you can use any of the :ref:`Ray Train loop methods <train-loop-api>`. .. warning:: Ray will not automatically set any environment variables or configuration related to local parallelism / threading :ref:`aside from "OMP_NUM_THREADS" <omp-num-thread-note>`. If you desire greater control over TensorFlow threading, use the ``tf.config.threading`` module (eg. ``tf.config.threading.set_inter_op_parallelism_threads(num_cpus)``) at the beginning of your ``train_loop_per_worker`` function. .. testcode:: from ray import train def train_loop_per_worker(): # Report intermediate results for callbacks or logging and # checkpoint data. train.report(...) # Returns dict of last saved checkpoint. train.get_checkpoint() # Returns the Dataset shard for the given key. train.get_dataset_shard("my_dataset") # Returns the total number of workers executing training. train.get_context().get_world_size() # Returns the rank of this worker. train.get_context().get_world_rank() # Returns the rank of the worker on the current node. train.get_context().get_local_rank() Any returns from the ``train_loop_per_worker`` will be discarded and not used or persisted anywhere. To save a model to use for the ``TensorflowPredictor``, you must save it under the "model" kwarg in ``Checkpoint`` passed to ``train.report()``. Example: .. testcode:: import os import tempfile import tensorflow as tf import numpy as np import ray from ray import train from ray.train import Checkpoint, ScalingConfig from ray.train.tensorflow import TensorflowTrainer def build_model(): # toy neural network : 1-layer return tf.keras.Sequential( [tf.keras.layers.Dense( 1, activation="linear", input_shape=(1,))] ) def train_loop_per_worker(config): dataset_shard = train.get_dataset_shard("train") strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() with strategy.scope(): model = build_model() model.compile( optimizer="Adam", loss="mean_squared_error", metrics=["mse"]) tf_dataset = dataset_shard.to_tf( feature_columns="x", label_columns="y", batch_size=1 ) for epoch in range(config["num_epochs"]): model.fit(tf_dataset) # Create checkpoint. 
checkpoint_dir = tempfile.mkdtemp() model.save_weights( os.path.join(checkpoint_dir, "my_checkpoint") ) checkpoint = Checkpoint.from_directory(checkpoint_dir) train.report( {}, checkpoint=checkpoint, ) train_dataset = ray.data.from_items([{"x": np.array([x], dtype=np.float32), "y": x + 1} for x in range(32)]) trainer = TensorflowTrainer( train_loop_per_worker=train_loop_per_worker, scaling_config=ScalingConfig(num_workers=3, use_gpu=True), datasets={"train": train_dataset}, train_loop_config={"num_epochs": 2}, ) result = trainer.fit() .. testoutput:: :options:+ELLIPSIS :hide: ... Args: train_loop_per_worker: The training function to execute. This can either take in no arguments or a ``config`` dict. train_loop_config: Configurations to pass into ``train_loop_per_worker`` if it accepts an argument. tensorflow_config: Configuration for setting up the TensorFlow backend. If set to None, use the default configuration. This replaces the ``backend_config`` arg of ``DataParallelTrainer``. scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. datasets: Any Datasets to use for training. Use the key "train" to denote which dataset is the training dataset. resume_from_checkpoint: A checkpoint to resume training from. metadata: Dict that should be made available via `ray.train.get_context().get_metadata()` and in `checkpoint.get_metadata()` for checkpoints saved from this Trainer. Must be JSON-serializable. """ def __init__( self, train_loop_per_worker: Union[Callable[[], None], Callable[[Dict], None]], *, train_loop_config: Optional[Dict] = None, tensorflow_config: Optional[TensorflowConfig] = None, scaling_config: Optional[ScalingConfig] = None, dataset_config: Optional[DataConfig] = None, run_config: Optional[RunConfig] = None, datasets: Optional[Dict[str, GenDataset]] = None, metadata: Optional[Dict[str, Any]] = None, resume_from_checkpoint: Optional[Checkpoint] = None, ): if not tensorflow_config: tensorflow_config = TensorflowConfig() super(TensorflowTrainer, self).__init__( train_loop_per_worker=train_loop_per_worker, train_loop_config=train_loop_config, backend_config=tensorflow_config, scaling_config=scaling_config, dataset_config=dataset_config, run_config=run_config, datasets=datasets, resume_from_checkpoint=resume_from_checkpoint, metadata=metadata, )
TensorflowTrainer
python
plotly__plotly.py
tests/test_core/test_utils/test_utils.py
{ "start": 126, "end": 587 }
class ____(TestCase): def test_nan_to_null(self): array = [1, float("NaN"), float("Inf"), float("-Inf"), "platypus"] result = _json.dumps(array, cls=PlotlyJSONEncoder) expected_result = '[1, null, null, null, "platypus"]' self.assertEqual(result, expected_result) def test_invalid_encode_exception(self): with self.assertRaises(TypeError): _json.dumps({"a": {1}}, cls=PlotlyJSONEncoder)
TestJSONEncoder
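For context on what the NaN test above asserts: a plain json.dumps would emit NaN/Infinity literals, which are not valid JSON, so non-finite floats have to be mapped to null somewhere. The sketch below is not plotly's implementation, just one hedged way to get the same output by sanitizing values before serializing.

import json
import math

def _sanitize(obj):
    # Recursively replace non-finite floats with None so they serialize as null.
    if isinstance(obj, float) and not math.isfinite(obj):
        return None
    if isinstance(obj, (list, tuple)):
        return [_sanitize(v) for v in obj]
    if isinstance(obj, dict):
        return {k: _sanitize(v) for k, v in obj.items()}
    return obj

data = [1, float("nan"), float("inf"), float("-inf"), "platypus"]
print(json.dumps(_sanitize(data)))  # [1, null, null, null, "platypus"]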
python
vyperlang__vyper
vyper/ast/nodes.py
{ "start": 36794, "end": 37769 }
class ____(ExprNode): __slots__ = ("func", "args", "keywords") @property def is_extcall(self): return isinstance(self._parent, ExtCall) @property def is_staticcall(self): return isinstance(self._parent, StaticCall) @property def is_plain_call(self): return not (self.is_extcall or self.is_staticcall) @property def kind_str(self): if self.is_extcall: return "extcall" if self.is_staticcall: return "staticcall" raise CompilerPanic("unreachable!") # pragma: nocover @property def is_terminus(self): # cursed import cycle! from vyper.builtins.functions import get_builtin_functions if not isinstance(self.func, Name): return False funcname = self.func.id builtin_t = get_builtin_functions().get(funcname) if builtin_t is None: return False return builtin_t._is_terminus
Call
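The kind of a Call is derived from its parent node rather than stored on the node itself. Below is a toy sketch of that parent-based dispatch; the classes are stand-ins, not vyper's AST, and unlike the real kind_str it returns a label for plain calls instead of raising.

class ExtCall: ...
class StaticCall: ...

class ToyCall:
    def __init__(self, parent=None):
        self._parent = parent

    @property
    def kind_str(self):
        if isinstance(self._parent, ExtCall):
            return "extcall"
        if isinstance(self._parent, StaticCall):
            return "staticcall"
        return "plain call"   # the real property raises CompilerPanic here

print(ToyCall(ExtCall()).kind_str)     # extcall
print(ToyCall(StaticCall()).kind_str)  # staticcall
print(ToyCall().kind_str)              # plain call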
python
getsentry__sentry
tests/sentry/seer/endpoints/test_organization_seer_setup_check.py
{ "start": 686, "end": 5120 }
class ____(OrganizationSeerSetupCheckTestBase): def test_successful_setup_default_state(self) -> None: """ Test the default state with no acknowledgements and quotas available. """ response = self.get_response(self.organization.slug) assert response.status_code == 200 assert response.data == { "setupAcknowledgement": { "orgHasAcknowledged": False, "userHasAcknowledged": False, }, "billing": { "hasAutofixQuota": True, "hasScannerQuota": True, }, } def test_current_user_acknowledged_setup(self) -> None: """ Test when the current user has acknowledged the setup. """ feature = "seer_autofix_setup_acknowledged" PromptsActivity.objects.create( user_id=self.user.id, feature=feature, organization_id=self.organization.id, project_id=0, data=orjson.dumps( {"dismissed_ts": calendar.timegm(timezone.now().utctimetuple())} ).decode("utf-8"), ) response = self.get_response(self.organization.slug) assert response.status_code == 200 assert response.data["setupAcknowledgement"] == { "orgHasAcknowledged": True, "userHasAcknowledged": True, } def test_org_acknowledged_not_user(self) -> None: """ Test when another user in the org has acknowledged, but not the requesting user. """ other_user = self.create_user() self.create_member(user=other_user, organization=self.organization, role="member") feature = "seer_autofix_setup_acknowledged" PromptsActivity.objects.create( user_id=other_user.id, feature=feature, organization_id=self.organization.id, project_id=0, data=orjson.dumps( {"dismissed_ts": calendar.timegm(timezone.now().utctimetuple())} ).decode("utf-8"), ) response = self.get_response(self.organization.slug) assert response.status_code == 200 assert response.data["setupAcknowledgement"] == { "orgHasAcknowledged": True, "userHasAcknowledged": False, } @patch("sentry.quotas.backend.has_available_reserved_budget") def test_no_autofix_quota(self, mock_has_budget: MagicMock) -> None: """ Test when the organization has no autofix quota available. """ def side_effect(org_id, data_category): from sentry.constants import DataCategory if data_category == DataCategory.SEER_AUTOFIX: return False return True # Scanner quota available mock_has_budget.side_effect = side_effect response = self.get_response(self.organization.slug) assert response.status_code == 200 assert response.data["billing"] == { "hasAutofixQuota": False, "hasScannerQuota": True, } @patch("sentry.quotas.backend.has_available_reserved_budget") def test_no_scanner_quota(self, mock_has_budget: MagicMock) -> None: """ Test when the organization has no scanner quota available. """ def side_effect(org_id, data_category): from sentry.constants import DataCategory if data_category == DataCategory.SEER_SCANNER: return False return True # Autofix quota available mock_has_budget.side_effect = side_effect response = self.get_response(self.organization.slug) assert response.status_code == 200 assert response.data["billing"] == { "hasAutofixQuota": True, "hasScannerQuota": False, } @patch("sentry.quotas.backend.has_available_reserved_budget") def test_no_quotas_available(self, mock_has_budget: MagicMock) -> None: """ Test when the organization has no quotas available for either service. """ mock_has_budget.return_value = False response = self.get_response(self.organization.slug) assert response.status_code == 200 assert response.data["billing"] == { "hasAutofixQuota": False, "hasScannerQuota": False, }
OrganizationSeerSetupCheckSuccessTest
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dataform.py
{ "start": 14534, "end": 15293 }
class ____: @mock.patch(HOOK_STR) @mock.patch(INSTALL_NPM_PACKAGES_RESPONSE_STR) def test_execute(self, _, hook_mock): op = DataformInstallNpmPackagesOperator( task_id="install-npm-packages", project_id=PROJECT_ID, region=REGION, repository_id=REPOSITORY_ID, workspace_id=WORKSPACE_ID, ) op.execute(context=mock.MagicMock()) hook_mock.return_value.install_npm_packages.assert_called_once_with( project_id=PROJECT_ID, region=REGION, repository_id=REPOSITORY_ID, workspace_id=WORKSPACE_ID, retry=DEFAULT, timeout=None, metadata=(), )
TestDataformInstallNpmPackagesOperator
python
mlflow__mlflow
mlflow/gateway/providers/openai.py
{ "start": 1350, "end": 8811 }
class ____(ProviderAdapter): @classmethod def chat_to_model(cls, payload, config): return cls._add_model_to_payload_if_necessary(payload, config) @classmethod def completion_to_model(cls, payload, config): return cls._add_model_to_payload_if_necessary(payload, config) @classmethod def embeddings_to_model(cls, payload, config): return cls._add_model_to_payload_if_necessary(payload, config) @classmethod def _add_model_to_payload_if_necessary(cls, payload, config): # NB: For Azure OpenAI, the deployment name (which is included in the URL) specifies # the model; it is not specified in the payload. For OpenAI outside of Azure, the # model is always specified in the payload if config.model.config.openai_api_type not in (OpenAIAPIType.AZURE, OpenAIAPIType.AZUREAD): return {"model": config.model.name, **payload} else: return payload @classmethod def model_to_chat(cls, resp, config): # Response example (https://platform.openai.com/docs/api-reference/chat/create) # ``` # { # "id":"chatcmpl-abc123", # "object":"chat.completion", # "created":1677858242, # "model":"gpt-4o-mini", # "usage":{ # "prompt_tokens":13, # "completion_tokens":7, # "total_tokens":20 # }, # "choices":[ # { # "message":{ # "role":"assistant", # "content":"\n\nThis is a test!" # }, # "finish_reason":"stop", # "index":0 # } # ] # } # ``` return chat.ResponsePayload( id=resp["id"], object=resp["object"], created=resp["created"], model=resp["model"], choices=[ chat.Choice( index=idx, message=chat.ResponseMessage( role=c["message"]["role"], content=c["message"].get("content"), tool_calls=( (calls := c["message"].get("tool_calls")) and [chat.ToolCall(**c) for c in calls] ), ), finish_reason=c.get("finish_reason"), ) for idx, c in enumerate(resp["choices"]) ], usage=chat.ChatUsage( prompt_tokens=resp["usage"]["prompt_tokens"], completion_tokens=resp["usage"]["completion_tokens"], total_tokens=resp["usage"]["total_tokens"], ), ) @classmethod def model_to_chat_streaming(cls, resp, config): return chat.StreamResponsePayload( id=resp["id"], object=resp["object"], created=resp["created"], model=resp["model"], choices=[ chat.StreamChoice( index=c["index"], finish_reason=c["finish_reason"], delta=chat.StreamDelta( role=c["delta"].get("role"), content=c["delta"].get("content"), tool_calls=( (calls := c["delta"].get("tool_calls")) and [chat.ToolCallDelta(**c) for c in calls] ), ), ) for c in resp["choices"] ], ) @classmethod def model_to_completions(cls, resp, config): # Response example (https://platform.openai.com/docs/api-reference/completions/create) # ``` # { # "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", # "object": "text_completion", # "created": 1589478378, # "model": "text-davinci-003", # "choices": [ # { # "text": "\n\nThis is indeed a test", # "index": 0, # "logprobs": null, # "finish_reason": "length" # } # ], # "usage": { # "prompt_tokens": 5, # "completion_tokens": 7, # "total_tokens": 12 # } # } # ``` return completions.ResponsePayload( id=resp["id"], # The chat models response from OpenAI is of object type "chat.completion". 
Since # we're using the completions response format here, we hardcode the "text_completion" # object type in the response instead object="text_completion", created=resp["created"], model=resp["model"], choices=[ completions.Choice( index=idx, text=c.get("message", {}).get("content") or c.get("text") or "", finish_reason=c["finish_reason"], ) for idx, c in enumerate(resp["choices"]) ], usage=completions.CompletionsUsage( prompt_tokens=resp["usage"]["prompt_tokens"], completion_tokens=resp["usage"]["completion_tokens"], total_tokens=resp["usage"]["total_tokens"], ), ) @classmethod def model_to_completions_streaming(cls, resp, config): return completions.StreamResponsePayload( id=resp["id"], # The chat models response from OpenAI is of object type "chat.completion.chunk". # Since we're using the completions response format here, we hardcode the # "text_completion_chunk" object type in the response instead object="text_completion_chunk", created=resp["created"], model=resp["model"], choices=[ completions.StreamChoice( index=c["index"], finish_reason=c["finish_reason"], text=c["delta"].get("content"), ) for c in resp["choices"] ], ) @classmethod def model_to_embeddings(cls, resp, config): # Response example (https://platform.openai.com/docs/api-reference/embeddings/create): # ``` # { # "object": "list", # "data": [ # { # "object": "embedding", # "embedding": [ # 0.0023064255, # -0.009327292, # .... (1536 floats total for ada-002) # -0.0028842222, # ], # "index": 0 # } # ], # "model": "text-embedding-ada-002", # "usage": { # "prompt_tokens": 8, # "total_tokens": 8 # } # } # ``` return embeddings.ResponsePayload( data=[ embeddings.EmbeddingObject( embedding=d["embedding"], index=idx, ) for idx, d in enumerate(resp["data"]) ], model=resp["model"], usage=embeddings.EmbeddingsUsage( prompt_tokens=resp["usage"]["prompt_tokens"], total_tokens=resp["usage"]["total_tokens"], ), )
OpenAIAdapter
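The Azure-vs-OpenAI branch in _add_model_to_payload_if_necessary is the one subtle piece of routing here: Azure selects the model via the deployment name in the URL, so the payload must not carry a "model" key. A standalone sketch of that rule follows, with hypothetical stand-ins instead of mlflow's config objects.

AZURE_API_TYPES = {"azure", "azuread"}

def add_model_if_necessary(payload: dict, model_name: str, api_type: str) -> dict:
    # Azure routes by deployment name in the URL, so the payload omits "model".
    if api_type.lower() in AZURE_API_TYPES:
        return dict(payload)
    return {"model": model_name, **payload}

print(add_model_if_necessary({"messages": []}, "gpt-4o-mini", "openai"))
# -> {'model': 'gpt-4o-mini', 'messages': []}
print(add_model_if_necessary({"messages": []}, "gpt-4o-mini", "azure"))
# -> {'messages': []}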
python
scipy__scipy
scipy/stats/tests/test_multicomp.py
{ "start": 181, "end": 17826 }
class ____: # For the following tests, p-values were computed using Matlab, e.g. # sample = [18. 15. 18. 16. 17. 15. 14. 14. 14. 15. 15.... # 14. 15. 14. 22. 18. 21. 21. 10. 10. 11. 9.... # 25. 26. 17.5 16. 15.5 14.5 22. 22. 24. 22.5 29.... # 24.5 20. 18. 18.5 17.5 26.5 13. 16.5 13. 13. 13.... # 28. 27. 34. 31. 29. 27. 24. 23. 38. 36. 25.... # 38. 26. 22. 36. 27. 27. 32. 28. 31.... # 24. 27. 33. 32. 28. 19. 37. 31. 36. 36.... # 34. 38. 32. 38. 32.... # 26. 24. 26. 25. 29. 29.5 16.5 36. 44.... # 25. 27. 19.... # 25. 20.... # 28.]; # j = [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ... # 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ... # 0 0 0 0... # 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1... # 2 2 2 2 2 2 2 2 2... # 3 3 3... # 4 4... # 5]; # [~, ~, stats] = anova1(sample, j, "off"); # [results, ~, ~, gnames] = multcompare(stats, ... # "CriticalValueType", "dunnett", ... # "Approximate", false); # tbl = array2table(results, "VariableNames", ... # ["Group", "Control Group", "Lower Limit", ... # "Difference", "Upper Limit", "P-value"]); # tbl.("Group") = gnames(tbl.("Group")); # tbl.("Control Group") = gnames(tbl.("Control Group")) # Matlab doesn't report the statistic, so the statistics were # computed using R multcomp `glht`, e.g.: # library(multcomp) # options(digits=16) # control < - c(18.0, 15.0, 18.0, 16.0, 17.0, 15.0, 14.0, 14.0, 14.0, # 15.0, 15.0, 14.0, 15.0, 14.0, 22.0, 18.0, 21.0, 21.0, # 10.0, 10.0, 11.0, 9.0, 25.0, 26.0, 17.5, 16.0, 15.5, # 14.5, 22.0, 22.0, 24.0, 22.5, 29.0, 24.5, 20.0, 18.0, # 18.5, 17.5, 26.5, 13.0, 16.5, 13.0, 13.0, 13.0, 28.0, # 27.0, 34.0, 31.0, 29.0, 27.0, 24.0, 23.0, 38.0, 36.0, # 25.0, 38.0, 26.0, 22.0, 36.0, 27.0, 27.0, 32.0, 28.0, # 31.0) # t < - c(24.0, 27.0, 33.0, 32.0, 28.0, 19.0, 37.0, 31.0, 36.0, 36.0, # 34.0, 38.0, 32.0, 38.0, 32.0) # w < - c(26.0, 24.0, 26.0, 25.0, 29.0, 29.5, 16.5, 36.0, 44.0) # x < - c(25.0, 27.0, 19.0) # y < - c(25.0, 20.0) # z < - c(28.0) # # groups = factor(rep(c("control", "t", "w", "x", "y", "z"), # times=c(length(control), length(t), length(w), # length(x), length(y), length(z)))) # df < - data.frame(response=c(control, t, w, x, y, z), # group=groups) # model < - aov(response # ~group, data = df) # test < - glht(model=model, # linfct=mcp(group="Dunnett"), # alternative="g") # summary(test) # confint(test) # p-values agreed with those produced by Matlab to at least atol=1e-3 # From Matlab's documentation on multcompare samples_1 = [ [ 24.0, 27.0, 33.0, 32.0, 28.0, 19.0, 37.0, 31.0, 36.0, 36.0, 34.0, 38.0, 32.0, 38.0, 32.0 ], [26.0, 24.0, 26.0, 25.0, 29.0, 29.5, 16.5, 36.0, 44.0], [25.0, 27.0, 19.0], [25.0, 20.0], [28.0] ] control_1 = [ 18.0, 15.0, 18.0, 16.0, 17.0, 15.0, 14.0, 14.0, 14.0, 15.0, 15.0, 14.0, 15.0, 14.0, 22.0, 18.0, 21.0, 21.0, 10.0, 10.0, 11.0, 9.0, 25.0, 26.0, 17.5, 16.0, 15.5, 14.5, 22.0, 22.0, 24.0, 22.5, 29.0, 24.5, 20.0, 18.0, 18.5, 17.5, 26.5, 13.0, 16.5, 13.0, 13.0, 13.0, 28.0, 27.0, 34.0, 31.0, 29.0, 27.0, 24.0, 23.0, 38.0, 36.0, 25.0, 38.0, 26.0, 22.0, 36.0, 27.0, 27.0, 32.0, 28.0, 31.0 ] pvalue_1 = [4.727e-06, 0.022346, 0.97912, 0.99953, 0.86579] # Matlab # Statistic, alternative p-values, and CIs computed with R multcomp `glht` p_1_twosided = [1e-4, 0.02237, 0.97913, 0.99953, 0.86583] p_1_greater = [1e-4, 0.011217, 0.768500, 0.896991, 0.577211] p_1_less = [1, 1, 0.99660, 0.98398, .99953] statistic_1 = [5.27356, 2.91270, 0.60831, 0.27002, 0.96637] ci_1_twosided = [[5.3633917835622, 0.7296142201217, -8.3879817106607, -11.9090753452911, -11.7655021543469], [15.9709832164378, 
13.8936496687672, 13.4556900439941, 14.6434503452911, 25.4998771543469]] ci_1_greater = [5.9036402398526, 1.4000632918725, -7.2754756323636, -10.5567456382391, -9.8675629499576] ci_1_less = [15.4306165948619, 13.2230539537359, 12.3429406339544, 13.2908248513211, 23.6015228251660] pvalues_1 = dict(twosided=p_1_twosided, less=p_1_less, greater=p_1_greater) cis_1 = dict(twosided=ci_1_twosided, less=ci_1_less, greater=ci_1_greater) case_1 = dict(samples=samples_1, control=control_1, statistic=statistic_1, pvalues=pvalues_1, cis=cis_1) # From Dunnett1955 comparing with R's DescTools: DunnettTest samples_2 = [[9.76, 8.80, 7.68, 9.36], [12.80, 9.68, 12.16, 9.20, 10.55]] control_2 = [7.40, 8.50, 7.20, 8.24, 9.84, 8.32] pvalue_2 = [0.6201, 0.0058] # Statistic, alternative p-values, and CIs computed with R multcomp `glht` p_2_twosided = [0.6201020, 0.0058254] p_2_greater = [0.3249776, 0.0029139] p_2_less = [0.91676, 0.99984] statistic_2 = [0.85703, 3.69375] ci_2_twosided = [[-1.2564116462124, 0.8396273539789], [2.5564116462124, 4.4163726460211]] ci_2_greater = [-0.9588591188156, 1.1187563667543] ci_2_less = [2.2588591188156, 4.1372436332457] pvalues_2 = dict(twosided=p_2_twosided, less=p_2_less, greater=p_2_greater) cis_2 = dict(twosided=ci_2_twosided, less=ci_2_less, greater=ci_2_greater) case_2 = dict(samples=samples_2, control=control_2, statistic=statistic_2, pvalues=pvalues_2, cis=cis_2) samples_3 = [[55, 64, 64], [55, 49, 52], [50, 44, 41]] control_3 = [55, 47, 48] pvalue_3 = [0.0364, 0.8966, 0.4091] # Statistic, alternative p-values, and CIs computed with R multcomp `glht` p_3_twosided = [0.036407, 0.896539, 0.409295] p_3_greater = [0.018277, 0.521109, 0.981892] p_3_less = [0.99944, 0.90054, 0.20974] statistic_3 = [3.09073, 0.56195, -1.40488] ci_3_twosided = [[0.7529028025053, -8.2470971974947, -15.2470971974947], [21.2470971974947, 12.2470971974947, 5.2470971974947]] ci_3_greater = [2.4023682323149, -6.5976317676851, -13.5976317676851] ci_3_less = [19.5984402363662, 10.5984402363662, 3.5984402363662] pvalues_3 = dict(twosided=p_3_twosided, less=p_3_less, greater=p_3_greater) cis_3 = dict(twosided=ci_3_twosided, less=ci_3_less, greater=ci_3_greater) case_3 = dict(samples=samples_3, control=control_3, statistic=statistic_3, pvalues=pvalues_3, cis=cis_3) # From Thomson and Short, # Mucociliary function in health, chronic obstructive airway disease, # and asbestosis, Journal of Applied Physiology, 1969. 
Table 1 # Comparing with R's DescTools: DunnettTest samples_4 = [[3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]] control_4 = [2.9, 3.0, 2.5, 2.6, 3.2] pvalue_4 = [0.5832, 0.9982] # Statistic, alternative p-values, and CIs computed with R multcomp `glht` p_4_twosided = [0.58317, 0.99819] p_4_greater = [0.30225, 0.69115] p_4_less = [0.91929, 0.65212] statistic_4 = [0.90875, -0.05007] ci_4_twosided = [[-0.6898153448579, -1.0333456251632], [1.4598153448579, 0.9933456251632]] ci_4_greater = [-0.5186459268412, -0.8719655502147 ] ci_4_less = [1.2886459268412, 0.8319655502147] pvalues_4 = dict(twosided=p_4_twosided, less=p_4_less, greater=p_4_greater) cis_4 = dict(twosided=ci_4_twosided, less=ci_4_less, greater=ci_4_greater) case_4 = dict(samples=samples_4, control=control_4, statistic=statistic_4, pvalues=pvalues_4, cis=cis_4) @pytest.mark.parametrize( 'rho, n_groups, df, statistic, pvalue, alternative', [ # From Dunnett1955 # Tables 1a and 1b pages 1117-1118 (0.5, 1, 10, 1.81, 0.05, "greater"), # different than two-sided (0.5, 3, 10, 2.34, 0.05, "greater"), (0.5, 2, 30, 1.99, 0.05, "greater"), (0.5, 5, 30, 2.33, 0.05, "greater"), (0.5, 4, 12, 3.32, 0.01, "greater"), (0.5, 7, 12, 3.56, 0.01, "greater"), (0.5, 2, 60, 2.64, 0.01, "greater"), (0.5, 4, 60, 2.87, 0.01, "greater"), (0.5, 4, 60, [2.87, 2.21], [0.01, 0.05], "greater"), # Tables 2a and 2b pages 1119-1120 (0.5, 1, 10, 2.23, 0.05, "two-sided"), # two-sided (0.5, 3, 10, 2.81, 0.05, "two-sided"), (0.5, 2, 30, 2.32, 0.05, "two-sided"), (0.5, 3, 20, 2.57, 0.05, "two-sided"), (0.5, 4, 12, 3.76, 0.01, "two-sided"), (0.5, 7, 12, 4.08, 0.01, "two-sided"), (0.5, 2, 60, 2.90, 0.01, "two-sided"), (0.5, 4, 60, 3.14, 0.01, "two-sided"), (0.5, 4, 60, [3.14, 2.55], [0.01, 0.05], "two-sided"), ], ) def test_critical_values( self, rho, n_groups, df, statistic, pvalue, alternative ): rng = np.random.default_rng(165250594791731684851746311027739134893) rho = np.full((n_groups, n_groups), rho) np.fill_diagonal(rho, 1) statistic = np.array(statistic) res = _pvalue_dunnett( rho=rho, df=df, statistic=statistic, alternative=alternative, rng=rng ) assert_allclose(res, pvalue, atol=5e-3) @pytest.mark.parametrize( 'samples, control, pvalue, statistic', [ (samples_1, control_1, pvalue_1, statistic_1), (samples_2, control_2, pvalue_2, statistic_2), (samples_3, control_3, pvalue_3, statistic_3), (samples_4, control_4, pvalue_4, statistic_4), ] ) def test_basic(self, samples, control, pvalue, statistic): rng = np.random.default_rng(11681140010308601919115036826969764808) res = stats.dunnett(*samples, control=control, rng=rng) assert isinstance(res, DunnettResult) assert_allclose(res.statistic, statistic, rtol=5e-5) assert_allclose(res.pvalue, pvalue, rtol=1e-2, atol=1e-4) @pytest.mark.parametrize( 'alternative', ['two-sided', 'less', 'greater'] ) def test_ttest_ind(self, alternative): # check that `dunnett` agrees with `ttest_ind` # when there are only two groups rng = np.random.default_rng(114184017807316971636137493526995620351) for _ in range(10): sample = rng.integers(-100, 100, size=(10,)) control = rng.integers(-100, 100, size=(10,)) # preserve use of old random_state during SPEC 7 transition res = stats.dunnett( sample, control=control, alternative=alternative, random_state=rng ) ref = stats.ttest_ind( sample, control, alternative=alternative ) assert_allclose(res.statistic, ref.statistic, rtol=1e-3, atol=1e-5) assert_allclose(res.pvalue, ref.pvalue, rtol=1e-3, atol=1e-5) @pytest.mark.parametrize( 'alternative, pvalue', [ ('less', [0, 1]), ('greater', [1, 0]), 
('two-sided', [0, 0]), ] ) def test_alternatives(self, alternative, pvalue): rng = np.random.default_rng(114184017807316971636137493526995620351) # width of 20 and min diff between samples/control is 60 # and maximal diff would be 100 sample_less = rng.integers(0, 20, size=(10,)) control = rng.integers(80, 100, size=(10,)) sample_greater = rng.integers(160, 180, size=(10,)) res = stats.dunnett( sample_less, sample_greater, control=control, alternative=alternative, rng=rng ) assert_allclose(res.pvalue, pvalue, atol=1e-7) ci = res.confidence_interval() # two-sided is comparable for high/low if alternative == 'less': assert np.isneginf(ci.low).all() assert -100 < ci.high[0] < -60 assert 60 < ci.high[1] < 100 elif alternative == 'greater': assert -100 < ci.low[0] < -60 assert 60 < ci.low[1] < 100 assert np.isposinf(ci.high).all() elif alternative == 'two-sided': assert -100 < ci.low[0] < -60 assert 60 < ci.low[1] < 100 assert -100 < ci.high[0] < -60 assert 60 < ci.high[1] < 100 @pytest.mark.parametrize("case", [case_1, case_2, case_3, case_4]) @pytest.mark.parametrize("alternative", ['less', 'greater', 'two-sided']) def test_against_R_multicomp_glht(self, case, alternative): rng = np.random.default_rng(189117774084579816190295271136455278291) samples = case['samples'] control = case['control'] alternatives = {'less': 'less', 'greater': 'greater', 'two-sided': 'twosided'} p_ref = case['pvalues'][alternative.replace('-', '')] res = stats.dunnett(*samples, control=control, alternative=alternative, rng=rng) # atol can't be tighter because R reports some pvalues as "< 1e-4" assert_allclose(res.pvalue, p_ref, rtol=5e-3, atol=1e-4) ci_ref = case['cis'][alternatives[alternative]] if alternative == "greater": ci_ref = [ci_ref, np.inf] elif alternative == "less": ci_ref = [-np.inf, ci_ref] assert res._ci is None assert res._ci_cl is None ci = res.confidence_interval(confidence_level=0.95) assert_allclose(ci.low, ci_ref[0], rtol=5e-3, atol=1e-5) assert_allclose(ci.high, ci_ref[1], rtol=5e-3, atol=1e-5) # re-run to use the cached value "is" to check id as same object assert res._ci is ci assert res._ci_cl == 0.95 ci_ = res.confidence_interval(confidence_level=0.95) assert ci_ is ci @pytest.mark.parametrize('alternative', ["two-sided", "less", "greater"]) def test_str(self, alternative): rng = np.random.default_rng(189117774084579816190295271136455278291) res = stats.dunnett( *self.samples_3, control=self.control_3, alternative=alternative, rng=rng ) # check some str output res_str = str(res) assert '(Sample 2 - Control)' in res_str assert '95.0%' in res_str if alternative == 'less': assert '-inf' in res_str assert '19.' in res_str elif alternative == 'greater': assert 'inf' in res_str assert '-13.' in res_str else: assert 'inf' not in res_str assert '21.' 
in res_str def test_warnings(self): rng = np.random.default_rng(189117774084579816190295271136455278291) res = stats.dunnett( *self.samples_3, control=self.control_3, rng=rng ) msg = r"Computation of the confidence interval did not converge" with pytest.warns(UserWarning, match=msg): res._allowance(tol=1e-5) def test_raises(self): samples, control = self.samples_3, self.control_3 # alternative with pytest.raises(ValueError, match="alternative must be"): stats.dunnett(*samples, control=control, alternative='bob') # 2D for a sample samples_ = copy.deepcopy(samples) samples_[0] = [samples_[0]] with pytest.raises(ValueError, match="must be 1D arrays"): stats.dunnett(*samples_, control=control) # 2D for control control_ = copy.deepcopy(control) control_ = [control_] with pytest.raises(ValueError, match="must be 1D arrays"): stats.dunnett(*samples, control=control_) # No obs in a sample samples_ = copy.deepcopy(samples) samples_[1] = [] with pytest.raises(ValueError, match="at least 1 observation"): stats.dunnett(*samples_, control=control) # No obs in control control_ = [] with pytest.raises(ValueError, match="at least 1 observation"): stats.dunnett(*samples, control=control_) res = stats.dunnett(*samples, control=control) with pytest.raises(ValueError, match="Confidence level must"): res.confidence_interval(confidence_level=3) @pytest.mark.filterwarnings("ignore:Computation of the confidence") @pytest.mark.parametrize('n_samples', [1, 2, 3]) def test_shapes(self, n_samples): rng = np.random.default_rng(689448934110805334) samples = rng.normal(size=(n_samples, 10)) control = rng.normal(size=10) res = stats.dunnett(*samples, control=control, rng=rng) assert res.statistic.shape == (n_samples,) assert res.pvalue.shape == (n_samples,) ci = res.confidence_interval() assert ci.low.shape == (n_samples,) assert ci.high.shape == (n_samples,)
TestDunnett
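As a quick orientation for the reference data above, this is roughly how the API under test is driven (scipy.stats.dunnett, available in recent SciPy). The inputs are the Dunnett (1955) case from the test class, and the printed values are the approximate statistics and p-values the tests compare against.

from scipy import stats

samples = [[9.76, 8.80, 7.68, 9.36], [12.80, 9.68, 12.16, 9.20, 10.55]]
control = [7.40, 8.50, 7.20, 8.24, 9.84, 8.32]

res = stats.dunnett(*samples, control=control)        # two-sided by default
print(res.statistic)                                   # approx [0.857, 3.694]
print(res.pvalue)                                      # approx [0.620, 0.006]
ci = res.confidence_interval(confidence_level=0.95)
print(ci.low, ci.high)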
python
jazzband__django-oauth-toolkit
oauth2_provider/migrations/0004_auto_20200902_2022.py
{ "start": 176, "end": 2700 }
class ____(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('oauth2_provider', '0003_auto_20201211_1314'), ] operations = [ migrations.AddField( model_name='application', name='algorithm', field=models.CharField(blank=True, choices=[("", "No OIDC support"), ('RS256', 'RSA with SHA-2 256'), ('HS256', 'HMAC with SHA-2 256')], default='', max_length=5), ), migrations.AlterField( model_name='application', name='authorization_grant_type', field=models.CharField(choices=[('authorization-code', 'Authorization code'), ('implicit', 'Implicit'), ('password', 'Resource owner password-based'), ('client-credentials', 'Client credentials'), ('openid-hybrid', 'OpenID connect hybrid')], max_length=32), ), migrations.CreateModel( name='IDToken', fields=[ ('id', models.BigAutoField(primary_key=True, serialize=False)), ("jti", models.UUIDField(unique=True, default=uuid.uuid4, editable=False, verbose_name="JWT Token ID")), ('expires', models.DateTimeField()), ('scope', models.TextField(blank=True)), ('created', models.DateTimeField(auto_now_add=True)), ('updated', models.DateTimeField(auto_now=True)), ('application', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=oauth2_settings.APPLICATION_MODEL)), ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='oauth2_provider_idtoken', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, 'swappable': 'OAUTH2_PROVIDER_ID_TOKEN_MODEL', }, ), migrations.AddField( model_name='accesstoken', name='id_token', field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='access_token', to=oauth2_settings.ID_TOKEN_MODEL), ), migrations.AddField( model_name="grant", name="nonce", field=models.CharField(blank=True, max_length=255, default=""), ), migrations.AddField( model_name="grant", name="claims", field=models.TextField(blank=True), ), ]
Migration
python
django-import-export__django-import-export
tests/core/tests/test_resources/test_modelresource/test_resource_setup.py
{ "start": 262, "end": 2192 }
class ____(TestCase): def setUp(self): self.resource = BookResource() self.book = Book.objects.create(name="Some book") self.dataset = tablib.Dataset(headers=["id", "name", "author_email", "price"]) row = [self.book.pk, "Some book", "test@example.com", "10.25"] self.dataset.append(row) def test_default_instance_loader_class(self): self.assertIs(self.resource._meta.instance_loader_class, ModelInstanceLoader) def test_fields(self): fields = self.resource.fields self.assertIn("id", fields) self.assertIn("name", fields) self.assertIn("author_email", fields) self.assertIn("price", fields) def test_fields_foreign_key(self): fields = self.resource.fields self.assertIn("author", fields) widget = fields["author"].widget self.assertIsInstance(widget, widgets.ForeignKeyWidget) self.assertEqual(widget.model, Author) def test_get_display_name(self): display_name = self.resource.get_display_name() self.assertEqual(display_name, "BookResource") class BookResource(resources.ModelResource): class Meta: name = "Foo Name" model = Book import_id_fields = ["name"] resource = BookResource() display_name = resource.get_display_name() self.assertEqual(display_name, "Foo Name") def test_fields_m2m(self): fields = self.resource.fields self.assertIn("categories", fields) def test_excluded_fields(self): self.assertNotIn("imported", self.resource.fields) def test_init_instance(self): instance = self.resource.init_instance() self.assertIsInstance(instance, Book) def test_default(self): self.assertEqual( WithDefaultResource.fields["name"].clean({"name": ""}), "foo_bar" )
TestResourceSetup
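For reference, a hedged sketch of the kind of resource declaration these tests exercise; the model import path is an assumption, and the Meta options shown mirror what the assertions check (excluded field, widgets generated from the model's FK and M2M columns).

from import_export import resources
from core.models import Book  # hypothetical import path for the test app's model

class BookResource(resources.ModelResource):
    class Meta:
        model = Book
        exclude = ("imported",)        # mirrors test_excluded_fields
        import_id_fields = ("id",)

# FK columns (author) get a ForeignKeyWidget and M2M columns (categories) a
# ManyToManyWidget automatically, which is what the widget assertions check.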
python
huggingface__transformers
tests/models/grounding_dino/test_modeling_grounding_dino.py
{ "start": 25536, "end": 36850 }
class ____(unittest.TestCase): @cached_property def default_processor(self): return AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-tiny") if is_vision_available() else None def test_inference_object_detection_head(self): model = GroundingDinoForObjectDetection.from_pretrained("IDEA-Research/grounding-dino-tiny").to(torch_device) processor = self.default_processor image = prepare_img() text = prepare_text() encoding = processor(images=image, text=text, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**encoding) expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.d_model)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expectations = Expectations( { (None, None): [[0.7674, 0.4136, 0.4572], [0.2566, 0.5463, 0.4760], [0.2585, 0.5442, 0.4641]], ("cuda", 8): [[0.7674, 0.4135, 0.4571], [0.2566, 0.5463, 0.4760], [0.2585, 0.5442, 0.4640]], } ) expected_boxes = torch.tensor(expectations.get_expectation()).to(torch_device) expectations = Expectations( { (None, None): [[-4.8913, -0.1900, -0.2161], [-4.9653, -0.3719, -0.3950], [-5.9599, -3.3765, -3.3104]], ("cuda", 8): [[-4.8915, -0.1900, -0.2161], [-4.9658, -0.3716, -0.3948], [-5.9596, -3.3763, -3.3103]], } ) expected_logits = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=1e-3, atol=1e-3) expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=2e-4, atol=2e-4) # verify postprocessing results = processor.image_processor.post_process_object_detection( outputs, threshold=0.35, target_sizes=[(image.height, image.width)] )[0] expectations = Expectations( { (None, None): [0.4526, 0.4082], ("cuda", 8): [0.4524, 0.4074], } ) expected_scores = torch.tensor(expectations.get_expectation()).to(torch_device) expectations = Expectations( { (None, None): [344.8143, 23.1796, 637.4004, 373.8295], ("cuda", 8): [344.8210, 23.1831, 637.3943, 373.8227], } ) expected_slice_boxes = torch.tensor(expectations.get_expectation()).to(torch_device) self.assertEqual(len(results["scores"]), 2) torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-3, atol=1e-3) torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2) # verify grounded postprocessing expected_labels = ["a cat", "a cat"] results = processor.post_process_grounded_object_detection( outputs=outputs, input_ids=encoding.input_ids, threshold=0.35, text_threshold=0.3, target_sizes=[(image.height, image.width)], )[0] torch.testing.assert_close(results["scores"], expected_scores, rtol=1e-3, atol=1e-3) torch.testing.assert_close(results["boxes"][0, :], expected_slice_boxes, rtol=1e-2, atol=1e-2) self.assertListEqual(results["text_labels"], expected_labels) @require_torch_accelerator @is_flaky() def test_inference_object_detection_head_equivalence_cpu_accelerator(self): processor = self.default_processor image = prepare_img() text = prepare_text() encoding = processor(images=image, text=text, return_tensors="pt") # 1. run model on CPU model = GroundingDinoForObjectDetection.from_pretrained("IDEA-Research/grounding-dino-tiny") with torch.no_grad(): cpu_outputs = model(**encoding) # 2. run model on accelerator model.to(torch_device) encoding = encoding.to(torch_device) with torch.no_grad(): gpu_outputs = model(**encoding) # 3. 
assert equivalence for key in cpu_outputs: torch.testing.assert_close(cpu_outputs[key], gpu_outputs[key].cpu(), rtol=1e-3, atol=1e-3) expected_logits = torch.tensor( [[-4.8915, -0.1900, -0.2161], [-4.9658, -0.3716, -0.3948], [-5.9596, -3.3763, -3.3103]] ) torch.testing.assert_close(cpu_outputs.logits[0, :3, :3], expected_logits, rtol=1e-3, atol=1e-3) # assert postprocessing results_cpu = processor.image_processor.post_process_object_detection( cpu_outputs, threshold=0.35, target_sizes=[(image.height, image.width)] )[0] result_gpu = processor.image_processor.post_process_object_detection( gpu_outputs, threshold=0.35, target_sizes=[(image.height, image.width)] )[0] torch.testing.assert_close(results_cpu["scores"], result_gpu["scores"].cpu(), rtol=1e-3, atol=1e-3) torch.testing.assert_close(results_cpu["boxes"], result_gpu["boxes"].cpu(), rtol=1e-3, atol=1e-3) @is_flaky() def test_cross_attention_mask(self): model = GroundingDinoForObjectDetection.from_pretrained("IDEA-Research/grounding-dino-tiny").to(torch_device) processor = self.default_processor image = prepare_img() text1 = "a cat." text2 = "a remote control." text_batched = [text1, text2] encoding1 = processor(images=image, text=text1, return_tensors="pt").to(torch_device) encoding2 = processor(images=image, text=text2, return_tensors="pt").to(torch_device) # If we batch the text and cross attention masking is working the batched result should be equal to # The single text result encoding_batched = processor( images=[image] * len(text_batched), text=text_batched, padding="longest", return_tensors="pt" ).to(torch_device) with torch.no_grad(): outputs1 = model(**encoding1) outputs2 = model(**encoding2) outputs_batched = model(**encoding_batched) torch.testing.assert_close(outputs1.logits, outputs_batched.logits[:1], rtol=1e-3, atol=1e-3) # For some reason 12 elements are > 1e-3, but the rest are fine self.assertTrue(torch.allclose(outputs2.logits, outputs_batched.logits[1:], atol=1.8e-3)) def test_grounding_dino_loss(self): ds = load_dataset("EduardoPacheco/aquarium-sample", split="train") image_processor = self.default_processor.image_processor tokenizer = self.default_processor.tokenizer id2label = {0: "fish", 1: "jellyfish", 2: "penguins", 3: "sharks", 4: "puffins", 5: "stingrays", 6: "starfish"} prompt = ". ".join(id2label.values()) + "." text_inputs = tokenizer([prompt, prompt], return_tensors="pt") image_inputs = image_processor( images=list(ds["image"]), annotations=list(ds["annotations"]), return_tensors="pt" ) # Passing auxiliary_loss=True to compare with the expected loss model = GroundingDinoForObjectDetection.from_pretrained( "IDEA-Research/grounding-dino-tiny", auxiliary_loss=True, ) # Interested in the loss only model.eval() with torch.no_grad(): outputs = model(**text_inputs, **image_inputs) # Loss differs by CPU and accelerator, also this can be changed in future. 
expected_loss_dicts = Expectations( { ("xpu", 3): { "loss_ce": torch.tensor(1.1147), "loss_bbox": torch.tensor(0.2031), "loss_giou": torch.tensor(0.5819), "loss_ce_0": torch.tensor(1.1941), "loss_bbox_0": torch.tensor(0.1978), "loss_giou_0": torch.tensor(0.5524), "loss_ce_1": torch.tensor(1.1621), "loss_bbox_1": torch.tensor(0.1909), "loss_giou_1": torch.tensor(0.5892), "loss_ce_2": torch.tensor(1.1641), "loss_bbox_2": torch.tensor(0.1892), "loss_giou_2": torch.tensor(0.5626), "loss_ce_3": torch.tensor(1.1943), "loss_bbox_3": torch.tensor(0.1941), "loss_giou_3": torch.tensor(0.5592), "loss_ce_4": torch.tensor(1.0956), "loss_bbox_4": torch.tensor(0.2037), "loss_giou_4": torch.tensor(0.5813), "loss_ce_enc": torch.tensor(16226.3164), "loss_bbox_enc": torch.tensor(0.3063), "loss_giou_enc": torch.tensor(0.7380), }, ("cuda", None): { "loss_ce": torch.tensor(1.1147), "loss_bbox": torch.tensor(0.2031), "loss_giou": torch.tensor(0.5819), "loss_ce_0": torch.tensor(1.1941), "loss_bbox_0": torch.tensor(0.1978), "loss_giou_0": torch.tensor(0.5524), "loss_ce_1": torch.tensor(1.1621), "loss_bbox_1": torch.tensor(0.1909), "loss_giou_1": torch.tensor(0.5892), "loss_ce_2": torch.tensor(1.1641), "loss_bbox_2": torch.tensor(0.1892), "loss_giou_2": torch.tensor(0.5626), "loss_ce_3": torch.tensor(1.1943), "loss_bbox_3": torch.tensor(0.1941), "loss_giou_3": torch.tensor(0.5607), "loss_ce_4": torch.tensor(1.0956), "loss_bbox_4": torch.tensor(0.2008), "loss_giou_4": torch.tensor(0.5836), "loss_ce_enc": torch.tensor(16226.3164), "loss_bbox_enc": torch.tensor(0.3063), "loss_giou_enc": torch.tensor(0.7380), }, } ) # fmt: skip expected_loss_dict = expected_loss_dicts.get_expectation() expected_loss = torch.tensor(32482.2305) for key in expected_loss_dict: torch.testing.assert_close(outputs.loss_dict[key], expected_loss_dict[key], rtol=1e-5, atol=1e-3) self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-3))
GroundingDinoModelIntegrationTests
python
getsentry__sentry
tests/sentry/rules/history/backends/test_postgres.py
{ "start": 1640, "end": 10182 }
class ____(BasePostgresRuleHistoryBackendTest): def run_test(self, rule, start, end, expected, cursor=None, per_page=25): result = self.backend.fetch_rule_groups_paginated(rule, start, end, cursor, per_page) assert result.results == expected, (result.results, expected) return result def test(self) -> None: history = [] rule = Rule.objects.create(project=self.event.project) for i in range(3): history.append( RuleFireHistory( project=rule.project, rule=rule, group=self.group, date_added=before_now(days=i + 1), ) ) group_2 = self.create_group() history.append( RuleFireHistory( project=rule.project, rule=rule, group=group_2, date_added=before_now(days=1) ) ) group_3 = self.create_group() for i in range(2): history.append( RuleFireHistory( project=rule.project, rule=rule, group=group_3, date_added=before_now(days=i + 1), ) ) rule_2 = Rule.objects.create(project=self.event.project) history.append( RuleFireHistory( project=rule.project, rule=rule_2, group=self.group, date_added=before_now(days=0) ) ) RuleFireHistory.objects.bulk_create(history) base_triggered_date = before_now(days=1) self.run_test( rule, before_now(days=6), before_now(days=0), [ RuleGroupHistory(self.group, count=3, last_triggered=base_triggered_date), RuleGroupHistory(group_3, count=2, last_triggered=base_triggered_date), RuleGroupHistory(group_2, count=1, last_triggered=base_triggered_date), ], ) result = self.run_test( rule, before_now(days=6), before_now(days=0), [ RuleGroupHistory(self.group, count=3, last_triggered=base_triggered_date), ], per_page=1, ) result = self.run_test( rule, before_now(days=6), before_now(days=0), [ RuleGroupHistory(group_3, count=2, last_triggered=base_triggered_date), ], cursor=result.next, per_page=1, ) self.run_test( rule, before_now(days=6), before_now(days=0), [ RuleGroupHistory(group_2, count=1, last_triggered=base_triggered_date), ], cursor=result.next, per_page=1, ) self.run_test( rule, before_now(days=1), before_now(days=0), [ RuleGroupHistory(self.group, count=1, last_triggered=base_triggered_date), RuleGroupHistory(group_2, count=1, last_triggered=base_triggered_date), RuleGroupHistory(group_3, count=1, last_triggered=base_triggered_date), ], ) self.run_test( rule, before_now(days=3), before_now(days=2), [ RuleGroupHistory( self.group, count=1, last_triggered=base_triggered_date - timedelta(days=2) ), ], ) def test_event_id(self) -> None: rule = Rule.objects.create(project=self.event.project) for i in range(3): RuleFireHistory.objects.create( project=rule.project, rule=rule, group=self.group, date_added=before_now(days=i + 1), event_id=i, ) base_triggered_date = before_now(days=1) self.run_test( rule, before_now(days=3), before_now(days=0), [ RuleGroupHistory( group=self.group, count=3, last_triggered=base_triggered_date, event_id="0" ) ], ) group_2 = self.create_group() for i in range(3): RuleFireHistory.objects.create( project=rule.project, rule=rule, group=group_2, date_added=before_now(days=i + 4), event_id=i + 3, ) self.run_test( rule, before_now(days=5), before_now(days=2), [ RuleGroupHistory( group=group_2, count=2, last_triggered=base_triggered_date - timedelta(days=3), event_id="3", ), RuleGroupHistory( group=self.group, count=1, last_triggered=base_triggered_date - timedelta(days=2), event_id="2", ), ], ) def test_combined_rule_and_workflow_history(self) -> None: """Test combining RuleFireHistory and WorkflowFireHistory when feature flag is enabled""" rule = self.create_project_rule(project=self.event.project) workflow = 
self.create_workflow(organization=self.event.project.organization) AlertRuleWorkflow.objects.create(rule_id=rule.id, workflow=workflow) # Create some RuleFireHistory entries rule_history = [] for i in range(2): rule_history.append( RuleFireHistory( project=rule.project, rule=rule, group=self.group, date_added=before_now(days=i + 1), event_id=f"rule_event_{i}", ) ) # Create some WorkflowFireHistory entries for i in range(2): wfh = WorkflowFireHistory.objects.create( workflow=workflow, group=self.group, date_added=before_now(days=i + 3), event_id=f"workflow_event_{i}", ) wfh.update(date_added=before_now(days=i + 3)) RuleFireHistory.objects.bulk_create(rule_history) group_2 = self.create_group() RuleFireHistory.objects.create( project=rule.project, rule=rule, group=group_2, date_added=before_now(days=3), event_id="rule_event_group2", ) new_workflow_history = WorkflowFireHistory.objects.create( workflow=workflow, group=group_2, date_added=before_now(days=2), event_id="workflow_event_group2", ) new_workflow_history.update(date_added=before_now(days=2)) self.run_test( rule, before_now(days=6), before_now(days=0), [ RuleGroupHistory( group=self.group, count=4, # 4 from the original data last_triggered=before_now(days=1), # Most recent from RuleFireHistory event_id="rule_event_0", ), RuleGroupHistory( group=group_2, count=2, # 2 from both Rule and WorkflowFireHistory last_triggered=before_now(days=2), event_id="workflow_event_group2", ), ], ) # Test pagination result = self.run_test( rule, before_now(days=6), before_now(days=0), [ RuleGroupHistory( group=self.group, count=4, last_triggered=before_now(days=1), event_id="rule_event_0", ), ], per_page=1, ) self.run_test( rule, before_now(days=6), before_now(days=0), [ RuleGroupHistory( group=group_2, count=2, last_triggered=before_now(days=2), event_id="workflow_event_group2", ), ], per_page=1, cursor=result.next, ) @freeze_time()
FetchRuleGroupsPaginatedTest
python
PrefectHQ__prefect
src/integrations/prefect-databricks/prefect_databricks/models/jobs.py
{ "start": 104898, "end": 117709 }
class ____(BaseModel): """ See source code for the fields' description. """ model_config = ConfigDict(extra="allow", frozen=True) autoscale: Optional[AutoScale] = Field( None, description=( "If autoscale, parameters needed in order to automatically scale clusters" " up and down based on load." ), ) autotermination_minutes: Optional[int] = Field( None, description=( "Automatically terminates the cluster after it is inactive for this time in" " minutes. If not set, this cluster is not be automatically terminated. If" " specified, the threshold must be between 10 and 10000 minutes. You can" " also set this value to 0 to explicitly disable automatic termination." ), ) aws_attributes: Optional[AwsAttributes] = Field( None, description=( "Attributes related to clusters running on Amazon Web Services. If not" " specified at cluster creation, a set of default values is used." ), ) cluster_cores: Optional[float] = Field( None, description=( "Number of CPU cores available for this cluster. This can be fractional" " since certain node types are configured to share cores between Spark" " nodes on the same instance." ), ) cluster_id: Optional[str] = Field( None, description=( "Canonical identifier for the cluster. This ID is retained during cluster" " restarts and resizes, while each new cluster has a globally unique ID." ), ) cluster_log_conf: Optional[ClusterLogConf] = Field( None, description=( "The configuration for delivering Spark logs to a long-term storage" " destination. Only one destination can be specified for one cluster. If" " the conf is given, the logs are delivered to the destination every `5" " mins`. The destination of driver logs is" " `<destination>/<cluster-ID>/driver`, while the destination of executor" " logs is `<destination>/<cluster-ID>/executor`." ), ) cluster_log_status: Optional[LogSyncStatus] = Field( None, description="Cluster log delivery status." ) cluster_memory_mb: Optional[int] = Field( None, description="Total amount of cluster memory, in megabytes." ) cluster_name: Optional[str] = Field( None, description=( "Cluster name requested by the user. This doesn’t have to be unique. If not" " specified at creation, the cluster name is an empty string." ), ) cluster_source: Optional[ClusterSource] = Field( None, description=( "Determines whether the cluster was created by a user through the UI, by" " the Databricks Jobs scheduler, or through an API request." ), ) creator_user_name: Optional[str] = Field( None, description=( "Creator user name. The field won’t be included in the response if the user" " has already been deleted." ), ) custom_tags: Optional[List[ClusterTag]] = Field( None, description=( "An object containing a set of tags for cluster resources. 
Databricks tags" " all cluster resources (such as AWS instances and EBS volumes) with these" " tags in addition to default_tags.\n\n**Note**:\n\n* Tags are not" " supported on legacy node types such as compute-optimized and" " memory-optimized\n* Databricks allows at most 45 custom tags" ), ) default_tags: Optional[ClusterTag] = Field( None, description=( "An object containing a set of tags that are added by Databricks regardless" " of any custom_tags, including:\n\n* Vendor: Databricks\n* Creator:" " <username-of-creator>\n* ClusterName: <name-of-cluster>\n* ClusterId:" " <id-of-cluster>\n* Name: <Databricks internal use> \nOn job clusters:\n*" " RunName: <name-of-job>\n* JobId: <id-of-job> \nOn resources used by" " Databricks SQL:\n* SqlEndpointId: <id-of-endpoint>" ), ) docker_image: Optional[DockerImage] = Field( None, description=( "Docker image for a [custom" " container](https://docs.databricks.com/clusters/custom-containers.html)." ), ) driver: Optional[SparkNode] = Field( None, description=( "Node on which the Spark driver resides. The driver node contains the Spark" " master and the Databricks application that manages the per-notebook Spark" " REPLs." ), ) driver_node_type_id: Optional[str] = Field( None, description=( "The node type of the Spark driver. This field is optional; if unset, the" " driver node type is set as the same value as `node_type_id` defined" " above." ), ) enable_elastic_disk: Optional[bool] = Field( None, description=( "Autoscaling Local Storage: when enabled, this cluster dynamically acquires" " additional disk space when its Spark workers are running low on disk" " space. This feature requires specific AWS permissions to function" " correctly - refer to [Autoscaling local" " storage](https://docs.databricks.com/clusters/configure.html#autoscaling-local-storage)" " for details." ), ) executors: Optional[List[SparkNode]] = Field( None, description="Nodes on which the Spark executors reside." ) init_scripts: Optional[List[InitScriptInfo]] = Field( None, description=( "The configuration for storing init scripts. Any number of destinations can" " be specified. The scripts are executed sequentially in the order" " provided. If `cluster_log_conf` is specified, init script logs are sent" " to `<destination>/<cluster-ID>/init_scripts`." ), ) instance_pool_id: Optional[str] = Field( None, description=( "The optional ID of the instance pool to which the cluster belongs. Refer" " to [Pools](https://docs.databricks.com/clusters/instance-pools/index.html)" " for details." ), ) jdbc_port: Optional[int] = Field( None, description=( "Port on which Spark JDBC server is listening in the driver node. No" " service listens on this port in executor nodes." ), ) last_activity_time: Optional[int] = Field( None, description=( "Time (in epoch milliseconds) when the cluster was last active. A cluster" " is active if there is at least one command that has not finished on the" " cluster. This field is available after the cluster has reached a" " `RUNNING` state. Updates to this field are made as best-effort attempts." " Certain versions of Spark do not support reporting of cluster activity." " Refer to [Automatic" " termination](https://docs.databricks.com/clusters/clusters-manage.html#automatic-termination)" " for details." ), ) last_state_loss_time: Optional[int] = Field( None, description=( "Time when the cluster driver last lost its state (due to a restart or" " driver failure)." 
), ) node_type_id: Optional[str] = Field( None, description=( "This field encodes, through a single value, the resources available to" " each of the Spark nodes in this cluster. For example, the Spark nodes can" " be provisioned and optimized for memory or compute intensive workloads. A" " list of available node types can be retrieved by using the [List node" " types](https://docs.databricks.com/dev-tools/api/latest/clusters.html#list-node-types)" " API call." ), ) num_workers: Optional[int] = Field( None, description=( "If num_workers, number of worker nodes that this cluster must have. A" " cluster has one Spark driver and num_workers executors for a total of" " num_workers + 1 Spark nodes. **Note:** When reading the properties of a" " cluster, this field reflects the desired number of workers rather than" " the actual number of workers. For instance, if a cluster is resized from" " 5 to 10 workers, this field is immediately updated to reflect the target" " size of 10 workers, whereas the workers listed in `executors` gradually" " increase from 5 to 10 as the new nodes are provisioned." ), ) spark_conf: Optional[SparkConfPair] = Field( None, description=( "An object containing a set of optional, user-specified Spark configuration" " key-value pairs. You can also pass in a string of extra JVM options to" " the driver and the executors via `spark.driver.extraJavaOptions` and" " `spark.executor.extraJavaOptions` respectively.\n\nExample Spark confs:" ' `{"spark.speculation": true, "spark.streaming.ui.retainedBatches": 5}` or' ' `{"spark.driver.extraJavaOptions": "-verbose:gc -XX:+PrintGCDetails"}`' ), ) spark_context_id: Optional[int] = Field( None, description=( "A canonical SparkContext identifier. This value _does_ change when the" " Spark driver restarts. The pair `(cluster_id, spark_context_id)` is a" " globally unique identifier over all Spark contexts." ), ) spark_env_vars: Optional[SparkEnvPair] = Field( None, description=( "An object containing a set of optional, user-specified environment" " variable key-value pairs. Key-value pairs of the form (X,Y) are exported" " as is (that is, `export X='Y'`) while launching the driver and" " workers.\n\nTo specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we" " recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the" " following example. This ensures that all default databricks managed" " environmental variables are included as well.\n\nExample Spark" ' environment variables: `{"SPARK_WORKER_MEMORY": "28000m",' ' "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS":' ' "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`' ), ) spark_version: Optional[str] = Field( None, description=( "The runtime version of the cluster. You can retrieve a list of available" " runtime versions by using the [Runtime" " versions](https://docs.databricks.com/dev-tools/api/latest/clusters.html#runtime-versions)" " API call." ), ) ssh_public_keys: Optional[List[str]] = Field( None, description=( "SSH public key contents that are added to each Spark node in this cluster." " The corresponding private keys can be used to login with the user name" " `ubuntu` on port `2200`. Up to 10 keys can be specified." ), ) start_time: Optional[int] = Field( None, description=( "Time (in epoch milliseconds) when the cluster creation request was" " received (when the cluster entered a `PENDING` state)." 
), ) state: Optional[ClusterState] = Field(None, description="State of the cluster.") state_message: Optional[str] = Field( None, description=( "A message associated with the most recent state transition (for example," " the reason why the cluster entered a `TERMINATED` state). This field is" " unstructured, and its exact format is subject to change." ), ) terminated_time: Optional[int] = Field( None, description=( "Time (in epoch milliseconds) when the cluster was terminated, if" " applicable." ), ) termination_reason: Optional[TerminationReason] = Field( None, description=( "Information about why the cluster was terminated. This field only appears" " when the cluster is in a `TERMINATING` or `TERMINATED` state." ), )
ClusterInfo
python
wandb__wandb
wandb/vendor/pygments/lexers/markup.py
{ "start": 16904, "end": 20452 }
class ____(RegexLexer):
    """
    For `Markdown <https://help.github.com/categories/writing-on-github/>`_ markup.

    .. versionadded:: 2.2
    """
    name = 'markdown'
    aliases = ['md']
    filenames = ['*.md']
    mimetypes = ["text/x-markdown"]
    flags = re.MULTILINE

    def _handle_codeblock(self, match):
        """
        match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks
        """
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), String, match.group(1)
        yield match.start(2), String, match.group(2)
        yield match.start(3), Text, match.group(3)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(2).strip())
            except ClassNotFound:
                pass
        code = match.group(4)

        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(4), String, code
            return

        for item in do_insertions([], lexer.get_tokens_unprocessed(code)):
            yield item

        yield match.start(5), String, match.group(5)

    tokens = {
        'root': [
            # heading with pound prefix
            (r'^(#)([^#].+\n)', bygroups(Generic.Heading, Text)),
            (r'^(#{2,6})(.+\n)', bygroups(Generic.Subheading, Text)),
            # task list
            (r'^(\s*)([*-] )(\[[ xX]\])( .+\n)',
             bygroups(Text, Keyword, Keyword, using(this, state='inline'))),
            # bulleted lists
            (r'^(\s*)([*-])(\s)(.+\n)',
             bygroups(Text, Keyword, Text, using(this, state='inline'))),
            # numbered lists
            (r'^(\s*)([0-9]+\.)( .+\n)',
             bygroups(Text, Keyword, using(this, state='inline'))),
            # quote
            (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)),
            # text block
            (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)),
            # code block with language
            (r'^(```)(\w+)(\n)([\w\W]*?)(^```$)', _handle_codeblock),

            include('inline'),
        ],
        'inline': [
            # escape
            (r'\\.', Text),
            # italics
            (r'(\s)([*_][^*_]+[*_])(\W|\n)', bygroups(Text, Generic.Emph, Text)),
            # bold
            # warning: the following rule eats internal tags. eg. **foo _bar_ baz** bar is not italics
            (r'(\s)((\*\*|__).*\3)((?=\W|\n))', bygroups(Text, Generic.Strong, None, Text)),
            # "proper way" (r'(\s)([*_]{2}[^*_]+[*_]{2})((?=\W|\n))', bygroups(Text, Generic.Strong, Text)),
            # strikethrough
            (r'(\s)(~~[^~]+~~)((?=\W|\n))', bygroups(Text, Generic.Deleted, Text)),
            # inline code
            (r'`[^`]+`', String.Backtick),
            # mentions and topics (twitter and github stuff)
            (r'[@#][\w/:]+', Name.Entity),
            # (image?) links eg: ![Image of Yaktocat](https://octodex.github.com/images/yaktocat.png)
            (r'(!?\[)([^]]+)(\])(\()([^)]+)(\))',
             bygroups(Text, Name.Tag, Text, Text, Name.Attribute, Text)),
            # general text, must come last!
            (r'[^\\\s]+', Text),
            (r'.', Text),
        ],
    }

    def __init__(self, **options):
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)
MarkdownLexer
python
Unity-Technologies__ml-agents
ml-agents/setup.py
{ "start": 357, "end": 3683 }
class ____(install):
    """
    Custom command to verify that the git tag is the expected one for the release.
    Originally based on https://circleci.com/blog/continuously-deploying-python-packages-to-pypi-with-circleci/
    This differs slightly because our tags and versions are different.
    """

    description = "verify that the git tag matches our version"

    def run(self):
        tag = os.getenv("GITHUB_REF", "NO GITHUB TAG!").replace("refs/tags/", "")

        if tag != EXPECTED_TAG:
            info = "Git tag: {} does not match the expected tag of this app: {}".format(
                tag, EXPECTED_TAG
            )
            sys.exit(info)


# Get the long description from the README file
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()


setup(
    name="mlagents",
    version=VERSION,
    description="Unity Machine Learning Agents",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Unity-Technologies/ml-agents",
    author="Unity Technologies",
    author_email="ML-Agents@unity3d.com",
    classifiers=[
        "Intended Audience :: Developers",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3.10",
    ],
    # find_namespace_packages will recurse through the directories and find all the packages
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    zip_safe=False,
    install_requires=[
        # Test-only dependencies should go in test_requirements.txt, not here.
        "grpcio>=1.11.0,<=1.53.2",
        "h5py>=2.9.0",
        f"mlagents_envs=={VERSION}",
        "numpy>=1.23.5,<1.24.0",
        "Pillow>=4.2.1",
        "protobuf>=3.6,<3.21",
        "pyyaml>=3.1.0",
        "torch>=2.1.1,<=2.8.0",
        "tensorboard>=2.14",
        # adding six explicit dependency since tensorboard needs it but doesn't declare it as a dep
        "six>=1.16",
        # cattrs 1.1.0 dropped support for python 3.6, but 1.0.0 doesn't work for python 3.9
        # Since there's no version that supports both, we have to draw the line somewhere.
        "cattrs>=1.1.0,<1.7; python_version>='3.8'",
        "attrs>=19.3.0",
        "huggingface_hub>=0.14",
        'pypiwin32==223;platform_system=="Windows"',
        "onnx==1.15.0",
    ],
    python_requires=">=3.10.1,<=3.10.12",
    entry_points={
        "console_scripts": [
            "mlagents-learn=mlagents.trainers.learn:main",
            "mlagents-run-experiment=mlagents.trainers.run_experiment:main",
            "mlagents-push-to-hf=mlagents.utils.push_to_hf:main",
            "mlagents-load-from-hf=mlagents.utils.load_from_hf:main",
        ],
        # Plugins - each plugin type should have an entry here for the default behavior
        ML_AGENTS_STATS_WRITER: [
            "default=mlagents.plugins.stats_writer:get_default_stats_writers"
        ],
        ML_AGENTS_TRAINER_TYPE: [
            "default=mlagents.plugins.trainer_type:get_default_trainer_types"
        ],
    },
    # TODO: Remove this once mypy stops having spurious setuptools issues.
    cmdclass={"verify": VerifyVersionCommand},  # type: ignore
)
VerifyVersionCommand
python
tornadoweb__tornado
tornado/httputil.py
{ "start": 23518, "end": 24460 }
class ____:
    """Implement this interface to handle requests from `.HTTPServer`.

    .. versionadded:: 4.0
    """

    def start_request(
        self, server_conn: object, request_conn: "HTTPConnection"
    ) -> "HTTPMessageDelegate":
        """This method is called by the server when a new request has started.

        :arg server_conn: is an opaque object representing the long-lived
            (e.g. tcp-level) connection.

        :arg request_conn: is a `.HTTPConnection` object for a single
            request/response exchange.

        This method should return a `.HTTPMessageDelegate`.
        """
        raise NotImplementedError()

    def on_close(self, server_conn: object) -> None:
        """This method is called when a connection has been closed.

        :arg server_conn: is a server connection that has previously been
            passed to ``start_request``.
        """
        pass
HTTPServerConnectionDelegate