language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
getsentry__sentry
src/sentry/discover/migrations/0001_move_discover_models.py
{ "start": 395, "end": 6339 }
class ____(CheckedMigration): # This flag is used to mark that a migration shouldn't be automatically run in production. # This should only be used for operations where it's safe to run the migration after your # code has deployed. So this should not be used for most operations that alter the schema # of a table. # Here are some things that make sense to mark as post deployment: # - Large data migrations. Typically we want these to be run manually so that they can be # monitored and not block the deploy for a long period of time while they run. # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to # run this outside deployments so that we don't block them. Note that while adding an index # is a schema change, it's completely safe to run the operation after the code has deployed. # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment is_post_deployment = False initial = True dependencies = [ ("sentry", "0945_move_discover_models"), ] operations = [ migrations.SeparateDatabaseAndState( state_operations=[ migrations.CreateModel( name="DiscoverSavedQuery", fields=[ ( "id", sentry.db.models.fields.bounded.BoundedBigAutoField( primary_key=True, serialize=False ), ), ( "created_by_id", sentry.db.models.fields.hybrid_cloud_foreign_key.HybridCloudForeignKey( "sentry.User", db_index=True, null=True, on_delete="SET_NULL" ), ), ("name", models.CharField(max_length=255)), ("query", sentry.db.models.fields.jsonfield.JSONField(default=dict)), ("version", models.IntegerField(null=True)), ("date_created", models.DateTimeField(auto_now_add=True)), ("date_updated", models.DateTimeField(auto_now=True)), ( "visits", sentry.db.models.fields.bounded.BoundedBigIntegerField( default=1, null=True ), ), ( "last_visited", models.DateTimeField(default=django.utils.timezone.now, null=True), ), ("is_homepage", models.BooleanField(blank=True, null=True)), ( "dataset", 
sentry.db.models.fields.bounded.BoundedPositiveIntegerField( db_default=0, default=0 ), ), ( "dataset_source", sentry.db.models.fields.bounded.BoundedPositiveIntegerField( db_default=0, default=0 ), ), ( "organization", sentry.db.models.fields.foreignkey.FlexibleForeignKey( on_delete=django.db.models.deletion.CASCADE, to="sentry.organization", ), ), ], options={ "db_table": "sentry_discoversavedquery", }, ), migrations.CreateModel( name="DiscoverSavedQueryProject", fields=[ ( "id", sentry.db.models.fields.bounded.BoundedBigAutoField( primary_key=True, serialize=False ), ), ( "discover_saved_query", sentry.db.models.fields.foreignkey.FlexibleForeignKey( on_delete=django.db.models.deletion.CASCADE, to="discover.discoversavedquery", ), ), ( "project", sentry.db.models.fields.foreignkey.FlexibleForeignKey( on_delete=django.db.models.deletion.CASCADE, to="sentry.project" ), ), ], options={ "db_table": "sentry_discoversavedqueryproject", "unique_together": {("project", "discover_saved_query")}, }, ), migrations.AddField( model_name="discoversavedquery", name="projects", field=models.ManyToManyField( through="discover.DiscoverSavedQueryProject", to="sentry.project" ), ), migrations.AddConstraint( model_name="discoversavedquery", constraint=models.UniqueConstraint( condition=models.Q(("is_homepage", True)), fields=("organization", "created_by_id", "is_homepage"), name="unique_user_homepage_query", ), ), ] ) ]
Migration
python
doocs__leetcode
solution/0200-0299/0284.Peeking Iterator/Solution.py
{ "start": 520, "end": 1757 }
class ____: def __init__(self, iterator): """ Initialize your data structure here. :type iterator: Iterator """ self.iterator = iterator self.has_peeked = False self.peeked_element = None def peek(self): """ Returns the next element in the iteration without advancing the iterator. :rtype: int """ if not self.has_peeked: self.peeked_element = self.iterator.next() self.has_peeked = True return self.peeked_element def next(self): """ :rtype: int """ if not self.has_peeked: return self.iterator.next() result = self.peeked_element self.has_peeked = False self.peeked_element = None return result def hasNext(self): """ :rtype: bool """ return self.has_peeked or self.iterator.hasNext() # Your PeekingIterator object will be instantiated and called as such: # iter = PeekingIterator(Iterator(nums)) # while iter.hasNext(): # val = iter.peek() # Get the next element but not advance the iterator. # iter.next() # Should return the same value as [val].
PeekingIterator
python
apache__airflow
providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/synapse.py
{ "start": 8123, "end": 8530 }
class ____: """Azure Synapse pipeline operation statuses.""" QUEUED = "Queued" IN_PROGRESS = "InProgress" SUCCEEDED = "Succeeded" FAILED = "Failed" CANCELING = "Canceling" CANCELLED = "Cancelled" TERMINAL_STATUSES = {CANCELLED, FAILED, SUCCEEDED} INTERMEDIATE_STATES = {QUEUED, IN_PROGRESS, CANCELING} FAILURE_STATES = {FAILED, CANCELLED}
AzureSynapsePipelineRunStatus
python
airbytehq__airbyte
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py
{ "start": 9976, "end": 10556 }
class ____(BaseModel): class Config: extra = Extra.forbid kid: Optional[str] = Field( None, description="Private key ID for user account.", examples=["{{ config['kid'] }}"], title="Key Identifier", ) typ: Optional[str] = Field( "JWT", description="The media type of the complete JWT.", examples=["JWT"], title="Type", ) cty: Optional[str] = Field( None, description="Content type of JWT header.", examples=["JWT"], title="Content Type", )
JwtHeaders
python
dagster-io__dagster
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
{ "start": 129199, "end": 129685 }
class ____(GeneratedAirbyteSource): @public def __init__(self, name: str, api_key: str): """Airbyte Source for Glassfrog. Documentation can be found at https://docs.airbyte.com/integrations/sources/glassfrog Args: name (str): The name of the destination. api_key (str): API key provided by Glassfrog """ self.api_key = check.str_param(api_key, "api_key") super().__init__("Glassfrog", name)
GlassfrogSource
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 56640, "end": 57330 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "workflow_run_id", "environment_ids", "comment", "client_mutation_id", ) workflow_run_id = sgqlc.types.Field( sgqlc.types.non_null(ID), graphql_name="workflowRunId" ) environment_ids = sgqlc.types.Field( sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(ID))), graphql_name="environmentIds", ) comment = sgqlc.types.Field(String, graphql_name="comment") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
ApproveDeploymentsInput
python
google__jax
tests/pallas/fusion_test.py
{ "start": 8507, "end": 8952 }
class ____(hijax.HiType): x0: jax_core.ShapedArray x1: jax_core.ShapedArray def lo_ty(self) -> list[jax_core.ShapedArray]: return [self.x0, self.x1] def lower_val(self, hi_val: ArrayTuple) -> list[jax.Array]: return [hi_val.x0, hi_val.x1] def raise_val(self, x0, x1) -> ArrayTuple: return ArrayTuple(x0, x1) hijax.register_hitype( ArrayTuple, lambda t: ArrayTupleTy(jax.typeof(t.x0), jax.typeof(t.x1)) )
ArrayTupleTy
python
tornadoweb__tornado
tornado/test/simple_httpclient_test.py
{ "start": 25064, "end": 27063 }
class ____(AsyncHTTPTestCase): def respond_204(self, request): self.http1 = request.version.startswith("HTTP/1.") if not self.http1: # Close the request cleanly in HTTP/2; it will be skipped anyway. request.connection.write_headers( ResponseStartLine("", 200, "OK"), HTTPHeaders() ) request.connection.finish() return # A 204 response never has a body, even if doesn't have a content-length # (which would otherwise mean read-until-close). We simulate here a # server that sends no content length and does not close the connection. # # Tests of a 204 response with no Content-Length header are included # in SimpleHTTPClientTestMixin. stream = request.connection.detach() stream.write(b"HTTP/1.1 204 No content\r\n") if request.arguments.get("error", [False])[-1]: stream.write(b"Content-Length: 5\r\n") else: stream.write(b"Content-Length: 0\r\n") stream.write(b"\r\n") stream.close() def get_app(self): return self.respond_204 def test_204_no_content(self): resp = self.fetch("/") if not self.http1: self.skipTest("requires HTTP/1.x") self.assertEqual(resp.code, 204) self.assertEqual(resp.body, b"") def test_204_invalid_content_length(self): # 204 status with non-zero content length is malformed with ExpectLog( gen_log, ".*Response with code 204 should not have body", level=logging.INFO ): with self.assertRaises(HTTPStreamClosedError): self.fetch("/?error=1", raise_error=True) if not self.http1: self.skipTest("requires HTTP/1.x") if self.http_client.configured_class != SimpleAsyncHTTPClient: self.skipTest("curl client accepts invalid headers")
HTTP204NoContentTestCase
python
PyCQA__pylint
tests/functional/u/useless/useless_parent_delegation_py38.py
{ "start": 293, "end": 440 }
class ____(Egg): def __init__(self, first: Any, /, second: Any) -> None: # [useless-parent-delegation] super().__init__(first, second)
Ham
python
django__django
tests/foreign_object/tests.py
{ "start": 25491, "end": 26100 }
class ____(TestCase): @translation.override("fi") def test_extra_join_filter_q(self): a = Article.objects.create(pub_date=datetime.datetime.today()) ArticleTranslation.objects.create( article=a, lang="fi", title="title", body="body" ) qs = Article.objects.all() with self.assertNumQueries(2): self.assertEqual(qs[0].active_translation_q.title, "title") qs = qs.select_related("active_translation_q") with self.assertNumQueries(1): self.assertEqual(qs[0].active_translation_q.title, "title")
TestExtraJoinFilterQ
python
spack__spack
lib/spack/spack/version/common.py
{ "start": 700, "end": 811 }
class ____(spack.error.SpackError): """This is raised when something is wrong with a version."""
VersionError
python
apache__airflow
providers/microsoft/azure/tests/unit/microsoft/azure/hooks/test_batch.py
{ "start": 1142, "end": 11302 }
class ____: @pytest.fixture(autouse=True) def setup_test_cases(self, create_mock_connections): # set up the test variable self.test_vm_conn_id = "test_azure_batch_vm" self.test_cloud_conn_id = "test_azure_batch_cloud" self.test_account_name = "test_account_name" self.test_account_key = "test_account_key" self.test_account_url = "http://test-endpoint:29000" self.test_vm_size = "test-vm-size" self.test_vm_publisher = "test.vm.publisher" self.test_vm_offer = "test.vm.offer" self.test_vm_sku = "test-sku" self.test_cloud_os_family = "test-family" self.test_cloud_os_version = "test-version" self.test_node_agent_sku = "test-node-agent-sku" create_mock_connections( # connect with vm configuration Connection( conn_id=self.test_vm_conn_id, conn_type="azure-batch", extra={"account_url": self.test_account_url}, ), # connect with cloud service Connection( conn_id=self.test_cloud_conn_id, conn_type="azure-batch", extra={"account_url": self.test_account_url}, ), ) def test_connection_and_client(self): hook = AzureBatchHook(azure_batch_conn_id=self.test_vm_conn_id) assert isinstance(hook.get_conn(), BatchServiceClient) conn = hook.connection assert isinstance(conn, BatchServiceClient) assert hook.connection is conn, "`connection` property should be cached" @mock.patch(f"{MODULE}.batch_auth.SharedKeyCredentials") @mock.patch(f"{MODULE}.AzureIdentityCredentialAdapter") def test_fallback_to_azure_identity_credential_adppter_when_name_and_key_is_not_provided( self, mock_azure_identity_credential_adapter, mock_shared_key_credentials ): self.test_account_name = None self.test_account_key = None hook = AzureBatchHook(azure_batch_conn_id=self.test_vm_conn_id) assert isinstance(hook.get_conn(), BatchServiceClient) mock_azure_identity_credential_adapter.assert_called_with( None, resource_id="https://batch.core.windows.net/.default", managed_identity_client_id=None, workload_identity_tenant_id=None, ) assert not mock_shared_key_credentials.auth.called self.test_account_name = 
"test_account_name" self.test_account_key = "test_account_key" def test_configure_pool_with_vm_config(self): hook = AzureBatchHook(azure_batch_conn_id=self.test_vm_conn_id) pool = hook.configure_pool( pool_id="mypool", vm_size="test_vm_size", vm_node_agent_sku_id=self.test_vm_sku, target_dedicated_nodes=1, vm_publisher="test.vm.publisher", vm_offer="test.vm.offer", sku_starts_with="test-sku", ) assert isinstance(pool, batch_models.PoolAddParameter) def test_configure_pool_with_cloud_config(self): hook = AzureBatchHook(azure_batch_conn_id=self.test_cloud_conn_id) pool = hook.configure_pool( pool_id="mypool", vm_size="test_vm_size", vm_node_agent_sku_id=self.test_vm_sku, target_dedicated_nodes=1, vm_publisher="test.vm.publisher", vm_offer="test.vm.offer", sku_starts_with="test-sku", ) assert isinstance(pool, batch_models.PoolAddParameter) def test_configure_pool_with_latest_vm(self): with mock.patch( "airflow.providers.microsoft.azure.hooks." "batch.AzureBatchHook._get_latest_verified_image_vm_and_sku" ) as mock_getvm: hook = AzureBatchHook(azure_batch_conn_id=self.test_cloud_conn_id) getvm_instance = mock_getvm getvm_instance.return_value = ["test-image", "test-sku"] pool = hook.configure_pool( pool_id="mypool", vm_size="test_vm_size", vm_node_agent_sku_id=self.test_vm_sku, use_latest_image_and_sku=True, vm_publisher="test.vm.publisher", vm_offer="test.vm.offer", sku_starts_with="test-sku", ) assert isinstance(pool, batch_models.PoolAddParameter) @mock.patch(f"{MODULE}.BatchServiceClient") def test_create_pool_with_vm_config(self, mock_batch): hook = AzureBatchHook(azure_batch_conn_id=self.test_vm_conn_id) mock_instance = mock_batch.return_value.pool.add pool = hook.configure_pool( pool_id="mypool", vm_size="test_vm_size", vm_node_agent_sku_id=self.test_vm_sku, target_dedicated_nodes=1, vm_publisher="test.vm.publisher", vm_offer="test.vm.offer", sku_starts_with="test-sku", ) hook.create_pool(pool=pool) mock_instance.assert_called_once_with(pool) 
@mock.patch(f"{MODULE}.BatchServiceClient") def test_create_pool_with_cloud_config(self, mock_batch): hook = AzureBatchHook(azure_batch_conn_id=self.test_cloud_conn_id) mock_instance = mock_batch.return_value.pool.add pool = hook.configure_pool( pool_id="mypool", vm_size="test_vm_size", vm_node_agent_sku_id=self.test_vm_sku, target_dedicated_nodes=1, vm_publisher="test.vm.publisher", vm_offer="test.vm.offer", sku_starts_with="test-sku", ) hook.create_pool(pool=pool) mock_instance.assert_called_once_with(pool) @mock.patch(f"{MODULE}.BatchServiceClient") def test_wait_for_all_nodes(self, mock_batch): # TODO: Add test pass @mock.patch(f"{MODULE}.BatchServiceClient") def test_job_configuration_and_create_job(self, mock_batch): hook = AzureBatchHook(azure_batch_conn_id=self.test_vm_conn_id) mock_instance = mock_batch.return_value.job.add job = hook.configure_job(job_id="myjob", pool_id="mypool") hook.create_job(job) assert isinstance(job, batch_models.JobAddParameter) mock_instance.assert_called_once_with(job) @mock.patch(f"{MODULE}.BatchServiceClient") def test_add_single_task_to_job(self, mock_batch): hook = AzureBatchHook(azure_batch_conn_id=self.test_vm_conn_id) mock_instance = mock_batch.return_value.task.add task = hook.configure_task(task_id="mytask", command_line="echo hello") hook.add_single_task_to_job(job_id="myjob", task=task) assert isinstance(task, batch_models.TaskAddParameter) mock_instance.assert_called_once_with(job_id="myjob", task=task) @mock.patch(f"{MODULE}.BatchServiceClient") def test_wait_for_all_task_to_complete_timeout(self, mock_batch): hook = AzureBatchHook(azure_batch_conn_id=self.test_cloud_conn_id) with pytest.raises(TimeoutError): hook.wait_for_job_tasks_to_complete("myjob", -1) @mock.patch(f"{MODULE}.BatchServiceClient") def test_wait_for_all_task_to_complete_all_success(self, mock_batch): hook = AzureBatchHook(azure_batch_conn_id=self.test_cloud_conn_id) hook.connection.task.list.return_value = iter( [ batch_models.CloudTask( 
id="mytask_1", execution_info=batch_models.TaskExecutionInformation( retry_count=0, requeue_count=0, result=batch_models.TaskExecutionResult.success ), state=batch_models.TaskState.completed, ), batch_models.CloudTask( id="mytask_2", execution_info=batch_models.TaskExecutionInformation( retry_count=0, requeue_count=0, result=batch_models.TaskExecutionResult.success ), state=batch_models.TaskState.completed, ), ] ) results = hook.wait_for_job_tasks_to_complete("myjob", 60) assert results == [] hook.connection.task.list.assert_called_once_with("myjob") @mock.patch(f"{MODULE}.BatchServiceClient") def test_wait_for_all_task_to_complete_failures(self, mock_batch): hook = AzureBatchHook(azure_batch_conn_id=self.test_cloud_conn_id) tasks = [ batch_models.CloudTask( id="mytask_1", execution_info=batch_models.TaskExecutionInformation( retry_count=0, requeue_count=0, result=batch_models.TaskExecutionResult.success ), state=batch_models.TaskState.completed, ), batch_models.CloudTask( id="mytask_2", execution_info=batch_models.TaskExecutionInformation( retry_count=0, requeue_count=0, result=batch_models.TaskExecutionResult.failure ), state=batch_models.TaskState.completed, ), ] hook.connection.task.list.return_value = iter(tasks) results = hook.wait_for_job_tasks_to_complete("myjob", 60) assert results == [tasks[1]] hook.connection.task.list.assert_called_once_with("myjob") @mock.patch(f"{MODULE}.BatchServiceClient") def test_connection_success(self, mock_batch): hook = AzureBatchHook(azure_batch_conn_id=self.test_cloud_conn_id) hook.connection.job.return_value = {} status, msg = hook.test_connection() assert status is True assert msg == "Successfully connected to Azure Batch." 
@mock.patch(f"{MODULE}.BatchServiceClient") def test_connection_failure(self, mock_batch): hook = AzureBatchHook(azure_batch_conn_id=self.test_cloud_conn_id) hook.connection.job.list = PropertyMock(side_effect=Exception("Authentication failed.")) status, msg = hook.test_connection() assert status is False assert msg == "Authentication failed."
TestAzureBatchHook
python
dagster-io__dagster
python_modules/dagster/dagster/_core/remote_origin.py
{ "start": 16372, "end": 17540 }
class ____(LegacyNamedTupleMixin): """Serializable representation of an ExternalJob that can be used to uniquely it or reload it in across process boundaries. """ repository_origin: RemoteRepositoryOrigin job_name: str def get_id(self) -> str: return create_snapshot_id(self) @property def location_name(self) -> str: return self.repository_origin.code_location_origin.location_name @whitelist_for_serdes( # ExternalInstigatorOrigin used to be called ExternalJobOrigin, before the concept of "job" was # introduced in 0.12.0. For clarity, we changed the name of the namedtuple with `0.14.0`, but we # need to maintain the serialized format in order to avoid changing the origin id that is stored in # our schedule storage. This registers the serialized ExternalJobOrigin named tuple class to be # deserialized as an ExternalInstigatorOrigin, using its corresponding serializer for serdes. storage_name="ExternalJobOrigin", storage_field_names={ "repository_origin": "external_repository_origin", "instigator_name": "job_name", }, ) @record(kw_only=False)
RemoteJobOrigin
python
django__django
tests/test_client/tests.py
{ "start": 43115, "end": 43625 }
class ____(SimpleTestCase): def test_csrf_enabled_client(self): "A client can be instantiated with CSRF checks enabled" csrf_client = Client(enforce_csrf_checks=True) # The normal client allows the post response = self.client.post("/post_view/", {}) self.assertEqual(response.status_code, 200) # The CSRF-enabled client rejects it response = csrf_client.post("/post_view/", {}) self.assertEqual(response.status_code, 403)
CSRFEnabledClientTests
python
conda__conda
conda/gateways/disk/update.py
{ "start": 887, "end": 6150 }
class ____(Exception): pass def update_file_in_place_as_binary(file_full_path, callback): # callback should be a callable that takes one positional argument, which is the # content of the file before updating # this method updates the file in-place, without releasing the file lock fh = None try: fh = exp_backoff_fn(open, file_full_path, "rb+") log.log(TRACE, "in-place update path locked for %s", file_full_path) data = fh.read() fh.seek(0) try: fh.write(callback(data)) fh.truncate() return True except CancelOperation: pass finally: if fh: fh.close() return False def rename(source_path, destination_path, force=False): if lexists(destination_path) and force: rm_rf(destination_path) if lexists(source_path): log.log(TRACE, "renaming %s => %s", source_path, destination_path) try: os.rename(source_path, destination_path) except OSError as e: if ( on_win and dirname(source_path) == dirname(destination_path) and os.path.isfile(source_path) ): condabin_dir = join(context.conda_prefix, "condabin") rename_script = join(condabin_dir, "rename_tmp.bat") if exists(rename_script): _dirname, _src_fn = split(source_path) _dest_fn = basename(destination_path) p = Popen( ["cmd.exe", "/C", rename_script, _dirname, _src_fn, _dest_fn], stdout=PIPE, stderr=PIPE, ) stdout, stderr = p.communicate() else: log.debug( f"{rename_script} is missing. Conda was not installed correctly or has been " "corrupted. Please file an issue on the conda github repo." ) elif e.errno in (EINVAL, EXDEV, EPERM): # https://github.com/conda/conda/issues/6811 # https://github.com/conda/conda/issues/6711 log.log( TRACE, "Could not rename %s => %s due to errno [%s]. 
Falling back" " to copy/unlink", source_path, destination_path, e.errno, ) # https://github.com/moby/moby/issues/25409#issuecomment-238537855 # shutil.move() falls back to copy+unlink move(source_path, destination_path) else: raise else: log.log(TRACE, "cannot rename; source path does not exist '%s'", source_path) @contextmanager def rename_context(source: str, destination: str | None = None, dry_run: bool = False): """ Used for removing a directory when there are dependent actions (i.e. you need to ensure other actions succeed before removing it). Example: with rename_context(directory): # Do dependent actions here """ if destination is None: destination = tempfile.mkdtemp() if dry_run: print(f"{DRY_RUN_PREFIX} rename_context {source} > {destination}") yield return try: rename(source, destination, force=True) yield except Exception as exc: # Error occurred, roll back change rename(destination, source, force=True) raise exc def backoff_rename(source_path, destination_path, force=False): exp_backoff_fn(rename, source_path, destination_path, force) def touch(path, mkdir=False, sudo_safe=False): # sudo_safe: use any time `path` is within the user's home directory # returns: # True if the file did not exist but was created # False if the file already existed # raises: NotWritableError, which is also an OSError having attached errno try: path = expand(path) log.log(TRACE, "touching path %s", path) if lexists(path): os.utime(path, None) return True else: dirpath = dirname(path) if not isdir(dirpath) and mkdir: if sudo_safe: mkdir_p_sudo_safe(dirpath) else: mkdir_p(dirpath) else: if not isdir(dirname(path)): raise OSError("Target path is not a directory.") with open(path, "a"): pass # This chown call causes a false positive PermissionError to be # raised (similar to #7109) when called in an environment which # comes from sudo -u. 
# # if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None: # uid = int(os.environ['SUDO_UID']) # gid = int(os.environ.get('SUDO_GID', -1)) # log.log(TRACE, "chowning %s:%s %s", uid, gid, path) # os.chown(path, uid, gid) return False except OSError as e: raise NotWritableError(path, e.errno, caused_by=e)
CancelOperation
python
PyCQA__pylint
tests/functional/s/slots_checks.py
{ "start": 1137, "end": 1204 }
class ____: # [invalid-slots] __slots__ = NotIterable()
FourthBad
python
getsentry__sentry
src/sentry/api/base.py
{ "start": 25150, "end": 28666 }
class ____(SiloLimit): def __init__(self, modes: SiloMode | Iterable[SiloMode], internal: bool = False) -> None: if isinstance(modes, SiloMode): modes = [modes] self.modes = frozenset(modes) self.internal = internal def modify_endpoint_class(self, decorated_class: type[Endpoint]) -> type: dispatch_override = self.create_override(decorated_class.dispatch) new_class = type( decorated_class.__name__, (decorated_class,), { "dispatch": dispatch_override, "silo_limit": self, }, ) new_class.__module__ = decorated_class.__module__ return new_class def modify_endpoint_method(self, decorated_method: Callable[..., Any]) -> Callable[..., Any]: return self.create_override(decorated_method) def handle_when_unavailable( self, original_method: Callable[..., Any], current_mode: SiloMode, available_modes: Iterable[SiloMode], ) -> Callable[..., Any]: def handle(obj: Any, request: Request, *args: Any, **kwargs: Any) -> HttpResponse: mode_str = ", ".join(str(m) for m in available_modes) message = ( f"Received {request.method} request at {request.path!r} to server in " f"{current_mode} mode. This endpoint is available only in: {mode_str}" ) if settings.FAIL_ON_UNAVAILABLE_API_CALL: raise self.AvailabilityError(message) else: logger.warning(message) return HttpResponse(status=status.HTTP_404_NOT_FOUND) return handle def __call__(self, decorated_obj: Any) -> Any: if isinstance(decorated_obj, type): if not issubclass(decorated_obj, Endpoint): raise ValueError("`@EndpointSiloLimit` can decorate only Endpoint subclasses") return self.modify_endpoint_class(decorated_obj) if callable(decorated_obj): return self.modify_endpoint_method(decorated_obj) raise TypeError("`@EndpointSiloLimit` must decorate a class or method") control_silo_endpoint = EndpointSiloLimit(SiloMode.CONTROL) """ Apply to endpoints that exist in CONTROL silo. If a request is received and the application is not in CONTROL mode 404s will be returned. 
""" region_silo_endpoint = EndpointSiloLimit(SiloMode.REGION) """ Apply to endpoints that exist in REGION silo. If a request is received and the application is not in REGION mode 404s will be returned. """ internal_region_silo_endpoint = EndpointSiloLimit(SiloMode.REGION, internal=True) """ Apply to endpoints that exist in REGION silo that are internal only. Internal endpoints are not subject to URL pattern rules required for public endpoints in cells. If a request is received and the application is not in REGION mode 404s will be returned. """ all_silo_endpoint = EndpointSiloLimit([SiloMode.CONTROL, SiloMode.REGION, SiloMode.MONOLITH]) """ Apply to endpoints that are available in all silo modes. This should be rarely used, but is relevant for resources like ROBOTS.txt. """ internal_all_silo_endpoint = EndpointSiloLimit( [SiloMode.CONTROL, SiloMode.REGION, SiloMode.MONOLITH], internal=True ) """ Apply to endpoints that exist in all silo modes that are internal only. Internal endpoints are not subject to URL pattern rules required for public endpoints in cells. This should be rarely used. """
EndpointSiloLimit
python
google__jax
jax/_src/core.py
{ "start": 108081, "end": 109024 }
class ____(Primitive): multiple_results = True call_primitive = True def bind(self, *args, **params): return self._true_bind(*args, **params) def bind_with_trace(self, trace, fun_and_args, params): fun = fun_and_args[0] args = fun_and_args[1:] return trace.process_call(self, fun, args, params) def get_bind_params(self, params): new_params = dict(params) jaxpr = new_params.pop('call_jaxpr') subfun = lu.hashable_partial( lu.wrap_init(eval_jaxpr, debug_info=jaxpr.debug_info), jaxpr, ()) if config.dynamic_shapes.value: subfun = lu.annotate(subfun, _jaxpr_type_to_callable_annotation(jaxpr)) return [subfun], new_params def call_impl(f: lu.WrappedFun, *args, **params): del params # params parameterize the call primitive, not the function return f.call_wrapped(*args) call_p: CallPrimitive = CallPrimitive('call') call = call_p.bind call_p.def_impl(call_impl)
CallPrimitive
python
pypa__pip
src/pip/_internal/models/pylock.py
{ "start": 1083, "end": 1189 }
class ____: path: str editable: bool | None subdirectory: str | None @dataclass
PackageDirectory
python
google__jax
jax/_src/pallas/mosaic/sc_core.py
{ "start": 2047, "end": 2799 }
class ____(state.AbstractRef): """An AbstractRef for SparseCore.""" tiling: Tiling | None = None def __init__( self, aval: jax_core.AbstractValue, memory_space: tpu_core.MemorySpace, tiling: Tiling | None, ): super().__init__(aval, memory_space) self.tiling = tiling def update( # type: ignore[override] self, inner_aval: Any | None = None, memory_space: Any | None = None, tiling: Tiling | None = None, ) -> AbstractRef: return AbstractRef( inner_aval if inner_aval is not None else self.inner_aval, memory_space if memory_space is not None else self.memory_space, tiling if tiling is not None else self.tiling, ) @dataclasses.dataclass
AbstractRef
python
spyder-ide__spyder
spyder/plugins/debugger/widgets/main_widget.py
{ "start": 2164, "end": 2243 }
class ____: ToolbarStretcher = 'toolbar_stretcher'
DebuggerWidgetToolbarItems
python
getsentry__sentry
src/sentry/workflow_engine/endpoints/organization_workflow_index.py
{ "start": 3851, "end": 14611 }
class ____(OrganizationEndpoint): publish_status = { "GET": ApiPublishStatus.EXPERIMENTAL, "POST": ApiPublishStatus.EXPERIMENTAL, "PUT": ApiPublishStatus.EXPERIMENTAL, "DELETE": ApiPublishStatus.EXPERIMENTAL, } owner = ApiOwner.ISSUES permission_classes = (OrganizationWorkflowPermission,) def filter_workflows(self, request: Request, organization: Organization) -> QuerySet[Workflow]: """ Helper function to filter workflows based on request parameters. """ queryset: QuerySet[Workflow] = Workflow.objects.filter(organization_id=organization.id) if raw_idlist := request.GET.getlist("id"): try: ids = [int(id) for id in raw_idlist] except ValueError: raise ValidationError({"id": ["Invalid ID format"]}) queryset = queryset.filter(id__in=ids) # If specific IDs are provided, skip query and project filtering return queryset if raw_query := request.GET.get("query"): for filter in parse_workflow_query(raw_query): assert isinstance(filter, SearchFilter) match filter: case SearchFilter(key=SearchKey("name"), operator=("=" | "IN" | "!=")): queryset = apply_filter(queryset, filter, "name") case SearchFilter(key=SearchKey("action"), operator=("=" | "IN" | "!=")): queryset = apply_filter( queryset, filter, "workflowdataconditiongroup__condition_group__dataconditiongroupaction__action__type", distinct=True, ) case SearchFilter(key=SearchKey("query"), operator="="): # 'query' is our free text key; all free text gets returned here # as '=', and we search any relevant fields for it. queryset = queryset.filter( Q(name__icontains=filter.value.value) | Q( workflowdataconditiongroup__condition_group__dataconditiongroupaction__action__type__icontains=filter.value.value, ) ).distinct() case _: # TODO: What about unrecognized keys? 
pass projects = self.get_projects(request, organization) if projects: queryset = queryset.filter( Q(detectorworkflow__detector__project__in=projects) | Q(detectorworkflow__isnull=True) ).distinct() return queryset @extend_schema( operation_id="Fetch Workflows", parameters=[ GlobalParams.ORG_ID_OR_SLUG, WorkflowParams.SORT_BY, WorkflowParams.QUERY, WorkflowParams.ID, OrganizationParams.PROJECT, ], responses={ 201: WorkflowSerializer, 400: RESPONSE_BAD_REQUEST, 401: RESPONSE_UNAUTHORIZED, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, ) def get(self, request, organization): """ Returns a list of workflows for a given org """ sort_by = SortByParam.parse(request.GET.get("sortBy", "id"), SORT_COL_MAP) queryset = self.filter_workflows(request, organization) # Add synthetic fields to the queryset if needed. match sort_by.db_field_name: case "connected_detectors": queryset = queryset.annotate(connected_detectors=Count("detectorworkflow")) case "actions": queryset = queryset.annotate( actions=Count( "workflowdataconditiongroup__condition_group__dataconditiongroupaction__action", ) ) case "last_triggered": long_ago = ensure_aware(datetime(1970, 1, 1)) # We've got an index on (workflow, date_added) which allows a subquery # to be more efficient than a Max() aggregation, because it lets us look at ~1 # workflow fire history row per workflow. 
latest_fire_subquery = Subquery( WorkflowFireHistory.objects.filter(workflow=OuterRef("pk")) .order_by("-date_added") .values("date_added")[:1] ) queryset = queryset.annotate( last_triggered=Coalesce(latest_fire_subquery, long_ago) ) queryset = queryset.order_by(*sort_by.db_order_by) return self.paginate( request=request, queryset=queryset, order_by=sort_by.db_order_by, paginator_cls=OffsetPaginator, on_results=lambda x: serialize(x, request.user), count_hits=True, ) @extend_schema( operation_id="Create a Workflow", parameters=[ GlobalParams.ORG_ID_OR_SLUG, ], responses={ 201: WorkflowSerializer, 400: RESPONSE_BAD_REQUEST, 401: RESPONSE_UNAUTHORIZED, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, ) def post(self, request, organization): """ Creates a workflow for an organization ````````````````````````````````````` :param string name: The name of the workflow :param bool enabled: Whether the workflow is enabled or not :param object config: The configuration of the workflow :param object triggers: The Data Condition and DataConditionGroup for the when condition of a workflow :param object action_filters: The Data Conditions, Data Condition Group, and Actions to invoke when a workflow is triggered """ validator = WorkflowValidator( data=request.data, context={"organization": organization, "request": request}, ) validator.is_valid(raise_exception=True) with transaction.atomic(router.db_for_write(Workflow)): workflow = validator.create(validator.validated_data) detector_ids = request.data.get("detectorIds", []) if detector_ids: bulk_validator = BulkWorkflowDetectorsValidator( data={ "workflow_id": workflow.id, "detector_ids": detector_ids, }, context={"organization": organization, "request": request}, ) bulk_validator.is_valid(raise_exception=True) bulk_validator.save() return Response(serialize(workflow, request.user), status=status.HTTP_201_CREATED) @extend_schema( operation_id="Mutate an Organization's Workflows", description=("Currently supports bulk 
enabling/disabling workflows."), parameters=[ GlobalParams.ORG_ID_OR_SLUG, ], responses={ 200: RESPONSE_SUCCESS, 201: WorkflowSerializer, 400: RESPONSE_BAD_REQUEST, 401: RESPONSE_UNAUTHORIZED, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, ) def put(self, request, organization): """ Mutates workflows for a given org """ if not ( request.GET.getlist("id") or request.GET.get("query") or request.GET.getlist("project") or request.GET.getlist("projectSlug") ): return Response( { "detail": "At least one of 'id', 'query', 'project', or 'projectSlug' must be provided." }, status=status.HTTP_400_BAD_REQUEST, ) validator = DetectorWorkflowMutationValidator(data=request.data) validator.is_valid(raise_exception=True) enabled = validator.validated_data["enabled"] queryset = self.filter_workflows(request, organization) if not queryset: return Response( {"detail": "No workflows found."}, status=status.HTTP_200_OK, ) with transaction.atomic(router.db_for_write(Workflow)): # We update workflows individually to ensure post_save signals are called for workflow in queryset: workflow.update(enabled=enabled) return self.paginate( request=request, queryset=queryset, order_by="id", paginator_cls=OffsetPaginator, on_results=lambda x: serialize(x, request.user), ) @extend_schema( operation_id="Delete an Organization's Workflows", parameters=[ GlobalParams.ORG_ID_OR_SLUG, ], responses={ 200: RESPONSE_SUCCESS, 204: RESPONSE_NO_CONTENT, 400: RESPONSE_BAD_REQUEST, 401: RESPONSE_UNAUTHORIZED, 403: RESPONSE_FORBIDDEN, 404: RESPONSE_NOT_FOUND, }, ) def delete(self, request, organization): """ Deletes workflows for a given org """ if not ( request.GET.getlist("id") or request.GET.get("query") or request.GET.getlist("project") or request.GET.getlist("projectSlug") ): return Response( { "detail": "At least one of 'id', 'query', 'project', or 'projectSlug' must be provided." 
}, status=status.HTTP_400_BAD_REQUEST, ) queryset = self.filter_workflows(request, organization) if not queryset: return Response( {"detail": "No workflows found."}, status=status.HTTP_200_OK, ) for workflow in queryset: with transaction.atomic(router.db_for_write(Workflow)): RegionScheduledDeletion.schedule(workflow, days=0, actor=request.user) create_audit_entry( request=request, organization=organization, target_object=workflow.id, event=audit_log.get_event_id("WORKFLOW_REMOVE"), data=workflow.get_audit_log_data(), ) workflow.update(status=ObjectStatus.PENDING_DELETION) return Response(status=status.HTTP_204_NO_CONTENT)
OrganizationWorkflowIndexEndpoint
python
doocs__leetcode
solution/0600-0699/0690.Employee Importance/Solution.py
{ "start": 31, "end": 227 }
class ____: def __init__(self, id: int, importance: int, subordinates: List[int]): self.id = id self.importance = importance self.subordinates = subordinates """
Employee
python
huggingface__transformers
src/transformers/models/videomae/modeling_videomae.py
{ "start": 7947, "end": 11031 }
class ____(nn.Module): def __init__(self, config: VideoMAEConfig) -> None: super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.config = config self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.dropout_prob = config.attention_probs_dropout_prob self.scaling = self.attention_head_size**-0.5 self.is_causal = False self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False) if config.qkv_bias: self.q_bias = nn.Parameter(torch.zeros(self.all_head_size)) self.v_bias = nn.Parameter(torch.zeros(self.all_head_size)) else: self.q_bias = None self.v_bias = None def forward(self, hidden_states: Optional[torch.Tensor] = None) -> tuple[torch.Tensor, torch.Tensor]: batch_size, seq_length, _ = hidden_states.shape k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias) values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias) queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias) key_layer = keys.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) value_layer = values.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) query_layer = queries.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2) attention_interface: Callable = 
eager_attention_forward if self.config._attn_implementation != "eager": attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation] context_layer, attention_probs = attention_interface( self, query_layer, key_layer, value_layer, None, is_causal=self.is_causal, scaling=self.scaling, dropout=0.0 if not self.training else self.dropout_prob, ) new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.reshape(new_context_layer_shape) return context_layer, attention_probs # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->VideoMAE
VideoMAESelfAttention
python
huggingface__transformers
src/transformers/models/sew/configuration_sew.py
{ "start": 829, "end": 14231 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`SEWModel`]. It is used to instantiate a SEW model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SEW [asapp/sew-tiny-100k](https://huggingface.co/asapp/sew-tiny-100k) architecture. Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 32): Vocabulary size of the SEW model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SEW`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. squeeze_factor (`int`, *optional*, defaults to 2): Sequence length downsampling factor after the encoder and upsampling factor after the transformer. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. activation_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for activations inside the fully connected layer. 
attention_dropout (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. final_dropout (`float`, *optional*, defaults to 0.1): The dropout probability for the final projection layer of [`SEWForCTC`]. layerdrop (`float`, *optional*, defaults to 0.1): The LayerDrop probability. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556) for more details. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. feat_extract_norm (`str`, *optional*, defaults to `"group"`): The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D convolutional layers. feat_proj_dropout (`float`, *optional*, defaults to 0.0): The dropout probability for output of the feature encoder. feat_extract_activation (`str, `optional`, defaults to `"gelu"`): The non-linear activation function (function or string) in the 1D convolutional layers of the feature extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)`): A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers. conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)`): A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*. 
conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)`): A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The length of *conv_kernel* defines the number of convolutional layers and has to match the length of *conv_dim*. conv_bias (`bool`, *optional*, defaults to `False`): Whether the 1D convolutional layers have a bias. num_conv_pos_embeddings (`int`, *optional*, defaults to 128): Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional embeddings layer. num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16): Number of groups of 1D convolutional positional embeddings layer. apply_spec_augment (`bool`, *optional*, defaults to `True`): Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition](https://huggingface.co/papers/1904.08779). mask_time_prob (`float`, *optional*, defaults to 0.05): Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_time_length (`int`, *optional*, defaults to 10): Length of vector span along the time axis. mask_time_min_masks (`int`, *optional*, defaults to 2),: The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step, irrespectively of `mask_feature_prob`. 
Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length < mask_time_min_masks'' mask_feature_prob (`float`, *optional*, defaults to 0.0): Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`. mask_feature_length (`int`, *optional*, defaults to 10): Length of vector span along the feature axis. mask_feature_min_masks (`int`, *optional*, defaults to 0),: The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time step, irrespectively of `mask_feature_prob`. Only relevant if ''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks'' ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`): Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an instance of [`SEWForCTC`]. ctc_zero_infinity (`bool`, *optional*, defaults to `False`): Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance of [`SEWForCTC`]. use_weighted_layer_sum (`bool`, *optional*, defaults to `False`): Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an instance of [`Wav2Vec2ForSequenceClassification`]. classifier_proj_size (`int`, *optional*, defaults to 256): Dimensionality of the projection before token mean-pooling for classification. 
Example: ```python >>> from transformers import SEWConfig, SEWModel >>> # Initializing a SEW asapp/sew-tiny-100k style configuration >>> configuration = SEWConfig() >>> # Initializing a model (with random weights) from the asapp/sew-tiny-100k style configuration >>> model = SEWModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "sew" def __init__( self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs, ): super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id) self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_activation = feat_extract_activation self.conv_dim = list(conv_dim) self.conv_stride = list(conv_stride) self.conv_kernel = list(conv_kernel) self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_feat_extract_layers = len(self.conv_dim) self.num_hidden_layers = num_hidden_layers self.intermediate_size = 
intermediate_size self.squeeze_factor = squeeze_factor self.hidden_act = hidden_act self.num_attention_heads = num_attention_heads self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.feat_proj_dropout = feat_proj_dropout self.final_dropout = final_dropout self.layerdrop = layerdrop self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.vocab_size = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. " "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, " f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) " f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`." ) # fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779 self.apply_spec_augment = apply_spec_augment self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.mask_time_min_masks = mask_time_min_masks self.mask_feature_prob = mask_feature_prob self.mask_feature_length = mask_feature_length self.mask_feature_min_masks = mask_feature_min_masks # ctc loss self.ctc_loss_reduction = ctc_loss_reduction self.ctc_zero_infinity = ctc_zero_infinity # sequence classification self.use_weighted_layer_sum = use_weighted_layer_sum self.classifier_proj_size = classifier_proj_size @property def inputs_to_logits_ratio(self): return functools.reduce(operator.mul, self.conv_stride, 1) __all__ = ["SEWConfig"]
SEWConfig
python
Textualize__textual
docs/examples/how-to/render_compose.py
{ "start": 483, "end": 974 }
class ____(Container): """Custom widget that extends Container.""" DEFAULT_CSS = """ Splash { align: center middle; } Static { width: 40; padding: 2 4; } """ def on_mount(self) -> None: self.auto_refresh = 1 / 30 # (1)! def compose(self) -> ComposeResult: yield Static("Making a splash with Textual!") # (2)! def render(self) -> RenderResult: return LinearGradient(time() * 90, STOPS) # (3)!
Splash
python
pytorch__pytorch
torch/_inductor/codecache.py
{ "start": 66124, "end": 111068 }
class ____: """ Compile AOT Inductor generated code. """ @classmethod def compile( cls, graph: GraphLowering, wrapper_code: str, kernel_code: str, serialized_extern_kernel_nodes: str | None, *, device_type: str, additional_files: list[str], ) -> list[Union[str, Weights]] | str: """ Returns the .so path, or returns a list of files that were generated if config.aot_inductor.package=True. """ generated_files: list[str | Weights] = additional_files # type: ignore[assignment] _set_gpu_runtime_env() # cpp_extension consults the env picked_vec_isa = pick_vec_isa() vec_isa_cmd_gen = CppBuilder( name="o", sources="i", BuildOption=CppTorchDeviceOptions( vec_isa=picked_vec_isa, device_type=device_type, aot_mode=graph.aot_mode, ), ) # write function will calc source_code hash, the same source code with different # ISA level should be generate different hash. # So we need get a command_line which contains isa related parameter as a part of hash key. # And then pass the command_line to below write function as extra parameter to # guarantee the source code hash contains ISA difference. cpp_command = repr(vec_isa_cmd_gen.get_command_line()) # Meta internal AOTInductor CPU use_relative_path = ( config.is_fbcode() and device_type == "cpu" and graph.aot_mode ) ( specified_output_path, specified_artifact_name, ) = split_aot_inductor_output_path(config.aot_inductor.output_path) # TODO (benjaminglass1): the CMake packaging path doesn't support linking files # built with different flags. Until that's implemented, append the kernel code # to the wrapper and build everything at max optimization. 
if config.aot_inductor.package_cpp_only: wrapper_code = "\n".join((wrapper_code, kernel_code)) kernel_code = "" wrapper_key, wrapper_path = write( wrapper_code, "wrapper.cpp", extra=cpp_command, specified_dir=specified_output_path, key=config.aot_inductor.model_name_for_generated_files, ) kernel_code = ( f"// Triton kernels are embedded as comments in {wrapper_path}\n" + kernel_code ) _, kernel_path = write( kernel_code, "kernel.cpp", extra=cpp_command, specified_dir=specified_output_path, key=config.aot_inductor.model_name_for_generated_files, ) header_code = "" header_path = "" if not config.aot_inductor.dynamic_linkage: # to link statically, we also need a header file with open( os.path.join( os.path.dirname(os.path.dirname(__file__)), "csrc", "inductor", "aoti_runtime", "model.h", ) ) as f: # model_name_for_generated_files is guaranteed to be non-empty when compile_standalone model_class_name = config.aot_inductor.model_name_for_generated_files class_name = f"AOTInductorModel{model_class_name}" header_code = f.read() # we replace like this to avoid replacing # AOTInductorModelBase and AOTInductorModelKernelsBase header_code = ( header_code.replace("<AOTInductorModel>", f"<{class_name}>") .replace("AOTInductorModel(", f"{class_name}(") .replace("AOTInductorModel :", f"{class_name} :") ) _, header_path = write( header_code, "h", specified_dir=specified_output_path, key=model_class_name, ) # Log the AOTInductor wrapper and kernel code, if needed. 
with WritableTempFile("w+") as t: """ Avoid "Permission denied error" on Windows: with tempfile.NamedTemporaryFile("w", suffix=".gv") as temp_file: # Not writable on Windows: # https://docs.python.org/3/library/tempfile.html#tempfile.NamedTemporaryFile Example: with WritableTempFile("w", suffix=".gv") as temp_file: tree.to_dotfile(temp_file.name) """ t.writelines((wrapper_code, "\n", kernel_code, "\n")) t.flush() V.debug.output_code(t.name, extension="cpp") if config.aot_inductor.package: generated_files.append(wrapper_path) if not config.aot_inductor.package_cpp_only: generated_files.append(kernel_path) if not config.aot_inductor.dynamic_linkage: generated_files.append(header_path) output_code_log.info("Wrapper code written to: %s", wrapper_path) output_code_log.info("Kernel code written to: %s", kernel_path) trace_structured( "graph_dump", lambda: { "name": "inductor_aot_wrapper_code", "type": "cpp", "filename": wrapper_path, }, payload_fn=lambda: wrapper_code, ) trace_structured( "graph_dump", lambda: { "name": "inductor_aot_kernel_code", "type": "cpp", "filename": kernel_path, }, payload_fn=lambda: kernel_code, ) if not config.aot_inductor.dynamic_linkage: output_code_log.info("Header code written to: %s", header_path) trace_structured( "graph_dump", lambda: { "name": "inductor_aot_header_code", "type": "cpp", "filename": header_path, }, payload_fn=lambda: header_code, ) # We use a file lock below to protect FS operations. The lock file # is scoped to the 'key', so make sure the consts_s is protected # by the same lock: wrapper_path_operator = Path(wrapper_path) kernel_path_operator = Path(kernel_path) specified_sub_dir = wrapper_path_operator.parent / wrapper_key if not specified_sub_dir.exists(): specified_sub_dir.mkdir(exist_ok=True) cmake_path = str(Path(specified_sub_dir) / "CMakeLists.txt") def _compile_consts(consts: bytes, platform: str) -> str: # Load from aot_inductor, and update the value on demand. 
use_asm_build: bool = config.aot_inductor.use_consts_asm_build if platform == "linux": if graph.mutated_buffers & OrderedSet(graph.constants.keys()): # .data section is between .text and .bss. When the size of .data is large, # during the linking, the relocation of .text against .bss may overflow. # Rename it to .ldata so that it won't be in between the .text and .bss section if len(consts) > 2_000_000_000: raise ValueError( "Models with buffer mutation included doesn't support constants greater than 2GB!" ) section_attr = '.ldata, "aw"' else: section_attr = '.lrodata, "a"' symbol_prefix = "" elif platform == "darwin": section_attr = "__DATA,__data" symbol_prefix = "_" elif platform == "win32": symbol_prefix = "" # ASM build is not supported on Windows, force use CPP build. use_asm_build = False else: raise RuntimeError(f"Unsupported platform: {platform}") # Intel compiler failed to compile this manually constructed assembly file. # Switch XPU to use consts cpp build. if device_type == "xpu": use_asm_build = False is_large_consts = len(consts) > 1024 is_zero_size_consts = len(consts) == 0 def format_consts_to_gnu_asm( consts: bytes, align_bytes: int, symbol_prefix: str, is_large_consts: bool, ) -> tuple[str, str]: consts_asm = f"\t.section\t{section_attr}\n" consts_asm += f"\t.balign {align_bytes}\n" consts_asm += f"\t.globl\t{symbol_prefix}_binary_constants_bin_start\n" consts_asm += f"{symbol_prefix}_binary_constants_bin_start:\n" if not is_large_consts: for c in consts: consts_asm += f"\t.byte {c}\n" # Add one element even if constants are empty # Otherwise assembler will not put them in data section if not consts: consts_asm += "\t.space 1\n" else: consts_asm += "\t.quad 0x1234567899abcdef\n" consts_asm += f"\t.space {len(consts) - 8}\n" consts_asm += f".globl\t{symbol_prefix}_binary_constants_bin_end\n" consts_asm += f"{symbol_prefix}_binary_constants_bin_end:\n" return consts_asm, "weights.S" # Use c++ to convert consts to object file can support more 
compilers, such as msvc and icx. def format_consts_to_cpp( consts: bytes, align_bytes: int, symbol_prefix: str ) -> tuple[str, str]: consts_size = len(consts) asan_attr = """#if defined(__clang__) || defined (__GNUC__)\t\n\ #define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize("address")))\t\n\ #else\t\n\ #define ATTRIBUTE_NO_SANITIZE_ADDRESS\t\n\ #endif\t\n\ \t\n\ ATTRIBUTE_NO_SANITIZE_ADDRESS\t\n""" const_cpp = asan_attr const_cpp += f"alignas({align_bytes}) extern " const_cpp += f"unsigned char {symbol_prefix}_binary_constants_bin_start[{consts_size}] = {{\t\n" count_bytes = 0 for c in consts: const_cpp += f"{c}, " count_bytes = count_bytes + 1 if count_bytes % 16 == 0: const_cpp += "\t\n" const_cpp += "};\t\n" const_cpp += f"alignas({align_bytes}) extern unsigned char * {symbol_prefix}_binary_constants_bin_end;\t\n" return const_cpp, "weights.cpp" def get_zero_consts_asm_code( align_bytes: int, symbol_prefix: str, ) -> tuple[str, str]: """ This function handles zero-sized constants because the C++ standard prohibits zero-length arrays: https://stackoverflow.com/questions/9722632/what-happens-if-i-define-a-0-size-array-in-c-c On Windows (MSVC): The compiler reports error C2466 for zero-sized arrays: https://learn.microsoft.com/en-us/cpp/error-messages/compiler-errors-1/compiler-error-c2466 Solution: Use assembly compilation to handle this case. Why not use Win32 assembly for all paths? ml64 only supports alignment up to 16 bytes, which isn't optimal for performance. Cross-platform implementation: Linux: Added '-pedantic' to disable zero-sized arrays in C++ compiler Windows: MSVC naturally rejects zero-sized arrays by default """ if _IS_WINDOWS: # Windows ml64 is max support align to 16, but it is no effect to zero size data. 
asm_code = """ option casemap:none .data ?_binary_constants_bin_start@@3PAEA: align 16 ?_binary_constants_bin_end@@3PAEA: align 16 public ?_binary_constants_bin_start@@3PAEA public ?_binary_constants_bin_end@@3PAEA end """ asm_ext = "asm" else: asm_code = f"\t.section\t{section_attr}\n" asm_code += f"\t.balign {align_bytes}\n" asm_code += ( f"\t.globl\t{symbol_prefix}_binary_constants_bin_start\n" ) asm_code += f"{symbol_prefix}_binary_constants_bin_start:\n" asm_code += f".globl\t{symbol_prefix}_binary_constants_bin_end\n" asm_code += f"{symbol_prefix}_binary_constants_bin_end:\n" asm_ext = "S" return asm_code, asm_ext if use_asm_build: consts_code, code_ext = format_consts_to_gnu_asm( consts, ALIGN_BYTES, symbol_prefix, is_large_consts ) else: if is_zero_size_consts: consts_code, code_ext = get_zero_consts_asm_code( ALIGN_BYTES, symbol_prefix ) else: consts_code, code_ext = format_consts_to_cpp( consts, ALIGN_BYTES, symbol_prefix ) _, consts_s = write( consts_code, code_ext, specified_dir=str(specified_sub_dir), key=config.aot_inductor.model_name_for_generated_files, ) consts_s = Path(consts_s) object_build_options = CppTorchDeviceOptions( device_type=device_type, aot_mode=graph.aot_mode, compile_only=True, use_relative_path=use_relative_path, ) object_builder = CppBuilder( name=str(consts_s.stem), sources=str(consts_s), output_dir=str(consts_s.parent), BuildOption=object_build_options, ) consts_o = object_builder.get_target_file_path() if use_asm_build is False and is_zero_size_consts: run_asm_build_object(str(consts_s), consts_o, str(consts_s.parent)) else: object_builder.build() if is_large_consts and use_asm_build: with open(consts_o, "r+b") as f: f.seek(0) hdr = f.read(1024) # Search for magic number and write the actual data over it start_idx = ( hdr.find(b"\xef\xcd\xab\x99\x78\x56\x34\x12") if sys.byteorder == "little" else hdr.find(b"\x12\x34\x56\x78\x99\xab\xcd\xef") ) assert start_idx != -1 f.seek(start_idx) pos = 0 while pos < len(consts): rc = 
f.write(consts[pos:]) pos += rc # Remove the .S file to save space os.remove(consts_s) return consts_o from torch.utils._filelock import FileLock lock_dir = get_lock_dir() lock = FileLock( os.path.join(lock_dir, wrapper_key + ".lock"), timeout=LOCK_TIMEOUT ) with lock: if serialized_extern_kernel_nodes: extern_kernel_nodes_json = str( wrapper_path_operator.with_suffix(".json") ) with open(extern_kernel_nodes_json, "w") as f: f.write(serialized_extern_kernel_nodes) if config.aot_inductor.package: generated_files.append(extern_kernel_nodes_json) metadata = config.aot_inductor.metadata metadata["AOTI_DEVICE_KEY"] = device_type # Add environment information to ensure .so compatibility metadata.update(get_device_information(device_type)) # Save user provided metadata meta_json = str( wrapper_path_operator.with_name( f"{wrapper_path_operator.stem}_metadata.json" ) ) for k, v in config.aot_inductor.metadata.items(): assert isinstance(k, str) and isinstance(v, (str)), ( "Metadata must only contain strings" ) with open(meta_json, "w") as f: f.write(json.dumps(config.aot_inductor.metadata)) kernel_meta_json = str( kernel_path_operator.with_name( f"{kernel_path_operator.stem}_metadata.json" ) ) shutil.copy(meta_json, kernel_meta_json) if config.aot_inductor.package: generated_files.append(meta_json) if not config.aot_inductor.package_cpp_only: generated_files.append(kernel_meta_json) output_so = ( config.aot_inductor.output_path if specified_artifact_name else str(wrapper_path_operator.with_suffix(".so")) ) all_cuda = all( graph.get_original_value_of_constant(name).is_cuda for name in graph.constants if name not in graph.folded_constants ) def _to_bytes(t: torch.Tensor, all_cuda: bool) -> bytes: def _pad_to_alignment(raw_bytes: bytes) -> bytes: padded_bytes = raw_bytes.ljust( (len(raw_bytes) + ALIGN_BYTES - 1) // ALIGN_BYTES * ALIGN_BYTES, b"\x00", ) return padded_bytes # This serializes the tensor's untyped_storage to bytes by accessing # the raw data of the underlying 
structure. import ctypes if t.numel() == 0: return b"" if t.is_mkldnn: data_ptr = torch.ops.mkldnn.data_ptr(t) nbytes = torch.ops.mkldnn._nbytes(t) else: t_cpu = t.untyped_storage().cpu() data_ptr = t_cpu.data_ptr() nbytes = t_cpu.nbytes() raw_array = ctypes.cast( data_ptr, ctypes.POINTER(ctypes.c_ubyte * nbytes), ) # pyrefly: ignore [missing-attribute] raw_bytes = bytes(raw_array.contents) return raw_bytes if all_cuda else _pad_to_alignment(raw_bytes) if ( config.aot_inductor.package_constants_in_so or config.aot_inductor.package_constants_on_disk_format == "binary_blob" ): serialized_weights = b"".join( _to_bytes(graph.get_original_value_of_constant(name), all_cuda) for name in graph.constants if name not in graph.folded_constants ) else: serialized_weights = b"" if config.aot_inductor.package_constants_on_disk_format == "pickle_weights": # We need to return a storage key here because the original value tensor might be a clone weights_dict = Weights( { graph.allocated_constant_name[name]: ( graph.get_original_value_of_constant(name), TensorProperties(graph.constants[name]), ) for name in graph.constants if name not in graph.folded_constants } ) generated_files.append(weights_dict) consts_size = len(serialized_weights) use_external_weights, use_mmap_weights = determine_aoti_mmap_flags( consts_size ) if use_external_weights and use_mmap_weights: # Should never reach here, just a check for sanity raise RuntimeError( "use_external_weights and use_mmap_weights cannot both be True." 
) external_weights_path = None if use_external_weights: external_weights_filename = f"{wrapper_path_operator.stem}_weights.blob" external_weights_path = str( wrapper_path_operator.with_name(external_weights_filename) ) compile_command: dict[str, Any] = { "aot_mode": graph.aot_mode, "device_type": device_type, "use_mmap_weights": use_mmap_weights, "use_mmap_weights_external": use_external_weights, "use_relative_path": use_relative_path, "vec_isa": picked_vec_isa, } # If we're packaging via CMake, we build the whole code at max optimization. wrapper_build_options = CppTorchDeviceOptions( compile_only=True, min_optimize=not config.aot_inductor.package_cpp_only, **compile_command, ) kernel_build_options = CppTorchDeviceOptions( compile_only=True, **compile_command, ) # potentially, precompile the AOT header for this device if config.aot_inductor.precompile_headers and not _IS_WINDOWS: header_file = _get_cpp_wrapper_header( device_type, aot_mode=graph.aot_mode ) wrapper_build_options.precompiled_header = _precompile_header( header_file, cpp_command, min_optimize=not config.aot_inductor.package_cpp_only, **compile_command, ) if cpp_prefix := _get_cpp_prefix_header(device_type): kernel_build_options.precompiled_header = _precompile_header( cpp_prefix, cpp_command, **compile_command, ) wrapper_builder = CppBuilder( name=str(wrapper_path_operator.stem), sources=wrapper_path, output_dir=str(wrapper_path_operator.parent), BuildOption=wrapper_build_options, ) wrapper_compile_cmd = wrapper_builder.get_command_line() wrapper_o = wrapper_builder.get_target_file_path() kernel_builder = CppBuilder( name=str(kernel_path_operator.stem), sources=kernel_path, output_dir=str(wrapper_path_operator.parent), BuildOption=kernel_build_options, ) kernel_compile_cmd = kernel_builder.get_command_line() kernel_o = kernel_builder.get_target_file_path() log.debug("aot wrapper compilation command: %s", wrapper_compile_cmd) log.debug("aot kernel compilation command: %s", kernel_compile_cmd) if 
config.aot_inductor.package_cpp_only: # Not doing the actual compilation here compile_flags = str( wrapper_path_operator.with_name( f"{wrapper_path_operator.stem}_compile_flags.json" ) ) wrapper_build_options.save_flags_to_json(compile_flags) generated_files.append(compile_flags) wrapper_builder.save_compile_cmd_to_cmake(cmake_path, device_type) wrapper_builder.save_src_to_cmake(cmake_path, wrapper_path) generated_files.append(cmake_path) else: try: wrapper_builder.build() except (exc.CppCompileError, SkipFrame) as e: if " is too big to optimize" in str(e): raise RuntimeError( "Please use torch._inductor.config.aot_inductor.compile_wrapper_opt_level = 'O0' flag." ) from e raise e kernel_builder.build() if not use_mmap_weights: aot_constants = serialized_weights magic_number = 0 if use_external_weights: aot_constants = struct.pack("q", consts_size) assert external_weights_path is not None # For external weights, write weights to separate file and embed minimal placeholder with open(external_weights_path, "wb") as f_weights: f_weights.write(serialized_weights) generated_files.append(external_weights_path) else: # we'll append weights binary to the end of .so file and mmap it when loading magic_number = cast( int, torch.randint(0, torch.iinfo(torch.int64).max, (1,)).item() ) aot_constants = struct.pack("qq", consts_size + 8, magic_number) consts_o = _compile_consts(aot_constants, sys.platform) custom_obj_idx = 0 # Note that custom_objs_config.json file is different from the model_constants_config.json file produced # in package_sigmoid(). The keys in custom_objs_config.json directly correspond to the arg name in extern # nodes json. The key in model_constants_config.json produced by package_sigmoid is the attribute name in the # user model code. 
qual_name_to_id = {} # Map from constant name to its name in constants folder for custom_obj_idx, (name, constant) in enumerate( graph.torchbind_constants.items() ): if isinstance( constant, torch._library.fake_class_registry.FakeScriptObject ): constant = constant.real_obj assert isinstance(constant, torch._C.ScriptObject) custom_obj_name = f"{CUSTOM_OBJ_FILENAME_PREFIX}{custom_obj_idx}" log.debug("saving script object %s as %s", name, custom_obj_name) qual_name_to_id[name] = custom_obj_name custom_obj_bytes = torch._C._pickle_save(constant) custom_obj_path = os.path.join( wrapper_path_operator.parent, custom_obj_name ) write_atomic(custom_obj_path, custom_obj_bytes, True) generated_files.append(custom_obj_path) if qual_name_to_id: constants_config_json = os.path.join( wrapper_path_operator.parent, "custom_objs_config.json" ) with open(constants_config_json, "w") as f: f.write(json.dumps(qual_name_to_id)) generated_files.append(constants_config_json) gpu_codecache: ROCmCodeCache | CUDACodeCache = ( ROCmCodeCache() if torch.version.hip else CUDACodeCache() ) gpu_kernels_o = gpu_codecache.aot_kernels_o.copy() # clear the list of aot kernels after each linking gpu_codecache.aot_kernels_o.clear() if gpu_kernels_o: assert not config.aot_inductor.emit_multi_arch_kernel, ( "TODO: add emit_multi_arch_kernel support for cutlass kernels" ) cubins_o = [] asm_files = [] if not _IS_WINDOWS: ld, objcopy = get_ld_and_objcopy(use_relative_path) kernels = getattr(V.graph.wrapper_code, "_kernel_name_to_body", {}) for kernel_name, value in CudaKernelParamCache.cache.items(): if kernel_name not in kernels: # It is possible that CudaKernelParamCache contains more Triton kernels # than what the current graph uses continue if asm_file := value["asm"]: asm_files.append(asm_file) cubin_file = value[get_cpp_wrapper_cubin_path_name()] if ( config.aot_inductor.emit_multi_arch_kernel and device_type == "cuda" ): if torch.version.hip is None: current_arch = _nvcc_arch_as_compile_option() cmd = 
( # pyrefly: ignore [unbound-name] f"{_cuda_compiler()} -fatbin {asm_file} -o {cubin_file} " # Triton only allows generating PTX version as same as the current arch f"-gencode arch=compute_{current_arch},code=compute_{current_arch} " # Include SASS for the current specific arch f"-gencode arch=compute_{current_arch},code=sm_{current_arch} " ) try: subprocess.run( cmd.split(), capture_output=True, text=True, check=True, ) except subprocess.CalledProcessError as e: print( f"{cmd} failed with:\nstdout:\n{e.stdout}\nstderr:\n{e.stderr}", file=sys.stderr, ) raise else: # ROCm multi-arch: compile LLVM IR to multi-arch bundle from torch._inductor.rocm_multiarch_utils import ( compile_multiarch_bundle_from_llvm_ir, ) if not os.path.exists(asm_file): raise RuntimeError( f"Multi-arch ROCm compilation requires LLVM IR file, " f"but {asm_file} not found. " f"Ensure asm_type='ll' is captured in triton_heuristics.py" ) # Compile for multiple archs and bundle them success = compile_multiarch_bundle_from_llvm_ir( llvm_ir_path=asm_file, output_bundle_path=cubin_file, target_archs=None, ) if not success: raise RuntimeError( f"Failed to compile multi-arch bundle for kernel {kernel_name}. " f"Check that ROCm toolchain is available and LLVM IR is valid." 
) log.info("Created multi-arch bundle: %s", cubin_file) if config.aot_inductor.embed_kernel_binary: # Embed cubin files into model.so using objcopy cubins_o.append( convert_cubin_to_obj(cubin_file, kernel_name, ld, objcopy) ) output_name, output_dir = get_name_and_dir_from_output_file_path(output_so) so_build_options = CppTorchDeviceOptions( vec_isa=picked_vec_isa, device_type=device_type, aot_mode=graph.aot_mode, use_relative_path=use_relative_path, ) obj_srcs = [wrapper_o, kernel_o, consts_o, *gpu_kernels_o, *cubins_o] so_builder = CppBuilder( name=output_name, sources=obj_srcs, output_dir=output_dir, BuildOption=so_build_options, ) link_cmd = so_builder.get_command_line() output_so = so_builder.get_target_file_path() log.debug("aot linkage command: %s", link_cmd) # Append cmds to the end of codegen-ed wrapper file with open(wrapper_path, "a") as f: f.write("\n") f.write(f"// Compile cmd\n// {wrapper_compile_cmd}\n") f.write(f"// Link cmd\n// {link_cmd}\n") with open(kernel_path, "a") as f: f.write("\n") f.write(f"// Compile cmd\n// {kernel_compile_cmd}\n") f.write(f"// Link cmd\n// {link_cmd}\n") if config.aot_inductor.package_cpp_only: linker_flags = str( wrapper_path_operator.with_name( f"{wrapper_path_operator.stem}_linker_flags.json" ) ) so_build_options.save_flags_to_json(linker_flags) generated_files.append(linker_flags) generated_files.append(_LINKER_SCRIPT) # If we only want to package the cpp, then we need to save the # weights separately into a bin, and we also need to prevent compiling the so if use_mmap_weights: weight_file = str( wrapper_path_operator.with_name( f"{wrapper_path_operator.stem}_serialized_weights.bin" ) ) with open(weight_file, "wb") as f_weights: f_weights.write(serialized_weights) f_weights.write(struct.pack("q", magic_number)) generated_files.append(weight_file) else: # TODO: unify to always use mmap_weights generated_files.append(consts_o) so_builder.save_src_to_cmake(cmake_path, consts_o) # Different CMake strategies for CUDA vs 
ROCm: # - CUDA: Save asm for CMake to recompile (user has nvcc) # - ROCm: Link pre-compiled bundle (user may lack dev tools) if ( config.aot_inductor.emit_multi_arch_kernel and torch.version.hip is None ): so_builder.save_kernel_asm_to_cmake(cmake_path, asm_files) generated_files.extend(asm_files) else: # ROCm multi-arch + all single-arch: Link pre-compiled objects # Bundle already embedded in .o files - just link into .so obj_srcs = [*gpu_kernels_o, *cubins_o] generated_files.extend(obj_srcs) for obj in obj_srcs: so_builder.save_src_to_cmake(cmake_path, obj) so_builder.save_link_cmd_to_cmake(cmake_path) else: so_builder.build() for o_file in obj_srcs: if o_file in gpu_kernels_o: continue # Remove these as they are not needed anymore os.remove(o_file) if use_mmap_weights: if config.aot_inductor.cross_target_platform == "windows": raise RuntimeError( "when cross_target_platform is windows, use_mmap_weights should not be true." ) def get_page_size() -> int: # Don't use resource.getpagesize() on Windows, as it is a Unix specific package # as seen in https://docs.python.org/2/library/resource.html if _IS_WINDOWS: from ctypes import ( # type: ignore[attr-defined] byref, Structure, windll, ) from ctypes.wintypes import DWORD, LPVOID, WORD class SYSTEM_INFO(Structure): _fields_ = [ ("wProcessorArchitecture", WORD), ("wReserved", WORD), ("dwPageSize", DWORD), ("lpMinimumApplicationAddress", LPVOID), ("lpMaximumApplicationAddress", LPVOID), ("dwActiveProcessorMask", DWORD), ("dwNumberOfProcessors", DWORD), ("dwProcessorType", DWORD), ("dwAllocationGranularity", DWORD), ("wProcessorLevel", WORD), ("wProcessorRevision", WORD), ] si = SYSTEM_INFO() windll.kernel32.GetSystemInfo(byref(si)) sys_page_size = si.dwPageSize else: import resource sys_page_size = resource.getpagesize() return sys_page_size page_size_ = get_page_size() page_size = max(16384, page_size_) with open(output_so, "a+b") as f_so: so_size = f_so.tell() # Page align the weights f_so.write(b" " * (page_size - 
so_size % page_size)) f_so.write(serialized_weights) f_so.write(struct.pack("q", magic_number)) if config.aot_inductor.package: generated_files.append(output_so) if config.trace.provenance_tracking_level != 0: kernel_info = torch._inductor.debug.create_kernel_information_json() kernel_info_json = os.path.join( wrapper_path_operator.parent, "kernel_information.json" ) with open(kernel_info_json, "w") as f: f.write(json.dumps(kernel_info, indent=4)) generated_files.append(kernel_info_json) if config.aot_inductor.package: # We want to return the directory that contains all the AOTI # generated files, not just the so # return os.path.split(output_so)[0] return generated_files return output_so _libgomp: CDLL | None = None def custom_op_wrapper(op: str, *args: Any) -> list[c_void_p] | c_void_p | None: # This function will be called from generated cpp wrapper code in the JIT mode. # Because tensors will be passed in as AtenTensorHandle, we need to explicitly convert them. def convert_arg(arg: Any) -> Any: if str(type(arg)) == "<class 'PyCapsule'>": # No easy way to do isinstance check on PyCapsule return torch._C._aoti.alloc_tensor_by_stealing_from_void_ptr(arg) elif isinstance(arg, (list, tuple)): return type(arg)(convert_arg(a) for a in arg) else: return arg converted_args = [convert_arg(arg) for arg in args] assert op.startswith("torch.ops."), ( op + " can not be called through custom_op_wrapper" ) func = None for i, s in enumerate(op.split(".")): if i == 0: func = importlib.import_module(s) func = getattr(func, s) assert callable(func), op + " can not be loaded through custom_op_wrapper" # convert any kwarg-only arguments to kwargs kwargs = dict() # pyrefly: ignore [missing-attribute] for func_arg, conv_arg in zip(func._schema.arguments, converted_args): if func_arg.kwarg_only: kwargs[func_arg.name] = conv_arg if kwargs: del converted_args[-len(kwargs) :] result = func(*converted_args, **kwargs) if result is None: return None if isinstance(result, (list, tuple)): # 
unsafe_alloc_void_ptrs_from_tensors expects result contains tensor only result = [torch.tensor([]) if r is None else r for r in result] for r in result: assert isinstance(r, torch.Tensor), op + " returns a list of non-tensors" return torch._C._aoti.unsafe_alloc_void_ptrs_from_tensors(result) # type: ignore[arg-type] assert isinstance(result, torch.Tensor), op + " returns a non-tensor" return torch._C._aoti.unsafe_alloc_void_ptr_from_tensor(result) # Precompiled headers are persistent past program runtime, but associated with one # specific compiler version and set of flags. We explicitly use default_cache_dir here # because these headers need to be global, rather than ignored by fresh_cache. _HEADER_DIR = os.path.join(default_cache_dir(), "precompiled_headers") _HEADER_LOCK_DIR = os.path.join(_HEADER_DIR, "locks") @functools.cache def _precompile_header( header: str, hashable_cmd_line: str, **compile_command: Any, ) -> str: assert not _IS_WINDOWS, ( "CppBuilder does not currently support precompiling on Windows!" ) # Get the preprocessed output from the header file to be precompiled. This allows # us to properly invalidate the file cache when any header dependency changes. This # is thread-safe, as each thread will get its own temporary directory. # # N.B. we can't use NamedTemporaryFile here because Windows errors out on attempts # to read from a file with an open write handle. 
with tempfile.TemporaryDirectory() as preprocessing_dir: preprocessing_header = Path(preprocessing_dir) / "header.hpp" preprocessing_header.write_text(f"#include <{header}>\n") preprocessor = CppBuilder( name=str(preprocessing_header)[:-4], # strip off the .hpp extension sources=str(preprocessing_header), BuildOption=CppTorchDeviceOptions(**compile_command, preprocessing=True), ) preprocessor.build() def _get_file_checksum(filename: str) -> str: """Reading the whole preprocessed header in for hashing is very expensive, but calling a fast hashing utility in a subprocess is cheap.""" # If Windows support needs to be added here, use certutil -hashfile. cmd_output = subprocess.run( ("openssl", "sha512", filename), capture_output=True, text=True ) return cmd_output.stdout.split()[-1] preprocessor_hash = _get_file_checksum(preprocessor.get_target_file_path()) header_build_option = CppTorchDeviceOptions(**compile_command, precompiling=True) header_hash, header_full_path = write( content=f"#include <{header}>\n", extension="h", extra=( hashable_cmd_line + preprocessor_hash + get_compiler_version_info(header_build_option.get_compiler()) ), specified_dir=_HEADER_DIR, ) cpp_builder = CppBuilder( name=header_full_path, sources=header_full_path, BuildOption=header_build_option, ) # _worker_compile_cpp will automatically ignore any compilation whose result already # exists, so this is always safe. 
os.makedirs(_HEADER_LOCK_DIR, exist_ok=True) _worker_compile_cpp( os.path.join(_HEADER_LOCK_DIR, f"{header_hash}.lock"), (cpp_builder,), ) return header_full_path def _get_cpp_prefix_header(device: str) -> str | None: if device.startswith("cpu"): return "torch/csrc/inductor/cpp_prefix.h" return None def _get_cpp_wrapper_header(device: str, aot_mode: bool = False) -> str: """Given a device type (and optionally whether we're in AOT Inductor mode), returns the path to the cpp_wrapper header file to be precompiled.""" base_device = device.split(":", maxsplit=1)[0] is_array_ref = config.aot_inductor.allow_stack_allocation and base_device == "cpu" return ( "torch/csrc/inductor/" f"{'aoti_include' if aot_mode else 'cpp_wrapper'}/" f"{'array_ref' if is_array_ref else base_device}.h" ) @clear_on_fresh_cache
AotCodeCompiler
python
jazzband__django-simple-history
simple_history/tests/tests/test_models.py
{ "start": 76129, "end": 78634 }
class ____(TestCase): def setUp(self): self.model_book = PollChildBookWithManyToMany self.model_rstr = PollChildRestaurantWithManyToMany self.place = Place.objects.create(name="Home") self.book = Book.objects.create(isbn="1234") self.restaurant = Restaurant.objects.create(rating=1) self.poll_book = self.model_book.objects.create( question="what's up?", pub_date=today ) self.poll_rstr = self.model_rstr.objects.create( question="what's up?", pub_date=today ) def test_separation(self): self.assertEqual(self.poll_book.history.all().count(), 1) self.poll_book.places.add(self.place) self.poll_book.books.add(self.book) self.assertEqual(self.poll_book.history.all().count(), 3) self.assertEqual(self.poll_rstr.history.all().count(), 1) self.poll_rstr.places.add(self.place) self.poll_rstr.restaurants.add(self.restaurant) self.assertEqual(self.poll_rstr.history.all().count(), 3) book, place, add = self.poll_book.history.all() self.assertEqual(book.books.all().count(), 1) self.assertEqual(book.places.all().count(), 1) self.assertEqual(book.books.first().book, self.book) self.assertEqual(place.books.all().count(), 0) self.assertEqual(place.places.all().count(), 1) self.assertEqual(place.places.first().place, self.place) self.assertEqual(add.books.all().count(), 0) self.assertEqual(add.places.all().count(), 0) restaurant, place, add = self.poll_rstr.history.all() self.assertEqual(restaurant.restaurants.all().count(), 1) self.assertEqual(restaurant.places.all().count(), 1) self.assertEqual(restaurant.restaurants.first().restaurant, self.restaurant) self.assertEqual(place.restaurants.all().count(), 0) self.assertEqual(place.places.all().count(), 1) self.assertEqual(place.places.first().place, self.place) self.assertEqual(add.restaurants.all().count(), 0) self.assertEqual(add.places.all().count(), 0) def test_self_field(self): poll1 = PollWithSelfManyToMany.objects.create() poll2 = PollWithSelfManyToMany.objects.create() self.assertEqual(poll1.history.all().count(), 1) 
poll1.relations.add(poll2) self.assertIn(poll2, poll1.relations.all()) self.assertEqual(poll1.history.all().count(), 2)
InheritedManyToManyTest
python
ethereum__web3.py
web3/exceptions.py
{ "start": 4825, "end": 4933 }
class ____(Web3Exception): """ Raised when an ABI doesn't contain any events. """
NoABIEventsFound
python
pandas-dev__pandas
pandas/tests/arrays/boolean/test_comparison.py
{ "start": 518, "end": 1971 }
class ____(ComparisonOps): def test_compare_scalar(self, data, comparison_op): self._compare_other(data, comparison_op, True) def test_compare_array(self, data, comparison_op): other = pd.array([True] * len(data), dtype="boolean") self._compare_other(data, comparison_op, other) other = np.array([True] * len(data)) self._compare_other(data, comparison_op, other) other = pd.Series([True] * len(data)) self._compare_other(data, comparison_op, other) @pytest.mark.parametrize("other", [True, False, pd.NA]) def test_scalar(self, other, comparison_op, dtype): ComparisonOps.test_scalar(self, other, comparison_op, dtype) def test_array(self, comparison_op): op = comparison_op a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") b = pd.array([True, False, None] * 3, dtype="boolean") result = op(a, b) values = op(a._data, b._data) mask = a._mask | b._mask expected = BooleanArray(values, mask) tm.assert_extension_array_equal(result, expected) # ensure we haven't mutated anything inplace result[0] = None tm.assert_extension_array_equal( a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") ) tm.assert_extension_array_equal( b, pd.array([True, False, None] * 3, dtype="boolean") )
TestComparisonOps
python
langchain-ai__langchain
libs/partners/openai/tests/unit_tests/chat_models/test_base_standard.py
{ "start": 200, "end": 902 }
class ____(ChatModelUnitTests): @property def chat_model_class(self) -> type[BaseChatModel]: return ChatOpenAI @property def init_from_env_params(self) -> tuple[dict, dict, dict]: return ( { "OPENAI_API_KEY": "api_key", "OPENAI_ORG_ID": "org_id", "OPENAI_API_BASE": "api_base", "OPENAI_PROXY": "https://proxy.com", }, {}, { "openai_api_key": "api_key", "openai_organization": "org_id", "openai_api_base": "api_base", "openai_proxy": "https://proxy.com", }, )
TestOpenAIStandard
python
chroma-core__chroma
chromadb/db/migrations.py
{ "start": 540, "end": 601 }
class ____(MigrationFile): hash: str sql: str
Migration
python
neetcode-gh__leetcode
python/0148-sort-list.py
{ "start": 0, "end": 1236 }
class ____: def sortList(self, head: Optional[ListNode]) -> Optional[ListNode]: if not head or not head.next: return head mid = self.get_mid(head) left, right = self.sortList(head), self.sortList(mid) return self.merge_two_sorted(left, right) def merge_two_sorted(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]: if not list1: return list2 if not list2: return list1 sentinel = ListNode() prev = sentinel while list1 and list2: if list1.val < list2.val: prev.next = list1 list1 = list1.next else: prev.next = list2 list2 = list2.next prev = prev.next if list1: prev.next = list1 else: prev.next = list2 return sentinel.next def get_mid(self, head: Optional[ListNode]) -> Optional[ListNode]: mid_prev = None while head and head.next: mid_prev = mid_prev.next if mid_prev else head head = head.next.next mid = mid_prev.next mid_prev.next = None return mid
Solution
python
gevent__gevent
src/greentest/3.12/test_ftplib.py
{ "start": 9041, "end": 16600 }
class ____(asyncore.dispatcher, threading.Thread): handler = DummyFTPHandler def __init__(self, address, af=socket.AF_INET, encoding=DEFAULT_ENCODING): threading.Thread.__init__(self) asyncore.dispatcher.__init__(self) self.daemon = True self.create_socket(af, socket.SOCK_STREAM) self.bind(address) self.listen(5) self.active = False self.active_lock = threading.Lock() self.host, self.port = self.socket.getsockname()[:2] self.handler_instance = None self.encoding = encoding def start(self): assert not self.active self.__flag = threading.Event() threading.Thread.start(self) self.__flag.wait() def run(self): self.active = True self.__flag.set() while self.active and asyncore.socket_map: self.active_lock.acquire() asyncore.loop(timeout=0.1, count=1) self.active_lock.release() asyncore.close_all(ignore_all=True) def stop(self): assert self.active self.active = False self.join() def handle_accepted(self, conn, addr): self.handler_instance = self.handler(conn, encoding=self.encoding) def handle_connect(self): self.shutdown() handle_read = handle_connect def writable(self): return 0 def handle_error(self): default_error_handler() if ssl is not None: CERTFILE = os.path.join(os.path.dirname(__file__), "certdata", "keycert3.pem") CAFILE = os.path.join(os.path.dirname(__file__), "certdata", "pycacert.pem") class SSLConnection(asyncore.dispatcher): """An asyncore.dispatcher subclass supporting TLS/SSL.""" _ssl_accepting = False _ssl_closing = False def secure_connection(self): context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) context.load_cert_chain(CERTFILE) socket = context.wrap_socket(self.socket, suppress_ragged_eofs=False, server_side=True, do_handshake_on_connect=False) self.del_channel() self.set_socket(socket) self._ssl_accepting = True def _do_ssl_handshake(self): try: self.socket.do_handshake() except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return elif err.args[0] == ssl.SSL_ERROR_EOF: return self.handle_close() # 
TODO: SSLError does not expose alert information elif "SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1]: return self.handle_close() raise except OSError as err: if err.args[0] == errno.ECONNABORTED: return self.handle_close() else: self._ssl_accepting = False def _do_ssl_shutdown(self): self._ssl_closing = True try: self.socket = self.socket.unwrap() except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return except OSError: # Any "socket error" corresponds to a SSL_ERROR_SYSCALL return # from OpenSSL's SSL_shutdown(), corresponding to a # closed socket condition. See also: # http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html pass self._ssl_closing = False if getattr(self, '_ccc', False) is False: super(SSLConnection, self).close() else: pass def handle_read_event(self): if self._ssl_accepting: self._do_ssl_handshake() elif self._ssl_closing: self._do_ssl_shutdown() else: super(SSLConnection, self).handle_read_event() def handle_write_event(self): if self._ssl_accepting: self._do_ssl_handshake() elif self._ssl_closing: self._do_ssl_shutdown() else: super(SSLConnection, self).handle_write_event() def send(self, data): try: return super(SSLConnection, self).send(data) except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN, ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return 0 raise def recv(self, buffer_size): try: return super(SSLConnection, self).recv(buffer_size) except ssl.SSLError as err: if err.args[0] in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE): return b'' if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN): self.handle_close() return b'' raise def handle_error(self): default_error_handler() def shutdown(self): if (isinstance(self.socket, ssl.SSLSocket) and self.socket._sslobj is not None): self._do_ssl_shutdown() else: self.close() class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler): """A DummyDTPHandler subclass supporting 
TLS/SSL.""" def __init__(self, conn, baseclass): DummyDTPHandler.__init__(self, conn, baseclass) if self.baseclass.secure_data_channel: self.secure_connection() class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler): """A DummyFTPHandler subclass supporting TLS/SSL.""" dtp_handler = DummyTLS_DTPHandler def __init__(self, conn, encoding=DEFAULT_ENCODING): DummyFTPHandler.__init__(self, conn, encoding=encoding) self.secure_data_channel = False self._ccc = False def cmd_auth(self, line): """Set up secure control channel.""" self.push('234 AUTH TLS successful') self.secure_connection() def cmd_ccc(self, line): self.push('220 Reverting back to clear-text') self._ccc = True self._do_ssl_shutdown() def cmd_pbsz(self, line): """Negotiate size of buffer for secure data transfer. For TLS/SSL the only valid value for the parameter is '0'. Any other value is accepted but ignored. """ self.push('200 PBSZ=0 successful.') def cmd_prot(self, line): """Setup un/secure data channel.""" arg = line.upper() if arg == 'C': self.push('200 Protection set to Clear') self.secure_data_channel = False elif arg == 'P': self.push('200 Protection set to Private') self.secure_data_channel = True else: self.push("502 Unrecognized PROT type (use C or P).") class DummyTLS_FTPServer(DummyFTPServer): handler = DummyTLS_FTPHandler
DummyFTPServer
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-huggingface-fs/llama_index/readers/huggingface_fs/base.py
{ "start": 288, "end": 1806 }
class ____(BaseReader): """ Hugging Face File System reader. Uses the new Filesystem API from the Hugging Face Hub client library. """ def __init__(self) -> None: from huggingface_hub import HfFileSystem self.fs = HfFileSystem() def load_dicts(self, path: str) -> List[Dict]: """Parse file.""" test_data = self.fs.read_bytes(path) path = Path(path) if ".gz" in path.suffixes: import gzip with TemporaryDirectory() as tmp: tmp = Path(tmp) with open(tmp / "tmp.jsonl.gz", "wb") as fp: fp.write(test_data) f = gzip.open(tmp / "tmp.jsonl.gz", "rb") raw = f.read() data = raw.decode() else: data = test_data.decode() text_lines = data.split("\n") json_dicts = [] for t in text_lines: try: json_dict = json.loads(t) except json.decoder.JSONDecodeError: continue json_dicts.append(json_dict) return json_dicts def load_df(self, path: str) -> pd.DataFrame: """Load pandas dataframe.""" return pd.DataFrame(self.load_dicts(path)) def load_data(self, path: str) -> List[Document]: """Load data.""" json_dicts = self.load_dicts(path) docs = [] for d in json_dicts: docs.append(Document(text=str(d))) return docs
HuggingFaceFSReader
python
openai__openai-python
src/openai/types/responses/response_input_text_content_param.py
{ "start": 229, "end": 455 }
class ____(TypedDict, total=False): text: Required[str] """The text input to the model.""" type: Required[Literal["input_text"]] """The type of the input item. Always `input_text`."""
ResponseInputTextContentParam
python
TheAlgorithms__Python
graphs/breadth_first_search_zero_one_shortest_path.py
{ "start": 452, "end": 4660 }
class ____: """Graph adjacency list.""" def __init__(self, size: int): self._graph: list[list[Edge]] = [[] for _ in range(size)] self._size = size def __getitem__(self, vertex: int) -> Iterator[Edge]: """Get all the vertices adjacent to the given one.""" return iter(self._graph[vertex]) @property def size(self): return self._size def add_edge(self, from_vertex: int, to_vertex: int, weight: int): """ >>> g = AdjacencyList(2) >>> g.add_edge(0, 1, 0) >>> g.add_edge(1, 0, 1) >>> list(g[0]) [Edge(destination_vertex=1, weight=0)] >>> list(g[1]) [Edge(destination_vertex=0, weight=1)] >>> g.add_edge(0, 1, 2) Traceback (most recent call last): ... ValueError: Edge weight must be either 0 or 1. >>> g.add_edge(0, 2, 1) Traceback (most recent call last): ... ValueError: Vertex indexes must be in [0; size). """ if weight not in (0, 1): raise ValueError("Edge weight must be either 0 or 1.") if to_vertex < 0 or to_vertex >= self.size: raise ValueError("Vertex indexes must be in [0; size).") self._graph[from_vertex].append(Edge(to_vertex, weight)) def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None: """ Return the shortest distance from start_vertex to finish_vertex in 0-1-graph. 1 1 1 0--------->3 6--------7>------->8 | ^ ^ ^ |1 | | | |0 v 0| |0 1| 9-------->10 | | | ^ 1 v | | |0 1--------->2<-------4------->5 0 1 1 >>> g = AdjacencyList(11) >>> g.add_edge(0, 1, 0) >>> g.add_edge(0, 3, 1) >>> g.add_edge(1, 2, 0) >>> g.add_edge(2, 3, 0) >>> g.add_edge(4, 2, 1) >>> g.add_edge(4, 5, 1) >>> g.add_edge(4, 6, 1) >>> g.add_edge(5, 9, 0) >>> g.add_edge(6, 7, 1) >>> g.add_edge(7, 8, 1) >>> g.add_edge(8, 10, 1) >>> g.add_edge(9, 7, 0) >>> g.add_edge(9, 10, 1) >>> g.add_edge(1, 2, 2) Traceback (most recent call last): ... ValueError: Edge weight must be either 0 or 1. >>> g.get_shortest_path(0, 3) 0 >>> g.get_shortest_path(0, 4) Traceback (most recent call last): ... ValueError: No path from start_vertex to finish_vertex. 
>>> g.get_shortest_path(4, 10) 2 >>> g.get_shortest_path(4, 8) 2 >>> g.get_shortest_path(0, 1) 0 >>> g.get_shortest_path(1, 0) Traceback (most recent call last): ... ValueError: No path from start_vertex to finish_vertex. """ queue = deque([start_vertex]) distances: list[int | None] = [None] * self.size distances[start_vertex] = 0 while queue: current_vertex = queue.popleft() current_distance = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: new_distance = current_distance + edge.weight dest_vertex_distance = distances[edge.destination_vertex] if ( isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance ): continue distances[edge.destination_vertex] = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex) else: queue.append(edge.destination_vertex) if distances[finish_vertex] is None: raise ValueError("No path from start_vertex to finish_vertex.") return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
AdjacencyList
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance1.py
{ "start": 6315, "end": 6356 }
class ____(Generic[T]): value: T
Base15
python
pydantic__pydantic
pydantic/v1/errors.py
{ "start": 7047, "end": 7140 }
class ____(PydanticTypeError): msg_template = 'value is not a valid iterable'
IterableError
python
apache__airflow
providers/http/src/airflow/providers/http/triggers/http.py
{ "start": 9009, "end": 13024 }
class ____(HttpTrigger, BaseEventTrigger): """ HttpEventTrigger for event-based DAG scheduling when the API response satisfies the response check. :param response_check_path: Path to the function that evaluates whether the API response passes the conditions set by the user to fire the trigger. The method must be asynchronous. :param http_conn_id: http connection id that has the base API url i.e https://www.google.com/ and optional authentication credentials. Default headers can also be specified in the Extra field in json format. :param auth_type: The auth type for the service :param method: The API method to be called :param endpoint: Endpoint to be called, i.e. ``resource/v1/query?``. :param headers: Additional headers to be passed through as a dict. :param data: Payload to be uploaded or request parameters. :param extra_options: Additional kwargs to pass when creating a request. :parama poll_interval: How often, in seconds, the trigger should send a request to the API. """ def __init__( self, response_check_path: str, http_conn_id: str = "http_default", auth_type: Any = None, method: str = "GET", endpoint: str | None = None, headers: dict[str, str] | None = None, data: dict[str, Any] | str | None = None, extra_options: dict[str, Any] | None = None, poll_interval: float = 60.0, ): super().__init__(http_conn_id, auth_type, method, endpoint, headers, data, extra_options) self.response_check_path = response_check_path self.poll_interval = poll_interval def serialize(self) -> tuple[str, dict[str, Any]]: """Serialize HttpEventTrigger arguments and classpath.""" return ( self.__class__.__module__ + "." 
+ self.__class__.__qualname__, { "http_conn_id": self.http_conn_id, "method": self.method, "auth_type": serialize_auth_type(self.auth_type), "endpoint": self.endpoint, "headers": self.headers, "data": self.data, "extra_options": self.extra_options, "response_check_path": self.response_check_path, "poll_interval": self.poll_interval, }, ) async def run(self) -> AsyncIterator[TriggerEvent]: """Make a series of asynchronous http calls via a http hook until the response passes the response check.""" hook = super()._get_async_hook() try: while True: response = await super()._get_response(hook) if await self._run_response_check(response): break await asyncio.sleep(self.poll_interval) yield TriggerEvent( { "status": "success", "response": base64.standard_b64encode(pickle.dumps(response)).decode("ascii"), } ) except Exception as e: self.log.error("status: error, message: %s", str(e)) async def _import_from_response_check_path(self): """Import the response check callable from the path provided by the user.""" module_path, func_name = self.response_check_path.rsplit(".", 1) if module_path in sys.modules: module = await sync_to_async(importlib.reload)(sys.modules[module_path]) module = await sync_to_async(importlib.import_module)(module_path) return getattr(module, func_name) async def _run_response_check(self, response) -> bool: """Run the response_check callable provided by the user.""" response_check = await self._import_from_response_check_path() if not inspect.iscoroutinefunction(response_check): raise AirflowException("The response_check callable is not asynchronous.") check = await response_check(response) return check
HttpEventTrigger
python
kamyu104__LeetCode-Solutions
Python/strobogrammatic-number-iii.py
{ "start": 35, "end": 2625 }
class ____(object): lookup = {'0':'0', '1':'1', '6':'9', '8':'8', '9':'6'} cache = {} # @param {string} low # @param {string} high # @return {integer} def strobogrammaticInRange(self, low, high): count = self.countStrobogrammaticUntil(high, False) - \ self.countStrobogrammaticUntil(low, False) + \ self.isStrobogrammatic(low) return count if count >= 0 else 0 def countStrobogrammaticUntil(self, num, can_start_with_0): if can_start_with_0 and num in self.cache: return self.cache[num] count = 0 if len(num) == 1: for c in ['0', '1', '8']: if num[0] >= c: count += 1 self.cache[num] = count return count for key, val in self.lookup.iteritems(): if can_start_with_0 or key != '0': if num[0] > key: if len(num) == 2: # num is like "21" count += 1 else: # num is like "201" count += self.countStrobogrammaticUntil('9' * (len(num) - 2), True) elif num[0] == key: if len(num) == 2: # num is like 12". if num[-1] >= val: count += 1 else: if num[-1] >= val: # num is like "102". count += self.countStrobogrammaticUntil(self.getMid(num), True) elif (self.getMid(num) != '0' * (len(num) - 2)): # num is like "110". count += self.countStrobogrammaticUntil(self.getMid(num), True) - \ self.isStrobogrammatic(self.getMid(num)) if not can_start_with_0: # Sum up each length. for i in xrange(len(num) - 1, 0, -1): count += self.countStrobogrammaticByLength(i) else: self.cache[num] = count return count def getMid(self, num): return num[1:len(num) - 1] def countStrobogrammaticByLength(self, n): if n == 1: return 3 elif n == 2: return 4 elif n == 3: return 4 * 3 else: return 5 * self.countStrobogrammaticByLength(n - 2) def isStrobogrammatic(self, num): n = len(num) for i in xrange((n+1) / 2): if num[n-1-i] not in self.lookup or \ num[i] != self.lookup[num[n-1-i]]: return False return True
Solution
python
huggingface__transformers
tests/models/deformable_detr/test_image_processing_deformable_detr.py
{ "start": 1195, "end": 4801 }
class ____: def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to DeformableDetrImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision
DeformableDetrImageProcessingTester
python
langchain-ai__langchain
libs/partners/anthropic/tests/unit_tests/middleware/test_anthropic_tools.py
{ "start": 2307, "end": 3017 }
class ____: """Test text editor middleware functionality.""" def test_middleware_initialization(self) -> None: """Test middleware initializes correctly.""" middleware = StateClaudeTextEditorMiddleware() assert middleware.state_schema == AnthropicToolsState assert middleware.tool_type == "text_editor_20250728" assert middleware.tool_name == "str_replace_based_edit_tool" assert middleware.state_key == "text_editor_files" # With path restrictions middleware = StateClaudeTextEditorMiddleware( allowed_path_prefixes=["/workspace"] ) assert middleware.allowed_prefixes == ["/workspace"]
TestTextEditorMiddleware
python
huggingface__transformers
src/transformers/models/cohere2/modeling_cohere2.py
{ "start": 20324, "end": 24600 }
class ____(Cohere2PreTrainedModel, GenerationMixin): _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"} _tp_plan = {"lm_head": "colwise_rep"} _pp_plan = {"lm_head": (["hidden_states"], ["logits"])} def __init__(self, config): super().__init__(config) self.model = Cohere2Model(config) self.vocab_size = config.vocab_size self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.logit_scale = config.logit_scale self.tie_word_embeddings = config.tie_word_embeddings # Initialize weights and apply final processing self.post_init() @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >> from transformers import AutoTokenizer, Cohere2ForCausalLM >> model = Cohere2ForCausalLM.from_pretrained("Cohere2ForAI/c4ai-command-r-v01") >> tokenizer = AutoTokenizer.from_pretrained("Cohere2ForAI/c4ai-command-r-v01") >> prompt = "Hey, are you conscious? Can you talk to me?" 
>> inputs = tokenizer(prompt, return_tensors="pt") >> # Generate >> generate_ids = model.generate(inputs.input_ids, max_length=30) >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs: BaseModelOutputWithPast = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, cache_position=cache_position, **kwargs, ) hidden_states = outputs.last_hidden_state # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) logits = logits * self.logit_scale # main diff from Llama loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = ["Cohere2ForCausalLM", "Cohere2Model", "Cohere2PreTrainedModel"]
Cohere2ForCausalLM
python
ray-project__ray
rllib/algorithms/ppo/tests/test_ppo.py
{ "start": 1611, "end": 5560 }
class ____(unittest.TestCase): @classmethod def setUpClass(cls): ray.init() @classmethod def tearDownClass(cls): ray.shutdown() def test_ppo_compilation_and_schedule_mixins(self): """Test whether PPO can be built with all frameworks.""" # Build a PPOConfig object with the `SingleAgentEnvRunner` class. config = ( ppo.PPOConfig() .env_runners(num_env_runners=0) .training( num_epochs=2, # Setup lr schedule for testing lr-scheduling correctness. lr=[[0, 0.00001], [512, 0.0]], # 512=4x128 # Setup `entropy_coeff` schedule for testing whether it's scheduled # correctly. entropy_coeff=[[0, 0.1], [256, 0.0]], # 256=2x128, train_batch_size=128, ) .callbacks(on_train_result=on_train_result) .evaluation( # Also test evaluation with remote workers. evaluation_num_env_runners=2, evaluation_duration=3, evaluation_duration_unit="episodes", evaluation_parallel_to_training=True, ) ) num_iterations = 2 for env in [ "CartPole-v1", "Pendulum-v1", ]: print("Env={}".format(env)) for lstm in [False]: print("LSTM={}".format(lstm)) config.rl_module(model_config=get_model_config(lstm=lstm)) algo = config.build(env=env) # TODO: Maybe add an API to get the Learner(s) instances within # a learner group, remote or not. learner = algo.learner_group._learner optim = learner.get_optimizer() # Check initial LR directly set in optimizer vs the first (ts=0) # value from the schedule. lr = optim.param_groups[0]["lr"] check(lr, config.lr[0][1]) # Check current entropy coeff value using the respective Scheduler. 
entropy_coeff = learner.entropy_coeff_schedulers_per_module[ DEFAULT_MODULE_ID ].get_current_value() check(entropy_coeff, 0.1) for i in range(num_iterations): results = algo.train() check_train_results_new_api_stack(results) print(results) # algo.evaluate() algo.stop() def test_ppo_free_log_std(self): """Tests the free log std option works.""" config = ( ppo.PPOConfig() .environment("Pendulum-v1") .env_runners( num_env_runners=1, ) .rl_module( model_config=DefaultModelConfig( fcnet_hiddens=[10], fcnet_activation="linear", free_log_std=True, vf_share_layers=True, ), ) .training( gamma=0.99, ) ) algo = config.build() module = algo.get_module(DEFAULT_MODULE_ID) # Check the free log std var is created. matching = [v for (n, v) in module.named_parameters() if "log_std" in n] assert len(matching) == 1, matching log_std_var = matching[0] def get_value(log_std_var=log_std_var): return log_std_var.detach().cpu().numpy()[0] # Check the variable is initially zero. init_std = get_value() assert init_std == 0.0, init_std algo.train() # Check the variable is updated. post_std = get_value() assert post_std != 0.0, post_std algo.stop() if __name__ == "__main__": import sys import pytest sys.exit(pytest.main(["-v", __file__]))
TestPPO
python
scipy__scipy
scipy/sparse/tests/test_64bit.py
{ "start": 7698, "end": 9180 }
class ____(RunAll64Bit): # assert_32bit=True only for spmatrix cuz sparray does not check index content @pytest.mark.fail_slow(5) @pytest.mark.parametrize('cls,method_name', cases_64bit("spmatrix-extra")) def test_no_64(self, cls, method_name): self._check_resiliency(cls, method_name, assert_32bit=True) # inheritance of pytest test classes does not separate marks for subclasses. # So we define these functions in both Array and Matrix versions. @pytest.mark.parametrize('cls,method_name', cases_64bit("spmatrix-extra")) def test_resiliency_limit_10(self, cls, method_name): self._check_resiliency(cls, method_name, maxval_limit=10) @pytest.mark.parametrize('cls,method_name', cases_64bit("spmatrix-extra")) def test_resiliency_all_32(self, cls, method_name): self._check_resiliency(cls, method_name, fixed_dtype=np.int32) @pytest.mark.parametrize('cls,method_name', cases_64bit("spmatrix-extra")) def test_resiliency_all_64(self, cls, method_name): self._check_resiliency(cls, method_name, fixed_dtype=np.int64) @pytest.mark.fail_slow(2) @pytest.mark.parametrize('cls,method_name', cases_64bit("spmatrix-extra")) def test_resiliency_random(self, cls, method_name): # Resiliency check that sparse deals with varying index data types. self._check_resiliency(cls, method_name) @pytest.mark.thread_unsafe(reason="Fails in parallel for unknown reasons")
Test64BitMatrixExtra
python
pola-rs__polars
py-polars/src/polars/schema.py
{ "start": 2157, "end": 9076 }
class ____(BaseSchema): """ Ordered mapping of column names to their data type. Parameters ---------- schema The schema definition given by column names and their associated Polars data type. Accepts a mapping, or an iterable of tuples, or any object implementing the `__arrow_c_schema__` PyCapsule interface (e.g. pyarrow schemas). Examples -------- Define a schema by passing instantiated data types. >>> schema = pl.Schema( ... { ... "foo": pl.String(), ... "bar": pl.Duration("us"), ... "baz": pl.Array(pl.Int8, 4), ... } ... ) >>> schema Schema({'foo': String, 'bar': Duration(time_unit='us'), 'baz': Array(Int8, shape=(4,))}) Access the data type associated with a specific column name. >>> schema["baz"] Array(Int8, shape=(4,)) Access various schema properties using the `names`, `dtypes`, and `len` methods. >>> schema.names() ['foo', 'bar', 'baz'] >>> schema.dtypes() [String, Duration(time_unit='us'), Array(Int8, shape=(4,))] >>> schema.len() 3 Import a pyarrow schema. >>> import pyarrow as pa >>> pl.Schema(pa.schema([pa.field("x", pa.int32())])) Schema({'x': Int32}) Export a schema to pyarrow. 
>>> pa.schema(pl.Schema({"x": pl.Int32})) x: int32 """ # noqa: W505 def __init__( self, schema: ( Mapping[str, SchemaInitDataType] | Iterable[tuple[str, SchemaInitDataType] | ArrowSchemaExportable] | ArrowSchemaExportable | None ) = None, *, check_dtypes: bool = True, ) -> None: if hasattr(schema, "__arrow_c_schema__") and not isinstance(schema, Schema): init_polars_schema_from_arrow_c_schema(self, schema) return input = schema.items() if isinstance(schema, Mapping) else (schema or ()) for v in input: name, tp = ( polars_schema_field_from_arrow_c_schema(v) if hasattr(v, "__arrow_c_schema__") and not isinstance(v, DataType) else v ) if name in self: msg = f"iterable passed to pl.Schema contained duplicate name '{name}'" raise DuplicateError(msg) if not check_dtypes: super().__setitem__(name, tp) # type: ignore[assignment] elif is_polars_dtype(tp): super().__setitem__(name, _check_dtype(tp)) else: self[name] = tp def __eq__(self, other: object) -> bool: if not isinstance(other, Mapping): return False if len(self) != len(other): return False for (nm1, tp1), (nm2, tp2) in zip(self.items(), other.items()): if nm1 != nm2 or not tp1.is_(tp2): return False return True def __ne__(self, other: object) -> bool: return not self.__eq__(other) def __setitem__( self, name: str, dtype: DataType | DataTypeClass | PythonDataType ) -> None: dtype = _check_dtype(parse_into_dtype(dtype)) super().__setitem__(name, dtype) @unstable() def __arrow_c_schema__(self) -> object: """ Export a Schema via the Arrow PyCapsule Interface. https://arrow.apache.org/docs/dev/format/CDataInterface/PyCapsuleInterface.html """ return polars_schema_to_pycapsule(self, CompatLevel.newest()._version) def names(self) -> list[str]: """ Get the column names of the schema. Examples -------- >>> s = pl.Schema({"x": pl.Float64(), "y": pl.Datetime(time_zone="UTC")}) >>> s.names() ['x', 'y'] """ return list(self.keys()) def dtypes(self) -> list[DataType]: """ Get the data types of the schema. 
Examples -------- >>> s = pl.Schema({"x": pl.UInt8(), "y": pl.List(pl.UInt8)}) >>> s.dtypes() [UInt8, List(UInt8)] """ return list(self.values()) @unstable() def to_arrow(self, *, compat_level: CompatLevel | None = None) -> pa.Schema: """ Convert the schema to a pyarrow schema. Parameters ---------- compat_level Use a specific compatibility level when exporting Polars' internal data types. Examples -------- >>> pl.Schema({"x": pl.String}).to_arrow() x: string_view """ class SchemaCapsuleProvider: def __init__(self, schema: Schema, compat_level: CompatLevel) -> None: self.schema = schema self.compat_level = compat_level def __arrow_c_schema__(self) -> object: return polars_schema_to_pycapsule( self.schema, self.compat_level._version ) return pa.schema( SchemaCapsuleProvider( self, CompatLevel.newest() if compat_level is None else compat_level ) ) @overload def to_frame(self, *, eager: Literal[False]) -> LazyFrame: ... @overload def to_frame(self, *, eager: Literal[True] = ...) -> DataFrame: ... def to_frame(self, *, eager: bool = True) -> DataFrame | LazyFrame: """ Create an empty DataFrame (or LazyFrame) from this Schema. Parameters ---------- eager If True, create a DataFrame; otherwise, create a LazyFrame. Examples -------- >>> s = pl.Schema({"x": pl.Int32(), "y": pl.String()}) >>> s.to_frame() shape: (0, 2) ┌─────┬─────┐ │ x ┆ y │ │ --- ┆ --- │ │ i32 ┆ str │ ╞═════╪═════╡ └─────┴─────┘ >>> s.to_frame(eager=False) # doctest: +IGNORE_RESULT <LazyFrame at 0x11BC0AD80> """ from polars import DataFrame, LazyFrame return DataFrame(schema=self) if eager else LazyFrame(schema=self) def len(self) -> int: """ Get the number of schema entries. Examples -------- >>> s = pl.Schema({"x": pl.Int32(), "y": pl.List(pl.String)}) >>> s.len() 2 >>> len(s) 2 """ return len(self) def to_python(self) -> dict[str, type]: """ Return a dictionary of column names and Python types. Examples -------- >>> s = pl.Schema( ... { ... "x": pl.Int8(), ... "y": pl.String(), ... 
"z": pl.Duration("us"), ... } ... ) >>> s.to_python() {'x': <class 'int'>, 'y': <class 'str'>, 'z': <class 'datetime.timedelta'>} """ return {name: tp.to_python() for name, tp in self.items()}
Schema
python
getsentry__sentry
src/sentry/types/grouphash_metadata.py
{ "start": 566, "end": 1825 }
class ____(TypedDict): """ Fingerprint data, gathered both during stand-alone custom/built-in fingerprinting and hybrid fingerprinting involving message, stacktrace, security, or template hashing """ # The fingerprint value fingerprint: str # Either "client", "server_builtin_rule", or "server_custom_rule". (We don't have a "none of the # above" option here because we only record fingerprint metadata in cases where there's some # sort of custom fingerprint.) fingerprint_source: str # The fingerprint value set in the SDK, if anything other than ["{{ default }}"]. Note that just # because this is set doesn't mean we necessarily used it for grouping, since server-side rules # take precedence over client fingerprints. See `fingerprint_source` above. client_fingerprint: NotRequired[str] # The server-side rule applied, if any matched_fingerprinting_rule: NotRequired[str] # Whether or not a hybrid fingerprint (one involving both the signal value `{{ default }}` and a # custom value) was used. In that case, we group as we normally would, but then split the events # into more granular groups based on the custom value. is_hybrid_fingerprint: bool
FingerprintHashingMetadata
python
pytest-dev__pytest-xdist
testing/test_dsession.py
{ "start": 1546, "end": 3223 }
class ____: def test_schedule_load_simple(self, pytester: pytest.Pytester) -> None: config = pytester.parseconfig("--tx=2*popen") sched = EachScheduling(config) node1, node2 = MockNode(), MockNode() sched.add_node(node1) sched.add_node(node2) collection = ["a.py::test_1"] assert not sched.collection_is_completed sched.add_node_collection(node1, collection) assert not sched.collection_is_completed sched.add_node_collection(node2, collection) assert bool(sched.collection_is_completed) assert sched.node2collection[node1] == collection assert sched.node2collection[node2] == collection sched.schedule() assert sched.tests_finished assert node1.sent == ["ALL"] assert node2.sent == ["ALL"] sched.mark_test_complete(node1, 0) assert sched.tests_finished sched.mark_test_complete(node2, 0) assert sched.tests_finished def test_schedule_remove_node(self, pytester: pytest.Pytester) -> None: config = pytester.parseconfig("--tx=popen") sched = EachScheduling(config) node1 = MockNode() sched.add_node(node1) collection = ["a.py::test_1"] assert not sched.collection_is_completed sched.add_node_collection(node1, collection) assert bool(sched.collection_is_completed) assert sched.node2collection[node1] == collection sched.schedule() assert sched.tests_finished crashitem = sched.remove_node(node1) assert crashitem assert sched.tests_finished assert not sched.nodes
TestEachScheduling
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/runs_feed.py
{ "start": 1682, "end": 1825 }
class ____(graphene.ObjectType): class Meta: name = "RunsFeedCount" count = graphene.NonNull(graphene.Int)
GrapheneRunsFeedCount
python
python__mypy
mypy/types.py
{ "start": 40669, "end": 42407 }
class ____(ProperType): """Type operator Unpack from PEP646. Can be either with Unpack[] or unpacking * syntax. The inner type should be either a TypeVarTuple, or a variable length tuple. In an exceptional case of callable star argument it can be a fixed length tuple. Note: the above restrictions are only guaranteed by normalizations after semantic analysis, if your code needs to handle UnpackType *during* semantic analysis, it is wild west, technically anything can be present in the wrapped type. """ __slots__ = ["type", "from_star_syntax"] def __init__( self, typ: Type, line: int = -1, column: int = -1, from_star_syntax: bool = False ) -> None: super().__init__(line, column) self.type = typ self.from_star_syntax = from_star_syntax def accept(self, visitor: TypeVisitor[T]) -> T: return visitor.visit_unpack_type(self) def serialize(self) -> JsonDict: return {".class": "UnpackType", "type": self.type.serialize()} def write(self, data: WriteBuffer) -> None: write_tag(data, UNPACK_TYPE) self.type.write(data) write_tag(data, END_TAG) @classmethod def read(cls, data: ReadBuffer) -> UnpackType: ret = UnpackType(read_type(data)) assert read_tag(data) == END_TAG return ret @classmethod def deserialize(cls, data: JsonDict) -> UnpackType: assert data[".class"] == "UnpackType" typ = data["type"] return UnpackType(deserialize_type(typ)) def __hash__(self) -> int: return hash(self.type) def __eq__(self, other: object) -> bool: return isinstance(other, UnpackType) and self.type == other.type
UnpackType
python
apache__airflow
airflow-core/src/airflow/dag_processing/processor.py
{ "start": 2806, "end": 3360 }
class ____(BaseModel): """ Request for DAG File Parsing. This is the request that the manager will send to the DAG parser with the dag file and any other necessary metadata. """ file: str bundle_path: Path """Passing bundle path around lets us figure out relative file path.""" bundle_name: str """Bundle name for team-specific executor validation.""" callback_requests: list[CallbackRequest] = Field(default_factory=list) type: Literal["DagFileParseRequest"] = "DagFileParseRequest"
DagFileParseRequest
python
kamyu104__LeetCode-Solutions
Python/length-of-longest-v-shaped-diagonal-segment.py
{ "start": 42, "end": 2827 }
class ____(object): def lenOfVDiagonal(self, grid): """ :type grid: List[List[int]] :rtype: int """ n, m = len(grid), len(grid[0]) result = 0 down_right = [[1]*m for _ in xrange(n)] down_left = [[1]*m for _ in xrange(n)] for i in xrange(n): for j in xrange(m): x = grid[i][j] if x == 1: result = 1 continue if i-1 >= 0 and j-1 >= 0 and grid[i-1][j-1] == 2-x: down_right[i][j] = down_right[i-1][j-1]+1 if i-1 >= 0 and j+1 < m and grid[i-1][j+1] == 2-x: down_left[i][j] = down_left[i-1][j+1]+1 up_right = [[1]*m for _ in xrange(n)] up_left = [[1]*m for _ in xrange(n)] for i in reversed(xrange(n)): for j in xrange(m): x = grid[i][j] if x == 1: continue if i+1 < n and j-1 >= 0 and grid[i+1][j-1] == 2-x: up_right[i][j] = up_right[i+1][j-1]+1 if i+1 < n and j+1 < m and grid[i+1][j+1] == 2-x: up_left[i][j] = up_left[i+1][j+1]+1 for i in xrange(n): for j in xrange(m): x = grid[i][j] if x == 1: continue if (down_right[i][j]%2*2 == 0 and x == 0) or (down_right[i][j]%2 == 1 and x == 2): ni = i-down_right[i][j] nj = j-down_right[i][j] if 0 <= ni < n and 0 <= nj < m and grid[ni][nj] == 1: result = max(result, down_right[i][j]+up_right[i][j]) # > if (down_left[i][j]%2 == 0 and x == 0) or (down_left[i][j]%2 == 1 and x == 2): ni = i-down_left[i][j] nj = j+down_left[i][j] if 0 <= ni< n and 0 <= nj < m and grid[ni][nj] == 1: result = max(result, down_left[i][j]+down_right[i][j]) # v if (up_left[i][j]%2 == 0 and x == 0) or (up_left[i][j]%2 == 1 and x == 2): ni = i+up_left[i][j] nj = j+up_left[i][j] if 0 <= ni < n and 0 <= nj < m and grid[ni][nj] == 1: result = max(result, up_left[i][j]+down_left[i][j]) # < if (up_right[i][j]%2 == 0 and x == 0) or (up_right[i][j]%2 == 1 and x == 2): ni = i+up_right[i][j] nj = j-up_right[i][j] if 0 <= ni < n and 0 <= nj < m and grid[ni][nj] == 1: result = max(result, up_right[i][j]+up_left[i][j]) # ^ return result # Time: O(n * m) # Space: O(n * m) # memoization
Solution
python
huggingface__transformers
src/transformers/models/switch_transformers/modeling_switch_transformers.py
{ "start": 55522, "end": 57447 }
class ____(SwitchTransformersPreTrainedModel): _tied_weights_keys = { "encoder.embed_tokens.weight": "shared.weight", } _can_record_outputs = { "hidden_states": SwitchTransformersBlock, "attentions": OutputRecorder(SwitchTransformersAttention, index=-1, layer_name="layer.0"), "router_logits": SwitchTransformersTop1Router, } def __init__(self, config: SwitchTransformersConfig): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = SwitchTransformersStack(encoder_config) self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) @auto_docstring @check_model_inputs() def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, **kwargs: Unpack[TransformersKwargs], ) -> Union[tuple[torch.FloatTensor], MoEModelOutput]: use_cache = False encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, **kwargs, ) return encoder_outputs __all__ = [ "SwitchTransformersEncoderModel", "SwitchTransformersForConditionalGeneration", "SwitchTransformersModel", "SwitchTransformersPreTrainedModel", "SwitchTransformersTop1Router", "SwitchTransformersSparseMLP", ]
SwitchTransformersEncoderModel
python
sympy__sympy
sympy/assumptions/cnf.py
{ "start": 616, "end": 2030 }
class ____: """ The smallest element of a CNF object. Parameters ========== lit : Boolean expression is_Not : bool Examples ======== >>> from sympy import Q >>> from sympy.assumptions.cnf import Literal >>> from sympy.abc import x >>> Literal(Q.even(x)) Literal(Q.even(x), False) >>> Literal(~Q.even(x)) Literal(Q.even(x), True) """ def __new__(cls, lit, is_Not=False): if isinstance(lit, Not): lit = lit.args[0] is_Not = True elif isinstance(lit, (AND, OR, Literal)): return ~lit if is_Not else lit obj = super().__new__(cls) obj.lit = lit obj.is_Not = is_Not return obj @property def arg(self): return self.lit def rcall(self, expr): if callable(self.lit): lit = self.lit(expr) else: lit = self.lit.apply(expr) return type(self)(lit, self.is_Not) def __invert__(self): is_Not = not self.is_Not return Literal(self.lit, is_Not) def __str__(self): return '{}({}, {})'.format(type(self).__name__, self.lit, self.is_Not) __repr__ = __str__ def __eq__(self, other): return self.arg == other.arg and self.is_Not == other.is_Not def __hash__(self): h = hash((type(self).__name__, self.arg, self.is_Not)) return h
Literal
python
bokeh__bokeh
src/bokeh/core/property/datetime.py
{ "start": 1545, "end": 2474 }
class ____(Property[str | datetime.date]): """ Accept ISO format Date (but not DateTime) values. """ def transform(self, value: Any) -> Any: value = super().transform(value) if isinstance(value, datetime.date): value = value.isoformat() return value def validate(self, value: Any, detail: bool = True) -> None: super().validate(value, detail) # datetime.datetime is datetime.date, exclude manually up front if isinstance(value, datetime.datetime): msg = "" if not detail else "Expected a date value, got a datetime.datetime" raise ValueError(msg) if isinstance(value, datetime.date): return try: datetime.datetime.fromisoformat(value) except Exception: msg = "" if not detail else f"Expected an ISO date string, got {value!r}" raise ValueError(msg)
Date
python
redis__redis-py
benchmarks/command_packer_benchmark.py
{ "start": 2612, "end": 3273 }
class ____(Benchmark): ARGUMENTS = ( { "name": "connection_class", "values": [StringJoiningConnection, ListJoiningConnection], }, { "name": "value_size", "values": [10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000], }, ) def setup(self, connection_class, value_size): self.get_client(connection_class=connection_class) def run(self, connection_class, value_size): r = self.get_client() x = "a" * value_size r.set("benchmark", x) if __name__ == "__main__": CommandPackerBenchmark().run_benchmark()
CommandPackerBenchmark
python
PyCQA__pylint
tests/functional/a/assigning/assigning_non_slot.py
{ "start": 1119, "end": 1293 }
class ____: """ Using __dict__ in slots will be safe. """ __slots__ = ['__dict__', 'comp'] def __init__(self): self.comp = 4 self.missing = 5
Good2
python
python-poetry__poetry
src/poetry/inspection/lazy_wheel.py
{ "start": 1528, "end": 3663 }
class ____(LazyWheelUnsupportedError): """Invalid (e.g. corrupt) wheel.""" def __init__(self, location: str, name: str) -> None: self.location = location self.name = name def __str__(self) -> str: return f"Wheel {self.name} located at {self.location} is invalid." def metadata_from_wheel_url( name: str, url: str, session: Session | Authenticator ) -> RawMetadata: """Fetch metadata from the given wheel URL. This uses HTTP range requests to only fetch the portion of the wheel containing metadata, just enough for the object to be constructed. :raises HTTPRangeRequestUnsupportedError: if range requests are unsupported for ``url``. :raises InvalidWheelError: if the zip file contents could not be parsed. """ try: # After context manager exit, wheel.name will point to a deleted file path. # Add `delete_backing_file=False` to disable this for debugging. with LazyWheelOverHTTP(url, session) as lazy_file: metadata_bytes = lazy_file.read_metadata(name) metadata, _ = parse_email(metadata_bytes) return metadata except (BadZipFile, UnsupportedWheelError): # We assume that these errors have occurred because the wheel contents # themselves are invalid, not because we've messed up our bookkeeping # and produced an invalid file. raise InvalidWheelError(url, name) except Exception as e: if isinstance(e, LazyWheelUnsupportedError): # this is expected when the code handles issues with lazy wheel metadata retrieval correctly raise e logger.debug( "There was an unexpected %s when handling lazy wheel metadata retrieval for %s from %s: %s", type(e).__name__, name, url, e, ) # Catch all exception to handle any issues that may have occurred during # attempts to use Lazy Wheel. raise LazyWheelUnsupportedError( f"Attempts to use lazy wheel metadata retrieval for {name} from {url} failed" ) from e
InvalidWheelError
python
bokeh__bokeh
src/bokeh/models/formatters.py
{ "start": 8779, "end": 10976 }
class ____(TickFormatter): ''' Tick formatter based on a printf-style format string. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) format = String("%s", help=""" The number format, as defined as follows: the placeholder in the format string is marked by % and is followed by one or more of these elements, in this order: * An optional ``+`` sign Causes the result to be preceded with a plus or minus sign on numeric values. By default, only the ``-`` sign is used on negative numbers. * An optional padding specifier Specifies what (if any) character to use for padding. Possible values are 0 or any other character preceded by a ``'`` (single quote). The default is to pad with spaces. * An optional ``-`` sign Causes sprintf to left-align the result of this placeholder. The default is to right-align the result. * An optional number Specifies how many characters the result should have. If the value to be returned is shorter than this number, the result will be padded. * An optional precision modifier Consists of a ``.`` (dot) followed by a number, specifies how many digits should be displayed for floating point numbers. When used on a string, it causes the result to be truncated. * A type specifier Can be any of: - ``%`` --- yields a literal ``%`` character - ``b`` --- yields an integer as a binary number - ``c`` --- yields an integer as the character with that ASCII value - ``d`` or ``i`` --- yields an integer as a signed decimal number - ``e`` --- yields a float using scientific notation - ``u`` --- yields an integer as an unsigned decimal number - ``f`` --- yields a float as is - ``o`` --- yields an integer as an octal number - ``s`` --- yields a string as is - ``x`` --- yields an integer as a hexadecimal number (lower-case) - ``X`` --- yields an integer as a hexadecimal number (upper-case) """)
PrintfTickFormatter
python
sqlalchemy__sqlalchemy
test/orm/test_defaults.py
{ "start": 7437, "end": 13692 }
class ____(fixtures.MappedTest): """test that computed columns are recognized as server oninsert/onupdate defaults.""" __sparse_driver_backend__ = True __requires__ = ("computed_columns",) @classmethod def define_tables(cls, metadata): Table( "test", metadata, Column("id", Integer, primary_key=True), Column("foo", Integer), Column("bar", Integer, Computed("foo + 42")), ) @classmethod def setup_classes(cls): class Thing(cls.Basic): pass class ThingNoEager(cls.Basic): pass @classmethod def setup_mappers(cls): Thing = cls.classes.Thing cls.mapper_registry.map_imperatively( Thing, cls.tables.test, eager_defaults=True ) ThingNoEager = cls.classes.ThingNoEager cls.mapper_registry.map_imperatively( ThingNoEager, cls.tables.test, eager_defaults=False ) @testing.combinations(("eager", True), ("noneager", False), id_="ia") def test_insert_computed(self, eager): if eager: Thing = self.classes.Thing else: Thing = self.classes.ThingNoEager s = fixture_session() t1, t2 = (Thing(id=1, foo=5), Thing(id=2, foo=10)) s.add_all([t1, t2]) with assert_engine(testing.db) as asserter: s.flush() eq_(t1.bar, 5 + 42) eq_(t2.bar, 10 + 42) asserter.assert_( Conditional( eager and testing.db.dialect.insert_returning, [ Conditional( testing.db.dialect.insert_executemany_returning, [ RegexSQL( r"INSERT INTO test \(id, foo\) .*" r"VALUES \(.*\) .*" r"RETURNING test.bar, test.id", [{"foo": 5, "id": 1}, {"foo": 10, "id": 2}], dialect="postgresql", ), ], [ RegexSQL( r"INSERT INTO test \(id, foo\) .*" r"VALUES \(.*\) .*" r"RETURNING test.bar, test.id", [{"foo": 5, "id": 1}], dialect="postgresql", ), RegexSQL( r"INSERT INTO test \(id, foo\) .*" r"VALUES \(.*\) .*" r"RETURNING test.bar, test.id", [{"foo": 10, "id": 2}], dialect="postgresql", ), ], ) ], [ CompiledSQL( "INSERT INTO test (id, foo) VALUES (:id, :foo)", [{"foo": 5, "id": 1}, {"foo": 10, "id": 2}], ), CompiledSQL( "SELECT test.bar FROM test WHERE test.id = :pk_1", [{"pk_1": 1}], ), CompiledSQL( "SELECT test.bar FROM test WHERE test.id = 
:pk_1", [{"pk_1": 2}], ), ], ) ) @testing.combinations( ( "eagerload", True, testing.requires.computed_columns_on_update_returning, ), ( "noneagerload", False, ), id_="ia", ) def test_update_computed(self, eager): if eager: Thing = self.classes.Thing else: Thing = self.classes.ThingNoEager s = fixture_session() t1, t2 = (Thing(id=1, foo=1), Thing(id=2, foo=2)) s.add_all([t1, t2]) s.flush() t1.foo = 5 t2.foo = 6 with assert_engine(testing.db) as asserter: s.flush() eq_(t1.bar, 5 + 42) eq_(t2.bar, 6 + 42) if eager and testing.db.dialect.update_returning: asserter.assert_( CompiledSQL( "UPDATE test SET foo=%(foo)s " "WHERE test.id = %(test_id)s " "RETURNING test.bar", [{"foo": 5, "test_id": 1}], dialect="postgresql", ), CompiledSQL( "UPDATE test SET foo=%(foo)s " "WHERE test.id = %(test_id)s " "RETURNING test.bar", [{"foo": 6, "test_id": 2}], dialect="postgresql", ), ) elif eager: asserter.assert_( CompiledSQL( "UPDATE test SET foo=:foo WHERE test.id = :test_id", [{"foo": 5, "test_id": 1}, {"foo": 6, "test_id": 2}], enable_returning=False, ), CompiledSQL( "SELECT test.bar FROM test WHERE test.id = :pk_1", [{"pk_1": 1}], enable_returning=False, ), CompiledSQL( "SELECT test.bar FROM test WHERE test.id = :pk_1", [{"pk_1": 2}], enable_returning=False, ), ) else: asserter.assert_( CompiledSQL( "UPDATE test SET foo=:foo WHERE test.id = :test_id", [{"foo": 5, "test_id": 1}, {"foo": 6, "test_id": 2}], ), CompiledSQL( "SELECT test.bar FROM test WHERE test.id = :pk_1", [{"pk_1": 1}], ), CompiledSQL( "SELECT test.bar FROM test WHERE test.id = :pk_1", [{"pk_1": 2}], ), )
ComputedDefaultsOnUpdateTest
python
gevent__gevent
src/gevent/_config.py
{ "start": 18789, "end": 19314 }
class ____(AresSettingMixin, Setting): document = True name = 'ares_servers' default = None environment_key = 'GEVENTARES_SERVERS' desc = """\ A list of strings giving the IP addresses of nameservers for the ares resolver. In the environment variable, these strings are separated by commas. .. deprecated:: 1.3a2 Prefer the :attr:`resolver_nameservers` setting. If both are set, the results are not defined. """ # Generic nameservers, works for dnspython and ares.
AresServers
python
tensorflow__tensorflow
tensorflow/python/eager/backprop_test.py
{ "start": 62071, "end": 66718 }
class ____(test.TestCase, parameterized.TestCase): def _batch_jacobian(self, experimental_use_pfor): persistent = context.executing_eagerly and not experimental_use_pfor with backprop.GradientTape(persistent=persistent) as g: x = constant_op.constant([[1., 2.], [3., 4.]]) y = constant_op.constant([[3., 4.], [5., 6.]]) g.watch(x) z = x * x * y batch_jacobian = g.batch_jacobian( z, x, experimental_use_pfor=experimental_use_pfor) answer = array_ops_stack.stack( [array_ops.diag(2 * x[0] * y[0]), array_ops.diag(2 * x[1] * y[1])]) return batch_jacobian, answer def testPfor(self): batch_jacobian, answer = self._batch_jacobian(experimental_use_pfor=True) self.assertAllEqual(answer, batch_jacobian) def testWhileLoop(self): batch_jacobian, answer = self._batch_jacobian(experimental_use_pfor=False) self.assertAllEqual(answer, batch_jacobian) def testPforDefun(self): @def_function.function def _f(): return self._batch_jacobian(experimental_use_pfor=True) batch_jacobian, answer = _f() self.assertAllEqual(answer, batch_jacobian) def testWhileLoopDefun(self): @def_function.function def _f(): return self._batch_jacobian(experimental_use_pfor=False) batch_jacobian, answer = _f() self.assertAllEqual(answer, batch_jacobian) def testPersistentTape(self): if not context.executing_eagerly(): return with backprop.GradientTape() as g: x = constant_op.constant([[1.0, 2.0]]) g.watch(x) y = x * x with self.assertRaisesRegex(RuntimeError, 'persistent'): g.batch_jacobian(y, x, experimental_use_pfor=False) def testBadShape(self): x = random_ops.random_uniform([2, 3]) with backprop.GradientTape() as g: y = array_ops.concat([x, x], axis=0) with self.assertRaisesRegex(ValueError, 'Need first dimension'): g.batch_jacobian(y, x) def testBadInputRank(self): x = random_ops.random_uniform([2]) with backprop.GradientTape() as g: y = random_ops.random_uniform([2, 2]) with self.assertRaisesRegex(ValueError, 'must have rank at least 2'): g.batch_jacobian(y, x) def testBadOutputRank(self): x = 
random_ops.random_uniform([2, 2]) with backprop.GradientTape() as g: y = random_ops.random_uniform([2]) with self.assertRaisesRegex(ValueError, 'must have rank at least 2'): g.batch_jacobian(y, x) def test_parallel_iterations(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant([[1., 2], [3, 4]]) g.watch(x) w = constant_op.constant([[1., 2, 3, 4], [5, 6, 7, 8]]) y = math_ops.matmul(x, w) self.assertAllClose( g.batch_jacobian(y, x, parallel_iterations=2), g.batch_jacobian(y, x, parallel_iterations=3)) @parameterized.parameters((True, True), (True, False), (False, True), (False, False)) def test_degenerate_shape(self, use_function, use_pfor): def f(x): with backprop.GradientTape(persistent=True) as tape: tape.watch(x) y = x**2 return tape.batch_jacobian(y, x, experimental_use_pfor=use_pfor) if use_function: f = def_function.function(f) self.assertAllEqual([1, 0, 0], array_ops.shape(f(array_ops.zeros([1, 0])))) @parameterized.parameters((True,), (False)) def test_zeros_type_correct(self, use_pfor): for dtype in [dtypes.float32, dtypes.float64]: @def_function.function def f(x): del x return constant_op.constant([[1.]], dtype=dtype) # pylint: disable=cell-var-from-loop with backprop.GradientTape(persistent=True) as tape: x = constant_op.constant([[2.]], dtype=dtype) tape.watch(x) y = f(x) jac = tape.batch_jacobian(y, x, experimental_use_pfor=use_pfor) self.assertEqual(dtype, jac.dtype) self.assertAllClose([[[0.]]], jac) with backprop.GradientTape(persistent=True) as tape: x = constant_op.constant([[2.]], dtype=dtype) tape.watch(x) y = f(x) jac = tape.batch_jacobian(y, x, unconnected_gradients='zero', experimental_use_pfor=use_pfor) self.assertEqual(dtype, jac.dtype) self.assertAllClose([[[0.]]], jac) def test_strided_slice(self): x = array_ops.ones([2, 4, 2]) length = constant_op.constant([2, 3, 4, 4], dtype=dtypes.int64) with backprop.GradientTape() as tape: tape.watch(x) y = array_ops.repeat(x, [2], axis=1) y = y[:, 
:math_ops.reduce_max(length), :] tape.batch_jacobian(y, x)
BatchJacobianTest
python
getsentry__sentry
src/sentry/models/featureadoption.py
{ "start": 6195, "end": 8968 }
class ____(BaseManager["FeatureAdoption"]): cache_backend: FeatureAdoptionRedisBackend = cast( FeatureAdoptionRedisBackend, build_instance_from_options(settings.SENTRY_FEATURE_ADOPTION_CACHE_OPTIONS), ) def in_cache(self, organization_id, feature_id): return self.cache_backend.in_cache(organization_id, feature_id) def set_cache(self, organization_id, feature_id): return self.bulk_set_cache(organization_id, feature_id) def get_all_cache(self, organization_id): return self.cache_backend.get_all_cache(organization_id) def bulk_set_cache(self, organization_id, *args): return self.cache_backend.bulk_set_cache(organization_id, *args) def record(self, organization_id, feature_slug, **kwargs): try: feature_id = manager.get_by_slug(feature_slug).id except UnknownFeature as e: logger.exception(str(e)) return False if not self.in_cache(organization_id, feature_id): row, created = self.create_or_update( organization_id=organization_id, feature_id=feature_id, complete=True ) self.set_cache(organization_id, feature_id) return created return False def bulk_record(self, organization_id, feature_slugs, **kwargs): features = [] try: feature_ids = {manager.get_by_slug(slug).id for slug in feature_slugs} except UnknownFeature as e: logger.exception(str(e)) return False incomplete_feature_ids = feature_ids - self.get_all_cache(organization_id) if not incomplete_feature_ids: return False for feature_id in incomplete_feature_ids: features.append( FeatureAdoption( organization_id=organization_id, feature_id=feature_id, complete=True ) ) try: with transaction.atomic(router.db_for_write(FeatureAdoption)): self.bulk_create(features) except IntegrityError: # This can occur if redis somehow loses the set of complete features and # we attempt to insert duplicate (org_id, feature_id) rows # This also will happen if we get parallel processes running `bulk_record` and # `get_all_cache` returns in the second process before the first process # can `bulk_set_cache`. 
pass return self.bulk_set_cache(organization_id, *incomplete_feature_ids) def get_by_slug(self, organization, slug): return self.filter( organization=organization, feature_id=manager.get_by_slug(slug).id ).first() @region_silo_model
FeatureAdoptionManager
python
kamyu104__LeetCode-Solutions
Python/two-furthest-houses-with-different-colors.py
{ "start": 29, "end": 402 }
class ____(object): def maxDistance(self, colors): """ :type colors: List[int] :rtype: int """ result = 0 for i, x in enumerate(colors): if x != colors[0]: result = max(result, i) if x != colors[-1]: result = max(result, len(colors)-1-i) return result
Solution
python
ray-project__ray
python/ray/tune/tune_config.py
{ "start": 412, "end": 4089 }
class ____: """Tune specific configs. Args: metric: Metric to optimize. This metric should be reported with `tune.report()`. If set, will be passed to the search algorithm and scheduler. mode: Must be one of [min, max]. Determines whether objective is minimizing or maximizing the metric attribute. If set, will be passed to the search algorithm and scheduler. search_alg: Search algorithm for optimization. Default to random search. scheduler: Scheduler for executing the experiment. Choose among FIFO (default), MedianStopping, AsyncHyperBand, HyperBand and PopulationBasedTraining. Refer to ray.tune.schedulers for more options. num_samples: Number of times to sample from the hyperparameter space. Defaults to 1. If `grid_search` is provided as an argument, the grid will be repeated `num_samples` of times. If this is -1, (virtually) infinite samples are generated until a stopping condition is met. max_concurrent_trials: Maximum number of trials to run concurrently. Must be non-negative. If None or 0, no limit will be applied. This is achieved by wrapping the ``search_alg`` in a :class:`ConcurrencyLimiter`, and thus setting this argument will raise an exception if the ``search_alg`` is already a :class:`ConcurrencyLimiter`. Defaults to None. time_budget_s: Global time budget in seconds after which all trials are stopped. Can also be a ``datetime.timedelta`` object. reuse_actors: Whether to reuse actors between different trials when possible. This can drastically speed up experiments that start and stop actors often (e.g., PBT in time-multiplexing mode). This requires trials to have the same resource requirements. Defaults to ``False``. trial_name_creator: Optional function that takes in a Trial and returns its name (i.e. its string representation). Be sure to include some unique identifier (such as `Trial.trial_id`) in each trial's name. NOTE: This API is in alpha and subject to change. 
trial_dirname_creator: Optional function that takes in a trial and generates its trial directory name as a string. Be sure to include some unique identifier (such as `Trial.trial_id`) is used in each trial's directory name. Otherwise, trials could overwrite artifacts and checkpoints of other trials. The return value cannot be a path. NOTE: This API is in alpha and subject to change. chdir_to_trial_dir: Deprecated. Set the `RAY_CHDIR_TO_TRIAL_DIR` env var instead """ # Currently this is not at feature parity with `tune.run`, nor should it be. # The goal is to reach a fine balance between API flexibility and conciseness. # We should carefully introduce arguments here instead of just dumping everything. mode: Optional[str] = None metric: Optional[str] = None search_alg: Optional[Union[Searcher, SearchAlgorithm]] = None scheduler: Optional[TrialScheduler] = None num_samples: int = 1 max_concurrent_trials: Optional[int] = None time_budget_s: Optional[Union[int, float, datetime.timedelta]] = None reuse_actors: bool = False trial_name_creator: Optional[Callable[[Trial], str]] = None trial_dirname_creator: Optional[Callable[[Trial], str]] = None chdir_to_trial_dir: bool = _DEPRECATED_VALUE @DeveloperAPI @dataclass
TuneConfig
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
{ "start": 3783, "end": 5371 }
class ____(SQLiteDialect_pysqlite): driver = "pysqlcipher" supports_statement_cache = True pragmas = ("kdf_iter", "cipher", "cipher_page_size", "cipher_use_hmac") @classmethod def import_dbapi(cls): try: import sqlcipher3 as sqlcipher except ImportError: pass else: return sqlcipher from pysqlcipher3 import dbapi2 as sqlcipher return sqlcipher @classmethod def get_pool_class(cls, url): return pool.SingletonThreadPool def on_connect_url(self, url): super_on_connect = super().on_connect_url(url) # pull the info we need from the URL early. Even though URL # is immutable, we don't want any in-place changes to the URL # to affect things passphrase = url.password or "" url_query = dict(url.query) def on_connect(conn): cursor = conn.cursor() cursor.execute('pragma key="%s"' % passphrase) for prag in self.pragmas: value = url_query.get(prag, None) if value is not None: cursor.execute('pragma %s="%s"' % (prag, value)) cursor.close() if super_on_connect: super_on_connect(conn) return on_connect def create_connect_args(self, url): plain_url = url._replace(password=None) plain_url = plain_url.difference_update_query(self.pragmas) return super().create_connect_args(plain_url) dialect = SQLiteDialect_pysqlcipher
SQLiteDialect_pysqlcipher
python
mlflow__mlflow
mlflow/gateway/schemas/embeddings.py
{ "start": 199, "end": 374 }
class ____(RequestModel): input: str | list[str] | list[int] | list[list[int]] model_config = ConfigDict(json_schema_extra=_REQUEST_PAYLOAD_EXTRA_SCHEMA)
RequestPayload
python
walkccc__LeetCode
solutions/2558. Take Gifts From the Richest Pile/2558.py
{ "start": 0, "end": 287 }
class ____: def pickGifts(self, gifts: list[int], k: int) -> int: maxHeap = [-gift for gift in gifts] heapq.heapify(maxHeap) for _ in range(k): squaredMax = math.isqrt(-heapq.heappop(maxHeap)) heapq.heappush(maxHeap, -squaredMax) return -sum(maxHeap)
Solution
python
astropy__astropy
astropy/modeling/projections.py
{ "start": 15791, "end": 16099 }
class ____(Pix2SkyProjection, Zenithal): r""" Zenithal equidistant projection - pixel to sky. Corresponds to the ``ARC`` projection in FITS WCS. See `Zenithal` for a definition of the full transformation. .. math:: \theta = 90^\circ - R_\theta """
Pix2Sky_ZenithalEquidistant
python
getsentry__sentry
tests/sentry/api/endpoints/test_project_rule_actions.py
{ "start": 6265, "end": 13722 }
class ____(APITestCase, BaseWorkflowTest): endpoint = "sentry-api-0-project-rule-actions" method = "POST" def setUp(self) -> None: super().setUp() self.login_as(self.user) self.workflow = self.create_workflow() def setup_pd_service(self) -> PagerDutyServiceDict: service_info = { "type": "service", "integration_key": "PND123", "service_name": "Sentry Service", } _integration, org_integration = self.create_provider_integration_for( provider="pagerduty", name="Example PagerDuty", external_id="example-pd", metadata={"services": [service_info]}, organization=self.organization, user=self.user, ) with assume_test_silo_mode(SiloMode.CONTROL): return add_service( org_integration, service_name=service_info["service_name"], integration_key=service_info["integration_key"], ) @mock.patch.object(NotifyEventAction, "after") @mock.patch( "sentry.notifications.notification_action.registry.issue_alert_handler_registry.get", return_value=PluginIssueAlertHandler, ) def test_actions(self, mock_get_handler, action) -> None: action_data = [ { "id": "sentry.rules.actions.notify_event.NotifyEventAction", } ] self.get_success_response(self.organization.slug, self.project.slug, actions=action_data) assert action.called def test_unknown_action_returns_400(self) -> None: action_data = [ { "id": "sentry.rules.actions.fake_action.FakeAction", } ] response = self.get_error_response( self.organization.slug, self.project.slug, actions=action_data ) assert response.status_code == 400 @mock.patch.object(PagerDutyClient, "send_trigger") @mock.patch( "sentry.notifications.notification_action.registry.group_type_notification_registry.get", return_value=IssueAlertRegistryHandler, ) @mock.patch( "sentry.notifications.notification_action.registry.issue_alert_handler_registry.get", return_value=PagerDutyIssueAlertHandler, ) def test_name_action_default( self, mock_get_issue_alert_handler, mock_get_group_type_handler, mock_send_trigger ): """ Tests that label will be used as 'Test Alert' if not present. 
Uses PagerDuty since those notifications will differ based on the name of the alert. """ service_info = self.setup_pd_service() action_data = [ { "account": service_info["integration_id"], "service": service_info["id"], "severity": "info", "id": "sentry.integrations.pagerduty.notify_action.PagerDutyNotifyServiceAction", } ] self.get_success_response(self.organization.slug, self.project.slug, actions=action_data) assert mock_send_trigger.call_count == 1 pagerduty_data = mock_send_trigger.call_args.kwargs.get("data") assert pagerduty_data["payload"]["summary"].startswith("[Test Alert]:") @mock.patch.object(PagerDutyClient, "send_trigger") @mock.patch( "sentry.notifications.notification_action.registry.group_type_notification_registry.get", return_value=IssueAlertRegistryHandler, ) @mock.patch( "sentry.notifications.notification_action.registry.issue_alert_handler_registry.get", return_value=PagerDutyIssueAlertHandler, ) def test_name_action_with_custom_name( self, mock_get_issue_alert_handler, mock_get_group_type_handler, mock_send_trigger ): """ Tests that custom names can be provided to the test notification. Uses PagerDuty since those notifications will differ based on the name of the alert. 
""" service_info = self.setup_pd_service() action_data = [ { "account": service_info["integration_id"], "service": service_info["id"], "severity": "info", "id": "sentry.integrations.pagerduty.notify_action.PagerDutyNotifyServiceAction", } ] custom_alert_name = "Check #feed-issues" self.get_success_response( self.organization.slug, self.project.slug, actions=action_data, name=custom_alert_name ) assert mock_send_trigger.call_count == 1 pagerduty_data = mock_send_trigger.call_args.kwargs.get("data") assert pagerduty_data["payload"]["summary"].startswith(f"[{custom_alert_name}]:") @mock.patch.object(JiraIntegration, "create_issue") @mock.patch.object(sentry_sdk, "capture_exception") def test_sample_event_raises_exceptions_workflow_engine( self, mock_sdk_capture, mock_create_issue ): with assume_test_silo_mode(SiloMode.CONTROL): self.jira_integration = self.create_provider_integration( provider="jira", name="Jira", external_id="jira:1" ) self.jira_integration.add_organization(self.organization, self.user) form_errors = {"broken": "something went wrong"} mock_create_issue.side_effect = IntegrationFormError(form_errors) mock_sdk_capture.return_value = "abc-1234" action_data = [ { "id": "sentry.integrations.jira.notify_action.JiraCreateTicketAction", "dynamic_form_fields": { "fake_field": "fake_value", }, } ] response = self.get_error_response( self.organization.slug, self.project.slug, actions=action_data ) assert response.status_code == 400 assert mock_create_issue.call_count == 1 assert response.data == {"actions": [str(form_errors)]} mock_create_issue.side_effect = Exception("Something went wrong") response = self.get_error_response( self.organization.slug, self.project.slug, actions=action_data ) actions = response.data.get("actions") assert actions is not None assert actions == ["An unexpected error occurred. 
Error ID: 'abc-1234'"] def test_no_events(self) -> None: response = self.get_response(self.organization.slug, self.project.slug) assert response.status_code == 400 @mock.patch.object(MailAdapter, "notify") @mock.patch( "sentry.notifications.notification_action.registry.group_type_notification_registry.get", return_value=IssueAlertRegistryHandler, ) @mock.patch( "sentry.notifications.notification_action.registry.issue_alert_handler_registry.get", return_value=EmailIssueAlertHandler, ) def test_email_action( self, mock_get_issue_alert_handler, mock_get_group_type_handler, mock_notify ) -> None: action_data = [ { "id": "sentry.mail.actions.NotifyEmailAction", "targetIdentifier": str(self.user.id), "targetType": "Member", } ] self.get_success_response(self.organization.slug, self.project.slug, actions=action_data) assert mock_notify.call_count == 1
ProjectRuleActionsEndpointWorkflowEngineTest
python
facebookresearch__faiss
tests/test_residual_quantizer.py
{ "start": 24135, "end": 26544 }
class ____(unittest.TestCase): def test_search_IP(self): ds = datasets.SyntheticDataset(32, 1000, 200, 100) xt = ds.get_train() xb = ds.get_database() xq = ds.get_queries() ir = faiss.IndexResidualQuantizer( ds.d, 3, 4, faiss.METRIC_INNER_PRODUCT) ir.rq.train_type = faiss.ResidualQuantizer.Train_default ir.train(xt) ir.add(xb) Dref, Iref = ir.search(xq, 4) AQ = faiss.AdditiveQuantizer ir2 = faiss.IndexResidualQuantizer( ds.d, 3, 4, faiss.METRIC_INNER_PRODUCT, AQ.ST_LUT_nonorm) ir2.rq.codebooks = ir.rq.codebooks # fake training ir2.rq.is_trained = True ir2.is_trained = True ir2.add(xb) D2, I2 = ir2.search(xq, 4) np.testing.assert_array_equal(Iref, I2) np.testing.assert_array_almost_equal(Dref, D2, decimal=5) def test_search_L2(self): ds = datasets.SyntheticDataset(32, 1000, 200, 100) xt = ds.get_train() xb = ds.get_database() xq = ds.get_queries() gt = ds.get_groundtruth(10) ir = faiss.IndexResidualQuantizer(ds.d, 3, 4) ir.rq.train_type = faiss.ResidualQuantizer.Train_default ir.rq.max_beam_size = 30 ir.train(xt) # reference run w/ decoding ir.add(xb) Dref, Iref = ir.search(xq, 10) # 388 inter_ref = faiss.eval_intersection(Iref, gt) AQ = faiss.AdditiveQuantizer for st in AQ.ST_norm_float, AQ.ST_norm_qint8, AQ.ST_norm_qint4, \ AQ.ST_norm_cqint8, AQ.ST_norm_cqint4: ir2 = faiss.IndexResidualQuantizer(ds.d, 3, 4, faiss.METRIC_L2, st) ir2.rq.max_beam_size = 30 ir2.train(xt) # to get the norm bounds ir2.rq.codebooks = ir.rq.codebooks # fake training ir2.add(xb) D2, I2 = ir2.search(xq, 10) if st == AQ.ST_norm_float: np.testing.assert_array_almost_equal(Dref, D2, decimal=5) self.assertLess((Iref != I2).sum(), Iref.size * 0.05) else: inter_2 = faiss.eval_intersection(I2, gt) self.assertGreaterEqual(inter_ref, inter_2) ########################################################### # IVF version ###########################################################
TestIndexResidualQuantizerSearch
python
agronholm__apscheduler
src/apscheduler/_exceptions.py
{ "start": 858, "end": 1103 }
class ____(Exception): """ Raised by :meth:`~Scheduler.get_job_result` if the job result is not ready. """ def __init__(self, job_id: UUID): super().__init__(f"No job by the id of {job_id} was found")
JobResultNotReady
python
dask__dask
dask/array/_array_expr/_creation.py
{ "start": 4444, "end": 4514 }
class ____(BroadcastTrick): func = staticmethod(np.zeros_like)
Zeros
python
kamyu104__LeetCode-Solutions
Python/escape-the-spreading-fire.py
{ "start": 64, "end": 2136 }
class ____(object): def maximumMinutes(self, grid): """ :type grid: List[List[int]] :rtype: int """ DIRECTIONS = [(0, 1), (1, 0), (0, -1), (-1, 0)] GRASS, FIRE, WALL, PERSON = range(4) INF = 10**9 def bfs(grid): time = collections.defaultdict(int) d = 0 q = [(r, c, FIRE) for r in xrange(len(grid)) for c in xrange(len(grid[0])) if grid[r][c] == FIRE] q.append((0, 0, PERSON)) while q: new_q = [] for r, c, t in q: for dr, dc in DIRECTIONS: nr, nc = r+dr, c+dc if not (0 <= nr < len(grid) and 0 <= nc < len(grid[0]) and grid[nr][nc] != WALL and ((t == FIRE and grid[nr][nc] != FIRE) or (t == PERSON and (grid[nr][nc] == GRASS or (grid[nr][nc] == FIRE and (nr, nc) == (len(grid)-1, len(grid[0])-1) and d+1 == time[FIRE, nr, nc]))))): continue if grid[nr][nc] != FIRE: grid[nr][nc] = t if (nr, nc) in ((len(grid)-1, len(grid[0])-1), (len(grid)-1, len(grid[0])-2), (len(grid)-2, len(grid[0])-1)): time[t, nr, nc] = d+1 new_q.append((nr, nc, t)) q = new_q d += 1 return time time = bfs(grid) if not time[PERSON, len(grid)-1, len(grid[0])-1]: return -1 if not time[FIRE, len(grid)-1, len(grid[0])-1]: return INF diff = time[FIRE, len(grid)-1, len(grid[0])-1]-time[PERSON, len(grid)-1, len(grid[0])-1] return diff if diff+2 in (time[FIRE, len(grid)-1, len(grid[0])-2]-time[PERSON, len(grid)-1, len(grid[0])-2], time[FIRE, len(grid)-2, len(grid[0])-1]-time[PERSON, len(grid)-2, len(grid[0])-1]) else diff-1 # Time: O(m * n) # Space: O(m * n) # bfs
Solution
python
mwaskom__seaborn
tests/test_distributions.py
{ "start": 4968, "end": 10226 }
class ____(SharedAxesLevelTests): func = staticmethod(rugplot) def get_last_color(self, ax, **kwargs): return ax.collections[-1].get_color() def assert_rug_equal(self, a, b): assert_array_equal(a.get_segments(), b.get_segments()) @pytest.mark.parametrize("variable", ["x", "y"]) def test_long_data(self, long_df, variable): vector = long_df[variable] vectors = [ variable, vector, np.asarray(vector), vector.to_list(), ] f, ax = plt.subplots() for vector in vectors: rugplot(data=long_df, **{variable: vector}) for a, b in itertools.product(ax.collections, ax.collections): self.assert_rug_equal(a, b) def test_bivariate_data(self, long_df): f, (ax1, ax2) = plt.subplots(ncols=2) rugplot(data=long_df, x="x", y="y", ax=ax1) rugplot(data=long_df, x="x", ax=ax2) rugplot(data=long_df, y="y", ax=ax2) self.assert_rug_equal(ax1.collections[0], ax2.collections[0]) self.assert_rug_equal(ax1.collections[1], ax2.collections[1]) def test_wide_vs_long_data(self, wide_df): f, (ax1, ax2) = plt.subplots(ncols=2) rugplot(data=wide_df, ax=ax1) for col in wide_df: rugplot(data=wide_df, x=col, ax=ax2) wide_segments = np.sort( np.array(ax1.collections[0].get_segments()) ) long_segments = np.sort( np.concatenate([c.get_segments() for c in ax2.collections]) ) assert_array_equal(wide_segments, long_segments) def test_flat_vector(self, long_df): f, ax = plt.subplots() rugplot(data=long_df["x"]) rugplot(x=long_df["x"]) self.assert_rug_equal(*ax.collections) def test_datetime_data(self, long_df): ax = rugplot(data=long_df["t"]) vals = np.stack(ax.collections[0].get_segments())[:, 0, 0] assert_array_equal(vals, mpl.dates.date2num(long_df["t"])) def test_empty_data(self): ax = rugplot(x=[]) assert not ax.collections def test_a_deprecation(self, flat_series): f, ax = plt.subplots() with pytest.warns(UserWarning): rugplot(a=flat_series) rugplot(x=flat_series) self.assert_rug_equal(*ax.collections) @pytest.mark.parametrize("variable", ["x", "y"]) def test_axis_deprecation(self, flat_series, variable): f, 
ax = plt.subplots() with pytest.warns(UserWarning): rugplot(flat_series, axis=variable) rugplot(**{variable: flat_series}) self.assert_rug_equal(*ax.collections) def test_vertical_deprecation(self, flat_series): f, ax = plt.subplots() with pytest.warns(UserWarning): rugplot(flat_series, vertical=True) rugplot(y=flat_series) self.assert_rug_equal(*ax.collections) def test_rug_data(self, flat_array): height = .05 ax = rugplot(x=flat_array, height=height) segments = np.stack(ax.collections[0].get_segments()) n = flat_array.size assert_array_equal(segments[:, 0, 1], np.zeros(n)) assert_array_equal(segments[:, 1, 1], np.full(n, height)) assert_array_equal(segments[:, 1, 0], flat_array) def test_rug_colors(self, long_df): ax = rugplot(data=long_df, x="x", hue="a") order = categorical_order(long_df["a"]) palette = color_palette() expected_colors = np.ones((len(long_df), 4)) for i, val in enumerate(long_df["a"]): expected_colors[i, :3] = palette[order.index(val)] assert_array_equal(ax.collections[0].get_color(), expected_colors) def test_expand_margins(self, flat_array): f, ax = plt.subplots() x1, y1 = ax.margins() rugplot(x=flat_array, expand_margins=False) x2, y2 = ax.margins() assert x1 == x2 assert y1 == y2 f, ax = plt.subplots() x1, y1 = ax.margins() height = .05 rugplot(x=flat_array, height=height) x2, y2 = ax.margins() assert x1 == x2 assert y1 + height * 2 == pytest.approx(y2) def test_multiple_rugs(self): values = np.linspace(start=0, stop=1, num=5) ax = rugplot(x=values) ylim = ax.get_ylim() rugplot(x=values, ax=ax, expand_margins=False) assert ylim == ax.get_ylim() def test_matplotlib_kwargs(self, flat_series): lw = 2 alpha = .2 ax = rugplot(y=flat_series, linewidth=lw, alpha=alpha) rug = ax.collections[0] assert np.all(rug.get_alpha() == alpha) assert np.all(rug.get_linewidth() == lw) def test_axis_labels(self, flat_series): ax = rugplot(x=flat_series) assert ax.get_xlabel() == flat_series.name assert not ax.get_ylabel() def test_log_scale(self, long_df): ax1, 
ax2 = plt.figure().subplots(2) ax2.set_xscale("log") rugplot(data=long_df, x="z", ax=ax1) rugplot(data=long_df, x="z", ax=ax2) rug1 = np.stack(ax1.collections[0].get_segments()) rug2 = np.stack(ax2.collections[0].get_segments()) assert_array_almost_equal(rug1, rug2)
TestRugPlot
python
great-expectations__great_expectations
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_diff_less_than_threshold.py
{ "start": 924, "end": 5506 }
class ____(DataProfilerProfileMetricProvider): metric_name = "data_profiler.profile_numeric_columns_diff_less_than_threshold" value_keys = ( "profile_path", "limit_check_report_keys", "numerical_diff_statistics", ) @metric_value(engine=PandasExecutionEngine) def _pandas( # noqa: C901 - too complex cls, execution_engine: PandasExecutionEngine, metric_domain_kwargs: Dict, metric_value_kwargs: Dict, metrics: Dict[str, Any], runtime_configuration: Dict, ): profile_diff = metrics.get("data_profiler.profile_diff") numeric_columns = metrics.get("data_profiler.profile_numeric_columns") limit_check_report_keys = metric_value_kwargs["limit_check_report_keys"] numerical_diff_statistics = metric_value_kwargs["numerical_diff_statistics"] columns = list(profile_diff["global_stats"]["profile_schema"][1].keys()) data_stats = profile_diff["data_stats"] requested_columns = {} # Adds columns if generic column key is provided # Note: Copy is required for all metric arguments to ensure metric_value_id is identified correctly limit_check_report_keys_copy = copy.deepcopy(limit_check_report_keys) limit_check_report_keys_copy = replace_generic_operator_in_report_keys( limit_check_report_keys_copy, numeric_columns ) for col, stats in limit_check_report_keys_copy.items(): if col not in numeric_columns: # Makes sure column requested is numeric requested_columns[col] = "Column is Non-Numeric" continue # adds stats if generic stat key is provided numerical_diff_statistics_copy = copy.deepcopy(numerical_diff_statistics) stats = replace_generic_operator_in_report_keys(stats, numerical_diff_statistics_copy) if col not in columns: # Makes sure column exists within profile schema requested_columns[col] = "Column requested was not found." 
continue col_data_stats = {} for data_stat in data_stats: if data_stat["column_name"] == col: col_data_stats = data_stat["statistics"] break requested_columns[col] = {} for stat, threshold in stats.items(): if stat not in col_data_stats: requested_columns[col][stat] = "Statistic requested was not found." continue diff_val = col_data_stats[stat] if diff_val == "unchanged": # In the case there is no delta diff_val = 0 is_less = is_value_less_than_threshold(diff_val, threshold) if not is_less: requested_columns[col][stat] = { "threshold": threshold, "value_found": diff_val, } else: requested_columns[col][stat] = True return requested_columns @classmethod def _get_evaluation_dependencies( cls, metric: MetricConfiguration, configuration: Optional[ExpectationConfiguration] = None, execution_engine: Optional[ExecutionEngine] = None, runtime_configuration: Optional[dict] = None, ): """ Returns a dictionary of given metric names and their corresponding configuration, specifying the metric types and their respective domains""" dependencies: dict = super()._get_evaluation_dependencies( metric=metric, configuration=configuration, execution_engine=execution_engine, runtime_configuration=runtime_configuration, ) if metric.metric_name == "data_profiler.profile_numeric_columns_diff_less_than_threshold": dependencies["data_profiler.profile_diff"] = MetricConfiguration( metric_name="data_profiler.profile_diff", metric_domain_kwargs=metric.metric_domain_kwargs, metric_value_kwargs=metric.metric_value_kwargs, ) dependencies["data_profiler.profile_numeric_columns"] = MetricConfiguration( metric_name="data_profiler.profile_numeric_columns", metric_domain_kwargs=metric.metric_domain_kwargs, metric_value_kwargs=metric.metric_value_kwargs, ) return dependencies
DataProfilerProfileNumericColumnsDiffLessThanThreshold
python
getsentry__sentry
src/sentry/audit_log/manager.py
{ "start": 1304, "end": 2091 }
class ____: # Unique event ID (ex. 1) event_id: int # Unique event name (ex. MEMBER_INVITE) name: str # Unique event api name (ex. member.invite) api_name: str # Simple template for rendering the audit log message using # the AuditLogEntry.data fields. For more complicated messages, # subclass AuditLogEvent and override the render method. template: str | None = None def __init__(self, event_id, name, api_name, template=None): self.event_id = event_id self.name = name self.api_name = api_name self.template = template def render(self, audit_log_entry: AuditLogEntry) -> str: if not self.template: return "" return self.template.format(**audit_log_entry.data)
AuditLogEvent
python
huggingface__transformers
src/transformers/models/maskformer/convert_maskformer_original_pytorch_checkpoint_to_pytorch.py
{ "start": 2877, "end": 3235 }
class ____: """Fake command line arguments needed by maskformer/detectron implementation""" config_file: str def setup_cfg(args: Args): # load config from file and command-line arguments cfg = get_cfg() add_deeplab_config(cfg) add_mask_former_config(cfg) cfg.merge_from_file(args.config_file) cfg.freeze() return cfg
Args
python
django-crispy-forms__django-crispy-forms
crispy_forms/bootstrap.py
{ "start": 30366, "end": 32432 }
class ____(Div): """ Generates markup in the form of an alert dialog. Attributes ---------- template: str The default template which this Layout Object will be rendered with. css_class : str The CSS classes to be applied to the alert. By default "alert". Parameters ---------- content : str The content of the alert. dismiss : bool If true the alert contains a button to dismiss the alert. By default True. block : str, optional Additional CSS classes to be applied to the ``<button>``. By default None. css_id : str, optional A DOM id for the layout object which will be added to the alert if provided. By default None. css_class : str, optional Additional CSS classes to be applied in addition to those declared by the class itself. By default None. template : str, optional Overrides the default template, if provided. By default None. **kwargs : dict, optional Additional attributes are passed to ``flatatt`` and converted into key="value", pairs. These attributes are then available in the template context. Examples -------- Example:: Alert(content='<strong>Warning!</strong> Best check yo self, you're not looking too good.') """ template = "%s/layout/alert.html" css_class = "alert" def __init__(self, content, dismiss=True, block=False, css_id=None, css_class=None, template=None, **kwargs): fields = [] if block: self.css_class += " alert-block" super().__init__(*fields, css_id=css_id, css_class=css_class, template=template, **kwargs) self.content = content self.dismiss = dismiss def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs): template = self.get_template_name(template_pack) context.update({"alert": self, "content": self.content, "dismiss": self.dismiss}) return render_to_string(template, context.flatten())
Alert
python
ray-project__ray
python/ray/_common/usage/usage_lib.py
{ "start": 2933, "end": 5658 }
class ____: """Usage stats to report""" #: The schema version of the report. schema_version: str #: The source of the data (i.e. OSS). source: str #: When the data is collected and reported. collect_timestamp_ms: int #: The total number of successful reports for the lifetime of the cluster. total_success: Optional[int] = None #: The total number of failed reports for the lifetime of the cluster. total_failed: Optional[int] = None #: The sequence number of the report. seq_number: Optional[int] = None #: The Ray version in use. ray_version: Optional[str] = None #: The Python version in use. python_version: Optional[str] = None #: A random id of the cluster session. session_id: Optional[str] = None #: The git commit hash of Ray (i.e. ray.__commit__). git_commit: Optional[str] = None #: The operating system in use. os: Optional[str] = None #: When the cluster is started. session_start_timestamp_ms: Optional[int] = None #: The cloud provider found in the cluster.yaml file (e.g., aws). cloud_provider: Optional[str] = None #: The min_workers found in the cluster.yaml file. min_workers: Optional[int] = None #: The max_workers found in the cluster.yaml file. max_workers: Optional[int] = None #: The head node instance type found in the cluster.yaml file (e.g., i3.8xlarge). head_node_instance_type: Optional[str] = None #: The worker node instance types found in the cluster.yaml file (e.g., i3.8xlarge). worker_node_instance_types: Optional[List[str]] = None #: The total num of cpus in the cluster. total_num_cpus: Optional[int] = None #: The total num of gpus in the cluster. total_num_gpus: Optional[int] = None #: The total size of memory in the cluster. total_memory_gb: Optional[float] = None #: The total size of object store memory in the cluster. total_object_store_memory_gb: Optional[float] = None #: The Ray libraries that are used (e.g., rllib). 
library_usages: Optional[List[str]] = None #: The extra tags to report when specified by an # environment variable RAY_USAGE_STATS_EXTRA_TAGS extra_usage_tags: Optional[Dict[str, str]] = None #: The number of alive nodes when the report is generated. total_num_nodes: Optional[int] = None #: The total number of running jobs excluding internal ones # when the report is generated. total_num_running_jobs: Optional[int] = None #: The libc version in the OS. libc_version: Optional[str] = None #: The hardwares that are used (e.g. Intel Xeon). hardware_usages: Optional[List[str]] = None @dataclass(init=True)
UsageStatsToReport
python
run-llama__llama_index
llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/events.py
{ "start": 2379, "end": 2484 }
class ____(StepFinishedEvent, Event):
    """ag-ui STEP_FINISHED event, made usable inside llama-index workflows
    by also deriving from the workflow ``Event`` base class."""

    # Wire-level discriminator identifying this event kind.
    type: EventType = EventType.STEP_FINISHED
StepFinishedWorkflowEvent
python
tensorflow__tensorflow
tensorflow/python/debug/lib/debug_events_reader.py
{ "start": 1170, "end": 12209 }
class ____:
  """Reader class for a tfdbg v2 DebugEvents directory."""

  # Number of digests after which a read lock is released and re-acquired during
  # serial reading of digests for SourceFiles, Execution, and
  # GraphExecutionTrace. This allows us to avoid releasing and re-acquiring the
  # lock too often (i.e., after each digest) and to minimize performance
  # penalty.
  _READER_RELEASE_PER = 100

  # File-name suffixes of the tfdbg v2 file set members.
  _METADATA_SUFFIX = ".metadata"
  _SOURCE_FILE_SUFFIX = ".source_files"
  _STACK_FRAMES_SUFFIX = ".stack_frames"
  _GRAPHS_SUFFIX = ".graphs"
  _EXECUTION_SUFFIX = ".execution"
  _GRAPH_EXECUTION_TRACES_SUFFIX = ".graph_execution_traces"

  def __init__(self, dump_root):
    # dump_root: directory holding one or more tfdbg v2 file sets.
    if not file_io.is_directory(dump_root):
      raise ValueError("Specified dump_root is not a directory: %s" % dump_root)
    self._dump_root = dump_root
    self._metadata_paths = self._load_metadata_files()

    prefixes = [
        metadata_path[:-len(self._METADATA_SUFFIX)]
        for metadata_path in self._metadata_paths
    ]
    prefix = prefixes[0]  # This is the prefix of the main file set.
    self._source_files_path = compat.as_bytes(prefix + self._SOURCE_FILE_SUFFIX)
    self._stack_frames_path = compat.as_bytes(prefix + self._STACK_FRAMES_SUFFIX)
    self._graphs_path = compat.as_bytes(prefix + self._GRAPHS_SUFFIX)
    self._execution_path = compat.as_bytes(prefix + self._EXECUTION_SUFFIX)
    # There can be multiple .graph_execution_trace files each belonging
    # to a file set generated on an individual host, in the case of
    # a distributed TensorFlow job.
    # This is different from the other debug event files in the file set.
    self._graph_execution_traces_paths = [
        compat.as_bytes(prefix + self._GRAPH_EXECUTION_TRACES_SUFFIX)
        for prefix in prefixes
    ]
    self._readers = dict()  # A map from file path to reader.
    # A map from file path to current reading offset.
    self._reader_offsets = dict()
    # Lock for reader creation.
    self._readers_lock = threading.Lock()
    # Locks for read operation on individual readers.
    self._reader_read_locks = dict()

    # NOTE(review): appears unused in this class body — confirm before removal.
    self._offsets = dict()

  def _load_metadata_files(self):
    """Load and parse metadata files in the dump root.

    Check that all metadata files have a common tfdbg_run_id, and raise
    a ValueError if their tfdbg_run_ids differ.

    Returns:
      A list of metadata file paths in ascending order of their starting
        wall_time timestamp.
    """
    metadata_paths = file_io.get_matching_files(
        os.path.join(self._dump_root, "*%s" % self._METADATA_SUFFIX))
    if not metadata_paths:
      raise ValueError("Cannot find any tfdbg metadata file in directory: %s" %
                       self._dump_root)
    wall_times = []
    run_ids = []
    tensorflow_versions = []
    file_versions = []
    for metadata_path in metadata_paths:
      reader = tf_record.tf_record_random_reader(metadata_path)
      try:
        # Only the first record of each metadata file is needed.
        record = reader.read(0)[0]
        debug_event = debug_event_pb2.DebugEvent.FromString(record)
        wall_times.append(debug_event.wall_time)
        run_ids.append(debug_event.debug_metadata.tfdbg_run_id)
        tensorflow_versions.append(
            debug_event.debug_metadata.tensorflow_version
        )
        file_versions.append(debug_event.debug_metadata.file_version)
      except Exception as e:
        raise errors.DataLossError(
            None,
            None,
            "Error reading tfdbg metadata from paths %s" % metadata_paths,
        ) from e
      finally:
        reader.close()
    # Defaults taken from the first file; overridden below when there are
    # multiple file sets.
    self._starting_wall_time = wall_times[0]
    self._tfdbg_run_id = run_ids[0]
    self._tensorflow_version = tensorflow_versions[0]
    self._file_version = file_versions[0]
    if len(metadata_paths) == 1:
      # Fast path for a common case (only one DebugEvent file set.)
      return metadata_paths
    num_no_id = len([run_id for run_id in run_ids if not run_id])
    if num_no_id:
      paths_without_run_id = [
          metadata_path
          for metadata_path, run_id in zip(metadata_paths, run_ids)
          if not run_id
      ]
      raise ValueError(
          "Found %d tfdbg metadata files and %d of them do not "
          "have tfdbg run ids. The metadata files without run ids are: %s" %
          (len(run_ids), num_no_id, paths_without_run_id))
    elif len(set(run_ids)) != 1:
      raise ValueError(
          "Unexpected: Found multiple (%d) tfdbg2 runs in directory %s" %
          (len(set(run_ids)), self._dump_root))
    # Return the metadata files in ascending order of their timestamps.
    paths_and_timestamps = sorted(
        zip(metadata_paths, wall_times), key=lambda t: t[1])
    self._starting_wall_time = paths_and_timestamps[0][1]
    return [path[0] for path in paths_and_timestamps]

  def starting_wall_time(self):
    """Get the starting timestamp of the instrumented TensorFlow program.

    When there are multiple hosts (i.e., multiple tfdbg file sets), the
    earliest timestamp among the file sets is returned. It is assumed to
    be the job that starts first (e.g., the coordinator).

    Returns:
      Starting timestamp in seconds since the epoch, as a float.
    """
    return self._starting_wall_time

  def tfdbg_run_id(self):
    """Get the run ID of the instrumented TensorFlow program."""
    return self._tfdbg_run_id

  def tensorflow_version(self):
    """Get the version string of TensorFlow that the debugged program ran on."""
    return self._tensorflow_version

  def tfdbg_file_version(self):
    """Get the tfdbg file format version."""
    return self._file_version

  def __enter__(self):
    return self

  def __exit__(self, exception_type, exception_value, traceback):
    del exception_type, exception_value, traceback  # Unused
    self.close()

  def _generic_iterator(self, file_path):
    """A helper method that makes an iterator given a debug-events file path.

    Repeated calls to this method create iterators that remember the last
    successful reading position (offset) for each given `file_path`. So the
    iterators are meant for incremental reading of the file.

    Args:
      file_path: Path to the file to create the iterator for.

    Yields:
      A tuple of (offset, debug_event_proto) on each `next()` call.
    """
    yield_count = 0
    reader = self._get_reader(file_path)
    read_lock = self._reader_read_locks[file_path]
    read_lock.acquire()
    try:
      while True:
        current_offset = self._reader_offsets[file_path]
        try:
          record, self._reader_offsets[file_path] = reader.read(current_offset)
        except (errors.DataLossError, IndexError):
          # We ignore partial read exceptions, because a record may be
          # truncated. The PyRandomRecordReader throws an `IndexError` when
          # offset goes out of bound.
          break
        yield DebugEventWithOffset(
            debug_event=debug_event_pb2.DebugEvent.FromString(record),
            offset=current_offset)
        yield_count += 1
        # The read lock must be periodically released to allow for concurrent
        # random reads. But we do so at a number of reads, instead of after
        # every single read, in order to minimize the performance penalty.
        if yield_count % self._READER_RELEASE_PER == 0:
          read_lock.release()
          read_lock.acquire()
    finally:
      read_lock.release()

  def _get_reader(self, file_path):
    """Get a random-access reader for TFRecords file at file_path."""
    file_path = compat.as_bytes(file_path)
    # The following code uses the double-checked locking pattern to optimize
    # the common case (where the reader is already initialized).
    if file_path not in self._readers:  # 1st check, without lock.
      with self._readers_lock:
        if file_path not in self._readers:  # 2nd check, with lock.
          self._readers[file_path] = tf_record.tf_record_random_reader(
              file_path)
          self._reader_read_locks[file_path] = threading.Lock()
          self._reader_offsets[file_path] = 0
    return self._readers[file_path]

  def source_files_iterator(self):
    return self._generic_iterator(self._source_files_path)

  def stack_frames_iterator(self):
    return self._generic_iterator(self._stack_frames_path)

  def graphs_iterator(self):
    return self._generic_iterator(self._graphs_path)

  def read_source_files_event(self, offset):
    """Read a DebugEvent proto at given offset from the .source_files file."""
    with self._reader_read_locks[self._source_files_path]:
      proto_string = self._get_reader(self._source_files_path).read(offset)[0]
    return debug_event_pb2.DebugEvent.FromString(proto_string)

  def read_graphs_event(self, offset):
    """Read a DebugEvent proto at a given offset from the .graphs file.

    Args:
      offset: Offset to read the DebugEvent proto from.

    Returns:
      A DebugEventProto.

    Raises:
      `errors.DataLossError` if offset is at a wrong location.
      `IndexError` if offset is out of range of the file.
    """
    return debug_event_pb2.DebugEvent.FromString(
        self._get_reader(self._graphs_path).read(offset)[0])

  def execution_iterator(self):
    return self._generic_iterator(self._execution_path)

  def read_execution_event(self, offset):
    """Read a DebugEvent proto at a given offset from the .execution file.

    Args:
      offset: Offset to read the DebugEvent proto from.

    Returns:
      A DebugEventProto.

    Raises:
      `errors.DataLossError` if offset is at a wrong location.
      `IndexError` if offset is out of range of the file.
    """
    with self._reader_read_locks[self._execution_path]:
      proto_string = self._get_reader(self._execution_path).read(offset)[0]
    return debug_event_pb2.DebugEvent.FromString(proto_string)

  def graph_execution_traces_iterators(self):
    return [
        self._generic_iterator(path)
        for path in self._graph_execution_traces_paths
    ]

  def read_graph_execution_traces_event(self, locator):
    """Read DebugEvent at given offset from given .graph_execution_traces file.

    Args:
      locator: A (file_index, offset) tuple that locates the DebugEvent
        containing the graph execution trace.

    Returns:
      A DebugEventProto.

    Raises:
      `errors.DataLossError` if offset is at a wrong location.
      `IndexError` if offset is out of range of the file.
    """
    file_index, offset = locator
    graph_execution_traces_path = self._graph_execution_traces_paths[file_index]
    with self._reader_read_locks[graph_execution_traces_path]:
      proto_string = self._get_reader(graph_execution_traces_path).read(
          offset)[0]
    return debug_event_pb2.DebugEvent.FromString(proto_string)

  def close(self):
    # Snapshot the keys under the creation lock, then close and drop each
    # reader.
    with self._readers_lock:
      file_paths = list(self._readers.keys())
    for file_path in file_paths:
      self._readers[file_path].close()
      del self._readers[file_path]
DebugEventsReader
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/engine/result.py
{ "start": 75582, "end": 77691 }
class ____(IteratorResult[Unpack[_Ts]]):
    """An :class:`_engine.IteratorResult` that works from an
    iterator-producing callable.

    The given ``chunks`` argument is a function that is given a number of rows
    to return in each chunk, or ``None`` for all rows. The function should
    then return an un-consumed iterator of lists, each list of the requested
    size.

    The function can be called at any time again, in which case it should
    continue from the same result set but adjust the chunk size as given.

    .. versionadded:: 1.4

    """

    def __init__(
        self,
        cursor_metadata: ResultMetaData,
        chunks: Callable[
            [Optional[int]], Iterator[Sequence[_InterimRowType[_R]]]
        ],
        source_supports_scalars: bool = False,
        raw: Optional[Result[Any]] = None,
        dynamic_yield_per: bool = False,
    ):
        self._metadata = cursor_metadata
        self.chunks = chunks
        self._source_supports_scalars = source_supports_scalars
        self.raw = raw
        # Flatten the stream of chunk lists into one continuous row iterator;
        # chunks(None) means "all remaining rows".
        self.iterator = itertools.chain.from_iterable(self.chunks(None))
        self.dynamic_yield_per = dynamic_yield_per

    @_generative
    def yield_per(self, num: int) -> Self:
        # TODO: this throws away the iterator which may be holding
        # onto a chunk. the yield_per cannot be changed once any
        # rows have been fetched. either find a way to enforce this,
        # or we can't use itertools.chain and will instead have to
        # keep track.
        self._yield_per = num
        self.iterator = itertools.chain.from_iterable(self.chunks(num))
        return self

    def _soft_close(self, hard: bool = False, **kw: Any) -> None:
        super()._soft_close(hard=hard, **kw)
        # After close, replace the chunk source so later fetches see no rows.
        self.chunks = lambda size: []  # type: ignore

    def _fetchmany_impl(
        self, size: Optional[int] = None
    ) -> List[_InterimRowType[Row[Unpack[TupleAny]]]]:
        if self.dynamic_yield_per:
            # Re-chunk on every fetchmany so the chunk size tracks `size`.
            self.iterator = itertools.chain.from_iterable(self.chunks(size))
        return super()._fetchmany_impl(size=size)
ChunkedIteratorResult
python
spack__spack
lib/spack/spack/vendor/jinja2/nodes.py
{ "start": 11311, "end": 11677 }
class ____(Stmt):
    """A macro definition.  `name` is the name of the macro, `args` a list of
    arguments and `defaults` a list of defaults if there are any. `body` is
    a list of nodes for the macro body.
    """

    # Jinja node metadata: attribute names in constructor order.
    fields = ("name", "args", "defaults", "body")

    name: str
    args: t.List["Name"]
    defaults: t.List["Expr"]
    body: t.List[Node]
Macro
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/hooks/spanner.py
{ "start": 1815, "end": 2010 }
class ____(NamedTuple):
    """Information about Google Spanner connection parameters."""

    # Google Cloud project id, if provided.
    project_id: str | None
    # Spanner instance identifier, if provided.
    instance_id: str | None
    # Database identifier within the instance, if provided.
    database_id: str | None
SpannerConnectionParams
python
doocs__leetcode
solution/1300-1399/1347.Minimum Number of Steps to Make Two Strings Anagram/Solution.py
{ "start": 0, "end": 197 }
class ____: def minSteps(self, s: str, t: str) -> int: cnt = Counter(s) ans = 0 for c in t: cnt[c] -= 1 ans += cnt[c] < 0 return ans
Solution
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1049634, "end": 1050028 }
class ____(sgqlc.types.Type):
    """An edge in a connection."""

    # NOTE: auto-generated from the GitHub GraphQL schema; do not edit by hand.
    __schema__ = github_schema
    __field_names__ = ("cursor", "node")
    cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
    """A cursor for use in pagination."""

    node = sgqlc.types.Field("UserContentEdit", graphql_name="node")
    """The item at the end of the edge."""
UserContentEditEdge
python
celery__celery
t/unit/worker/test_state.py
{ "start": 5367, "end": 6339 }
class ____():
    """Verify that celery.worker.state limits/expiries honor environment
    variables, falling back to the documented defaults otherwise."""

    @staticmethod
    def import_state():
        # Re-import the module from scratch so its module-level constants
        # are recomputed from the current environment; patch.dict restores
        # sys.modules afterwards.
        with patch.dict(sys.modules):
            del sys.modules['celery.worker.state']
            return import_module('celery.worker.state')

    @patch.dict(os.environ, {
        'CELERY_WORKER_REVOKES_MAX': '50001',
        'CELERY_WORKER_SUCCESSFUL_MAX': '1001',
        'CELERY_WORKER_REVOKE_EXPIRES': '10801',
        'CELERY_WORKER_SUCCESSFUL_EXPIRES': '10801',
    })
    def test_custom_configuration(self):
        # Each constant should come from the patched environment variables.
        state = self.import_state()
        assert state.REVOKES_MAX == 50001
        assert state.SUCCESSFUL_MAX == 1001
        assert state.REVOKE_EXPIRES == 10801
        assert state.SUCCESSFUL_EXPIRES == 10801

    def test_default_configuration(self):
        # With no env overrides the module's built-in defaults apply.
        state = self.import_state()
        assert state.REVOKES_MAX == 50000
        assert state.SUCCESSFUL_MAX == 1000
        assert state.REVOKE_EXPIRES == 10800
        assert state.SUCCESSFUL_EXPIRES == 10800
test_state_configuration
python
tensorflow__tensorflow
tensorflow/python/ops/data_flow_ops.py
{ "start": 70837, "end": 78824 }
class ____(BaseStagingArea):
  """Class for staging inputs. No ordering guarantees.

  A `StagingArea` is a TensorFlow data structure that stores tensors across
  multiple steps, and exposes operations that can put and get tensors.

  Each `StagingArea` element is a tuple of one or more tensors, where each
  tuple component has a static dtype, and may have a static shape.

  The capacity of a `StagingArea` may be bounded or unbounded.
  It supports multiple concurrent producers and consumers; and
  provides exactly-once delivery.

  Each element of a `StagingArea` is a fixed-length tuple of tensors whose
  dtypes are described by `dtypes`, and whose shapes are optionally described
  by the `shapes` argument.

  If the `shapes` argument is specified, each component of a staging area
  element must have the respective fixed shape. If it is
  unspecified, different elements may have different shapes,

  It can be configured with a capacity in which case
  put(values) will block until space becomes available.

  Similarly, it can be configured with a memory limit which
  will block put(values) until space is available.
  This is mostly useful for limiting the number of tensors on
  devices such as GPUs.

  All get() and peek() commands block if the requested data
  is not present in the Staging Area.
  """

  def __init__(self,
               dtypes,
               shapes=None,
               names=None,
               shared_name=None,
               capacity=0,
               memory_limit=0):
    """Constructs a staging area object.

    The two optional lists, `shapes` and `names`, must be of the same length
    as `dtypes` if provided.  The values at a given index `i` indicate the
    shape and name to use for the corresponding queue component in `dtypes`.

    The device scope at the time of object creation determines where the
    storage for the `StagingArea` will reside.  Calls to `put` will incur a
    copy to this memory space, if necessary.  Tensors returned by `get` will
    be placed according to the device scope when `get` is called.

    Args:
      dtypes: A list of types.  The length of dtypes must equal the number
        of tensors in each element.
      shapes: (Optional.) Constraints on the shapes of tensors in an element.
        A list of shape tuples or None. This list is the same length
        as dtypes.  If the shape of any tensors in the element are constrained,
        all must be; shapes can be None if the shapes should not be constrained.
      names: (Optional.) If provided, the `get()` and
        `put()` methods will use dictionaries with these names as keys.
        Must be None or a list or tuple of the same length as `dtypes`.
      shared_name: (Optional.) A name to be used for the shared object. By
        passing the same name to two different python objects they will share
        the underlying staging area. Must be a string.
      capacity: (Optional.) Maximum number of elements.
        An integer. If zero, the Staging Area is unbounded
      memory_limit: (Optional.) Maximum number of bytes of all tensors
        in the Staging Area.
        An integer. If zero, the Staging Area is unbounded

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    super(StagingArea, self).__init__(dtypes, shapes, names, shared_name,
                                      capacity, memory_limit)

  def put(self, values, name=None):
    """Create an op that places a value into the staging area.

    This operation will block if the `StagingArea` has reached
    its capacity.

    Args:
      values: A single tensor, a list or tuple of tensors, or a dictionary with
        tensor values. The number of elements must match the length of the
        list provided to the dtypes argument when creating the StagingArea.
      name: A name for the operation (optional).

    Returns:
        The created op.

    Raises:
      ValueError: If the number or type of inputs don't match the staging
      area.
    """
    with ops.name_scope(name, "%s_put" % self._name,
                        self._scope_vals(values)) as scope:

      if not isinstance(values, (list, tuple, dict)):
        values = [values]

      # Hard-code indices for this staging area
      indices = list(range(len(values)))
      vals, _ = self._check_put_dtypes(values, indices)

      # Stage on the same device where the staging area's storage lives.
      with ops.colocate_with(self._coloc_op):
        op = gen_data_flow_ops.stage(
            values=vals,
            shared_name=self._name,
            name=scope,
            capacity=self._capacity,
            memory_limit=self._memory_limit)

      return op

  def __internal_get(self, get_fn, name):
    with ops.colocate_with(self._coloc_op):
      ret = get_fn()

    indices = list(range(len(self._dtypes)))  # Hard coded
    return self._get_return_value(ret, indices)

  def get(self, name=None):
    """Gets one element from this staging area.

    If the staging area is empty when this operation executes, it will block
    until there is an element to dequeue.

    Note that unlike others ops that can block, like the queue Dequeue
    operations, this can stop other work from happening.  To avoid this, the
    intended use is for this to be called only when there will be an element
    already available.  One method for doing this in a training loop would be
    to run a `put()` call during a warmup session.run call, and then call both
    `get()` and `put()` in each subsequent step.

    The placement of the returned tensor will be determined by the current
    device scope when this function is called.

    Args:
      name: A name for the operation (optional).

    Returns:
      The tuple of tensors that was gotten.
    """
    if name is None:
      name = "%s_get" % self._name

    # pylint: disable=bad-continuation
    fn = lambda: gen_data_flow_ops.unstage(dtypes=self._dtypes,
                    shared_name=self._name, name=name,
                    capacity=self._capacity,
                    memory_limit=self._memory_limit)
    # pylint: enable=bad-continuation

    return self.__internal_get(fn, name)

  def peek(self, index, name=None):
    """Peeks at an element in the staging area.

    If the staging area is too small to contain the element at
    the specified index, it will block until enough elements
    are inserted to complete the operation.

    The placement of the returned tensor will be determined by
    the current device scope when this function is called.

    Args:
      index: The index of the tensor within the staging area
              to look up.
      name: A name for the operation (optional).

    Returns:
      The tuple of tensors that was gotten.
    """
    if name is None:
      name = "%s_peek" % self._name

    # pylint: disable=bad-continuation
    fn = lambda: gen_data_flow_ops.stage_peek(index,
                    dtypes=self._dtypes, shared_name=self._name,
                    name=name, capacity=self._capacity,
                    memory_limit=self._memory_limit)
    # pylint: enable=bad-continuation

    return self.__internal_get(fn, name)

  def size(self, name=None):
    """Returns the number of elements in the staging area.

    Args:
        name: A name for the operation (optional)

    Returns:
        The created op
    """
    if name is None:
      name = "%s_size" % self._name

    return gen_data_flow_ops.stage_size(
        name=name,
        shared_name=self._name,
        dtypes=self._dtypes,
        capacity=self._capacity,
        memory_limit=self._memory_limit)

  def clear(self, name=None):
    """Clears the staging area.

    Args:
        name: A name for the operation (optional)

    Returns:
        The created op
    """
    if name is None:
      name = "%s_clear" % self._name

    return gen_data_flow_ops.stage_clear(
        name=name,
        shared_name=self._name,
        dtypes=self._dtypes,
        capacity=self._capacity,
        memory_limit=self._memory_limit)
StagingArea
python
huggingface__transformers
src/transformers/models/lilt/modeling_lilt.py
{ "start": 32274, "end": 36481 }
class ____(LiltPreTrainedModel):
    # Copied from transformers.models.roberta.modeling_roberta.RobertaForTokenClassification.__init__ with Roberta->Lilt, roberta->lilt
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        # Base LiLT encoder without pooling; classification is per-token.
        self.lilt = LiltModel(config, add_pooling_layer=False)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        bbox: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
            Bounding boxes of each input sequence tokens. Selected in the range `[0,
            config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1,
            y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and
            (x1, y1) represents the position of the lower right corner. See [Overview](#Overview) for normalization.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AutoModelForTokenClassification
        >>> from datasets import load_dataset

        >>> tokenizer = AutoTokenizer.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
        >>> model = AutoModelForTokenClassification.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")

        >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
        >>> example = dataset[0]
        >>> words = example["tokens"]
        >>> boxes = example["bboxes"]

        >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")

        >>> outputs = model(**encoding)
        >>> predicted_class_indices = outputs.logits.argmax(-1)
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.lilt(
            input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            # move labels to correct device
            labels = labels.to(logits.device)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


# Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Lilt
LiltForTokenClassification
python
huggingface__transformers
src/transformers/models/aimv2/modular_aimv2.py
{ "start": 13280, "end": 13317 }
class ____(LlamaMLP):
    # Aimv2 reuses the Llama MLP implementation unchanged; the subclass
    # exists so modular transformers generates an Aimv2-named copy.
    pass
Aimv2MLP
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_imdb_id.py
{ "start": 1663, "end": 3965 }
class ____(ColumnMapExpectation): """Expect column values to conform to valid IMDB ID format.""" # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "well_formed_imdb_id": [ "tt0068646", "nm1827914", "ch0000985", "ev0000003/2015", "tt11696836/characters/nm11012957", ], "malformed_imdb_id": [ "", "42", "ab0068646", "ev0000003/1891", "This is not a valid IMDB ID", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "well_formed_imdb_id"}, "out": {"success": True}, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "malformed_imdb_id"}, "out": {"success": False}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.valid_imdb_id" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", "tags": ["experimental", "hackathon", "typed-entities"], "contributors": [ "@voidforall", ], } if __name__ == "__main__": ExpectColumnValuesToBeValidImdbId().print_diagnostic_checklist()
ExpectColumnValuesToBeValidImdbId
python
spyder-ide__spyder
spyder/plugins/debugger/widgets/main_widget.py
{ "start": 2584, "end": 27950 }
class ____(ShellConnectMainWidget): # PluginMainWidget class constants SHOW_MESSAGE_WHEN_EMPTY = True IMAGE_WHEN_EMPTY = "debugger" MESSAGE_WHEN_EMPTY = _("Debugging is not active") DESCRIPTION_WHEN_EMPTY = _( "Start a debugging session with the ⏯ button, allowing you to step " "through your code and see the functions here that Python has run." ) SET_LAYOUT_WHEN_EMPTY = False # Signals sig_edit_goto = Signal(str, int, str) """ This signal will request to open a file in a given row and column using a code editor. Parameters ---------- path: str Path to file. row: int Cursor starting row position. word: str Word to select on given row. """ sig_breakpoints_saved = Signal() """Breakpoints have been saved""" sig_toggle_breakpoints = Signal() """Add or remove a breakpoint on the current line.""" sig_toggle_conditional_breakpoints = Signal() """Add or remove a conditional breakpoint on the current line.""" sig_clear_all_breakpoints = Signal() """Clear all breakpoints in all files.""" sig_clear_breakpoint = Signal(str, int) """ Clear single breakpoint. Parameters ---------- filename: str The filename line_number: int The line number """ sig_pdb_state_changed = Signal(bool) """ This signal is emitted every time a Pdb interaction happens. Parameters ---------- pdb_state: bool Whether the debugger is waiting for input """ sig_load_pdb_file = Signal(str, int) """ This signal is emitted when Pdb reaches a new line. Parameters ---------- filename: str The filename the debugger stepped in line_number: int The line number the debugger stepped in """ sig_switch_to_plugin_requested = Signal() """ This signal will request to change the focus to the plugin. 
""" def __init__(self, name=None, plugin=None, parent=None): super().__init__(name, plugin, parent, set_layout=False) # Widgets self.breakpoints_table = BreakpointTableView(self, {}) self.breakpoints_table.hide() # Splitter self.splitter = QSplitter(self) self.splitter.addWidget(self._stack) self.splitter.addWidget(self.breakpoints_table) self.splitter.setContentsMargins(0, 0, 0, 0) self.splitter.setChildrenCollapsible(True) # This is necessary so that the border radius is maintained when # showing/hiding the breakpoints table self.splitter.setStyleSheet( f"border-radius: {SpyderPalette.SIZE_BORDER_RADIUS}" ) # To manipulate the control debugger buttons in the Debug toolbar self._control_debugger_toolbar_widgets = [] self._app_toolbar_button_width = ( int(APP_TOOLBAR_STYLESHEET.BUTTON_WIDTH.split("px")[0]) ) # Layout layout = QHBoxLayout() layout.setSpacing(0) layout.setContentsMargins(0, 0, 0, 0) layout.addWidget(self.splitter) self.setLayout(layout) # Signals bpt = self.breakpoints_table bpt.sig_clear_all_breakpoints_requested.connect( self.sig_clear_all_breakpoints) bpt.sig_clear_breakpoint_requested.connect( self.sig_clear_breakpoint) bpt.sig_edit_goto_requested.connect(self.sig_edit_goto) bpt.sig_conditional_breakpoint_requested.connect( self.sig_toggle_conditional_breakpoints) self.sig_breakpoints_saved.connect(self.set_data) # ---- PluginMainWidget API # ------------------------------------------------------------------------ def get_title(self): return _('Debugger') def get_focus_widget(self): return self.current_widget() def setup(self): """Setup the widget.""" self.breakpoints_table.setup() self.set_data() # ---- Options menu actions exclude_internal_action = self.create_action( DebuggerWidgetActions.ToggleExcludeInternal, text=_("Exclude internal frames when inspecting execution"), tip=_("Exclude frames that are not part of the user code"), toggled=True, option='exclude_internal', ) # ---- Toolbar actions search_action = self.create_action( 
DebuggerWidgetActions.Search, text=_("Search frames"), icon=self.create_icon('find'), toggled=self.toggle_finder, register_shortcut=True ) inspect_action = self.create_action( DebuggerWidgetActions.Inspect, text=_("Inspect execution"), icon=self.create_icon('show'), triggered=self.capture_frames, register_shortcut=True, ) enter_debug_action = self.create_action( DebuggerWidgetActions.EnterDebug, text=_("Start debugging after last error"), icon=self.create_icon("postmortem_debug"), triggered=self.enter_debugger_after_exception, register_shortcut=True, ) interrupt_and_debug_action = self.create_action( DebuggerWidgetActions.InterrupAndDebug, text=_("Interrupt execution and start the debugger"), icon=self.create_icon("interrupt_and_debug"), triggered=self.interrupt_and_debug, register_shortcut=True, ) next_action = self.create_action( DebuggerWidgetActions.Next, text=_("Debug current line"), icon=self.create_icon('arrow-step-over'), triggered=lambda: self.debug_command("next"), register_shortcut=True, context=Qt.ApplicationShortcut, shortcut_context="_" ) continue_action = self.create_action( DebuggerWidgetActions.Continue, text=_("Execute until next breakpoint"), icon=self.create_icon('arrow-continue'), triggered=lambda: self.debug_command("continue"), register_shortcut=True, context=Qt.ApplicationShortcut, shortcut_context="_" ) step_action = self.create_action( DebuggerWidgetActions.Step, text=_("Step into function or method"), icon=self.create_icon('arrow-step-in'), triggered=lambda: self.debug_command("step"), register_shortcut=True, context=Qt.ApplicationShortcut, shortcut_context="_" ) return_action = self.create_action( DebuggerWidgetActions.Return, text=_("Execute until function returns"), icon=self.create_icon('arrow-step-out'), triggered=lambda: self.debug_command("return"), register_shortcut=True, context=Qt.ApplicationShortcut, shortcut_context="_" ) stop_action = self.create_action( DebuggerWidgetActions.Stop, text=_("Stop debugging"), 
icon=self.create_icon('stop_debug'), triggered=self.stop_debugging, register_shortcut=True, context=Qt.ApplicationShortcut, shortcut_context="_" ) goto_cursor_action = self.create_action( DebuggerWidgetActions.GotoCursor, text=_( "Show the file and line where the debugger is placed in the " "editor" ), icon=self.create_icon("go_to_editor"), triggered=self.goto_current_step, register_shortcut=True, ) self.create_action( DebuggerBreakpointActions.ToggleBreakpoint, text=_("Toggle breakpoint"), tip=_("Set or clear a breakpoint on the current line"), icon=self.create_icon('breakpoint_big'), triggered=self.sig_toggle_breakpoints, register_shortcut=True, ) self.create_action( DebuggerBreakpointActions.ToggleConditionalBreakpoint, text=_("Set/edit conditional breakpoint"), tip=_("Set a conditional breakpoint, or edit an existing one"), icon=self.create_icon('breakpoint_cond_big'), triggered=self.sig_toggle_conditional_breakpoints, register_shortcut=True, ) self.create_action( DebuggerBreakpointActions.ClearAllBreakpoints, text=_("Clear breakpoints in all files"), tip=_("Clear breakpoints in all files"), triggered=self.sig_clear_all_breakpoints ) self.create_action( DebuggerBreakpointActions.ShowBreakpointsTable, _("List breakpoints"), triggered=self.list_breakpoints, ) toggle_breakpoints_action = self.create_action( DebuggerBreakpointActions.ToggleBreakpointsTable, _("Show breakpoints"), icon=self.create_icon("list_breakpoints"), toggled=True, initial=self.get_conf('breakpoints_table_visible'), option='breakpoints_table_visible' ) # Options menu options_menu = self.get_options_menu() for item in [exclude_internal_action]: self.add_item_to_menu( item, menu=options_menu, section=DebuggerWidgetOptionsMenuSections.Display, ) # Main toolbar main_toolbar = self.get_main_toolbar() for item in [ next_action, continue_action, step_action, return_action, stop_action, ]: self.add_item_to_toolbar( item, toolbar=main_toolbar, section=DebuggerWidgetMainToolBarSections.Control, ) for 
item in [ enter_debug_action, interrupt_and_debug_action, inspect_action, ]: self.add_item_to_toolbar( item, toolbar=main_toolbar, section=DebuggerWidgetMainToolBarSections.InteractWithConsole, ) stretcher = self.create_stretcher( DebuggerWidgetToolbarItems.ToolbarStretcher ) for item in [ goto_cursor_action, search_action, stretcher, toggle_breakpoints_action, ]: self.add_item_to_toolbar( item, toolbar=main_toolbar, section=DebuggerWidgetMainToolBarSections.Extras, ) def update_actions(self): """Update actions.""" try: search_action = self.get_action(DebuggerWidgetActions.Search) enter_debug_action = self.get_action( DebuggerWidgetActions.EnterDebug ) interrupt_and_debug_action = self.get_action( DebuggerWidgetActions.InterrupAndDebug ) inspect_action = self.get_action( DebuggerWidgetActions.Inspect ) widget = self.current_widget() if self.is_current_widget_error_message() or widget is None: search_action.setEnabled(False) post_mortem = False executing = False pdb_prompt = False is_debugging = False else: search_action.setEnabled(True) search_action.setChecked(widget.finder_is_visible()) post_mortem = widget.state == FramesBrowserState.Error sw = widget.shellwidget executing = sw._executing pdb_prompt = sw.is_waiting_pdb_input() is_debugging = sw.is_debugging() enter_debug_action.setEnabled(post_mortem and not executing) interrupt_and_debug_action.setEnabled(executing) inspect_action.setEnabled(executing) for action_name in [ DebuggerWidgetActions.Next, DebuggerWidgetActions.Continue, DebuggerWidgetActions.Step, DebuggerWidgetActions.Return, DebuggerWidgetActions.Stop, DebuggerWidgetActions.GotoCursor]: action = self.get_action(action_name) action.setEnabled(pdb_prompt) self._set_visible_control_debugger_buttons( pdb_prompt or is_debugging ) rows = self.breakpoints_table.selectionModel().selectedRows() initial_row = rows[0] if rows else None enabled = ( bool(self.breakpoints_table.model.breakpoints) and initial_row is not None ) clear_action = self.get_action( 
BreakpointTableViewActions.ClearBreakpoint) edit_action = self.get_action( BreakpointTableViewActions.EditBreakpoint) clear_action.setEnabled(enabled) edit_action.setEnabled(enabled) except RuntimeError: pass @on_conf_change(option='breakpoints_table_visible') def on_breakpoints_table_option_update(self, value): action = self.get_action( DebuggerBreakpointActions.ToggleBreakpointsTable) if value: self.breakpoints_table.show() action.setToolTip(_("Hide breakpoints")) action.setText(_("Hide breakpoints")) self._update_stylesheet(is_table_shown=True) else: self.breakpoints_table.hide() action.setToolTip(_("Show breakpoints")) action.setText(_("Show breakpoints")) self._update_stylesheet(is_table_shown=False) # ---- ShellConnectMainWidget API # ------------------------------------------------------------------------ def create_new_widget(self, shellwidget): """Create a new widget.""" widget = FramesBrowser(self, shellwidget=shellwidget) widget.sig_edit_goto.connect(self.sig_edit_goto) widget.sig_hide_finder_requested.connect(self.hide_finder) widget.sig_update_actions_requested.connect(self.update_actions) widget.sig_show_empty_message_requested.connect( self.switch_empty_message ) shellwidget.sig_prompt_ready.connect(widget.clear_if_needed) shellwidget.sig_pdb_prompt_ready.connect(widget.clear_if_needed) shellwidget.sig_prompt_ready.connect(self.update_actions) shellwidget.sig_pdb_prompt_ready.connect(self.update_actions) shellwidget.executing.connect(self.update_actions) shellwidget.register_kernel_call_handler( "show_traceback", widget.show_exception ) shellwidget.sig_pdb_stack.connect(widget.set_from_pdb) shellwidget.sig_config_spyder_kernel.connect( widget.on_config_kernel) widget.setup() self.sig_breakpoints_saved.connect(widget.set_breakpoints) shellwidget.sig_pdb_state_changed.connect(self.sig_pdb_state_changed) shellwidget.sig_pdb_step.connect(widget.pdb_has_stopped) widget.sig_load_pdb_file.connect(self.sig_load_pdb_file) return widget def switch_widget(self, 
widget, old_widget): """Set the current FramesBrowser.""" if not self.is_current_widget_error_message(): sw = widget.shellwidget state = sw.is_waiting_pdb_input() self.sig_pdb_state_changed.emit(state) def close_widget(self, widget): """Close widget.""" widget.sig_edit_goto.disconnect(self.sig_edit_goto) widget.sig_hide_finder_requested.disconnect(self.hide_finder) widget.sig_update_actions_requested.disconnect(self.update_actions) shellwidget = widget.shellwidget try: shellwidget.sig_prompt_ready.disconnect(widget.clear_if_needed) shellwidget.sig_prompt_ready.disconnect(self.update_actions) except (TypeError, RuntimeError): # disconnect was called elsewhere without argument pass shellwidget.sig_pdb_prompt_ready.disconnect(widget.clear_if_needed) shellwidget.sig_pdb_prompt_ready.disconnect(self.update_actions) shellwidget.executing.disconnect(self.update_actions) shellwidget.unregister_kernel_call_handler("show_traceback") shellwidget.sig_pdb_stack.disconnect(widget.set_from_pdb) shellwidget.sig_config_spyder_kernel.disconnect( widget.on_config_kernel) widget.on_unconfig_kernel() self.sig_breakpoints_saved.disconnect(widget.set_breakpoints) shellwidget.sig_pdb_state_changed.disconnect( self.sig_pdb_state_changed) shellwidget.sig_pdb_step.disconnect(widget.pdb_has_stopped) widget.sig_load_pdb_file.disconnect(self.sig_load_pdb_file) widget.close() widget.setParent(None) # ---- Public API # ------------------------------------------------------------------------ def goto_current_step(self): """Go to last pdb step.""" fname, lineno = self.get_pdb_last_step() if fname: self.sig_load_pdb_file.emit(fname, lineno) def print_debug_file_msg(self): """Print message in the current console when a file can't be closed.""" widget = self.current_widget() if widget is None: return False sw = widget.shellwidget debug_msg = _('The current file cannot be closed because it is ' 'in debug mode.') sw.append_html_message(debug_msg, before_prompt=True) def set_pdb_take_focus(self, 
take_focus): """ Set whether current Pdb session should take focus when stopping on the next call. """ widget = self.current_widget() if widget is None or self.is_current_widget_error_message(): return False widget.shellwidget._pdb_take_focus = take_focus @Slot(bool) def toggle_finder(self, checked): """Show or hide finder.""" widget = self.current_widget() if widget is None or self.is_current_widget_error_message(): return widget.toggle_finder(checked) def get_pdb_state(self): """Get debugging state of the current console.""" widget = self.current_widget() if widget is None or self.is_current_widget_error_message(): return False sw = widget.shellwidget if sw is not None: return sw.is_waiting_pdb_input() return False def get_pdb_last_step(self): """Get last pdb step of the current console.""" widget = self.current_widget() if widget is None or self.is_current_widget_error_message(): return None, None sw = widget.shellwidget if sw is not None: return sw.get_pdb_last_step() return None, None @Slot() def hide_finder(self): """Hide finder.""" action = self.get_action(DebuggerWidgetActions.Search) action.setChecked(False) def enter_debugger_after_exception(self): """Enter the debugger after an exception.""" widget = self.current_widget() if widget is None: return # Enter the debugger sw = widget.shellwidget if widget.state == FramesBrowserState.Error: # Debug the last exception sw.execute("%debug") return def interrupt_and_debug(self): """ If the shell is executing, interrupt execution and enter debugger. 
""" widget = self.current_widget() if widget is None: return # Enter the debugger sw = widget.shellwidget if sw._executing: sw.call_kernel( interrupt=True, callback=widget.show_pdb_preview ).get_current_frames( ignore_internal_threads=True ) sw.call_kernel(interrupt=True).request_pdb_stop() return def capture_frames(self): """Refresh frames table""" widget = self.current_widget() if widget is None: return if widget.shellwidget.is_waiting_pdb_input(): # Disabled while waiting pdb input as the pdb stack is shown return widget.shellwidget.call_kernel( interrupt=True, callback=widget.show_captured_frames ).get_current_frames( ignore_internal_threads=self.get_conf("exclude_internal") ) def stop_debugging(self): """Stop debugging""" self.sig_unmaximize_plugin_requested.emit() widget = self.current_widget() if widget is None: return widget.shellwidget.stop_debugging() def debug_command(self, command): """Debug actions""" self.sig_unmaximize_plugin_requested.emit() widget = self.current_widget() if widget is None: return widget.shellwidget.pdb_execute_command(command) def load_data(self): """ Load breakpoint data from configuration file. """ breakpoints_dict = self.get_conf( "breakpoints", default={}, ) for filename in list(breakpoints_dict.keys()): if not osp.isfile(filename): breakpoints_dict.pop(filename) continue # Make sure we don't have the same file under different names new_filename = osp.normcase(filename) if new_filename != filename: bp = breakpoints_dict.pop(filename) if new_filename in breakpoints_dict: breakpoints_dict[new_filename].extend(bp) else: breakpoints_dict[new_filename] = bp return breakpoints_dict def set_data(self, data=None): """ Set breakpoint data on widget. Parameters ---------- data: dict, optional Breakpoint data to use. If None, data from the configuration will be loaded. Default is None. 
""" if data is None: data = self.load_data() self.breakpoints_table.set_data(data) def list_breakpoints(self): """Show breakpoints state and switch to plugin.""" self.set_conf('breakpoints_table_visible', True) self.sig_switch_to_plugin_requested.emit() def update_splitter_widths(self, base_width): """ Update the splitter widths to provide the breakpoints table with a reasonable initial width. Parameters ---------- base_width: int The available widget width. """ if (base_width // 3) > self.breakpoints_table.MIN_INITIAL_WIDTH: table_width = base_width // 3 else: table_width = self.breakpoints_table.MIN_INITIAL_WIDTH if base_width - table_width > 0: self.splitter.setSizes([base_width - table_width, table_width]) def on_debug_toolbar_rendered(self): """Actions to take when the Debug toolbar is rendered.""" debug_toolbar = self.get_toolbar( ApplicationToolbars.Debug, plugin=Plugins.Toolbar ) # Get widgets corresponding to control debugger actions in the Debug # toolbar for action_id in [ DebuggerWidgetActions.Next, DebuggerWidgetActions.Step, DebuggerWidgetActions.Return, DebuggerWidgetActions.Continue, DebuggerWidgetActions.Stop, ]: action = self.get_action(action_id) widget = debug_toolbar.widgetForAction(action) # Hide widgets by default because no debugging session is # active at startup widget.setFixedWidth(0) # Save widgets in this list to manipulate them later self._control_debugger_toolbar_widgets.append(widget) # ---- Private API # ------------------------------------------------------------------------ def _update_stylesheet(self, is_table_shown=False): """Update stylesheet when the breakpoints table is shown/hidden.""" # Remove right border radius for stack when table is shown and restore # it when hidden. 
if is_table_shown: border_radius = '0px' else: border_radius = SpyderPalette.SIZE_BORDER_RADIUS css = qstylizer.style.StyleSheet() css.setValues( borderTopRightRadius=f'{border_radius}', borderBottomRightRadius=f'{border_radius}', ) self._stack.setStyleSheet(css.toString()) def _set_visible_control_debugger_buttons(self, visible: bool): """Show/hide control debugger buttons in the Debug toolbar.""" for widget in self._control_debugger_toolbar_widgets: if visible: widget.setFixedWidth(self._app_toolbar_button_width) else: widget.setFixedWidth(0)
DebuggerWidget
python
fastai__fastai
fastai/callback/training.py
{ "start": 1558, "end": 2254 }
class ____(Callback): "Clip norm of gradients" order=MixedPrecision.order+1 def __init__(self,max_norm:float=1., norm_type:float=2.0): store_attr() def before_step(self): nn.utils.clip_grad_norm_(self.parameters(), self.max_norm, self.norm_type) # %% ../../nbs/18a_callback.training.ipynb 23 bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d) def set_bn_eval(m:nn.Module, use_eval=True)->None: "Set bn layers in eval mode for all recursive children of `m`." for l in m.children(): if isinstance(l, bn_types) and not next(l.parameters()).requires_grad: if use_eval: l.eval() else: l.train() set_bn_eval(l)
GradientClip
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/triggers/eks.py
{ "start": 14400, "end": 16175 }
class ____(AwsBaseWaiterTrigger): """ Trigger for EksDeleteNodegroupOperator. The trigger will asynchronously poll the boto3 API and wait for the nodegroup to be in the state specified by the waiter. :param cluster_name: The name of the EKS cluster associated with the node group. :param nodegroup_name: The name of the nodegroup to check. :param waiter_delay: The amount of time in seconds to wait between attempts. :param waiter_max_attempts: The maximum number of attempts to be made. :param aws_conn_id: The Airflow connection used for AWS credentials. :param region_name: Which AWS region the connection should use. (templated) If this is None or empty then the default boto3 behaviour is used. """ def __init__( self, cluster_name: str, nodegroup_name: str, waiter_delay: int, waiter_max_attempts: int, aws_conn_id: str | None, region_name: str | None = None, ): super().__init__( serialized_fields={"cluster_name": cluster_name, "nodegroup_name": nodegroup_name}, waiter_name="nodegroup_deleted", waiter_args={"clusterName": cluster_name, "nodegroupName": nodegroup_name}, failure_message="Error deleting nodegroup", status_message="Nodegroup status is", status_queries=["nodegroup.status", "nodegroup.health.issues"], return_value=None, waiter_delay=waiter_delay, waiter_max_attempts=waiter_max_attempts, aws_conn_id=aws_conn_id, region_name=region_name, ) def hook(self) -> AwsGenericHook: return EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
EksDeleteNodegroupTrigger
python
pytorch__pytorch
torch/_inductor/pattern_matcher.py
{ "start": 28893, "end": 28963 }
class ____(_TargetExprVarArgs): op = "call_module"
CallModuleVarArgs
python
milvus-io__pymilvus
tests/test_milvus_client.py
{ "start": 268, "end": 2591 }
class ____: @pytest.mark.parametrize("index_params", [None, {}, "str", MilvusClient.prepare_index_params()]) def test_create_index_invalid_params(self, index_params): mock_handler = MagicMock() mock_handler.get_server_type.return_value = "milvus" with patch('pymilvus.milvus_client.milvus_client.create_connection', return_value="test"), \ patch('pymilvus.orm.connections.Connections._fetch_handler', return_value=mock_handler): client = MilvusClient() if isinstance(index_params, IndexParams): with pytest.raises(ParamError, match="IndexParams is empty, no index can be created"): client.create_index("test_collection", index_params) elif index_params is None: with pytest.raises(ParamError, match="missing required argument:.*"): client.create_index("test_collection", index_params) else: with pytest.raises(ParamError, match="wrong type of argument .*"): client.create_index("test_collection", index_params) def test_index_params(self): index_params = MilvusClient.prepare_index_params() assert len(index_params) == 0 index_params.add_index("vector", index_type="FLAT", metric_type="L2") assert len(index_params) == 1 index_params.add_index("vector2", index_type="HNSW", efConstruction=100, metric_type="L2") log.info(index_params) assert len(index_params) == 2 for index in index_params: log.info(index) def test_connection_reuse(self): mock_handler = MagicMock() mock_handler.get_server_type.return_value = "milvus" with patch("pymilvus.orm.connections.Connections.connect", return_value=None), \ patch("pymilvus.orm.connections.Connections._fetch_handler", return_value=mock_handler): client = MilvusClient() assert client._using == "http://localhost:19530" client = MilvusClient(user="test", password="foobar") assert client._using == "http://localhost:19530-test" client = MilvusClient(token="foobar") assert client._using == "http://localhost:19530-3858f62230ac3c915f300c664312c63f"
TestMilvusClient
python
tensorflow__tensorflow
tensorflow/python/data/experimental/kernel_tests/optimization/grappler_test.py
{ "start": 1415, "end": 3138 }
class ____(test_base.DatasetTestBase, parameterized.TestCase): @combinations.generate(test_base.default_test_combinations()) def testConstantFoldingVarLenFeature(self): example = example_pb2.Example(features=feature_pb2.Features(feature={})) dataset = dataset_ops.Dataset.from_tensors(example.SerializeToString()) def parse_fn(serialized): features = {"x": parsing_ops.VarLenFeature(dtypes.int64)} parsed = parsing_ops.parse_single_example(serialized, features) parsed = parsed["x"].values size = array_ops.size(parsed) value = math_ops.cast(parsed, dtypes.bool) return cond.cond(size > 0, lambda: array_ops.reshape(value, []), lambda: array_ops.zeros([], dtypes.bool)) dataset = dataset.map(parse_fn) self.assertDatasetProduces(dataset, expected_output=[0]) @combinations.generate(test_base.default_test_combinations()) def testLayoutOptimizationConv2D(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") # Compute convolution with input and filter of [1, 1, 1, 1] shape. # Verify that Grappler doesn't transpose Conv2D data format to NCHW. dataset = dataset_ops.Dataset.from_tensors((1, 1)) def map_function(x, y): i = math_ops.cast(x, dtypes.float32) i = array_ops.reshape(i, [1, 1, 1, 1]) f = math_ops.cast(y, dtypes.float32) f = array_ops.reshape(f, [1, 1, 1, 1]) c = nn_ops.conv2d(i, f, strides=[1, 1, 1, 1], padding="VALID") return array_ops.reshape(c, ()) dataset = dataset.map(map_function) self.assertDatasetProduces(dataset, expected_output=[1]) if __name__ == "__main__": test.main()
GrapplerTest
python
astropy__astropy
astropy/io/misc/ecsv.py
{ "start": 3807, "end": 6200 }
class ____: """ Class representing attributes of a column in an ECSV header. Parameters ---------- name : str The name of the column. datatype : str The data type of the column as specified in the ECSV header. subtype : str or None, optional The subtype of the column, if applicable. unit : str or None, optional The unit of the column values, if specified. description : str or None, optional A description of the column. format : str or None, optional The format string for the column values. meta : dict or None, optional Additional metadata associated with the column. Attributes ---------- csv_np_type : str Numpy type string describing the column CSV data. In practice this is the same as the ECSV ``datatype`` except that "string" => "str". This is provided to the engine ``convert_np_type()`` method to generate the engine-specific type provided to the CSV reader. For instance, for pandas the ``int32`` type gets converted to ``Int32`` to read columns as a nullable int32. dtype : `numpy.dtype` Numpy dtype in the final column data. This may be entirely different from ``csv_np_type`` in some cases, in particular JSON-encoded fields. shape : tuple of int Shape of the final column data. 
""" name: str datatype: str subtype: str | None = None unit: str | None = None description: str | None = None format: str | None = None meta: dict | None = None @functools.cached_property def csv_np_type(self) -> str: """Numpy type string describing the column CSV data.""" return self._derived_properties.csv_np_type @functools.cached_property def dtype(self) -> np.dtype: """Numpy dtype in the final column data""" return np.dtype(self._derived_properties.dtype) @functools.cached_property def shape(self) -> tuple[int, ...]: """Shape of the column data""" return self._derived_properties.shape @functools.cached_property def _derived_properties(self) -> DerivedColumnProperties: """Get the csv_np_type, dtype, and shape of the column from ECSV header.""" return get_csv_np_type_dtype_shape(self.datatype, self.subtype, self.name) @dataclass(slots=True, frozen=True)
ColumnECSV